From 51a4439737f6f6e0241cff84781a6a59cd94945e Mon Sep 17 00:00:00 2001
From: x54-729 <17307130121@fudan.edu.cn>
Date: Tue, 17 May 2022 17:05:19 +0000
Subject: [PATCH] Add dtype to torch.full in test_dist_utils.py for
 compatibility with different torch versions
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../drivers/torch_driver/test_dist_utils.py | 164 +++++++++---------
 1 file changed, 84 insertions(+), 80 deletions(-)

diff --git a/tests/core/drivers/torch_driver/test_dist_utils.py b/tests/core/drivers/torch_driver/test_dist_utils.py
index 0edfa3cd..a118e562 100644
--- a/tests/core/drivers/torch_driver/test_dist_utils.py
+++ b/tests/core/drivers/torch_driver/test_dist_utils.py
@@ -15,69 +15,20 @@ from tests.helpers.utils import re_run_current_cmd_for_torch, magic_argv_env_con
 @pytest.mark.torch
 @magic_argv_env_context
 def test_fastnlp_torch_all_gather():
-    os.environ['MASTER_ADDR'] = '127.0.0.1'
-    os.environ['MASTER_PORT'] = '29500'
-    if 'LOCAL_RANK' not in os.environ and 'RANK' not in os.environ and 'WORLD_SIZE' not in os.environ:
-        os.environ['LOCAL_RANK'] = '0'
-        os.environ['RANK'] = '0'
-        os.environ['WORLD_SIZE'] = '2'
-    re_run_current_cmd_for_torch(1, output_from_new_proc='all')
-    torch.distributed.init_process_group(backend='nccl')
-    torch.distributed.barrier()
-    local_rank = int(os.environ['LOCAL_RANK'])
-    torch.cuda.set_device(local_rank)
-    obj = {
-        'tensor': torch.full(size=(2,), fill_value=local_rank).cuda(),
-        'numpy': np.full(shape=(2, ), fill_value=local_rank),
-        'bool': local_rank%2==0,
-        'float': local_rank + 0.1,
-        'int': local_rank,
-        'dict': {
-            'rank': local_rank
-        },
-        'list': [local_rank]*2,
-        'str': f'{local_rank}',
-        'tensors': [torch.full(size=(2,), fill_value=local_rank).cuda(),
-                    torch.full(size=(2,), fill_value=local_rank).cuda()]
-    }
-    data = fastnlp_torch_all_gather(obj)
-    world_size = int(os.environ['WORLD_SIZE'])
-    assert len(data) == world_size
-    for i in range(world_size):
-        assert (data[i]['tensor']==i).sum()==world_size
-        assert data[i]['numpy'][0]==i
-        assert data[i]['bool']==(i%2==0)
-        assert np.allclose(data[i]['float'], i+0.1)
-        assert data[i]['int'] == i
-        assert data[i]['dict']['rank'] == i
-        assert data[i]['list'][0] == i
-        assert data[i]['str'] == f'{i}'
-        assert data[i]['tensors'][0][0] == i
-
-    for obj in [1, True, 'xxx']:
-        data = fastnlp_torch_all_gather(obj)
-        assert len(data)==world_size
-        assert data[0]==data[1]
-
-    dist.destroy_process_group()
-
-@pytest.mark.torch
-@magic_argv_env_context
-def test_fastnlp_torch_broadcast_object():
-    os.environ['MASTER_ADDR'] = '127.0.0.1'
-    os.environ['MASTER_PORT'] = '29500'
-    if 'LOCAL_RANK' not in os.environ and 'RANK' not in os.environ and 'WORLD_SIZE' not in os.environ:
-        os.environ['LOCAL_RANK'] = '0'
-        os.environ['RANK'] = '0'
-        os.environ['WORLD_SIZE'] = '2'
-    re_run_current_cmd_for_torch(1, output_from_new_proc='all')
-    torch.distributed.init_process_group(backend='nccl')
-    torch.distributed.barrier()
-    local_rank = int(os.environ['LOCAL_RANK'])
-    torch.cuda.set_device(local_rank)
-    if os.environ['LOCAL_RANK']=="0":
+    try:
+        os.environ['MASTER_ADDR'] = '127.0.0.1'
+        os.environ['MASTER_PORT'] = '29500'
+        if 'LOCAL_RANK' not in os.environ and 'RANK' not in os.environ and 'WORLD_SIZE' not in os.environ:
+            os.environ['LOCAL_RANK'] = '0'
+            os.environ['RANK'] = '0'
+            os.environ['WORLD_SIZE'] = '2'
+        re_run_current_cmd_for_torch(1, output_from_new_proc='all')
+        torch.distributed.init_process_group(backend='nccl')
+        torch.distributed.barrier()
+        local_rank = int(os.environ['LOCAL_RANK'])
+        torch.cuda.set_device(local_rank)
         obj = {
-            'tensor': torch.full(size=(2,), fill_value=local_rank).cuda(),
+            'tensor': torch.full(size=(2,), fill_value=local_rank, dtype=int).cuda(),
             'numpy': np.full(shape=(2, ), fill_value=local_rank),
             'bool': local_rank%2==0,
             'float': local_rank + 0.1,
@@ -87,24 +38,77 @@ def test_fastnlp_torch_broadcast_object():
             },
             'list': [local_rank]*2,
             'str': f'{local_rank}',
-            'tensors': [torch.full(size=(2,), fill_value=local_rank).cuda(),
-                        torch.full(size=(2,), fill_value=local_rank).cuda()]
+            'tensors': [torch.full(size=(2,), fill_value=local_rank, dtype=int).cuda(),
+                        torch.full(size=(2,), fill_value=local_rank, dtype=int).cuda()]
         }
-    else:
-        obj = None
-    data = fastnlp_torch_broadcast_object(obj, src=0, device=torch.cuda.current_device())
-    i = 0
-    assert data['tensor'][0]==0
-    assert data['numpy'][0]==0
-    assert data['bool']==(i%2==0)
-    assert np.allclose(data['float'], i+0.1)
-    assert data['int'] == i
-    assert data['dict']['rank'] == i
-    assert data['list'][0] == i
-    assert data['str'] == f'{i}'
-    assert data['tensors'][0][0] == i
+        data = fastnlp_torch_all_gather(obj)
+        world_size = int(os.environ['WORLD_SIZE'])
+        assert len(data) == world_size
+        for i in range(world_size):
+            assert (data[i]['tensor']==i).sum()==world_size
+            assert data[i]['numpy'][0]==i
+            assert data[i]['bool']==(i%2==0)
+            assert np.allclose(data[i]['float'], i+0.1)
+            assert data[i]['int'] == i
+            assert data[i]['dict']['rank'] == i
+            assert data[i]['list'][0] == i
+            assert data[i]['str'] == f'{i}'
+            assert data[i]['tensors'][0][0] == i
 
-    for obj in [int(os.environ['LOCAL_RANK']), bool(os.environ['LOCAL_RANK']=='1'), os.environ['LOCAL_RANK']]:
+        for obj in [1, True, 'xxx']:
+            data = fastnlp_torch_all_gather(obj)
+            assert len(data)==world_size
+            assert data[0]==data[1]
+
+    finally:
+        dist.destroy_process_group()
+
+@pytest.mark.torch
+@magic_argv_env_context
+def test_fastnlp_torch_broadcast_object():
+    try:
+        os.environ['MASTER_ADDR'] = '127.0.0.1'
+        os.environ['MASTER_PORT'] = '29500'
+        if 'LOCAL_RANK' not in os.environ and 'RANK' not in os.environ and 'WORLD_SIZE' not in os.environ:
+            os.environ['LOCAL_RANK'] = '0'
+            os.environ['RANK'] = '0'
+            os.environ['WORLD_SIZE'] = '2'
+        re_run_current_cmd_for_torch(1, output_from_new_proc='all')
+        torch.distributed.init_process_group(backend='nccl')
+        torch.distributed.barrier()
+        local_rank = int(os.environ['LOCAL_RANK'])
+        torch.cuda.set_device(local_rank)
+        if os.environ['LOCAL_RANK']=="0":
+            obj = {
+                'tensor': torch.full(size=(2,), fill_value=local_rank, dtype=int).cuda(),
+                'numpy': np.full(shape=(2, ), fill_value=local_rank, dtype=int),
+                'bool': local_rank%2==0,
+                'float': local_rank + 0.1,
+                'int': local_rank,
+                'dict': {
+                    'rank': local_rank
+                },
+                'list': [local_rank]*2,
+                'str': f'{local_rank}',
+                'tensors': [torch.full(size=(2,), fill_value=local_rank, dtype=int).cuda(),
+                            torch.full(size=(2,), fill_value=local_rank, dtype=int).cuda()]
+            }
+        else:
+            obj = None
         data = fastnlp_torch_broadcast_object(obj, src=0, device=torch.cuda.current_device())
-        assert int(data)==0
-    dist.destroy_process_group()
+        i = 0
+        assert data['tensor'][0]==0
+        assert data['numpy'][0]==0
+        assert data['bool']==(i%2==0)
+        assert np.allclose(data['float'], i+0.1)
+        assert data['int'] == i
+        assert data['dict']['rank'] == i
+        assert data['list'][0] == i
+        assert data['str'] == f'{i}'
+        assert data['tensors'][0][0] == i
+
+        for obj in [int(os.environ['LOCAL_RANK']), bool(os.environ['LOCAL_RANK']=='1'), os.environ['LOCAL_RANK']]:
+            data = fastnlp_torch_broadcast_object(obj, src=0, device=torch.cuda.current_device())
+            assert int(data)==0
+    finally:
+        dist.destroy_process_group()
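Note on the motivation (not part of the patch itself): torch.full() handles an integer fill_value differently across torch releases. Older releases fall back to the default float dtype (and emit a warning), while newer ones infer an integer dtype, so the rank tensors built in these tests could come back as floats on some installations and break the exact integer comparisons. Passing dtype explicitly pins the behaviour on every version. Below is a minimal sketch of the difference, assuming the usual mapping of the builtin int to torch.int64; it runs on CPU and does not need a process group, unlike the tests above.

import torch

local_rank = 0

# Version-dependent: may come back as float32 (with a deprecation warning) on
# older torch releases, or as int64 on newer ones that infer the dtype from
# the integer fill_value.
implicit = torch.full(size=(2,), fill_value=local_rank)

# Version-independent: the builtin int is accepted as a dtype and mapped to
# torch.int64, so equality checks against integer ranks behave the same on
# every release.
explicit = torch.full(size=(2,), fill_value=local_rank, dtype=int)

assert explicit.dtype == torch.int64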