# Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

os.environ['RAY_DEDUP_LOGS'] = '0'
os.environ['NCCL_DEBUG'] = 'WARN'

import torch
import torch.distributed
import ray

from verl.single_controller.ray.base import RayResourcePool, RayClassWithInitArgs, RayWorkerGroup
from verl.single_controller.base.worker import Worker
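
# Tests for running torch.distributed collectives through a RayWorkerGroup.
# The two actor variants below differ only in where the process group is set up:
# TestAllGatherActor does it in an explicit `init` call, while TestAllGatherActorV2
# does it inside the constructor.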


@ray.remote
class TestAllGatherActor(Worker):
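    """Worker that initializes torch.distributed in a separate `init` call and exposes an all_gather op."""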

    def __init__(self, size) -> None:
        super().__init__()
        self.size = size

    def init(self):
        torch.distributed.init_process_group()
        self.tensor = torch.zeros(size=(self.size,), dtype=torch.int64, device='cuda')
        self.tensor += self.rank

    def all_gather(self):
        world_size = self._world_size
        output = torch.zeros(size=(self.tensor.shape[0] * world_size,),
                             dtype=self.tensor.dtype,
                             device=self.tensor.device)
        torch.distributed.all_gather_into_tensor(output, self.tensor, async_op=False)
        return output


@ray.remote
class TestAllGatherActorV2(Worker):
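    """Same as TestAllGatherActor, but initializes torch.distributed directly in the constructor."""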

    def __init__(self, size) -> None:
        super().__init__()
        self.size = size

        torch.distributed.init_process_group()
        self.tensor = torch.zeros(size=(self.size,), dtype=torch.int64, device='cuda')
        self.tensor += self.rank

    def all_gather(self):
        world_size = self._world_size
        output = torch.zeros(size=(self.tensor.shape[0] * world_size,),
                             dtype=self.tensor.dtype,
                             device=self.tensor.device)
        torch.distributed.all_gather_into_tensor(output, self.tensor, async_op=False)
        return output


def test_all_gather_torch():
    """
    In this test, we instantiate 4 GPUs in a group and test the all_gather
    """
    ray.init()

    # create 4 workers, each holding one GPU
    resource_pool = RayResourcePool([4], use_gpu=True)
    class_with_args = RayClassWithInitArgs(cls=TestAllGatherActor, size=2)

    worker_group = RayWorkerGroup(resource_pool, class_with_args, name_prefix="worker_group_torch")

    worker_group.execute_all_sync('init')
    output = worker_group.execute_all_sync('all_gather')
    for i in range(1, len(output)):
        assert torch.all(output[i] == output[0])

    output = output[0].cpu()
    print(output)
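    # each of the 4 ranks contributes a size-2 tensor filled with its rank,
    # so the gathered result is [0, 0, 1, 1, 2, 2, 3, 3]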
    assert torch.all(output == torch.tensor([0, 0, 1, 1, 2, 2, 3, 3], dtype=torch.int64))

    ray.shutdown()


def test_all_gather_torch_v2():
    """
    In this test, we instantiate 4 GPUs in a group and test the all_gather
    """
    ray.init()

    # create 4 workers, each holding one GPU
    resource_pool = RayResourcePool([4], use_gpu=True)
    class_with_args = RayClassWithInitArgs(cls=TestAllGatherActorV2, size=2)

    worker_group = RayWorkerGroup(resource_pool, class_with_args, name_prefix="worker_group_torch")

    output = worker_group.execute_all_sync('all_gather')
    for i in range(1, len(output)):
        assert torch.all(output[i] == output[0])

    output = output[0].cpu()
    print(output)
    assert torch.all(output == torch.tensor([0, 0, 1, 1, 2, 2, 3, 3], dtype=torch.int64))

    ray.shutdown()