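"""Unit tests for megatron.core.parallel_state.

Covers process-group initialization and teardown, rank and world-size
accessors, and RankGenerator group layouts. Several tests assume the
8-process distributed fixture provided by Utils (e.g. the tp=2 x pp=4 cases).
"""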
import os

import pytest
import torch

import megatron.core.parallel_state as ps
from tests.unit_tests.test_utilities import Utils

rank = Utils.rank
world_size = Utils.world_size
test_parallel_order = ['tp-cp-ep-dp-pp', 'tp-cp-pp-ep-dp']


@pytest.mark.parametrize('order', test_parallel_order)
def test_initialize_and_destroy_model_parallel(order):
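    # Initialization must assert before torch.distributed is up; afterwards,
    # configurations that cannot fit the world size (tp or pp alone, or their
    # product, oversubscribing it) and a virtual pipeline without enough
    # pipeline stages must raise RuntimeError. A valid tp=2 x pp=4 init then
    # exposes every group, and destroy resets module state (checked through
    # the private _MODEL_PARALLEL_GROUP).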
    with pytest.raises(AssertionError):
        assert ps.initialize_model_parallel(order=order)
    Utils.initialize_distributed()
    with pytest.raises(RuntimeError):
        assert ps.initialize_model_parallel(tensor_model_parallel_size=2 * world_size, order=order)
    with pytest.raises(RuntimeError):
        assert ps.initialize_model_parallel(
            pipeline_model_parallel_size=2 * world_size, order=order
        )
    with pytest.raises(RuntimeError):
        assert ps.initialize_model_parallel(
            pipeline_model_parallel_size=world_size,
            tensor_model_parallel_size=world_size,
            order=order,
        )
    with pytest.raises(RuntimeError):
        assert ps.initialize_model_parallel(virtual_pipeline_model_parallel_size=2, order=order)
    Utils.initialize_model_parallel(
        tensor_model_parallel_size=2, pipeline_model_parallel_size=4, order=order
    )

    assert ps.model_parallel_is_initialized()
    assert ps.get_model_parallel_group() is not None
    assert ps.get_tensor_model_parallel_group() is not None
    assert ps.get_pipeline_model_parallel_group() is not None
    assert ps.get_data_parallel_group() is not None
    Utils.destroy_model_parallel()
    assert ps._MODEL_PARALLEL_GROUP is None


@pytest.mark.parametrize('order', test_parallel_order)
def test_pipeline_parallel_initializations(order):
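    # With tp=2 and pp=4 on the 8-rank test world, dp collapses to 1: each rank
    # is its own data-parallel source, stage 0 holds global ranks 0 and 1 (so
    # the pipeline's first rank is rank % 2), and pipeline neighbours sit
    # tp = 2 global ranks apart.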
    Utils.initialize_model_parallel(
        tensor_model_parallel_size=2, pipeline_model_parallel_size=4, order=order
    )
    assert ps.get_pipeline_model_parallel_first_rank() == rank % 2
    assert ps.get_data_parallel_src_rank() == rank
    assert ps.get_pipeline_model_parallel_next_rank() == ((rank + 2) % world_size)
    assert ps.get_pipeline_model_parallel_prev_rank() == ((rank - 2) % world_size)
    Utils.destroy_model_parallel()


@pytest.mark.parametrize('order', test_parallel_order)
def test_data_parallel_initializations(order):
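    # Devoting the whole world to pipeline parallelism leaves data-parallel
    # groups of size 1, so every rank is its own data-parallel source and is
    # rank 0 of its own group.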
    Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size, order=order)
    assert ps.get_data_parallel_src_rank() == rank
    assert ps.get_data_parallel_world_size() == 1
    assert ps.get_data_parallel_rank() == 0
    Utils.destroy_model_parallel()


@pytest.mark.parametrize('order', test_parallel_order)
def test_tensor_model_parallel_world_size(order):
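    # Round trip: setting the cached world size to None should make the getter
    # fall back to querying the process group, which must agree with the value
    # configured at init. The pipeline and rank variants below check the same
    # fallback for their respective cached values.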
    Utils.initialize_model_parallel(tensor_model_parallel_size=world_size, order=order)
    assert ps.get_tensor_model_parallel_world_size() == world_size
    ps.set_tensor_model_parallel_world_size(None)
    assert ps.get_tensor_model_parallel_world_size() == world_size
    Utils.destroy_model_parallel()


@pytest.mark.parametrize('order', test_parallel_order)
def test_pipeline_model_parallel_world_size(order):
    Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size, order=order)
    assert ps.get_pipeline_model_parallel_world_size() == world_size
    ps.set_pipeline_model_parallel_world_size(None)
    assert ps.get_pipeline_model_parallel_world_size() == world_size
    Utils.destroy_model_parallel()


@pytest.mark.parametrize('order', test_parallel_order)
def test_tensor_model_parallel_rank(order):
    Utils.initialize_model_parallel(tensor_model_parallel_size=world_size, order=order)
    assert ps.get_tensor_model_parallel_rank() == rank
    ps.set_tensor_model_parallel_rank(None)
    assert ps.get_tensor_model_parallel_rank() == rank
    Utils.destroy_model_parallel()


@pytest.mark.parametrize('order', test_parallel_order)
def test_pipeline_model_parallel_rank(order):
    Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size, order=order)
    assert ps.get_pipeline_model_parallel_rank() == rank
    ps.set_pipeline_model_parallel_rank(None)
    assert ps.get_pipeline_model_parallel_rank() == rank
    Utils.destroy_model_parallel()


def test_context_parallel_rank():
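    # With context_parallel_size == world_size, context-parallel ranks map 1:1
    # onto global ranks.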
    Utils.initialize_model_parallel(context_parallel_size=world_size)
    assert ps.get_context_parallel_rank() == rank
    Utils.destroy_model_parallel()


def test_expert_model_parallel_rank():
    Utils.initialize_model_parallel(expert_model_parallel_size=world_size)
    assert ps.get_expert_model_parallel_rank() == rank
    ps.set_expert_model_parallel_rank(None)
    assert ps.get_expert_model_parallel_rank() == rank
    Utils.destroy_model_parallel()


@pytest.mark.parametrize('order', test_parallel_order)
def test_is_pipeline_first_stage(order):
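    # One rank per pipeline stage, so only global rank 0 is the first stage
    # (and, in the test below, only rank world_size - 1 the last); with no
    # virtual pipeline configured, ignore_virtual must not change the answer.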
    Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size, order=order)
    assert ps.is_pipeline_first_stage(ignore_virtual=True) == (rank == 0)
    assert ps.is_pipeline_first_stage() == (rank == 0)
    Utils.destroy_model_parallel()


@pytest.mark.parametrize('order', test_parallel_order)
def test_is_pipeline_last_stage(order):
    Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size, order=order)
    assert ps.is_pipeline_last_stage(ignore_virtual=True) == (rank == world_size - 1)
    assert ps.is_pipeline_last_stage() == (rank == world_size - 1)
    Utils.destroy_model_parallel()


@pytest.mark.parametrize('order', test_parallel_order)
def test_virtual_pipeline_model_parallel_rank(order):
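    # The virtual pipeline rank is plain module state: whatever the setter
    # stores, the getter must return unchanged.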
    Utils.initialize_model_parallel(pipeline_model_parallel_size=world_size, order=order)
    ps.set_virtual_pipeline_model_parallel_rank(rank)
    assert ps.get_virtual_pipeline_model_parallel_rank() == rank
    Utils.destroy_model_parallel()


@pytest.mark.parametrize('order', test_parallel_order)
def test_get_tensor_model_parallel_src_rank(order):
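    # The TP src rank is the first global rank of this rank's tensor-parallel
    # group; with tp == world_size there is a single group starting at 0, which
    # is exactly (rank // world_size) * world_size.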
    Utils.initialize_model_parallel(tensor_model_parallel_size=world_size, order=order)
    assert ps.get_tensor_model_parallel_src_rank() == ((rank // world_size) * world_size)
    Utils.destroy_model_parallel()


@pytest.mark.parametrize('order', test_parallel_order)
def test_encoder_tensor_pipeline_parallelism(order):
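    # 8 ranks split into a 3-way-TP encoder stage (ranks 0-2) feeding a
    # 5-way-TP decoder stage (ranks 3-7). Because the encoder and decoder TP
    # widths differ, some encoder ranks are paired with more than one decoder
    # peer, which is presumably why their first pipeline-group entry is a list
    # rather than a single int.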
    Utils.initialize_model_parallel(
        tensor_model_parallel_size=5,
        pipeline_model_parallel_size=1,
        encoder_pipeline_model_parallel_size=1,
        encoder_tensor_model_parallel_size=3,
        order=order,
    )
    if rank < 2:
        assert ps.get_tensor_model_parallel_world_size() == 3
        assert isinstance(ps._PIPELINE_GLOBAL_RANKS[0], list)
    elif rank == 2:
        assert ps.get_tensor_model_parallel_world_size() == 3
        assert isinstance(ps._PIPELINE_GLOBAL_RANKS[0], int)
    else:
        assert ps.get_tensor_model_parallel_world_size() == 5
        assert isinstance(ps._PIPELINE_GLOBAL_RANKS[0], int)
    Utils.destroy_model_parallel()


@pytest.mark.parametrize(
    'src_tp_pp, ep_size',
    [
        ((1, 8), 1),
        ((2, 4), 1),
        ((4, 2), 1),
        ((8, 1), 1),
        ((4, 1), 2),
        ((1, 1), 8),
        ((1, 1), 2),
        ((2, 1), 4),
    ],
)
def test_different_initialize_order_consistency(src_tp_pp, ep_size):
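    # In every parametrization above either pp == 1 or dp == ep == 1, so moving
    # 'pp' relative to 'ep-dp' in the order string cannot reshuffle any group:
    # both orders must yield identical ranks and process groups. Snapshot every
    # group under 'tp-ep-dp-pp', re-initialize with 'tp-pp-ep-dp', and compare.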
    Utils.initialize_model_parallel(
        *src_tp_pp, expert_model_parallel_size=ep_size, order='tp-ep-dp-pp'
    )
    tp_rank = ps.get_tensor_model_parallel_rank()
    dp_rank = ps.get_data_parallel_rank()
    pp_rank = ps.get_pipeline_model_parallel_rank()
    ep_rank = ps.get_expert_model_parallel_rank()

    tp_g = torch.distributed.get_process_group_ranks(ps.get_tensor_model_parallel_group())
    dp_g = torch.distributed.get_process_group_ranks(ps.get_data_parallel_group(False))
    pp_g = torch.distributed.get_process_group_ranks(ps.get_pipeline_model_parallel_group())
    dp_no_ep_g = torch.distributed.get_process_group_ranks(
        ps.get_data_modulo_expert_parallel_group()
    )
    cp_g = torch.distributed.get_process_group_ranks(ps.get_context_parallel_group())
    mp_g = torch.distributed.get_process_group_ranks(ps.get_model_parallel_group())
    tp_ep_g = torch.distributed.get_process_group_ranks(ps.get_tensor_and_expert_parallel_group())
    tp_dp_g = torch.distributed.get_process_group_ranks(
        ps.get_tensor_and_data_parallel_group(False)
    )

    Utils.destroy_model_parallel()

    Utils.initialize_model_parallel(
        *src_tp_pp, expert_model_parallel_size=ep_size, order='tp-pp-ep-dp'
    )
    assert tp_rank == ps.get_tensor_model_parallel_rank()
    assert dp_rank == ps.get_data_parallel_rank()
    assert pp_rank == ps.get_pipeline_model_parallel_rank()
    assert ep_rank == ps.get_expert_model_parallel_rank()

    assert tp_g == torch.distributed.get_process_group_ranks(ps.get_tensor_model_parallel_group())
    assert dp_g == torch.distributed.get_process_group_ranks(ps.get_data_parallel_group(False))
    assert pp_g == torch.distributed.get_process_group_ranks(ps.get_pipeline_model_parallel_group())
    assert dp_no_ep_g == torch.distributed.get_process_group_ranks(
        ps.get_data_modulo_expert_parallel_group()
    )
    assert cp_g == torch.distributed.get_process_group_ranks(ps.get_context_parallel_group())
    assert mp_g == torch.distributed.get_process_group_ranks(ps.get_model_parallel_group())
    assert tp_ep_g == torch.distributed.get_process_group_ranks(
        ps.get_tensor_and_expert_parallel_group()
    )
    assert tp_dp_g == torch.distributed.get_process_group_ranks(
        ps.get_tensor_and_data_parallel_group(False)
    )

    Utils.destroy_model_parallel()


@pytest.mark.parametrize(
    'src_tp_pp, ep_size',
    [((1, 2), 1), ((1, 4), 1), ((2, 2), 1), ((1, 2), 2), ((1, 4), 2), ((2, 2), 2)],
)
def test_different_initialize_order_inconsistency(src_tp_pp, ep_size):
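    # Here every parametrization has pp > 1 and a non-trivial dp, so swapping
    # 'pp' with 'ep-dp' genuinely reshuffles ranks: dp, pp, amax, and
    # model-parallel groups must differ between the two orders, while tp
    # (innermost in both) and cp (size 1 here) groups must not.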
    Utils.initialize_model_parallel(
        *src_tp_pp, expert_model_parallel_size=ep_size, order='tp-ep-dp-pp'
    )

    tp_g = torch.distributed.get_process_group_ranks(ps.get_tensor_model_parallel_group())
    dp_g = torch.distributed.get_process_group_ranks(ps.get_data_parallel_group(False))
    pp_g = torch.distributed.get_process_group_ranks(ps.get_pipeline_model_parallel_group())
    cp_g = torch.distributed.get_process_group_ranks(ps.get_context_parallel_group())
    amax_g = torch.distributed.get_process_group_ranks(ps.get_amax_reduction_group(False))
    mp_g = torch.distributed.get_process_group_ranks(ps.get_model_parallel_group())

    Utils.destroy_model_parallel()

    Utils.initialize_model_parallel(
        *src_tp_pp, expert_model_parallel_size=ep_size, order='tp-pp-ep-dp'
    )
    assert tp_g == torch.distributed.get_process_group_ranks(ps.get_tensor_model_parallel_group())
    assert dp_g != torch.distributed.get_process_group_ranks(ps.get_data_parallel_group(False))
    assert pp_g != torch.distributed.get_process_group_ranks(ps.get_pipeline_model_parallel_group())
    assert cp_g == torch.distributed.get_process_group_ranks(ps.get_context_parallel_group())
    assert amax_g != torch.distributed.get_process_group_ranks(ps.get_amax_reduction_group(False))
    assert mp_g != torch.distributed.get_process_group_ranks(ps.get_model_parallel_group())

    Utils.destroy_model_parallel()


@pytest.mark.parametrize(
    'nodes, num_gpu, tp, pp, cp, ep',
    [
        (1, 1, 1, 1, 1, 1),
        (1, 8, 8, 1, 1, 1),
        (1, 8, 2, 2, 1, 1),
        (1, 8, 2, 4, 1, 1),
        (3, 8, 8, 3, 1, 1),
        (4, 8, 2, 4, 1, 1),
        (8, 8, 8, 8, 1, 1),
        (8, 8, 2, 1, 1, 4),
        (8, 8, 2, 2, 2, 4),
        (8, 8, 2, 1, 4, 8),
        (8, 8, 2, 2, 2, 8),
        (16, 8, 4, 8, 1, 1),
        (16, 8, 4, 8, 1, 4),
        (16, 8, 4, 8, 4, 1),
        (16, 8, 8, 8, 1, 1),
        (16, 8, 4, 8, 1, 1),
        (16, 8, 8, 8, 1, 1),
        (32, 8, 4, 8, 1, 1),
        (32, 8, 8, 8, 1, 1),
        (32, 8, 4, 8, 1, 4),
        (32, 8, 8, 8, 4, 1),
        (64, 8, 4, 2, 8, 8),
        (64, 8, 4, 8, 1, 1),
        (64, 8, 8, 8, 1, 1),
        (96, 8, 4, 8, 1, 1),
        (128, 8, 4, 2, 8, 8),
        (128, 8, 4, 8, 1, 1),
        (256, 8, 4, 8, 1, 1),
        (316, 8, 4, 8, 1, 1),
        (384, 8, 4, 8, 1, 1),
        (512, 8, 4, 8, 1, 1),
        (768, 8, 4, 8, 1, 1),
        (1024, 8, 4, 8, 1, 1),
        (1280, 8, 4, 8, 1, 1),
        (1344, 8, 4, 8, 1, 1),
    ],
)
def test_rank_generator_for_tp_dp_pp(nodes, num_gpu, tp, pp, cp, ep):
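    # Pure-arithmetic check, no process groups needed: RankGenerator's group
    # lists must match a frozen copy of the legacy group-construction loops
    # (the "golden" result below) across a wide range of cluster shapes.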
    def golden_rank_result_from_past_code(
        world_size: int,
        tensor_model_parallel_size: int = 1,
        pipeline_model_parallel_size: int = 1,
        context_parallel_size: int = 1,
        expert_model_parallel_size: int = 1,
    ):
        data_parallel_size: int = world_size // (
            tensor_model_parallel_size * pipeline_model_parallel_size * context_parallel_size
        )
        num_tensor_model_parallel_groups: int = world_size // tensor_model_parallel_size
        num_pipeline_model_parallel_groups: int = world_size // pipeline_model_parallel_size

        dp_groups = []
        dp_groups_with_cp = []

        all_data_parallel_group_ranks_with_cp = []
        for i in range(pipeline_model_parallel_size):
            start_rank = i * num_pipeline_model_parallel_groups
            end_rank = (i + 1) * num_pipeline_model_parallel_groups
            for j in range(context_parallel_size * tensor_model_parallel_size):
                ranks = range(
                    start_rank + j, end_rank, context_parallel_size * tensor_model_parallel_size
                )
                dp_groups.append(list(ranks))
            for j in range(tensor_model_parallel_size):
                ranks_with_cp = range(start_rank + j, end_rank, tensor_model_parallel_size)
                all_data_parallel_group_ranks_with_cp.append(list(ranks_with_cp))
                dp_groups_with_cp.append(list(ranks_with_cp))

        cp_group = []
        for i in range(pipeline_model_parallel_size):
            for j in range(data_parallel_size):
                start_rank = (
                    i * num_pipeline_model_parallel_groups
                    + j * tensor_model_parallel_size * context_parallel_size
                )
                end_rank = (
                    i * num_pipeline_model_parallel_groups
                    + (j + 1) * tensor_model_parallel_size * context_parallel_size
                )
                for k in range(tensor_model_parallel_size):
                    ranks = range(start_rank + k, end_rank, tensor_model_parallel_size)
                    cp_group.append(list(ranks))

        mp_group = []
        for i in range(data_parallel_size * context_parallel_size):
            ranks = [
                data_parallel_group_ranks_with_cp[i]
                for data_parallel_group_ranks_with_cp in all_data_parallel_group_ranks_with_cp
            ]
            mp_group.append(list(ranks))

        tp_group = []
        for i in range(num_tensor_model_parallel_groups):
            ranks = range(i * tensor_model_parallel_size, (i + 1) * tensor_model_parallel_size)
            tp_group.append(list(ranks))

        pp_group = []
        for i in range(num_pipeline_model_parallel_groups):
            ranks = range(i, world_size, num_pipeline_model_parallel_groups)
            pp_group.append(list(ranks))

        tp_dp_group = []
        tp_dp_cp_group = []
        tensor_and_data_group_size_with_cp: int = (
            tensor_model_parallel_size * data_parallel_size * context_parallel_size
        )
        num_tensor_and_data_groups_with_cp: int = world_size // tensor_and_data_group_size_with_cp
        for i in range(num_tensor_and_data_groups_with_cp):
            start_rank = i * tensor_and_data_group_size_with_cp
            end_rank = start_rank + tensor_and_data_group_size_with_cp
            ranks = range(start_rank, end_rank)
            tp_dp_cp_group.append(list(ranks))

            for j in range(context_parallel_size):
                ranks = []
                for k in range(data_parallel_size):
                    start_rank = (
                        i * tensor_and_data_group_size_with_cp
                        + j * tensor_model_parallel_size
                        + k * tensor_model_parallel_size * context_parallel_size
                    )
                    end_rank = start_rank + tensor_model_parallel_size
                    ranks = ranks + list(range(start_rank, end_rank))
                tp_dp_group.append(list(ranks))

        tp_ep_group = []
        dp_no_ep_group = []
        dp_no_ep_group_with_cp = []

        all_ranks = torch.arange(world_size).reshape(
            (
                pipeline_model_parallel_size,
                data_parallel_size // expert_model_parallel_size,
                expert_model_parallel_size,
                context_parallel_size,
                tensor_model_parallel_size,
            )
        )
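        # all_ranks lays the flat world out in 'tp-cp-ep-dp-pp' order (tp
        # fastest-varying, pp slowest, dp split into edp-outer/ep-inner); the
        # transposes below carve each composite group out of this 5-D view.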
        # 'pp edp ep cp tp -> (pp edp cp) (ep tp)'
        tp_ep_rearrange = torch.transpose(all_ranks, 2, 3)
        tp_ep_rearrange = torch.reshape(
            tp_ep_rearrange, (-1, expert_model_parallel_size * tensor_model_parallel_size)
        )
        tp_ep_rearrange = tp_ep_rearrange.tolist()
        tp_ep_rearrange.sort()
        for tensor_and_expert_parallel_ranks in tp_ep_rearrange:
            tensor_and_expert_parallel_ranks = list(tensor_and_expert_parallel_ranks)
            tensor_and_expert_parallel_ranks.sort()
            tp_ep_group.append(tensor_and_expert_parallel_ranks)
        # 'pp edp ep cp tp -> (pp ep cp tp) edp'
        edp_rearrange = torch.transpose(all_ranks, 1, 4)
        edp_rearrange = torch.reshape(
            edp_rearrange, (-1, data_parallel_size // expert_model_parallel_size)
        )
        edp_rearrange = edp_rearrange.tolist()
        edp_rearrange.sort()
        for expert_data_parallel_ranks in edp_rearrange:
            expert_data_parallel_ranks = list(expert_data_parallel_ranks)
            expert_data_parallel_ranks.sort()
            dp_no_ep_group.append(expert_data_parallel_ranks)
        # 'pp edp ep cp tp -> (pp ep tp) (cp edp)'
        edp_cp_rearrange = torch.transpose(all_ranks, 1, 2)
        edp_cp_rearrange = torch.transpose(edp_cp_rearrange, 2, 4)
        edp_cp_rearrange = torch.reshape(
            edp_cp_rearrange,
            (-1, context_parallel_size * data_parallel_size // expert_model_parallel_size),
        )
        edp_cp_rearrange = edp_cp_rearrange.tolist()
        edp_cp_rearrange.sort()
        for expert_data_parallel_ranks_with_cp in edp_cp_rearrange:
            expert_data_parallel_ranks_with_cp = list(expert_data_parallel_ranks_with_cp)
            expert_data_parallel_ranks_with_cp.sort()
            dp_no_ep_group_with_cp.append(expert_data_parallel_ranks_with_cp)

        return (
            dp_groups,
            dp_groups_with_cp,
            cp_group,
            mp_group,
            tp_group,
            pp_group,
            tp_dp_group,
            tp_dp_cp_group,
            tp_ep_group,
            dp_no_ep_group,
            dp_no_ep_group_with_cp,
        )

    world_size = nodes * num_gpu
    assert (
        world_size % (tp * pp * cp) == 0
    ), f"world_size ({world_size}) is not divisible by tp {tp} x pp {pp} x cp {cp}."
    dp = world_size // (tp * pp * cp)
    assert dp % ep == 0, f"dp size ({dp}) is not divisible by ep ({ep})."
    (
        dp_groups,
        dp_groups_with_cp,
        cp_group,
        mp_group,
        tp_group,
        pp_group,
        tp_dp_group,
        tp_dp_cp_group,
        tp_ep_group,
        dp_no_ep_group,
        dp_no_ep_group_with_cp,
    ) = golden_rank_result_from_past_code(
        world_size=world_size,
        tensor_model_parallel_size=tp,
        pipeline_model_parallel_size=pp,
        context_parallel_size=cp,
        expert_model_parallel_size=ep,
    )
    rank_generator = ps.RankGenerator(tp=tp, ep=ep, dp=dp, pp=pp, cp=cp, order="tp-cp-ep-dp-pp")
    assert dp_groups == rank_generator.get_ranks(
        "dp"
    ), f"{dp_groups} != {rank_generator.get_ranks('dp')}"
    assert dp_groups_with_cp == rank_generator.get_ranks(
        'dp-cp'
    ), f"{dp_groups_with_cp} != {rank_generator.get_ranks('dp-cp')}"
    assert cp_group == rank_generator.get_ranks(
        "cp"
    ), f"{cp_group} != {rank_generator.get_ranks('cp')}."
    assert mp_group == rank_generator.get_ranks(
        "tp-pp"
    ), f"{mp_group} != {rank_generator.get_ranks('tp-pp')}"
    assert tp_group == rank_generator.get_ranks(
        "tp"
    ), f"{tp_group} != {rank_generator.get_ranks('tp')}"
    assert pp_group == rank_generator.get_ranks(
        "pp"
    ), f"{pp_group} != {rank_generator.get_ranks('pp')}"
    assert tp_dp_group == rank_generator.get_ranks(
        "tp-dp"
    ), f"{tp_dp_group} != {rank_generator.get_ranks('tp-dp')}"
    assert tp_dp_cp_group == rank_generator.get_ranks(
        "tp-dp-cp"
    ), f"{tp_dp_cp_group} != {rank_generator.get_ranks('tp-dp-cp')}"
    assert tp_ep_group == rank_generator.get_ranks(
        "tp-ep", independent_ep=True
    ), f"{tp_ep_group} != {rank_generator.get_ranks('tp-ep', independent_ep=True)}."
    assert dp_no_ep_group == rank_generator.get_ranks(
        "dp", independent_ep=True
    ), f"{dp_no_ep_group} != {rank_generator.get_ranks('dp', independent_ep=True)}."
    assert dp_no_ep_group_with_cp == rank_generator.get_ranks(
        "dp-cp", independent_ep=True
    ), f"{dp_no_ep_group_with_cp} != {rank_generator.get_ranks('dp-cp', independent_ep=True)}."