# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import argparse
import json
import time
from contextlib import nullcontext
from datetime import datetime
from itertools import product
from typing import Any, Optional, TypedDict

import ray
import torch
from ray.experimental.tqdm_ray import tqdm

from vllm.model_executor.layers.fused_moe.fused_moe import *
from vllm.transformers_utils.config import get_config
from vllm.triton_utils import triton
from vllm.utils import FlexibleArgumentParser

# Do not import current_platform at module level; import it locally where needed.
# FP8_DTYPE = current_platform.fp8_dtype()


class BenchmarkConfig(TypedDict):
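    # Triton tiling and launch parameters for the fused MoE kernels.
    # num_ldmatrixes is only populated when tuning the nn_moe variant
    # (see get_rocm_tuning_space).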
    BLOCK_SIZE_M: int
    BLOCK_SIZE_N: int
    BLOCK_SIZE_K: int
    GROUP_SIZE_M: int
    num_warps: int
    num_stages: int
    num_ldmatrixes: Optional[int]


def benchmark_config(
    config: BenchmarkConfig,
    num_tokens: int,
    num_experts: int,
    shard_intermediate_size: int,
    hidden_size: int,
    topk: int,
    dtype: torch.dtype,
    use_fp8_w8a8: bool,
    use_int8_w8a16: bool,
    num_iters: int = 100,
    block_quant_shape: Optional[list[int]] = None,
    use_deep_gemm: bool = False,
    nn_moe: Optional[bool] = False,
) -> float:
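    # Measures the average per-invocation latency (in microseconds) of the fused
    # MoE kernel for one (batch size, model shape, dtype, config) combination,
    # using CUDA graph capture/replay to amortize launch overhead.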
    from vllm.platforms import current_platform
    device = torch.cuda.current_device()
    
    init_dtype = torch.float16 if use_fp8_w8a8 else dtype
    x = torch.randn(num_tokens, hidden_size, dtype=dtype, device=device)
    if use_int8_w8a16:
        if not nn_moe:
            w1 = torch.randint(
                -127,
                127,
                (
                    num_experts,
                    shard_intermediate_size,
                    hidden_size,
                ),
                dtype=torch.int8,
                device=device,
            )
            w2 = torch.randint(
                -127,
                127,
                (
                    num_experts,
                    hidden_size,
                    shard_intermediate_size // 2,
                ),
                dtype=torch.int8,
                device=device,
            )
        else:
            w1 = torch.randint(
                -127,
                127,
                (
                    num_experts,
                    hidden_size,
                    shard_intermediate_size,
                ),
                dtype=torch.int8,
                device=device,
            )
            w2 = torch.randint(
                -127,
                127,
                (
                    num_experts,
                    shard_intermediate_size // 2,
                    hidden_size,
                ),
                dtype=torch.int8,
                device=device,
            )
    else:
        if not nn_moe:
            w1 = torch.randn(
                num_experts, shard_intermediate_size, hidden_size, dtype=init_dtype, device=device
            )
            w2 = torch.randn(
                num_experts, hidden_size, shard_intermediate_size // 2, dtype=init_dtype, device=device
            )
        else:
            w1 = torch.randn(
                num_experts, hidden_size, shard_intermediate_size, dtype=init_dtype, device=device
            )
            w2 = torch.randn(
                num_experts, shard_intermediate_size // 2, hidden_size, dtype=init_dtype, device=device
            )
    gating_output = torch.randn(
        num_iters, num_tokens, num_experts, dtype=torch.float32, device=device
    )

    w1_scale = None
    w2_scale = None
    a1_scale = None
    a2_scale = None
    if use_int8_w8a16:
        w1_scale = torch.randn(
            (num_experts, 2 * shard_intermediate_size), dtype=torch.float32, device=device
        )
        w2_scale = torch.randn((hidden_size, num_experts), dtype=torch.float32, device=device)
    if use_fp8_w8a8:
        if block_quant_shape:
            block_n, block_k = block_quant_shape[0], block_quant_shape[1]
            E = num_experts
            N = shard_intermediate_size // 2
            K = hidden_size
            factor_for_scale = 1e-2
            n_tiles_w1 = (2 * N + block_n - 1) // block_n
            n_tiles_w2 = (K + block_n - 1) // block_n
            k_tiles_w1 = (K + block_k - 1) // block_k
            k_tiles_w2 = (N + block_k - 1) // block_k
            w1_scale = (
                torch.rand((E, n_tiles_w1, k_tiles_w1), dtype=torch.float32, device=device)
                * factor_for_scale
            )
            w2_scale = (
                torch.rand((E, n_tiles_w2, k_tiles_w2), dtype=torch.float32, device=device)
                * factor_for_scale
            )
        else:
            w1_scale = torch.randn(num_experts, dtype=torch.float32, device=device)
            w2_scale = torch.randn(num_experts, dtype=torch.float32, device=device)

        a1_scale = torch.randn(1, dtype=torch.float32, device=device)
        a2_scale = torch.randn(1, dtype=torch.float32, device=device)

        # Get the platform-specific FP8 dtype
        FP8_DTYPE = current_platform.fp8_dtype()
        w1 = w1.to(FP8_DTYPE)
        w2 = w2.to(FP8_DTYPE)

    input_gating = torch.empty(
        num_tokens, num_experts, dtype=torch.float32, device=device
    )

    def prepare(i: int):
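        # Stage the i-th gating logits into the fixed buffer read by the captured graph.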
        input_gating.copy_(gating_output[i])

    def run():
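        # One fused MoE forward pass under the overridden kernel config.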
        from vllm.model_executor.layers.fused_moe import override_config

        with override_config(config):
            if use_deep_gemm:
                topk_weights, topk_ids, token_expert_indices = fused_topk(
                    x, input_gating, topk, False
                )
                return fused_experts(
                    x,
                    w1,
                    w2,
                    topk_weights,
                    topk_ids,
                    inplace=True,
                    use_fp8_w8a8=use_fp8_w8a8,
                    w1_scale=w1_scale,
                    w2_scale=w2_scale,
                    a1_scale=a1_scale,
                    a2_scale=a2_scale,
                    block_shape=block_quant_shape,
                    allow_deep_gemm=True,
                    use_nn_moe=nn_moe,
                )
            else:
                fused_moe(
                    x,
                    w1,
                    w2,
                    input_gating,
                    topk,
                    renormalize=True,
                    inplace=True,
                    use_fp8_w8a8=use_fp8_w8a8,
                    use_int8_w8a16=use_int8_w8a16,
                    w1_scale=w1_scale,
                    w2_scale=w2_scale,
                    a1_scale=a1_scale,
                    a2_scale=a2_scale,
                    block_shape=block_quant_shape,
                    use_nn_moe=nn_moe,
                )

    # JIT compilation & warmup
    run()
    torch.cuda.synchronize()

    # Capture 10 invocations with CUDA graph
    graph = torch.cuda.CUDAGraph()
    with torch.cuda.graph(graph):
        for _ in range(10):
            run()
    torch.cuda.synchronize()

    # Warmup
    for _ in range(5):
        graph.replay()
        # run()
    torch.cuda.synchronize()

    start_event = torch.cuda.Event(enable_timing=True)
    end_event = torch.cuda.Event(enable_timing=True)

    latencies: list[float] = []
    for i in range(num_iters):
        prepare(i)
        torch.cuda.synchronize()

        start_event.record()
        graph.replay()
        # run()
        end_event.record()
        end_event.synchronize()
        latencies.append(start_event.elapsed_time(end_event))
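    # Each graph replay executes 10 fused MoE invocations; report the mean
    # per-invocation latency in microseconds.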
    avg = sum(latencies) / (num_iters * 10) * 1000  # us
    graph.reset()
    return avg


def get_rocm_tuning_space(use_fp16, nn_moe: Optional[bool] = False):
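    # Candidate Triton tiling / launch parameters to search on ROCm (DCU) devices.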
    block_m_range = [16, 32, 64, 128, 256]
    block_n_range = [32, 64, 128, 256]
    block_k_range = [32, 64, 128, 256]
    if not use_fp16 and 16 in block_k_range:
        block_k_range.remove(16)  # BLOCK_K=16 not supported for fp8
    num_warps_range = [2, 4, 8]
    group_m_range = [1, 16, 32, 64]
    num_stage_range = [2, 3, 4, 5]
    # waves_per_eu_range = [0]
    # matrix_instr_nonkdim_range = [16, 32] if use_fp16 else []
    # kpack_range = [1, 2] if use_fp16 else []

    param_ranges = {
        "BLOCK_SIZE_M": block_m_range,
        "BLOCK_SIZE_N": block_n_range,
        "BLOCK_SIZE_K": block_k_range,
        "GROUP_SIZE_M": group_m_range,
        "num_warps": num_warps_range,
        "num_stages": num_stage_range,
        # "waves_per_eu": waves_per_eu_range,
    }
    if nn_moe:
        param_ranges["num_ldmatrixes"] = [1]
    
    # DCU currently does not support the following parameters
    # if use_fp16:
    #     param_ranges["matrix_instr_nonkdim"] = matrix_instr_nonkdim_range
    #     param_ranges["kpack"] = kpack_range

    return param_ranges


def get_configs_compute_bound(
    use_fp16, block_quant_shape, nn_moe: Optional[bool] = False
) -> list[dict[str, int]]:
    configs: list[BenchmarkConfig] = []
    
    # Import current_platform locally rather than at module level
    from vllm.platforms import current_platform

    if current_platform.is_rocm():
        param_ranges = get_rocm_tuning_space(use_fp16, nn_moe)
    else:
        # Reduced search space for faster tuning.
        # TODO(woosuk): Increase the search space and use a performance model to
        # prune the search space.
        block_m_range = [16, 32, 64, 128, 256]
        block_n_range = [32, 64, 128, 256]
        block_k_range = [64, 128, 256]
        num_warps_range = [4, 8]
        group_m_range = [1, 16, 32, 64]
        num_stage_range = [2, 3, 4, 5]

        param_ranges = {
            "BLOCK_SIZE_M": block_m_range,
            "BLOCK_SIZE_N": block_n_range,
            "BLOCK_SIZE_K": block_k_range,
            "GROUP_SIZE_M": group_m_range,
            "num_warps": num_warps_range,
            "num_stages": num_stage_range,
        }

    keys, values = zip(*param_ranges.items())
    for config_values in product(*values):
        config = dict(zip(keys, config_values))
        configs.append(config)

    # Remove configs that are not compatible with fp8 block quantization
    # BLOCK_SIZE_K must be a multiple of block_k
    # BLOCK_SIZE_N must be a multiple of block_n
    if block_quant_shape is not None and not use_fp16:
        block_n, block_k = block_quant_shape[0], block_quant_shape[1]
        for config in configs[:]:
            if (
                config["BLOCK_SIZE_K"] % block_k != 0
                or config["BLOCK_SIZE_N"] % block_n != 0
            ):
                configs.remove(config)
    return configs


def prune_rocm_search_space(
    num_tokens, shard_intermediate_size, hidden_size, search_space, is_fp16, topk
):
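    # Prune against both GEMM shapes used by the fused MoE: the w1 (gate/up)
    # projection and the w2 (down) projection, then merge the surviving configs.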
    N1, K1 = shard_intermediate_size, hidden_size
    N2, K2 = hidden_size, shard_intermediate_size // 2
    pruned_space_1 = prune_rocm_configs(
        num_tokens * topk, N1, K1, search_space, is_fp16
    )
    pruned_space_2 = prune_rocm_configs(
        num_tokens * topk, N2, K2, search_space, is_fp16
    )
    search_space = merge_unique_dicts(pruned_space_1, pruned_space_2)
    return search_space


# The following code is inspired by ROCm/Triton GEMM tuning script:
# https://github.com/ROCm/triton/blob/triton-mlir/scripts/amd/gemm/tune_gemm.py#L89
def prune_rocm_configs(M, N, K, configs, is_fp16=True):
    pruned_configs = []
    elemBytes_a = 2 if is_fp16 else 1
    elemBytes_b = 2 if is_fp16 else 1

    mfma = 16 if M < 32 or N < 32 else 32

    # TODO (zhanglx): figure out the boundary between large and small gemms
    large_gemm = False
    if M >= 2048 and N >= 2048:
        large_gemm = True

    for config in configs:
        BLOCK_SIZE_M = config.get("BLOCK_SIZE_M")
        BLOCK_SIZE_N = config.get("BLOCK_SIZE_N")
        BLOCK_SIZE_K = config.get("BLOCK_SIZE_K")
        num_warps = config.get("num_warps")

        # DCU currently does not support matrix_instr_nonkdim param
        # if is_fp16:
        #     matrix_instr_nonkdim = config.get("matrix_instr_nonkdim")
        #     if matrix_instr_nonkdim > mfma:
        #         continue
        if mfma == 4 and BLOCK_SIZE_K < 64:
            continue
        # Some layouts do not work properly when the number of
        # elements per thread is less than 1.
        if BLOCK_SIZE_M * BLOCK_SIZE_N < 64:
            continue
        SPLIT_K = config.get("SPLIT_K", 1)
        GROUP_M = config.get("GROUP_SIZE_M")

        # DCU currently does not support matrix_instr_nonkdim param
        # if is_fp16:
        #     if (
        #         matrix_instr_nonkdim > BLOCK_SIZE_M
        #         or matrix_instr_nonkdim > BLOCK_SIZE_N
        #     ):
        #         continue
        #     if matrix_instr_nonkdim >= M and matrix_instr_nonkdim != BLOCK_SIZE_M:
        #         continue
        #     if matrix_instr_nonkdim >= N and matrix_instr_nonkdim != BLOCK_SIZE_N:
        #         continue
        
        # Skip BLOCK_SIZE that is too large compared to M/N
        # unless BLOCK_SIZE is already small enough
        if M * 2 < BLOCK_SIZE_M and BLOCK_SIZE_M != 16:
            continue
        if N * 2 < BLOCK_SIZE_N and BLOCK_SIZE_N != 16:
            continue
        # skip large split_k when not necessary
        if SPLIT_K != 1 and not need_split_k(M, N, K):
            continue
        # skip split_k that leads to EVEN_K = false
        leap = SPLIT_K * BLOCK_SIZE_K
        modv = K % leap
        if modv != 0:
            continue
        # skip large GROUP_M
        if GROUP_M * BLOCK_SIZE_M > M and GROUP_M != 1:
            continue
        # out of shared memory resource
        # TODO (zhanglx): This does not consider the LDS usage in the epilogue
        LDS = (
            BLOCK_SIZE_K * BLOCK_SIZE_M * elemBytes_a
            + BLOCK_SIZE_K * BLOCK_SIZE_N * elemBytes_b
        )
        if LDS > 65536:
            continue
        # Skip small block sizes and num_warps for large gemm
        # For fp16 and f8, we want to only use BLOCK_SIZE >= 64
        if large_gemm:
            if BLOCK_SIZE_M < 64 or BLOCK_SIZE_N < 64:
                continue
            if BLOCK_SIZE_K < 64:
                continue
            if num_warps < 4:
                continue

        pruned_configs.append(config)

    return pruned_configs


def need_split_k(SIZE_M, SIZE_N, SIZE_K):
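    # Split-K only pays off for skinny GEMMs (small M or N) with a long K dimension.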
    return (SIZE_M < 64 or SIZE_N < 64) and SIZE_K > 1024


def merge_unique_dicts(list1, list2):
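    # Concatenate both pruned lists and drop duplicate configs while preserving order.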
    result = []
    combined_list = list1.copy()
    combined_list.extend(list2)
    for dictionary in combined_list:
        if dictionary not in result:
            result.append(dictionary)
    return result


@ray.remote(num_gpus=1)
class BenchmarkWorker:
    def __init__(self, seed: int, device_id: int) -> None:
        from vllm.platforms import current_platform

        if current_platform.is_rocm():
            # In ROCm environment with Ray, let Ray handle device assignment
            # Don't manually set default device as it may conflict with Ray's device mapping
            pass
        else:
            torch.set_default_device(f"cuda:{device_id}")
        current_platform.seed_everything(seed)
        self.seed = seed
        # Store the logical device ID for Ray
        self.device_id = device_id

    def benchmark(
        self,
        num_tokens: int,
        num_experts: int,
        shard_intermediate_size: int,
        hidden_size: int,
        topk: int,
        dtype: torch.dtype,
        use_fp8_w8a8: bool,
        use_int8_w8a16: bool,
        block_quant_shape: Optional[list[int]] = None,
        use_deep_gemm: bool = False,
        nn_moe: Optional[bool] = False,
    ) -> tuple[dict[str, int], float]:
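        # Look up the best pre-tuned config for this shape (falling back to the
        # default config) and time it with benchmark_config.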
        # Import current_platform locally rather than at module level
        from vllm.platforms import current_platform
        current_platform.seed_everything(self.seed)
        
        from vllm.model_executor.layers.fused_moe.fused_moe import (
            get_config_dtype_str, get_moe_configs, get_default_config
        )
        dtype_str = get_config_dtype_str(
            dtype, use_int8_w8a16=use_int8_w8a16, use_fp8_w8a8=use_fp8_w8a8
        )
        # NOTE(woosuk): The current naming convention uses w2.shape[2], which
        # is the intermediate size after silu_and_mul.
        op_config = get_moe_configs(
            num_experts, shard_intermediate_size // 2, dtype_str, use_nn_moe=nn_moe
        )
        if op_config is None:
            config = get_default_config(
                num_tokens,
                num_experts,
                shard_intermediate_size,
                hidden_size,
                topk,
                dtype_str,
                is_marlin=False,
                use_nn_moe=nn_moe
            )
        else:
            config = op_config[min(op_config.keys(), key=lambda x: abs(x - num_tokens))]
        kernel_time = benchmark_config(
            config,
            num_tokens,
            num_experts,
            shard_intermediate_size,
            hidden_size,
            topk,
            dtype,
            use_fp8_w8a8,
            use_int8_w8a16,
            num_iters=100,
            block_quant_shape=block_quant_shape,
            use_deep_gemm=use_deep_gemm,
            nn_moe=nn_moe,
        )
        return config, kernel_time

    def tune(
        self,
        num_tokens: int,
        num_experts: int,
        shard_intermediate_size: int,
        hidden_size: int,
        topk: int,
        dtype: torch.dtype,
        use_fp8_w8a8: bool,
        use_int8_w8a16: bool,
        search_space: list[dict[str, int]],
        block_quant_shape: list[int],
        use_deep_gemm: bool,
        nn_moe: Optional[bool] = False,
    ) -> dict[str, int]:
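        # Benchmark every remaining config in the (optionally pruned) search
        # space and keep the fastest one.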
        from vllm.platforms import current_platform
        import os
        
        best_config = None
        best_time = float("inf")
        if current_platform.is_rocm():
            is_fp16 = not (use_fp8_w8a8 or use_int8_w8a16)
            search_space = prune_rocm_search_space(
                num_tokens,
                shard_intermediate_size,
                hidden_size,
                search_space,
                is_fp16,
                topk,
            )

        # In ROCm environments with Ray, the device context is already handled by
        # Ray; using torch.cuda.device() there may cause device ordinal conflicts.
        need_device_guard = False
        if not current_platform.is_rocm():
            # On other platforms, guard the device only when several GPUs are visible.
            visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", None)
            if visible_devices is not None and len(visible_devices.split(",")) > 1:
                need_device_guard = True

        with torch.cuda.device(self.device_id) if need_device_guard else nullcontext():
            for config in tqdm(search_space):
                try:
                    kernel_time = benchmark_config(
                        config,
                        num_tokens,
                        num_experts,
                        shard_intermediate_size,
                        hidden_size,
                        topk,
                        dtype,
                        use_fp8_w8a8,
                        use_int8_w8a16,
                        num_iters=20,
                        block_quant_shape=block_quant_shape,
                        use_deep_gemm=use_deep_gemm,
                        nn_moe=nn_moe)
                except triton.runtime.autotuner.OutOfResources:
                    # Some configurations may be invalid and fail to compile.
                    continue

                if kernel_time < best_time:
                    best_time = kernel_time
                    best_config = config
        now = datetime.now()
        print(f"[{now.ctime()}] Completed tuning for batch_size={num_tokens}")
        assert best_config is not None
        return best_config


def sort_config(config: BenchmarkConfig) -> BenchmarkConfig:
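    # Emit keys in a canonical order so the saved JSON configs stay stable and
    # diffable; optional keys are appended only when present.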
    return {
        "BLOCK_SIZE_M": config["BLOCK_SIZE_M"],
        "BLOCK_SIZE_N": config["BLOCK_SIZE_N"],
        "BLOCK_SIZE_K": config["BLOCK_SIZE_K"],
        "GROUP_SIZE_M": config["GROUP_SIZE_M"],
        "num_warps": config["num_warps"],
        "num_stages": config["num_stages"],
        **(
            {"num_ldmatrixes": config["num_ldmatrixes"]} if "num_ldmatrixes" in config else {}
        ),
        **(
            {"waves_per_eu": config["waves_per_eu"]} if "waves_per_eu" in config else {}
        ),
        **(
            {"matrix_instr_nonkdim": config["matrix_instr_nonkdim"]}
            if "matrix_instr_nonkdim" in config
            else {}
        ),
        **({"kpack": config["kpack"]} if "kpack" in config else {}),
    }


def save_configs(
    configs: dict[int, BenchmarkConfig],
    num_experts: int,
    shard_intermediate_size: int,
    hidden_size: int,
    topk: int,
    dtype: torch.dtype,
    use_fp8_w8a8: bool,
    use_int8_w8a16: bool,
    block_quant_shape: list[int],
    use_nn_moe: Optional[bool] = False,
) -> None:
    from vllm.model_executor.layers.fused_moe.fused_moe import (
        get_config_dtype_str, get_config_file_name
    )
    
    dtype_str = get_config_dtype_str(
        dtype, use_int8_w8a16=use_int8_w8a16, use_fp8_w8a8=use_fp8_w8a8
    )

    # NOTE(woosuk): The current naming convention uses w2.shape[2], which
    # is the intermediate size after silu_and_mul.
    filename = get_config_file_name(
        num_experts,
        shard_intermediate_size // 2,
        dtype_str,
        block_quant_shape,
        use_nn_moe=use_nn_moe,
    )

    print(f"Writing best config to {filename}...")
    with open(filename, "w") as f:
        json.dump(configs, f, indent=4)
        f.write("\n")


def get_weight_block_size_safety(config, default_value=None):
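    # Return weight_block_size from the HF quantization_config when present,
    # otherwise the given default.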
    quantization_config = getattr(config, "quantization_config", {})
    if isinstance(quantization_config, dict):
        return quantization_config.get("weight_block_size", default_value)
    return default_value


def main(args: argparse.Namespace):
    import os
    import logging

    from vllm.platforms import current_platform
    
    logger = logging.getLogger(__name__)
    print(args)
    
    tp_size = args.tp_size
    config = get_config(model=args.model, trust_remote_code=args.trust_remote_code)
    if args.model_prefix:
        config = getattr(config, args.model_prefix)

    if config.architectures[0] == "DbrxForCausalLM":
        E = config.ffn_config.moe_num_experts
        topk = config.ffn_config.moe_top_k
        intermediate_size = config.ffn_config.ffn_hidden_size
        shard_intermediate_size = 2 * intermediate_size // tp_size
    elif config.architectures[0] == "JambaForCausalLM":
        E = config.num_experts
        topk = config.num_experts_per_tok
        intermediate_size = config.intermediate_size
        shard_intermediate_size = 2 * intermediate_size // tp_size
    elif config.architectures[0] in (
        "DeepseekV3ForCausalLM",
        "DeepseekV2ForCausalLM",
        "Glm4MoeForCausalLM",
    ):
        E = config.n_routed_experts
        topk = config.num_experts_per_tok
        intermediate_size = config.moe_intermediate_size
        shard_intermediate_size = 2 * intermediate_size // tp_size
    elif config.architectures[0] in ("Qwen2MoeForCausalLM", "Qwen3MoeForCausalLM"):
        E = config.num_experts
        topk = config.num_experts_per_tok
        intermediate_size = config.moe_intermediate_size
        shard_intermediate_size = 2 * intermediate_size // tp_size
    elif config.architectures[0] in ("Step3VLForConditionalGeneration",):
        E = config.text_config.moe_num_experts
        topk = config.text_config.moe_top_k
        intermediate_size = config.text_config.moe_intermediate_size
        shard_intermediate_size = 2 * intermediate_size // tp_size
    else:
        # Support for llama4
        config = config.get_text_config()
        # Default: Mixtral.
        E = config.num_local_experts
        topk = config.num_experts_per_tok
        intermediate_size = config.intermediate_size
        shard_intermediate_size = 2 * intermediate_size // tp_size

    hidden_size = config.hidden_size
    dtype = torch.float16 if current_platform.is_rocm() else config.torch_dtype
    use_fp8_w8a8 = args.dtype == "fp8_w8a8"
    use_int8_w8a16 = args.dtype == "int8_w8a16"
    block_quant_shape = get_weight_block_size_safety(config)

    if args.batch_size is None:
        batch_sizes = [
            1,
            2,
            4,
            8,
            16,
            24,
            32,
            48,
            64,
            96,
            128,
            256,
            512,
            1024,
            1536,
            2048,
            3072,
            4096,
        ]
    else:
        batch_sizes = args.batch_size

    use_deep_gemm = bool(args.use_deep_gemm)

    if current_platform.is_rocm() and "HIP_VISIBLE_DEVICES" in os.environ:
        # Ray will set ROCR_VISIBLE_DEVICES for device visibility
        logger.warning(
            "Ray uses ROCR_VISIBLE_DEVICES to control device accessibility. "
            "Replacing HIP_VISIBLE_DEVICES with ROCR_VISIBLE_DEVICES."
        )
        val = os.environ["HIP_VISIBLE_DEVICES"]
        os.environ["ROCR_VISIBLE_DEVICES"] = val
        del os.environ["HIP_VISIBLE_DEVICES"]

    ray.init(address=None, ignore_reinit_error=True, num_gpus=args.num_gpus)
    num_gpus = int(ray.available_resources()["GPU"])
    workers = [BenchmarkWorker.remote(args.seed, i) for i in range(num_gpus)]

    def _distribute(method: str, inputs: list[Any]) -> list[Any]:
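        # Dispatch one (method, args) call per batch size, round-robin across the
        # Ray workers, and gather the results.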
        outputs = []
        worker_idx = 0
        for input_args in inputs:
            worker = workers[worker_idx]
            worker_method = getattr(worker, method)
            output = worker_method.remote(*input_args)
            outputs.append(output)
            worker_idx = (worker_idx + 1) % num_gpus
        return ray.get(outputs)

    if args.tune:
        is_fp16 = not (use_fp8_w8a8 or use_int8_w8a16)
        search_space = get_configs_compute_bound(is_fp16, block_quant_shape, args.nn_moe)
        print(f"Start tuning over {len(search_space)} configurations...")

        start = time.time()
        configs = _distribute(
            "tune",
            [
                (
                    batch_size,
                    E,
                    shard_intermediate_size,
                    hidden_size,
                    topk,
                    dtype,
                    use_fp8_w8a8,
                    use_int8_w8a16,
                    search_space,
                    block_quant_shape,
                    use_deep_gemm,
                    args.nn_moe,
                )
                for batch_size in batch_sizes
            ],
        )
        best_configs = {
            M: sort_config(config) for M, config in zip(batch_sizes, configs)
        }
        save_configs(
            best_configs,
            E,
            shard_intermediate_size,
            hidden_size,
            topk,
            dtype,
            use_fp8_w8a8,
            use_int8_w8a16,
            block_quant_shape,
            use_nn_moe=args.nn_moe,
        )
        end = time.time()
        print(f"Tuning took {end - start:.2f} seconds")
    else:
        outputs = _distribute(
            "benchmark",
            [
                (
                    batch_size,
                    E,
                    shard_intermediate_size,
                    hidden_size,
                    topk,
                    dtype,
                    use_fp8_w8a8,
                    use_int8_w8a16,
                    block_quant_shape,
                    use_deep_gemm,
                    args.nn_moe,
                )
                for batch_size in batch_sizes
            ],
        )

        for batch_size, (config, kernel_time) in zip(batch_sizes, outputs):
            print(f"Batch size: {batch_size}, config: {config}")
            print(f"Kernel time: {kernel_time:.2f} us")


if __name__ == "__main__":
    parser = FlexibleArgumentParser()
    parser.add_argument(
        "--model", type=str, default="mistralai/Mixtral-8x7B-Instruct-v0.1"
    )
    parser.add_argument(
        "--tp-size", "-tp", "--tensor-parallel-size", type=int, default=2
    )
    parser.add_argument(
        "--dtype", type=str, choices=["auto", "fp8_w8a8", "int8_w8a16"], default="auto"
    )
    parser.add_argument("--use-deep-gemm", action="store_true")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--batch-size", type=int, nargs="+", required=False)
    parser.add_argument("--tune", action="store_true")
    parser.add_argument("--nn-moe", action="store_true")
    parser.add_argument("--trust-remote-code", action="store_true")
    parser.add_argument("--model-prefix", type=str, required=False)
    parser.add_argument("--num-gpus", type=int, default=1)
    args = parser.parse_args()

    main(args)