# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Model benchmark example for bert-large (24-layer, 1024-hidden, 16-heads, 340M parameters).

Commands to run:
  python3 examples/benchmarks/pytorch_bert_large.py (Single GPU)
  python3 -m torch.distributed.launch --use_env --nproc_per_node=8 examples/benchmarks/pytorch_bert_large.py \
      --distributed (Distributed)
"""

import argparse

from superbench.benchmarks import Platform, Framework, BenchmarkRegistry
from superbench.common.utils import logger

if __name__ == '__main__':
    # Parse command-line options. Only one flag is supported: --distributed
    # switches the benchmark from single-GPU to multi-GPU (DDP/NCCL) mode.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--distributed', action='store_true', default=False, help='Whether to enable distributed training.'
    )
    args = parser.parse_args()

    # Specify the model name and benchmark parameters.
    # batch_size/seq_len/precision define the workload; duration=120 with
    # run_count=2 means the benchmark runs for 120 * 2 seconds in total.
    model_name = 'bert-large'
    parameters = '--batch_size 1 --duration 120 --seq_len 128 --precision float32 --run_count 2'
    if args.distributed:
        # Use PyTorch DistributedDataParallel with the NCCL backend for multi-GPU runs.
        parameters += ' --distributed_impl ddp --distributed_backend nccl'

    # Create context for bert-large benchmark and run it for 120 * 2 seconds.
    context = BenchmarkRegistry.create_benchmark_context(
        model_name, platform=Platform.CUDA, parameters=parameters, framework=Framework.PYTORCH
    )

    # launch_benchmark returns a benchmark object on success (falsy on failure),
    # so only log results when the run actually produced one.
    benchmark = BenchmarkRegistry.launch_benchmark(context)
    if benchmark:
        logger.info(
            'benchmark: {}, return code: {}, result: {}'.format(
                benchmark.name, benchmark.return_code, benchmark.result
            )
        )