# amd_mi100_hpe.yaml
# SuperBench Config
#
# Server:
#   - Product: HPE Apollo 6500
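#
# Assumed usage (sketch, not part of the original file): deploy and run with
# the SuperBench CLI, e.g. `sb deploy -f host.ini` followed by
# `sb run -f host.ini -c amd_mi100_hpe.yaml`, where host.ini lists the nodes.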

version: v0.2
superbench:
  enable: null
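  # `var` collects reusable YAML anchors (&name); each benchmark below pulls
  # one in via the merge key `<<: *name` and may override individual fields.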
  var:
    default_local_mode: &default_local_mode
      enable: true
      modes:
        - name: local
          proc_num: 8
          prefix: HIP_VISIBLE_DEVICES={proc_rank}
          parallel: yes
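    # Shared mode for the PyTorch model benchmarks: a single-node
    # torch.distributed launch with 8 processes (assumed one per MI100 GPU).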
    default_pytorch_mode: &default_pytorch_mode
      enable: true
      modes:
        - name: torch.distributed
          proc_num: 8
          node_num: 1
      frameworks:
        - pytorch
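    # Training settings shared by every *_models benchmark below; individual
    # entries override fields such as batch_size, seq_len, and pin_memory.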
    common_model_config: &common_model_config
      duration: 0
      num_warmup: 64
      num_steps: 2048
      sample_count: 8192
      batch_size: 32
      precision:
        - float32
        - float16
      model_action:
        - train
      pin_memory: yes
  benchmarks:
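    # Kernel launch overhead per GPU, using the default local mode
    # (8 parallel processes, one GPU each via HIP_VISIBLE_DEVICES).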
    kernel-launch:
      <<: *default_local_mode
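    # RCCL all-reduce bandwidth across the 8 GPUs, launched under MPI.
    # NCCL_SOCKET_IFNAME selects the network interface for bootstrap traffic
    # and NCCL_IB_GDR_LEVEL controls GPUDirect RDMA use; message sizes sweep
    # 32M-128M over 50 iterations with one GPU per rank.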
    rccl-bw:
      enable: true
      modes:
        - name: mpi
          proc_num: 8
          env:
            NCCL_SOCKET_IFNAME: ens17f0
            NCCL_IB_GDR_LEVEL: 1
      parameters:
        maxbytes: 128M
        minbytes: 32M
        iters: 50
        ngpus: 1
        operation: allreduce
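    # Per-GPU device memory bandwidth, one local process per GPU.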
    mem-bw:
      <<: *default_local_mode
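    # Peak GEMM throughput per GPU; m/n/k are the matrix dimensions passed to
    # the underlying GEMM kernels (assumed to be sized for MI100 here).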
    gemm-flops:
      <<: *default_local_mode
      parameters:
        m: 7680
        n: 8192
        k: 8192
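    # InfiniBand loopback bandwidth: 4 processes run sequentially, one per HCA
    # listed in IB_DEVICES.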
    ib-loopback:
      enable: true
      modes:
        - name: local
          proc_num: 4
          prefix: PROC_RANK={proc_rank} IB_DEVICES=0,1,2,3
          parallel: no
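    # Disk benchmark is off by default; to use it, set enable: true and list
    # the block devices to test (e.g. /dev/nvme0n1, an illustrative value only).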
    disk-benchmark:
      enable: false
      modes:
        - name: local
          proc_num: 1
          parallel: no
      parameters:
        block_devices: []
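    # Host/device copy bandwidth driven by the GPU's compute units. The 32
    # sequential processes each pin one GPU ({proc_rank}%8) and one NUMA node
    # ({proc_rank}%4, via numactl), measuring both dtoh and htod directions.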
    gpu-sm-copy-bw:
      enable: true
      modes:
        - name: local
          proc_num: 32
          prefix: CUDA_VISIBLE_DEVICES=$(({proc_rank}%8)) numactl -N $(({proc_rank}%4)) -m $(({proc_rank}%4))
          parallel: no
      parameters:
        mem_type:
          - dtoh
          - htod
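    # Model benchmarks: train standard PyTorch models (GPT-2, BERT, LSTM,
    # ResNet, DenseNet, VGG) in float32 and float16 under the shared
    # torch.distributed mode, with per-family overrides below.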
    gpt_models:
      <<: *default_pytorch_mode
      models:
        - gpt2-large
      parameters:
        <<: *common_model_config
        batch_size: 8
        seq_len: 224
    bert_models:
      <<: *default_pytorch_mode
      models:
        - bert-base
        - bert-large
      parameters:
        <<: *common_model_config
        seq_len: 224
    lstm_models:
      <<: *default_pytorch_mode
      models:
        - lstm
      parameters:
        <<: *common_model_config
        batch_size: 224
        input_size: 224
        hidden_size: 1000
        seq_len: 32
        pin_memory: no
    resnet_models:
      <<: *default_pytorch_mode
      models:
        - resnet50
        - resnet101
        - resnet152
      parameters:
        <<: *common_model_config
        pin_memory: no
    densenet_models:
      <<: *default_pytorch_mode
      models:
        - densenet169
        - densenet201
      parameters:
        <<: *common_model_config
        pin_memory: no
    vgg_models:
      <<: *default_pytorch_mode
      models:
        - vgg11
        - vgg13
        - vgg16
        - vgg19
      parameters:
        <<: *common_model_config
        pin_memory: no