# @package _global_

# GPT-3 Small experiment on the Pile, with FlashAttention and fused kernels enabled.
defaults:
  - /experiment/pile/base.yaml
  - override /model: gpt2
  - override /model/gpt2model: gpt2-small

model:
  config:
    # n_positions is already set to ${datamodule.max_length}
    residual_in_fp32: true
    use_flash_attn: true
    fused_dropout_add_ln: true
    fused_mlp: true
    fused_bias_fc: true
    # NOTE(review): presumably pads the vocab so embedding dims hit
    # hardware-friendly multiples — confirm against the model code.
    pad_vocab_size_multiple: 8

datamodule:
  # Pick per-GPU batch size from available GPU memory (GB):
  # <24 -> 8, <40 -> 16, otherwise 32. Resolved by the `eval` resolver at run time.
  batch_size: ${eval:"8 if ${train.gpu_mem} < 24 else (16 if ${train.gpu_mem} < 40 else 32)"}