# @package _global_

# Hydra experiment config: GPT-2 Medium with FlashAttention.
# Inherits the GPT-2 Small flash experiment (presumably OWT = OpenWebText
# pretraining — confirm against the base config) and overrides the model
# size, the per-device batch size, and the learning rate.
defaults:
  - /experiment/owt/gpt2s-flash.yaml
  - override /model/gpt2model: gpt2-medium

# Can enable mlp_checkpoint_lvl to fit batch_size 32 to A100 40GB
# model:
#   config:
#     mlp_checkpoint_lvl: 1

datamodule:
  # batch_size: 32
  # Per-device batch size selected from train.gpu_mem (GB) via the `eval`
  # resolver: <24GB -> 8, <40GB -> 16, <80GB -> 32, otherwise 64.
  batch_size: ${eval:"8 if ${train.gpu_mem} < 24 else (16 if ${train.gpu_mem} < 40 else (32 if ${train.gpu_mem} < 80 else 64))"}

train:
  optimizer:
    # Peak LR for the medium model (lower than the small-model default
    # inherited from gpt2s-flash.yaml).
    lr: 1.5e-4