gpt3xl-flash.yaml 800 Bytes
Newer Older
Tri Dao's avatar
Tri Dao committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
# @package _global_
# GPT-3 XL scale (n_embd=2048, 16 heads, 24 layers) on the Pile with
# FlashAttention. Inherits all other settings from the small (gpt3s) config.
defaults:
  - /experiment/pile/gpt3s-flash.yaml
  - override /optimizer: adamw-zero  # ZeRO-sharded AdamW (optimizer state sharded across ranks)

model:
  config:
    n_embd: 2048
    n_head: 16
    n_layer: 24

datamodule:
  # Per-GPU micro-batch size tiered by ${train.gpu_mem} (presumably GPU memory
  # in GB — confirm against the base config). Fixed: the last tier compared
  # ${train.gpu}, which is undefined, instead of ${train.gpu_mem} like the
  # other tiers — this would fail to resolve on >=40GB GPUs.
  batch_size: ${eval:"1 if ${train.gpu_mem} < 24 else (2 if ${train.gpu_mem} < 40 else (4 if ${train.gpu_mem} < 80 else 8))"}

train:
  global_batch_size: 512
  optimizer:
    lr: 2.0e-4
  scheduler:
    t_initial: 300000

trainer:
  strategy:
    _target_: src.utils.ddp_zero1.DDPStrategyZero1
    find_unused_parameters: false
    gradient_as_bucket_view: true
  max_steps: 400000
  # Scale by accumulation so validation runs every 1000 optimizer steps,
  # independent of the micro-batch / accumulation split.
  val_check_interval: ${eval:1000 * ${.accumulate_grad_batches}}

callbacks:
  model_checkpoint:
    every_n_train_steps: 1000
  model_checkpoint_progress:
    every_n_train_steps: 12500
    fault_tolerant: false  # Saving takes too long