Commit 4a6eaa9f authored by Tri Dao

Update configs, add results

parent 0bf5e500
# @package _global_
defaults:
  - /experiment/pile/gpt3s-hf.yaml
model:
  config:
    n_embd: 1536
    n_head: 16
    n_layer: 24
datamodule:
  batch_size: 2
train:
  optimizer:
    lr: 2.5e-4
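For reference, the `n_embd: 1536`, `n_head: 16`, `n_layer: 24` settings above correspond to a roughly 760M-parameter model (GPT-3 Large sized). A back-of-the-envelope sketch, assuming a standard GPT-2/GPT-3 style decoder with a ~50K vocabulary and learned position embeddings (both assumptions, not values taken from this commit):

```python
# Rough parameter count for n_embd=1536, n_layer=24 (biases and LayerNorms ignored).
n_embd, n_layer = 1536, 24
vocab_size, n_positions = 50257, 2048   # assumed GPT-2 defaults

attn = 4 * n_embd * n_embd              # Q, K, V and output projections
mlp = 2 * n_embd * (4 * n_embd)         # fc1 (d -> 4d) and fc2 (4d -> d)
embeddings = (vocab_size + n_positions) * n_embd

total = n_layer * (attn + mlp) + embeddings
print(f"~{total / 1e6:.0f}M parameters")  # ~760M, i.e. GPT-3 Large scale
```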
@@ -9,7 +9,7 @@ defaults:
     # mlp_checkpoint_lvl: 1
 datamodule:
-  batch_size: ${eval:"4 if ${train.gpu_mem} < 24 else (8 if ${train.gpu_mem} < 40 else 16)"}
+  batch_size: ${eval:"4 if ${train.gpu_mem} < 24 else (8 if ${train.gpu_mem} < 40 else (16 if ${train.gpu_mem} < 80 else 32))"}
 train:
   optimizer:
......
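The `${eval:...}` interpolation above picks a per-GPU batch size from the available GPU memory at config-resolution time. A minimal sketch of how such a resolver can work, assuming the training entry point registers an OmegaConf resolver named `eval` that simply calls Python's `eval` (the exact registration point in the repo may differ):

```python
from omegaconf import OmegaConf

# Assumption: an "eval" resolver is registered somewhere in the training entry point.
OmegaConf.register_new_resolver("eval", eval)

cfg = OmegaConf.create("""
train:
  gpu_mem: 40
datamodule:
  batch_size: ${eval:"4 if ${train.gpu_mem} < 24 else (8 if ${train.gpu_mem} < 40 else (16 if ${train.gpu_mem} < 80 else 32))"}
""")
print(cfg.datamodule.batch_size)  # 16 with gpu_mem=40, 32 with gpu_mem=80
```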
# @package _global_
defaults:
  - /experiment/pile/gpt3s-hf.yaml
  - override /model/gpt2model: gpt2-medium
datamodule:
  batch_size: 4
train:
  optimizer:
    lr: 3.0e-4
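The `override /model/gpt2model: gpt2-medium` line presumably swaps the model config group to the Hugging Face GPT-2 medium architecture (an assumption about what that config group resolves to). For reference, these are the standard gpt2-medium dimensions:

```python
from transformers import GPT2Config

# Standard Hugging Face gpt2-medium architecture hyperparameters.
config = GPT2Config.from_pretrained("gpt2-medium")
print(config.n_embd, config.n_layer, config.n_head)  # 1024 24 16
```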
# @package _global_
defaults:
  - /experiment/pile/base.yaml
  - override /model: gpt2-hf
  - override /model/gpt2model: gpt2-small
datamodule:
  batch_size: 8
train:
  # Use the standard torch.nn.CrossEntropyLoss
  loss_fn: null
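Setting `loss_fn: null` leaves loss selection to the training task; per the comment, it then falls back to `torch.nn.CrossEntropyLoss`. A hypothetical sketch of that pattern (the actual task code in the repo may wire this up differently):

```python
import torch
import hydra

def build_loss_fn(train_cfg):
    # Fall back to the standard PyTorch cross-entropy loss when no loss_fn is configured.
    if train_cfg.get("loss_fn") is None:
        return torch.nn.CrossEntropyLoss()
    # Otherwise instantiate whatever _target_ the config specifies.
    return hydra.utils.instantiate(train_cfg.loss_fn)
```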
 # @package _global_
 defaults:
-  - /experiment/pile/gpt2xl-flash.yaml
+  - /experiment/pile/gpt3xl-flash.yaml
 datamodule:
   max_length: 8192
......
 # @package _global_
 defaults:
-  - /experiment/pile/gpt2xl-flash-rotary.yaml
+  - /experiment/pile/gpt3xl-flash-rotary.yaml
 trainer:
   max_steps: 60000
......
 # @package _global_
 defaults:
-  - /experiment/pile/gpt2xl-flash-8k.yaml
+  - /experiment/pile/gpt3xl-flash-8k.yaml
 model:
   config:
......
 # @package _global_
 defaults:
-  - /experiment/pile/gpt2xl-flash.yaml
+  - /experiment/pile/gpt3xl-flash.yaml
 model:
   config:
......
@@ -10,7 +10,7 @@ model:
     n_layer: 24
 datamodule:
-  batch_size: ${eval:"1 if ${train.gpu_mem} < 24 else (2 if ${train.gpu_mem} < 40 else (4 if ${train.gpu} < 80 else 8))"}
+  batch_size: ${eval:"1 if ${train.gpu_mem} < 24 else (2 if ${train.gpu_mem} < 40 else (4 if ${train.gpu_mem} < 80 else 8))"}
 train:
   global_batch_size: 512
......
# @package _global_
defaults:
  - /experiment/pile/gpt3s-hf.yaml
  - override /optimizer: adamw-zero
model:
  config:
    n_embd: 2048
    n_head: 16
    n_layer: 24
datamodule:
  batch_size: 2
train:
  global_batch_size: 512
  optimizer:
    lr: 2.0e-4
  scheduler:
    t_initial: 300000
trainer:
  strategy:
    _target_: src.utils.ddp_zero1.DDPStrategyZero1
    find_unused_parameters: False
    gradient_as_bucket_view: True
  max_steps: 400000
  val_check_interval: ${eval:1000 * ${.accumulate_grad_batches}}
callbacks:
  model_checkpoint:
    every_n_train_steps: 1000
  model_checkpoint_progress:
    every_n_train_steps: 12500
    fault_tolerant: False  # Saving takes too long
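Two things worth noting in the config above. The `adamw-zero` optimizer override together with `DDPStrategyZero1` suggests ZeRO stage 1 style sharding of the AdamW optimizer states across data-parallel ranks (an inference from the names; the implementation lives in `src.utils.ddp_zero1`). And `val_check_interval` is multiplied by `accumulate_grad_batches` because Lightning counts micro-batches, so the scaling keeps validation at every 1000 optimizer steps. A small sketch of the arithmetic, assuming an 8-GPU node (the GPU count is not part of this commit):

```python
# Hypothetical numbers: 8 GPUs is an assumption, the rest comes from the config above.
num_gpus = 8
micro_batch_size = 2          # datamodule.batch_size
global_batch_size = 512       # train.global_batch_size

accumulate_grad_batches = global_batch_size // (micro_batch_size * num_gpus)
val_check_interval = 1000 * accumulate_grad_batches   # ${eval:1000 * ${.accumulate_grad_batches}}
print(accumulate_grad_batches, val_check_interval)    # 32 32000
```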