type: basic
format_version: 1
maintainers: [maanug]
loggers: [stdout]
spec:
  name: "{model}_{variant}_{scope}_{platforms}_{nodes}N{gpus}G_\
         {'mcore_' if use_mcore else ''}{'te_' if use_te else ''}\
         tp{tp_size}_pp{pp_size}{'_vp'+str(vp_size) if vp_size else ''}\
         {'_resume_'+str(ckpt_format) if ckpt_resume else ''}\
         {'_'+args_meta if args_meta else ''}"
  model: gpt3
  variant: 345m
  build: mcore-pyt
  scope: nightly
  nodes: 1
  gpus: 8
  platforms: dgx_a100
  use_te: False
  use_mcore: True
  vp_size: null
  extra_args: null
  args_meta: null
  micro_batch_size: 4 # MBS
  batch_size: 32 # GBS, JET schema requires 'batch_size'
  moe_grouped_gemm: 0
  time_limit: 1200
  artifacts: {/workspace/data/gpt3_data: text/the_pile/shard00}
  ckpt_format: torch
  ckpt_resume: 0
  script: |-
    ls
    cd /workspace/megatron-lm

    ./tests/functional_tests/test_scripts/gpt3/pretrain_gpt3_distributed_test.sh \
        DATA_PATH=/workspace/data/gpt3_data/my-gpt3_00_text_document \
        CHECKPOINT_PATH=/workspace/checkpoints \
        TENSORBOARD_DIR={assets_dir} \
        VOCAB_FILE=/workspace/data/gpt3_data/bpe/vocab.json \
        MERGE_FILE=/workspace/data/gpt3_data/bpe/merges.txt \
        DATA_CACHE=/workspace/data/index-cache \
        USE_TE={"1" if use_te else "0"} \
        TP_SIZE={tp_size} \
        PP_SIZE={pp_size} \
        NUM_NODES={nodes} \
        MAX_STEPS={100 if ckpt_resume else 50} \
        USE_CORE={"1" if use_mcore else "0"} \
        VP_SIZE={vp_size if vp_size is not None else '""'} \
        MBS={micro_batch_size} \
        GBS={batch_size} \
        MOE_GROUPED_GEMM={moe_grouped_gemm} \
        CKPT_FORMAT={ckpt_format} \
        CHECKPOINT_RESUME_TEST={ckpt_resume} \
        JOB_NAME={name} \
        ADDITIONAL_PARAMS={extra_args if extra_args is not None else '""'}
products:
  - {use_mcore: [True], tp_size: [4], pp_size: [1], ckpt_resume: [0, 1], ckpt_format: [torch_dist]}
  - {use_mcore: [False], tp_size: [4], pp_size: [1], ckpt_resume: [0, 1]}
  - {use_mcore: [True], tp_size: [4], pp_size: [1], ckpt_resume: [1]}
  - {use_mcore: [True], tp_size: [1], pp_size: [2,4], ckpt_resume: [0, 1], ckpt_format: [torch_dist]}
  - {use_mcore: [False], tp_size: [1], pp_size: [2,4], ckpt_resume: [0, 1]}
  - {tp_size: [2], pp_size: [2], ckpt_resume: [0, 1], ckpt_format: [torch_dist], extra_args: ['"--num-experts 2 --sequence-parallel --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["te_2experts"]}
  - {tp_size: [2], pp_size: [2], ckpt_resume: [0, 1], ckpt_format: [torch_dist], extra_args: ['"--sequence-parallel --num-experts 4 --expert-model-parallel-size 2 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["te_4experts2parallel"]}
  - {tp_size: [1], pp_size: [1], ckpt_resume: [0, 1], ckpt_format: [torch_dist], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce --overlap-param-gather"'], args_meta: ["dist_optimizer_overlap_grad_reduce_param_gather"]}
  # Non-MCore
  - {use_mcore: [False], tp_size: [1,4], pp_size: [1], extra_args: ["--overlap-grad-reduce"], args_meta: ["overlap_grad_reduce"]}
  - {use_mcore: [False], tp_size: [1], pp_size: [1], extra_args: ['"--use-distributed-optimizer --overlap-grad-reduce"'], args_meta: ["dist_optimizer_overlap_grad_reduce"]}
  - {use_mcore: [False], tp_size: [1], pp_size: [4], vp_size: [null, 1], extra_args: ["--overlap-grad-reduce"], args_meta: ["overlap_grad_reduce"]}
  - {use_mcore: [False], tp_size: [2], pp_size: [2], ckpt_resume: [0, 1], extra_args: ["--overlap-grad-reduce"], args_meta: ["overlap_grad_reduce"]}
  - {use_mcore: [False], tp_size: [2], pp_size: [2], ckpt_resume: [0, 1], extra_args: ['"--sequence-parallel --num-experts 4 --moe-router-load-balancing-type sinkhorn --moe-router-topk 1"'], args_meta: ["4experts"]}