Commit d99506f3 authored by chenzk

v1.0.1

parent 61e92904
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- name: Stable Training Stage
  start_training_step: 1
  data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 1
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 1
    seed: 42
- name: Annealing Phase
  start_training_step: 9000
  data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 1
      hf_dataset_config_name: null
      hf_dataset_or_datasets: HuggingFaceH4/testing_alpaca_small
      hf_dataset_splits: train
      text_column_name: completion
    num_loading_workers: 1
    seed: 42
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: llama_350m_mup
  seed: 42
  step: null
logging:
  iteration_step_info_interval: 1
  log_level: debug
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 120
  dtype: bfloat16
  init_method:
    use_mup: true
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    initializer_range: 0.02
    hidden_size: 1024
    intermediate_size: 4096
    num_hidden_layers: 14
    is_llama_config: true
    max_position_embeddings: 1024
    num_attention_heads: 8
    num_key_value_heads: 4
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: false
    use_cache: true
    vocab_size: 49152
optimizer:
  accumulate_grad_in_fp32: false
  adam_beta1: 0.9
  adam_beta2: 0.95
  adam_eps: 1.0e-08
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.001
    lr_decay_starting_step: null
    lr_decay_steps: null
    lr_decay_style: cosine
    lr_warmup_steps: 100 # 10% warm up of total training steps
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  torch_adam_is_fused: true
  weight_decay: 0.1
  zero_stage: 0
parallelism:
  dp: 4
  pp: 1
  pp_engine: 1f1b
  tp: 2
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: gpt2
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 8
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 32
  sequence_length: 1024
  train_steps: 440
  val_check_interval: -1
lighteval:
  batch_size: 16
  checkpoints_path: null
  generation: null
  logging:
    hub_repo_details: null
    hub_repo_results: null
    # hub_repo_tensorboard: HuggingFaceBR4/fmom-mamba2
    local_output_path: /fsx/phuc/new_workspace/experiments/mup_for_mamba2/test_mamba350M_tp4_917cfc66/logs
    push_details_to_hub: null
    push_results_to_hub: null
    push_results_to_tensorboard: true
    tensorboard_metric_prefix: e
  parallelism:
    dp: 2
    expert_parallel_size: 1
    pp: 1
    pp_engine: 1f1b
    tp: 2
    tp_linear_async_communication: false
    tp_mode: ALL_REDUCE
  # slurm_script_dir: /fsx/phuc/new_workspace/experiments/mup_for_mamba2/test_mamba350M_tp4_917cfc66/lighteval/slurm_scripts
  # slurm_template: /fsx/phuc/new_workspace/experiments/mup_for_mamba2/test_mamba350M_tp4_917cfc66/run_eval.slurm.jinja
  tasks:
    # custom_tasks: brrr.lighteval.custom_tasks
    dataset_loading_processes: 8
    max_samples: 1000
    multichoice_continuations_start_space: null
    no_multichoice_continuations_start_space: null
    num_fewshot_seeds: null
    tasks: early-signal
  wandb: null
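For reference, a minimal sketch (not part of this commit) of the effective batch size implied by the parallelism and tokens sections of the muP config above; the numbers are copied directly from the config:

# Sketch only: derive the effective batch size from the muP config above.
dp=4                 # parallelism.dp
mbs=32               # tokens.micro_batch_size
accum=8              # tokens.batch_accumulation_per_replica
seq_len=1024         # tokens.sequence_length
steps=440            # tokens.train_steps
global_batch=$(( dp * mbs * accum ))           # 1024 sequences per optimizer step
tokens_per_step=$(( global_batch * seq_len ))  # 1048576 tokens per step
total_tokens=$(( tokens_per_step * steps ))    # ~461M tokens over the full run
echo "${global_batch} seq/step, ${tokens_per_step} tokens/step, ${total_tokens} tokens total"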
checkpoints:
  checkpoint_interval: 10000
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false
data_stages:
- name: Stable Training Stage
  start_training_step: 1
  data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 1
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 1
    seed: 42
- name: Annealing Phase
  start_training_step: 9000
  data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 1
      hf_dataset_config_name: null
      hf_dataset_or_datasets: HuggingFaceH4/testing_alpaca_small
      hf_dataset_splits: train
      text_column_name: completion
    num_loading_workers: 1
    seed: 42
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: llama_350m_sp
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 120
  dtype: bfloat16
  init_method:
    std: 0.03125 # 1/sqrt(1024) = 0.03125
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    initializer_range: 0.02
    hidden_size: 1024
    intermediate_size: 4096
    num_hidden_layers: 14
    is_llama_config: true
    max_position_embeddings: 1024
    num_attention_heads: 8
    num_key_value_heads: 4
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: false
    use_cache: true
    vocab_size: 49152
optimizer:
  accumulate_grad_in_fp32: false
  adam_beta1: 0.9
  adam_beta2: 0.95
  adam_eps: 1.0e-08
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.001
    lr_decay_starting_step: null
    lr_decay_steps: null
    lr_decay_style: cosine
    lr_warmup_steps: 100 # 10% warm up of total training steps
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  torch_adam_is_fused: true
  weight_decay: 0.1
  zero_stage: 0
parallelism:
  dp: 4
  pp: 1
  pp_engine: 1f1b
  tp: 2
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: gpt2
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 8
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 32
  sequence_length: 1024
  train_steps: 440
  val_check_interval: -1
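Apart from the run name, logging level, and the lighteval section, the main difference from the muP config above is init_method: this standard-parameterization run fixes the init std from the hidden size, while the muP run sets use_mup: true and lets the framework derive per-layer scales. A quick check of the value (sketch only, assumes bc is available):

echo "scale=5; 1 / sqrt(1024)" | bc -l   # prints .03125, matching init_method.std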
#!/bin/bash
# Simple script to create a tiny llama model and train it
set -e -x
# Create the YAML config file
EXAMPLE_PATH=$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)
REPO_PATH=$(dirname $EXAMPLE_PATH)
python $EXAMPLE_PATH/config_tiny_llama.py
# Setup from environment variables
export CUDA_DEVICE_MAX_CONNECTIONS=1
export FI_PROVIDER="efa"
python -u -m torch.distributed.run \
    --nproc_per_node 8 \
    --nnodes 1 \
    --rdzv_backend c10d \
    --max_restarts 0 \
    --tee 3 \
    $REPO_PATH/run_train.py --config-file $EXAMPLE_PATH/config_tiny_llama.yaml
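A hedged variant (not in this commit): the same entry point on a single GPU for quick debugging. The generated YAML assumes dp=4, tp=2, pp=1, i.e. 8 processes, so dp and tp would first have to be reduced to 1 in config_tiny_llama.yaml:

# Sketch only: single-GPU debug launch, after setting dp=1, tp=1, pp=1 in the YAML.
torchrun --nproc_per_node 1 $REPO_PATH/run_train.py --config-file $EXAMPLE_PATH/config_tiny_llama.yaml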
wget https://mirror.ghproxy.com/https://github.com/git-lfs/git-lfs/releases/download/v3.5.1/git-lfs-linux-amd64-v3.5.1.tar.gz
tar -xzvf git-lfs-linux-amd64-v3.5.1.tar.gz
./git-lfs-3.5.1/install.sh
rm -rf git-lfs-3.5.1 git-lfs-linux-amd64-v3.5.1.tar.gz
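A quick sanity check after installing (assumes git is already on PATH):

git lfs install   # registers the git-lfs filter in the global git config
git lfs version   # should report git-lfs/3.5.1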
icon.png (53.8 KB)
torchrun --nproc_per_node=1 run_generate.py --ckpt-path checkpoints/10/ --tp 1 --pp 1
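The --tp/--pp values must match the number of processes torchrun starts. For example, a 2-way tensor-parallel variant of the same command (an assumption, not from this commit; it requires the step-10 checkpoint to exist and be loadable with tp=2):

torchrun --nproc_per_node=2 run_generate.py --ckpt-path checkpoints/10/ --tp 2 --pp 1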
#!/bin/bash
#SBATCH --job-name=smollm1-135M
#SBATCH --nodes=4
#SBATCH --gres=gpu:8
#SBATCH --qos=high
#SBATCH --output=./logs/train-%j.out
#SBATCH --error=./logs/train-%j.err
set -e
TRAINER_PYTHON_FILE="run_train.py"
CONFIG_PATH_YAML="smollm1/config_smollm1_135M.yaml"
nvidia-smi
# Show some environment variables
echo python3 version = `python3 --version`
echo "Python path: $(which python3)"
echo "NCCL version: $(python -c "import torch;print(torch.cuda.nccl.version())")"
echo "CUDA version: $(python -c "import torch;print(torch.version.cuda)")"
echo "START TIME: $(date)"
secs_to_human() {
echo "$(( ${1} / 3600 )):$(( (${1} / 60) % 60 )):$(( ${1} % 60 ))"
}
start=$(date +%s)
echo "$(date -d @${start} "+%Y-%m-%d %H:%M:%S"): ${SLURM_JOB_NAME} start id=${SLURM_JOB_ID}\n"
# SLURM stuff
export HOSTNAMES=`scontrol show hostnames "$SLURM_JOB_NODELIST"`
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=6000
export COUNT_NODE=`scontrol show hostnames "$SLURM_JOB_NODELIST" | wc -l`
export CUDA_DEVICE_MAX_CONNECTIONS="1"
echo "Number of nodes: $COUNT_NODE"
echo "Hostnames: $HOSTNAMES"
CMD=" $TRAINER_PYTHON_FILE \
--config-file $CONFIG_PATH_YAML \
"
export LAUNCHER="torchrun \
    --nproc_per_node 8 \
    --nnodes $COUNT_NODE \
    --max_restarts 0 \
    --tee 3 \
    "
# Wait a random number between 0 and 1000 (milliseconds) to avoid too many concurrent requests to the hub
random_milliseconds=$(( RANDOM % 1001 ))
sleep_time=$(bc <<< "scale=3; $random_milliseconds / 1000")
echo "Sleeping for $sleep_time seconds..."
sleep $sleep_time
# --node_rank and --role are escaped so each srun task expands its own SLURM_PROCID / SLURMD_NODENAME
srun $SRUN_ARGS -u bash -c "$LAUNCHER --node_rank \$SLURM_PROCID --role \$SLURMD_NODENAME: $CMD"
echo "END TIME: $(date)"
# Model code
modelCode=1128
# Model name
modelName=nanotron_pytorch
# Model description
modelDescription=Fully open-source large-model pretraining. This project can pretrain large language models that outperform Qwen2.5 and Llama 3, based on training code from major AI companies.
# Application scenarios
appScenario=inference,training,conversational Q&A,manufacturing,broadcast media,finance,energy,healthcare,smart home,education
# Framework type
frameType=pytorch