checkpoints:
  checkpoint_interval: 1000        # save a checkpoint every 1000 steps
  checkpoints_path: checkpoints/
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: null
  save_initial_state: false

# Three sequential data stages; each stage takes over at its start_training_step.
data_stages:
- data:
    dataset:
      dataset_folder: datasets/c4-es/tokenized
    num_loading_workers: 1
    seed: 42
  name: General purpose training (Single dataset)
  start_training_step: 1
- data:
    dataset:
      dataset_folder:
      - datasets/SlimPajama-6B/tokenized
      - datasets/c4-es/tokenized
    num_loading_workers: 1
    seed: 42
  name: Second purpose training (> 1 dataset)
  start_training_step: 15
- data:
    dataset:
      dataset_folder:                          # folder -> sampling weight
        datasets/SlimPajama-6B/tokenized: 0.8
        datasets/c4-es/tokenized: 0.2
    num_loading_workers: 1
    seed: 42
  name: Third purpose training (Blended dataset)
  start_training_step: 25

general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: Nanoset
  run: llama
  seed: 42
  step: null

lighteval: null

logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info

model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.025
  make_vocab_size_divisible_by: 1
  # Debug-scale Llama-style model: 2 layers, hidden size 16.
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 16
    initializer_range: 0.02
    intermediate_size: 64
    is_llama_config: true
    max_position_embeddings: 1024
    num_attention_heads: 4
    num_hidden_layers: 2
    num_key_value_heads: 4
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 50257

optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  # 2 linear warmup steps, then cosine decay over 98 steps down to min_decay_lr.
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 98
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 0

parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 1
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER

profiler: null

tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: gpt2
  tokenizer_revision: null

tokens:
  batch_accumulation_per_replica: 1
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 2    # global batch = micro_batch_size * batch_accumulation_per_replica * dp = 2 sequences/step
  sequence_length: 1024
  train_steps: 200
  val_check_interval: -1
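
# A minimal launch sketch, kept as comments so this file stays valid YAML.
# It assumes this config is consumed by nanotron's run_train.py entry point;
# the script location and the config filename below are assumptions, so adjust
# both to your checkout. With dp=1, tp=1, pp=1 above, one process suffices:
#
#   torchrun --nproc_per_node=1 run_train.py --config-file path/to/this_config.yaml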