checkpoints:
  checkpoint_interval: 100000
  checkpoints_path: /fsx/nouamane/projects/nanotron/examples/checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: /fsx/nouamane/projects/nanotron/examples/checkpoints
  save_initial_state: true
data_stages:
- data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 12
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
- data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 12
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 1
    seed: 42
  name: Annealing Phase
  start_training_step: 10
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: moe
  run: llamoe
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.025
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 512
    initializer_range: 0.02
    intermediate_size: 2048
    is_llamoe_config: true
    max_position_embeddings: 128
    moe_capacity_factor: 1
    moe_num_experts: 4
    num_attention_heads: 8
    num_experts_per_tok: 1
    num_hidden_layers: 1
    num_key_value_heads: 8
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-06
    rope_scaling: null
    tie_word_embeddings: false
    use_cache: true
    vocab_size: 32000
optimizer:
  accumulate_grad_in_fp32: false
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 1818
    lr_decay_style: cosine
    lr_warmup_steps: 100
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 0
parallelism:
  dp: 1
  expert_parallel_size: 2
  pp: 1
  pp_engine: 1f1b
  tp: 2
  tp_linear_async_communication: false
  tp_mode: ALL_REDUCE
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: meta-llama/Llama-2-7b-hf
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 2
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 256
  sequence_length: 256
  train_steps: 1918
  val_check_interval: -1
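
# Launch sketch (not part of the original config): nanotron trains from a config
# file via its run_train.py entry point and the --config-file flag. The filename
# examples/config_llamoe.yaml below is an assumption -- pass whatever path you
# saved this file under, and set --nproc_per_node to the world size implied by
# the parallelism section above (here tp=2, pp=1, dp=1, expert_parallel_size=2;
# check how your nanotron version folds expert parallelism into the world size).
#
#   CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun --nproc_per_node=2 \
#     run_train.py --config-file examples/config_llamoe.yaml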