base_model: $PATH_STORAGE/.cache/huggingface/hub/models--meta-llama--Meta-Llama-3.1-405B/snapshots/222de096204587406c7cadb3e0a101aade116279
tokenizer_type: AutoTokenizer
strict: false

plugins:
  - axolotl.integrations.liger.LigerPlugin
liger_rope: true
liger_rms_norm: true
liger_swiglu: true
liger_fused_linear_cross_entropy: true

chat_template: llama3
datasets:
  - path: mlabonne/FineTome-100k
    type: chat_template
    split: train

dataset_prepared_path: $PATH_STORAGE/axolotl/last_run_prepared
val_set_size: 0.0
output_dir: $PATH_STORAGE/axolotl-artifacts/outputs/llama3_1-405b-finetome

save_safetensors: false  # saving final sharded dict may not work with safetensors

wandb_project: llama-3.1-405b-fft-finetome
wandb_entity:

sequence_len: 4096
sample_packing: true
pad_to_sequence_len: true

gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 2
optimizer: adamw_torch
lr_scheduler: cosine
learning_rate: 1.0e-5

train_on_inputs: false
group_by_length: false
bf16: true
tf32: true

gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
logging_steps: 1
flash_attention: true

warmup_steps: 100
saves_per_epoch: 1
weight_decay: 0.1

fsdp_final_state_dict_type: SHARDED_STATE_DICT
fsdp:
  - full_shard
  - auto_wrap
fsdp_config:
  fsdp_limit_all_gathers: true
  fsdp_sync_module_states: true
  fsdp_offload_params: true
  fsdp_use_orig_params: false
  fsdp_cpu_ram_efficient_loading: true
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer
  fsdp_state_dict_type: SHARDED_STATE_DICT
  fsdp_sharding_strategy: FULL_SHARD

special_tokens:
  pad_token: <|finetune_right_pad_id|>
  eos_token: <|eot_id|>