base_model: axolotl-quants/Llama-4-Scout-17B-16E-Linearized-bnb-nf4-bf16
model_type: Llama4ForConditionalGeneration
# Automatically upload checkpoint and final model to HF
# hub_model_id: username/custom_model_name

plugins:
  - axolotl.integrations.liger.LigerPlugin
  - axolotl.integrations.cut_cross_entropy.CutCrossEntropyPlugin
liger_glu_activation: true
liger_rms_norm: true
liger_layer_norm: true

llama4_linearized_experts: true  # needed with custom linearized experts model
load_in_4bit: true
adapter: qlora

lora_r: 32
lora_alpha: 64
lora_target_modules:
  - self_attn.q_proj
  - self_attn.k_proj
  - self_attn.v_proj
  - self_attn.o_proj
  - shared_expert.gate_proj
  - shared_expert.up_proj
  - shared_expert.down_proj
  # - experts.gate_projs.[0-9]+$  # optionally train the moe experts
  # - experts.up_projs.[0-9]+$
  # - experts.down_projs.[0-9]+$
lora_modules_to_save:
  # - lm_head  # needed if modifying vocabulary
  # - embed_tokens
lora_mlp_kernel: true
lora_qkv_kernel: true
lora_o_kernel: true

chat_template: llama4
datasets:
  - path: mlabonne/FineTome-100k
    type: chat_template
    split: train[:20%]
    field_messages: conversations
    message_property_mappings:
      role: from
      content: value
dataset_prepared_path: last_run_prepared
val_set_size: 0.0
output_dir: ./outputs/out

sequence_len: 4096  # up to 8k will work on a single H100
sample_packing: true
pad_to_sequence_len: true

gradient_accumulation_steps: 1
micro_batch_size: 1
num_epochs: 1
optimizer: adamw_torch_4bit
lr_scheduler: cosine
learning_rate: 1e-4

bf16: true
tf32: true

torch_compile: true
flex_attention: true
flex_attn_compile_kwargs:
  dynamic: false
  mode: max-autotune-no-cudagraphs

gradient_checkpointing: offload
gradient_checkpointing_kwargs:
  use_reentrant: false

logging_steps: 1
warmup_steps: 20
evals_per_epoch: 1
saves_per_epoch: 1
weight_decay: 0.0

special_tokens:
  pad_token: <|finetune_right_pad_id|>
  eos_token: <|eot|>
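
# Usage sketch, assuming a recent Axolotl install (the filename below is
# hypothetical; substitute the path where this config is saved):
#   axolotl train scout-qlora-flexattn.yaml
# or, on older installs, via accelerate:
#   accelerate launch -m axolotl.cli.train scout-qlora-flexattn.yaml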