# Language Model Evaluation Harness Configuration File
#
# This YAML configuration file allows you to specify evaluation parameters
# instead of passing them as command-line arguments.
#
# Usage:
#   $ lm_eval --config templates/example_ci_config.yaml
#
# You can override any values in this config with command-line arguments:
#   $ lm_eval --config templates/example_ci_config.yaml --model_args pretrained=gpt2 --tasks mmlu
#
# All parameters are optional and have the same meaning as their CLI counterparts.

model: hf
model_args:
  pretrained: EleutherAI/pythia-14m
  dtype: float16
tasks:
  - hellaswag
  - arc_easy
batch_size: 1
device: mps
trust_remote_code: true
log_samples: true
output_path: ./test
gen_kwargs:
  do_sample: true
  temperature: 0.7
samples:
  hellaswag: [1,2,3,4,5,6,7,8,9,10]
  arc_easy: [10,20,30,40,50,60,70,80,90,100]
metadata:
  name: Example CI Config
  description: This is an example configuration file for testing purposes.
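
# For reference, a roughly equivalent run expressed with CLI flags only. This is
# a sketch that assumes the standard lm_eval options (--model, --model_args,
# --tasks, --gen_kwargs, and so on); the per-task `samples` selection above is
# omitted here because it has no single-flag counterpart in this sketch.
#
#   $ lm_eval --model hf \
#       --model_args pretrained=EleutherAI/pythia-14m,dtype=float16 \
#       --tasks hellaswag,arc_easy \
#       --batch_size 1 \
#       --device mps \
#       --trust_remote_code \
#       --log_samples \
#       --output_path ./test \
#       --gen_kwargs do_sample=true,temperature=0.7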