Commit 2184b8de authored by lintangsutawika

Merge branch 'cont-metrics' of https://github.com/EleutherAI/lm-evaluation-harness into alt_worlds

parents b1ba4e71 1522009c
# Generated by utils.py
dataset_name: unnatural_in_context_learning_zero_shot
include: ../generate_until_template_yaml
task: bigbench_unnatural_in_context_learning_generate_until
# Generated by utils.py
dataset_name: vitaminc_fact_verification_zero_shot
include: ../generate_until_template_yaml
task: bigbench_vitaminc_fact_verification_generate_until
# Generated by utils.py
dataset_name: what_is_the_tao_zero_shot
include: ../generate_until_template_yaml
task: bigbench_what_is_the_tao_generate_until
# Generated by utils.py
dataset_name: which_wiki_edit_zero_shot
include: ../generate_until_template_yaml
task: bigbench_which_wiki_edit_generate_until
# Generated by utils.py
dataset_name: winowhy_zero_shot
include: ../generate_until_template_yaml
task: bigbench_winowhy_generate_until
# Generated by utils.py
dataset_name: word_sorting_zero_shot
include: ../generate_until_template_yaml
task: bigbench_word_sorting_generate_until
# Generated by utils.py
dataset_name: word_unscrambling_zero_shot
include: ../generate_until_template_yaml
task: bigbench_word_unscrambling_generate_until
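Each of the four-line stubs above is marked `# Generated by utils.py`: a script expands a list of BIG-bench subtask names into one YAML file per subtask, each pointing at the shared template through `include`. Below is a minimal sketch of such a generator, using a hypothetical `SUBTASKS` list and output directory for illustration; the actual `utils.py` in the repository may differ.

```python
# utils.py -- minimal sketch of the stub generator; the real script may differ.
import os

# Hypothetical subset of BIG-bench subtask names (illustration only).
SUBTASKS = ["word_sorting", "word_unscrambling", "winowhy"]


def main(output_dir="generate_until"):
    os.makedirs(output_dir, exist_ok=True)
    for subtask in SUBTASKS:
        path = os.path.join(output_dir, f"{subtask}.yaml")
        with open(path, "w") as f:
            # Mirror the four-line layout of the generated files above.
            f.write("# Generated by utils.py\n")
            f.write(f"dataset_name: {subtask}_zero_shot\n")
            f.write("include: ../generate_until_template_yaml\n")
            f.write(f"task: bigbench_{subtask}_generate_until\n")


if __name__ == "__main__":
    main()
```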
group: bigbench
dataset_path: bigbench # will switch to `hails/bigbench` when all tasks are pushed
output_type: generate_until
dataset_kwargs:
# num_shots: 0 # TODO: the number of shots for the `bigbench` HF dataset should be controlled through this, not through the typical methods
# subtask_name: null
test_split: default
doc_to_text: inputs
doc_to_target: "{{targets[0]}}"
generation_kwargs:
max_length: 128
metric_list:
- metric: exact_match
aggregation: mean
higher_is_better: true
ignore_punctuation: true
- metric: !function aux_metric.token_edit_distance # pip install textdistance
aggregation: mean
higher_is_better: false
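The template above registers a custom metric with `!function aux_metric.token_edit_distance`, and the inline comment notes it requires `pip install textdistance`. Here is a minimal sketch of what that helper could look like, assuming the harness passes the model output(s) and gold target(s) as `predictions` and `references` lists; the actual `aux_metric.py` shipped alongside the task may differ.

```python
# aux_metric.py -- minimal sketch only; the real module in the task directory may differ.
# Requires: pip install textdistance
import textdistance


def token_edit_distance(predictions, references):
    """Levenshtein edit distance between whitespace-token sequences.

    Assumed signature: model output(s) in `predictions`, gold target(s) in
    `references`. Lower is better, matching `higher_is_better: false` above.
    """
    pred_tokens = predictions[0].split()
    ref_tokens = references[0].split()
    return textdistance.levenshtein.distance(ref_tokens, pred_tokens)
```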
# Generated by utils.py
dataset_name: abstract_narrative_understanding_zero_shot
include: ../greedy_until_template_yaml
task: bigbench_abstract_narrative_understanding_greedy_until
# Generated by utils.py
dataset_name: anachronisms_zero_shot
include: ../greedy_until_template_yaml
task: bigbench_anachronisms_greedy_until
# Generated by utils.py
dataset_name: analogical_similarity_zero_shot
include: ../greedy_until_template_yaml
task: bigbench_analogical_similarity_greedy_until
# Generated by utils.py
dataset_name: analytic_entailment_zero_shot
include: ../greedy_until_template_yaml
task: bigbench_analytic_entailment_greedy_until
# Generated by utils.py
dataset_name: arithmetic_zero_shot
include: ../greedy_until_template_yaml
task: bigbench_arithmetic_greedy_until
# Generated by utils.py
dataset_name: ascii_word_recognition_zero_shot
include: ../greedy_until_template_yaml
task: bigbench_ascii_word_recognition_greedy_until
# Generated by utils.py
dataset_name: authorship_verification_zero_shot
include: ../greedy_until_template_yaml
task: bigbench_authorship_verification_greedy_until
# Generated by utils.py
dataset_name: auto_categorization_zero_shot
include: ../greedy_until_template_yaml
task: bigbench_auto_categorization_greedy_until
# Generated by utils.py
dataset_name: auto_debugging_zero_shot
include: ../greedy_until_template_yaml
task: bigbench_auto_debugging_greedy_until
# Generated by utils.py
dataset_name: bbq_lite_json_zero_shot
include: ../greedy_until_template_yaml
task: bigbench_bbq_lite_json_greedy_until
# Generated by utils.py
dataset_name: bridging_anaphora_resolution_barqa_zero_shot
include: ../greedy_until_template_yaml
task: bigbench_bridging_anaphora_resolution_barqa_greedy_until
# Generated by utils.py
dataset_name: causal_judgment_zero_shot
include: ../greedy_until_template_yaml
task: bigbench_causal_judgment_greedy_until