"vscode:/vscode.git/clone" did not exist on "1acd2ef5d2695fcb1ae065e9aa1f1a1b2a0c6098"
Unverified commit 5a766ac5, authored by farzanehnakhaee70, committed by GitHub

Merge branch 'big-refactor' into big-refactor

parents a0c1dbbd 01cfb2ff
@@ -6,7 +6,8 @@ import numpy as np
 from tqdm import tqdm
 from lm_eval import utils
-from lm_eval.api.model import LM, register_model
+from lm_eval.api.model import LM
+from lm_eval.api.registry import register_model


 def get_result(response, ctxlen):
@@ -56,7 +57,7 @@ def oa_completion(**kwargs):
             backoff_time *= 1.5


-@register_model("openai")
+@register_model("openai", "gooseai")
 class GPT3LM(LM):
     REQ_CHUNK_SIZE = 20
......
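For context on the decorator change above: registering one model class under several backend names only requires the registry decorator to accept multiple keys. A minimal standalone sketch of that pattern, assuming a plain dict registry (`MODEL_REGISTRY` and this `register_model` are illustrative stand-ins, not necessarily the actual `lm_eval.api.registry` internals):

```python
from typing import Dict, Type

# Illustrative registry; the real lm_eval.api.registry may be organized differently.
MODEL_REGISTRY: Dict[str, Type] = {}


def register_model(*names: str):
    """Class decorator that registers the decorated class under every given name."""

    def decorate(cls):
        for name in names:
            assert name not in MODEL_REGISTRY, f"model '{name}' is already registered"
            MODEL_REGISTRY[name] = cls
        return cls

    return decorate


@register_model("openai", "gooseai")
class GPT3LM:  # stand-in for the real LM subclass
    pass


assert MODEL_REGISTRY["openai"] is MODEL_REGISTRY["gooseai"]
```

Both names resolve to the same class, so a single implementation can back both the `openai` and `gooseai` model types.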
@@ -16,7 +16,8 @@ import os
 import requests as _requests
 import time
 from tqdm import tqdm
-from lm_eval.api.model import LM, register_model
+from lm_eval.api.model import LM
+from lm_eval.api.registry import register_model


 logger = logging.getLogger(__name__)
......
# v1.0 Tasks
This list keeps track of which tasks' implementations have been ported to YAML / v2.0 of the Eval Harness.
A box should be checked only if the task is implemented in v2.0 and has been tested for regression. A task should be struck through once it has been checked against the implementation from the paper that introduced it, or against the implementation that popularized it.
- [ ] Glue
- [ ] SuperGlue
- [ ] CoQA
- [ ] DROP
- [x] ~~Lambada~~
- [x] Lambada (Cloze variants)
- [ ] Lambada (Multilingual)
- [x] Wikitext
- [x] PiQA
- [ ] PROST
- [ ] MCTACO
- [ ] Pubmed QA
- [x] SciQ
- [ ] QASPER
- [ ] QA4MRE
- [ ] TriviaQA
- [x] AI2 ARC
- [ ] LogiQA
- [ ] HellaSwag
- [ ] SWAG
- [ ] OpenBookQA
- [ ] SQuADv2
- [ ] RACE
- [ ] HeadQA
- [ ] MathQA
- [ ] WebQs
- [ ] WSC273
- [ ] Winogrande
- [ ] ANLI
- [ ] Hendrycks Ethics
- [ ] TruthfulQA
- [ ] MuTual
- [ ] Hendrycks Math
- [ ] Asdiv
- [ ] GSM8k
- [ ] Arithmetic
- [ ] MMMLU
- [ ] Translation (WMT) suite
- [ ] Unscramble
- [x] ~~Pile (perplexity)~~
- [ ] BLiMP
- [ ] ToxiGen
- [ ] CrowS-Pairs
- [ ] XCopa
- [ ] BIG-Bench
- [ ] XStoryCloze
- [ ] XWinograd
- [ ] PAWS-X
- [ ] XNLI
- [ ] MGSM
# Novel Tasks
Tasks added in the revamped harness that were not previously available. Again, a strikethrough denotes checking performed *against the original task's implementation or published results introducing the task*.
# Task Wishlist
- [ ] TheoremQA
- [ ] Theorem Proving evaluations
- [ ] Chain of Thought
- [ ] Self-consistency ; Least-to-Most prompting, etc.
- [ ] Summarization Tasks
- [ ] Anthropic Model-Written Evals
\ No newline at end of file
@@ -64,4 +64,4 @@ Tasks added in the revamped harness that were not previously available. Again, a
 - [ ] Chain of Thought
 - [ ] Self-consistency ; Least-to-Most prompting, etc.
 - [ ] Summarization Tasks
 - [ ] Anthropic Model-Written Evals
\ No newline at end of file
 import os
 from typing import List, Union

+from .gsm8k import *
+from .triviaqa import *
+
 from lm_eval import utils
 from lm_eval.logger import eval_logger
 from lm_eval.api.task import TaskConfig, Task, ConfigurableTask
-from lm_eval.api.register import (
+from lm_eval.api.registry import (
     register_task,
     register_group,
-    task_registry,
-    group_registry,
+    TASK_REGISTRY,
+    GROUP_REGISTRY,
 )


 def get_task_name_from_config(task_config):
-    return "configurable_{dataset_path}_{dataset_name}".format(**task_config)
+    return "{dataset_path}_{dataset_name}".format(**task_config)


+def include_task_folder(task_dir):
+    """
+    Registers every task defined by a YAML config found under `task_dir`.
+    """
+    for root, subdirs, file_list in os.walk(task_dir):
+        if (subdirs == []) and (len(file_list) > 0):
+            for f in file_list:
+                if f.endswith(".yaml"):
+                    yaml_path = os.path.join(root, f)
+                    try:
+                        config = utils.load_yaml_config(yaml_path)
+                        SubClass = type(
+                            config["task"] + "ConfigurableTask",
+                            (ConfigurableTask,),
+                            {"CONFIG": TaskConfig(**config)},
+                        )
+                        if "task" in config:
+                            task_name = "{}".format(config["task"])
+                            register_task(task_name)(SubClass)
+                        if "group" in config:
+                            for group in config["group"]:
+                                register_group(group)(SubClass)
+                    except Exception as error:
+                        eval_logger.warning(
+                            "Failed to load config in\n"
+                            f" {yaml_path}\n"
+                            " Config will not be added to registry\n"
+                            f" Error: {error}"
+                        )
+
+
 task_dir = os.path.dirname(os.path.abspath(__file__)) + "/"
-for root, subdirs, file_list in os.walk(task_dir):
-    if (subdirs == []) and (len(file_list) > 0):
-        for file in file_list:
-            if "yaml" in file:
-                yaml_path = os.path.join(root, file)
-                try:
-                    config = utils.load_yaml_config(yaml_path)
-                    SubClass = type(
-                        config["task"] + "ConfigurableTask",
-                        (ConfigurableTask,),
-                        {"CONFIG": TaskConfig(**config)},
-                    )
-                    if "task" in config:
-                        task_name = "{}:{}".format(
-                            get_task_name_from_config(config), config["task"]
-                        )
-                        register_task(task_name)(SubClass)
-                    if "group" in config:
-                        for group in config["group"]:
-                            register_group(group)(SubClass)
-                except Exception:
-                    eval_logger.warning(
-                        "Failed to load config at in\n"
-                        f" {yaml_path}\n"
-                        " Config will not be added to registry"
-                    )
-
-TASK_REGISTRY = task_registry
-GROUP_REGISTRY = group_registry
+include_task_folder(task_dir)

 ALL_TASKS = sorted(list(TASK_REGISTRY.keys()) + list(GROUP_REGISTRY.keys()))


 def get_task(task_name, config):
......
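The YAML loader above creates one task class per config file by calling `type()` with `ConfigurableTask` as the base and the parsed config attached as a class attribute. A stripped-down sketch of that dynamic-subclassing pattern, using stand-in classes instead of the real `lm_eval` types:

```python
# Stand-ins for lm_eval.api.task.TaskConfig / ConfigurableTask, for illustration only.
class TaskConfig:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)


class ConfigurableTask:
    CONFIG = None  # each generated subclass carries its own parsed config


config = {"task": "arc_easy", "dataset_path": "ai2_arc", "dataset_name": "ARC-Easy"}

# Same pattern as include_task_folder: a fresh subclass whose CONFIG holds the YAML contents.
SubClass = type(
    config["task"] + "ConfigurableTask",
    (ConfigurableTask,),
    {"CONFIG": TaskConfig(**config)},
)

print(SubClass.__name__)             # arc_easyConfigurableTask
print(SubClass.CONFIG.dataset_name)  # ARC-Easy
```

Registering the generated subclass under `config["task"]` and each entry of `config["group"]` is what `register_task` / `register_group` do, so dropping a new YAML file into the tasks folder is enough to expose a new task name.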
"""
Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge
https://arxiv.org/pdf/1803.05457.pdf
The ARC dataset consists of 7,787 science exam questions drawn from a variety
of sources, including science questions provided under license by a research
partner affiliated with AI2. These are text-only, English language exam questions
that span several grade levels as indicated in the files. Each question has a
multiple choice structure (typically 4 answer options). The questions are sorted
into a Challenge Set of 2,590 “hard” questions (those that both a retrieval and
a co-occurrence method fail to answer correctly) and an Easy Set of 5,197 questions.
Homepage: https://allenai.org/data/arc
"""
from lm_eval import utils
from lm_eval.prompts import get_prompt
from lm_eval.api.task import MultipleChoiceTask
from lm_eval.api.register import register_task, register_group
_CITATION = """
@article{Clark2018ThinkYH,
title={Think you have Solved Question Answering? Try ARC, the AI2 Reasoning Challenge},
author={Peter Clark and Isaac Cowhey and Oren Etzioni and Tushar Khot and Ashish Sabharwal and Carissa Schoenick and Oyvind Tafjord},
journal={ArXiv},
year={2018},
volume={abs/1803.05457}
}
"""
@register_group("arc")
@register_task("arc_easy")
class ARCEasy(MultipleChoiceTask):
VERSION = "2.0"
DATASET_PATH = "ai2_arc"
DATASET_NAME = "ARC-Easy"
OUTPUT_TYPE = "loglikelihood"
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
def training_docs(self):
if self._training_docs is None:
self._training_docs = list(map(self._process_doc, self.dataset["train"]))
return self._training_docs
def validation_docs(self):
return map(self._process_doc, self.dataset["validation"])
def test_docs(self):
return map(self._process_doc, self.dataset["test"])
def _process_doc(self, doc):
# NOTE: Some `doc["answerKey"]`s are in numeric string format being one
# of {'1', '2', '3', '4', '5'}. We map them back to letters.
num_to_letter = {"1": "A", "2": "B", "3": "C", "4": "D", "5": "E"}
doc["answerKey"] = num_to_letter.get(doc["answerKey"], doc["answerKey"])
out_doc = {
"id": doc["id"],
"question": doc["question"],
"choices": doc["choices"]["text"],
"gold": ["A", "B", "C", "D", "E"].index(doc["answerKey"]),
}
return out_doc
def doc_to_text(self, doc):
doc_to_text = get_prompt("qa-basic:question-newline-answer")
return utils.apply_template(doc_to_text, doc)
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["query"]
@register_group("arc")
@register_task("arc_challenge")
class ARCChallenge(ARCEasy):
DATASET_PATH = "ai2_arc"
DATASET_NAME = "ARC-Challenge"
 group:
-- arc_yaml
-task: arc_challenge_yaml
+- ai2_arc
+- multiple_choice
+task: arc_challenge
 dataset_path: ai2_arc
 dataset_name: ARC-Challenge
 output_type: multiple_choice
......
 group:
-- arc_yaml
-task: arc_easy_yaml
+- ai2_arc
+- multiple_choice
+task: arc_easy
 dataset_path: ai2_arc
 dataset_name: ARC-Easy
 output_type: multiple_choice
......
@@ -24,7 +24,7 @@ from lm_eval.api.instance import Instance
 from lm_eval.prompts import get_prompt
-from lm_eval.api.register import register_task, register_group
+from lm_eval.api.registry import register_task, register_group

 _CITATION = """
 @misc{cobbe2021training,
@@ -92,7 +92,7 @@ class GradeSchoolMath8K(Task):
         return Instance(
             request_type=self.OUTPUT_TYPE,
             doc=doc,
-            arguments=(ctx, ["\n"]),
+            arguments=(ctx, ["\n\n"]),
             idx=0,
             **kwargs
         )
@@ -113,7 +113,7 @@ class GradeSchoolMath8K(Task):
         assert gold != INVALID_ANS, "No ground truth answer found in the document."
         # return self._extract_answer(completion) == gold
         # print(completion)
-        return completion == gold
+        return self._extract_answer(completion) == gold

     def process_results(self, doc, results):
         """Take a single document and the LM results and evaluates, returning a
......
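`_extract_answer` itself is not shown in this hunk. Based on the `#### (\-?[0-9\.\,]+)` pattern used elsewhere in this change for GSM8k answers and the `INVALID_ANS` sentinel referenced above, it presumably behaves roughly like the sketch below (an assumption, not the file's actual helper):

```python
import re

# GSM8k references end with "#### <number>"; generations are expected to follow suit.
ANS_RE = re.compile(r"#### (\-?[0-9\.\,]+)")
INVALID_ANS = "[invalid]"


def extract_answer(completion: str) -> str:
    match = ANS_RE.search(completion)
    if match is None:
        return INVALID_ANS
    # Drop thousands separators so "1,234" and "1234" compare equal.
    return match.group(1).strip().replace(",", "")


print(extract_answer("So 5 * 4 = 20 and 9 + 20 = 29.\n#### 29"))  # 29
print(extract_answer("no final answer marker here"))              # [invalid]
```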
# GSM8k
## Paper
Training Verifiers to Solve Math Word Problems
https://arxiv.org/abs/2110.14168
State-of-the-art language models can match human performance on many tasks, but
they still struggle to robustly perform multi-step mathematical reasoning. To
diagnose the failures of current models and support research, we introduce GSM8K,
a dataset of 8.5K high quality linguistically diverse grade school math word problems.
We find that even the largest transformer models fail to achieve high test performance,
despite the conceptual simplicity of this problem distribution.
NOTE: See the official implementation of the task:
https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py
for how to make use of the dataset's calculator annotations in your language
model's sample/generation function.
Homepage: https://github.com/openai/grade-school-math
## Citation
```
@misc{cobbe2021training,
title={Training Verifiers to Solve Math Word Problems},
author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman},
year={2021},
eprint={2110.14168},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
### Checklist
- [x] Is in Eval-harness v1.0?
- [ ] Has been checked for regression from v1.0?
- [ ] Has been checked for equivalence with original paper methodology?
- [ ] "Main" checked variant clearly denoted?
### Variant Wishlist
- [ ] Variant with Calculator (see https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py for example implementation)
- [ ] Using Verifiers
- [ ] Majority voting "without CoT"
\ No newline at end of file
# "Training Verifiers to Solve Math Word Problems"
# https://arxiv.org/abs/2110.14168
# State-of-the-art language models can match human performance on many tasks, but
# they still struggle to robustly perform multi-step mathematical reasoning. To
# diagnose the failures of current models and support research, we introduce GSM8K,
# a dataset of 8.5K high quality linguistically diverse grade school math word problems.
# We find that even the largest transformer models fail to achieve high test performance,
# despite the conceptual simplicity of this problem distribution.
# NOTE: See the official implementation of the task:
# https://github.com/openai/grade-school-math/blob/master/grade_school_math/calculator.py
# for how to make use of the dataset's calculator annotations in your language
# model's sample/generation function.
# Homepage: https://github.com/openai/grade-school-math
# _CITATION = """
# @misc{cobbe2021training,
# title={Training Verifiers to Solve Math Word Problems},
# author={Karl Cobbe and Vineet Kosaraju and Mohammad Bavarian and Jacob Hilton and Reiichiro Nakano and Christopher Hesse and John Schulman},
# year={2021},
# eprint={2110.14168},
# archivePrefix={arXiv},
# primaryClass={cs.LG}
# }
# """
task: gsm8k_yaml
dataset_path: gsm8k
dataset_name: main
training_split: train
test_split: test
use_prompt: "qa-basic:question-newline-answer"
doc_to_target: "{{answer.split('### ')[-1]}}"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
delimiter: "\n"
repeats: 4
# filter_list:
#   - name: "get-answer"
#     filter:
#       - function: "regex"
#         regex_pattern: "#### (\-?[0-9\.\,]+)"
include: gsm8k-cot.yaml
group:
  - chain_of_thought
  - self_consistency
task: gsm8k_cot_self_consistency
generation_kwargs:
  until:
    - "Q:"
    - "\n\n"
  do_sample: true
  temperature: 0.2
repeats: 64
filter_list:
  - name: "score-first" # pick only the first response, and report metrics on that
    filter:
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)"
      - function: "take_first"
  - name: "maj@64"
    filter:
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)"
      - function: "majority_vote"
      - function: "take_first"
  - name: "maj@8" # get Maj@8 , via selecting the first 8 responses. Using a better estimator would be optimal.
    filter:
      - function: "take_first_k"
        k: 8
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]*[0-9]+)"
      - function: "majority_vote"
      - function: "take_first"
group:
  - chain_of_thought
task: gsm8k_cot
dataset_path: gsm8k
dataset_name: main
output_type: greedy_until
test_split: test
doc_to_text: "Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?\n\nA: There are 15 trees originally. Then there were 21 trees after some more were planted. So there must have been 21 - 15 = 6. The answer is 6.\n\n\
  Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\n\nA: There are originally 3 cars. 2 more cars arrive. 3 + 2 = 5. The answer is 5.\n\n\
  Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?\n\nA: Originally, Leah had 32 chocolates. Her sister had 42. So in total they had 32 + 42 = 74. After eating 35, they had 74 - 35 = 39. The answer is 39.\n\n\
  Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?\n\nA: Jason started with 20 lollipops. Then he had 12 after giving some to Denny. So he gave Denny 20 - 12 = 8. The answer is 8.\n\n\
  Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?\n\nA: Shawn started with 5 toys. If he got 2 toys each from his mom and dad, then that is 4 more toys. 5 + 4 = 9. The answer is 9.\n\n\
  Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?\n\nA: There were originally 9 computers. For each of 4 days, 5 more computers were added. So 5 * 4 = 20 computers were added. 9 + 20 is 29. The answer is 29.\n\n\
  Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?\n\nA: Michael started with 58 golf balls. After losing 23 on tuesday, he had 58 - 23 = 35. After losing 2 more, he had 35 - 2 = 33 golf balls. The answer is 33.\n\n\
  Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\n\nA: Olivia had 23 dollars. 5 bagels for 3 dollars each will be 5 x 3 = 15 dollars. So she has 23 - 15 dollars left. 23 - 15 is 8. The answer is 8.\n\n\
  Q: {{question}}\n\nA:"
doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}"
gold_alias: "{{answer.split('### ')[-1].rstrip()}}" # this post-processes the reference that we'll score against
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: false
    regexes_to_ignore:
      - ","
      - "\\$"
delimiter: "\n\n"
generation_kwargs:
  until:
    - "Q:"
    - "\n\n"
  do_sample: false
  temperature: 0.0
repeats: 1
num_fewshot: 0
filter_list:
  - name: "get-answer"
    filter:
      - function: "regex"
        regex_pattern: "The answer is (\\-?[0-9\\.\\,]+)"
      - function: "take_first"
group:
  - greedy_until
  - math_word_problems
task: gsm8k_yaml
dataset_path: gsm8k
dataset_name: main
output_type: greedy_until
training_split: train
fewshot_split: train
test_split: test
doc_to_text: "Question: {{question}}\nAnswer:"
doc_to_target: "{{answer}}" #" {{answer.split('### ')[-1].rstrip()}}"
gold_alias: "{{answer.split('### ')[-1].rstrip()}}" # this post-processes the reference that we'll score against
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: false
    regexes_to_ignore:
      - ","
      - "\\$"
      - ".*### "
delimiter: "\n\n"
generation_kwargs:
  until:
    - "\n\n"
    - "Question:"
  do_sample: false
  temperature: 0.0
repeats: 1
num_fewshot: 5
# filter_list:
#   - name: "get-answer"
#     filter:
#       - function: "regex"
#         regex_pattern: "### (\\-?[0-9\\.\\,]+)"
#       - function: "take_first"
"""
The LAMBADA dataset: Word prediction requiring a broad discourse context
https://arxiv.org/pdf/1606.06031.pdf
LAMBADA is a dataset to evaluate the capabilities of computational models for text
understanding by means of a word prediction task. LAMBADA is a collection of narrative
passages sharing the characteristic that human subjects are able to guess their last
word if they are exposed to the whole passage, but not if they only see the last
sentence preceding the target word. To succeed on LAMBADA, computational models
cannot simply rely on local context, but must be able to keep track of information
in the broader discourse.
Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI
"""
from lm_eval.api.task import Task
from lm_eval.api.instance import Instance
from lm_eval.api.metrics import mean, perplexity
from lm_eval.api.register import register_task, register_group
_CITATION = """
@misc{
author={Paperno, Denis and Kruszewski, Germán and Lazaridou, Angeliki and Pham, Quan Ngoc and Bernardi, Raffaella and Pezzelle, Sandro and Baroni, Marco and Boleda, Gemma and Fernández, Raquel},
title={The LAMBADA dataset},
DOI={10.5281/zenodo.2630551},
publisher={Zenodo},
year={2016},
month={Aug}
}
"""
class LambadaBase(Task):
VERSION = None
OUTPUT_TYPE = "loglikelihood"
def training_docs(self):
if self.has_training_docs():
return self.dataset["train"]
def validation_docs(self):
if self.has_validation_docs():
return self.dataset["validation"]
def test_docs(self):
if self.has_test_docs():
return self.dataset["test"]
def doc_to_text(self, doc):
return doc["text"].rsplit(" ", 1)[0]
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["text"]
def doc_to_target(self, doc):
return " " + doc["text"].rsplit(" ", 1)[1]
def construct_requests(self, doc, ctx, **kwargs):
return Instance(
request_type=self.OUTPUT_TYPE,
doc=doc,
arguments=(ctx, self.doc_to_target(doc)),
**kwargs
)
def process_results(self, doc, results):
# TODO: this ^ is a hack. filters should make it so that we only have one response per request that we score
results = results[
0
] # TODO: recheck this. currently a list of [(ll, is_greedy)] is passed in
ll, is_greedy = results
return {"ppl": ll, "acc": int(is_greedy)}
def aggregation(self):
return {"ppl": perplexity, "acc": mean}
def higher_is_better(self):
return {"ppl": False, "acc": True}
@register_task("lambada_standard")
class LambadaStandard(LambadaBase):
"""The LAMBADA task using the standard original LAMBADA dataset."""
VERSION = "2.0"
DATASET_PATH = "lambada"
def has_training_docs(self):
return False
def has_validation_docs(self):
return True
def has_test_docs(self):
return True
@register_task("lambada_openai")
class LambadaOpenAI(LambadaBase):
"""The LAMBADA task using the LAMBADA OpenAI dataset, a modified version of the
original LAMBADA dataset created by OpenAI for evaluating their GPT-2 model.
Reference: https://github.com/openai/gpt-2/issues/131#issuecomment-497136199
"""
VERSION = "2.0"
DATASET_PATH = "EleutherAI/lambada_openai"
def has_training_docs(self):
return False
def has_validation_docs(self):
return False
def has_test_docs(self):
return True
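The context/target split in `doc_to_text` / `doc_to_target` is the heart of the task: everything up to the last space becomes the prompt, and the final word (with its leading space) is what the model must assign high likelihood to. A quick illustration with an invented passage:

```python
# Invented example; real LAMBADA documents are full narrative paragraphs.
doc = {"text": "He reached for the handle, took a breath, and slowly opened the door"}

context = doc["text"].rsplit(" ", 1)[0]       # doc_to_text: everything before the final word
target = " " + doc["text"].rsplit(" ", 1)[1]  # doc_to_target: the final word, leading space kept

print(context)       # He reached for the handle, took a breath, and slowly opened the
print(repr(target))  # ' door'
```

The loglikelihood request then scores `target` conditioned on `context`, and `is_greedy` records whether that word was also the model's top-1 continuation, which is what the `acc` metric reports.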
 # LAMBADA
 ### Paper
 The LAMBADA dataset: Word prediction requiring a broad discourse context
 https://arxiv.org/pdf/1606.06031.pdf
 LAMBADA is a dataset to evaluate the capabilities of computational models for text
@@ -23,4 +23,4 @@ Homepage: https://zenodo.org/record/2630551#.X4Xzn5NKjUI
 publisher={Zenodo},
 year={2016},
 month={Aug}
 }
\ No newline at end of file
 group:
 - lambada
-task: lambada_openai_yaml
+- loglikelihood
+- perplexity
+task: lambada_openai
 dataset_path: EleutherAI/lambada_openai
 dataset_name: default
 output_type: loglikelihood
......
 group:
 - lambada
-task: lambada_standard_yaml
+- loglikelihood
+- perplexity
+task: lambada_standard
 dataset_path: lambada
 dataset_name: null
 output_type: loglikelihood
......
 group:
 - lambada_cloze
+- loglikelihood
 task: lambada_openai_cloze_yaml
 dataset_path: EleutherAI/lambada_openai
 dataset_name: default
......
 group:
 - lambada_cloze
+- loglikelihood
 task: lambada_standard_cloze_yaml
 dataset_path: lambada
 dataset_name: null
......