Commit 400c0199 authored by lintangsutawika's avatar lintangsutawika
Browse files

minor fixes to satisfy pre-commit

parent 2ee7121b
...@@ -10,6 +10,7 @@ from lm_eval.logger import eval_logger ...@@ -10,6 +10,7 @@ from lm_eval.logger import eval_logger
os.environ["TOKENIZERS_PARALLELISM"] = "false" os.environ["TOKENIZERS_PARALLELISM"] = "false"
ALL_TASKS = sorted(list(TASK_REGISTRY.keys()) + list(GROUP_REGISTRY.keys())) ALL_TASKS = sorted(list(TASK_REGISTRY.keys()) + list(GROUP_REGISTRY.keys()))
class MultiChoice: class MultiChoice:
def __init__(self, choices): def __init__(self, choices):
self.choices = choices self.choices = choices
...@@ -21,7 +22,7 @@ class MultiChoice: ...@@ -21,7 +22,7 @@ class MultiChoice:
eval_logger.warning("{} is not in task list.".format(value)) eval_logger.warning("{} is not in task list.".format(value))
eval_logger.info(f"Available tasks to choose:") eval_logger.info(f"Available tasks to choose:")
# for choice in self.choices: # for choice in self.choices:
# eval_logger.info(f" {choice}") # eval_logger.info(f" {choice}")
eval_logger.info(ALL_TASKS) eval_logger.info(ALL_TASKS)
return True return True
......
...@@ -32,10 +32,10 @@ def main(): ...@@ -32,10 +32,10 @@ def main():
task_names = args.tasks.split(",") task_names = args.tasks.split(",")
task_dict = tasks.get_task_dict(task_names) task_dict = tasks.get_task_dict(task_names)
description_dict = {} # description_dict = {}
if args.description_dict_path: # if args.description_dict_path:
with open(args.description_dict_path, "r") as f: # with open(args.description_dict_path, "r") as f:
description_dict = json.load(f) # description_dict = json.load(f)
os.makedirs(args.output_base_path, exist_ok=True) os.makedirs(args.output_base_path, exist_ok=True)
for task_name, task in task_dict.items(): for task_name, task in task_dict.items():
...@@ -55,11 +55,11 @@ def main(): ...@@ -55,11 +55,11 @@ def main():
docs = join_iters(iters) docs = join_iters(iters)
description = ( # description = (
description_dict[task_name] # description_dict[task_name]
if description_dict and task_name in description_dict # if description_dict and task_name in description_dict
else "" # else ""
) # )
with open(os.path.join(args.output_base_path, task_name), "w") as f: with open(os.path.join(args.output_base_path, task_name), "w") as f:
for i, doc in ( for i, doc in (
......
...@@ -28,7 +28,7 @@ For adding novel benchmarks/datasets to the library: ...@@ -28,7 +28,7 @@ For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment