Commit d5d19219 authored by Jonathan Tow

Remove unused imports and format imports

parent c84a4af4
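
Every hunk below applies the same two rules: drop imports that are no longer referenced, and group the rest as plain stdlib imports first, then third-party packages, then lm_eval-local modules. A minimal before/after sketch of that grouping, using a hypothetical task file (the file and names are illustrative, not from this commit):

    # Before (hypothetical): unordered, with unused names
    #   from lm_eval.base import Task
    #   import json         # never used below
    #   import os
    #   import numpy as np  # never used below
    #
    # After: stdlib imports first, then lm_eval-local, unused names dropped
    import os

    from lm_eval.base import Task


    class ExampleTask(Task):
        def download(self):
            # mirrors the data-directory pattern of tasks like Arithmetic below
            os.makedirs('data/example/', exist_ok=True)
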
 import math
 from collections import Iterable
-from pprint import pprint
 import numpy as np
 import sacrebleu
......
@@ -3,6 +3,7 @@ from lm_eval.base import rf
 from ..metrics import mean
 from . common import HFTask

+
 class ANLIBase(HFTask):
     DATASET_PATH = "anli"
     DATASET_NAME = None
......
-import numpy as np
 from lm_eval.base import MultipleChoiceTask
-from ..metrics import mean
 from . common import HFTask
......
@@ -8,6 +8,7 @@ from best_download import download_file
 ArithmeticDoc = namedtuple('ArithmeticDoc', ['context', 'completion'])

+
 class Arithmetic(Task):
     directory = 'data/arithmetic/'
......
 import datasets
-import numpy as np
 import lm_eval.metrics
 from ..base import Task
......
 import os
 import json
+import transformers.data.metrics.squad_metrics as squad_metrics
 from lm_eval.base import Task, rf, mean
 from ..utils import sh
 from itertools import zip_longest
-import transformers.data.metrics.squad_metrics as squad_metrics
-import collections
-import datasets
-import numpy as np
-from lm_eval.base import rf, mean
-from . common import HFTask
-from tqdm import tqdm
-import string, re

 class CoQA(Task):
......
-from lm_eval.base import Task, rf
-from lm_eval.metrics import mean
-from lm_eval.utils import sh
-from .common import yesno
 import abc
 import csv
 import os
 import random
 import numpy as np
+from lm_eval.base import Task, rf
+from lm_eval.metrics import mean
+from lm_eval.utils import sh
+from .common import yesno

 class Ethics(Task):
     def download(self):
......
 import numpy as np
 from lm_eval.base import rf
 from ..metrics import mean, matthews_corrcoef, f1_score
-from scipy.stats import pearsonr, spearmanr
-from tqdm import auto as tqdm_lib
 from . common import HFTask, yesno
 from ..utils import general_detokenize
......
+import json
 from lm_eval.base import Task, rf
 from lm_eval.metrics import mean, perplexity
 from lm_eval.utils import sh
-import json
-import math
 from best_download import download_file
......
+import abc
 import json
-import random
 from lm_eval.utils import sh
 from lm_eval.metrics import mean
 from lm_eval.base import Task, rf
 from pathlib import Path
-import abc

 class Math(Task):
......
-from . common import HFTask
-from lm_eval.base import mean, rf, MultipleChoiceTask
 import re
+from lm_eval.base import MultipleChoiceTask
+from . common import HFTask

 class MathQA(HFTask, MultipleChoiceTask):
     DATASET_PATH = "math_qa"
......
+import random
 from . common import HFTask
 from itertools import islice
-import random

 class NaturalQs(HFTask):
     # TODO: naturalqs has a *really* large train set that huggingface just
......
 import numpy as np
-import json
-import random
 from .common import HFTask
 from lm_eval.base import rf
 from ..metrics import mean
......
 import os
-import numpy as np
-from best_download import download_file
-from lm_eval.base import MultipleChoiceTask, rf
-from lm_eval.metrics import mean
 import xml.etree.ElementTree as ET
-import random
+from best_download import download_file
+from lm_eval.base import MultipleChoiceTask

 class QA4MRE(MultipleChoiceTask):
     YEAR = None
......
 import json
-import random
 import os
 from lm_eval.base import Task
 from ..utils import sh
......
@@ -5,11 +5,6 @@ from lm_eval.base import rf
 from ..metrics import mean
 from . common import HFTask
-import os
-from functools import reduce
-import operator
-from tqdm import tqdm
-import json

 class each:
     def __init__(self, f):
......
-import json
-import random
 import os
-from lm_eval.base import MultipleChoiceTask, rf
-from ..metrics import mean
-from tqdm import auto as tqdm_lib
-from . common import simple_accuracy_metric
-import numpy as np
-from ..utils import sh
+from lm_eval.base import MultipleChoiceTask

 class SATAnalogies(MultipleChoiceTask):
......
 import os
 import json
-from ..utils import sh
-from lm_eval.base import MultipleChoiceTask, rf
-from ..metrics import mean
 import zipfile
+from lm_eval.base import MultipleChoiceTask
 from best_download import download_file
......
-import numpy as np
-from scipy.stats import pearsonr, spearmanr
-from sklearn.metrics import f1_score, matthews_corrcoef
-from tqdm import auto as tqdm_lib
-from . common import HFTask, simple_accuracy_metric, yesno
+from . common import HFTask

 class SQuAD(HFTask):
     DATASET_PATH = "squad_v2"
......
-import json
-import random
-from lm_eval.base import Task
-from ..utils import sh
 import csv
+from lm_eval.base import Task

 class StoryCloze(Task):
     NEEDS_MANUAL_DL = True
......