Commit d5d19219 authored by Jonathan Tow

Remove unused imports and format imports

parent c84a4af4
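The hunks below apply one convention across the task files: standard-library imports first, then third-party packages, then lm_eval-local modules, with each group roughly alphabetized and anything unused dropped. As a minimal sketch of that reordering, assuming broadly isort-style grouping (the names here are illustrative, not taken from the commit):

# Before: groups interleaved, plus an import the module never uses.
import numpy as np
from lm_eval.base import Task, rf
import os
from itertools import zip_longest  # unused, so it gets removed

# After: stdlib, then third-party, then local, each block sorted.
import os

import numpy as np

from lm_eval.base import Task, rf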
import math
from collections import Iterable
from pprint import pprint
import numpy as np
import sacrebleu
......
@@ -3,6 +3,7 @@ from lm_eval.base import rf
from ..metrics import mean
from . common import HFTask
class ANLIBase(HFTask):
DATASET_PATH = "anli"
DATASET_NAME = None
......
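For orientation, most files in this diff define tasks with the shape seen in the ANLI hunk above: a subclass of HFTask pins DATASET_PATH (a HuggingFace datasets identifier) and optionally DATASET_NAME (a sub-configuration), and the base class handles download and split access. A hypothetical minimal sketch, assuming HFTask forwards these two attributes to datasets.load_dataset:

from . common import HFTask

class ExampleTask(HFTask):      # hypothetical task, for illustration only
    DATASET_PATH = "anli"       # dataset identifier, as in ANLIBase above
    DATASET_NAME = None         # None selects the dataset's default config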
import numpy as np
from lm_eval.base import MultipleChoiceTask
from ..metrics import mean
from . common import HFTask
......
@@ -8,6 +8,7 @@ from best_download import download_file
ArithmeticDoc = namedtuple('ArithmeticDoc', ['context', 'completion'])
class Arithmetic(Task):
directory = 'data/arithmetic/'
......
import datasets
import numpy as np
import lm_eval.metrics
from ..base import Task
......
import os
import json
import transformers.data.metrics.squad_metrics as squad_metrics
from lm_eval.base import Task, rf, mean
from ..utils import sh
from itertools import zip_longest
import transformers.data.metrics.squad_metrics as squad_metrics
import collections
import datasets
import numpy as np
from lm_eval.base import rf, mean
from . common import HFTask
from tqdm import tqdm
import string, re
class CoQA(Task):
......
from lm_eval.base import Task, rf
from lm_eval.metrics import mean
from lm_eval.utils import sh
from .common import yesno
import abc
import csv
import os
import random
import numpy as np
from lm_eval.base import Task, rf
from lm_eval.metrics import mean
from lm_eval.utils import sh
from .common import yesno
class Ethics(Task):
def download(self):
......
import numpy as np
from lm_eval.base import rf
from ..metrics import mean, matthews_corrcoef, f1_score
from scipy.stats import pearsonr, spearmanr
from tqdm import auto as tqdm_lib
from . common import HFTask, yesno
from ..utils import general_detokenize
......
import json
from lm_eval.base import Task, rf
from lm_eval.metrics import mean, perplexity
from lm_eval.utils import sh
import json
import math
from best_download import download_file
......
import abc
import json
import random
from lm_eval.utils import sh
from lm_eval.metrics import mean
from lm_eval.base import Task, rf
from pathlib import Path
import abc
class Math(Task):
......
from . common import HFTask
from lm_eval.base import mean, rf, MultipleChoiceTask
import re
from lm_eval.base import MultipleChoiceTask
from . common import HFTask
class MathQA(HFTask, MultipleChoiceTask):
DATASET_PATH = "math_qa"
......
import random
from . common import HFTask
from itertools import islice
import random
class NaturalQs(HFTask):
# TODO: naturalqs has a *really* large train set that huggingface just
......
import numpy as np
import json
import random
from .common import HFTask
from lm_eval.base import rf
from ..metrics import mean
......
import os
import numpy as np
from best_download import download_file
from lm_eval.base import MultipleChoiceTask, rf
from lm_eval.metrics import mean
import xml.etree.ElementTree as ET
import random
from best_download import download_file
from lm_eval.base import MultipleChoiceTask
class QA4MRE(MultipleChoiceTask):
YEAR = None
......
import json
import random
import os
from lm_eval.base import Task
from ..utils import sh
......
@@ -5,11 +5,6 @@ from lm_eval.base import rf
from ..metrics import mean
from . common import HFTask
import os
from functools import reduce
import operator
from tqdm import tqdm
import json
class each:
def __init__(self, f):
......
import json
import random
import os
from lm_eval.base import MultipleChoiceTask, rf
from ..metrics import mean
from tqdm import auto as tqdm_lib
from . common import simple_accuracy_metric
import numpy as np
from ..utils import sh
from lm_eval.base import MultipleChoiceTask
class SATAnalogies(MultipleChoiceTask):
......
import os
import json
from ..utils import sh
from lm_eval.base import MultipleChoiceTask, rf
from ..metrics import mean
import zipfile
from lm_eval.base import MultipleChoiceTask
from best_download import download_file
......
import numpy as np
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
from tqdm import auto as tqdm_lib
from . common import HFTask, simple_accuracy_metric, yesno
from . common import HFTask
class SQuAD(HFTask):
DATASET_PATH = "squad_v2"
......
import json
import random
from lm_eval.base import Task
from ..utils import sh
import csv
from lm_eval.base import Task
class StoryCloze(Task):
NEEDS_MANUAL_DL = True
......