Unverified commit 7ff58fe1, authored by Leo Gao, committed by GitHub

Merge pull request #166 from jon-tow/remove-unused-imports

Remove unused imports and format imports
Parents: a69ba385, ddc044eb
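The change is mechanical: drop imports that no module-level code references, and group and sort the rest. As a rough illustration of how such dead imports can be found, here is a minimal stdlib-only sketch using `ast`. It is a hypothetical helper, not tooling from this PR, and it ignores corner cases such as `__all__` re-exports and names used only inside strings or `eval`.

import ast
import sys

def unused_imports(source: str):
    """Return (lineno, name) for imported names never referenced as a Name."""
    tree = ast.parse(source)
    imported = {}  # bound name -> line where it was imported
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            for alias in node.names:
                # `import a.b` binds `a`; `import a.b as c` binds `c`
                name = alias.asname or alias.name.split(".")[0]
                imported[name] = node.lineno
        elif isinstance(node, ast.ImportFrom):
            for alias in node.names:
                imported[alias.asname or alias.name] = node.lineno
    used = {n.id for n in ast.walk(tree) if isinstance(n, ast.Name)}
    return sorted((line, name) for name, line in imported.items() if name not in used)

if __name__ == "__main__":
    path = sys.argv[1]
    with open(path) as f:
        for line, name in unused_imports(f.read()):
            print(f"{path}:{line}: unused import '{name}'")

Run against any of the files below (e.g. `python find_unused.py lm_eval/tasks/translation.py`, filename hypothetical), it would flag the kind of imports this PR deletes.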
@@ -4,11 +4,11 @@ To-do:
 - ReCoRD
 """
 import numpy as np
+import sklearn
+import transformers.data.metrics.squad_metrics as squad_metrics
 from . common import HFTask, yesno
 from lm_eval.base import rf
 from ..metrics import mean, acc_all, metric_max_over_ground_truths
-import sklearn
-import transformers.data.metrics.squad_metrics as squad_metrics
 from ..utils import general_detokenize
...
-import abc
-import json
-import random
-import os
-from collections import Iterable
-from pprint import pprint
 import pycountry
+from pprint import pprint
 from sacrebleu import sacrebleu
-import logging
 from lm_eval import metrics
 from lm_eval.base import Task, rf
...
 import os
 import json
-import random
 from lm_eval.base import Task, rf
 from ..metrics import mean
 from ..utils import sh


 class TriviaQA(Task):
     def download(self):
         if not os.path.exists('data/triviaqa'):
...
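The visible context in `TriviaQA.download` shows the guard-and-fetch idiom these task classes use: skip work when the data directory is already on disk. A generic sketch of the same idiom, with a placeholder path and URL rather than the task's real ones:

import os
import urllib.request

def download(path="data/example", url="https://example.com/data.json"):
    # Only fetch when the target directory does not exist yet,
    # mirroring the `if not os.path.exists('data/triviaqa')` guard above.
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
        urllib.request.urlretrieve(url, os.path.join(path, "data.json"))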
 import gzip
 import json
-import random
 import shutil
 from pathlib import Path
 from best_download import download_file
...
-import numpy as np
-from scipy.stats import pearsonr, spearmanr
-from sklearn.metrics import f1_score, matthews_corrcoef
-from tqdm import auto as tqdm_lib
-from . common import NLP_TASK, simple_accuracy_metric, yesno
+from . common import HFTask


-class WikiText103(NLP_TASK):
+class WikiText103(HFTask):
     NLP_PATH = "wikitext"
     NLP_NAME = "wikitext-103-raw-v1"
@@ -66,7 +63,7 @@ class WikiText103(NLP_TASK):
         raise NotImplementedError('Evaluation not implemented')


-class WikiText2(NLP_TASK):
+class WikiText2(HFTask):
     NLP_PATH = "wikitext"
     NLP_NAME = "wikitext-2-raw-v1"
...
@@ -2,8 +2,6 @@ import argparse
 import json
 import numpy as np
 import random
-import itertools
-import collections
 import logging
 from lm_eval import models, tasks, evaluator, base
...
-import argparse
-import json
-import numpy as np
 import random
-import itertools
-import collections
-import logging
-from lm_eval import models, tasks, evaluator, base
-import random
-from lm_eval.base import LM
 import transformers
+from lm_eval import tasks, evaluator
+from lm_eval.base import LM


 class DryrunLM(LM):
...
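For context on the `DryrunLM` hunk: a dry-run LM implements the harness's `LM` interface without calling a real model, which is useful for counting or cost-estimating requests. A rough sketch, under the assumption that the interface of this era exposed `loglikelihood` and `greedy_until` (the actual class in the repo differs):

from lm_eval.base import LM

class StubLM(LM):
    """Hypothetical dry-run LM: records traffic, returns dummy results."""

    def __init__(self):
        super().__init__()
        self.requests_seen = 0  # e.g. for cost estimation

    def loglikelihood(self, requests):
        self.requests_seen += len(requests)
        # One (logprob, is_greedy) pair per request; constants stand in
        # for real model output.
        return [(-1.0, False) for _ in requests]

    def greedy_until(self, requests):
        self.requests_seen += len(requests)
        return ["" for _ in requests]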
-import argparse
 import json
 import numpy as np
 import random
-import itertools
-import collections
 import logging
 from lm_eval import models, tasks, evaluator, base

 logging.getLogger("openai").setLevel(logging.WARNING)
...
@@ -2,7 +2,6 @@ import argparse
 import numpy as np
 import os
-import random
 from lm_eval import tasks
 from lm_eval.utils import join_iters
...
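`join_iters` is imported from `lm_eval.utils` in this file; judging by the name, it flattens several iterators into one stream. A plausible stdlib-equivalent sketch (the harness's actual implementation may differ):

def join_iters(iters):
    # Yield every item from every iterator, in order; equivalent in
    # spirit to itertools.chain.from_iterable(iters).
    for it in iters:
        yield from it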
 import lm_eval.models as models
-import lm_eval.base as base


 def test_gpt2():
     gpt2 = models.get_model('gpt2').create_from_arg_string("device=cpu")
...
 import lm_eval.tasks as tasks
 import lm_eval.base as base
+from itertools import islice
 import pytest
-from itertools import islice


 @pytest.mark.parametrize("taskname,Task", tasks.TASK_REGISTRY.items())
...
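The final hunk parametrizes one test per entry in `tasks.TASK_REGISTRY`. A self-contained sketch of that pattern with a toy registry (names here are made up, not the harness's):

import pytest

# Toy stand-in for a registry mapping task names to classes.
TOY_REGISTRY = {"echo": str, "number": int}

@pytest.mark.parametrize("name,cls", TOY_REGISTRY.items())
def test_registry_entry_constructs(name, cls):
    # Each (name, class) pair becomes its own test case.
    assert cls() is not None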