Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
lm-evaluation-harness
Commits
6469a0e7
Commit
6469a0e7
authored
Oct 05, 2020
by
Leo Gao
Browse files
Replace HFNLPTask with HFTask everywhere
parent
f348fa2c
Changes
6
Hide whitespace changes
Inline
Side-by-side
Showing
6 changed files
with
29 additions
and
14 deletions
+29
-14
datasets/coqa/__init__.py
datasets/coqa/__init__.py
+15
-0
lm_eval/tasks/arc.py
lm_eval/tasks/arc.py
+2
-2
lm_eval/tasks/drop.py
lm_eval/tasks/drop.py
+1
-1
lm_eval/tasks/race.py
lm_eval/tasks/race.py
+2
-2
lm_eval/tasks/superglue.py
lm_eval/tasks/superglue.py
+7
-7
lm_eval/tasks/webqs.py
lm_eval/tasks/webqs.py
+2
-2
No files found.
datasets/coqa/__init__.py
0 → 100644
View file @
6469a0e7
import
base
class CoQA(base.Dataset):
    """Task scaffold for the CoQA dataset.

    Every document hook is still an unimplemented stub: each method
    returns None (the same as the original bare ``pass`` bodies).
    """

    def training_docs(self):
        """Stub — training split not wired up yet; returns None."""
        return None

    def validation_docs(self):
        """Stub — validation split not wired up yet; returns None."""
        return None

    def test_docs(self):
        """Stub — test split not wired up yet; returns None."""
        return None

    def fewshot_description(self):
        """Stub — no few-shot prompt description defined yet; returns None."""
        return None
\ No newline at end of file
lm_eval/tasks/arc.py
View file @
6469a0e7
from
.
common
import
HF
NLP
Task
from
.
common
import
HFTask
class
ARCEasy
(
HF
NLP
Task
):
class
ARCEasy
(
HFTask
):
NLP_PATH
=
"ai2_arc"
NLP_PATH
=
"ai2_arc"
NLP_NAME
=
"ARC-Easy"
NLP_NAME
=
"ARC-Easy"
...
...
lm_eval/tasks/drop.py
View file @
6469a0e7
...
@@ -3,7 +3,7 @@ import json
...
@@ -3,7 +3,7 @@ import json
from
scipy.stats
import
pearsonr
,
spearmanr
from
scipy.stats
import
pearsonr
,
spearmanr
from
sklearn.metrics
import
f1_score
,
matthews_corrcoef
from
sklearn.metrics
import
f1_score
,
matthews_corrcoef
from
tqdm
import
auto
as
tqdm_lib
from
tqdm
import
auto
as
tqdm_lib
from
.
common
import
HF
NLP
Task
,
simple_accuracy_metric
,
yesno
from
.
common
import
HFTask
,
simple_accuracy_metric
,
yesno
from
pathlib
import
Path
from
pathlib
import
Path
from
..base
import
Dataset
from
..base
import
Dataset
...
...
lm_eval/tasks/race.py
View file @
6469a0e7
from
.
common
import
HF
NLP
Task
from
.
common
import
HFTask
from
..utils_stream
import
X
,
each
,
apply
,
join
,
filt
,
one
from
..utils_stream
import
X
,
each
,
apply
,
join
,
filt
,
one
import
collections
import
collections
import
nlp
import
nlp
class
RACE
(
HF
NLP
Task
):
class
RACE
(
HFTask
):
NLP_PATH
=
"race"
NLP_PATH
=
"race"
NLP_NAME
=
"high"
NLP_NAME
=
"high"
...
...
lm_eval/tasks/superglue.py
View file @
6469a0e7
import
numpy
as
np
import
numpy
as
np
from
tqdm
import
auto
as
tqdm_lib
from
tqdm
import
auto
as
tqdm_lib
from
.
common
import
HF
NLP
Task
,
simple_accuracy_metric
,
yesno
from
.
common
import
HFTask
,
simple_accuracy_metric
,
yesno
class
BoolQ
(
HF
NLP
Task
):
class
BoolQ
(
HFTask
):
NLP_PATH
=
"super_glue"
NLP_PATH
=
"super_glue"
NLP_NAME
=
"boolq"
NLP_NAME
=
"boolq"
...
@@ -36,7 +36,7 @@ class BoolQ(HFNLPTask):
...
@@ -36,7 +36,7 @@ class BoolQ(HFNLPTask):
return
simple_accuracy_metric
(
preds
=
preds
,
golds
=
golds
)
return
simple_accuracy_metric
(
preds
=
preds
,
golds
=
golds
)
class
CommitmentBank
(
HF
NLP
Task
):
class
CommitmentBank
(
HFTask
):
NLP_PATH
=
"super_glue"
NLP_PATH
=
"super_glue"
NLP_NAME
=
"cb"
NLP_NAME
=
"cb"
...
@@ -79,7 +79,7 @@ class CommitmentBank(HFNLPTask):
...
@@ -79,7 +79,7 @@ class CommitmentBank(HFNLPTask):
return
simple_accuracy_metric
(
preds
=
preds
,
golds
=
golds
)
return
simple_accuracy_metric
(
preds
=
preds
,
golds
=
golds
)
class
Copa
(
HF
NLP
Task
):
class
Copa
(
HFTask
):
NLP_PATH
=
"super_glue"
NLP_PATH
=
"super_glue"
NLP_NAME
=
"copa"
NLP_NAME
=
"copa"
...
@@ -120,7 +120,7 @@ class Copa(HFNLPTask):
...
@@ -120,7 +120,7 @@ class Copa(HFNLPTask):
return
choice
[
0
].
lower
()
+
choice
[
1
:]
return
choice
[
0
].
lower
()
+
choice
[
1
:]
class
MultiRC
(
HF
NLP
Task
):
class
MultiRC
(
HFTask
):
NLP_PATH
=
"super_glue"
NLP_PATH
=
"super_glue"
NLP_NAME
=
"multirc"
NLP_NAME
=
"multirc"
...
@@ -177,7 +177,7 @@ class MultiRC(HFNLPTask):
...
@@ -177,7 +177,7 @@ class MultiRC(HFNLPTask):
}
}
class
WordsInContext
(
HF
NLP
Task
):
class
WordsInContext
(
HFTask
):
NLP_PATH
=
"super_glue"
NLP_PATH
=
"super_glue"
NLP_NAME
=
"wic"
NLP_NAME
=
"wic"
...
@@ -214,7 +214,7 @@ class WordsInContext(HFNLPTask):
...
@@ -214,7 +214,7 @@ class WordsInContext(HFNLPTask):
return
simple_accuracy_metric
(
preds
=
preds
,
golds
=
golds
)
return
simple_accuracy_metric
(
preds
=
preds
,
golds
=
golds
)
class
WinogradSchemaChallenge
(
HF
NLP
Task
):
class
WinogradSchemaChallenge
(
HFTask
):
NLP_PATH
=
"super_glue"
NLP_PATH
=
"super_glue"
NLP_NAME
=
"wsc"
NLP_NAME
=
"wsc"
...
...
lm_eval/tasks/webqs.py
View file @
6469a0e7
from
.
common
import
HF
NLP
Task
from
.
common
import
HFTask
class
WebQs
(
HF
NLP
Task
):
class
WebQs
(
HFTask
):
NLP_PATH
=
"web_questions"
NLP_PATH
=
"web_questions"
NLP_NAME
=
None
NLP_NAME
=
None
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment