gaoqiong / lm-evaluation-harness · Commit edbb1a49

Rename NLP_TASK
Authored Oct 04, 2020 by Leo Gao
Parent: d53969b5

4 changed files with 18 additions and 18 deletions:
  lm_eval/tasks/common.py     +1  -1
  lm_eval/tasks/drop.py       +1  -1
  lm_eval/tasks/glue.py       +10 -10
  lm_eval/tasks/superglue.py  +6  -6
lm_eval/tasks/common.py
@@ -4,7 +4,7 @@ import random
 from ..base import Dataset
-class NLP_TASK(Dataset):
+class HFNLPTask(Dataset):
     NLP_PATH = None
     NLP_NAME = None
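The only change in common.py is the class name; every task class in the files below keeps the same NLP_PATH / NLP_NAME hooks and simply inherits from the new name. A minimal standalone sketch of that pattern (the stub base class here is illustrative, not the real implementation from common.py):

    # Standalone sketch of the subclassing pattern this commit renames; the stub
    # below stands in for the real HFNLPTask in lm_eval/tasks/common.py, whose
    # full definition is not shown in this diff.
    class HFNLPTask:
        # Subclasses point these at a dataset path and config name
        # (presumably the HuggingFace `nlp` library, given the HF prefix).
        NLP_PATH = None
        NLP_NAME = None


    class CoLA(HFNLPTask):
        NLP_PATH = "glue"
        NLP_NAME = "cola"


    class BoolQ(HFNLPTask):
        NLP_PATH = "super_glue"
        NLP_NAME = "boolq"


    if __name__ == "__main__":
        # Every task touched by this diff resolves its dataset the same way.
        for task in (CoLA, BoolQ):
            print(task.__name__, "->", task.NLP_PATH, "/", task.NLP_NAME)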
lm_eval/tasks/drop.py
@@ -3,7 +3,7 @@ import json
 from scipy.stats import pearsonr, spearmanr
 from sklearn.metrics import f1_score, matthews_corrcoef
 from tqdm import auto as tqdm_lib
-from .common import NLP_TASK, simple_accuracy_metric, yesno
+from .common import HFNLPTask, simple_accuracy_metric, yesno
 from pathlib import Path
 from ..base import Dataset
lm_eval/tasks/glue.py
@@ -2,7 +2,7 @@ import numpy as np
 from scipy.stats import pearsonr, spearmanr
 from sklearn.metrics import f1_score, matthews_corrcoef
 from tqdm import auto as tqdm_lib
-from .common import NLP_TASK, simple_accuracy_metric, yesno
+from .common import HFNLPTask, simple_accuracy_metric, yesno

 def get_accuracy_and_f1(preds, golds):

@@ -22,7 +22,7 @@ def get_accuracy_and_f1(preds, golds):
     }

-class CoLA(NLP_TASK):
+class CoLA(HFNLPTask):
     NLP_PATH = "glue"
     NLP_NAME = "cola"

@@ -64,7 +64,7 @@ class CoLA(NLP_TASK):
         }

-class MNLI(NLP_TASK):
+class MNLI(HFNLPTask):
     NLP_PATH = "glue"
     NLP_NAME = "mnli"

@@ -115,7 +115,7 @@ class MNLI(NLP_TASK):
         return simple_accuracy_metric(preds=preds, golds=golds)

-class MRPC(NLP_TASK):
+class MRPC(HFNLPTask):
     NLP_PATH = "glue"
     NLP_NAME = "mrpc"

@@ -153,7 +153,7 @@ class MRPC(NLP_TASK):
         return get_accuracy_and_f1(preds=preds, golds=golds)

-class RTE(NLP_TASK):
+class RTE(HFNLPTask):
     NLP_PATH = "glue"
     NLP_NAME = "rte"

@@ -190,7 +190,7 @@ class RTE(NLP_TASK):
         return simple_accuracy_metric(preds=preds, golds=golds)

-class QNLI(NLP_TASK):
+class QNLI(HFNLPTask):
     NLP_PATH = "glue"
     NLP_NAME = "qnli"

@@ -227,7 +227,7 @@ class QNLI(NLP_TASK):
         return simple_accuracy_metric(preds=preds, golds=golds)

-class QQP(NLP_TASK):
+class QQP(HFNLPTask):
     NLP_PATH = "glue"
     NLP_NAME = "qqp"

@@ -265,7 +265,7 @@ class QQP(NLP_TASK):
         return get_accuracy_and_f1(preds=preds, golds=golds)

-class STSB(NLP_TASK):
+class STSB(HFNLPTask):
     NLP_PATH = "glue"
     NLP_NAME = "stsb"

@@ -322,7 +322,7 @@ class STSB(NLP_TASK):
         }

-class SST(NLP_TASK):
+class SST(HFNLPTask):
     NLP_PATH = "glue"
     NLP_NAME = "sst2"

@@ -359,7 +359,7 @@ class SST(NLP_TASK):
         return simple_accuracy_metric(preds=preds, golds=golds)

-class WNLI(NLP_TASK):
+class WNLI(HFNLPTask):
     NLP_PATH = "glue"
     NLP_NAME = "wnli"
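For the call sites visible in these hunks: MRPC and QQP report through get_accuracy_and_f1, while MNLI, RTE, QNLI, and SST go through simple_accuracy_metric. Neither helper's body is part of this diff; the sketch below shows the conventional GLUE-style computation they would typically perform. The return keys and structure here are assumptions, not the repository's actual code.

    # Sketch only: the real helpers live in lm_eval/tasks/common.py and glue.py,
    # and their exact return structure is not shown in this diff.
    import numpy as np
    from sklearn.metrics import f1_score


    def simple_accuracy_metric(preds, golds):
        # Plain accuracy, as used by the MNLI/RTE/QNLI/SST call sites above.
        acc = float((np.array(preds) == np.array(golds)).mean())
        return {"acc": acc}  # key name is an assumption


    def get_accuracy_and_f1(preds, golds):
        # Conventional GLUE reporting for MRPC and QQP: accuracy, F1, and their mean.
        preds, golds = np.array(preds), np.array(golds)
        acc = float((preds == golds).mean())
        f1 = float(f1_score(y_true=golds, y_pred=preds))
        return {"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2.0}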
lm_eval/tasks/superglue.py
 import numpy as np
 from tqdm import auto as tqdm_lib
-from .common import NLP_TASK, simple_accuracy_metric, yesno
+from .common import HFNLPTask, simple_accuracy_metric, yesno

-class BoolQ(NLP_TASK):
+class BoolQ(HFNLPTask):
     NLP_PATH = "super_glue"
     NLP_NAME = "boolq"

@@ -36,7 +36,7 @@ class BoolQ(NLP_TASK):
         return simple_accuracy_metric(preds=preds, golds=golds)

-class CommitmentBank(NLP_TASK):
+class CommitmentBank(HFNLPTask):
     NLP_PATH = "super_glue"
     NLP_NAME = "cb"

@@ -79,7 +79,7 @@ class CommitmentBank(NLP_TASK):
         return simple_accuracy_metric(preds=preds, golds=golds)

-class Copa(NLP_TASK):
+class Copa(HFNLPTask):
     NLP_PATH = "super_glue"
     NLP_NAME = "copa"

@@ -120,7 +120,7 @@ class Copa(NLP_TASK):
         return choice[0].lower() + choice[1:]

-class WordsInContext(NLP_TASK):
+class WordsInContext(HFNLPTask):
     NLP_PATH = "super_glue"
     NLP_NAME = "wic"

@@ -157,7 +157,7 @@ class WordsInContext(NLP_TASK):
         return simple_accuracy_metric(preds=preds, golds=golds)

-class WinogradSchemaChallenge(NLP_TASK):
+class WinogradSchemaChallenge(HFNLPTask):
     NLP_PATH = "super_glue"
     NLP_NAME = "wsc"
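One context line worth noting in the Copa hunk: the class lowercases the first character of each candidate ending, presumably so the choice can be spliced onto the premise as a continuation. A standalone illustration (the wrapper name is hypothetical; the expression is copied from the diff):

    def lowercase_first(choice: str) -> str:
        # Mirrors the context line in the Copa hunk: the leading capital of a
        # candidate continuation is dropped before it is used.
        return choice[0].lower() + choice[1:]


    assert lowercase_first("The man broke his toe.") == "the man broke his toe."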