gaoqiong / lm-evaluation-harness · Commits

Commit 9907e0a7, authored Jul 04, 2023 by FarzanehNakhaee

Merge branch 'big-refactor' into add-qa4mre-config

Parents: 649a7f95, 070b6b9c
Changes: 29
Showing 9 changed files with 36 additions and 31 deletions (+36 −31):
+1  −2   lm_eval/tasks/super_glue/record/t5-prompt.yaml
+14 −0   lm_eval/tasks/super_glue/wic/default.yaml
+0  −14  lm_eval/tasks/super_glue/wic/promptsource-00.yaml
+0  −5   lm_eval/tasks/super_glue/wic/promptsource-01.yaml
+0  −5   lm_eval/tasks/super_glue/wic/promptsource-02.yaml
+13 −0   lm_eval/tasks/super_glue/wic/utils.py
+1  −2   lm_eval/tasks/super_glue/wsc/t5-prompt.yaml
+6  −2   lm_eval/utils.py
+1  −1   setup.py
lm_eval/tasks/super_glue/record/t5-prompt.yaml  (view file @ 9907e0a7)

 group:
   - super-glue-t5-prompt
-task: t5-prompt
-reference: "From Raffel et. al. 2019"
+task: super_glue-record-t5-prompt
 dataset_path: super_glue
 dataset_name: record
 training_split: train
 ...
lm_eval/tasks/super_glue/wic/default.yaml  (new file, 0 → 100644, view file @ 9907e0a7)

group:
  - super-glue-lm-eval-v1
task: "wic"
dataset_path: super_glue
dataset_name: wic
output_type: multiple_choice
training_split: train
validation_split: validation
doc_to_text: !function utils.doc_to_text
doc_to_target: !function utils.doc_to_target
gold_alias: "{{label}}" # this will be cast to an int.
template_aliases: "{% set answer_choices = ['no', 'yes'] %}"
metric_list:
  - metric: acc
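The gold_alias and template_aliases values are Jinja strings that are rendered against each document; the inline comment notes that the rendered gold alias is cast to an int. As a rough illustration only (using jinja2 directly rather than the harness's own templating code, and an invented sample document), the two strings combine like this:

# Illustrative only: renders the config's Jinja strings with plain jinja2
# against an invented WiC-style document; the harness's real templating
# pipeline is not part of this diff.
import jinja2

template_aliases = "{% set answer_choices = ['no', 'yes'] %}"
gold_alias = "{{label}}"

doc = {"label": 1}  # hypothetical example; 1 means the word is used in the same sense

# Prepending template_aliases defines answer_choices for any later expression;
# rendering gold_alias yields the raw label, which is then cast to an int.
gold = int(jinja2.Template(template_aliases + gold_alias).render(**doc))
answer = jinja2.Template(template_aliases + "{{ answer_choices[label] }}").render(**doc)
print(gold, answer)  # -> 1 yes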
lm_eval/tasks/super_glue/wic/promptsource-00.yaml  (deleted, 100644 → 0, view file @ 649a7f95)

group:
  - super-glue-promptsource
task: "GPT-3-prompt"
dataset_path: super_glue
dataset_name: wic
training_split: train
validation_split: validation
use_prompt: "promptsource:GPT-3-prompt"
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_case: true
    ignore_punctuation: true
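The metric block in this deleted config requested exact_match, mean-aggregated, ignoring case and punctuation. As a minimal sketch of what that normalization amounts to (not the harness's own metric implementation, which is not shown in this commit):

# Minimal sketch of exact_match with ignore_case / ignore_punctuation as
# configured above; illustrative only, not lm_eval's implementation.
import string

def exact_match(prediction: str, reference: str,
                ignore_case: bool = True, ignore_punctuation: bool = True) -> float:
    if ignore_case:
        prediction, reference = prediction.lower(), reference.lower()
    if ignore_punctuation:
        strip_punct = str.maketrans("", "", string.punctuation)
        prediction, reference = prediction.translate(strip_punct), reference.translate(strip_punct)
    return float(prediction.strip() == reference.strip())

# exact_match("Yes.", "yes") -> 1.0; per-example scores are then mean-aggregated.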
lm_eval/tasks/super_glue/wic/promptsource-01.yaml  (deleted, 100644 → 0, view file @ 649a7f95)

include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "GPT-3-prompt-with-label"
use_prompt: "promptsource:GPT-3-prompt-with-label"
lm_eval/tasks/super_glue/wic/promptsource-02.yaml  (deleted, 100644 → 0, view file @ 649a7f95)

include: promptsource-00.yaml
group:
  - super-glue-promptsource
task: "affirmation_true_or_false"
use_prompt: "promptsource:affirmation_true_or_false"
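Both deleted variant files lean on an include: key so that only the fields differing from promptsource-00.yaml need to be restated. The harness's config loader is not part of this diff; a hypothetical helper showing the include-then-override idea might look like:

# Hypothetical include-then-override loader, for illustration only; the
# function name and behaviour are assumptions, not the harness's actual code.
import os
import yaml  # PyYAML

def load_task_config(path):
    with open(path) as f:
        config = yaml.safe_load(f) or {}
    included = config.pop("include", None)
    if included:
        # Load the included file first (relative to this file), then let the
        # including file's keys override the inherited values.
        base = load_task_config(os.path.join(os.path.dirname(path), included))
        base.update(config)
        config = base
    return config

# load_task_config("promptsource-01.yaml") would then inherit dataset_path,
# metric_list, etc. from promptsource-00.yaml while overriding task and use_prompt.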
lm_eval/tasks/super_glue/wic/utils.py  (new file, 0 → 100644, view file @ 9907e0a7)

def doc_to_text(doc):
    return (
        "Sentence 1: {}\nSentence 2: {}\nQuestion: Is the word '{}' used in the same way in the"
        " two sentences above?\nAnswer:".format(
            doc["sentence1"],
            doc["sentence2"],
            doc["sentence1"][doc["start1"] : doc["end1"]],
        )
    )


def doc_to_target(doc):
    return " {}".format({0: "no", 1: "yes"}[doc["label"]])
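For reference, this is roughly what the two helpers produce. The sample doc below is invented and only mimics the SuperGLUE WiC fields (sentence1, sentence2, start1, end1, label) that the functions read:

# Invented WiC-style record, for illustration only.
doc = {
    "sentence1": "He kicked the ball over the fence.",
    "sentence2": "They danced at the ball until midnight.",
    "start1": 14,
    "end1": 18,    # doc["sentence1"][14:18] == "ball"
    "label": 0,    # 0 -> "no": different senses of the target word
}

print(doc_to_text(doc))
# Sentence 1: He kicked the ball over the fence.
# Sentence 2: They danced at the ball until midnight.
# Question: Is the word 'ball' used in the same way in the two sentences above?
# Answer:

print(repr(doc_to_target(doc)))  # ' no'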
lm_eval/tasks/super_glue/wsc/t5-prompt.yaml  (view file @ 9907e0a7)

 group:
   - super-glue-t5-prompt
-task: t5-prompt
-reference: "From Raffel et. al. 2019"
+task: super_glue-wsc-t5-prompt
 dataset_path: super_glue
 dataset_name: wsc
 training_split: train
 ...
lm_eval/utils.py  (view file @ 9907e0a7)

@@ -10,7 +10,7 @@ import collections
 import importlib.util
 import fnmatch
-from typing import List, Union
+from typing import List, Literal, Union
 import gc
 import torch

@@ -453,7 +453,11 @@ def create_iterator(raw_iterator, rank, world_size, limit=None):
     return islice(raw_iterator, rank, limit, world_size)


-def pad_and_concat(max_length: int, tensors: List[torch.Tensor], padding_side="right"):
+def pad_and_concat(
+    max_length: int,
+    tensors: List[torch.Tensor],
+    padding_side: Literal["right", "left"] = "right",
+):
     """
     Method for padding a list of tensors given the maximum tensor
     length in the batch. Used for batching inputs and continuations in
 ...
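The body of pad_and_concat is truncated in the diff above; only the widened signature and the start of its docstring are visible. Purely as an illustration of what a function with this signature typically does, here is a standalone sketch that pads 1-D tensors to max_length with zeros on the chosen side and stacks them (an assumption, not the harness's actual implementation):

# Standalone sketch, NOT lm_eval's actual pad_and_concat (its body is not
# shown in this commit). Pads each 1-D tensor to `max_length` with zeros on
# the requested side, then concatenates along a new batch dimension.
from typing import List, Literal

import torch
import torch.nn.functional as F

def pad_and_concat_sketch(
    max_length: int,
    tensors: List[torch.Tensor],
    padding_side: Literal["right", "left"] = "right",
) -> torch.Tensor:
    padded = []
    for t in tensors:
        pad_amount = max_length - t.size(-1)
        if padding_side == "right":
            t = F.pad(t, (0, pad_amount))  # zeros appended after the sequence
        else:
            t = F.pad(t, (pad_amount, 0))  # zeros prepended before the sequence
        padded.append(t.unsqueeze(0))
    return torch.cat(padded, dim=0)

# pad_and_concat_sketch(4, [torch.tensor([1, 2]), torch.tensor([3, 4, 5])])
# -> tensor([[1, 2, 0, 0],
#            [3, 4, 5, 0]])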
setup.py  (view file @ 9907e0a7)

@@ -55,7 +55,7 @@ setuptools.setup(
         "promptsource": [
             "promptsource @ git+https://github.com/bigscience-workshop/promptsource.git#egg=promptsource"
         ],
-        "auto-gptq": ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"],
+        "gptq": ["auto-gptq[triton] @ git+https://github.com/PanQiWei/AutoGPTQ"],
         "anthropic": ["anthropic"],
     },
 )
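In effect the optional-dependency group is renamed, so an editable install of the GPTQ extras changes from `pip install -e ".[auto-gptq]"` to `pip install -e ".[gptq]"`; the extra name comes from the key shown in the setup() extras above.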