gaoqiong / lm-evaluation-harness · Commits

Commit f66fc06f
fix merge conflicts

Authored Feb 01, 2024 by haileyschoelkopf
Parents: b13753cd, d714fc95
Showing 20 of 84 changed files, with 821 additions and 367 deletions (+821, -367).
lm_eval/models/huggingface.py (+26, -11)
lm_eval/models/optimum_lm.py (+69, -0)
lm_eval/models/vllm_causallms.py (+15, -8)
lm_eval/prompts/__init__.py (+1, -1)
lm_eval/tasks/__init__.py (+366, -232)
lm_eval/tasks/arc/arc_easy.yaml (+1, -1)
lm_eval/tasks/bbh/_generate_configs.py (+2, -2)
lm_eval/tasks/bbh/cot_fewshot/_cot_fewshot_template_yaml (+1, -0)
lm_eval/tasks/bbh/fewshot/_fewshot_template_yaml (+1, -0)
lm_eval/tasks/belebele/_generate_configs.py (+4, -4)
lm_eval/tasks/belebele/belebele_default.yaml (+0, -4)
lm_eval/tasks/benchmarks/flan/_held_in_template_yaml (+2, -1)
lm_eval/tasks/benchmarks/flan/flan_anli.yaml (+0, -17)
lm_eval/tasks/benchmarks/flan/flan_arc.yaml (+0, -14)
lm_eval/tasks/benchmarks/flan/flan_boolq.yaml (+0, -7)
lm_eval/tasks/benchmarks/flan/flan_cot.yaml (+0, -11)
lm_eval/tasks/benchmarks/flan/flan_held_in.yaml (+329, -4)
lm_eval/tasks/benchmarks/flan/flan_held_in_yaml (+0, -39)
lm_eval/tasks/benchmarks/flan/flan_held_out.yaml (+4, -4)
lm_eval/tasks/benchmarks/flan/flan_rte.yaml (+0, -7)
lm_eval/models/huggingface.py

@@ -108,8 +108,8 @@ class HFLM(LM):

```python
            assert not parallelize, "`parallelize=True` is not compatible with passing pre-initialized model to `pretrained`"
            self._model = pretrained
            self._device = self._model.device
            self._config = self._model.config
            gpus = 0

            if tokenizer:
                assert isinstance(
```
@@ -200,8 +200,9 @@ class HFLM(LM):

```diff
         )
         # access self._model through self.model property outside this method
-        self.model.eval()
-        self.model.tie_weights()
+        if isinstance(self.model, torch.nn.Module):
+            self.model.eval()
+            self.model.tie_weights()

         if isinstance(pretrained, str) and (gpus >= 1 or str(self.device) == "mps"):
             # TODO: can remove this whole snippet except in the mps case, perhaps?
```
@@ -238,6 +239,16 @@ class HFLM(LM):
if
self
.
config
.
model_type
==
"qwen"
:
# Qwen's trust_remote_code tokenizer does not allow for adding special tokens
self
.
tokenizer
.
pad_token
=
"<|endoftext|>"
elif
(
self
.
tokenizer
.
__class__
.
__name__
==
"RWKVWorldTokenizer"
or
self
.
tokenizer
.
__class__
.
__name__
==
"Rwkv5Tokenizer"
):
# The RWKV world tokenizer, does not allow for adding special tokens / setting the pad token (which is set as 0)
# The additional tokenizer name check is needed, as there exists rwkv4 models with neox tokenizer
# ---
# Note that the world tokenizer class name, might change in the future for the final huggingface merge
# https://github.com/huggingface/transformers/pull/26963
assert
self
.
tokenizer
.
pad_token_id
==
0
else
:
self
.
tokenizer
.
add_special_tokens
({
"pad_token"
:
"<|pad|>"
})
...
...
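For tokenizers outside those two special cases, the final branch registers a new pad token explicitly. A quick sketch of that branch in isolation, using GPT-2's tokenizer (which ships without a pad token), assuming `transformers` is installed:

```python
# Sketch: the default branch adds an explicit pad token when the tokenizer lacks one.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
print(tokenizer.pad_token)  # None -- GPT-2 has no pad token by default
tokenizer.add_special_tokens({"pad_token": "<|pad|>"})
print(tokenizer.pad_token, tokenizer.pad_token_id)  # <|pad|> 50257
```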
@@ -361,7 +372,7 @@ class HFLM(LM):

```diff
     def _get_backend(
         self,
-        config: transformers.AutoConfig,
+        config: Union[transformers.PretrainedConfig, transformers.AutoConfig],
         backend: Optional[Literal["default", "causal", "seq2seq"]] = "default",
         trust_remote_code: Optional[bool] = False,
     ) -> None:
```
@@ -602,8 +613,7 @@ class HFLM(LM):

```diff
             (batch_size, max_length), device=self.device
         ).long()

         for _ in range(5):
-            out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)
-            out = out  # Identity process so that it passes pre-commit
+            out = F.log_softmax(self._model_call(test_batch, **call_kwargs), dim=-1)  # noqa: F841

         return batch_size
```
@@ -705,10 +715,14 @@ class HFLM(LM):

```diff
         return self.model(inps).logits

     def _model_generate(self, context, max_length, stop, **generation_kwargs):
-        # we require users to pass do_sample=True explicitly
-        # for non-greedy gen. This should be reevaluated when considering beam search.
-        if "do_sample" not in generation_kwargs:
-            generation_kwargs["do_sample"] = False
+        # temperature = 0.0 if not set
+        # if do_sample is false and temp==0.0:
+        # remove temperature, as do_sample=False takes care of this
+        # and we don't want a warning from HF
+        generation_kwargs["temperature"] = generation_kwargs.get("temperature", 0.0)
+        do_sample = generation_kwargs.get("do_sample", None)
+        if do_sample is False and generation_kwargs.get("temperature") == 0.0:
+            generation_kwargs.pop("temperature")
         # build stopping criteria
         stopping_criteria = stop_sequences_criteria(
             self.tokenizer, stop, context.shape[1], context.shape[0]
```
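In isolation, the new defaulting logic behaves as follows. A minimal standalone sketch (the helper name is hypothetical, not part of the class):

```python
# Minimal sketch of the temperature/do_sample normalization above
# (standalone, hypothetical helper -- not the library API).
def normalize_gen_kwargs(generation_kwargs: dict) -> dict:
    generation_kwargs["temperature"] = generation_kwargs.get("temperature", 0.0)
    do_sample = generation_kwargs.get("do_sample", None)
    # greedy decoding: drop temperature so HF's generate() does not warn
    if do_sample is False and generation_kwargs.get("temperature") == 0.0:
        generation_kwargs.pop("temperature")
    return generation_kwargs

assert normalize_gen_kwargs({"do_sample": False}) == {"do_sample": False}
assert normalize_gen_kwargs({}) == {"temperature": 0.0}
assert normalize_gen_kwargs({"do_sample": True, "temperature": 0.8}) == {
    "do_sample": True,
    "temperature": 0.8,
}
```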
@@ -1045,6 +1059,7 @@ class HFLM(LM):

```python
            return -len(toks), x[0]

        pbar = tqdm(total=len(requests), disable=(self.rank != 0))

        adaptive_batch_size = None
        if self.batch_size == "auto":
            # using rolling window with maximum context
            print("Passed argument batch_size = auto. Detecting largest batch size")
```
@@ -1089,7 +1104,7 @@ class HFLM(LM):

```diff
             )
         else:
             raise ValueError(
-                f"Expected `kwargs` to be of type `dict` but got {kwargs}"
+                f"Expected `kwargs` to be of type `dict` but got {type(gen_kwargs)}"
             )
         if not until:
             until = [self.tok_decode(self.eot_token_id)]
```
lm_eval/models/optimum_lm.py (new file, mode 100644)

```python
from importlib.util import find_spec
from pathlib import Path

from lm_eval.api.registry import register_model
from lm_eval.models.huggingface import HFLM


@register_model("openvino")
class OptimumLM(HFLM):
    """
    Optimum Intel provides a simple interface to optimize Transformer models and convert them to \
    OpenVINO™ Intermediate Representation (IR) format to accelerate end-to-end pipelines on \
    Intel® architectures using OpenVINO™ runtime.
    """

    def __init__(
        self,
        device="cpu",
        **kwargs,
    ) -> None:
        if "backend" in kwargs:
            # optimum currently only supports causal models
            assert (
                kwargs["backend"] == "causal"
            ), "Currently, only OVModelForCausalLM is supported."

        self.openvino_device = device

        super().__init__(
            device=self.openvino_device,
            backend=kwargs.get("backend", "causal"),
            **kwargs,
        )

    def _create_model(
        self,
        pretrained: str,
        revision="main",
        dtype="auto",
        trust_remote_code=False,
        **kwargs,
    ) -> None:
        if not find_spec("optimum"):
            raise Exception(
                "package `optimum` is not installed. Please install it via `pip install optimum[openvino]`"
            )
        else:
            from optimum.intel.openvino import OVModelForCausalLM

        model_kwargs = kwargs if kwargs else {}
        model_file = Path(pretrained) / "openvino_model.xml"
        if model_file.exists():
            export = False
        else:
            export = True
        kwargs["ov_config"] = {
            "PERFORMANCE_HINT": "LATENCY",
            "NUM_STREAMS": "1",
            "CACHE_DIR": "",
        }

        self._model = OVModelForCausalLM.from_pretrained(
            pretrained,
            revision=revision,
            trust_remote_code=trust_remote_code,
            export=export,
            device=self.openvino_device.upper(),
            **model_kwargs,
        )
```
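For reference, the class registers under the model name `openvino`, so it can be selected like any other backend. A minimal sketch of direct instantiation (the model path is a placeholder; assumes `optimum[openvino]` is installed, and note that if no `openvino_model.xml` exists at the path, `_create_model` exports the model on first load):

```python
# Sketch: instantiating the new OpenVINO backend directly.
from lm_eval.models.optimum_lm import OptimumLM

lm = OptimumLM(pretrained="./my-ov-model", device="cpu")
```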
lm_eval/models/vllm_causallms.py

@@ -170,18 +170,12 @@ class VLLM(LM):

```diff
         stop: Optional[List[str]] = None,
         **kwargs,
     ):
-        if "do_sample" in kwargs.keys():
-            kwargs.pop("do_sample")
         if generate:
-            # hf defaults
-            kwargs["skip_special_tokens"] = kwargs.get("skip_special_tokens", False)
-            kwargs["spaces_between_special_tokens"] = kwargs.get(
-                "spaces_between_special_tokens", False
-            )
+            kwargs = self.modify_gen_kwargs(kwargs)
             sampling_params = SamplingParams(max_tokens=max_tokens, stop=stop, **kwargs)
         else:
             sampling_params = SamplingParams(
-                temperature=0, prompt_logprobs=2, max_tokens=1
+                temperature=0, prompt_logprobs=1, max_tokens=1
             )
         if self.data_parallel_size > 1:
             requests = [list(x) for x in divide(requests, self.data_parallel_size)]
```
@@ -438,3 +432,16 @@ class VLLM(LM):

```diff
                     break

         return continuation_logprobs, is_greedy
+
+    @staticmethod
+    def modify_gen_kwargs(kwargs: dict) -> dict:
+        # sampling_params
+        do_sample = kwargs.pop("do_sample", None)
+        if do_sample is False or "temperature" not in kwargs:
+            kwargs["temperature"] = 0.0
+        # hf defaults
+        kwargs["skip_special_tokens"] = kwargs.get("skip_special_tokens", False)
+        kwargs["spaces_between_special_tokens"] = kwargs.get(
+            "spaces_between_special_tokens", False
+        )
+        return kwargs
```
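A quick sanity check of what the new static helper does to a generation-kwargs dict (a sketch, assuming `vllm` is installed so the module imports): `do_sample` is stripped, since vLLM's `SamplingParams` has no such flag, and greedy decoding is expressed as `temperature=0.0`.

```python
# Sketch: behavior of VLLM.modify_gen_kwargs on typical inputs, per the diff above.
from lm_eval.models.vllm_causallms import VLLM

# explicit do_sample=False forces greedy, overriding any temperature
assert VLLM.modify_gen_kwargs({"do_sample": False, "temperature": 0.7}) == {
    "temperature": 0.0,
    "skip_special_tokens": False,
    "spaces_between_special_tokens": False,
}
# a plain temperature passes through unchanged
assert VLLM.modify_gen_kwargs({"temperature": 0.7}) == {
    "temperature": 0.7,
    "skip_special_tokens": False,
    "spaces_between_special_tokens": False,
}
```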
lm_eval/prompts/__init__.py

@@ -117,7 +117,7 @@ class PromptString:

```diff
         # TODO need a way to process doc_to_choice
         if "doc_to_choice" in self.prompt_string:
-            raise "Not yet implemented to accept doc_to_choice"
+            raise Exception("Not yet implemented to accept doc_to_choice")

         text_string = utils.apply_template(doc_to_text, doc)
         target_string = utils.apply_template(doc_to_target, doc)
```
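The replaced line was a genuine bug: in Python 3, `raise` requires a `BaseException` subclass or instance, so raising a bare string fails with a `TypeError` instead of surfacing the intended message. A minimal demonstration:

```python
# Raising a non-exception object fails in Python 3.
try:
    raise "Not yet implemented to accept doc_to_choice"
except TypeError as e:
    print(e)  # exceptions must derive from BaseException
```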
lm_eval/tasks/__init__.py

(diff collapsed; +366, -232)
lm_eval/tasks/arc/arc_easy.yaml

```diff
 group:
   - ai2_arc
 task: arc_easy
-dataset_path: ai2_arc
+dataset_path: allenai/ai2_arc
 dataset_name: ARC-Easy
 output_type: multiple_choice
 training_split: train
```
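The `dataset_path` switches to the org-namespaced Hugging Face Hub ID. A quick way to confirm the new path resolves (assuming the `datasets` library is installed):

```python
# Quick check that the renamed Hub dataset ID resolves.
from datasets import load_dataset

ds = load_dataset("allenai/ai2_arc", "ARC-Easy", split="train")
print(ds[0]["question"])
```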
lm_eval/tasks/bbh/_generate_configs.py

@@ -28,7 +28,7 @@ if __name__ == "__main__":

```diff
     # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
     base_yaml_name = os.path.split(args.base_yaml_path)[-1]
-    with open(args.base_yaml_path) as f:
+    with open(args.base_yaml_path, encoding="utf-8") as f:
         base_yaml = yaml.full_load(f)

     base_doc_to_text = "Q: {{input}}\nA:"
```
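Several hunks in this commit make the same change: passing `encoding="utf-8"` explicitly, since `open()`'s default text encoding is platform-dependent (it follows the locale, e.g. cp1252 on some Windows setups) and task YAMLs can contain non-ASCII characters. A minimal illustration:

```python
# Locale-dependent default vs. explicit encoding.
import locale

print(locale.getpreferredencoding(False))  # what open() uses by default
with open("some_task.yaml", "w", encoding="utf-8") as f:  # deterministic everywhere
    f.write("doc_to_text: 'Pourquoi… ?'\n")
```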
@@ -70,7 +70,7 @@ if __name__ == "__main__":

```diff
         file_save_path = args.save_prefix_path + f"/{task}.yaml"
         utils.eval_logger.info(f"Saving yaml for subset {task} to {file_save_path}")
-        with open(file_save_path, "w") as yaml_file:
+        with open(file_save_path, "w", encoding="utf-8") as yaml_file:
             yaml.dump(
                 yaml_dict,
                 yaml_file,
```
lm_eval/tasks/bbh/cot_fewshot/_cot_fewshot_template_yaml

@@ -28,3 +28,4 @@ filter_list:

```diff
 num_fewshot: 0
 metadata:
   version: 2.0
+  num_fewshot: 3 # controls what is printed in n-shot
```
lm_eval/tasks/bbh/fewshot/_fewshot_template_yaml

@@ -19,3 +19,4 @@ generation_kwargs:

```diff
 num_fewshot: 0
 metadata:
   version: 1.0
+  num_fewshot: 3 # will be printed in results table
```
lm_eval/tasks/belebele/_generate_configs.py

@@ -27,13 +27,13 @@ if __name__ == "__main__":

```diff
     # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
     base_yaml_name = os.path.split(args.base_yaml_path)[-1]
-    with open(args.base_yaml_path) as f:
+    with open(args.base_yaml_path, encoding="utf-8") as f:
         base_yaml = yaml.full_load(f)

     if args.cot_prompt_path is not None:
         import json

-        with open(args.cot_prompt_path) as f:
+        with open(args.cot_prompt_path, encoding="utf-8") as f:
             cot_file = json.load(f)

     def query():
```
@@ -42,7 +42,7 @@ if __name__ == "__main__":

```diff
     print(query())
     languages = [split["split"] for split in query()]

-    for lang in tqdm(languages):
+    for lang in tqdm([lang for lang in languages if "default" not in lang]):
         yaml_dict = {
             "include": base_yaml_name,
             "task": f"belebele_{args.task_prefix}_{lang}",
```
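The new loop filter just skips Belebele's aggregate split when emitting per-language configs; for instance:

```python
# The comprehension drops the aggregate "default" split before config generation.
languages = ["acm_Arab", "default", "eng_Latn"]
print([lang for lang in languages if "default" not in lang])  # ['acm_Arab', 'eng_Latn']
```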
@@ -54,7 +54,7 @@ if __name__ == "__main__":

```diff
         file_save_path = args.save_prefix_path + f"_{lang}.yaml"
         logging.info(f"Saving yaml for subset {lang} to {file_save_path}")
-        with open(file_save_path, "w") as yaml_file:
+        with open(file_save_path, "w", encoding="utf-8") as yaml_file:
             yaml.dump(
                 yaml_dict,
                 yaml_file,
```
lm_eval/tasks/belebele/belebele_default.yaml (deleted, mode 100644 → 0)

```yaml
"fewshot_split": "default"
"include": "_default_template_yaml"
"task": "belebele_default"
"test_split": "default"
```
lm_eval/tasks/benchmarks/flan/yaml_templates/held_in_template_yaml → lm_eval/tasks/benchmarks/flan/_held_in_template_yaml (renamed)

```yaml
output_type: generate_until
validation_split: validation
test_split: null
doc_to_choice: null
metric_list:
  - metric: exact_match
    aggregation: mean
```
lm_eval/tasks/benchmarks/flan/flan_anli.yaml (deleted)

```yaml
group: flan_anli
task:
  - include: yaml_templates/held_in_template_yaml
    task: anli_r1
    dataset_path: anli
    use_prompt: prompt_templates/anli.yaml:*
    validation_split: dev_r1
  - include: yaml_templates/held_in_template_yaml
    task: anli_r2
    dataset_path: anli
    use_prompt: prompt_templates/anli.yaml:*
    validation_split: dev_r2
  - include: yaml_templates/held_in_template_yaml
    task: anli_r3
    dataset_path: anli
    use_prompt: prompt_templates/anli.yaml:*
    validation_split: dev_r3
```
lm_eval/tasks/benchmarks/flan/flan_arc.yaml (deleted)

```yaml
group: flan_arc
task:
  - include: yaml_templates/held_in_template_yaml
    task: arc_easy
    dataset_path: ai2_arc
    dataset_name: ARC-Easy
    use_prompt: prompt_templates/arc.yaml:*
    validation_split: validation
  - include: yaml_templates/held_in_template_yaml
    task: arc_challenge
    dataset_path: ai2_arc
    dataset_name: ARC-Challenge
    use_prompt: prompt_templates/arc.yaml:*
    validation_split: validation
```
lm_eval/tasks/benchmarks/flan/flan_boolq.yaml (deleted)

```yaml
group: flan_boolq
task:
  - include: yaml_templates/held_in_template_yaml
    dataset_path: super_glue
    dataset_name: boolq
    use_prompt: prompt_templates/boolq.yaml:*
    validation_split: validation
```
lm_eval/tasks/benchmarks/flan/flan_cot.yaml (deleted)

```yaml
group: flan_cot
task:
  - include: yaml_templates/cot_template_yaml
    dataset_path: gsmk
    dataset_name: boolq
    use_prompt: promptsource:*
    validation_split: validation
  - include: yaml_templates/cot_template_yaml
    dataset_path: EleutherAI/asdiv
    use_prompt: promptsource:*
    validation_split: validation
```
lm_eval/tasks/benchmarks/flan/flan_held_in.yaml

The previous task list of four includes (flan_boolq, flan_rte, flan_anli, flan_arc) is replaced with inline per-prompt task definitions:

```yaml
group: flan_held_in
group_alias: Flan (Held-In)
task:
  # ANLI R1
  - group: anli_r1_flan
    group_alias: ANLI R1
    task:
      - task: anli_r1
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-7
        include: _held_in_template_yaml
        doc_to_text: "Can we draw the following hypothesis from the context (see options)?\n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r1
        task_alias: prompt-8
        include: _held_in_template_yaml
        doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
  # ANLI R2
  - group: anli_r2_flan
    group_alias: ANLI R2
    task:
      - task: anli_r2
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-7
        include: _held_in_template_yaml
        doc_to_text: "Can we draw the following hypothesis from the context (see options)?\n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r2
        task_alias: prompt-8
        include: _held_in_template_yaml
        doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
  # ANLI R3
  - group: anli_r3_flan
    group_alias: ANLI R3
    task:
      - task: anli_r3
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nChoose your answer: based on the paragraph above can we conclude that \"{{hypothesis}}\"?\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nI think the answer is"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nBased on that paragraph can we conclude that this sentence is true?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\nDoes this next sentence follow, given the preceding text?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "{{premise}}\nCan we infer the following?\n{{hypothesis}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nThe answer is:"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "Read the following paragraph and determine if the hypothesis is true:\n\n{{premise}}\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No\nHypothesis: {{hypothesis}}\n\n\n"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "Read the text and determine if the sentence is true (see options at the end):\n\n{{premise}}\n\nSentence: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-7
        include: _held_in_template_yaml
        doc_to_text: "Can we draw the following hypothesis from the context (see options)?\n\nContext:\n\n{{premise}}\n\nHypothesis: {{hypothesis}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
      - task: anli_r3
        task_alias: prompt-8
        include: _held_in_template_yaml
        doc_to_text: "Choose from options: Determine if the sentence is true based on the text below:\n{{hypothesis}}\n\n{{premise}}\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
        doc_to_target: "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"
  # Arc Easy
  - group: arc_easy_flan
    group_alias: Arc Easy
    task:
      - task: arc_easy
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_easy
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_easy
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_easy
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_easy
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_easy
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_easy
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
  # Arc Challenge
  - group: arc_challenge_flan
    group_alias: Arc Challenge
    task:
      - task: arc_challenge
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_challenge
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "Question: {{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}\nAnswer:"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_challenge
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "Question: {{question}}\n\nWhat is the correct answer to the question from the following choices?\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_challenge
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "Q: {{question}}\nWhat is the correct answer to this question?\nOPTIONS:\n- {{choices.text|join('\n- ')}}...A:"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_challenge
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "Choose your answer?\n\n{{question}}\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_challenge
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "Answer the question\n\n{{question}}\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
      - task: arc_challenge
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "{{question}}\n\nPick the answer from these options\n\nOPTIONS:\n- {{choices.text|join('\n- ')}}"
        doc_to_target: "{{choices.text[choices.label.index(answerKey)]}}"
  # BoolQ
  - group: boolq_flan
    group_alias: BoolQ
    task:
      - task: boolq
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\n\nCan we conclude that {{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\n\nIs it true that {{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\n\n{{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "Text: {{passage}}\n\nQuestion: {{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\n\nWhat's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\nBased on the above text what's the best answer to this question: {{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\nAnswer this question making sure that the answer is supposed by the text: {{question}}?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-7
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\n\nIs the following statement correct based on the text\n\n{{question}}\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-8
        include: _held_in_template_yaml
        doc_to_text: "{{passage}}\n\nIs this statement correct \"{{question}}\"?\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
      - task: boolq
        task_alias: prompt-9
        include: _held_in_template_yaml
        doc_to_text: "Is it true that {{question}} based on the following text?\n\n{{passage}}\n\nOPTIONS:\n- no\n- yes"
        doc_to_target: "{{['no', 'yes'][label]}}"
  # RTE
  - group: rte_flan
    group_alias: RTE
    task:
      - task: rte
        task_alias: prompt-0
        include: _held_in_template_yaml
        doc_to_text: "{{sentence1}}\n\nQuestion with options: Based on the paragraph above can we conclude that \"{{sentence2}}\"?\n\nOPTIONS:\n- yes\n- no"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-1
        include: _held_in_template_yaml
        doc_to_text: "{{sentence1}}\n\nBased on that paragraph can we conclude that the sentence below is true?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-2
        include: _held_in_template_yaml
        doc_to_text: "{{sentence1}}\n\nQ with options: Can we draw the following conclusion?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-3
        include: _held_in_template_yaml
        doc_to_text: "{{sentence1}}\nDoes this next sentence follow, given the preceding text?\n{{sentence2}}\n\nOPTIONS:\n- yes\n- no"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-4
        include: _held_in_template_yaml
        doc_to_text: "{{sentence1}}\nOPTIONS:\n- yes\n- no\nQuestion: Can we infer the following?\n{{sentence2}}"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-5
        include: _held_in_template_yaml
        doc_to_text: "Read the following paragraph and determine if the hypothesis is true. Select from options at the end:\n\n{{sentence1}}\n\nHypothesis: {{sentence2}}\nOPTIONS:\n- yes\n- no\nThe answer is"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-6
        include: _held_in_template_yaml
        doc_to_text: "Read the text and determine if the sentence is true:\n\n{{sentence1}}\n\nSentence: {{sentence2}}\nOPTIONS:\n- yes\n- no\nA:"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-7
        include: _held_in_template_yaml
        doc_to_text: "Question with options: can we draw the following hypothesis from the context?\n\nContext:\n\n{{sentence1}}\n\nHypothesis: {{sentence2}}\nOPTIONS:\n- yes\n- no\nA:"
        doc_to_target: "{{['yes', 'no'][label]}}"
      - task: rte
        task_alias: prompt-8
        include: _held_in_template_yaml
        doc_to_text: "Determine if the sentence is true based on the text below. Choose from options.\n{{sentence2}}\n\n{{sentence1}}\nOPTIONS:\n- yes\n- no"
        doc_to_target: "{{['yes', 'no'][label]}}"
```
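The `doc_to_text` / `doc_to_target` fields are Jinja templates over dataset columns. As a rough illustration of how one prompt/target pair renders (driving `jinja2` directly here, rather than through the harness's own template utilities):

```python
# Rough illustration: rendering one ANLI prompt template with jinja2.
from jinja2 import Template

doc = {"premise": "The cat sat on the mat.", "hypothesis": "A cat is on a mat.", "label": 0}
doc_to_text = (
    "{{premise}}\n\nCan we draw the following conclusion?\n{{hypothesis}}"
    "\n\nOPTIONS:\n- Yes\n- It's impossible to say\n- No"
)
doc_to_target = "{{[\"Yes\", \"It's impossible to say\", \"No\"][label]}}"

print(Template(doc_to_text).render(**doc))
print(Template(doc_to_target).render(**doc))  # -> Yes
```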
lm_eval/tasks/benchmarks/flan/flan_held_in_yaml (deleted)

```yaml
group: flan_held_in
task:
  - include: flan/yaml_templates/held_in_template_yaml
    dataset_path: super_glue
    dataset_name: boolq
    use_prompt: flan/prompt_templates/boolq.yaml:*
    validation_split: validation
  - include: flan/yaml_templates/held_in_template_yaml
    dataset_path: super_glue
    dataset_name: rte
    use_prompt: flan/prompt_templates/rte.yaml:*
    validation_split: validation
  - include: flan/yaml_templates/held_in_template_yaml
    task: anli_r1
    dataset_path: anli
    use_prompt: flan/prompt_templates/anli.yaml:*
    validation_split: dev_r1
  - include: flan/yaml_templates/held_in_template_yaml
    task: anli_r2
    dataset_path: anli
    use_prompt: flan/prompt_templates/anli.yaml:*
    validation_split: dev_r2
  - include: flan/yaml_templates/held_in_template_yaml
    task: anli_r3
    dataset_path: anli
    use_prompt: flan/prompt_templates/anli.yaml:*
    validation_split: dev_r3
  - include: flan/yaml_templates/held_in_template_yaml
    task: arc_easy
    dataset_path: ai2_arc
    dataset_name: ARC-Easy
    use_prompt: flan/prompt_templates/arc.yaml:*
    validation_split: validation
  - include: flan/yaml_templates/held_in_template_yaml
    task: arc_challenge
    dataset_path: ai2_arc
    dataset_name: ARC-Challenge
    use_prompt: flan/prompt_templates/arc.yaml:*
    validation_split: validation
```
lm_eval/tasks/benchmarks/flan/flan_held_out.yaml

```diff
 group: flan_held_out
 task:
   # BBH
-  - bbh_flan_zeroshot
-  - bbh_flan_fewshot
-  - bbh_flan_cot_fewshot
-  - bbh_flan_cot_zeroshot
+  - bbh_zeroshot
+  - bbh_fewshot
+  - bbh_cot_fewshot
+  - bbh_cot_zeroshot
   # MMLU
   - mmlu
   - mmlu_flan_n_shot_generative
```
lm_eval/tasks/benchmarks/flan/flan_rte.yaml (deleted)

```yaml
group: flan_rte
task:
  - include: yaml_templates/held_in_template_yaml
    dataset_path: super_glue
    dataset_name: rte
    use_prompt: prompt_templates/rte.yaml:*
    validation_split: validation
```