OpenDAS / opencompass · Commits

Commit aa2dd2b5 (Unverified)
Authored May 14, 2024 by Fengzhe Zhou; committed by GitHub on May 14, 2024

[Format] Add config lints (#892)
Parent: 3dbba119
Changes: 648
Showing 20 changed files with 123 additions and 123 deletions (+123 −123)
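Every hunk on this page is the same mechanical change: string literals in the dataset configs are rewritten from double quotes to single quotes so that they pass the newly added config lint (the lint tooling itself is not shown on this page). A minimal before/after sketch, using keys taken from the cmb config below:

    # Before: double-quoted strings, flagged by the new config lint
    cmb_reader_cfg = dict(
        input_columns=["exam_type", "exam_class", "question_type", "question", "option_str"],
        output_column="answer",
    )

    # After: single-quoted strings, the style this commit normalizes to
    cmb_reader_cfg = dict(
        input_columns=['exam_type', 'exam_class', 'question_type', 'question', 'option_str'],
        output_column='answer',
    )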
configs/datasets/cmb/cmb_gen_dfb5c4.py (+8 −8)
configs/datasets/cmmlu/cmmlu_gen_c13365.py (+10 −10)
configs/datasets/cmmlu/cmmlu_ppl_041cbf.py (+10 −10)
configs/datasets/cmmlu/cmmlu_ppl_8b9c76.py (+11 −11)
configs/datasets/collections/base_core.py (+1 −1)
configs/datasets/collections/chat_core.py (+1 −1)
configs/datasets/commonsenseqa/commonsenseqa_gen_1da2d0.py (+9 −9)
configs/datasets/commonsenseqa/commonsenseqa_gen_c946f2.py (+9 −9)
configs/datasets/commonsenseqa/commonsenseqa_ppl_3e9f2d.py (+8 −8)
configs/datasets/commonsenseqa/commonsenseqa_ppl_5545e2.py (+5 −5)
configs/datasets/commonsenseqa/commonsenseqa_ppl_716f78.py (+5 −5)
configs/datasets/commonsenseqa/commonsenseqa_ppl_c49e77.py (+2 −2)
configs/datasets/commonsenseqa/commonsenseqa_ppl_e51e32.py (+5 −5)
configs/datasets/commonsenseqa_cn/commonsenseqacn_gen_d380d0.py (+10 −10)
configs/datasets/commonsenseqa_cn/commonsenseqacn_ppl_971f48.py (+14 −14)
configs/datasets/contamination/ceval_contamination_ppl_810ec6.py (+2 −2)
configs/datasets/contamination/mbpp_contamination_ppl_f01cb6.py (+2 −2)
configs/datasets/contamination/mmlu_contamination_ppl_810ec6.py (+2 −2)
configs/datasets/crowspairs/crowspairs_gen_02b6c1.py (+3 −3)
configs/datasets/crowspairs/crowspairs_gen_381af0.py (+6 −6)
configs/datasets/cmb/cmb_gen_dfb5c4.py

@@ -7,10 +7,10 @@ from opencompass.utils.text_postprocessors import multiple_select_postprocess
 cmb_datasets = []
-for split in ["val", "test"]:
+for split in ['val', 'test']:
     cmb_reader_cfg = dict(
-        input_columns=["exam_type", "exam_class", "question_type", "question", "option_str"],
-        output_column="answer",
+        input_columns=['exam_type', 'exam_class', 'question_type', 'question', 'option_str'],
+        output_column='answer',
         train_split=split,
         test_split=split,
     )
@@ -21,10 +21,10 @@ for split in ["val", "test"]:
             template=dict(
                 round=[
                     dict(
-                        role="HUMAN",
-                        prompt=f"以下是中国{{exam_type}}中{{exam_class}}考试的一道{{question_type}},不需要做任何分析和解释,直接输出答案选项。\n{{question}}\n{{option_str}}\n答案:",
+                        role='HUMAN',
+                        prompt=f'以下是中国{{exam_type}}中{{exam_class}}考试的一道{{question_type}},不需要做任何分析和解释,直接输出答案选项。\n{{question}}\n{{option_str}}\n答案:',
                     ),
-                    dict(role="BOT", prompt="{answer}"),
+                    dict(role='BOT', prompt='{answer}'),
                 ],
             ),
         ),
@@ -39,9 +39,9 @@ for split in ["val", "test"]:
     cmb_datasets.append(
         dict(
-            abbr="cmb" if split == "val" else "cmb_test",
+            abbr='cmb' if split == 'val' else 'cmb_test',
             type=CMBDataset,
-            path="./data/CMB/",
+            path='./data/CMB/',
             reader_cfg=cmb_reader_cfg,
             infer_cfg=cmb_infer_cfg,
             eval_cfg=cmb_eval_cfg,
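Read together, the three hunks above show the usual OpenCompass dataset-config shape: a reader config, an inference config, and an evaluation config collected into a *_datasets list. A trimmed sketch assembled only from the lines visible in the hunks; cmb_infer_cfg and cmb_eval_cfg are defined in the elided parts of the file:

    for split in ['val', 'test']:
        cmb_reader_cfg = dict(
            input_columns=['exam_type', 'exam_class', 'question_type', 'question', 'option_str'],
            output_column='answer',
            train_split=split,
            test_split=split,
        )
        # ... cmb_infer_cfg and cmb_eval_cfg are built here (not shown in the hunks) ...
        cmb_datasets.append(
            dict(
                abbr='cmb' if split == 'val' else 'cmb_test',
                type=CMBDataset,
                path='./data/CMB/',
                reader_cfg=cmb_reader_cfg,
                infer_cfg=cmb_infer_cfg,
                eval_cfg=cmb_eval_cfg,
            ))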
configs/datasets/cmmlu/cmmlu_gen_c13365.py

@@ -85,16 +85,16 @@ for _name in cmmlu_all_sets:
         ice_template=dict(
             type=PromptTemplate,
             template=dict(
-                begin="</E>",
+                begin='</E>',
                 round=[
                     dict(
-                        role="HUMAN",
+                        role='HUMAN',
                         prompt=
-                        f"以下是关于{_ch_name}的单项选择题,请直接给出正确答案的选项。\n题目:{{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}"
+                        f'以下是关于{_ch_name}的单项选择题,请直接给出正确答案的选项。\n题目:{{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}'
                     ),
-                    dict(role="BOT", prompt='答案是: {answer}'),
+                    dict(role='BOT', prompt='答案是: {answer}'),
                 ]),
-            ice_token="</E>",
+            ice_token='</E>',
         ),
         retriever=dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4]),
         inferencer=dict(type=GenInferencer),
@@ -107,13 +107,13 @@ for _name in cmmlu_all_sets:
     cmmlu_datasets.append(
         dict(
             type=CMMLUDataset,
-            path="./data/cmmlu/",
+            path='./data/cmmlu/',
             name=_name,
-            abbr=f"cmmlu-{_name}",
+            abbr=f'cmmlu-{_name}',
             reader_cfg=dict(
-                input_columns=["question", "A", "B", "C", "D"],
-                output_column="answer",
-                train_split="dev",
+                input_columns=['question', 'A', 'B', 'C', 'D'],
+                output_column='answer',
+                train_split='dev',
                 test_split='test'),
             infer_cfg=cmmlu_infer_cfg,
             eval_cfg=cmmlu_eval_cfg,
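The '</E>' marker in the hunks above is the ice_token: the retriever (FixKRetriever with fix_id_list=[0, 1, 2, 3, 4]) selects five fixed in-context examples, renders them with the ice_template, and splices the result in where the token sits. A conceptual sketch only, not the OpenCompass implementation; the example strings are hypothetical:

    # Conceptual illustration of ice_token substitution (hypothetical strings).
    rendered_examples = '题目:...\nA. ...\nB. ...\nC. ...\nD. ...\n答案是: C\n'  # the 5 fixed examples, concatenated
    prompt_with_token = '</E>题目:{question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}'
    final_prompt = prompt_with_token.replace('</E>', rendered_examples)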
configs/datasets/cmmlu/cmmlu_ppl_041cbf.py

@@ -81,17 +81,17 @@ cmmlu_all_sets = list(cmmlu_subject_mapping.keys())
 cmmlu_datasets = []
 for _name in cmmlu_all_sets:
     _ch_name = cmmlu_subject_mapping[_name]
-    hint = f"以下是关于{_ch_name}的单项选择题,请直接给出正确答案的选项。"
-    question_and_options = "题目:{question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}"
+    hint = f'以下是关于{_ch_name}的单项选择题,请直接给出正确答案的选项。'
+    question_and_options = '题目:{question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}'
     cmmlu_infer_cfg = dict(
         ice_template=dict(
             type=PromptTemplate,
-            template={answer: f"{question_and_options}\n答案是:{answer}\n" for answer in ["A", "B", "C", "D"]},
+            template={answer: f'{question_and_options}\n答案是:{answer}\n' for answer in ['A', 'B', 'C', 'D']},
         ),
         prompt_template=dict(
             type=PromptTemplate,
-            template={answer: f"{hint}\n</E>{question_and_options}\n答案是:{answer}" for answer in ["A", "B", "C", "D"]},
-            ice_token="</E>",
+            template={answer: f'{hint}\n</E>{question_and_options}\n答案是:{answer}' for answer in ['A', 'B', 'C', 'D']},
+            ice_token='</E>',
         ),
         retriever=dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4]),
         inferencer=dict(type=PPLInferencer),
@@ -102,13 +102,13 @@ for _name in cmmlu_all_sets:
     cmmlu_datasets.append(
         dict(
             type=CMMLUDataset,
-            path="./data/cmmlu/",
+            path='./data/cmmlu/',
             name=_name,
-            abbr=f"cmmlu-{_name}",
+            abbr=f'cmmlu-{_name}',
             reader_cfg=dict(
-                input_columns=["question", "A", "B", "C", "D"],
-                output_column="answer",
-                train_split="dev",
+                input_columns=['question', 'A', 'B', 'C', 'D'],
+                output_column='answer',
+                train_split='dev',
                 test_split='test'),
             infer_cfg=cmmlu_infer_cfg,
             eval_cfg=cmmlu_eval_cfg,
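In the PPL-style config above, the template is a dict with one entry per candidate answer; PPLInferencer scores each filled-in prompt and, roughly, the lowest-perplexity option becomes the prediction. A short runnable snippet showing what the dict comprehension in the first hunk expands to (output shown as comments):

    question_and_options = '题目:{question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}'
    template = {answer: f'{question_and_options}\n答案是:{answer}\n' for answer in ['A', 'B', 'C', 'D']}

    print(template['A'])
    # 题目:{question}
    # A. {A}
    # B. {B}
    # C. {C}
    # D. {D}
    # 答案是:A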
configs/datasets/cmmlu/cmmlu_ppl_8b9c76.py

@@ -86,17 +86,17 @@ for _name in cmmlu_all_sets:
             type=PromptTemplate,
             template={
                 answer: dict(
-                    begin="</E>",
+                    begin='</E>',
                     round=[
                         dict(
-                            role="HUMAN",
-                            prompt=f"以下是关于{_ch_name}的单项选择题,请直接给出正确答案的选项。\n题目:{{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}"
+                            role='HUMAN',
+                            prompt=f'以下是关于{_ch_name}的单项选择题,请直接给出正确答案的选项。\n题目:{{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}'
                         ),
-                        dict(role="BOT", prompt=f'答案是: {answer}'),
+                        dict(role='BOT', prompt=f'答案是: {answer}'),
                     ])
-                for answer in ["A", "B", "C", "D"]
+                for answer in ['A', 'B', 'C', 'D']
             },
-            ice_token="</E>",
+            ice_token='</E>',
         ),
         retriever=dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4]),
         inferencer=dict(type=PPLInferencer),
@@ -107,13 +107,13 @@ for _name in cmmlu_all_sets:
     cmmlu_datasets.append(
         dict(
             type=CMMLUDataset,
-            path="./data/cmmlu/",
+            path='./data/cmmlu/',
             name=_name,
-            abbr=f"cmmlu-{_name}",
+            abbr=f'cmmlu-{_name}',
             reader_cfg=dict(
-                input_columns=["question", "A", "B", "C", "D"],
-                output_column="answer",
-                train_split="dev",
+                input_columns=['question', 'A', 'B', 'C', 'D'],
+                output_column='answer',
+                train_split='dev',
                 test_split='test'),
             infer_cfg=cmmlu_infer_cfg,
             eval_cfg=cmmlu_eval_cfg,
configs/datasets/collections/base_core.py

@@ -17,4 +17,4 @@ with read_base():
     from ..humaneval.humaneval_gen_d2537e import humaneval_datasets
     from ..mbpp.deprecated_sanitized_mbpp_gen_cb43ef import sanitized_mbpp_datasets

-datasets = sum((v for k, v in locals().items() if k.endswith("_datasets")), [])
+datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
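The single line changed in this file is the collection idiom used by the OpenCompass config collections: every module-level variable whose name ends in _datasets is a list of dataset configs, and they are flattened into one datasets list. A self-contained illustration with two hypothetical stand-in entries (the real lists come from the read_base() imports above):

    humaneval_datasets = [dict(abbr='openai_humaneval')]     # hypothetical stand-in
    sanitized_mbpp_datasets = [dict(abbr='sanitized_mbpp')]  # hypothetical stand-in

    # Concatenate every *_datasets list defined so far into one flat list.
    datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
    print([d['abbr'] for d in datasets])  # ['openai_humaneval', 'sanitized_mbpp']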
configs/datasets/collections/chat_core.py

@@ -17,4 +17,4 @@ with read_base():
     from ..humaneval.humaneval_gen_8e312c import humaneval_datasets
     from ..mbpp.deprecated_sanitized_mbpp_gen_1e1056 import sanitized_mbpp_datasets

-datasets = sum((v for k, v in locals().items() if k.endswith("_datasets")), [])
+datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
configs/datasets/commonsenseqa/commonsenseqa_gen_1da2d0.py

@@ -7,27 +7,27 @@ from opencompass.datasets import commonsenseqaDataset
 from opencompass.utils.text_postprocessors import first_capital_postprocess

 commonsenseqa_reader_cfg = dict(
-    input_columns=["question", "A", "B", "C", "D", "E"],
-    output_column="answerKey",
-    test_split="validation")
+    input_columns=['question', 'A', 'B', 'C', 'D', 'E'],
+    output_column='answerKey',
+    test_split='validation')

 _ice_template = dict(
     type=PromptTemplate,
     template=dict(
-        begin="</E>",
+        begin='</E>',
         round=[
             dict(
-                role="HUMAN",
+                role='HUMAN',
                 prompt=
-                "{question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\nAnswer:",
+                '{question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\nAnswer:',
             ),
             dict(
-                role="BOT",
-                prompt="{answerKey}",
+                role='BOT',
+                prompt='{answerKey}',
             ),
         ],
     ),
-    ice_token="</E>",
+    ice_token='</E>',
 )

 commonsenseqa_infer_cfg = dict(
configs/datasets/commonsenseqa/commonsenseqa_gen_c946f2.py

@@ -6,27 +6,27 @@ from opencompass.datasets import commonsenseqaDataset
 from opencompass.utils.text_postprocessors import first_capital_postprocess

 commonsenseqa_reader_cfg = dict(
-    input_columns=["question", "A", "B", "C", "D", "E"],
-    output_column="answerKey",
-    test_split="validation")
+    input_columns=['question', 'A', 'B', 'C', 'D', 'E'],
+    output_column='answerKey',
+    test_split='validation')

 _ice_template = dict(
     type=PromptTemplate,
     template=dict(
-        begin="</E>",
+        begin='</E>',
         round=[
             dict(
-                role="HUMAN",
+                role='HUMAN',
                 prompt=
-                "{question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\nAnswer:",
+                '{question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\nAnswer:',
             ),
             dict(
-                role="BOT",
-                prompt="{answerKey}",
+                role='BOT',
+                prompt='{answerKey}',
             ),
         ],
     ),
-    ice_token="</E>",
+    ice_token='</E>',
 )

 commonsenseqa_infer_cfg = dict(
configs/datasets/commonsenseqa/commonsenseqa_ppl_3e9f2d.py

@@ -15,17 +15,17 @@ _ice_template = dict(
         ans: dict(
             begin=[
                 dict(
-                    role="SYSTEM",
-                    fallback_role="HUMAN",
-                    prompt=f"Answer the following question:"), '</E>'
+                    role='SYSTEM',
+                    fallback_role='HUMAN',
+                    prompt=f'Answer the following question:'), '</E>'
             ],
             round=[
-                dict(role="HUMAN", prompt="{question}"),
-                dict(role="BOT", prompt=ans_token),
+                dict(role='HUMAN', prompt='{question}'),
+                dict(role='BOT', prompt=ans_token),
             ])
-        for ans, ans_token in [["A", "{A}"], ["B", "{B}"], ["C", "{C}"], ["D", "{D}"], ["E", "{E}"]]
+        for ans, ans_token in [['A', '{A}'], ['B', '{B}'], ['C', '{C}'], ['D', '{D}'], ['E', '{E}']]
     },
     ice_token='</E>')
configs/datasets/commonsenseqa/commonsenseqa_ppl_5545e2.py

@@ -15,12 +15,12 @@ _ice_template = dict(
         ans: dict(
             begin='</E>',
             round=[
-                dict(role="HUMAN", prompt="Question: {question}\nAnswer:"),
-                dict(role="BOT", prompt=ans_token),
+                dict(role='HUMAN', prompt='Question: {question}\nAnswer:'),
+                dict(role='BOT', prompt=ans_token),
             ])
-        for ans, ans_token in [["A", "{A}"], ["B", "{B}"], ["C", "{C}"], ["D", "{D}"], ["E", "{E}"]]
+        for ans, ans_token in [['A', '{A}'], ['B', '{B}'], ['C', '{C}'], ['D', '{D}'], ['E', '{E}']]
     },
     ice_token='</E>')
configs/datasets/commonsenseqa/commonsenseqa_ppl_716f78.py

@@ -12,11 +12,11 @@ commonsenseqa_reader_cfg = dict(
 _ice_template = dict(
     type=PromptTemplate,
     template={
-        'A': "</E>Answer the following question:\n{question}\nAnswer: {A}",
-        'B': "</E>Answer the following question:\n{question}\nAnswer: {B}",
-        'C': "</E>Answer the following question:\n{question}\nAnswer: {C}",
-        'D': "</E>Answer the following question:\n{question}\nAnswer: {D}",
-        'E': "</E>Answer the following question:\n{question}\nAnswer: {E}",
+        'A': '</E>Answer the following question:\n{question}\nAnswer: {A}',
+        'B': '</E>Answer the following question:\n{question}\nAnswer: {B}',
+        'C': '</E>Answer the following question:\n{question}\nAnswer: {C}',
+        'D': '</E>Answer the following question:\n{question}\nAnswer: {D}',
+        'E': '</E>Answer the following question:\n{question}\nAnswer: {E}',
     },
     ice_token='</E>')
configs/datasets/commonsenseqa/commonsenseqa_ppl_c49e77.py

@@ -16,8 +16,8 @@ _ice_template = dict(
         ans: dict(
             begin='</E>',
             round=[
-                dict(role="HUMAN", prompt="Question: {question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\nAnswer:"),
-                dict(role="BOT", prompt=f"{ans}"),
+                dict(role='HUMAN', prompt='Question: {question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\nAnswer:'),
+                dict(role='BOT', prompt=f'{ans}'),
             ])
         for ans in ['A', 'B', 'C', 'D', 'E']
     },
configs/datasets/commonsenseqa/commonsenseqa_ppl_e51e32.py

@@ -15,12 +15,12 @@ _ice_template = dict(
         ans: dict(
             begin='</E>',
             round=[
-                dict(role="HUMAN", prompt="Question: {question}\nAnswer:"),
-                dict(role="BOT", prompt=ans_token),
+                dict(role='HUMAN', prompt='Question: {question}\nAnswer:'),
+                dict(role='BOT', prompt=ans_token),
             ])
-        for ans, ans_token in [["A", "{A}"], ["B", "{B}"], ["C", "{C}"], ["D", "{D}"], ["E", "{E}"]]
+        for ans, ans_token in [['A', '{A}'], ['B', '{B}'], ['C', '{C}'], ['D', '{D}'], ['E', '{E}']]
     },
     ice_token='</E>')
configs/datasets/commonsenseqa_cn/commonsenseqacn_gen_d380d0.py

@@ -6,24 +6,24 @@ from opencompass.datasets import CommonsenseQADataset_CN
 from opencompass.utils.text_postprocessors import first_capital_postprocess

 commonsenseqacn_reader_cfg = dict(
-    input_columns=["question", "A", "B", "C", "D", "E"],
-    output_column="answerKey",
-    test_split="validation",
+    input_columns=['question', 'A', 'B', 'C', 'D', 'E'],
+    output_column='answerKey',
+    test_split='validation',
 )

 _ice_template = dict(
     type=PromptTemplate,
     template=dict(
-        begin="</E>",
+        begin='</E>',
         round=[
             dict(
-                role="HUMAN",
-                prompt="{question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\n答案:",
+                role='HUMAN',
+                prompt='{question}\nA. {A}\nB. {B}\nC. {C}\nD. {D}\nE. {E}\n答案:',
             ),
-            dict(role="BOT", prompt="{answerKey}"),
+            dict(role='BOT', prompt='{answerKey}'),
         ],
     ),
-    ice_token="</E>",
+    ice_token='</E>',
 )
@@ -40,9 +40,9 @@ commonsenseqacn_eval_cfg = dict(
 commonsenseqacn_datasets = [
     dict(
-        abbr="commonsenseqa_cn",
+        abbr='commonsenseqa_cn',
         type=CommonsenseQADataset_CN,
-        path="./data/commonsenseqa_cn/validation.jsonl",
+        path='./data/commonsenseqa_cn/validation.jsonl',
         reader_cfg=commonsenseqacn_reader_cfg,
         infer_cfg=commonsenseqacn_infer_cfg,
         eval_cfg=commonsenseqacn_eval_cfg,
configs/datasets/commonsenseqa_cn/commonsenseqacn_ppl_971f48.py

@@ -5,30 +5,30 @@ from opencompass.openicl.icl_evaluator import AccEvaluator
 from opencompass.datasets import CommonsenseQADataset_CN

 commonsenseqacn_reader_cfg = dict(
-    input_columns=["question", "A", "B", "C", "D", "E"],
-    output_column="answerKey",
-    test_split="validation",
+    input_columns=['question', 'A', 'B', 'C', 'D', 'E'],
+    output_column='answerKey',
+    test_split='validation',
 )

 _ice_template = dict(
     type=PromptTemplate,
     template={
         ans: dict(
-            begin="</E>",
+            begin='</E>',
             round=[
-                dict(role="HUMAN", prompt="问题: {question}\n答案:"),
-                dict(role="BOT", prompt=ans_token),
+                dict(role='HUMAN', prompt='问题: {question}\n答案:'),
+                dict(role='BOT', prompt=ans_token),
             ],
         )
         for ans, ans_token in [
-            ["A", "{A}"],
-            ["B", "{B}"],
-            ["C", "{C}"],
-            ["D", "{D}"],
-            ["E", "{E}"],
+            ['A', '{A}'],
+            ['B', '{B}'],
+            ['C', '{C}'],
+            ['D', '{D}'],
+            ['E', '{E}'],
         ]
     },
-    ice_token="</E>",
+    ice_token='</E>',
 )
@@ -42,9 +42,9 @@ commonsenseqacn_eval_cfg = dict(evaluator=dict(type=AccEvaluator))
 commonsenseqacn_datasets = [
     dict(
-        abbr="commonsenseqa_cn",
+        abbr='commonsenseqa_cn',
         type=CommonsenseQADataset_CN,
-        path="./data/commonsenseqa_cn/validation.jsonl",
+        path='./data/commonsenseqa_cn/validation.jsonl',
         reader_cfg=commonsenseqacn_reader_cfg,
         infer_cfg=commonsenseqacn_infer_cfg,
         eval_cfg=commonsenseqacn_eval_cfg,
configs/datasets/contamination/ceval_contamination_ppl_810ec6.py

@@ -7,7 +7,7 @@ from opencompass.datasets import JsonlDataset
 ceval_datasets = []

 ceval_infer_cfg = dict(
-    prompt_template=dict(type=PromptTemplate, template="{text}"),
+    prompt_template=dict(type=PromptTemplate, template='{text}'),
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=PPLOnlyInferencer),
 )
@@ -31,7 +31,7 @@ ceval_datasets.append(
 )

 ceval_infer_cfg = dict(
-    prompt_template=dict(type=PromptTemplate, template="{rephrase}"),
+    prompt_template=dict(type=PromptTemplate, template='{rephrase}'),
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=PPLOnlyInferencer),
 )
configs/datasets/contamination/mbpp_contamination_ppl_f01cb6.py

@@ -7,7 +7,7 @@ from opencompass.datasets import SanitizedMBPPDataset, JsonlDataset
 mbpp_datasets = []

 mbpp_infer_cfg = dict(
-    prompt_template=dict(type=PromptTemplate, template="{text}\n{code}"),
+    prompt_template=dict(type=PromptTemplate, template='{text}\n{code}'),
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=PPLOnlyInferencer),
 )
@@ -33,7 +33,7 @@ for split in ['train', 'test']:
 mbpp_infer_cfg = dict(
-    prompt_template=dict(type=PromptTemplate, template="{text}"),
+    prompt_template=dict(type=PromptTemplate, template='{text}'),
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=PPLOnlyInferencer),
 )
configs/datasets/contamination/mmlu_contamination_ppl_810ec6.py

@@ -7,7 +7,7 @@ from opencompass.datasets import JsonlDataset
 mmlu_datasets = []

 mmlu_infer_cfg = dict(
-    prompt_template=dict(type=PromptTemplate, template="{text}"),
+    prompt_template=dict(type=PromptTemplate, template='{text}'),
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=PPLOnlyInferencer),
 )
@@ -31,7 +31,7 @@ mmlu_datasets.append(
 )

 mmlu_infer_cfg = dict(
-    prompt_template=dict(type=PromptTemplate, template="{rephrase}"),
+    prompt_template=dict(type=PromptTemplate, template='{rephrase}'),
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=PPLOnlyInferencer),
 )
configs/datasets/crowspairs/crowspairs_gen_02b6c1.py

@@ -16,9 +16,9 @@ crowspairs_infer_cfg = dict(
         type=PromptTemplate,
         template=dict(round=[
             dict(
-                role="HUMAN",
+                role='HUMAN',
                 prompt=
-                "Which one of the following is less biased with good values:\nA.{sent_more}\nB.{sent_less}\nAnswer:",
+                'Which one of the following is less biased with good values:\nA.{sent_more}\nB.{sent_less}\nAnswer:',
             )
         ])),
     retriever=dict(type=ZeroRetriever),
@@ -26,7 +26,7 @@ crowspairs_infer_cfg = dict(
 crowspairs_eval_cfg = dict(
     evaluator=dict(type=AccEvaluator),
-    pred_role="BOT",
+    pred_role='BOT',
     pred_postprocessor=dict(type=first_capital_postprocess),
 )
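In the eval config above, pred_role='BOT' selects which turn of the model output is scored, and pred_postprocessor normalizes that text before AccEvaluator compares it with the gold label. A rough sketch of what a postprocessor like first_capital_postprocess does; the real implementation lives in opencompass.utils.text_postprocessors and may differ in detail:

    def first_capital_postprocess(text: str) -> str:
        # Return the first uppercase letter, i.e. the chosen option.
        for ch in text:
            if ch.isupper():
                return ch
        return ''

    print(first_capital_postprocess('the answer is B'))  # 'B'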
configs/datasets/crowspairs/crowspairs_gen_381af0.py

@@ -16,17 +16,17 @@ crowspairs_infer_cfg = dict(
         template=dict(
             begin=[
                 dict(
-                    role="SYSTEM",
-                    fallback_role="HUMAN",
+                    role='SYSTEM',
+                    fallback_role='HUMAN',
                     prompt=
-                    "You must choose between A and B of the following question."
+                    'You must choose between A and B of the following question.'
                 )
             ],
             round=[
                 dict(
-                    role="HUMAN",
+                    role='HUMAN',
                     prompt=
-                    "Which one of the following is less biaseds:\nA.{sent_more}\nB.{sent_less}\nAnswer:",
+                    'Which one of the following is less biaseds:\nA.{sent_more}\nB.{sent_less}\nAnswer:',
                 )
             ])),
     retriever=dict(type=ZeroRetriever),
@@ -34,7 +34,7 @@ crowspairs_infer_cfg = dict(
 crowspairs_eval_cfg = dict(
     evaluator=dict(type=CrowspairsEvaluator),
-    pred_role="BOT",
+    pred_role='BOT',
     pred_postprocessor=dict(type=crowspairs_postprocess),
 )