OpenDAS / opencompass · Commits · aa2dd2b5

Commit aa2dd2b5 (unverified), authored May 14, 2024 by Fengzhe Zhou, committed by GitHub on May 14, 2024

[Format] Add config lints (#892)

Parent: 3dbba119
Changes: 648
Showing 20 changed files with 81 additions and 83 deletions (+81, -83)
.pre-commit-config-zh-cn.yaml                 +2   -3
.pre-commit-config.yaml                       +2   -3
configs/api_examples/eval_api_360.py          +2   -2
configs/api_examples/eval_api_baichuan.py     +3   -3
configs/api_examples/eval_api_baidu.py        +1   -1
configs/api_examples/eval_api_bytedance.py    +3   -3
configs/api_examples/eval_api_minimax.py      +1   -1
configs/api_examples/eval_api_moonshot.py     +1   -1
configs/api_examples/eval_api_nanbeige.py     +2   -2
configs/api_examples/eval_api_pangu.py        +6   -6
configs/api_examples/eval_api_qwen.py         +1   -1
configs/api_examples/eval_api_sensetime.py    +12  -12
configs/api_examples/eval_api_xunfei.py       +7   -7
configs/api_examples/eval_api_zhipu.py        +2   -2
configs/api_examples/eval_api_zhipu_v2.py     +1   -1
configs/dataset_collections/chat_OC15.py      +1   -1
configs/datasets/ARC_c/ARC_c_clean_ppl.py     +12  -12
configs/datasets/ARC_c/ARC_c_gen_1e0de5.py    +7   -7
configs/datasets/ARC_c/ARC_c_ppl_2ef631.py    +3   -3
configs/datasets/ARC_c/ARC_c_ppl_a450bd.py    +12  -12
.pre-commit-config-zh-cn.yaml (view file @ aa2dd2b5)

@@ -45,7 +45,7 @@ repos:
             (?x)^(
                 dicts/|
                 projects/.*?/dicts/|
-                configs/
+                configs/.*?/.*\.txt
             )
       - id: check-yaml
       - id: end-of-file-fixer
@@ -53,11 +53,10 @@ repos:
             (?x)^(
                 dicts/|
                 projects/.*?/dicts/|
-                configs/
+                configs/.*?/.*\.txt
             )
       - id: requirements-txt-fixer
       - id: double-quote-string-fixer
-        exclude: configs/
       - id: check-merge-conflict
       - id: fix-encoding-pragma
         args: ["--remove"]
.pre-commit-config.yaml (view file @ aa2dd2b5)

@@ -45,7 +45,7 @@ repos:
             (?x)^(
                 dicts/|
                 projects/.*?/dicts/|
-                configs/
+                configs/.*?/.*\.txt
             )
       - id: check-yaml
       - id: end-of-file-fixer
@@ -53,11 +53,10 @@ repos:
             (?x)^(
                 dicts/|
                 projects/.*?/dicts/|
-                configs/
+                configs/.*?/.*\.txt
             )
       - id: requirements-txt-fixer
       - id: double-quote-string-fixer
-        exclude: configs/
       - id: check-merge-conflict
       - id: fix-encoding-pragma
         args: ["--remove"]
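Both pre-commit configs make the same two changes: the blanket configs/ exclusion is narrowed to configs/.*?/.*\.txt, and double-quote-string-fixer no longer skips configs/, which is what drives the quote rewrites in the Python configs below. A minimal sketch of how the narrowed exclude pattern behaves (pre-commit applies exclude regexes to repository-relative paths; the example paths below are made up for illustration):

import re

# The updated exclude pattern from the hunks above ((?x) enables verbose mode,
# so the whitespace and line breaks inside the pattern are ignored).
exclude = re.compile(r'''(?x)^(
    dicts/|
    projects/.*?/dicts/|
    configs/.*?/.*\.txt
)''')

# Plain-text resources under configs/ are still skipped by the hooks...
assert exclude.search('configs/datasets/ARC_c/some_wordlist.txt')
# ...but Python config files no longer match, so the lint hooks now run on them.
assert exclude.search('configs/api_examples/eval_api_360.py') is None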
configs/api_examples/eval_api_360.py (view file @ aa2dd2b5)

@@ -17,7 +17,7 @@ models = [
         abbr='360GPT_S2_V9',
         type=AI360GPT,
         path='360GPT_S2_V9',
-        key="xxxxxxxxxxxx",
+        key='xxxxxxxxxxxx',
         generation_kwargs={
             'temperature': 0.9,
             'max_tokens': 2048,
@@ -40,4 +40,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "./output/api_360GPT_S2_V9"
+work_dir = './output/api_360GPT_S2_V9'
\ No newline at end of file
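For readability, here is a partial sketch of the 360GPT model entry after the change, consolidated from the first hunk above (only the fields visible in the hunk; the key value is the repository's placeholder, and the import is the usual one these API example configs rely on):

# Sketch of the single-quoted model entry after the lint pass.
from opencompass.models import AI360GPT

models = [
    dict(
        abbr='360GPT_S2_V9',
        type=AI360GPT,
        path='360GPT_S2_V9',
        key='xxxxxxxxxxxx',  # placeholder API key
        generation_kwargs={
            'temperature': 0.9,
            'max_tokens': 2048,
        },
    ),
]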
configs/api_examples/eval_api_baichuan.py (view file @ aa2dd2b5)

@@ -18,8 +18,8 @@ models = [
         type=BaiChuan,
         path='Baichuan2-53B',
         api_key='xxxxxx',
-        secret_key="xxxxx",
-        url="xxxxx",
+        secret_key='xxxxx',
+        url='xxxxx',
         generation_kwargs={
             'temperature': 0.3,
             'top_p': 0.85,
@@ -41,4 +41,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "outputs/api_baichuan53b/"
+work_dir = 'outputs/api_baichuan53b/'
\ No newline at end of file
configs/api_examples/eval_api_baidu.py (view file @ aa2dd2b5)

@@ -39,4 +39,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "outputs/api_erniebot/"
+work_dir = 'outputs/api_erniebot/'
\ No newline at end of file
configs/api_examples/eval_api_bytedance.py (view file @ aa2dd2b5)

@@ -18,8 +18,8 @@ models = [
         abbr='skylark-pro-public',
         type=ByteDance,
         path='skylark-pro-public',
-        accesskey="xxxxxxx",
-        secretkey="xxxxxxx",
+        accesskey='xxxxxxx',
+        secretkey='xxxxxxx',
         url='xxxxxx',
         generation_kwargs={
             'temperature': 0.7,
@@ -41,4 +41,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "outputs/api_bytedance/"
+work_dir = 'outputs/api_bytedance/'
\ No newline at end of file
configs/api_examples/eval_api_minimax.py (view file @ aa2dd2b5)

@@ -34,4 +34,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "outputs/api_minimax/"
+work_dir = 'outputs/api_minimax/'
\ No newline at end of file
configs/api_examples/eval_api_moonshot.py (view file @ aa2dd2b5)

@@ -37,4 +37,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "outputs/api_moonshot/"
+work_dir = 'outputs/api_moonshot/'
\ No newline at end of file
configs/api_examples/eval_api_nanbeige.py (view file @ aa2dd2b5)

@@ -18,7 +18,7 @@ models = [
         abbr='nanbeige-plus',
         type=Nanbeige,
         path='nanbeige-plus',
-        key="xxxxxx",
+        key='xxxxxx',
         query_per_second=1,
         max_out_len=2048,
         batch_size=8),
@@ -33,4 +33,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "./output/nanbeige-plus"
+work_dir = './output/nanbeige-plus'
\ No newline at end of file
configs/api_examples/eval_api_pangu.py (view file @ aa2dd2b5)

@@ -17,13 +17,13 @@ dict(
         abbr='pangu',
         type=PanGu,
         path='pangu',
-        access_key="xxxxxx",
-        secret_key="xxxxxx",
-        url="xxxxxx",
+        access_key='xxxxxx',
+        secret_key='xxxxxx',
+        url='xxxxxx',
         # url of token sever, used for generate token, like "https://xxxxxx.myhuaweicloud.com/v3/auth/tokens",
-        token_url="xxxxxx",
+        token_url='xxxxxx',
         # scope-project-name, used for generate token
-        project_name="xxxxxx",
+        project_name='xxxxxx',
         query_per_second=1,
         max_out_len=2048,
         max_seq_len=2048,
@@ -39,4 +39,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "outputs/api_pangu/"
+work_dir = 'outputs/api_pangu/'
\ No newline at end of file
configs/api_examples/eval_api_qwen.py (view file @ aa2dd2b5)

@@ -37,4 +37,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "outputs/api_qwen/"
+work_dir = 'outputs/api_qwen/'
configs/api_examples/eval_api_sensetime.py (view file @ aa2dd2b5)

@@ -24,17 +24,17 @@ models = [
         max_seq_len=2048,
         batch_size=8,
         parameters={
-            "temperature": 0.8,
-            "top_p": 0.7,
-            "max_new_tokens": 1024,
-            "repetition_penalty": 1.05,
-            "know_ids": [],
-            "stream": True,
-            "user": "#*#***TestUser***#*#",
-            "knowledge_config": {
-                "control_level": "normal",
-                "knowledge_base_result": False,
-                "online_search_result": False
+            'temperature': 0.8,
+            'top_p': 0.7,
+            'max_new_tokens': 1024,
+            'repetition_penalty': 1.05,
+            'know_ids': [],
+            'stream': True,
+            'user': '#*#***TestUser***#*#',
+            'knowledge_config': {
+                'control_level': 'normal',
+                'knowledge_base_result': False,
+                'online_search_result': False
             }
         }
     )
@@ -49,4 +49,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "outputs/api_sensetime/"
+work_dir = 'outputs/api_sensetime/'
\ No newline at end of file
configs/api_examples/eval_api_xunfei.py (view file @ aa2dd2b5)

@@ -17,10 +17,10 @@ models = [
     dict(
         abbr='Spark-v1-1',
         type=XunFei,
-        appid="xxxx",
+        appid='xxxx',
         path='ws://spark-api.xf-yun.com/v1.1/chat',
-        api_secret="xxxxxxx",
-        api_key="xxxxxxx",
+        api_secret='xxxxxxx',
+        api_key='xxxxxxx',
         query_per_second=1,
         max_out_len=2048,
         max_seq_len=2048,
@@ -28,11 +28,11 @@ models = [
     dict(
         abbr='Spark-v3-1',
         type=XunFei,
-        appid="xxxx",
+        appid='xxxx',
         domain='generalv3',
         path='ws://spark-api.xf-yun.com/v3.1/chat',
-        api_secret="xxxxxxxx",
-        api_key="xxxxxxxxx",
+        api_secret='xxxxxxxx',
+        api_key='xxxxxxxxx',
         query_per_second=1,
         max_out_len=2048,
         max_seq_len=2048,
@@ -48,4 +48,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "outputs/api_xunfei/"
+work_dir = 'outputs/api_xunfei/'
\ No newline at end of file
configs/api_examples/eval_api_zhipu.py (view file @ aa2dd2b5)

@@ -29,7 +29,7 @@ models = [
         abbr='chatglm_pro',
         type=ZhiPuAI,
         path='chatglm_pro',
         key='xxxxxxxxxxxx',
         query_per_second=1,
         max_out_len=2048,
         max_seq_len=2048,
@@ -45,4 +45,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "outputs/api_zhipu/"
+work_dir = 'outputs/api_zhipu/'
\ No newline at end of file
configs/api_examples/eval_api_zhipu_v2.py (view file @ aa2dd2b5)

@@ -64,4 +64,4 @@ infer = dict(
         task=dict(type=OpenICLInferTask)),
 )
 
-work_dir = "outputs/api_zhipu_v2/"
+work_dir = 'outputs/api_zhipu_v2/'
\ No newline at end of file
configs/dataset_collections/chat_OC15.py (view file @ aa2dd2b5)

@@ -19,4 +19,4 @@ with read_base():
     from ..datasets.gpqa.gpqa_gen_4baadb import gpqa_datasets
     from ..datasets.IFEval.IFEval_gen_3321a3 import ifeval_datasets
 
-datasets = sum((v for k, v in locals().items() if k.endswith("_datasets")), [])
+datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
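The only change here is the quote style inside endswith, but the line is worth unpacking: it collects every imported *_datasets list from the module namespace and flattens them into one datasets list. A minimal self-contained sketch of that idiom (the two dataset variables below are stand-ins for illustration, not the real OpenCompass objects):

# Stand-in dataset lists; in chat_OC15.py these come from the read_base() imports.
gpqa_datasets = [dict(abbr='gpqa')]
ifeval_datasets = [dict(abbr='ifeval')]

# Gather every module-level name ending in '_datasets' and concatenate the lists.
datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
print(datasets)  # [{'abbr': 'gpqa'}, {'abbr': 'ifeval'}]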
configs/datasets/ARC_c/ARC_c_clean_ppl.py (view file @ aa2dd2b5)

@@ -12,29 +12,29 @@ ARC_c_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
         template={
-            "A":
+            'A':
             dict(
                 round=[
-                    dict(role="HUMAN", prompt="Question: {question}\nAnswer:"),
-                    dict(role="BOT", prompt="{textA}")
+                    dict(role='HUMAN', prompt='Question: {question}\nAnswer:'),
+                    dict(role='BOT', prompt='{textA}')
                 ], ),
-            "B":
+            'B':
             dict(
                 round=[
-                    dict(role="HUMAN", prompt="Question: {question}\nAnswer:"),
-                    dict(role="BOT", prompt="{textB}")
+                    dict(role='HUMAN', prompt='Question: {question}\nAnswer:'),
+                    dict(role='BOT', prompt='{textB}')
                 ], ),
-            "C":
+            'C':
             dict(
                 round=[
-                    dict(role="HUMAN", prompt="Question: {question}\nAnswer:"),
-                    dict(role="BOT", prompt="{textC}")
+                    dict(role='HUMAN', prompt='Question: {question}\nAnswer:'),
+                    dict(role='BOT', prompt='{textC}')
                 ], ),
-            "D":
+            'D':
             dict(
                 round=[
-                    dict(role="HUMAN", prompt="Question: {question}\nAnswer:"),
-                    dict(role="BOT", prompt="{textD}")
+                    dict(role='HUMAN', prompt='Question: {question}\nAnswer:'),
+                    dict(role='BOT', prompt='{textD}')
                 ], ),
         }),
     retriever=dict(type=ZeroRetriever),
configs/datasets/ARC_c/ARC_c_gen_1e0de5.py (view file @ aa2dd2b5)

@@ -6,8 +6,8 @@ from opencompass.datasets import ARCDataset
 from opencompass.utils.text_postprocessors import first_option_postprocess
 
 ARC_c_reader_cfg = dict(
-    input_columns=["question", "textA", "textB", "textC", "textD"],
-    output_column="answerKey")
+    input_columns=['question', 'textA', 'textB', 'textC', 'textD'],
+    output_column='answerKey')
 
 ARC_c_infer_cfg = dict(
     prompt_template=dict(
@@ -15,9 +15,9 @@ ARC_c_infer_cfg = dict(
         template=dict(
             round=[
                 dict(
-                    role="HUMAN",
+                    role='HUMAN',
                     prompt=
-                    "Question: {question}\nA. {textA}\nB. {textB}\nC. {textC}\nD. {textD}\nAnswer:"
+                    'Question: {question}\nA. {textA}\nB. {textB}\nC. {textC}\nD. {textD}\nAnswer:'
                 )
             ], ),
     ),
@@ -27,15 +27,15 @@ ARC_c_infer_cfg = dict(
 ARC_c_eval_cfg = dict(
     evaluator=dict(type=AccEvaluator),
-    pred_role="BOT",
+    pred_role='BOT',
     pred_postprocessor=dict(type=first_option_postprocess, options='ABCD'),
 )
 
 ARC_c_datasets = [
     dict(
-        abbr="ARC-c",
+        abbr='ARC-c',
         type=ARCDataset,
-        path="./data/ARC/ARC-c/ARC-Challenge-Dev.jsonl",
+        path='./data/ARC/ARC-c/ARC-Challenge-Dev.jsonl',
         reader_cfg=ARC_c_reader_cfg,
         infer_cfg=ARC_c_infer_cfg,
         eval_cfg=ARC_c_eval_cfg,
configs/datasets/ARC_c/ARC_c_ppl_2ef631.py (view file @ aa2dd2b5)

@@ -14,10 +14,10 @@ ARC_c_infer_cfg = dict(
         template={
             opt: dict(
                 round=[
-                    dict(role="HUMAN", prompt=f"{{question}}\nA. {{textA}}\nB. {{textB}}\nC. {{textC}}\nD. {{textD}}"),
-                    dict(role="BOT", prompt=f"Answer: {opt}"),
+                    dict(role='HUMAN', prompt=f'{{question}}\nA. {{textA}}\nB. {{textB}}\nC. {{textC}}\nD. {{textD}}'),
+                    dict(role='BOT', prompt=f'Answer: {opt}'),
                 ]
-            ) for opt in ["A", "B", "C", "D"]
+            ) for opt in ['A', 'B', 'C', 'D']
         },
     ),
     retriever=dict(type=ZeroRetriever),
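The template in this file is built with a dict comprehension: one PPL prompt variant per answer letter, where only the BOT completion differs. A small runnable sketch of that construction, using plain dicts rather than the OpenCompass template machinery:

# Doubled braces ({{question}}) survive the f-string as literal '{question}'
# placeholders; only {opt} is interpolated per answer letter.
template = {
    opt: dict(round=[
        dict(role='HUMAN',
             prompt=f'{{question}}\nA. {{textA}}\nB. {{textB}}\nC. {{textC}}\nD. {{textD}}'),
        dict(role='BOT', prompt=f'Answer: {opt}'),
    ])
    for opt in ['A', 'B', 'C', 'D']
}

print(template['B']['round'][1]['prompt'])  # -> Answer: B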
configs/datasets/ARC_c/ARC_c_ppl_a450bd.py (view file @ aa2dd2b5)

@@ -12,29 +12,29 @@ ARC_c_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
         template={
-            "A":
+            'A':
             dict(
                 round=[
-                    dict(role="HUMAN", prompt="Question: {question}\nAnswer:"),
-                    dict(role="BOT", prompt="{textA}")
+                    dict(role='HUMAN', prompt='Question: {question}\nAnswer:'),
+                    dict(role='BOT', prompt='{textA}')
                 ], ),
-            "B":
+            'B':
             dict(
                 round=[
-                    dict(role="HUMAN", prompt="Question: {question}\nAnswer:"),
-                    dict(role="BOT", prompt="{textB}")
+                    dict(role='HUMAN', prompt='Question: {question}\nAnswer:'),
+                    dict(role='BOT', prompt='{textB}')
                 ], ),
-            "C":
+            'C':
             dict(
                 round=[
-                    dict(role="HUMAN", prompt="Question: {question}\nAnswer:"),
-                    dict(role="BOT", prompt="{textC}")
+                    dict(role='HUMAN', prompt='Question: {question}\nAnswer:'),
+                    dict(role='BOT', prompt='{textC}')
                 ], ),
-            "D":
+            'D':
             dict(
                 round=[
-                    dict(role="HUMAN", prompt="Question: {question}\nAnswer:"),
-                    dict(role="BOT", prompt="{textD}")
+                    dict(role='HUMAN', prompt='Question: {question}\nAnswer:'),
+                    dict(role='BOT', prompt='{textD}')
                 ], ),
         }),
     retriever=dict(type=ZeroRetriever),