OpenDAS / opencompass · commit aa2dd2b5 (unverified)

[Format] Add config lints (#892)

Authored by Fengzhe Zhou on May 14, 2024; committed via GitHub on May 14, 2024. Parent commit: 3dbba119.
The commit changes 648 files in total; this page shows 20 of them, with 75 additions and 75 deletions (+75 −75), out of a listing that spans 33 pages. Every hunk on this page applies the same lint: double-quoted string literals in the config files become single-quoted (triple-quoted strings are left alone), and several files gain a missing trailing newline.
Changed files on this page:

configs/datasets/winogrande/winogrande_gen_a027b6.py (+9 −9)
configs/datasets/winogrande/winogrande_ll_c5cf57.py (+2 −2)
configs/datasets/winogrande/winogrande_ppl_55a66e.py (+1 −1)
configs/datasets/winogrande/winogrande_ppl_9307fd.py (+2 −2)
configs/datasets/xiezhi/xiezhi_gen_b86cf5.py (+14 −14)
configs/datasets/xiezhi/xiezhi_ppl_ea6bd7.py (+14 −14)
configs/datasets/z_bench/z_bench_gen_d8c84c.py (+1 −1)
configs/eval_TheoremQA.py (+1 −1)
configs/eval_bluelm_32k_lveval.py (+7 −7)
configs/eval_chat_agent.py (+4 −4)
configs/eval_chat_agent_baseline.py (+1 −1)
configs/eval_chat_cibench.py (+1 −1)
configs/eval_chat_cibench_api.py (+1 −1)
configs/eval_cibench.py (+2 −2)
configs/eval_circular.py (+3 −3)
configs/eval_code_passk.py (+1 −1)
configs/eval_code_passk_repeat_dataset.py (+8 −8)
configs/eval_codeagent.py (+1 −1)
configs/eval_ds1000_interpreter.py (+1 −1)
configs/eval_hf_llama2.py (+1 −1)
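The lint configuration itself is not among the files shown on this page. In spirit, the rule flags double-quoted string literals while leaving triple-quoted strings alone; a minimal standalone checker in that vein (an illustrative sketch, not the project's actual tooling):

# quote_lint.py -- illustrative sketch only, not OpenCompass tooling:
# report double-quoted string literals, the pattern this commit rewrites.
import sys
import tokenize

def find_double_quoted(path):
    """Yield (line, col, text) for double-quoted, non-triple-quoted strings."""
    with open(path, 'rb') as f:  # tokenize wants bytes for encoding detection
        for tok in tokenize.tokenize(f.readline):
            if tok.type == tokenize.STRING:
                body = tok.string.lstrip('frbuFRBU')  # drop f/r/b/u prefixes
                if body.startswith('"') and not body.startswith('"""'):
                    yield tok.start[0], tok.start[1], tok.string

if __name__ == '__main__':
    for filename in sys.argv[1:]:
        for line, col, text in find_double_quoted(filename):
            print(f'{filename}:{line}:{col}: prefer single quotes: {text}')

In practice a pre-commit hook or flake8 plugin would fill this role; the script above only demonstrates what the rule checks.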
configs/datasets/winogrande/winogrande_gen_a027b6.py

@@ -6,20 +6,20 @@ from opencompass.datasets import winograndeDataset_V2
 from opencompass.utils.text_postprocessors import first_option_postprocess

 winogrande_reader_cfg = dict(
-    input_columns=["opt1", "opt2"],
-    output_column="answer",
+    input_columns=['opt1', 'opt2'],
+    output_column='answer',
 )

 winogrande_eval_cfg = dict(
     evaluator=dict(type=AccEvaluator),
-    pred_role="BOT",
+    pred_role='BOT',
     pred_postprocessor=dict(type=first_option_postprocess, options='AB'),
 )

 _winogrande_prompt = dict(
-    prompt_1="Which of the following is a good sentence:\nA. {opt1}\nB. {opt2}\nAnswer:",
-    prompt_2="Which is a good sentence out of the following:\nA. {opt1}\nB. {opt2}\nAnswer:",
-    prompt_3="Can you identify a good sentence from the following:\nA. {opt1}\nB. {opt2}\nAnswer:",
+    prompt_1='Which of the following is a good sentence:\nA. {opt1}\nB. {opt2}\nAnswer:',
+    prompt_2='Which is a good sentence out of the following:\nA. {opt1}\nB. {opt2}\nAnswer:',
+    prompt_3='Can you identify a good sentence from the following:\nA. {opt1}\nB. {opt2}\nAnswer:',
 )

 winogrande_datasets = []
@@ -28,14 +28,14 @@ for _choice in _winogrande_prompt:
         dict(
             abbr='winogrande_' + _choice,
             type=winograndeDataset_V2,
-            path="./data/winogrande",
+            path='./data/winogrande',
             reader_cfg=winogrande_reader_cfg,
             infer_cfg=dict(
                 prompt_template=dict(
                     type=PromptTemplate,
                     template=dict(round=[
                         dict(
-                            role="HUMAN",
+                            role='HUMAN',
                             prompt=_winogrande_prompt[_choice]),
                     ]),
@@ -46,4 +46,4 @@ for _choice in _winogrande_prompt:
             eval_cfg=winogrande_eval_cfg),
     )

-del _choice
\ No newline at end of file
+del _choice
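For context, each `prompt_N` entry above is a plain format string that `PromptTemplate` fills per dataset item. Rendered by hand with `str.format` (sample sentences invented for illustration), the first variant expands to:

prompt_1 = ('Which of the following is a good sentence:\n'
            'A. {opt1}\nB. {opt2}\nAnswer:')
print(prompt_1.format(opt1='The trophy fits in the suitcase.',
                      opt2='The suitcase fits in the trophy.'))
# Which of the following is a good sentence:
# A. The trophy fits in the suitcase.
# B. The suitcase fits in the trophy.
# Answer: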
configs/datasets/winogrande/winogrande_ll_c5cf57.py

@@ -13,8 +13,8 @@ winogrande_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
         template={
-            1: "{opt1}",
-            2: "{opt2}",
+            1: '{opt1}',
+            2: '{opt2}',
         }
     ),
     retriever=dict(type=ZeroRetriever),
configs/datasets/winogrande/winogrande_ppl_55a66e.py

@@ -18,7 +18,7 @@ winogrande_infer_cfg = dict(
         type=PromptTemplate,
         template={
             i: dict(round=[
-                dict(role="HUMAN", prompt=f"Good sentence: {{opt{i}}}"),
+                dict(role='HUMAN', prompt=f'Good sentence: {{opt{i}}}'),
             ])
             for i in range(1, 3)
         }),
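A note on the doubled braces in the f-string above: only `{i}` is substituted when the config module is loaded; `{{opt{i}}}` collapses to the literal placeholder `{opt1}` or `{opt2}`, which the template engine fills later. A quick check:

templates = {i: f'Good sentence: {{opt{i}}}' for i in range(1, 3)}
print(templates)
# {1: 'Good sentence: {opt1}', 2: 'Good sentence: {opt2}'}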
configs/datasets/winogrande/winogrande_ppl_9307fd.py

@@ -17,8 +17,8 @@ winogrande_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
         template={
-            1: "Good sentence: {opt1}",
-            2: "Good sentence: {opt2}",
+            1: 'Good sentence: {opt1}',
+            2: 'Good sentence: {opt2}',
         }),
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=PPLInferencer))
configs/datasets/xiezhi/xiezhi_gen_b86cf5.py

@@ -6,44 +6,44 @@ from opencompass.utils.text_postprocessors import first_capital_postprocess

 xiezhi_datasets = []

-for split in ["spec_eng", "spec_chn", "inter_eng", "inter_chn"]:
+for split in ['spec_eng', 'spec_chn', 'inter_eng', 'inter_chn']:
     if 'chn' in split:
-        q_hint, a_hint = "题目", "答案"
+        q_hint, a_hint = '题目', '答案'
     else:
-        q_hint, a_hint = "Question", "Answer"
+        q_hint, a_hint = 'Question', 'Answer'

     xiezhi_reader_cfg = dict(
-        input_columns=["question", "A", "B", "C", "D", "labels"],
-        output_column="answer",
-        train_split="train",
+        input_columns=['question', 'A', 'B', 'C', 'D', 'labels'],
+        output_column='answer',
+        train_split='train',
         test_split='test',
     )

     xiezhi_infer_cfg = dict(
         ice_template=dict(
             type=PromptTemplate,
             template=dict(
-                begin="</E>",
+                begin='</E>',
                 round=[
-                    dict(role="HUMAN", prompt=f"{q_hint}: {{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n{a_hint}:"),
-                    dict(role="BOT", prompt="{answer}"),
+                    dict(role='HUMAN', prompt=f'{q_hint}: {{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n{a_hint}:'),
+                    dict(role='BOT', prompt='{answer}'),
                 ]
             ),
-            ice_token="</E>",
+            ice_token='</E>',
         ),
         retriever=dict(type=XiezhiRetriever, ice_num=3),
         inferencer=dict(type=GenInferencer),
     )

     xiezhi_eval_cfg = dict(
         evaluator=dict(type=AccEvaluator),
-        pred_role="BOT",
+        pred_role='BOT',
         pred_postprocessor=dict(type=first_capital_postprocess))

     xiezhi_datasets.append(
         dict(
             type=XiezhiDataset,
-            abbr=f"xiezhi-{split}",
-            path="./data/xiezhi/",
-            name="xiezhi_" + split,
+            abbr=f'xiezhi-{split}',
+            path='./data/xiezhi/',
+            name='xiezhi_' + split,
             reader_cfg=xiezhi_reader_cfg,
             infer_cfg=xiezhi_infer_cfg,
             eval_cfg=xiezhi_eval_cfg,
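The `ice_token='</E>'` marker above is where retrieved in-context examples ("ice") are spliced in; `XiezhiRetriever` supplies `ice_num=3` demonstrations per question. In spirit (a simplified sketch; the real splicing is handled by the OpenICL machinery, and the demonstration text here is invented):

template = '</E>Question: {question}\nAnswer:'
ice = 'Question: 1 + 1 = ?\nAnswer: 2\n\n'   # one rendered demonstration
# Splice three demonstrations where the ice token sits, then fill the item.
prompt = template.replace('</E>', ice * 3).format(question='2 + 2 = ?')
print(prompt)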
configs/datasets/xiezhi/xiezhi_ppl_ea6bd7.py

@@ -5,16 +5,16 @@ from opencompass.datasets import XiezhiDataset, XiezhiRetriever

 xiezhi_datasets = []

-for split in ["spec_eng", "spec_chn", "inter_eng", "inter_chn"]:
+for split in ['spec_eng', 'spec_chn', 'inter_eng', 'inter_chn']:
     if 'chn' in split:
-        q_hint, a_hint = "题目", "答案"
+        q_hint, a_hint = '题目', '答案'
     else:
-        q_hint, a_hint = "Question", "Answer"
+        q_hint, a_hint = 'Question', 'Answer'

     xiezhi_reader_cfg = dict(
-        input_columns=["question", "A", "B", "C", "D", "labels"],
-        output_column="answer",
-        train_split="train",
+        input_columns=['question', 'A', 'B', 'C', 'D', 'labels'],
+        output_column='answer',
+        train_split='train',
         test_split='test',
     )

     xiezhi_infer_cfg = dict(
@@ -22,14 +22,14 @@ for split in ["spec_eng", "spec_chn", "inter_eng", "inter_chn"]:
             type=PromptTemplate,
             template={
                 answer: dict(
-                    begin="</E>",
+                    begin='</E>',
                     round=[
-                        dict(role="HUMAN", prompt=f"{q_hint}: {{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}"),
-                        dict(role="BOT", prompt=f"{a_hint}: {answer}"),
+                        dict(role='HUMAN', prompt=f'{q_hint}: {{question}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}'),
+                        dict(role='BOT', prompt=f'{a_hint}: {answer}'),
                     ])
-                for answer in ["A", "B", "C", "D"]
+                for answer in ['A', 'B', 'C', 'D']
             },
-            ice_token="</E>",
+            ice_token='</E>',
         ),
         retriever=dict(type=XiezhiRetriever, ice_num=3),
         inferencer=dict(type=PPLInferencer),
@@ -40,9 +40,9 @@ for split in ["spec_eng", "spec_chn", "inter_eng", "inter_chn"]:
     xiezhi_datasets.append(
         dict(
             type=XiezhiDataset,
-            abbr=f"xiezhi-{split}",
-            path="./data/xiezhi/",
-            name="xiezhi_" + split,
+            abbr=f'xiezhi-{split}',
+            path='./data/xiezhi/',
+            name='xiezhi_' + split,
             reader_cfg=xiezhi_reader_cfg,
             infer_cfg=xiezhi_infer_cfg,
             eval_cfg=xiezhi_eval_cfg,
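Unlike the gen variant, this config keys the template by candidate answer, so `PPLInferencer` builds one complete prompt per choice and scores each. Conceptually, the prediction is the candidate whose completion receives the lowest perplexity (the scores below are made up for illustration):

candidates = ['A', 'B', 'C', 'D']
ppl = {'A': 12.4, 'B': 9.1, 'C': 15.0, 'D': 11.7}  # hypothetical scores
prediction = min(candidates, key=ppl.get)           # lowest perplexity wins
print(prediction)  # 'B'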
configs/datasets/z_bench/z_bench_gen_d8c84c.py

@@ -12,7 +12,7 @@ z_bench_reader_cfg = dict(
 z_bench_infer_cfg = dict(
     prompt_template=dict(
         type=PromptTemplate,
-        template=dict(round=[dict(role="HUMAN", prompt="{text}")]),
+        template=dict(round=[dict(role='HUMAN', prompt='{text}')]),
     ),
     retriever=dict(type=ZeroRetriever),
     inferencer=dict(type=GenInferencer))
configs/eval_TheoremQA.py

@@ -8,7 +8,7 @@ with read_base():
     from .datasets.TheoremQA.TheoremQA_5shot_gen_6f0af8 import TheoremQA_datasets as datasets

-models = sum([v for k, v in locals().items() if k.endswith("_model")], [])
+models = sum([v for k, v in locals().items() if k.endswith('_model')], [])

 work_dir = 'outputs/TheoremQA-5shot'
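The `sum(..., [])` line gathers every list bound to a name ending in `_model` and flattens them into one list; the `locals()` call sits in the comprehension's outermost iterable, which Python evaluates in the enclosing module scope, so it sees the names pulled in by `read_base()`. A standalone demonstration (variable names invented):

llama_model = [dict(abbr='llama-7b')]      # hypothetical config lists
mistral_model = [dict(abbr='mistral-7b')]
models = sum([v for k, v in locals().items() if k.endswith('_model')], [])
print([m['abbr'] for m in models])  # ['llama-7b', 'mistral-7b']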
configs/eval_bluelm_32k_lveval.py

@@ -6,11 +6,11 @@ with read_base():
     from .summarizers.lveval import summarizer

-models[0]["path"] = "/path/to/your/huggingface_models/BlueLM-7B-Chat-32K"
-models[0]["tokenizer_path"] = "/path/to/your/huggingface_models/BlueLM-7B-Chat-32K"
-models[0]["max_seq_len"] = 32768
-models[0]["generation_kwargs"] = dict(do_sample=False)
-models[0]["mode"] = "mid"  # truncate in the middle
+models[0]['path'] = '/path/to/your/huggingface_models/BlueLM-7B-Chat-32K'
+models[0]['tokenizer_path'] = '/path/to/your/huggingface_models/BlueLM-7B-Chat-32K'
+models[0]['max_seq_len'] = 32768
+models[0]['generation_kwargs'] = dict(do_sample=False)
+models[0]['mode'] = 'mid'  # truncate in the middle
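`mode='mid'` tells the model wrapper to truncate over-long inputs from the middle, keeping the head and tail; for long-context tasks this preserves both the leading instructions and the final question. A sketch of the idea on a token list (assumed behavior, not the actual implementation):

def truncate_mid(tokens, max_len):
    # Keep the first and last portions; drop the middle.
    if len(tokens) <= max_len:
        return tokens
    half = max_len // 2
    return tokens[:half] + tokens[len(tokens) - (max_len - half):]

print(truncate_mid(list(range(10)), 6))  # [0, 1, 2, 7, 8, 9]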
configs/eval_chat_agent.py

@@ -28,9 +28,9 @@ def solution():
     protocol=dict(
         type=ReActProtocol,
-        action=dict(role="ACTION", begin="Tool:", end="\n"),
-        action_input=dict(role="ARGS", begin="Tool Input:", end="\n"),
-        finish=dict(role="FINISH", begin="FinalAnswer:", end="\n"),
+        action=dict(role='ACTION', begin='Tool:', end='\n'),
+        action_input=dict(role='ARGS', begin='Tool Input:', end='\n'),
+        finish=dict(role='FINISH', begin='FinalAnswer:', end='\n'),
         call_protocol=system_prompt,
     )
@@ -61,4 +61,4 @@ infer = dict(
         type=LocalRunner,
         max_num_workers=16,
         task=dict(type=OpenICLInferTask)),
-)
\ No newline at end of file
+)
configs/eval_chat_agent_baseline.py

@@ -34,4 +34,4 @@ infer = dict(
         type=LocalRunner,
         max_num_workers=16,
         task=dict(type=OpenICLInferTask)),
-)
\ No newline at end of file
+)
configs/eval_chat_cibench.py

@@ -90,4 +90,4 @@ infer = dict(
         type=LocalRunner,
         max_num_workers=16,
         task=dict(type=OpenICLInferTask)),
-)
\ No newline at end of file
+)
configs/eval_chat_cibench_api.py

@@ -93,4 +93,4 @@ infer = dict(
         type=LocalRunner,
         max_num_workers=16,
         task=dict(type=OpenICLInferTask)),
-)
\ No newline at end of file
+)
configs/eval_cibench.py

@@ -10,7 +10,7 @@ from lagent.agents.react import ReActProtocol
 with read_base():
     from .datasets.CIBench.CIBench_gen_eb42f9 import cibench_datasets as datasets

 FORCE_STOP_PROMPT_EN = """You should directly give results based on history information."""

 FEWSHOT_INSTRUCTION = """\
@@ -75,6 +75,6 @@ models = [
 infer = dict(
     partitioner=dict(type=SizePartitioner, max_task_size=50, gen_task_coef=1),
     runner=dict(
-        type=SlurmRunner, max_num_workers=8, retry=2,
+        type=SlurmRunner, max_num_workers=8, retry=2,
         task=dict(type=OpenICLInferTask)),
 )
configs/eval_circular.py

@@ -41,15 +41,15 @@ for ds, t in [
         d['circular_patterns'] = 'circular'

-datasets = sum([v for k, v in locals().items() if k.endswith("_datasets") or k == 'datasets'], [])
-models = sum([v for k, v in locals().items() if k.endswith("_model")], [])
+datasets = sum([v for k, v in locals().items() if k.endswith('_datasets') or k == 'datasets'], [])
+models = sum([v for k, v in locals().items() if k.endswith('_model')], [])

 # config summarizer
 other_summary_groups = [
     {'name': 'average',
      'subsets': ['ceval', 'mmlu', 'cmmlu', 'hellaswag', 'ARC-e', 'ARC-c',
                  'commonsense_qa', 'openbookqa_fact', 'race-middle', 'race-high']},
 ]
-origin_summary_groups = sum([v for k, v in locals().items() if k.endswith("_summary_groups")], [])
+origin_summary_groups = sum([v for k, v in locals().items() if k.endswith('_summary_groups')], [])
 new_summary_groups = []
 for item in origin_summary_groups:
     new_summary_groups.append(
configs/eval_code_passk.py

@@ -21,7 +21,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='CodeLlama-7b-Python',
-        path="codellama/CodeLlama-7b-Python-hf",
+        path='codellama/CodeLlama-7b-Python-hf',
         tokenizer_path='codellama/CodeLlama-7b-Python-hf',
         tokenizer_kwargs=dict(padding_side='left',
configs/eval_code_passk_repeat_dataset.py

@@ -19,26 +19,26 @@ datasets += sanitized_mbpp_datasets

 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin="<|User|>:", end="\n"),
-        dict(role="BOT", begin="<|Bot|>:", end="<eoa>\n", generate=True),
+        dict(role='HUMAN', begin='<|User|>:', end='\n'),
+        dict(role='BOT', begin='<|Bot|>:', end='<eoa>\n', generate=True),
     ],
 )

 models = [
     dict(
-        abbr="internlm-chat-7b-hf-v11",
+        abbr='internlm-chat-7b-hf-v11',
         type=HuggingFaceCausalLM,
-        path="internlm/internlm-chat-7b-v1_1",
-        tokenizer_path="internlm/internlm-chat-7b-v1_1",
+        path='internlm/internlm-chat-7b-v1_1',
+        tokenizer_path='internlm/internlm-chat-7b-v1_1',
         tokenizer_kwargs=dict(
-            padding_side="left",
-            truncation_side="left",
+            padding_side='left',
+            truncation_side='left',
             use_fast=False,
             trust_remote_code=True,
         ),
         max_seq_len=2048,
         meta_template=_meta_template,
-        model_kwargs=dict(trust_remote_code=True, device_map="auto"),
+        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
         generation_kwargs=dict(
             do_sample=True,
             top_p=0.95,
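The `_meta_template` above wraps each dialogue turn in the model's chat markers; generation begins after the BOT `begin` marker and stops at `<eoa>`. A hand-rendered turn (a simplified sketch of the framing; the message text is invented):

rounds = [
    dict(role='HUMAN', begin='<|User|>:', end='\n'),
    dict(role='BOT', begin='<|Bot|>:', end='<eoa>\n', generate=True),
]
messages = {'HUMAN': 'Write a function that reverses a string.'}
prompt = ''
for r in rounds:
    prompt += r['begin']                      # role marker
    if not r.get('generate'):
        prompt += messages[r['role']] + r['end']
print(repr(prompt))
# '<|User|>:Write a function that reverses a string.\n<|Bot|>:'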
configs/eval_codeagent.py

@@ -30,7 +30,7 @@ models = [
         type=CodeAgent,
         llm=dict(
             type=HuggingFaceCausalLM,
-            path="WizardLM/WizardCoder-Python-13B-V1.0",
+            path='WizardLM/WizardCoder-Python-13B-V1.0',
             tokenizer_path='WizardLM/WizardCoder-Python-13B-V1.0',
             tokenizer_kwargs=dict(padding_side='left',
...
configs/eval_ds1000_interpreter.py
View file @
aa2dd2b5
...
...
@@ -41,4 +41,4 @@ infer = dict(
runner
=
dict
(
type
=
LocalRunner
,
max_num_workers
=
16
,
task
=
dict
(
type
=
OpenICLInferTask
)),
)
\ No newline at end of file
)
configs/eval_hf_llama2.py

@@ -14,5 +14,5 @@ with read_base():
     from .models.hf_llama.hf_llama2_7b import models
     from .summarizers.example import summarizer

-datasets = sum([v for k, v in locals().items() if k.endswith("_datasets") or k == 'datasets'], [])
+datasets = sum([v for k, v in locals().items() if k.endswith('_datasets') or k == 'datasets'], [])

 work_dir = './outputs/llama2/'