OpenDAS / opencompass · commit 8c85edd1

[Sync] deprecate old mbpps (#1064)

Unverified commit, authored Apr 19, 2024 by Fengzhe Zhou; committed by GitHub on Apr 19, 2024.
Parent: c1724013

Changes: 95 files in this commit. This page shows 20 changed files (page 1 of 5) with 63 additions and 50 deletions (+63 −50).
Files changed on this page:

configs/eval_code_passk_repeat_dataset.py                          +2 −2
configs/eval_internlm2_chat_keyset.py                              +1 −1
configs/eval_internlm2_keyset.py                                   +1 −1
configs/models/deepseek/hf_deepseek_67b_chat.py                    +2 −2
configs/models/deepseek/hf_deepseek_7b_chat.py                     +2 −1
configs/models/deepseek/hf_deepseek_moe_16b_chat.py                +3 −3
configs/models/gemma/hf_gemma_2b_it.py                             +2 −1
configs/models/gemma/hf_gemma_7b_it.py                             +1 −1
configs/models/hf_internlm/hf_internlm2_chat_1_8b.py               +1 −1
configs/models/hf_internlm/hf_internlm2_chat_1_8b_sft.py           +1 −1
configs/models/hf_internlm/hf_internlm2_chat_20b.py                +1 −1
configs/models/hf_internlm/hf_internlm2_chat_20b_sft.py            +1 −1
configs/models/hf_internlm/hf_internlm2_chat_20b_with_system.py    +1 −1
configs/models/hf_internlm/hf_internlm2_chat_7b.py                 +1 −1
configs/models/hf_internlm/hf_internlm2_chat_7b_sft.py             +1 −1
configs/models/hf_internlm/hf_internlm2_chat_7b_with_system.py     +1 −1
configs/models/hf_internlm/lmdeploy_internlm2_chat_20b.py          +17 −13
configs/models/hf_internlm/lmdeploy_internlm2_chat_7b.py           +18 −13
configs/models/hf_llama/hf_llama2_13b_chat.py                      +3 −2
configs/models/hf_llama/hf_llama2_70b_chat.py                      +3 −2
configs/eval_code_passk_repeat_dataset.py

@@ -9,8 +9,8 @@ from opencompass.tasks import OpenICLInferTask
 with read_base():
     from .datasets.humaneval.humaneval_repeat10_gen_8e312c import humaneval_datasets
-    from .datasets.mbpp.mbpp_repeat10_gen_1e1056 import mbpp_datasets
-    from .datasets.mbpp.sanitized_mbpp_repeat10_gen_1e1056 import sanitized_mbpp_datasets
+    from .datasets.mbpp.deprecated_mbpp_repeat10_gen_1e1056 import mbpp_datasets
+    from .datasets.mbpp.deprecated_sanitized_mbpp_repeat10_gen_1e1056 import sanitized_mbpp_datasets

 datasets = []
 datasets += humaneval_datasets
configs/eval_internlm2_chat_keyset.py

@@ -8,7 +8,7 @@ with read_base():
     from .datasets.gsm8k.gsm8k_gen_1d7fe4 import gsm8k_datasets
     from .datasets.math.math_evaluatorv2_gen_cecb31 import math_datasets
     from .datasets.humaneval.humaneval_gen_8e312c import humaneval_datasets
-    from .datasets.mbpp.sanitized_mbpp_gen_1e1056 import sanitized_mbpp_datasets
+    from .datasets.mbpp.deprecated_sanitized_mbpp_gen_1e1056 import sanitized_mbpp_datasets

     from .models.hf_internlm.hf_internlm2_chat_7b import models as hf_internlm2_chat_7b_model
     from .models.hf_internlm.hf_internlm2_chat_20b import models as hf_internlm2_chat_20b_model
configs/eval_internlm2_keyset.py

@@ -7,7 +7,7 @@ with read_base():
     from .datasets.gsm8k.gsm8k_gen_1d7fe4 import gsm8k_datasets
     from .datasets.math.math_gen_265cce import math_datasets
     from .datasets.humaneval.humaneval_gen_a82cae import humaneval_datasets
-    from .datasets.mbpp.sanitized_mbpp_gen_1e1056 import sanitized_mbpp_datasets
+    from .datasets.mbpp.deprecated_sanitized_mbpp_gen_1e1056 import sanitized_mbpp_datasets

     from .models.hf_internlm.hf_internlm2_7b import models as hf_internlm2_7b_model
     from .models.hf_internlm.hf_internlm2_20b import models as hf_internlm2_20b_model
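The pattern across the three eval configs is the same: the old MBPP dataset configs are renamed with a deprecated_ prefix while the exported symbols (mbpp_datasets, sanitized_mbpp_datasets) keep their names, so only the import paths change. Below is a hedged migration sketch for out-of-tree configs that still reference the old module names; the script and its search scope are hypothetical, not part of this commit.

import re
from pathlib import Path

OLD_TO_NEW = {
    'mbpp_repeat10_gen_1e1056': 'deprecated_mbpp_repeat10_gen_1e1056',
    'sanitized_mbpp_repeat10_gen_1e1056': 'deprecated_sanitized_mbpp_repeat10_gen_1e1056',
    'sanitized_mbpp_gen_1e1056': 'deprecated_sanitized_mbpp_gen_1e1056',
}

def migrate(text: str) -> str:
    # Anchor on the package path so 'mbpp_repeat10...' is never matched as a
    # suffix of 'sanitized_mbpp_repeat10...', and so reruns stay idempotent.
    for old, new in OLD_TO_NEW.items():
        text = re.sub(rf'(\.datasets\.mbpp\.){re.escape(old)}\b', rf'\g<1>{new}', text)
    return text

for cfg in Path('configs').rglob('*.py'):  # path is illustrative
    cfg.write_text(migrate(cfg.read_text()))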
configs/models/deepseek/hf_deepseek_67b_chat.py

@@ -1,6 +1,7 @@
 from opencompass.models import HuggingFaceCausalLM

 _meta_template = dict(
+    begin='<|begin▁of▁sentence|>',
     round=[
         dict(role="HUMAN", begin='User: ', end='\n\n'),
         dict(role="BOT", begin="Assistant: ", end='<|end▁of▁sentence|>', generate=True),
@@ -12,7 +13,6 @@ models = [
         type=HuggingFaceCausalLM,
         abbr='deepseek-67b-chat-hf',
         path="deepseek-ai/deepseek-llm-67b-chat",
-        tokenizer_path='deepseek-ai/deepseek-llm-67b-chat',
         model_kwargs=dict(
             device_map='auto',
             trust_remote_code=True,
@@ -28,6 +28,6 @@ models = [
         max_seq_len=2048,
         batch_size=8,
         run_cfg=dict(num_gpus=4, num_procs=1),
-        end_str='<|end▁of▁sentence|>',
+        batch_padding=True,
     )
 ]
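For readers unfamiliar with OpenCompass meta templates: begin is emitted once at the start of the prompt, each round entry wraps one turn of its role, and the generate=True entry is left open for the model to complete. A standalone sketch of that assembly follows (plain Python; this mimics the intent of the config above, it is not OpenCompass's actual template engine).

def render(template: dict, turns: list[tuple[str, str]]) -> str:
    """Emit `begin`, then wrap each (role, text) turn with its round spec;
    a generate=True round is left open for the model to continue."""
    out = template.get('begin', '')
    specs = {r['role']: r for r in template['round']}
    for role, text in turns:
        spec = specs[role]
        out += spec.get('begin', '') + text
        if not spec.get('generate'):
            out += spec.get('end', '')
    return out

meta = dict(
    begin='<|begin▁of▁sentence|>',
    round=[
        dict(role='HUMAN', begin='User: ', end='\n\n'),
        dict(role='BOT', begin='Assistant: ', end='<|end▁of▁sentence|>', generate=True),
    ],
)
print(repr(render(meta, [('HUMAN', 'Hi'), ('BOT', '')])))
# '<|begin▁of▁sentence|>User: Hi\n\nAssistant: '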
configs/models/deepseek/hf_deepseek_7b_chat.py

@@ -1,6 +1,7 @@
 from opencompass.models import HuggingFaceCausalLM

 _meta_template = dict(
+    begin='<|begin▁of▁sentence|>',
     round=[
         dict(role="HUMAN", begin='User: ', end='\n\n'),
         dict(role="BOT", begin="Assistant: ", end='<|end▁of▁sentence|>', generate=True),
@@ -12,7 +13,6 @@ models = [
         type=HuggingFaceCausalLM,
         abbr='deepseek-7b-chat-hf',
         path="deepseek-ai/deepseek-llm-7b-chat",
-        tokenizer_path='deepseek-ai/deepseek-llm-7b-chat',
         model_kwargs=dict(
             device_map='auto',
             trust_remote_code=True,
@@ -28,5 +28,6 @@ models = [
         max_seq_len=2048,
         batch_size=8,
         run_cfg=dict(num_gpus=1, num_procs=1),
+        batch_padding=True,
     )
 ]
configs/models/deepseek/hf_deepseek_moe_16b_chat.py

@@ -1,6 +1,7 @@
 from opencompass.models import HuggingFaceCausalLM

 _meta_template = dict(
+    begin='<|begin▁of▁sentence|>',
     round=[
         dict(role="HUMAN", begin='User: ', end='\n\n'),
         dict(role="BOT", begin="Assistant: ", end='<|end▁of▁sentence|>', generate=True),
@@ -12,7 +13,6 @@ models = [
         type=HuggingFaceCausalLM,
         abbr='deepseek-moe-16b-chat-hf',
         path="deepseek-ai/deepseek-moe-16b-chat",
-        tokenizer_path='deepseek-ai/deepseek-moe-16b-chat',
         model_kwargs=dict(
             device_map='auto',
             trust_remote_code=True,
@@ -26,7 +26,7 @@ models = [
         max_out_len=100,
         max_seq_len=2048,
         batch_size=8,
-        run_cfg=dict(num_gpus=2, num_procs=1),
-        end_str='<|end▁of▁sentence|>',
+        run_cfg=dict(num_gpus=1, num_procs=1),
+        batch_padding=True,
     )
 ]
configs/models/gemma/hf_gemma_2b_it.py

@@ -5,7 +5,6 @@ _meta_template = dict(
         dict(role="HUMAN", begin='<start_of_turn>user\n', end='<end_of_turn>\n'),
         dict(role="BOT", begin="<start_of_turn>model\n", end='<end_of_turn>\n', generate=True),
     ],
-    eos_token_id=151645,
 )

 models = [
@@ -24,9 +23,11 @@ models = [
             use_fast=False,
         ),
         meta_template=_meta_template,
+        min_out_len=1,
         max_out_len=100,
         max_seq_len=2048,
         batch_size=8,
         run_cfg=dict(num_gpus=1, num_procs=1),
+        batch_padding=True,
     )
 ]
configs/models/gemma/hf_gemma_7b_it.py

@@ -5,7 +5,6 @@ _meta_template = dict(
         dict(role="HUMAN", begin='<start_of_turn>user\n', end='<end_of_turn>\n'),
         dict(role="BOT", begin="<start_of_turn>model\n", end='<end_of_turn>\n', generate=True),
     ],
-    eos_token_id=151645,
 )

 models = [
@@ -29,5 +28,6 @@ models = [
         max_seq_len=2048,
         batch_size=8,
         run_cfg=dict(num_gpus=1, num_procs=1),
+        batch_padding=True,
     )
 ]
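An aside on the Gemma hunks (my reading, not part of the commit): the removed eos_token_id=151645 looks like a copy-paste from a Qwen-style template, since 151645 is <|im_end|> in Qwen's vocabulary while Gemma's <end_of_turn> is id 107, which would explain simply dropping the field here. A quick check, assuming access to both tokenizers (Gemma's is gated):

from transformers import AutoTokenizer

# Hypothesis check only: the removed id belongs to Qwen, not Gemma.
gemma = AutoTokenizer.from_pretrained('google/gemma-2b-it')
qwen = AutoTokenizer.from_pretrained('Qwen/Qwen1.5-7B-Chat')
print(gemma.convert_tokens_to_ids('<end_of_turn>'))  # expect 107
print(qwen.convert_tokens_to_ids('<|im_end|>'))      # expect 151645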
configs/models/hf_internlm/hf_internlm2_chat_1_8b.py

@@ -6,7 +6,6 @@ _meta_template = dict(
         dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
         dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
-    eos_token_id=92542
 )

 models = [
@@ -32,5 +31,6 @@ models = [
         run_cfg=dict(num_gpus=1, num_procs=1),
         end_str='<|im_end|>',
         generation_kwargs={"eos_token_id": [2, 92542]},
+        batch_padding=True,
     )
 ]
configs/models/hf_internlm/hf_internlm2_chat_1_8b_sft.py

@@ -6,7 +6,6 @@ _meta_template = dict(
         dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
         dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
-    eos_token_id=92542
 )

 models = [
@@ -32,5 +31,6 @@ models = [
         run_cfg=dict(num_gpus=1, num_procs=1),
         end_str='<|im_end|>',
         generation_kwargs={"eos_token_id": [2, 92542]},
+        batch_padding=True,
     )
 ]
configs/models/hf_internlm/hf_internlm2_chat_20b.py

@@ -6,7 +6,6 @@ _meta_template = dict(
         dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
         dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
-    eos_token_id=92542
 )

 models = [
@@ -32,5 +31,6 @@ models = [
         run_cfg=dict(num_gpus=2, num_procs=1),
         end_str='<|im_end|>',
         generation_kwargs={"eos_token_id": [2, 92542]},
+        batch_padding=True,
     )
 ]
configs/models/hf_internlm/hf_internlm2_chat_20b_sft.py

@@ -6,7 +6,6 @@ _meta_template = dict(
         dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
         dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
-    eos_token_id=92542
 )

 models = [
@@ -32,5 +31,6 @@ models = [
         run_cfg=dict(num_gpus=2, num_procs=1),
         end_str='<|im_end|>',
         generation_kwargs={"eos_token_id": [2, 92542]},
+        batch_padding=True,
     )
 ]
configs/models/hf_internlm/hf_internlm2_chat_20b_with_system.py

@@ -7,7 +7,6 @@ _meta_template = dict(
         dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
         dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
-    eos_token_id=92542
 )

 models = [
@@ -33,5 +32,6 @@ models = [
         run_cfg=dict(num_gpus=2, num_procs=1),
         end_str='<|im_end|>',
         generation_kwargs={"eos_token_id": [2, 92542]},
+        batch_padding=True,
     )
 ]
configs/models/hf_internlm/hf_internlm2_chat_7b.py

@@ -6,7 +6,6 @@ _meta_template = dict(
         dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
         dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
-    eos_token_id=92542
 )

 models = [
@@ -32,5 +31,6 @@ models = [
         run_cfg=dict(num_gpus=1, num_procs=1),
         end_str='<|im_end|>',
         generation_kwargs={"eos_token_id": [2, 92542]},
+        batch_padding=True,
     )
 ]
configs/models/hf_internlm/hf_internlm2_chat_7b_sft.py

@@ -6,7 +6,6 @@ _meta_template = dict(
         dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
         dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
-    eos_token_id=92542
 )

 models = [
@@ -32,5 +31,6 @@ models = [
         run_cfg=dict(num_gpus=1, num_procs=1),
         end_str='<|im_end|>',
         generation_kwargs={"eos_token_id": [2, 92542]},
+        batch_padding=True,
     )
 ]
configs/models/hf_internlm/hf_internlm2_chat_7b_with_system.py

@@ -7,7 +7,6 @@ _meta_template = dict(
         dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
         dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
-    eos_token_id=92542
 )

 models = [
@@ -33,5 +32,6 @@ models = [
         run_cfg=dict(num_gpus=1, num_procs=1),
         end_str='<|im_end|>',
         generation_kwargs={"eos_token_id": [2, 92542]},
+        batch_padding=True,
     )
 ]
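All eight InternLM2 chat configs above get the same two-line change: the template-level eos_token_id goes away and batch_padding=True is added, while stopping is still handled by end_str plus the generation_kwargs that were already there. A rough standalone equivalent of those generation settings (assuming a local transformers install; the model id is taken from the sibling lmdeploy configs, and recent transformers releases accept a list of eos ids):

from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = 'internlm/internlm2-chat-7b'
tok = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)

prompt = '<|im_start|>user\nHi<|im_end|>\n<|im_start|>assistant\n'
inputs = tok(prompt, return_tensors='pt')
# Mirrors generation_kwargs={"eos_token_id": [2, 92542]} in the configs above:
# generation stops on either the base </s> (2) or <|im_end|> (92542).
out = model.generate(**inputs, max_new_tokens=100, eos_token_id=[2, 92542])
print(tok.decode(out[0][inputs['input_ids'].shape[1]:]))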
configs/models/hf_internlm/lmdeploy_internlm2_chat_20b.py

@@ -3,27 +3,31 @@ from opencompass.models.turbomind import TurboMindModel

 _meta_template = dict(
     round=[
-        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
-        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n',
-             generate=True),
+        dict(role="HUMAN", begin="<|im_start|>user\n", end="<|im_end|>\n"),
+        dict(role="BOT", begin="<|im_start|>assistant\n", end="<|im_end|>\n",
+             generate=True),
     ],
-    eos_token_id=92542)
+)

 models = [
     dict(
         type=TurboMindModel,
-        abbr='internlm2-chat-20b-turbomind',
+        abbr="internlm2-chat-20b-turbomind",
         path="internlm/internlm2-chat-20b",
         meta_template=_meta_template,
-        engine_config=dict(max_batch_size=8,
-                           session_len=210000,
-                           rope_scaling_factor=3.0,
-                           model_name="internlm2-chat-20b",
-                           tp=2),
-        gen_config=dict(top_k=1, top_p=0.8,
-                        temperature=1.0,
-                        max_new_tokens=2000,),
+        engine_config=dict(session_len=210000,
+                           max_batch_size=8,
+                           rope_scaling_factor=3.0,
+                           model_name="internlm2-chat-20b",
+                           tp=2,
+                           stop_words=[2, 92542],
+                           ),
+        gen_config=dict(top_k=1,
+                        top_p=0.8,
+                        temperature=1.0,
+                        max_new_tokens=2000,
+                        ),
         max_out_len=2000,
         max_seq_len=210000,
         batch_size=1,
configs/models/hf_internlm/lmdeploy_internlm2_chat_7b.py

@@ -3,29 +3,34 @@ from opencompass.models.turbomind import TurboMindModel

 _meta_template = dict(
     round=[
-        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
-        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n',
-             generate=True),
+        dict(role="HUMAN", begin="<|im_start|>user\n", end="<|im_end|>\n"),
+        dict(role="BOT", begin="<|im_start|>assistant\n", end="<|im_end|>\n",
+             generate=True),
     ],
-    eos_token_id=92542)
+)

 models = [
     dict(
         type=TurboMindModel,
-        abbr='internlm2-chat-7b-turbomind',
+        abbr="internlm2-chat-7b-turbomind",
         path="internlm/internlm2-chat-7b",
         meta_template=_meta_template,
-        engine_config=dict(max_batch_size=8,
-                           session_len=210000,
-                           rope_scaling_factor=2.0,
-                           model_name="internlm2-chat-7b"),
-        gen_config=dict(top_k=1, top_p=0.8,
-                        temperature=1.0,
-                        max_new_tokens=2000),
+        engine_config=dict(session_len=210000,
+                           max_batch_size=8,
+                           rope_scaling_factor=2.0,
+                           model_name="internlm2-chat-7b",
+                           tp=1,
+                           stop_words=[2, 92542],
+                           ),
+        gen_config=dict(top_k=1,
+                        top_p=0.8,
+                        temperature=1.0,
+                        max_new_tokens=2000,
+                        ),
         max_out_len=2000,
         max_seq_len=210000,
-        batch_size=8,
+        batch_size=1,
         concurrency=8,
         run_cfg=dict(num_gpus=1, num_procs=1),
     )
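Both TurboMind configs move stopping out of the meta template's eos_token_id and into stop_words=[2, 92542] on the engine config, alongside a reordering of the engine and generation kwargs. Roughly the same setup expressed through lmdeploy's own API looks like the sketch below; field names match lmdeploy 0.2-era releases as far as I can tell, and the string-form stop word stands in for the token ids the config uses.

from lmdeploy import GenerationConfig, TurbomindEngineConfig, pipeline

pipe = pipeline(
    'internlm/internlm2-chat-7b',
    backend_config=TurbomindEngineConfig(
        session_len=210000,       # long-context session, as in the config above
        max_batch_size=8,
        rope_scaling_factor=2.0,  # RoPE extrapolation for the long session
        tp=1,                     # tensor parallelism degree
    ),
)
gen = GenerationConfig(top_k=1, top_p=0.8, temperature=1.0, max_new_tokens=2000,
                       stop_words=['<|im_end|>'])  # the config passes ids [2, 92542] instead
print(pipe('Hi', gen_config=gen).text)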
configs/models/hf_llama/hf_llama2_13b_chat.py

@@ -2,8 +2,8 @@ from opencompass.models import HuggingFaceCausalLM

 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin=' [INST] ', end=' [/INST] '),
-        dict(role="BOT", begin='', end='', generate=True),
+        dict(role="HUMAN", begin='[INST] ', end=' [/INST]'),
+        dict(role="BOT", begin=' ', end=' ', generate=True),
     ],
 )
@@ -27,5 +27,6 @@ models = [
         batch_size=8,
         run_cfg=dict(num_gpus=2, num_procs=1),
         end_str='[INST]',
+        batch_padding=True,
     )
 ]
configs/models/hf_llama/hf_llama2_70b_chat.py

@@ -2,8 +2,8 @@ from opencompass.models import HuggingFaceCausalLM

 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin=' [INST] ', end=' [/INST] '),
-        dict(role="BOT", begin='', end='', generate=True),
+        dict(role="HUMAN", begin='[INST] ', end=' [/INST]'),
+        dict(role="BOT", begin=' ', end=' ', generate=True),
     ],
 )
@@ -27,5 +27,6 @@ models = [
         batch_size=8,
         run_cfg=dict(num_gpus=4, num_procs=1),
         end_str='[INST]',
+        batch_padding=True,
     )
 ]
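The Llama-2 change moves the separating spaces out of the HUMAN wrapper and into the BOT begin/end, so one rendered round matches the reference "[INST] {prompt} [/INST] {answer}" shape without a stray leading space. A quick before/after rendering sketch (hypothetical helper, not OpenCompass code):

OLD = dict(h_begin=' [INST] ', h_end=' [/INST] ', b_begin='', b_end='')
NEW = dict(h_begin='[INST] ', h_end=' [/INST]', b_begin=' ', b_end=' ')

def render(t: dict, prompt: str, answer: str) -> str:
    # One HUMAN turn followed by one BOT turn, per the templates above.
    return t['h_begin'] + prompt + t['h_end'] + t['b_begin'] + answer + t['b_end']

print(repr(render(OLD, 'Hi', 'Hello')))  # ' [INST] Hi [/INST] Hello'
print(repr(render(NEW, 'Hi', 'Hello')))  # '[INST] Hi [/INST] Hello '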