OpenDAS / opencompass · Commits · aa2dd2b5

Unverified commit aa2dd2b5, authored May 14, 2024 by Fengzhe Zhou and committed via GitHub on May 14, 2024. Parent commit: 3dbba119.

[Format] Add config lints (#892)

The commit changes 648 files in total; this page shows 20 of them, with 30 additions and 30 deletions (+30 −30). Every change below is the lint normalizing string literals in the configs from double quotes to single quotes.
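As a rough illustration of what a quote-style lint checks (this is a sketch, not the repo's actual tooling, which is presumably a standard linter run via pre-commit), Python's tokenize module is enough to flag double-quoted string literals:

# quote_lint.py -- illustrative sketch only, not OpenCompass tooling.
import sys
import tokenize

def find_double_quoted(path):
    """Yield (row, col, text) for double-quoted string tokens in a file."""
    with tokenize.open(path) as f:
        for tok in tokenize.generate_tokens(f.readline):
            # Plain double-quoted literals only; prefixed strings
            # (r"...", f"...") are ignored to keep the sketch short.
            if tok.type == tokenize.STRING and tok.string.startswith('"'):
                yield (*tok.start, tok.string)

if __name__ == '__main__':
    for row, col, text in find_double_quoted(sys.argv[1]):
        print(f'{sys.argv[1]}:{row}:{col}: prefer single quotes: {text}')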
Changed files on this page:

configs/eval_subjective_mtbench.py (+3 −3)
configs/eval_teval.py (+1 −1)
configs/models/accessory/accessory_llama2_7b.py (+5 −5)
configs/models/accessory/accessory_mixtral_8x7b.py (+2 −2)
configs/models/accessory/accessory_sphinx_v2_1k.py (+3 −3)
configs/models/alaya/alaya.py (+1 −1)
configs/models/aquila/hf_aquilachat2_34b.py (+1 −1)
configs/models/aquila/hf_aquilachat2_34b_16k.py (+1 −1)
configs/models/aquila/hf_aquilachat2_7b.py (+1 −1)
configs/models/aquila/hf_aquilachat2_7b_16k.py (+1 −1)
configs/models/baichuan/hf_baichuan2_13b_chat.py (+1 −1)
configs/models/baichuan/hf_baichuan2_7b_chat.py (+2 −2)
configs/models/baichuan/hf_baichuan_13b_base.py (+1 −1)
configs/models/baichuan/hf_baichuan_13b_chat.py (+1 −1)
configs/models/baichuan/hf_baichuan_7b.py (+1 −1)
configs/models/bluelm/hf_bluelm_7b_chat.py (+1 −1)
configs/models/bluelm/hf_bluelm_7b_chat_32k.py (+1 −1)
configs/models/claude/claude2.py (+1 −1)
configs/models/codegeex2/hf_codegeex2_6b.py (+1 −1)
configs/models/gemini/gemini_pro.py (+1 −1)
configs/eval_subjective_mtbench.py

@@ -24,8 +24,8 @@ api_meta_template = dict(
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='\n<|im_start|>user\n', end='<|im_end|>'),
-        dict(role="BOT", begin="\n<|im_start|>assistant\n", end='<|im_end|>', generate=True),
+        dict(role='HUMAN', begin='\n<|im_start|>user\n', end='<|im_end|>'),
+        dict(role='BOT', begin='\n<|im_start|>assistant\n', end='<|im_end|>', generate=True),
     ],
 )
 # -------------Inference Stage ----------------------------------------
@@ -34,7 +34,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='qwen-7b-chat-hf',
-        path="Qwen/Qwen-7B-Chat",
+        path='Qwen/Qwen-7B-Chat',
         tokenizer_path='Qwen/Qwen-7B-Chat',
         model_kwargs=dict(
             device_map='auto',
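For context, a meta template like the one above wraps each conversation turn in ChatML-style markers; a hypothetical two-turn prompt would render roughly as follows (illustrative only, the actual assembly happens inside OpenCompass):

# Illustrative only: how the HUMAN/BOT begin/end markers compose a prompt.
user_turn = '\n<|im_start|>user\n' + 'Hello!' + '<|im_end|>'
bot_turn = '\n<|im_start|>assistant\n'  # generate=True: the model continues from here
print(user_turn + bot_turn)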
configs/eval_teval.py

@@ -16,7 +16,7 @@ meta_template_system_patches = {
     'internlm2-chat-20b-hf':
     dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
 }
-_origin_models = sum([v for k, v in locals().items() if k.endswith("_model")], [])
+_origin_models = sum([v for k, v in locals().items() if k.endswith('_model')], [])
 models = []
 for m in _origin_models:
     m = deepcopy(m)
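The `_origin_models` line changed here uses a collection idiom worth noting: it concatenates every list in the module namespace whose name ends in `_model` into one flat list. A toy equivalent (the model entries are hypothetical):

# Toy illustration of the collection idiom in eval_teval.py.
llama_model = [dict(abbr='llama-7b')]  # hypothetical entries
qwen_model = [dict(abbr='qwen-7b')]

_origin_models = sum([v for k, v in locals().items() if k.endswith('_model')], [])
print([m['abbr'] for m in _origin_models])  # ['llama-7b', 'qwen-7b']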
configs/models/accessory/accessory_llama2_7b.py

@@ -6,7 +6,7 @@ from opencompass.models import LLaMA2AccessoryModel
 models = [
     dict(
-        abbr="Accessory_llama2_7b",
+        abbr='Accessory_llama2_7b',
         type=LLaMA2AccessoryModel,
         # additional_stop_symbols=["###"], # for models tuned with chat template # noqa
@@ -18,10 +18,10 @@ models = [
         # - consolidated.00.pth
         # - params.json
         # - tokenizer.model
-        pretrained_path="path/to/Llama-2-7b/",
-        llama_type="llama",
-        llama_config="path/to/Llama-2-7b/params.json",
-        tokenizer_path="path/to/Llama-2-7b/tokenizer.model",
+        pretrained_path='path/to/Llama-2-7b/',
+        llama_type='llama',
+        llama_config='path/to/Llama-2-7b/params.json',
+        tokenizer_path='path/to/Llama-2-7b/tokenizer.model',
         with_visual=False,
         max_seq_len=4096,
         quant=False,
configs/models/accessory/accessory_mixtral_8x7b.py

@@ -6,7 +6,7 @@ from opencompass.models import LLaMA2AccessoryModel
 models = [
     dict(
-        abbr="Accessory_mixtral_8x7b",
+        abbr='Accessory_mixtral_8x7b',
         type=LLaMA2AccessoryModel,
         # additional_stop_symbols=["###"], # for models tuned with chat template # noqa
@@ -15,7 +15,7 @@ models = [
         # <begin> kwargs for accessory.MetaModel.from_pretrained
         # download from https://huggingface.co/Alpha-VLLM/MoE-Mixtral-7B-8Expert/tree/main/converted_sparse # noqa
         # see https://llama2-accessory.readthedocs.io/en/latest/projects/mixtral-8x7b.html for more details # noqa
-        pretrained_path="path/to/MoE-Mixtral-7B-8Expert/converted_sparse",
+        pretrained_path='path/to/MoE-Mixtral-7B-8Expert/converted_sparse',
         llama_type=None,  # None for automatic probe from pretrained_path
         llama_config=None,  # None for automatic probe from pretrained_path
         tokenizer_path=None,  # None for automatic probe from pretrained_path
configs/models/accessory/accessory_sphinx_v2_1k.py

@@ -6,14 +6,14 @@ from opencompass.models import LLaMA2AccessoryModel
 models = [
     dict(
-        abbr="Accessory_sphinx_v2_1k",
+        abbr='Accessory_sphinx_v2_1k',
         type=LLaMA2AccessoryModel,
-        additional_stop_symbols=["###"],  # for models tuned with chat template
+        additional_stop_symbols=['###'],  # for models tuned with chat template
         # <begin> kwargs for accessory.MetaModel.from_pretrained
         # download from https://huggingface.co/Alpha-VLLM/LLaMA2-Accessory/tree/main/finetune/mm/SPHINX/SPHINX-v2-1k # noqa
-        pretrained_path="path/to/sphinx_v2_1k",
+        pretrained_path='path/to/sphinx_v2_1k',
         llama_type=None,  # None for automatic probe from pretrained_path
         llama_config=None,  # None for automatic probe from pretrained_path
         tokenizer_path=None,  # None for automatic probe from pretrained_path
configs/models/alaya/alaya.py

@@ -5,7 +5,7 @@ models = [
     dict(
         type=AlayaLM,
         abbr='alaya-7b-hf',
-        path="DataCanvas/Alaya-7B-Base",
+        path='DataCanvas/Alaya-7B-Base',
         tokenizer_path='DataCanvas/Alaya-7B-Base',
         tokenizer_kwargs=dict(padding_side='left',
                               truncation_side='left',
configs/models/aquila/hf_aquilachat2_34b.py

@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='aquilachat2-34b-hf',
-        path="BAAI/AquilaChat2-34B",
+        path='BAAI/AquilaChat2-34B',
         tokenizer_path='BAAI/AquilaChat2-34B',
         model_kwargs=dict(
             device_map='auto',
configs/models/aquila/hf_aquilachat2_34b_16k.py

@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='aquilachat2-34b-16k-hf',
-        path="BAAI/AquilaChat2-34B-16K",
+        path='BAAI/AquilaChat2-34B-16K',
         tokenizer_path='BAAI/AquilaChat2-34B-16K',
         model_kwargs=dict(
             device_map='auto',
configs/models/aquila/hf_aquilachat2_7b.py

@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='aquilachat2-7b-hf',
-        path="BAAI/AquilaChat2-7B",
+        path='BAAI/AquilaChat2-7B',
         tokenizer_path='BAAI/AquilaChat2-7B',
         model_kwargs=dict(
             device_map='auto',
configs/models/aquila/hf_aquilachat2_7b_16k.py

@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='aquilachat2-7b-16k-hf',
-        path="BAAI/AquilaChat2-7B-16K",
+        path='BAAI/AquilaChat2-7B-16K',
         tokenizer_path='BAAI/AquilaChat2-7B-16K',
         model_kwargs=dict(
             device_map='auto',
configs/models/baichuan/hf_baichuan2_13b_chat.py

@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan2-13b-chat-hf',
-        path="baichuan-inc/Baichuan2-13B-Chat",
+        path='baichuan-inc/Baichuan2-13B-Chat',
         tokenizer_path='baichuan-inc/Baichuan2-13B-Chat',
         tokenizer_kwargs=dict(
             padding_side='left',
configs/models/baichuan/hf_baichuan2_7b_chat.py

@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan2-7b-chat-hf',
-        path="baichuan-inc/Baichuan2-7B-Chat",
+        path='baichuan-inc/Baichuan2-7B-Chat',
         tokenizer_path='baichuan-inc/Baichuan2-7B-Chat',
         tokenizer_kwargs=dict(
             padding_side='left',
configs/models/baichuan/hf_baichuan_13b_base.py

@@ -5,7 +5,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan-13b-base-hf',
-        path="baichuan-inc/Baichuan-13B-Base",
+        path='baichuan-inc/Baichuan-13B-Base',
         tokenizer_path='baichuan-inc/Baichuan-13B-Base',
         tokenizer_kwargs=dict(padding_side='left',
                               truncation_side='left',
configs/models/baichuan/hf_baichuan_13b_chat.py

@@ -5,7 +5,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan-13b-chat-hf',
-        path="baichuan-inc/Baichuan-13B-Chat",
+        path='baichuan-inc/Baichuan-13B-Chat',
         tokenizer_path='baichuan-inc/Baichuan-13B-Chat',
         tokenizer_kwargs=dict(padding_side='left',
                               truncation_side='left',
configs/models/baichuan/hf_baichuan_7b.py

@@ -5,7 +5,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan-7b-hf',
-        path="baichuan-inc/baichuan-7B",
+        path='baichuan-inc/baichuan-7B',
         tokenizer_path='baichuan-inc/baichuan-7B',
         tokenizer_kwargs=dict(padding_side='left',
                               truncation_side='left',
configs/models/bluelm/hf_bluelm_7b_chat.py

@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='bluelm-7b-chat-hf',
-        path="vivo-ai/BlueLM-7B-Chat",
+        path='vivo-ai/BlueLM-7B-Chat',
         tokenizer_path='vivo-ai/BlueLM-7B-Chat',
         model_kwargs=dict(
             device_map='auto',
configs/models/bluelm/hf_bluelm_7b_chat_32k.py

@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='bluelm-7b-chat-32k-hf',
-        path="vivo-ai/BlueLM-7B-Chat-32K",
+        path='vivo-ai/BlueLM-7B-Chat-32K',
         tokenizer_path='vivo-ai/BlueLM-7B-Chat-32K',
         model_kwargs=dict(
             device_map='auto',
configs/models/claude/claude2.py (diff not expanded on this page)

configs/models/codegeex2/hf_codegeex2_6b.py (diff not expanded on this page)
configs/models/gemini/gemini_pro.py

@@ -13,7 +13,7 @@ models = [
         type=Gemini,
         path='gemini-pro',
         key='your keys',  # The key will be obtained from Environment, but you can write down your key here as well
-        url="your url",
+        url='your url',
         meta_template=api_meta_template,
         query_per_second=16,
         max_out_len=100,
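The comment on `key` above notes that the key is normally read from the environment, with the config value as an explicit override. A hypothetical sketch of that fallback (the variable name GEMINI_API_KEY is an assumption, not taken from the repo):

# Hypothetical env-first key lookup; 'your keys' is the placeholder override.
import os
key = os.environ.get('GEMINI_API_KEY', 'your keys')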