OpenDAS / opencompass · Commits

Commit aa2dd2b5 (unverified)
[Format] Add config lints (#892)
Authored May 14, 2024 by Fengzhe Zhou; committed by GitHub on May 14, 2024.
Parent: 3dbba119

Changes: 648 files in the full commit; shown here are 20 changed files with 60 additions and 60 deletions (+60, −60).
configs/models/internlm/internlm_7b.py (+2, −2)
configs/models/judge_llm/auto_j/hf_autoj_bilingual_6b.py (+4, −4)
configs/models/judge_llm/auto_j/hf_autoj_eng_13b.py (+2, −2)
configs/models/judge_llm/auto_j/hf_autoj_eng_13b_4bit.py (+4, −4)
configs/models/judge_llm/auto_j/hf_autoj_scen_classifier.py (+2, −2)
configs/models/judge_llm/judgelm/hf_judgelm_13b_v1.py (+2, −2)
configs/models/judge_llm/judgelm/hf_judgelm_33b_v1.py (+2, −2)
configs/models/judge_llm/judgelm/hf_judgelm_7b_v1.py (+2, −2)
configs/models/judge_llm/pandalm/hf_alpaca_pandalm_7b_v1.py (+2, −2)
configs/models/judge_llm/pandalm/hf_pandalm_7b_v1.py (+2, −2)
configs/models/lemur/lemur_70b_chat.py (+3, −3)
configs/models/llama/llama2_13b.py (+3, −3)
configs/models/llama/llama2_13b_chat.py (+5, −5)
configs/models/llama/llama2_70b.py (+3, −3)
configs/models/llama/llama2_70b_chat.py (+5, −5)
configs/models/llama/llama2_7b.py (+3, −3)
configs/models/llama/llama2_7b_chat.py (+5, −5)
configs/models/llama/llama_13b.py (+3, −3)
configs/models/llama/llama_30b.py (+3, −3)
configs/models/llama/llama_65b.py (+3, −3)
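Every hunk below makes the same mechanical change: double-quoted string literals in the config files are rewritten to single quotes, the style the lint standardizes on. For illustration, here is a minimal sketch of the kind of check such a config lint performs, using only Python's tokenize module. This is a simplified checker written for this summary, not the lint configuration the commit actually adds (which is not visible on this page):

import sys
import tokenize

def find_double_quoted(path):
    """Yield (lineno, token) for simple double-quoted string literals."""
    with open(path, 'rb') as f:
        for tok in tokenize.tokenize(f.readline):
            if tok.type == tokenize.STRING:
                body = tok.string.lstrip('rbufRBUF')  # skip r/b/u/f prefixes
                if body.startswith('"') and not body.startswith('"""'):
                    yield tok.start[0], tok.string

if __name__ == '__main__':
    failed = False
    for filename in sys.argv[1:]:
        for lineno, string in find_double_quoted(filename):
            print(f'{filename}:{lineno}: prefer single quotes: {string}')
            failed = True
    sys.exit(1 if failed else 0)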
configs/models/internlm/internlm_7b.py

@@ -4,9 +4,9 @@ from opencompass.models import InternLM
 models = [
     dict(
         type=InternLM,
-        path="./internData/",
+        path='./internData/',
         tokenizer_path='./internData/V7.model',
-        model_config="./internData/model_config.py",
+        model_config='./internData/model_config.py',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...
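These model configs are ordinary Python modules that build a `models` list of dicts, so the quote style is purely cosmetic. As a sketch (assuming opencompass is importable, since the config itself imports InternLM), the file can be loaded and inspected with the standard library alone; opencompass has its own config-loading machinery, and this only shows the file is plain Python:

import runpy

# Hypothetical inspection of the config above, run from the repo root.
cfg = runpy.run_path('configs/models/internlm/internlm_7b.py')
for model in cfg['models']:
    print(model['path'], model['max_seq_len'], model['batch_size'])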
configs/models/judge_llm/auto_j/hf_autoj_bilingual_6b.py

@@ -10,7 +10,7 @@ https://huggingface.co/GAIR/autoj-bilingual-6b
 models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='autoj-bilingual-6b',
-        path="GAIR/autoj-bilingual-6b",
+        path='GAIR/autoj-bilingual-6b',
         tokenizer_path='GAIR/autoj-bilingual-6b',
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
...
configs/models/judge_llm/auto_j/hf_autoj_eng_13b.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='autoj-13b',
-        path="GAIR/autoj-13b",
+        path='GAIR/autoj-13b',
         tokenizer_path='GAIR/autoj-13b',
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
...
configs/models/judge_llm/auto_j/hf_autoj_eng_13b_4bit.py

@@ -9,7 +9,7 @@ https://huggingface.co/GAIR/autoj-13b-GPTQ-4bits
 models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='autoj-13b-GPTQ-4bits',
-        path="GAIR/autoj-13b-GPTQ-4bits",
+        path='GAIR/autoj-13b-GPTQ-4bits',
         tokenizer_path='GAIR/autoj-13b-GPTQ-4bits',
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
...
configs/models/judge_llm/auto_j/hf_autoj_scen_classifier.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='autoj-scenario-classifier',
-        path="GAIR/autoj-scenario-classifier",
+        path='GAIR/autoj-scenario-classifier',
         tokenizer_path='GAIR/autoj-scenario-classifier',
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
...
configs/models/judge_llm/judgelm/hf_judgelm_13b_v1.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='judgelm-13b-v1-hf',
-        path="BAAI/JudgeLM-13B-v1.0",
+        path='BAAI/JudgeLM-13B-v1.0',
         tokenizer_path='BAAI/JudgeLM-13B-v1.0',
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
...
configs/models/judge_llm/judgelm/hf_judgelm_33b_v1.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='judgelm-33b-v1-hf',
-        path="BAAI/JudgeLM-33B-v1.0",
+        path='BAAI/JudgeLM-33B-v1.0',
         tokenizer_path='BAAI/JudgeLM-33B-v1.0',
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
...
configs/models/judge_llm/judgelm/hf_judgelm_7b_v1.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='judgelm-7b-v1-hf',
-        path="BAAI/JudgeLM-7B-v1.0",
+        path='BAAI/JudgeLM-7B-v1.0',
         tokenizer_path='BAAI/JudgeLM-7B-v1.0',
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
...
configs/models/judge_llm/pandalm/hf_alpaca_pandalm_7b_v1.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='alpaca-pandalm-7b-v1-hf',
-        path="WeOpenML/PandaLM-Alpaca-7B-v1",
+        path='WeOpenML/PandaLM-Alpaca-7B-v1',
         tokenizer_path='WeOpenML/PandaLM-Alpaca-7B-v1',
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
...
configs/models/judge_llm/pandalm/hf_pandalm_7b_v1.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='pandalm-7b-v1-hf',
-        path="WeOpenML/PandaLM-7B-v1",
+        path='WeOpenML/PandaLM-7B-v1',
         tokenizer_path='WeOpenML/PandaLM-7B-v1',
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
...
configs/models/lemur/lemur_70b_chat.py

@@ -3,8 +3,8 @@ from opencompass.models import HuggingFaceCausalLM
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='\n<|im_start|>user\n', end='<|im_end|>'),
-        dict(role="BOT", begin="\n<|im_start|>assistant\n", end='<|im_end|>', generate=True),
+        dict(role='HUMAN', begin='\n<|im_start|>user\n', end='<|im_end|>'),
+        dict(role='BOT', begin='\n<|im_start|>assistant\n', end='<|im_end|>', generate=True),
     ],
 )
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='lemur-70b-chat-v1',
-        path="OpenLemur/lemur-70b-chat-v1",
+        path='OpenLemur/lemur-70b-chat-v1',
         tokenizer_path='OpenLemur/lemur-70b-chat-v1',
         # tokenizer_kwargs=dict(
         #     padding_side='left',
...
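The `_meta_template` above wraps each conversation round in ChatML-style `<|im_start|>` / `<|im_end|>` delimiters, with `generate=True` marking the turn the model should continue. A rough sketch of how such a round template could be applied to a dialogue (a hypothetical `render` helper written for this summary, not opencompass's actual prompt-building code):

_meta_template = dict(round=[
    dict(role='HUMAN', begin='\n<|im_start|>user\n', end='<|im_end|>'),
    dict(role='BOT', begin='\n<|im_start|>assistant\n', end='<|im_end|>',
         generate=True),
])

def render(messages, template):
    """Wrap (role, text) pairs in their begin/end delimiters, then open
    the generating role's turn so the model continues from there."""
    items = {item['role']: item for item in template['round']}
    parts = [items[role]['begin'] + text + items[role]['end']
             for role, text in messages]
    gen = next(item for item in template['round'] if item.get('generate'))
    return ''.join(parts) + gen['begin']

print(render([('HUMAN', 'Hello!')], _meta_template))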
configs/models/llama/llama2_13b.py

@@ -11,10 +11,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-2-13b",
+        abbr='llama-2-13b',
         type=Llama2,
-        path="./models/llama2/llama/llama-2-13b/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-13b/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...
configs/models/llama/llama2_13b_chat.py

@@ -11,17 +11,17 @@ from opencompass.models import Llama2Chat
 api_meta_template = dict(
     round=[
-        dict(role="HUMAN", api_role="HUMAN"),
-        dict(role="BOT", api_role="BOT", generate=True),
+        dict(role='HUMAN', api_role='HUMAN'),
+        dict(role='BOT', api_role='BOT', generate=True),
     ],
 )

 models = [
     dict(
-        abbr="llama-2-13b-chat",
+        abbr='llama-2-13b-chat',
         type=Llama2Chat,
-        path="./models/llama2/llama/llama-2-13b-chat/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-13b-chat/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         meta_template=api_meta_template,
         max_out_len=100,
         max_seq_len=2048,
...
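Unlike the lemur template, this `api_meta_template` carries no begin/end strings; it only maps the framework's internal role names onto API roles. A small sketch of that mapping (a hypothetical helper for this summary, not opencompass code):

api_meta_template = dict(round=[
    dict(role='HUMAN', api_role='HUMAN'),
    dict(role='BOT', api_role='BOT', generate=True),
])

def to_api_messages(messages, template):
    """Translate (role, text) pairs into API-role message dicts."""
    role_map = {r['role']: r['api_role'] for r in template['round']}
    return [{'role': role_map[role], 'content': text}
            for role, text in messages]

print(to_api_messages([('HUMAN', 'Hi'), ('BOT', 'Hello!')],
                      api_meta_template))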
configs/models/llama/llama2_70b.py

@@ -11,10 +11,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-2-70b",
+        abbr='llama-2-70b',
         type=Llama2,
-        path="./models/llama2/llama/llama-2-70b/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-70b/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...
configs/models/llama/llama2_70b_chat.py

@@ -11,17 +11,17 @@ from opencompass.models import Llama2Chat
 api_meta_template = dict(
     round=[
-        dict(role="HUMAN", api_role="HUMAN"),
-        dict(role="BOT", api_role="BOT", generate=True),
+        dict(role='HUMAN', api_role='HUMAN'),
+        dict(role='BOT', api_role='BOT', generate=True),
     ],
 )

 models = [
     dict(
-        abbr="llama-2-70b-chat",
+        abbr='llama-2-70b-chat',
         type=Llama2Chat,
-        path="./models/llama2/llama/llama-2-70b-chat/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-70b-chat/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         meta_template=api_meta_template,
         max_out_len=100,
         max_seq_len=2048,
...
configs/models/llama/llama2_7b.py

@@ -11,10 +11,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-2-7b",
+        abbr='llama-2-7b',
         type=Llama2,
-        path="./models/llama2/llama/llama-2-7b/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-7b/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...
configs/models/llama/llama2_7b_chat.py

@@ -11,17 +11,17 @@ from opencompass.models import Llama2Chat
 api_meta_template = dict(
     round=[
-        dict(role="HUMAN", api_role="HUMAN"),
-        dict(role="BOT", api_role="BOT", generate=True),
+        dict(role='HUMAN', api_role='HUMAN'),
+        dict(role='BOT', api_role='BOT', generate=True),
     ],
 )

 models = [
     dict(
-        abbr="llama-2-7b-chat",
+        abbr='llama-2-7b-chat',
         type=Llama2Chat,
-        path="./models/llama2/llama/llama-2-7b-chat/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-7b-chat/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         meta_template=api_meta_template,
         max_out_len=100,
         max_seq_len=2048,
...
configs/models/llama/llama_13b.py

@@ -12,10 +12,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-13b",
+        abbr='llama-13b',
         type=Llama2,
-        path="./models/llama/13B/",
-        tokenizer_path="./models/llama/tokenizer.model",
+        path='./models/llama/13B/',
+        tokenizer_path='./models/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...
configs/models/llama/llama_30b.py

@@ -12,10 +12,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-30b",
+        abbr='llama-30b',
         type=Llama2,
-        path="./models/llama/30B/",
-        tokenizer_path="./models/llama/tokenizer.model",
+        path='./models/llama/30B/',
+        tokenizer_path='./models/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...
configs/models/llama/llama_65b.py

@@ -12,10 +12,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-65b",
+        abbr='llama-65b',
         type=Llama2,
-        path="./models/llama/65B/",
-        tokenizer_path="./models/llama/tokenizer.model",
+        path='./models/llama/65B/',
+        tokenizer_path='./models/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...