OpenDAS / opencompass · Commit aa2dd2b5

[Format] Add config lints (#892)

Unverified commit aa2dd2b5, authored May 14, 2024 by Fengzhe Zhou and committed via GitHub on May 14, 2024. Parent commit: 3dbba119.
648 files changed in the full commit; the 20 files shown on this page account for 60 additions and 60 deletions (+60 −60).
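Every visible hunk on this page makes the same two mechanical fixes: double-quoted string literals are rewritten to single quotes, and files that lacked a trailing newline gain one. For orientation, this is the kind of rewrite produced by the standard pre-commit hooks double-quote-string-fixer and end-of-file-fixer; the sketch below is a hypothetical configuration, an assumption rather than something this page confirms about the PR's actual lint setup:

    # .pre-commit-config.yaml (hypothetical sketch; the page does not show the PR's actual config)
    repos:
      - repo: https://github.com/pre-commit/pre-commit-hooks
        rev: v4.5.0                        # assumption: any recent tag of pre-commit-hooks
        hooks:
          - id: double-quote-string-fixer  # rewrites "..." string literals to '...'
            files: ^configs/               # assumption: scoped to the configs tree shown here
          - id: end-of-file-fixer          # adds the missing final newline
            files: ^configs/

Running pre-commit with a setup like this would regenerate changes of exactly the shape seen in the diffs below.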
Files changed on this page:

configs/models/internlm/internlm_7b.py                        +2 −2
configs/models/judge_llm/auto_j/hf_autoj_bilingual_6b.py      +4 −4
configs/models/judge_llm/auto_j/hf_autoj_eng_13b.py           +2 −2
configs/models/judge_llm/auto_j/hf_autoj_eng_13b_4bit.py      +4 −4
configs/models/judge_llm/auto_j/hf_autoj_scen_classifier.py   +2 −2
configs/models/judge_llm/judgelm/hf_judgelm_13b_v1.py         +2 −2
configs/models/judge_llm/judgelm/hf_judgelm_33b_v1.py         +2 −2
configs/models/judge_llm/judgelm/hf_judgelm_7b_v1.py          +2 −2
configs/models/judge_llm/pandalm/hf_alpaca_pandalm_7b_v1.py   +2 −2
configs/models/judge_llm/pandalm/hf_pandalm_7b_v1.py          +2 −2
configs/models/lemur/lemur_70b_chat.py                        +3 −3
configs/models/llama/llama2_13b.py                            +3 −3
configs/models/llama/llama2_13b_chat.py                       +5 −5
configs/models/llama/llama2_70b.py                            +3 −3
configs/models/llama/llama2_70b_chat.py                       +5 −5
configs/models/llama/llama2_7b.py                             +3 −3
configs/models/llama/llama2_7b_chat.py                        +5 −5
configs/models/llama/llama_13b.py                             +3 −3
configs/models/llama/llama_30b.py                             +3 −3
configs/models/llama/llama_65b.py                             +3 −3
configs/models/internlm/internlm_7b.py

@@ -4,9 +4,9 @@ from opencompass.models import InternLM
 models = [
     dict(
         type=InternLM,
-        path="./internData/",
+        path='./internData/',
         tokenizer_path='./internData/V7.model',
-        model_config="./internData/model_config.py",
+        model_config='./internData/model_config.py',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
configs/models/judge_llm/auto_j/hf_autoj_bilingual_6b.py

 from opencompass.models import HuggingFaceCausalLM

 '''
 This is a bilingual 6B version of Auto-J.
 It is trained on both the original training data
 and its Chinese translation, which can be find in
 https://huggingface.co/GAIR/autoj-bilingual-6b
 '''

 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='autoj-bilingual-6b',
-    path="GAIR/autoj-bilingual-6b",
+    path='GAIR/autoj-bilingual-6b',
     tokenizer_path='GAIR/autoj-bilingual-6b',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
configs/models/judge_llm/auto_j/hf_autoj_eng_13b.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='autoj-13b',
-    path="GAIR/autoj-13b",
+    path='GAIR/autoj-13b',
     tokenizer_path='GAIR/autoj-13b',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]
configs/models/judge_llm/auto_j/hf_autoj_eng_13b_4bit.py

 from opencompass.models import HuggingFaceCausalLM

 '''
 #This is a 4bits quantized version of Auto-J by using AutoGPTQ,
 which is available on huggingface-hub:
 https://huggingface.co/GAIR/autoj-13b-GPTQ-4bits
 '''

 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='autoj-13b-GPTQ-4bits',
-    path="GAIR/autoj-13b-GPTQ-4bits",
+    path='GAIR/autoj-13b-GPTQ-4bits',
     tokenizer_path='GAIR/autoj-13b-GPTQ-4bits',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -20,4 +20,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]
configs/models/judge_llm/auto_j/hf_autoj_scen_classifier.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='autoj-scenario-classifier',
-    path="GAIR/autoj-scenario-classifier",
+    path='GAIR/autoj-scenario-classifier',
     tokenizer_path='GAIR/autoj-scenario-classifier',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]
configs/models/judge_llm/judgelm/hf_judgelm_13b_v1.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='judgelm-13b-v1-hf',
-    path="BAAI/JudgeLM-13B-v1.0",
+    path='BAAI/JudgeLM-13B-v1.0',
     tokenizer_path='BAAI/JudgeLM-13B-v1.0',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]
configs/models/judge_llm/judgelm/hf_judgelm_33b_v1.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='judgelm-33b-v1-hf',
-    path="BAAI/JudgeLM-33B-v1.0",
+    path='BAAI/JudgeLM-33B-v1.0',
     tokenizer_path='BAAI/JudgeLM-33B-v1.0',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=4, num_procs=1),
-)]
\ No newline at end of file
+)]
configs/models/judge_llm/judgelm/hf_judgelm_7b_v1.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='judgelm-7b-v1-hf',
-    path="BAAI/JudgeLM-7B-v1.0",
+    path='BAAI/JudgeLM-7B-v1.0',
     tokenizer_path='BAAI/JudgeLM-7B-v1.0',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]
configs/models/judge_llm/pandalm/hf_alpaca_pandalm_7b_v1.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='alpaca-pandalm-7b-v1-hf',
-    path="WeOpenML/PandaLM-Alpaca-7B-v1",
+    path='WeOpenML/PandaLM-Alpaca-7B-v1',
     tokenizer_path='WeOpenML/PandaLM-Alpaca-7B-v1',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]
configs/models/judge_llm/pandalm/hf_pandalm_7b_v1.py

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='pandalm-7b-v1-hf',
-    path="WeOpenML/PandaLM-7B-v1",
+    path='WeOpenML/PandaLM-7B-v1',
     tokenizer_path='WeOpenML/PandaLM-7B-v1',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]
configs/models/lemur/lemur_70b_chat.py

@@ -3,8 +3,8 @@ from opencompass.models import HuggingFaceCausalLM
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='\n<|im_start|>user\n', end='<|im_end|>'),
-        dict(role="BOT", begin="\n<|im_start|>assistant\n", end='<|im_end|>', generate=True),
+        dict(role='HUMAN', begin='\n<|im_start|>user\n', end='<|im_end|>'),
+        dict(role='BOT', begin='\n<|im_start|>assistant\n', end='<|im_end|>', generate=True),
     ],
 )
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='lemur-70b-chat-v1',
-        path="OpenLemur/lemur-70b-chat-v1",
+        path='OpenLemur/lemur-70b-chat-v1',
         tokenizer_path='OpenLemur/lemur-70b-chat-v1',
         # tokenizer_kwargs=dict(
         #     padding_side='left',
configs/models/llama/llama2_13b.py

@@ -11,10 +11,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-2-13b",
+        abbr='llama-2-13b',
         type=Llama2,
-        path="./models/llama2/llama/llama-2-13b/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-13b/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
configs/models/llama/llama2_13b_chat.py

@@ -11,17 +11,17 @@ from opencompass.models import Llama2Chat
 api_meta_template = dict(
     round=[
-        dict(role="HUMAN", api_role="HUMAN"),
-        dict(role="BOT", api_role="BOT", generate=True),
+        dict(role='HUMAN', api_role='HUMAN'),
+        dict(role='BOT', api_role='BOT', generate=True),
     ],
 )

 models = [
     dict(
-        abbr="llama-2-13b-chat",
+        abbr='llama-2-13b-chat',
         type=Llama2Chat,
-        path="./models/llama2/llama/llama-2-13b-chat/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-13b-chat/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         meta_template=api_meta_template,
         max_out_len=100,
         max_seq_len=2048,
configs/models/llama/llama2_70b.py

@@ -11,10 +11,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-2-70b",
+        abbr='llama-2-70b',
         type=Llama2,
-        path="./models/llama2/llama/llama-2-70b/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-70b/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
configs/models/llama/llama2_70b_chat.py

@@ -11,17 +11,17 @@ from opencompass.models import Llama2Chat
 api_meta_template = dict(
     round=[
-        dict(role="HUMAN", api_role="HUMAN"),
-        dict(role="BOT", api_role="BOT", generate=True),
+        dict(role='HUMAN', api_role='HUMAN'),
+        dict(role='BOT', api_role='BOT', generate=True),
     ],
 )

 models = [
     dict(
-        abbr="llama-2-70b-chat",
+        abbr='llama-2-70b-chat',
         type=Llama2Chat,
-        path="./models/llama2/llama/llama-2-70b-chat/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-70b-chat/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         meta_template=api_meta_template,
         max_out_len=100,
         max_seq_len=2048,
configs/models/llama/llama2_7b.py

@@ -11,10 +11,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-2-7b",
+        abbr='llama-2-7b',
         type=Llama2,
-        path="./models/llama2/llama/llama-2-7b/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-7b/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
configs/models/llama/llama2_7b_chat.py

@@ -11,17 +11,17 @@ from opencompass.models import Llama2Chat
 api_meta_template = dict(
     round=[
-        dict(role="HUMAN", api_role="HUMAN"),
-        dict(role="BOT", api_role="BOT", generate=True),
+        dict(role='HUMAN', api_role='HUMAN'),
+        dict(role='BOT', api_role='BOT', generate=True),
     ],
 )

 models = [
     dict(
-        abbr="llama-2-7b-chat",
+        abbr='llama-2-7b-chat',
         type=Llama2Chat,
-        path="./models/llama2/llama/llama-2-7b-chat/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-7b-chat/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         meta_template=api_meta_template,
         max_out_len=100,
         max_seq_len=2048,
configs/models/llama/llama_13b.py

@@ -12,10 +12,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-13b",
+        abbr='llama-13b',
         type=Llama2,
-        path="./models/llama/13B/",
-        tokenizer_path="./models/llama/tokenizer.model",
+        path='./models/llama/13B/',
+        tokenizer_path='./models/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
configs/models/llama/llama_30b.py

@@ -12,10 +12,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-30b",
+        abbr='llama-30b',
         type=Llama2,
-        path="./models/llama/30B/",
-        tokenizer_path="./models/llama/tokenizer.model",
+        path='./models/llama/30B/',
+        tokenizer_path='./models/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
configs/models/llama/llama_65b.py

@@ -12,10 +12,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-65b",
+        abbr='llama-65b',
         type=Llama2,
-        path="./models/llama/65B/",
-        tokenizer_path="./models/llama/tokenizer.model",
+        path='./models/llama/65B/',
+        tokenizer_path='./models/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,