OpenDAS / opencompass · Commits · d3de5c41
"git@developer.sourcefind.cn:OpenDAS/mmcv.git" did not exist on "2b97c52d39324fadd81235e01649e9b01956b07d"
Unverified commit d3de5c41, authored Nov 13, 2023 by Fengzhe Zhou, committed via GitHub on Nov 13, 2023.

[Sync] update model configs (#574)

Parent: 689ffe5b
Changes: 41 files in this commit. Showing 20 changed files with 400 additions and 12 deletions (+400, -12).
Changed files (this page):

.gitignore                                              +1   -0
configs/datasets/GLUE_MRPC/GLUE_MRPC_ppl_96564c.py      +2   -2
configs/datasets/humaneval/humaneval_gen_4a6eef.py      +35  -0
configs/models/aquila/hf_aquila2_34b.py                 +24  -0
configs/models/aquila/hf_aquila2_7b.py                  +24  -0
configs/models/aquila/hf_aquilachat2_34b.py             +33  -0
configs/models/aquila/hf_aquilachat2_34b_16k.py         +34  -0
configs/models/aquila/hf_aquilachat2_7b.py              +33  -0
configs/models/aquila/hf_aquilachat2_7b_16k.py          +34  -0
configs/models/chatglm/hf_chatglm2_6b.py                +5   -2
configs/models/chatglm/hf_chatglm3_6b.py                +31  -0
configs/models/chatglm/hf_chatglm3_6b_base.py           +24  -0
configs/models/chatglm/hf_chatglm_6b.py                 +4   -1
configs/models/hf_internlm/hf_internlm_20b.py           +22  -0
configs/models/hf_internlm/hf_internlm_7b.py            +4   -1
configs/models/hf_internlm/hf_internlm_chat_20b.py      +33  -0
configs/models/hf_internlm/hf_internlm_chat_7b.py       +4   -5
configs/models/hf_internlm/hf_internlm_chat_7b_8k.py    +4   -1
configs/models/lingowhale/hf_lingowhale_8b.py           +25  -0
configs/models/mistral/hf_mistral_7b.py                 +24  -0

.gitignore

@@ -89,3 +89,4 @@ docs/zh_cn/_build/
 # sft config ignore list
 configs/sft_cfg/*B_*
+configs/cky/

configs/datasets/GLUE_MRPC/GLUE_MRPC_ppl_96564c.py

@@ -22,8 +22,8 @@ MRPC_infer_cfg = dict(
         },
         ice_token='</E>',
     ),
-    retriever=dict(type=FixKRetriever),
-    inferencer=dict(type=PPLInferencer, fix_id_list=[0, 1, 2, 3, 4]))
+    retriever=dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4]),
+    inferencer=dict(type=PPLInferencer))

 MRPC_eval_cfg = dict(
     evaluator=dict(type=AccEvaluator),
 )
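
The change above moves fix_id_list from the inferencer to the retriever, which is where it belongs: FixKRetriever is the component that selects the in-context examples, so it takes the indices of the five fixed demonstrations, while PPLInferencer only scores candidate answers by perplexity. A minimal sketch of the corrected pattern (names as in the diff; the surrounding template config is elided):

    from opencompass.openicl.icl_retriever import FixKRetriever
    from opencompass.openicl.icl_inferencer import PPLInferencer

    # The retriever owns example selection: always use dataset items 0-4
    # as the in-context demonstrations.
    retriever = dict(type=FixKRetriever, fix_id_list=[0, 1, 2, 3, 4])
    # The inferencer only ranks answer candidates by perplexity.
    inferencer = dict(type=PPLInferencer)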

configs/datasets/humaneval/humaneval_gen_4a6eef.py (new file, mode 100644)

from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.datasets import HFDataset, HumanEvaluator, humaneval_postprocess

humaneval_reader_cfg = dict(
    input_columns=['prompt'], output_column='task_id', train_split='test')

# TODO: allow empty output-column
humaneval_infer_cfg = dict(
    prompt_template=dict(
        type=PromptTemplate,
        template=dict(round=[
            dict(
                role='HUMAN',
                prompt='Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\nCreate a Python script for this problem:\n{prompt}\n\n### Response:\n'),
        ])),
    retriever=dict(type=ZeroRetriever),
    inferencer=dict(type=GenInferencer, max_out_len=512))

humaneval_eval_cfg = dict(
    evaluator=dict(type=HumanEvaluator),
    pred_role='BOT',
    k=[1, 10, 100],  # the parameter only for humaneval
    pred_postprocessor=dict(type=humaneval_postprocess),
)

humaneval_datasets = [
    dict(
        type=HFDataset,
        path='openai_humaneval',
        reader_cfg=humaneval_reader_cfg,
        infer_cfg=humaneval_infer_cfg,
        eval_cfg=humaneval_eval_cfg)
]
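
In the eval config above, HumanEvaluator performs execution-based pass@k scoring for k=[1, 10, 100], and humaneval_postprocess cleans each generation before the code is run; roughly, it has to recover a runnable completion from a chat-styled response. An illustrative heuristic of that kind of cleanup (a sketch only, not OpenCompass's actual humaneval_postprocess):

    import re

    def extract_completion(text: str) -> str:
        # Sketch: prefer the first fenced code block if the model answered
        # in markdown; otherwise fall back to the raw completion.
        match = re.search(r'```(?:python)?\n(.*?)```', text, re.DOTALL)
        return match.group(1) if match else text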

configs/models/aquila/hf_aquila2_34b.py (new file, mode 100644)

from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='aquila2-34b-hf',
        path="BAAI/Aquila2-34B",
        tokenizer_path='BAAI/Aquila2-34B',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]

configs/models/aquila/hf_aquila2_7b.py (new file, mode 100644)

from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='aquila2-7b-hf',
        path="BAAI/Aquila2-7B",
        tokenizer_path='BAAI/Aquila2-7B',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]

configs/models/aquila/hf_aquilachat2_34b.py (new file, mode 100644)

from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='### Human: ', end='\n'),
        dict(role='BOT', begin='### Assistant: ', end='</s>', generate=True),
    ],
    eos_token_id=100007,
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='aquilachat2-34b-hf',
        path="BAAI/AquilaChat2-34B",
        tokenizer_path='BAAI/AquilaChat2-34B',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]
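
For orientation, the meta template above wraps each dialogue round in the model's chat markup: HUMAN turns are prefixed with '### Human: ' and ended with a newline, and the BOT turn, where generation happens, is opened with '### Assistant: ' and terminated by '</s>'. A sketch of the string such a template produces for one round (render_round is an illustrative helper, not an OpenCompass API; the real rendering lives in OpenCompass's prompt-building internals):

    # Illustration only: render a single round with the AquilaChat2-34B template.
    def render_round(template: dict, question: str) -> str:
        human, bot = template['round'][0], template['round'][1]
        # The BOT turn has generate=True, so the prompt stops right after its
        # begin marker and the model completes the text up to end='</s>'.
        return f"{human['begin']}{question}{human['end']}{bot['begin']}"

    _meta_template = dict(
        round=[
            dict(role='HUMAN', begin='### Human: ', end='\n'),
            dict(role='BOT', begin='### Assistant: ', end='</s>', generate=True),
        ],
        eos_token_id=100007,
    )

    print(render_round(_meta_template, 'What is 2 + 2?'))
    # -> "### Human: What is 2 + 2?\n### Assistant: "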

configs/models/aquila/hf_aquilachat2_34b_16k.py (new file, mode 100644)

from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    begin='###',
    round=[
        dict(role='HUMAN', begin='Human: ', end='###'),
        dict(role='BOT', begin='Assistant: ', end='</s>', generate=True),
    ],
    eos_token_id=100007,
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='aquilachat2-34b-16k-hf',
        path="BAAI/AquilaChat2-34B-16K",
        tokenizer_path='BAAI/AquilaChat2-34B-16K',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=4096,
        batch_size=8,
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]

configs/models/aquila/hf_aquilachat2_7b.py (new file, mode 100644)

from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|startofpiece|>', end=''),
        dict(role='BOT', begin='<|endofpiece|>', end='</s>', generate=True),
    ],
    eos_token_id=2,
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='aquilachat2-7b-hf',
        path="BAAI/AquilaChat2-7B",
        tokenizer_path='BAAI/AquilaChat2-7B',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]

configs/models/aquila/hf_aquilachat2_7b_16k.py (new file, mode 100644)

from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    begin='###',
    round=[
        dict(role='HUMAN', begin='Human: ', end='###'),
        dict(role='BOT', begin='Assistant: ', end='</s>', generate=True),
    ],
    eos_token_id=100007,
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='aquilachat2-7b-16k-hf',
        path="BAAI/AquilaChat2-7B-16K",
        tokenizer_path='BAAI/AquilaChat2-7B-16K',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=4096,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]

configs/models/chatglm/hf_chatglm2_6b.py

@@ -7,15 +7,18 @@ models = [
         abbr='chatglm2-6b-hf',
         path='THUDM/chatglm2-6b',
         tokenizer_path='THUDM/chatglm2-6b',
+        model_kwargs=dict(
+            trust_remote_code=True,
+            device_map='auto',
+        ),
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
             trust_remote_code=True,
         ),
         max_out_len=100,
-        max_seq_len=2048,
+        max_seq_len=4096,
         batch_size=8,
-        model_kwargs=dict(trust_remote_code=True, device_map='auto', revision='a6d54fac46dff2db65d53416c207a4485ca6bd40'),
         run_cfg=dict(num_gpus=1, num_procs=1),
     )
 ]

configs/models/chatglm/hf_chatglm3_6b.py (new file, mode 100644)

from opencompass.models import HuggingFaceChatGLM3

api_meta_template = dict(
    round=[
        dict(role='HUMAN', api_role='HUMAN'),
        dict(role='BOT', api_role='BOT', generate=True),
    ]
)

models = [
    dict(
        type=HuggingFaceChatGLM3,
        abbr='chatglm3-6b-hf',
        path='THUDM/chatglm3-6b',
        tokenizer_path='THUDM/chatglm3-6b',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        meta_template=api_meta_template,
        max_out_len=100,
        max_seq_len=4096,
        batch_size=1,
        run_cfg=dict(num_gpus=1, num_procs=1)
    )
]
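
Note the contrast with the string-based _meta_template files above: HuggingFaceChatGLM3 drives the model through ChatGLM3's structured chat interface, so this template only maps OpenCompass roles to api_role names instead of splicing begin/end markers into a single prompt string. A hedged illustration of the message shape this implies (role names follow ChatGLM3's published chat format; the history variable is hypothetical, not OpenCompass internals):

    # Illustration only: an api-style round becomes structured messages
    # rather than one concatenated prompt string.
    history = [
        {'role': 'user', 'content': 'Briefly explain in-context learning.'},
        # The BOT slot has generate=True: the assistant reply is produced by
        # the model and appended as the next message.
    ]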

configs/models/chatglm/hf_chatglm3_6b_base.py (new file, mode 100644)

from opencompass.models import HuggingFace

models = [
    dict(
        type=HuggingFace,
        abbr='chatglm3-6b-base-hf',
        path='THUDM/chatglm3-6b-base',
        tokenizer_path='THUDM/chatglm3-6b-base',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=4096,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]

configs/models/chatglm/hf_chatglm_6b.py

@@ -7,6 +7,10 @@ models = [
         abbr='chatglm-6b-hf',
         path='THUDM/chatglm-6b',
         tokenizer_path='THUDM/chatglm-6b',
+        model_kwargs=dict(
+            trust_remote_code=True,
+            device_map='auto',
+        ),
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
@@ -15,7 +19,6 @@ models = [
         max_out_len=100,
         max_seq_len=2048,
         batch_size=8,
-        model_kwargs=dict(trust_remote_code=True, device_map='auto', revision='1d240ba371910e9282298d4592532d7f0f3e9f3e'),
         run_cfg=dict(num_gpus=1, num_procs=1),
     )
 ]

configs/models/hf_internlm/hf_internlm_20b.py (new file, mode 100644)

from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm-20b-hf',
        path="internlm/internlm-20b",
        tokenizer_path='internlm/internlm-20b',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]

configs/models/hf_internlm/hf_internlm_7b.py

@@ -7,6 +7,10 @@ models = [
         abbr='internlm-7b-hf',
         path="internlm/internlm-7b",
         tokenizer_path='internlm/internlm-7b',
+        model_kwargs=dict(
+            trust_remote_code=True,
+            device_map='auto',
+        ),
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
@@ -16,7 +20,6 @@ models = [
         max_out_len=100,
         max_seq_len=2048,
         batch_size=8,
-        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
         run_cfg=dict(num_gpus=1, num_procs=1),
     )
 ]

configs/models/hf_internlm/hf_internlm_chat_20b.py (new file, mode 100644)

from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|User|>:', end='<eoh>\n'),
        dict(role='BOT', begin='<|Bot|>:', end='<eoa>\n', generate=True),
    ],
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm-chat-20b-hf',
        path="internlm/internlm-chat-20b",
        tokenizer_path='internlm/internlm-chat-20b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]

configs/models/hf_internlm/hf_internlm_chat_7b.py

@@ -14,21 +14,20 @@ models = [
         abbr='internlm-chat-7b-hf',
         path="internlm/internlm-chat-7b",
         tokenizer_path='internlm/internlm-chat-7b',
+        model_kwargs=dict(
+            trust_remote_code=True,
+            device_map='auto',
+        ),
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
             use_fast=False,
             trust_remote_code=True,
-            revision="1a6328795c6e207904e1eb58177e03ad24ae06f3"),
+        ),
         max_out_len=100,
         max_seq_len=2048,
         batch_size=8,
         meta_template=_meta_template,
-        model_kwargs=dict(trust_remote_code=True, device_map='auto', revision="1a6328795c6e207904e1eb58177e03ad24ae06f3"),
         run_cfg=dict(num_gpus=1, num_procs=1),
     )
 ]

configs/models/hf_internlm/hf_internlm_chat_7b_8k.py

@@ -14,6 +14,10 @@ models = [
         abbr='internlm-chat-7b-8k-hf',
         path="internlm/internlm-chat-7b-8k",
         tokenizer_path='internlm/internlm-chat-7b-8k',
+        model_kwargs=dict(
+            trust_remote_code=True,
+            device_map='auto',
+        ),
         tokenizer_kwargs=dict(
             padding_side='left',
             truncation_side='left',
@@ -24,7 +28,6 @@ models = [
         max_seq_len=2048,
         batch_size=8,
         meta_template=_meta_template,
-        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
         run_cfg=dict(num_gpus=1, num_procs=1),
     )
 ]

configs/models/lingowhale/hf_lingowhale_8b.py (new file, mode 100644)

from opencompass.models import HuggingFace

models = [
    dict(
        type=HuggingFace,
        abbr='lingowhale-8b-hf',
        path='deeplang-ai/LingoWhale-8B',
        tokenizer_path='deeplang-ai/LingoWhale-8B',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
            torch_dtype='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]

configs/models/mistral/hf_mistral_7b.py (new file, mode 100644)

from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        abbr='mistral-7b-v0.1-hf',
        type=HuggingFaceCausalLM,
        path='mistralai/Mistral-7B-v0.1',
        tokenizer_path='mistralai/Mistral-7B-v0.1',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
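
All of the new files follow the same convention as the rest of configs/: each module exposes a models (or *_datasets) list that an evaluation config can pull in with mmengine's read_base(). A minimal sketch of composing this commit's configs into a runnable evaluation (the file name eval_new_models.py is hypothetical; the import style matches OpenCompass's documented usage):

    # Hypothetical configs/eval_new_models.py
    from mmengine.config import read_base

    with read_base():
        from .datasets.humaneval.humaneval_gen_4a6eef import humaneval_datasets
        from .models.aquila.hf_aquila2_7b import models as aquila2_7b
        from .models.mistral.hf_mistral_7b import models as mistral_7b

    datasets = [*humaneval_datasets]
    models = [*aquila2_7b, *mistral_7b]

    # Launch with OpenCompass's entry point, e.g.:
    #   python run.py configs/eval_new_models.py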