Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
opencompass
Commits
d34ba111
"vscode:/vscode.git/clone" did not exist on "51fea27f18619e07e9513ffda513f68b8ac62208"
Unverified
Commit
d34ba111
authored
Feb 05, 2024
by
Fengzhe Zhou
Committed by
GitHub
Feb 05, 2024
Browse files
[Sync] Merge branch 'dev' into zfz/update-keyset-demo (#876)
parent
32b5948f
Changes
97
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
404 additions
and
2 deletions
+404
-2
configs/models/qwen/hf_qwen2_beta_0_5b.py
configs/models/qwen/hf_qwen2_beta_0_5b.py
+25
-0
configs/models/qwen/hf_qwen2_beta_0_5b_chat.py
configs/models/qwen/hf_qwen2_beta_0_5b_chat.py
+34
-0
configs/models/qwen/hf_qwen2_beta_14b.py
configs/models/qwen/hf_qwen2_beta_14b.py
+25
-0
configs/models/qwen/hf_qwen2_beta_14b_chat.py
configs/models/qwen/hf_qwen2_beta_14b_chat.py
+34
-0
configs/models/qwen/hf_qwen2_beta_1_8b.py
configs/models/qwen/hf_qwen2_beta_1_8b.py
+25
-0
configs/models/qwen/hf_qwen2_beta_1_8b_chat.py
configs/models/qwen/hf_qwen2_beta_1_8b_chat.py
+34
-0
configs/models/qwen/hf_qwen2_beta_4b.py
configs/models/qwen/hf_qwen2_beta_4b.py
+25
-0
configs/models/qwen/hf_qwen2_beta_4b_chat.py
configs/models/qwen/hf_qwen2_beta_4b_chat.py
+34
-0
configs/models/qwen/hf_qwen2_beta_72b.py
configs/models/qwen/hf_qwen2_beta_72b.py
+25
-0
configs/models/qwen/hf_qwen2_beta_72b_chat.py
configs/models/qwen/hf_qwen2_beta_72b_chat.py
+34
-0
configs/models/qwen/hf_qwen2_beta_7b.py
configs/models/qwen/hf_qwen2_beta_7b.py
+25
-0
configs/models/qwen/hf_qwen2_beta_7b_chat.py
configs/models/qwen/hf_qwen2_beta_7b_chat.py
+34
-0
configs/models/qwen/hf_qwen_14b.py
configs/models/qwen/hf_qwen_14b.py
+1
-0
configs/models/qwen/hf_qwen_1_8b.py
configs/models/qwen/hf_qwen_1_8b.py
+1
-0
configs/models/qwen/hf_qwen_1_8b_chat.py
configs/models/qwen/hf_qwen_1_8b_chat.py
+4
-1
configs/models/qwen/hf_qwen_72b.py
configs/models/qwen/hf_qwen_72b.py
+1
-0
configs/models/qwen/hf_qwen_7b.py
configs/models/qwen/hf_qwen_7b.py
+1
-0
configs/models/qwen/vllm_qwen2_beta_72b.py
configs/models/qwen/vllm_qwen2_beta_72b.py
+16
-0
configs/models/qwen/vllm_qwen2_beta_72b_chat.py
configs/models/qwen/vllm_qwen2_beta_72b_chat.py
+26
-0
configs/models/yi/hf_yi_6b_200k.py
configs/models/yi/hf_yi_6b_200k.py
+0
-1
No files found.
configs/models/qwen/hf_qwen2_beta_0_5b.py
0 → 100644
View file @
d34ba111
from opencompass.models import HuggingFaceCausalLM

# OpenCompass evaluation config for the Qwen2-beta 0.5B *base* model,
# loaded through the HuggingFace causal-LM backend.
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-0.5b-hf',
        path="Qwen/Qwen2-beta-0_5B",
        tokenizer_path='Qwen/Qwen2-beta-0_5B',
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        tokenizer_kwargs=dict(
            # Left padding/truncation keeps the prompt tail intact for generation.
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        # 151645 is presumably a Qwen special-token id used for padding — TODO confirm.
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
configs/models/qwen/hf_qwen2_beta_0_5b_chat.py
0 → 100644
View file @
d34ba111
from opencompass.models import HuggingFaceCausalLM

# ChatML-style prompt template for the Qwen chat models: each turn is wrapped
# in <|im_start|>role ... <|im_end|> markers; the BOT round is generated.
_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n',
             generate=True),
    ],
    # 151645 is presumably the <|im_end|> token id — TODO confirm.
    eos_token_id=151645,
)

# OpenCompass evaluation config for the Qwen2-beta 0.5B *chat* model.
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-0.5b-chat-hf',
        path="Qwen/Qwen2-beta-0_5B-Chat",
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        tokenizer_kwargs=dict(
            # Left padding/truncation keeps the prompt tail intact for generation.
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        # Generation is cut at the ChatML end-of-turn marker.
        end_str='<|im_end|>',
    )
]
configs/models/qwen/hf_qwen2_beta_14b.py
0 → 100644
View file @
d34ba111
from opencompass.models import HuggingFaceCausalLM

# OpenCompass evaluation config for the Qwen2-beta 14B *base* model,
# loaded through the HuggingFace causal-LM backend.
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-14b-hf',
        path="Qwen/Qwen2-beta-14B",
        tokenizer_path='Qwen/Qwen2-beta-14B',
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        tokenizer_kwargs=dict(
            # Left padding/truncation keeps the prompt tail intact for generation.
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        # 151645 is presumably a Qwen special-token id used for padding — TODO confirm.
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
configs/models/qwen/hf_qwen2_beta_14b_chat.py
0 → 100644
View file @
d34ba111
from opencompass.models import HuggingFaceCausalLM

# ChatML-style prompt template for the Qwen chat models: each turn is wrapped
# in <|im_start|>role ... <|im_end|> markers; the BOT round is generated.
_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n',
             generate=True),
    ],
    # 151645 is presumably the <|im_end|> token id — TODO confirm.
    eos_token_id=151645,
)

# OpenCompass evaluation config for the Qwen2-beta 14B *chat* model.
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-14b-chat-hf',
        path="Qwen/Qwen2-beta-14B-Chat",
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        tokenizer_kwargs=dict(
            # Left padding/truncation keeps the prompt tail intact for generation.
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        # Generation is cut at the ChatML end-of-turn marker.
        end_str='<|im_end|>',
    )
]
configs/models/qwen/hf_qwen2_beta_1_8b.py
0 → 100644
View file @
d34ba111
from opencompass.models import HuggingFaceCausalLM

# OpenCompass evaluation config for the Qwen2-beta 1.8B *base* model,
# loaded through the HuggingFace causal-LM backend.
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-1.8b-hf',
        path="Qwen/Qwen2-beta-1_8B",
        tokenizer_path='Qwen/Qwen2-beta-1_8B',
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        tokenizer_kwargs=dict(
            # Left padding/truncation keeps the prompt tail intact for generation.
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        # 151645 is presumably a Qwen special-token id used for padding — TODO confirm.
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
configs/models/qwen/hf_qwen2_beta_1_8b_chat.py
0 → 100644
View file @
d34ba111
from opencompass.models import HuggingFaceCausalLM

# ChatML-style prompt template for the Qwen chat models: each turn is wrapped
# in <|im_start|>role ... <|im_end|> markers; the BOT round is generated.
_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n',
             generate=True),
    ],
    # 151645 is presumably the <|im_end|> token id — TODO confirm.
    eos_token_id=151645,
)

# OpenCompass evaluation config for the Qwen2-beta 1.8B *chat* model.
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-1.8b-chat-hf',
        path="Qwen/Qwen2-beta-1_8B-Chat",
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        tokenizer_kwargs=dict(
            # Left padding/truncation keeps the prompt tail intact for generation.
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        # Generation is cut at the ChatML end-of-turn marker.
        end_str='<|im_end|>',
    )
]
configs/models/qwen/hf_qwen2_beta_4b.py
0 → 100644
View file @
d34ba111
from opencompass.models import HuggingFaceCausalLM

# OpenCompass evaluation config for the Qwen2-beta 4B *base* model,
# loaded through the HuggingFace causal-LM backend.
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-4b-hf',
        path="Qwen/Qwen2-beta-4B",
        tokenizer_path='Qwen/Qwen2-beta-4B',
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        tokenizer_kwargs=dict(
            # Left padding/truncation keeps the prompt tail intact for generation.
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        # 151645 is presumably a Qwen special-token id used for padding — TODO confirm.
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
configs/models/qwen/hf_qwen2_beta_4b_chat.py
0 → 100644
View file @
d34ba111
from opencompass.models import HuggingFaceCausalLM

# ChatML-style prompt template for the Qwen chat models: each turn is wrapped
# in <|im_start|>role ... <|im_end|> markers; the BOT round is generated.
_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n',
             generate=True),
    ],
    # 151645 is presumably the <|im_end|> token id — TODO confirm.
    eos_token_id=151645,
)

# OpenCompass evaluation config for the Qwen2-beta 4B *chat* model.
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-4b-chat-hf',
        path="Qwen/Qwen2-beta-4B-Chat",
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        tokenizer_kwargs=dict(
            # Left padding/truncation keeps the prompt tail intact for generation.
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        # Generation is cut at the ChatML end-of-turn marker.
        end_str='<|im_end|>',
    )
]
configs/models/qwen/hf_qwen2_beta_72b.py
0 → 100644
View file @
d34ba111
from opencompass.models import HuggingFaceCausalLM

# OpenCompass evaluation config for the Qwen2-beta 72B *base* model,
# loaded through the HuggingFace causal-LM backend.
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-72b-hf',
        path="Qwen/Qwen2-beta-72B",
        tokenizer_path='Qwen/Qwen2-beta-72B',
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        tokenizer_kwargs=dict(
            # Left padding/truncation keeps the prompt tail intact for generation.
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        # 151645 is presumably a Qwen special-token id used for padding — TODO confirm.
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        # The 72B model is sharded across 4 GPUs (unlike the smaller variants).
        run_cfg=dict(num_gpus=4, num_procs=1),
    )
]
configs/models/qwen/hf_qwen2_beta_72b_chat.py
0 → 100644
View file @
d34ba111
from opencompass.models import HuggingFaceCausalLM

# ChatML-style prompt template for the Qwen chat models: each turn is wrapped
# in <|im_start|>role ... <|im_end|> markers; the BOT round is generated.
_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n',
             generate=True),
    ],
    # 151645 is presumably the <|im_end|> token id — TODO confirm.
    eos_token_id=151645,
)

# OpenCompass evaluation config for the Qwen2-beta 72B *chat* model.
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-72b-chat-hf',
        path="Qwen/Qwen2-beta-72B-Chat",
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        tokenizer_kwargs=dict(
            # Left padding/truncation keeps the prompt tail intact for generation.
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        # The 72B model is sharded across 4 GPUs (unlike the smaller variants).
        run_cfg=dict(num_gpus=4, num_procs=1),
        # Generation is cut at the ChatML end-of-turn marker.
        end_str='<|im_end|>',
    )
]
configs/models/qwen/hf_qwen2_beta_7b.py
0 → 100644
View file @
d34ba111
from opencompass.models import HuggingFaceCausalLM

# OpenCompass evaluation config for the Qwen2-beta 7B *base* model,
# loaded through the HuggingFace causal-LM backend.
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-7b-hf',
        path="Qwen/Qwen2-beta-7B",
        tokenizer_path='Qwen/Qwen2-beta-7B',
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        tokenizer_kwargs=dict(
            # Left padding/truncation keeps the prompt tail intact for generation.
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        # 151645 is presumably a Qwen special-token id used for padding — TODO confirm.
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
configs/models/qwen/hf_qwen2_beta_7b_chat.py
0 → 100644
View file @
d34ba111
from opencompass.models import HuggingFaceCausalLM

# ChatML-style prompt template for the Qwen chat models: each turn is wrapped
# in <|im_start|>role ... <|im_end|> markers; the BOT round is generated.
_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n',
             generate=True),
    ],
    # 151645 is presumably the <|im_end|> token id — TODO confirm.
    eos_token_id=151645,
)

# OpenCompass evaluation config for the Qwen2-beta 7B *chat* model.
models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='qwen2-beta-7b-chat-hf',
        path="Qwen/Qwen2-beta-7B-Chat",
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        tokenizer_kwargs=dict(
            # Left padding/truncation keeps the prompt tail intact for generation.
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        meta_template=_meta_template,
        pad_token_id=151645,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        # Generation is cut at the ChatML end-of-turn marker.
        end_str='<|im_end|>',
    )
]
configs/models/qwen/hf_qwen_14b.py
View file @
d34ba111
...
@@ -17,6 +17,7 @@ models = [
...
@@ -17,6 +17,7 @@ models = [
use_fast
=
False
,
use_fast
=
False
,
),
),
pad_token_id
=
151643
,
pad_token_id
=
151643
,
min_out_len
=
1
,
max_out_len
=
100
,
max_out_len
=
100
,
max_seq_len
=
2048
,
max_seq_len
=
2048
,
batch_size
=
8
,
batch_size
=
8
,
...
...
configs/models/qwen/hf_qwen_1_8b.py
View file @
d34ba111
...
@@ -17,6 +17,7 @@ models = [
...
@@ -17,6 +17,7 @@ models = [
use_fast
=
False
,
use_fast
=
False
,
),
),
pad_token_id
=
151643
,
pad_token_id
=
151643
,
min_out_len
=
1
,
max_out_len
=
100
,
max_out_len
=
100
,
max_seq_len
=
2048
,
max_seq_len
=
2048
,
batch_size
=
8
,
batch_size
=
8
,
...
...
configs/models/qwen/hf_qwen_1_8b_chat.py
View file @
d34ba111
...
@@ -5,6 +5,7 @@ _meta_template = dict(
...
@@ -5,6 +5,7 @@ _meta_template = dict(
dict
(
role
=
"HUMAN"
,
begin
=
'
\n
<|im_start|>user
\n
'
,
end
=
'<|im_end|>'
),
dict
(
role
=
"HUMAN"
,
begin
=
'
\n
<|im_start|>user
\n
'
,
end
=
'<|im_end|>'
),
dict
(
role
=
"BOT"
,
begin
=
"
\n
<|im_start|>assistant
\n
"
,
end
=
'<|im_end|>'
,
generate
=
True
),
dict
(
role
=
"BOT"
,
begin
=
"
\n
<|im_start|>assistant
\n
"
,
end
=
'<|im_end|>'
,
generate
=
True
),
],
],
eos_token_id
=
151645
,
)
)
models
=
[
models
=
[
...
@@ -21,12 +22,14 @@ models = [
...
@@ -21,12 +22,14 @@ models = [
padding_side
=
'left'
,
padding_side
=
'left'
,
truncation_side
=
'left'
,
truncation_side
=
'left'
,
trust_remote_code
=
True
,
trust_remote_code
=
True
,
use_fast
=
False
,),
use_fast
=
False
,
),
pad_token_id
=
151643
,
pad_token_id
=
151643
,
max_out_len
=
100
,
max_out_len
=
100
,
max_seq_len
=
2048
,
max_seq_len
=
2048
,
batch_size
=
8
,
batch_size
=
8
,
meta_template
=
_meta_template
,
meta_template
=
_meta_template
,
run_cfg
=
dict
(
num_gpus
=
1
,
num_procs
=
1
),
run_cfg
=
dict
(
num_gpus
=
1
,
num_procs
=
1
),
end_str
=
'<|im_end|>'
,
)
)
]
]
configs/models/qwen/hf_qwen_72b.py
View file @
d34ba111
...
@@ -17,6 +17,7 @@ models = [
...
@@ -17,6 +17,7 @@ models = [
use_fast
=
False
,
use_fast
=
False
,
),
),
pad_token_id
=
151643
,
pad_token_id
=
151643
,
min_out_len
=
1
,
max_out_len
=
100
,
max_out_len
=
100
,
max_seq_len
=
2048
,
max_seq_len
=
2048
,
batch_size
=
8
,
batch_size
=
8
,
...
...
configs/models/qwen/hf_qwen_7b.py
View file @
d34ba111
...
@@ -17,6 +17,7 @@ models = [
...
@@ -17,6 +17,7 @@ models = [
use_fast
=
False
,
use_fast
=
False
,
),
),
pad_token_id
=
151643
,
pad_token_id
=
151643
,
min_out_len
=
1
,
max_out_len
=
100
,
max_out_len
=
100
,
max_seq_len
=
2048
,
max_seq_len
=
2048
,
batch_size
=
8
,
batch_size
=
8
,
...
...
configs/models/qwen/vllm_qwen2_beta_72b.py
0 → 100644
View file @
d34ba111
from opencompass.models import VLLM

# OpenCompass evaluation config for the Qwen2-beta 72B *base* model served
# through the vLLM backend (tensor-parallel across 4 GPUs).
models = [
    dict(
        type=VLLM,
        abbr='qwen2-beta-72b-vllm',
        path="Qwen/Qwen2-beta-72B",
        model_kwargs=dict(tensor_parallel_size=4),
        max_out_len=100,
        max_seq_len=2048,
        # vLLM handles larger batches than the HF backend config (32 vs 8).
        batch_size=32,
        # temperature=0 → greedy decoding for reproducible evaluation.
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=4, num_procs=1),
    )
]
configs/models/qwen/vllm_qwen2_beta_72b_chat.py
0 → 100644
View file @
d34ba111
from opencompass.models import VLLM

# ChatML-style prompt template for the Qwen chat models: each turn is wrapped
# in <|im_start|>role ... <|im_end|> markers; the BOT round is generated.
_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n',
             generate=True),
    ],
    # 151645 is presumably the <|im_end|> token id — TODO confirm.
    eos_token_id=151645,
)

# OpenCompass evaluation config for the Qwen2-beta 72B *chat* model served
# through the vLLM backend (tensor-parallel across 4 GPUs).
models = [
    dict(
        type=VLLM,
        abbr='qwen2-beta-72b-chat-vllm',
        path="Qwen/Qwen2-beta-72B-Chat",
        model_kwargs=dict(tensor_parallel_size=4),
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=2048,
        # vLLM handles larger batches than the HF backend config (32 vs 8).
        batch_size=32,
        # temperature=0 → greedy decoding for reproducible evaluation.
        generation_kwargs=dict(temperature=0),
        # Generation is cut at the ChatML end-of-turn marker.
        end_str='<|im_end|>',
        run_cfg=dict(num_gpus=4, num_procs=1),
    )
]
configs/models/yi/hf_yi_6b_200k.py
View file @
d34ba111
from
opencompass.models
import
HuggingFace
from
opencompass.models
import
HuggingFace
models
=
[
models
=
[
dict
(
dict
(
type
=
HuggingFace
,
type
=
HuggingFace
,
...
...
Prev
1
2
3
4
5
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment