OpenDAS / opencompass · Commit c2d4717b (unverified)

[Fix] Fix a bug in internlm2 series configs (#977)

Authored on Mar 15, 2024 by Jingming; committed by GitHub on Mar 15, 2024.
Parent: 7baa711f

Showing 8 changed files with 10 additions and 2 deletions (+10 -2).
configs/models/hf_internlm/hf_internlm2_chat_1_8b.py (+1 -0)
configs/models/hf_internlm/hf_internlm2_chat_1_8b_sft.py (+1 -0)
configs/models/hf_internlm/hf_internlm2_chat_20b.py (+1 -0)
configs/models/hf_internlm/hf_internlm2_chat_20b_sft.py (+1 -0)
configs/models/hf_internlm/hf_internlm2_chat_20b_with_system.py (+2 -1)
configs/models/hf_internlm/hf_internlm2_chat_7b.py (+1 -0)
configs/models/hf_internlm/hf_internlm2_chat_7b_sft.py (+1 -0)
configs/models/hf_internlm/hf_internlm2_chat_7b_with_system.py (+2 -1)
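Every file in this commit gains the same one-line addition: a generation_kwargs entry listing the two stop-token ids 2 (the tokenizer's regular EOS token) and 92542 (which, per the configs' end_str='<|im_end|>' and eos_token_id=92542, corresponds to InternLM2's <|im_end|>). The sketch below is not OpenCompass code; it only illustrates, under the assumption that the HuggingFaceCausalLM wrapper forwards these kwargs to transformers' generate(), what passing a list of eos ids does. The helper name chat_once and the model_path argument are made up for the example.

    # Illustrative only: forwarding a generation_kwargs dict like the one added
    # in this commit to transformers' generate(). Recent transformers versions
    # accept a list of ids for eos_token_id; generation stops at whichever id
    # is produced first.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    generation_kwargs = {"eos_token_id": [2, 92542]}  # values taken from this diff

    def chat_once(model_path, prompt, **gen_kwargs):
        tok = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True)
        inputs = tok(prompt, return_tensors="pt")
        out = model.generate(**inputs, max_new_tokens=64, **gen_kwargs)
        # Decode only the newly generated tokens, not the prompt.
        return tok.decode(out[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True)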
configs/models/hf_internlm/hf_internlm2_chat_1_8b.py
@@ -31,5 +31,6 @@ models = [
         meta_template=_meta_template,
         run_cfg=dict(num_gpus=1, num_procs=1),
         end_str='<|im_end|>',
+        generation_kwargs={"eos_token_id": [2, 92542]},
     )
 ]
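A quick way to confirm the edited config still loads and carries the new key is to read it back with mmengine's Config loader, which OpenCompass uses for these Python configs. This snippet assumes opencompass and mmengine are installed so the config's own imports resolve, and that it is run from the repository root; it is a verification aid, not part of the commit.

    from mmengine.config import Config

    cfg = Config.fromfile('configs/models/hf_internlm/hf_internlm2_chat_1_8b.py')
    # Expected after this commit: {'eos_token_id': [2, 92542]}
    print(cfg.models[0]['generation_kwargs'])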
configs/models/hf_internlm/hf_internlm2_chat_1_8b_sft.py
@@ -31,5 +31,6 @@ models = [
         meta_template=_meta_template,
         run_cfg=dict(num_gpus=1, num_procs=1),
         end_str='<|im_end|>',
+        generation_kwargs={"eos_token_id": [2, 92542]},
     )
 ]
configs/models/hf_internlm/hf_internlm2_chat_20b.py
@@ -31,5 +31,6 @@ models = [
         meta_template=_meta_template,
         run_cfg=dict(num_gpus=2, num_procs=1),
         end_str='<|im_end|>',
+        generation_kwargs={"eos_token_id": [2, 92542]},
     )
 ]
configs/models/hf_internlm/hf_internlm2_chat_20b_sft.py
@@ -31,5 +31,6 @@ models = [
         meta_template=_meta_template,
         run_cfg=dict(num_gpus=2, num_procs=1),
         end_str='<|im_end|>',
+        generation_kwargs={"eos_token_id": [2, 92542]},
     )
 ]
configs/models/hf_internlm/hf_internlm2_chat_20b_with_system.py
@@ -3,8 +3,8 @@ from opencompass.models import HuggingFaceCausalLM
 _meta_template = dict(
     round=[
-        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
         dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
+        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
         dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
     eos_token_id=92542
@@ -32,5 +32,6 @@ models = [
         meta_template=_meta_template,
         run_cfg=dict(num_gpus=2, num_procs=1),
         end_str='<|im_end|>',
+        generation_kwargs={"eos_token_id": [2, 92542]},
     )
 ]
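In the two *_with_system configs the fix also reorders the round template so the SYSTEM turn is emitted before the HUMAN turn, matching the ChatML-style layout InternLM2 chat models expect. The snippet below is not OpenCompass's template parser; it is only a sketch of the prompt text implied by the corrected begin/end markers for one system + user + assistant exchange, with the render helper invented for illustration.

    # Sketch of the ChatML-style prompt implied by the corrected _meta_template;
    # role order and markers are taken from the diff above.
    def render(system_msg, user_msg):
        return (
            f"<|im_start|>system\n{system_msg}<|im_end|>\n"
            f"<|im_start|>user\n{user_msg}<|im_end|>\n"
            f"<|im_start|>assistant\n"  # generation starts after this marker
        )

    print(render("You are a helpful assistant.", "What is 2 + 2?"))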
configs/models/hf_internlm/hf_internlm2_chat_7b.py
@@ -31,5 +31,6 @@ models = [
         meta_template=_meta_template,
         run_cfg=dict(num_gpus=1, num_procs=1),
         end_str='<|im_end|>',
+        generation_kwargs={"eos_token_id": [2, 92542]},
     )
 ]
configs/models/hf_internlm/hf_internlm2_chat_7b_sft.py
@@ -31,5 +31,6 @@ models = [
         meta_template=_meta_template,
         run_cfg=dict(num_gpus=1, num_procs=1),
         end_str='<|im_end|>',
+        generation_kwargs={"eos_token_id": [2, 92542]},
     )
 ]
configs/models/hf_internlm/hf_internlm2_chat_7b_with_system.py
@@ -3,8 +3,8 @@ from opencompass.models import HuggingFaceCausalLM
 _meta_template = dict(
     round=[
-        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
         dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
+        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
         dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
     eos_token_id=92542
@@ -32,5 +32,6 @@ models = [
         meta_template=_meta_template,
         run_cfg=dict(num_gpus=1, num_procs=1),
         end_str='<|im_end|>',
+        generation_kwargs={"eos_token_id": [2, 92542]},
     )
 ]
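The two ids added to generation_kwargs can be cross-checked against the InternLM2 tokenizer itself. This check assumes access to the internlm/internlm2-chat-7b repository on Hugging Face and is only a verification aid, not part of the commit.

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("internlm/internlm2-chat-7b", trust_remote_code=True)
    print(tok.eos_token_id)                         # expected 2 per this diff
    print(tok.convert_tokens_to_ids("<|im_end|>"))  # expected 92542 per this diff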