Unverified Commit d34ba111 authored by Fengzhe Zhou, committed by GitHub

[Sync] Merge branch 'dev' into zfz/update-keyset-demo (#876)

parent 32b5948f
from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-base-20b-hf',
        path="internlm/internlm2-base-20b",
        tokenizer_path='internlm/internlm2-base-20b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        min_out_len=1,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]
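# -----------------------------------------------------------------------------
# Sketch (not part of this diff): a model list like the one above is normally
# pulled into a top-level evaluation config via read_base() and paired with
# dataset configs. The module paths and file name below are assumptions for
# illustration only.
from mmengine.config import read_base

with read_base():
    # Hypothetical import paths; adjust to wherever the configs actually live.
    from .models.hf_internlm.hf_internlm2_base_20b import models as internlm2_base_20b_model
    from .datasets.mmlu.mmlu_gen import mmlu_datasets

datasets = [*mmlu_datasets]
models = [*internlm2_base_20b_model]
# Such a config would then be launched with something like:
#     python run.py configs/eval_internlm2_base_20b.py
# -----------------------------------------------------------------------------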
from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-base-7b-hf',
        path="internlm/internlm2-base-7b",
        tokenizer_path='internlm/internlm2-base-7b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        min_out_len=1,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-1.8b-sft-hf',
        path="internlm/internlm2-chat-1_8b-sft",
        tokenizer_path='internlm/internlm2-chat-1_8b-sft',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
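# -----------------------------------------------------------------------------
# Illustration only (not OpenCompass's real prompt builder): roughly how the
# ChatML-style _meta_template above wraps a single user turn. The BOT round has
# generate=True, so the prompt ends right after the assistant 'begin' tag and
# decoding is cut at end_str='<|im_end|>' (the configured eos_token_id 92542).
def _render_single_turn(user_msg: str) -> str:
    human_begin, human_end = '<|im_start|>user\n', '<|im_end|>\n'
    bot_begin = '<|im_start|>assistant\n'
    return human_begin + user_msg + human_end + bot_begin

print(_render_single_turn('What is 2 + 2?'))
# <|im_start|>user
# What is 2 + 2?<|im_end|>
# <|im_start|>assistant
# -----------------------------------------------------------------------------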
@@ -4,7 +4,6 @@ from opencompass.models import HuggingFaceCausalLM
 _meta_template = dict(
     round=[
         dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
-        dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
         dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
     eos_token_id=92542
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-20b-sft-hf',
        path="internlm/internlm2-chat-20b-sft",
        tokenizer_path='internlm/internlm2-chat-20b-sft',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=2, num_procs=1),
        end_str='<|im_end|>',
    )
]
from opencompass.models import HuggingFaceCausalLM

# internlm2-chat-20b with a ChatML-style meta template that also defines a SYSTEM round.
_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-20b-hf',
        path="internlm/internlm2-chat-20b",
        tokenizer_path='internlm/internlm2-chat-20b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=2, num_procs=1),
        end_str='<|im_end|>',
    )
]
@@ -4,7 +4,6 @@ from opencompass.models import HuggingFaceCausalLM
 _meta_template = dict(
     round=[
         dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
-        dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
         dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
     eos_token_id=92542
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-7b-sft-hf',
        path="internlm/internlm2-chat-7b-sft",
        tokenizer_path='internlm/internlm2-chat-7b-sft',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
from opencompass.models import HuggingFaceCausalLM

# internlm2-chat-7b with a ChatML-style meta template that also defines a SYSTEM round.
_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
        dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-7b-hf',
        path="internlm/internlm2-chat-7b",
        tokenizer_path='internlm/internlm2-chat-7b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|im_end|>',
    )
]
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='[UNUSED_TOKEN_146]user\n', end='[UNUSED_TOKEN_145]\n'),
        dict(role='BOT', begin='[UNUSED_TOKEN_146]assistant\n', end='[UNUSED_TOKEN_145]\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-math-20b-hf',
        path="internlm/internlm2-math-20b",
        tokenizer_path='internlm/internlm2-math-20b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=2, num_procs=1),
        end_str='[UNUSED_TOKEN_145]',
    )
]
from opencompass.models import HuggingFaceCausalLM

# Same model and abbr as the config above; the meta template here adds a SYSTEM round.
_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='[UNUSED_TOKEN_146]user\n', end='[UNUSED_TOKEN_145]\n'),
        dict(role='SYSTEM', begin='[UNUSED_TOKEN_146]system\n', end='[UNUSED_TOKEN_145]\n'),
        dict(role='BOT', begin='[UNUSED_TOKEN_146]assistant\n', end='[UNUSED_TOKEN_145]\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-math-20b-hf',
        path="internlm/internlm2-math-20b",
        tokenizer_path='internlm/internlm2-math-20b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=2, num_procs=1),
        end_str='[UNUSED_TOKEN_145]',
    )
]
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='[UNUSED_TOKEN_146]user\n', end='[UNUSED_TOKEN_145]\n'),
        dict(role='BOT', begin='[UNUSED_TOKEN_146]assistant\n', end='[UNUSED_TOKEN_145]\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-math-7b-hf',
        path="internlm/internlm2-math-7b",
        tokenizer_path='internlm/internlm2-math-7b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='[UNUSED_TOKEN_145]',
    )
]
from opencompass.models import HuggingFaceCausalLM

# Same model and abbr as the config above; the meta template here adds a SYSTEM round.
_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='[UNUSED_TOKEN_146]user\n', end='[UNUSED_TOKEN_145]\n'),
        dict(role='SYSTEM', begin='[UNUSED_TOKEN_146]system\n', end='[UNUSED_TOKEN_145]\n'),
        dict(role='BOT', begin='[UNUSED_TOKEN_146]assistant\n', end='[UNUSED_TOKEN_145]\n', generate=True),
    ],
    eos_token_id=92542
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='internlm2-chat-math-7b-hf',
        path="internlm/internlm2-math-7b",
        tokenizer_path='internlm/internlm2-math-7b',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            use_fast=False,
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='[UNUSED_TOKEN_145]',
    )
]
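# -----------------------------------------------------------------------------
# The math configs above use literal [UNUSED_TOKEN_146]/[UNUSED_TOKEN_145]
# delimiters in place of <|im_start|>/<|im_end|>, while keeping
# eos_token_id=92542. A quick sanity-check sketch (not part of the diff;
# assumes the Hugging Face Hub weights are reachable) to confirm the end
# delimiter maps to the configured id:
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained('internlm/internlm2-math-20b',
                                    trust_remote_code=True)
print(tok.convert_tokens_to_ids('[UNUSED_TOKEN_145]'))  # expected: 92542, per the configs above
# -----------------------------------------------------------------------------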
@@ -19,16 +19,17 @@ models = [
             torch_dtype='auto',
         ),
         tokenizer_kwargs=dict(
-            padding_side='right',
+            padding_side='left',
+            truncation_side='left',
             trust_remote_code=True,
             use_fast=False,
         ),
         meta_template=_meta_template,
         batch_padding=False,
-        max_out_len=1024,
+        max_out_len=100,
         max_seq_len=4096,
         batch_size=8,
-        run_cfg=dict(num_gpus=1, num_procs=1),
+        run_cfg=dict(num_gpus=2, num_procs=1),
         end_str='</s>',
     )
 ]
@@ -19,16 +19,16 @@ models = [
             torch_dtype='auto',
         ),
         tokenizer_kwargs=dict(
-            padding_side='right',
+            padding_side='left',
+            truncation_side='left',
             trust_remote_code=True,
             use_fast=False,
         ),
         meta_template=_meta_template,
-        batch_padding=False,
-        max_out_len=1024,
-        max_seq_len=8192,
+        max_out_len=100,
+        max_seq_len=2048,
         batch_size=8,
-        run_cfg=dict(num_gpus=1, num_procs=1),
+        run_cfg=dict(num_gpus=2, num_procs=1),
         end_str='</s>',
     )
 ]
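# -----------------------------------------------------------------------------
# The two hunks above switch padding_side from 'right' to 'left' (and adjust
# max_out_len/max_seq_len/num_gpus). For decoder-only models, left padding keeps
# every prompt flush against the position where generation starts, so batched
# generate() calls continue directly from the prompt instead of from pad tokens.
# Stand-alone sketch with plain transformers; the model name is for illustration
# only and is not part of this diff.
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained('gpt2', padding_side='left')
tok.pad_token = tok.eos_token  # gpt2 ships without a pad token
model = AutoModelForCausalLM.from_pretrained('gpt2')

batch = tok(['Hello world', 'A much longer prompt about evaluation'],
            return_tensors='pt', padding=True)
out = model.generate(**batch, max_new_tokens=20, pad_token_id=tok.eos_token_id)
print(tok.batch_decode(out, skip_special_tokens=True))
# -----------------------------------------------------------------------------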
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='GPT4 Correct User: ', end='<|end_of_turn|>'),
        dict(role="BOT", begin="GPT4 Correct Assistant: ", end='<|end_of_turn|>', generate=True),
    ],
)

models = [
    dict(
        abbr='openchat-3.5-0106-hf',
        type=HuggingFaceCausalLM,
        path='openchat/openchat-3.5-0106',
        tokenizer_path='openchat/openchat-3.5-0106',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|end_of_turn|>',
    )
]
from opencompass.models import HuggingFaceCausalLM

_meta_template = dict(
    round=[
        dict(role="HUMAN", begin='GPT4 Correct User: ', end='<|end_of_turn|>'),
        dict(role="BOT", begin="GPT4 Correct Assistant: ", end='<|end_of_turn|>', generate=True),
    ],
)

models = [
    dict(
        abbr='openchat-3.5-1210-hf',
        type=HuggingFaceCausalLM,
        path='openchat/openchat-3.5-1210',
        tokenizer_path='openchat/openchat-3.5-1210',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=1, num_procs=1),
        end_str='<|end_of_turn|>',
    )
]
from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        abbr='orionstar-14b-base-hf',
        type=HuggingFaceCausalLM,
        path='OrionStarAI/Orion-14B-Base',
        tokenizer_path='OrionStarAI/Orion-14B-Base',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        min_out_len=1,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]
@@ -11,7 +11,7 @@ _meta_template = dict(
 models = [
     dict(
-        abbr='telechat-7b-hf',
+        abbr='telechat-7b-hf--rerun',
         type=HuggingFaceCausalLM,
         path='Tele-AI/telechat-7B',
         tokenizer_path='Tele-AI/telechat-7B',
@@ -17,7 +17,7 @@ models = [
             trust_remote_code=True,
         ),
         max_out_len=100,
-        min_out_len=3,
+        min_out_len=1,
         max_seq_len=2048,
         batch_size=8,
         run_cfg=dict(num_gpus=4, num_procs=1),