Unverified Commit 8c85edd1 authored by Fengzhe Zhou's avatar Fengzhe Zhou Committed by GitHub
Browse files

[Sync] deprecate old mbpps (#1064)

parent c1724013
......@@ -9,8 +9,8 @@ from opencompass.tasks import OpenICLInferTask
with read_base():
from .datasets.humaneval.humaneval_repeat10_gen_8e312c import humaneval_datasets
from .datasets.mbpp.mbpp_repeat10_gen_1e1056 import mbpp_datasets
from .datasets.mbpp.sanitized_mbpp_repeat10_gen_1e1056 import sanitized_mbpp_datasets
from .datasets.mbpp.deprecated_mbpp_repeat10_gen_1e1056 import mbpp_datasets
from .datasets.mbpp.deprecated_sanitized_mbpp_repeat10_gen_1e1056 import sanitized_mbpp_datasets
datasets = []
datasets += humaneval_datasets
......
......@@ -8,7 +8,7 @@ with read_base():
from .datasets.gsm8k.gsm8k_gen_1d7fe4 import gsm8k_datasets
from .datasets.math.math_evaluatorv2_gen_cecb31 import math_datasets
from .datasets.humaneval.humaneval_gen_8e312c import humaneval_datasets
from .datasets.mbpp.sanitized_mbpp_gen_1e1056 import sanitized_mbpp_datasets
from .datasets.mbpp.deprecated_sanitized_mbpp_gen_1e1056 import sanitized_mbpp_datasets
from .models.hf_internlm.hf_internlm2_chat_7b import models as hf_internlm2_chat_7b_model
from .models.hf_internlm.hf_internlm2_chat_20b import models as hf_internlm2_chat_20b_model
......
......@@ -7,7 +7,7 @@ with read_base():
from .datasets.gsm8k.gsm8k_gen_1d7fe4 import gsm8k_datasets
from .datasets.math.math_gen_265cce import math_datasets
from .datasets.humaneval.humaneval_gen_a82cae import humaneval_datasets
from .datasets.mbpp.sanitized_mbpp_gen_1e1056 import sanitized_mbpp_datasets
from .datasets.mbpp.deprecated_sanitized_mbpp_gen_1e1056 import sanitized_mbpp_datasets
from .models.hf_internlm.hf_internlm2_7b import models as hf_internlm2_7b_model
from .models.hf_internlm.hf_internlm2_20b import models as hf_internlm2_20b_model
......
from opencompass.models import HuggingFaceCausalLM
_meta_template = dict(
begin='<|begin▁of▁sentence|>',
round=[
dict(role="HUMAN", begin='User: ', end='\n\n'),
dict(role="BOT", begin="Assistant: ", end='<|end▁of▁sentence|>', generate=True),
......@@ -12,7 +13,6 @@ models = [
type=HuggingFaceCausalLM,
abbr='deepseek-67b-chat-hf',
path="deepseek-ai/deepseek-llm-67b-chat",
tokenizer_path='deepseek-ai/deepseek-llm-67b-chat',
model_kwargs=dict(
device_map='auto',
trust_remote_code=True,
......@@ -28,6 +28,6 @@ models = [
max_seq_len=2048,
batch_size=8,
run_cfg=dict(num_gpus=4, num_procs=1),
end_str='<|end▁of▁sentence|>',
batch_padding=True,
)
]
from opencompass.models import HuggingFaceCausalLM
_meta_template = dict(
begin='<|begin▁of▁sentence|>',
round=[
dict(role="HUMAN", begin='User: ', end='\n\n'),
dict(role="BOT", begin="Assistant: ", end='<|end▁of▁sentence|>', generate=True),
......@@ -12,7 +13,6 @@ models = [
type=HuggingFaceCausalLM,
abbr='deepseek-7b-chat-hf',
path="deepseek-ai/deepseek-llm-7b-chat",
tokenizer_path='deepseek-ai/deepseek-llm-7b-chat',
model_kwargs=dict(
device_map='auto',
trust_remote_code=True,
......@@ -28,5 +28,6 @@ models = [
max_seq_len=2048,
batch_size=8,
run_cfg=dict(num_gpus=1, num_procs=1),
batch_padding=True,
)
]
from opencompass.models import HuggingFaceCausalLM
_meta_template = dict(
begin='<|begin▁of▁sentence|>',
round=[
dict(role="HUMAN", begin='User: ', end='\n\n'),
dict(role="BOT", begin="Assistant: ", end='<|end▁of▁sentence|>', generate=True),
......@@ -12,7 +13,6 @@ models = [
type=HuggingFaceCausalLM,
abbr='deepseek-moe-16b-chat-hf',
path="deepseek-ai/deepseek-moe-16b-chat",
tokenizer_path='deepseek-ai/deepseek-moe-16b-chat',
model_kwargs=dict(
device_map='auto',
trust_remote_code=True,
......@@ -26,7 +26,7 @@ models = [
max_out_len=100,
max_seq_len=2048,
batch_size=8,
run_cfg=dict(num_gpus=2, num_procs=1),
end_str='<|end▁of▁sentence|>',
run_cfg=dict(num_gpus=1, num_procs=1),
batch_padding=True,
)
]
......@@ -5,7 +5,6 @@ _meta_template = dict(
dict(role="HUMAN", begin='<start_of_turn>user\n', end='<end_of_turn>\n'),
dict(role="BOT", begin="<start_of_turn>model\n", end='<end_of_turn>\n', generate=True),
],
eos_token_id=151645,
)
models = [
......@@ -24,9 +23,11 @@ models = [
use_fast=False,
),
meta_template=_meta_template,
min_out_len=1,
max_out_len=100,
max_seq_len=2048,
batch_size=8,
run_cfg=dict(num_gpus=1, num_procs=1),
batch_padding=True,
)
]
......@@ -5,7 +5,6 @@ _meta_template = dict(
dict(role="HUMAN", begin='<start_of_turn>user\n', end='<end_of_turn>\n'),
dict(role="BOT", begin="<start_of_turn>model\n", end='<end_of_turn>\n', generate=True),
],
eos_token_id=151645,
)
models = [
......@@ -29,5 +28,6 @@ models = [
max_seq_len=2048,
batch_size=8,
run_cfg=dict(num_gpus=1, num_procs=1),
batch_padding=True,
)
]
......@@ -6,7 +6,6 @@ _meta_template = dict(
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
],
eos_token_id=92542
)
models = [
......@@ -32,5 +31,6 @@ models = [
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<|im_end|>',
generation_kwargs = {"eos_token_id": [2, 92542]},
batch_padding=True,
)
]
......@@ -6,7 +6,6 @@ _meta_template = dict(
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
],
eos_token_id=92542
)
models = [
......@@ -32,5 +31,6 @@ models = [
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<|im_end|>',
generation_kwargs = {"eos_token_id": [2, 92542]},
batch_padding=True,
)
]
......@@ -6,7 +6,6 @@ _meta_template = dict(
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
],
eos_token_id=92542
)
models = [
......@@ -32,5 +31,6 @@ models = [
run_cfg=dict(num_gpus=2, num_procs=1),
end_str='<|im_end|>',
generation_kwargs = {"eos_token_id": [2, 92542]},
batch_padding=True,
)
]
......@@ -6,7 +6,6 @@ _meta_template = dict(
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
],
eos_token_id=92542
)
models = [
......@@ -32,5 +31,6 @@ models = [
run_cfg=dict(num_gpus=2, num_procs=1),
end_str='<|im_end|>',
generation_kwargs = {"eos_token_id": [2, 92542]},
batch_padding=True,
)
]
......@@ -7,7 +7,6 @@ _meta_template = dict(
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
],
eos_token_id=92542
)
models = [
......@@ -33,5 +32,6 @@ models = [
run_cfg=dict(num_gpus=2, num_procs=1),
end_str='<|im_end|>',
generation_kwargs = {"eos_token_id": [2, 92542]},
batch_padding=True,
)
]
......@@ -6,7 +6,6 @@ _meta_template = dict(
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
],
eos_token_id=92542
)
models = [
......@@ -32,5 +31,6 @@ models = [
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<|im_end|>',
generation_kwargs = {"eos_token_id": [2, 92542]},
batch_padding=True,
)
]
......@@ -6,7 +6,6 @@ _meta_template = dict(
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
],
eos_token_id=92542
)
models = [
......@@ -32,5 +31,6 @@ models = [
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<|im_end|>',
generation_kwargs = {"eos_token_id": [2, 92542]},
batch_padding=True,
)
]
......@@ -7,7 +7,6 @@ _meta_template = dict(
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
],
eos_token_id=92542
)
models = [
......@@ -33,5 +32,6 @@ models = [
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<|im_end|>',
generation_kwargs = {"eos_token_id": [2, 92542]},
batch_padding=True,
)
]
......@@ -3,27 +3,31 @@ from opencompass.models.turbomind import TurboMindModel
_meta_template = dict(
round=[
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n',
generate=True),
dict(role="HUMAN", begin="<|im_start|>user\n", end="<|im_end|>\n"),
dict(role="BOT", begin="<|im_start|>assistant\n", end="<|im_end|>\n", generate=True),
],
eos_token_id=92542
)
models = [
dict(
type=TurboMindModel,
abbr='internlm2-chat-20b-turbomind',
abbr="internlm2-chat-20b-turbomind",
path="internlm/internlm2-chat-20b",
meta_template=_meta_template,
engine_config=dict(session_len=210000,
max_batch_size=8,
rope_scaling_factor=3.0,
model_name="internlm2-chat-20b",
tp=2),
gen_config=dict(top_k=1, top_p=0.8,
temperature=1.0,
max_new_tokens=2000,),
engine_config=dict(
session_len=210000,
max_batch_size=8,
rope_scaling_factor=3.0,
model_name="internlm2-chat-20b",
tp=2,
stop_words=[2, 92542],
),
gen_config=dict(
top_k=1,
top_p=0.8,
temperature=1.0,
max_new_tokens=2000,
),
max_out_len=2000,
max_seq_len=210000,
batch_size=1,
......
......@@ -3,29 +3,34 @@ from opencompass.models.turbomind import TurboMindModel
_meta_template = dict(
round=[
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n',
generate=True),
dict(role="HUMAN", begin="<|im_start|>user\n", end="<|im_end|>\n"),
dict(role="BOT", begin="<|im_start|>assistant\n", end="<|im_end|>\n", generate=True),
],
eos_token_id=92542
)
models = [
dict(
type=TurboMindModel,
abbr='internlm2-chat-7b-turbomind',
abbr="internlm2-chat-7b-turbomind",
path="internlm/internlm2-chat-7b",
meta_template=_meta_template,
engine_config=dict(session_len=210000,
max_batch_size=8,
rope_scaling_factor=2.0,
model_name="internlm2-chat-7b"),
gen_config=dict(top_k=1, top_p=0.8,
temperature=1.0,
max_new_tokens=2000),
engine_config=dict(
session_len=210000,
max_batch_size=8,
rope_scaling_factor=2.0,
model_name="internlm2-chat-7b",
tp=1,
stop_words=[2, 92542],
),
gen_config=dict(
top_k=1,
top_p=0.8,
temperature=1.0,
max_new_tokens=2000,
),
max_out_len=2000,
max_seq_len=210000,
batch_size=8,
batch_size=1,
concurrency=8,
run_cfg=dict(num_gpus=1, num_procs=1),
)
......
......@@ -2,8 +2,8 @@ from opencompass.models import HuggingFaceCausalLM
_meta_template = dict(
round=[
dict(role="HUMAN", begin=' [INST] ', end=' [/INST] '),
dict(role="BOT", begin='', end='', generate=True),
dict(role="HUMAN", begin='[INST] ', end=' [/INST]'),
dict(role="BOT", begin=' ', end=' ', generate=True),
],
)
......@@ -27,5 +27,6 @@ models = [
batch_size=8,
run_cfg=dict(num_gpus=2, num_procs=1),
end_str='[INST]',
batch_padding=True,
)
]
......@@ -2,8 +2,8 @@ from opencompass.models import HuggingFaceCausalLM
_meta_template = dict(
round=[
dict(role="HUMAN", begin=' [INST] ', end=' [/INST] '),
dict(role="BOT", begin='', end='', generate=True),
dict(role="HUMAN", begin='[INST] ', end=' [/INST]'),
dict(role="BOT", begin=' ', end=' ', generate=True),
],
)
......@@ -27,5 +27,6 @@ models = [
batch_size=8,
run_cfg=dict(num_gpus=4, num_procs=1),
end_str='[INST]',
batch_padding=True,
)
]
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment