Unverified Commit aa2dd2b5 authored by Fengzhe Zhou's avatar Fengzhe Zhou Committed by GitHub
Browse files

[Format] Add config lints (#892)

parent 3dbba119
......@@ -4,7 +4,7 @@ models = [
dict(
type=HuggingFaceBaseModel,
abbr='internlm2-1.8b-hf',
path="internlm/internlm2-1_8b",
path='internlm/internlm2-1_8b',
max_out_len=1024,
batch_size=8,
run_cfg=dict(num_gpus=1),
......
......@@ -5,7 +5,7 @@ models = [
dict(
type=HuggingFaceBaseModel,
abbr='internlm2-20b-hf',
path="internlm/internlm2-20b",
path='internlm/internlm2-20b',
max_out_len=1024,
batch_size=8,
run_cfg=dict(num_gpus=2),
......
......@@ -5,7 +5,7 @@ models = [
dict(
type=HuggingFaceBaseModel,
abbr='internlm2-7b-hf',
path="internlm/internlm2-7b",
path='internlm/internlm2-7b',
max_out_len=1024,
batch_size=8,
run_cfg=dict(num_gpus=1),
......
......@@ -5,7 +5,7 @@ models = [
dict(
type=HuggingFaceBaseModel,
abbr='internlm2-base-20b-hf',
path="internlm/internlm2-base-20b",
path='internlm/internlm2-base-20b',
max_out_len=1024,
batch_size=8,
run_cfg=dict(num_gpus=2),
......
......@@ -5,7 +5,7 @@ models = [
dict(
type=HuggingFaceBaseModel,
abbr='internlm2-base-7b-hf',
path="internlm/internlm2-base-7b",
path='internlm/internlm2-base-7b',
max_out_len=1024,
batch_size=8,
run_cfg=dict(num_gpus=1),
......
......@@ -13,7 +13,7 @@ models = [
dict(
type=HuggingFaceCausalLM,
abbr='internlm2-chat-20b-hf',
path="internlm/internlm2-chat-20b",
path='internlm/internlm2-chat-20b',
tokenizer_path='internlm/internlm2-chat-20b',
model_kwargs=dict(
trust_remote_code=True,
......@@ -31,7 +31,7 @@ models = [
meta_template=_meta_template,
run_cfg=dict(num_gpus=2, num_procs=1),
end_str='<|im_end|>',
generation_kwargs = {"eos_token_id": [2, 92542]},
generation_kwargs = {'eos_token_id': [2, 92542]},
batch_padding=True,
)
]
......@@ -13,7 +13,7 @@ models = [
dict(
type=HuggingFaceCausalLM,
abbr='internlm2-chat-7b-hf',
path="internlm/internlm2-chat-7b",
path='internlm/internlm2-chat-7b',
tokenizer_path='internlm/internlm2-chat-7b',
model_kwargs=dict(
trust_remote_code=True,
......@@ -31,7 +31,7 @@ models = [
meta_template=_meta_template,
run_cfg=dict(num_gpus=1, num_procs=1),
end_str='<|im_end|>',
generation_kwargs = {"eos_token_id": [2, 92542]},
generation_kwargs = {'eos_token_id': [2, 92542]},
batch_padding=True,
)
]
......@@ -13,7 +13,7 @@ models = [
dict(
type=HuggingFaceCausalLM,
abbr='internlm2-chat-math-20b-hf',
path="internlm/internlm2-math-20b",
path='internlm/internlm2-math-20b',
tokenizer_path='internlm/internlm2-math-20b',
model_kwargs=dict(
trust_remote_code=True,
......
......@@ -13,7 +13,7 @@ models = [
dict(
type=HuggingFaceCausalLM,
abbr='internlm2-chat-math-7b-hf',
path="internlm/internlm2-math-7b",
path='internlm/internlm2-math-7b',
tokenizer_path='internlm/internlm2-math-7b',
model_kwargs=dict(
trust_remote_code=True,
......
......@@ -5,7 +5,7 @@ models = [
dict(
type=HuggingFaceBaseModel,
abbr='internlm2-math-20b-hf',
path="internlm/internlm2-math-20b",
path='internlm/internlm2-math-20b',
max_out_len=1024,
batch_size=8,
run_cfg=dict(num_gpus=2),
......
......@@ -5,7 +5,7 @@ models = [
dict(
type=HuggingFaceBaseModel,
abbr='internlm2-math-7b-hf',
path="internlm/internlm2-math-7b",
path='internlm/internlm2-math-7b',
max_out_len=1024,
batch_size=8,
run_cfg=dict(num_gpus=1),
......
......@@ -5,7 +5,7 @@ models = [
dict(
type=HuggingFaceBaseModel,
abbr='internlm-20b-hf',
path="internlm/internlm-20b",
path='internlm/internlm-20b',
max_out_len=1024,
batch_size=8,
run_cfg=dict(num_gpus=2),
......
......@@ -5,7 +5,7 @@ models = [
dict(
type=HuggingFaceBaseModel,
abbr='internlm-7b-hf',
path="internlm/internlm-7b",
path='internlm/internlm-7b',
max_out_len=1024,
batch_size=8,
run_cfg=dict(num_gpus=1),
......
......@@ -12,7 +12,7 @@ models = [
dict(
type=HuggingFaceCausalLM,
abbr='internlm-chat-20b-hf',
path="internlm/internlm-chat-20b",
path='internlm/internlm-chat-20b',
tokenizer_path='internlm/internlm-chat-20b',
model_kwargs=dict(
trust_remote_code=True,
......
......@@ -12,7 +12,7 @@ models = [
dict(
type=HuggingFaceCausalLM,
abbr='internlm-chat-7b-hf',
path="internlm/internlm-chat-7b",
path='internlm/internlm-chat-7b',
tokenizer_path='internlm/internlm-chat-7b',
model_kwargs=dict(
trust_remote_code=True,
......
......@@ -4,12 +4,12 @@ from opencompass.models.turbomind import TurboMindModel
models = [
dict(
type=TurboMindModel,
abbr="internlm2-20b-turbomind",
path="internlm/internlm2-20b",
abbr='internlm2-20b-turbomind',
path='internlm/internlm2-20b',
engine_config=dict(
session_len=32768,
max_batch_size=32,
model_name="internlm2-20b",
model_name='internlm2-20b',
tp=2,
),
gen_config=dict(
......
......@@ -3,21 +3,21 @@ from opencompass.models.turbomind import TurboMindModel
_meta_template = dict(
round=[
dict(role="HUMAN", begin="<|im_start|>user\n", end="<|im_end|>\n"),
dict(role="BOT", begin="<|im_start|>assistant\n", end="<|im_end|>\n", generate=True),
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
],
)
models = [
dict(
type=TurboMindModel,
abbr="internlm2-chat-20b-turbomind",
path="internlm/internlm2-chat-20b",
abbr='internlm2-chat-20b-turbomind',
path='internlm/internlm2-chat-20b',
meta_template=_meta_template,
engine_config=dict(
session_len=32768,
max_batch_size=32,
model_name="internlm2-chat-20b",
model_name='internlm2-chat-20b',
tp=2,
stop_words=[2, 92542],
),
......
......@@ -3,21 +3,21 @@ from opencompass.models.turbomind import TurboMindModel
_meta_template = dict(
round=[
dict(role="HUMAN", begin="<|im_start|>user\n", end="<|im_end|>\n"),
dict(role="BOT", begin="<|im_start|>assistant\n", end="<|im_end|>\n", generate=True),
dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
],
)
models = [
dict(
type=TurboMindModel,
abbr="internlm2-chat-7b-turbomind",
path="internlm/internlm2-chat-7b",
abbr='internlm2-chat-7b-turbomind',
path='internlm/internlm2-chat-7b',
meta_template=_meta_template,
engine_config=dict(
session_len=32768,
max_batch_size=32,
model_name="internlm2-chat-7b",
model_name='internlm2-chat-7b',
tp=1,
stop_words=[2, 92542],
),
......
......@@ -2,8 +2,8 @@ from opencompass.models import TurboMindModel
_meta_template = dict(
round=[
dict(role="HUMAN", begin='<|begin_of_text|>user<|end_header_id|>\n\n', end='<|eot_id|>'),
dict(role="BOT", begin='<|begin_of_text|>assistant<|end_header_id|>\n\n', end='<|eot_id|>', generate=True),
dict(role='HUMAN', begin='<|begin_of_text|>user<|end_header_id|>\n\n', end='<|eot_id|>'),
dict(role='BOT', begin='<|begin_of_text|>assistant<|end_header_id|>\n\n', end='<|eot_id|>', generate=True),
],
)
......
......@@ -2,8 +2,8 @@ from opencompass.models import TurboMindModel
_meta_template = dict(
round=[
dict(role="HUMAN", begin='<|begin_of_text|>user<|end_header_id|>\n\n', end='<|eot_id|>'),
dict(role="BOT", begin='<|begin_of_text|>assistant<|end_header_id|>\n\n', end='<|eot_id|>', generate=True),
dict(role='HUMAN', begin='<|begin_of_text|>user<|end_header_id|>\n\n', end='<|eot_id|>'),
dict(role='BOT', begin='<|begin_of_text|>assistant<|end_header_id|>\n\n', end='<|eot_id|>', generate=True),
],
)
......
Markdown is supported
Attach a file by drag &amp; drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment