Unverified Commit aa2dd2b5 authored by Fengzhe Zhou's avatar Fengzhe Zhou Committed by GitHub
Browse files

[Format] Add config lints (#892)

parent 3dbba119
......@@ -4,9 +4,9 @@ from opencompass.models import InternLM
models = [
dict(
type=InternLM,
path="./internData/",
path='./internData/',
tokenizer_path='./internData/V7.model',
model_config="./internData/model_config.py",
model_config='./internData/model_config.py',
max_out_len=100,
max_seq_len=2048,
batch_size=16,
......
......@@ -10,7 +10,7 @@ https://huggingface.co/GAIR/autoj-bilingual-6b
models = [dict(
type=HuggingFaceCausalLM,
abbr='autoj-bilingual-6b',
path="GAIR/autoj-bilingual-6b",
path='GAIR/autoj-bilingual-6b',
tokenizer_path='GAIR/autoj-bilingual-6b',
tokenizer_kwargs=dict(padding_side='left',
truncation_side='left',
......
......@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
models = [dict(
type=HuggingFaceCausalLM,
abbr='autoj-13b',
path="GAIR/autoj-13b",
path='GAIR/autoj-13b',
tokenizer_path='GAIR/autoj-13b',
tokenizer_kwargs=dict(padding_side='left',
truncation_side='left',
......
......@@ -9,7 +9,7 @@ https://huggingface.co/GAIR/autoj-13b-GPTQ-4bits
models = [dict(
type=HuggingFaceCausalLM,
abbr='autoj-13b-GPTQ-4bits',
path="GAIR/autoj-13b-GPTQ-4bits",
path='GAIR/autoj-13b-GPTQ-4bits',
tokenizer_path='GAIR/autoj-13b-GPTQ-4bits',
tokenizer_kwargs=dict(padding_side='left',
truncation_side='left',
......
......@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
models = [dict(
type=HuggingFaceCausalLM,
abbr='autoj-scenario-classifier',
path="GAIR/autoj-scenario-classifier",
path='GAIR/autoj-scenario-classifier',
tokenizer_path='GAIR/autoj-scenario-classifier',
tokenizer_kwargs=dict(padding_side='left',
truncation_side='left',
......
......@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
models = [dict(
type=HuggingFaceCausalLM,
abbr='judgelm-13b-v1-hf',
path="BAAI/JudgeLM-13B-v1.0",
path='BAAI/JudgeLM-13B-v1.0',
tokenizer_path='BAAI/JudgeLM-13B-v1.0',
tokenizer_kwargs=dict(padding_side='left',
truncation_side='left',
......
......@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
models = [dict(
type=HuggingFaceCausalLM,
abbr='judgelm-33b-v1-hf',
path="BAAI/JudgeLM-33B-v1.0",
path='BAAI/JudgeLM-33B-v1.0',
tokenizer_path='BAAI/JudgeLM-33B-v1.0',
tokenizer_kwargs=dict(padding_side='left',
truncation_side='left',
......
......@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
models = [dict(
type=HuggingFaceCausalLM,
abbr='judgelm-7b-v1-hf',
path="BAAI/JudgeLM-7B-v1.0",
path='BAAI/JudgeLM-7B-v1.0',
tokenizer_path='BAAI/JudgeLM-7B-v1.0',
tokenizer_kwargs=dict(padding_side='left',
truncation_side='left',
......
......@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
models = [dict(
type=HuggingFaceCausalLM,
abbr='alpaca-pandalm-7b-v1-hf',
path="WeOpenML/PandaLM-Alpaca-7B-v1",
path='WeOpenML/PandaLM-Alpaca-7B-v1',
tokenizer_path='WeOpenML/PandaLM-Alpaca-7B-v1',
tokenizer_kwargs=dict(padding_side='left',
truncation_side='left',
......
......@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
models = [dict(
type=HuggingFaceCausalLM,
abbr='pandalm-7b-v1-hf',
path="WeOpenML/PandaLM-7B-v1",
path='WeOpenML/PandaLM-7B-v1',
tokenizer_path='WeOpenML/PandaLM-7B-v1',
tokenizer_kwargs=dict(padding_side='left',
truncation_side='left',
......
......@@ -3,8 +3,8 @@ from opencompass.models import HuggingFaceCausalLM
_meta_template = dict(
round=[
dict(role="HUMAN", begin='\n<|im_start|>user\n', end='<|im_end|>'),
dict(role="BOT", begin="\n<|im_start|>assistant\n", end='<|im_end|>', generate=True),
dict(role='HUMAN', begin='\n<|im_start|>user\n', end='<|im_end|>'),
dict(role='BOT', begin='\n<|im_start|>assistant\n', end='<|im_end|>', generate=True),
],
)
......@@ -12,7 +12,7 @@ models = [
dict(
type=HuggingFaceCausalLM,
abbr='lemur-70b-chat-v1',
path="OpenLemur/lemur-70b-chat-v1",
path='OpenLemur/lemur-70b-chat-v1',
tokenizer_path='OpenLemur/lemur-70b-chat-v1',
# tokenizer_kwargs=dict(
# padding_side='left',
......
......@@ -11,10 +11,10 @@ from opencompass.models import Llama2
models = [
dict(
abbr="llama-2-13b",
abbr='llama-2-13b',
type=Llama2,
path="./models/llama2/llama/llama-2-13b/",
tokenizer_path="./models/llama2/llama/tokenizer.model",
path='./models/llama2/llama/llama-2-13b/',
tokenizer_path='./models/llama2/llama/tokenizer.model',
max_out_len=100,
max_seq_len=2048,
batch_size=16,
......
......@@ -11,17 +11,17 @@ from opencompass.models import Llama2Chat
api_meta_template = dict(
round=[
dict(role="HUMAN", api_role="HUMAN"),
dict(role="BOT", api_role="BOT", generate=True),
dict(role='HUMAN', api_role='HUMAN'),
dict(role='BOT', api_role='BOT', generate=True),
],
)
models = [
dict(
abbr="llama-2-13b-chat",
abbr='llama-2-13b-chat',
type=Llama2Chat,
path="./models/llama2/llama/llama-2-13b-chat/",
tokenizer_path="./models/llama2/llama/tokenizer.model",
path='./models/llama2/llama/llama-2-13b-chat/',
tokenizer_path='./models/llama2/llama/tokenizer.model',
meta_template=api_meta_template,
max_out_len=100,
max_seq_len=2048,
......
......@@ -11,10 +11,10 @@ from opencompass.models import Llama2
models = [
dict(
abbr="llama-2-70b",
abbr='llama-2-70b',
type=Llama2,
path="./models/llama2/llama/llama-2-70b/",
tokenizer_path="./models/llama2/llama/tokenizer.model",
path='./models/llama2/llama/llama-2-70b/',
tokenizer_path='./models/llama2/llama/tokenizer.model',
max_out_len=100,
max_seq_len=2048,
batch_size=16,
......
......@@ -11,17 +11,17 @@ from opencompass.models import Llama2Chat
api_meta_template = dict(
round=[
dict(role="HUMAN", api_role="HUMAN"),
dict(role="BOT", api_role="BOT", generate=True),
dict(role='HUMAN', api_role='HUMAN'),
dict(role='BOT', api_role='BOT', generate=True),
],
)
models = [
dict(
abbr="llama-2-70b-chat",
abbr='llama-2-70b-chat',
type=Llama2Chat,
path="./models/llama2/llama/llama-2-70b-chat/",
tokenizer_path="./models/llama2/llama/tokenizer.model",
path='./models/llama2/llama/llama-2-70b-chat/',
tokenizer_path='./models/llama2/llama/tokenizer.model',
meta_template=api_meta_template,
max_out_len=100,
max_seq_len=2048,
......
......@@ -11,10 +11,10 @@ from opencompass.models import Llama2
models = [
dict(
abbr="llama-2-7b",
abbr='llama-2-7b',
type=Llama2,
path="./models/llama2/llama/llama-2-7b/",
tokenizer_path="./models/llama2/llama/tokenizer.model",
path='./models/llama2/llama/llama-2-7b/',
tokenizer_path='./models/llama2/llama/tokenizer.model',
max_out_len=100,
max_seq_len=2048,
batch_size=16,
......
......@@ -11,17 +11,17 @@ from opencompass.models import Llama2Chat
api_meta_template = dict(
round=[
dict(role="HUMAN", api_role="HUMAN"),
dict(role="BOT", api_role="BOT", generate=True),
dict(role='HUMAN', api_role='HUMAN'),
dict(role='BOT', api_role='BOT', generate=True),
],
)
models = [
dict(
abbr="llama-2-7b-chat",
abbr='llama-2-7b-chat',
type=Llama2Chat,
path="./models/llama2/llama/llama-2-7b-chat/",
tokenizer_path="./models/llama2/llama/tokenizer.model",
path='./models/llama2/llama/llama-2-7b-chat/',
tokenizer_path='./models/llama2/llama/tokenizer.model',
meta_template=api_meta_template,
max_out_len=100,
max_seq_len=2048,
......
......@@ -12,10 +12,10 @@ from opencompass.models import Llama2
models = [
dict(
abbr="llama-13b",
abbr='llama-13b',
type=Llama2,
path="./models/llama/13B/",
tokenizer_path="./models/llama/tokenizer.model",
path='./models/llama/13B/',
tokenizer_path='./models/llama/tokenizer.model',
max_out_len=100,
max_seq_len=2048,
batch_size=16,
......
......@@ -12,10 +12,10 @@ from opencompass.models import Llama2
models = [
dict(
abbr="llama-30b",
abbr='llama-30b',
type=Llama2,
path="./models/llama/30B/",
tokenizer_path="./models/llama/tokenizer.model",
path='./models/llama/30B/',
tokenizer_path='./models/llama/tokenizer.model',
max_out_len=100,
max_seq_len=2048,
batch_size=16,
......
......@@ -12,10 +12,10 @@ from opencompass.models import Llama2
models = [
dict(
abbr="llama-65b",
abbr='llama-65b',
type=Llama2,
path="./models/llama/65B/",
tokenizer_path="./models/llama/tokenizer.model",
path='./models/llama/65B/',
tokenizer_path='./models/llama/tokenizer.model',
max_out_len=100,
max_seq_len=2048,
batch_size=16,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment