Unverified commit aa2dd2b5, authored by Fengzhe Zhou, committed by GitHub

[Format] Add config lints (#892)

parent 3dbba119
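
All of the changes below are mechanical: double-quoted string literals in the model configs are rewritten to single quotes. For reference, here is what a complete model entry looks like in the post-lint style. Every field shown recurs in the hunks below, but the abbr and path values are illustrative placeholders, not a model touched by this PR.

    from opencompass.models import HuggingFaceCausalLM

    models = [dict(
        type=HuggingFaceCausalLM,
        abbr='example-judge-7b',                       # placeholder abbreviation
        path='example-org/example-judge-7b',           # placeholder HF hub path
        tokenizer_path='example-org/example-judge-7b',
        tokenizer_kwargs=dict(padding_side='left',
                              truncation_side='left'),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(device_map='auto', trust_remote_code=True),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )]
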
@@ -4,9 +4,9 @@ from opencompass.models import InternLM
 models = [
     dict(
         type=InternLM,
-        path="./internData/",
+        path='./internData/',
         tokenizer_path='./internData/V7.model',
-        model_config="./internData/model_config.py",
+        model_config='./internData/model_config.py',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...

 from opencompass.models import HuggingFaceCausalLM
 '''
 This is a bilingual 6B version of Auto-J.
 It is trained on both the original training data
 and its Chinese translation, which can be found at
 https://huggingface.co/GAIR/autoj-bilingual-6b
 '''
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='autoj-bilingual-6b',
-    path="GAIR/autoj-bilingual-6b",
+    path='GAIR/autoj-bilingual-6b',
     tokenizer_path='GAIR/autoj-bilingual-6b',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
...

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='autoj-13b',
-    path="GAIR/autoj-13b",
+    path='GAIR/autoj-13b',
     tokenizer_path='GAIR/autoj-13b',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]

 from opencompass.models import HuggingFaceCausalLM
 '''
 This is a 4-bit quantized version of Auto-J, produced with AutoGPTQ,
 which is available on huggingface-hub:
 https://huggingface.co/GAIR/autoj-13b-GPTQ-4bits
 '''
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='autoj-13b-GPTQ-4bits',
-    path="GAIR/autoj-13b-GPTQ-4bits",
+    path='GAIR/autoj-13b-GPTQ-4bits',
     tokenizer_path='GAIR/autoj-13b-GPTQ-4bits',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -20,4 +20,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='autoj-scenario-classifier',
-    path="GAIR/autoj-scenario-classifier",
+    path='GAIR/autoj-scenario-classifier',
     tokenizer_path='GAIR/autoj-scenario-classifier',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='judgelm-13b-v1-hf',
-    path="BAAI/JudgeLM-13B-v1.0",
+    path='BAAI/JudgeLM-13B-v1.0',
     tokenizer_path='BAAI/JudgeLM-13B-v1.0',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='judgelm-33b-v1-hf',
-    path="BAAI/JudgeLM-33B-v1.0",
+    path='BAAI/JudgeLM-33B-v1.0',
     tokenizer_path='BAAI/JudgeLM-33B-v1.0',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=4, num_procs=1),
-)]
\ No newline at end of file
+)]

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='judgelm-7b-v1-hf',
-    path="BAAI/JudgeLM-7B-v1.0",
+    path='BAAI/JudgeLM-7B-v1.0',
     tokenizer_path='BAAI/JudgeLM-7B-v1.0',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='alpaca-pandalm-7b-v1-hf',
-    path="WeOpenML/PandaLM-Alpaca-7B-v1",
+    path='WeOpenML/PandaLM-Alpaca-7B-v1',
     tokenizer_path='WeOpenML/PandaLM-Alpaca-7B-v1',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]

@@ -4,7 +4,7 @@ from opencompass.models import HuggingFaceCausalLM
 models = [dict(
     type=HuggingFaceCausalLM,
     abbr='pandalm-7b-v1-hf',
-    path="WeOpenML/PandaLM-7B-v1",
+    path='WeOpenML/PandaLM-7B-v1',
     tokenizer_path='WeOpenML/PandaLM-7B-v1',
     tokenizer_kwargs=dict(padding_side='left',
                           truncation_side='left',
@@ -15,4 +15,4 @@ models = [dict(
     batch_size=8,
     model_kwargs=dict(device_map='auto', trust_remote_code=True),
     run_cfg=dict(num_gpus=1, num_procs=1),
-)]
\ No newline at end of file
+)]

@@ -3,8 +3,8 @@ from opencompass.models import HuggingFaceCausalLM
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='\n<|im_start|>user\n', end='<|im_end|>'),
-        dict(role="BOT", begin="\n<|im_start|>assistant\n", end='<|im_end|>', generate=True),
+        dict(role='HUMAN', begin='\n<|im_start|>user\n', end='<|im_end|>'),
+        dict(role='BOT', begin='\n<|im_start|>assistant\n', end='<|im_end|>', generate=True),
     ],
 )
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='lemur-70b-chat-v1',
-        path="OpenLemur/lemur-70b-chat-v1",
+        path='OpenLemur/lemur-70b-chat-v1',
         tokenizer_path='OpenLemur/lemur-70b-chat-v1',
         # tokenizer_kwargs=dict(
         #     padding_side='left',
...

@@ -11,10 +11,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-2-13b",
+        abbr='llama-2-13b',
         type=Llama2,
-        path="./models/llama2/llama/llama-2-13b/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-13b/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...

@@ -11,17 +11,17 @@ from opencompass.models import Llama2Chat
 api_meta_template = dict(
     round=[
-        dict(role="HUMAN", api_role="HUMAN"),
-        dict(role="BOT", api_role="BOT", generate=True),
+        dict(role='HUMAN', api_role='HUMAN'),
+        dict(role='BOT', api_role='BOT', generate=True),
     ],
 )
 models = [
     dict(
-        abbr="llama-2-13b-chat",
+        abbr='llama-2-13b-chat',
         type=Llama2Chat,
-        path="./models/llama2/llama/llama-2-13b-chat/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-13b-chat/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         meta_template=api_meta_template,
         max_out_len=100,
         max_seq_len=2048,
...

@@ -11,10 +11,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-2-70b",
+        abbr='llama-2-70b',
         type=Llama2,
-        path="./models/llama2/llama/llama-2-70b/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-70b/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...

@@ -11,17 +11,17 @@ from opencompass.models import Llama2Chat
 api_meta_template = dict(
     round=[
-        dict(role="HUMAN", api_role="HUMAN"),
-        dict(role="BOT", api_role="BOT", generate=True),
+        dict(role='HUMAN', api_role='HUMAN'),
+        dict(role='BOT', api_role='BOT', generate=True),
     ],
 )
 models = [
     dict(
-        abbr="llama-2-70b-chat",
+        abbr='llama-2-70b-chat',
         type=Llama2Chat,
-        path="./models/llama2/llama/llama-2-70b-chat/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-70b-chat/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         meta_template=api_meta_template,
         max_out_len=100,
         max_seq_len=2048,
...

@@ -11,10 +11,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-2-7b",
+        abbr='llama-2-7b',
         type=Llama2,
-        path="./models/llama2/llama/llama-2-7b/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-7b/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...

@@ -11,17 +11,17 @@ from opencompass.models import Llama2Chat
 api_meta_template = dict(
     round=[
-        dict(role="HUMAN", api_role="HUMAN"),
-        dict(role="BOT", api_role="BOT", generate=True),
+        dict(role='HUMAN', api_role='HUMAN'),
+        dict(role='BOT', api_role='BOT', generate=True),
     ],
 )
 models = [
     dict(
-        abbr="llama-2-7b-chat",
+        abbr='llama-2-7b-chat',
         type=Llama2Chat,
-        path="./models/llama2/llama/llama-2-7b-chat/",
-        tokenizer_path="./models/llama2/llama/tokenizer.model",
+        path='./models/llama2/llama/llama-2-7b-chat/',
+        tokenizer_path='./models/llama2/llama/tokenizer.model',
         meta_template=api_meta_template,
         max_out_len=100,
         max_seq_len=2048,
...

@@ -12,10 +12,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-13b",
+        abbr='llama-13b',
         type=Llama2,
-        path="./models/llama/13B/",
-        tokenizer_path="./models/llama/tokenizer.model",
+        path='./models/llama/13B/',
+        tokenizer_path='./models/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...

@@ -12,10 +12,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-30b",
+        abbr='llama-30b',
         type=Llama2,
-        path="./models/llama/30B/",
-        tokenizer_path="./models/llama/tokenizer.model",
+        path='./models/llama/30B/',
+        tokenizer_path='./models/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...

@@ -12,10 +12,10 @@ from opencompass.models import Llama2
 models = [
     dict(
-        abbr="llama-65b",
+        abbr='llama-65b',
         type=Llama2,
-        path="./models/llama/65B/",
-        tokenizer_path="./models/llama/tokenizer.model",
+        path='./models/llama/65B/',
+        tokenizer_path='./models/llama/tokenizer.model',
         max_out_len=100,
         max_seq_len=2048,
         batch_size=16,
...
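
The lint applied here is plain quote normalization: double-quoted string literals become single-quoted unless the string itself contains a single quote. The PR's actual tooling is not visible in this diff (pre-commit's double-quote-string-fixer hook is a common way to automate exactly this rewrite); as a rough, stand-alone sketch of what such a checker does:

    # check_quotes.py -- minimal single-quote lint sketch (hypothetical; not the
    # tool this PR wires up). Flags simple double-quoted strings that could be
    # single-quoted, mirroring the rewrites in the hunks above.
    import io
    import sys
    import tokenize


    def find_double_quoted(source):
        '''Yield (row, col, text) for double-quoted string tokens.'''
        tokens = tokenize.generate_tokens(io.StringIO(source).readline)
        for tok in tokens:
            if tok.type != tokenize.STRING:
                continue
            body = tok.string.lstrip('rbufRBUF')  # skip string prefixes
            # Leave triple-quoted docstrings and strings that themselves
            # contain a single quote alone.
            if body.startswith('"') and not body.startswith('"""') and "'" not in body:
                yield tok.start[0], tok.start[1], tok.string


    def main(paths):
        exit_code = 0
        for path in paths:
            with open(path, encoding='utf-8') as fh:
                source = fh.read()
            for row, col, text in find_double_quoted(source):
                print(f'{path}:{row}:{col}: prefer single quotes: {text}')
                exit_code = 1
        return exit_code


    if __name__ == '__main__':
        sys.exit(main(sys.argv[1:]))

Run as `python check_quotes.py configs/models/**/*.py`; a non-zero exit makes it usable as a CI gate or local hook.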