Unverified Commit aa2dd2b5 authored by Fengzhe Zhou, committed by GitHub

[Format] Add config lints (#892)

parent 3dbba119
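The diff below normalizes string quoting in the model config files from double quotes to single quotes, matching the newly added config lint. The lint configuration itself is not part of this excerpt; purely as a hypothetical sketch of what such a check could do (the script name and command line are assumptions, not code from this commit), a standalone checker can flag double-quoted string literals with Python's standard tokenize module:

# check_quotes.py -- hypothetical sketch, not part of this commit.
# Flags double-quoted (non-triple-quoted) string literals, mirroring the
# single-quote style that the config lint enforces on these files.
import sys
import tokenize


def double_quoted_strings(path):
    """Yield (line, column, token_text) for double-quoted string tokens."""
    with open(path, 'rb') as f:
        for tok in tokenize.tokenize(f.readline):
            if tok.type != tokenize.STRING:
                continue
            body = tok.string.lstrip('rbufRBUF')  # drop prefixes such as r'' or b''
            if body.startswith('"') and not body.startswith('"""'):
                yield tok.start[0], tok.start[1], tok.string


if __name__ == '__main__':
    found = False
    for path in sys.argv[1:]:
        for line, col, text in double_quoted_strings(path):
            print(f'{path}:{line}:{col}: prefer single quotes: {text}')
            found = True
    sys.exit(1 if found else 0)

Run it over the touched configs, e.g. python check_quotes.py path/to/config.py. In practice a repository would more likely wire this kind of check into its existing lint tooling (for example a pre-commit hook) than ship a custom script.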
@@ -24,8 +24,8 @@ api_meta_template = dict(
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='\n<|im_start|>user\n', end='<|im_end|>'),
-        dict(role="BOT", begin="\n<|im_start|>assistant\n", end='<|im_end|>', generate=True),
+        dict(role='HUMAN', begin='\n<|im_start|>user\n', end='<|im_end|>'),
+        dict(role='BOT', begin='\n<|im_start|>assistant\n', end='<|im_end|>', generate=True),
     ],
 )
 # -------------Inference Stage ----------------------------------------
@@ -34,7 +34,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='qwen-7b-chat-hf',
-        path="Qwen/Qwen-7B-Chat",
+        path='Qwen/Qwen-7B-Chat',
         tokenizer_path='Qwen/Qwen-7B-Chat',
         model_kwargs=dict(
             device_map='auto',
...
@@ -16,7 +16,7 @@ meta_template_system_patches = {
     'internlm2-chat-20b-hf': dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
 }
-_origin_models = sum([v for k, v in locals().items() if k.endswith("_model")], [])
+_origin_models = sum([v for k, v in locals().items() if k.endswith('_model')], [])
 models = []
 for m in _origin_models:
     m = deepcopy(m)
...
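The hunk above collects every module-level variable whose name ends in _model into one flat list and deep-copies each entry before patching it; the rest of the loop body is elided in this view. Purely as an illustration of the pattern (the patch logic below is an assumption, not the elided code from this file), the loop could look roughly like this:

from copy import deepcopy

# Hypothetical illustration of the collect-and-patch pattern used above.
meta_template_system_patches = {
    'internlm2-chat-20b-hf': dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
}

# Gather all model lists defined earlier in the config module.
_origin_models = sum([v for k, v in locals().items() if k.endswith('_model')], [])
models = []
for m in _origin_models:
    m = deepcopy(m)  # never mutate the shared, imported config dicts
    # Assumed behavior: if a SYSTEM-role patch is registered for this model's
    # abbr, prepend it to the chat rounds of its meta_template.
    patch = meta_template_system_patches.get(m.get('abbr'))
    if patch is not None and 'meta_template' in m:
        m['meta_template'].setdefault('round', []).insert(0, patch)
    models.append(m)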
@@ -6,7 +6,7 @@ from opencompass.models import LLaMA2AccessoryModel
 models = [
     dict(
-        abbr="Accessory_llama2_7b",
+        abbr='Accessory_llama2_7b',
         type=LLaMA2AccessoryModel,
         # additional_stop_symbols=["###"], # for models tuned with chat template # noqa
@@ -18,10 +18,10 @@ models = [
         # - consolidated.00.pth
         # - params.json
         # - tokenizer.model
-        pretrained_path="path/to/Llama-2-7b/",
-        llama_type="llama",
-        llama_config="path/to/Llama-2-7b/params.json",
-        tokenizer_path="path/to/Llama-2-7b/tokenizer.model",
+        pretrained_path='path/to/Llama-2-7b/',
+        llama_type='llama',
+        llama_config='path/to/Llama-2-7b/params.json',
+        tokenizer_path='path/to/Llama-2-7b/tokenizer.model',
         with_visual=False,
         max_seq_len=4096,
         quant=False,
...
@@ -6,7 +6,7 @@ from opencompass.models import LLaMA2AccessoryModel
 models = [
     dict(
-        abbr="Accessory_mixtral_8x7b",
+        abbr='Accessory_mixtral_8x7b',
         type=LLaMA2AccessoryModel,
         # additional_stop_symbols=["###"], # for models tuned with chat template # noqa
@@ -15,7 +15,7 @@ models = [
         # <begin> kwargs for accessory.MetaModel.from_pretrained
         # download from https://huggingface.co/Alpha-VLLM/MoE-Mixtral-7B-8Expert/tree/main/converted_sparse # noqa
         # see https://llama2-accessory.readthedocs.io/en/latest/projects/mixtral-8x7b.html for more details # noqa
-        pretrained_path="path/to/MoE-Mixtral-7B-8Expert/converted_sparse",
+        pretrained_path='path/to/MoE-Mixtral-7B-8Expert/converted_sparse',
         llama_type=None, # None for automatic probe from pretrained_path
         llama_config=None, # None for automatic probe from pretrained_path
         tokenizer_path=None, # None for automatic probe from pretrained_path
...
@@ -6,14 +6,14 @@ from opencompass.models import LLaMA2AccessoryModel
 models = [
     dict(
-        abbr="Accessory_sphinx_v2_1k",
+        abbr='Accessory_sphinx_v2_1k',
         type=LLaMA2AccessoryModel,
-        additional_stop_symbols=["###"], # for models tuned with chat template
+        additional_stop_symbols=['###'], # for models tuned with chat template
         # <begin> kwargs for accessory.MetaModel.from_pretrained
         # download from https://huggingface.co/Alpha-VLLM/LLaMA2-Accessory/tree/main/finetune/mm/SPHINX/SPHINX-v2-1k # noqa
-        pretrained_path="path/to/sphinx_v2_1k",
+        pretrained_path='path/to/sphinx_v2_1k',
         llama_type=None, # None for automatic probe from pretrained_path
         llama_config=None, # None for automatic probe from pretrained_path
         tokenizer_path=None, # None for automatic probe from pretrained_path
...
@@ -5,7 +5,7 @@ models = [
     dict(
         type=AlayaLM,
         abbr='alaya-7b-hf',
-        path="DataCanvas/Alaya-7B-Base",
+        path='DataCanvas/Alaya-7B-Base',
         tokenizer_path='DataCanvas/Alaya-7B-Base',
         tokenizer_kwargs=dict(padding_side='left',
                               truncation_side='left',
...
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='aquilachat2-34b-hf',
-        path="BAAI/AquilaChat2-34B",
+        path='BAAI/AquilaChat2-34B',
         tokenizer_path='BAAI/AquilaChat2-34B',
         model_kwargs=dict(
             device_map='auto',
...
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='aquilachat2-34b-16k-hf',
-        path="BAAI/AquilaChat2-34B-16K",
+        path='BAAI/AquilaChat2-34B-16K',
         tokenizer_path='BAAI/AquilaChat2-34B-16K',
         model_kwargs=dict(
             device_map='auto',
...
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='aquilachat2-7b-hf',
-        path="BAAI/AquilaChat2-7B",
+        path='BAAI/AquilaChat2-7B',
         tokenizer_path='BAAI/AquilaChat2-7B',
         model_kwargs=dict(
             device_map='auto',
...
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='aquilachat2-7b-16k-hf',
-        path="BAAI/AquilaChat2-7B-16K",
+        path='BAAI/AquilaChat2-7B-16K',
         tokenizer_path='BAAI/AquilaChat2-7B-16K',
         model_kwargs=dict(
             device_map='auto',
...
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan2-13b-chat-hf',
-        path="baichuan-inc/Baichuan2-13B-Chat",
+        path='baichuan-inc/Baichuan2-13B-Chat',
         tokenizer_path='baichuan-inc/Baichuan2-13B-Chat',
         tokenizer_kwargs=dict(
             padding_side='left',
...
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan2-7b-chat-hf',
-        path="baichuan-inc/Baichuan2-7B-Chat",
+        path='baichuan-inc/Baichuan2-7B-Chat',
         tokenizer_path='baichuan-inc/Baichuan2-7B-Chat',
         tokenizer_kwargs=dict(
             padding_side='left',
...
@@ -5,7 +5,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan-13b-base-hf',
-        path="baichuan-inc/Baichuan-13B-Base",
+        path='baichuan-inc/Baichuan-13B-Base',
         tokenizer_path='baichuan-inc/Baichuan-13B-Base',
         tokenizer_kwargs=dict(padding_side='left',
                               truncation_side='left',
...
@@ -5,7 +5,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan-13b-chat-hf',
-        path="baichuan-inc/Baichuan-13B-Chat",
+        path='baichuan-inc/Baichuan-13B-Chat',
         tokenizer_path='baichuan-inc/Baichuan-13B-Chat',
         tokenizer_kwargs=dict(padding_side='left',
                               truncation_side='left',
...
@@ -5,7 +5,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan-7b-hf',
-        path="baichuan-inc/baichuan-7B",
+        path='baichuan-inc/baichuan-7B',
         tokenizer_path='baichuan-inc/baichuan-7B',
         tokenizer_kwargs=dict(padding_side='left',
                               truncation_side='left',
...
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='bluelm-7b-chat-hf',
-        path="vivo-ai/BlueLM-7B-Chat",
+        path='vivo-ai/BlueLM-7B-Chat',
         tokenizer_path='vivo-ai/BlueLM-7B-Chat',
         model_kwargs=dict(
             device_map='auto',
...
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='bluelm-7b-chat-32k-hf',
-        path="vivo-ai/BlueLM-7B-Chat-32K",
+        path='vivo-ai/BlueLM-7B-Chat-32K',
         tokenizer_path='vivo-ai/BlueLM-7B-Chat-32K',
         model_kwargs=dict(
             device_map='auto',
...
@@ -13,7 +13,7 @@ models = [
         type=Gemini,
         path='gemini-pro',
         key='your keys', # The key will be obtained from Environment, but you can write down your key here as well
-        url = "your url",
+        url = 'your url',
         meta_template=api_meta_template,
         query_per_second=16,
         max_out_len=100,
...