Unverified commit aa2dd2b5, authored by Fengzhe Zhou, committed by GitHub

[Format] Add config lints (#892)

parent 3dbba119
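
The lint standardizes the style of the Python config files: single-quoted string literals, a trailing newline at end of file, and no trailing whitespace, as the hunks below show. As a rough illustration of the quote rule only (the repository presumably enforces it with a standard linter such as flake8-quotes; this sketch is not the project's actual tooling):

# Rough illustration only: flag string literals written with double quotes
# instead of single quotes, the rule the hunks below apply.
import sys
import tokenize

def find_double_quoted(path):
    # Yield (line, col) for every non-docstring string token using double quotes.
    with tokenize.open(path) as f:
        for tok in tokenize.generate_tokens(f.readline):
            if tok.type == tokenize.STRING:
                s = tok.string.lstrip('rbuRBUfF')  # drop string prefixes
                if s.startswith('"') and not s.startswith('"""'):
                    yield tok.start

if __name__ == '__main__':
    for name in sys.argv[1:]:
        for line, col in find_double_quoted(name):
            print(f'{name}:{line}:{col}: prefer single quotes')
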
@@ -24,8 +24,8 @@ api_meta_template = dict(
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='\n<|im_start|>user\n', end='<|im_end|>'),
-        dict(role="BOT", begin="\n<|im_start|>assistant\n", end='<|im_end|>', generate=True),
+        dict(role='HUMAN', begin='\n<|im_start|>user\n', end='<|im_end|>'),
+        dict(role='BOT', begin='\n<|im_start|>assistant\n', end='<|im_end|>', generate=True),
     ],
 )
 # -------------Inference Stage ----------------------------------------
@@ -34,7 +34,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='qwen-7b-chat-hf',
-        path="Qwen/Qwen-7B-Chat",
+        path='Qwen/Qwen-7B-Chat',
         tokenizer_path='Qwen/Qwen-7B-Chat',
         model_kwargs=dict(
             device_map='auto',
......
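
For context, the `_meta_template` rounds above describe how each dialogue turn is wrapped before being fed to the model. A hedged sketch of the idea (not OpenCompass's actual rendering code):

# Hedged sketch, not OpenCompass internals: how the HUMAN/BOT begin/end
# markers above wrap a dialogue into a ChatML-style prompt for Qwen-Chat.
round_spec = {
    'HUMAN': dict(begin='\n<|im_start|>user\n', end='<|im_end|>'),
    'BOT': dict(begin='\n<|im_start|>assistant\n', end='<|im_end|>', generate=True),
}

def render(dialogue):
    # dialogue: list of (role, text); text is None for the turn to generate.
    parts = []
    for role, text in dialogue:
        spec = round_spec[role]
        if spec.get('generate') and text is None:
            # Open the assistant turn and stop: the model continues from here.
            parts.append(spec['begin'])
        else:
            parts.append(spec['begin'] + text + spec['end'])
    return ''.join(parts)

print(repr(render([('HUMAN', 'Hello'), ('BOT', None)])))
# '\n<|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n'
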
@@ -16,7 +16,7 @@ meta_template_system_patches = {
     'internlm2-chat-20b-hf': dict(role='SYSTEM', begin='<|im_start|>system\n', end='<|im_end|>\n'),
 }
-_origin_models = sum([v for k, v in locals().items() if k.endswith("_model")], [])
+_origin_models = sum([v for k, v in locals().items() if k.endswith('_model')], [])
 models = []
 for m in _origin_models:
     m = deepcopy(m)
......
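
The `_origin_models` one-liner above is worth unpacking: it gathers every list bound to a variable whose name ends in `_model` and flattens them into a single list. In isolation (the `*_model` names here are invented for the example):

# Standalone illustration of the aggregation idiom above.
# sum(list_of_lists, []) concatenates the lists into one.
llama_7b_model = [dict(abbr='llama-7b-hf')]
qwen_7b_chat_model = [dict(abbr='qwen-7b-chat-hf')]

_origin_models = sum([v for k, v in locals().items() if k.endswith('_model')], [])
print([m['abbr'] for m in _origin_models])  # ['llama-7b-hf', 'qwen-7b-chat-hf']
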
@@ -6,7 +6,7 @@ from opencompass.models import LLaMA2AccessoryModel
 models = [
     dict(
-        abbr="Accessory_llama2_7b",
+        abbr='Accessory_llama2_7b',
         type=LLaMA2AccessoryModel,
         # additional_stop_symbols=["###"], # for models tuned with chat template # noqa
@@ -18,10 +18,10 @@ models = [
         # - consolidated.00.pth
         # - params.json
         # - tokenizer.model
-        pretrained_path="path/to/Llama-2-7b/",
-        llama_type="llama",
-        llama_config="path/to/Llama-2-7b/params.json",
-        tokenizer_path="path/to/Llama-2-7b/tokenizer.model",
+        pretrained_path='path/to/Llama-2-7b/',
+        llama_type='llama',
+        llama_config='path/to/Llama-2-7b/params.json',
+        tokenizer_path='path/to/Llama-2-7b/tokenizer.model',
         with_visual=False,
         max_seq_len=4096,
         quant=False,
......
@@ -6,7 +6,7 @@ from opencompass.models import LLaMA2AccessoryModel
 models = [
     dict(
-        abbr="Accessory_mixtral_8x7b",
+        abbr='Accessory_mixtral_8x7b',
         type=LLaMA2AccessoryModel,
         # additional_stop_symbols=["###"], # for models tuned with chat template # noqa
@@ -15,7 +15,7 @@ models = [
         # <begin> kwargs for accessory.MetaModel.from_pretrained
         # download from https://huggingface.co/Alpha-VLLM/MoE-Mixtral-7B-8Expert/tree/main/converted_sparse # noqa
         # see https://llama2-accessory.readthedocs.io/en/latest/projects/mixtral-8x7b.html for more details # noqa
-        pretrained_path="path/to/MoE-Mixtral-7B-8Expert/converted_sparse",
+        pretrained_path='path/to/MoE-Mixtral-7B-8Expert/converted_sparse',
         llama_type=None, # None for automatic probe from pretrained_path
         llama_config=None, # None for automatic probe from pretrained_path
         tokenizer_path=None, # None for automatic probe from pretrained_path
......
@@ -6,14 +6,14 @@ from opencompass.models import LLaMA2AccessoryModel
 models = [
     dict(
-        abbr="Accessory_sphinx_v2_1k",
+        abbr='Accessory_sphinx_v2_1k',
         type=LLaMA2AccessoryModel,
-        additional_stop_symbols=["###"], # for models tuned with chat template
+        additional_stop_symbols=['###'], # for models tuned with chat template
         # <begin> kwargs for accessory.MetaModel.from_pretrained
         # download from https://huggingface.co/Alpha-VLLM/LLaMA2-Accessory/tree/main/finetune/mm/SPHINX/SPHINX-v2-1k # noqa
-        pretrained_path="path/to/sphinx_v2_1k",
+        pretrained_path='path/to/sphinx_v2_1k',
         llama_type=None, # None for automatic probe from pretrained_path
         llama_config=None, # None for automatic probe from pretrained_path
         tokenizer_path=None, # None for automatic probe from pretrained_path
......
@@ -5,7 +5,7 @@ models = [
     dict(
         type=AlayaLM,
         abbr='alaya-7b-hf',
-        path="DataCanvas/Alaya-7B-Base",
+        path='DataCanvas/Alaya-7B-Base',
         tokenizer_path='DataCanvas/Alaya-7B-Base',
         tokenizer_kwargs=dict(padding_side='left',
                               truncation_side='left',
......
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='aquilachat2-34b-hf',
-        path="BAAI/AquilaChat2-34B",
+        path='BAAI/AquilaChat2-34B',
         tokenizer_path='BAAI/AquilaChat2-34B',
         model_kwargs=dict(
             device_map='auto',
......
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='aquilachat2-34b-16k-hf',
-        path="BAAI/AquilaChat2-34B-16K",
+        path='BAAI/AquilaChat2-34B-16K',
         tokenizer_path='BAAI/AquilaChat2-34B-16K',
         model_kwargs=dict(
             device_map='auto',
......
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='aquilachat2-7b-hf',
-        path="BAAI/AquilaChat2-7B",
+        path='BAAI/AquilaChat2-7B',
         tokenizer_path='BAAI/AquilaChat2-7B',
         model_kwargs=dict(
             device_map='auto',
......
@@ -12,7 +12,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='aquilachat2-7b-16k-hf',
-        path="BAAI/AquilaChat2-7B-16K",
+        path='BAAI/AquilaChat2-7B-16K',
         tokenizer_path='BAAI/AquilaChat2-7B-16K',
         model_kwargs=dict(
             device_map='auto',
......
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan2-13b-chat-hf',
-        path="baichuan-inc/Baichuan2-13B-Chat",
+        path='baichuan-inc/Baichuan2-13B-Chat',
         tokenizer_path='baichuan-inc/Baichuan2-13B-Chat',
         tokenizer_kwargs=dict(
             padding_side='left',
......
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan2-7b-chat-hf',
-        path="baichuan-inc/Baichuan2-7B-Chat",
+        path='baichuan-inc/Baichuan2-7B-Chat',
         tokenizer_path='baichuan-inc/Baichuan2-7B-Chat',
         tokenizer_kwargs=dict(
             padding_side='left',
@@ -26,4 +26,4 @@ models = [
         model_kwargs=dict(device_map='auto', trust_remote_code=True),
         run_cfg=dict(num_gpus=1, num_procs=1),
     )
-]
\ No newline at end of file
+]
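
The `\ No newline at end of file` marker above is diff notation, not file content: the only change in that hunk is adding the final newline. A minimal fixer sketch (in practice a hook such as pre-commit's end-of-file-fixer handles this):

# Minimal sketch: append a missing final newline to each file given on the
# command line, mirroring the end-of-file fix in the hunk above.
import sys

for path in sys.argv[1:]:
    with open(path, 'rb') as f:
        data = f.read()
    if data and not data.endswith(b'\n'):
        with open(path, 'ab') as f:
            f.write(b'\n')
        print(f'fixed: {path}')
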
@@ -5,7 +5,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan-13b-base-hf',
-        path="baichuan-inc/Baichuan-13B-Base",
+        path='baichuan-inc/Baichuan-13B-Base',
         tokenizer_path='baichuan-inc/Baichuan-13B-Base',
         tokenizer_kwargs=dict(padding_side='left',
                               truncation_side='left',
......
@@ -5,7 +5,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan-13b-chat-hf',
-        path="baichuan-inc/Baichuan-13B-Chat",
+        path='baichuan-inc/Baichuan-13B-Chat',
         tokenizer_path='baichuan-inc/Baichuan-13B-Chat',
         tokenizer_kwargs=dict(padding_side='left',
                               truncation_side='left',
......
@@ -5,7 +5,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='baichuan-7b-hf',
-        path="baichuan-inc/baichuan-7B",
+        path='baichuan-inc/baichuan-7B',
         tokenizer_path='baichuan-inc/baichuan-7B',
         tokenizer_kwargs=dict(padding_side='left',
                               truncation_side='left',
......
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='bluelm-7b-chat-hf',
-        path="vivo-ai/BlueLM-7B-Chat",
+        path='vivo-ai/BlueLM-7B-Chat',
         tokenizer_path='vivo-ai/BlueLM-7B-Chat',
         model_kwargs=dict(
             device_map='auto',
......
@@ -11,7 +11,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='bluelm-7b-chat-32k-hf',
-        path="vivo-ai/BlueLM-7B-Chat-32K",
+        path='vivo-ai/BlueLM-7B-Chat-32K',
         tokenizer_path='vivo-ai/BlueLM-7B-Chat-32K',
         model_kwargs=dict(
             device_map='auto',
......
 from opencompass.models.claude_api.claude_api import Claude
 from opencompass.utils.text_postprocessors import last_option_postprocess, first_option_postprocess
-from opencompass.models.claude_api.postprocessors import (yes_no_postprocess, humaneval_claude2_postprocess, record_postprocess,
+from opencompass.models.claude_api.postprocessors import (yes_no_postprocess, humaneval_claude2_postprocess, record_postprocess,
                                                           gsm8k_postprocess, strategyqa_pred_postprocess, mbpp_postprocess,
                                                           lcsts_postprocess)
......
@@ -22,4 +22,4 @@ models = [
         model_kwargs=dict(trust_remote_code=True, device_map='auto'),
         run_cfg=dict(num_gpus=1, num_procs=1),
     )
-]
\ No newline at end of file
+]
@@ -13,7 +13,7 @@ models = [
         type=Gemini,
         path='gemini-pro',
         key='your keys', # The key will be obtained from Environment, but you can write down your key here as well
-        url = "your url",
+        url = 'your url',
         meta_template=api_meta_template,
         query_per_second=16,
         max_out_len=100,
......
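
The `key='your keys'` comment above says the key is read from the environment when the placeholder is left in place. A hedged sketch of that fallback pattern (the variable name is assumed, not taken from the source):

# Hedged sketch of the environment fallback described in the comment above.
# 'GEMINI_API_KEY' is an assumed variable name, not from the source.
import os

key = os.environ.get('GEMINI_API_KEY', 'your keys')
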