Unverified Commit aa2dd2b5 authored by Fengzhe Zhou, committed by GitHub

[Format] Add config lints (#892)

parent 3dbba119
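
The hunks below are mechanical: the new lint rewrites double-quoted string literals as single-quoted ones and strips trailing whitespace across the config files. As a rough sketch of the quoting rule, assuming it behaves like pre-commit's double-quote-string-fixer hook (an assumption; the actual lint configuration is not shown on this page), a minimal Python version looks like this:

# Minimal sketch of the quote rule; assumes behaviour similar to
# pre-commit's double-quote-string-fixer, which this page does not confirm.
import io
import tokenize

def normalize_quotes(source: str) -> str:
    tokens = []
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if (tok.type == tokenize.STRING
                and tok.string.startswith('"')
                and not tok.string.startswith('"""')
                and "'" not in tok.string):
            # Swap the delimiters only; same length, so token positions stay valid.
            tok = tok._replace(string="'" + tok.string[1:-1] + "'")
        tokens.append(tok)
    return tokenize.untokenize(tokens)

print(normalize_quotes('path = "internlm/internlm-7b"\n'), end='')
# prints: path = 'internlm/internlm-7b'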
@@ -17,8 +17,8 @@ with read_base():
 work_dir = './outputs/internlm2-chat-keyset/'
-_origin_datasets = sum([v for k, v in locals().items() if k.endswith("_datasets")], [])
-_origin_models = sum([v for k, v in locals().items() if k.endswith("_model")], [])
+_origin_datasets = sum([v for k, v in locals().items() if k.endswith('_datasets')], [])
+_origin_models = sum([v for k, v in locals().items() if k.endswith('_model')], [])
 _vanilla_datasets = [deepcopy(d) for d in _origin_datasets]
 _vanilla_models = []
......
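
Beyond the quoting change, the hunk above shows the gathering idiom these configs rely on: read_base() pulls dataset and model definitions into the module namespace, every variable ending in _datasets (or _model) is a list, and sum(..., []) flattens them into one. A self-contained toy version (the piqa/siqa values are stand-ins, not taken from this commit):

# Stand-in lists; in a real config these come from read_base() imports.
piqa_datasets = [dict(abbr='piqa')]
siqa_datasets = [dict(abbr='siqa')]

# At module scope locals() is the module namespace, so this collects
# every *_datasets list defined above and concatenates them.
datasets = sum([v for k, v in locals().items() if k.endswith('_datasets')], [])
print(datasets)  # [{'abbr': 'piqa'}, {'abbr': 'siqa'}]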
@@ -16,5 +16,5 @@ with read_base():
 work_dir = './outputs/internlm2-keyset/'
-datasets = sum([v for k, v in locals().items() if k.endswith("_datasets")], [])
-models = sum([v for k, v in locals().items() if k.endswith("_model")], [])
+datasets = sum([v for k, v in locals().items() if k.endswith('_datasets')], [])
+models = sum([v for k, v in locals().items() if k.endswith('_model')], [])
@@ -49,4 +49,3 @@ internlm_chat_7b = dict(
 )
 
 models = [internlm_chat_20b]
-
@@ -29,7 +29,7 @@ models = [
     dict(
         type=LmdeployTisModel,
         abbr='internlm-chat-20b-lmdeploy-tis',
-        path="internlm/internlm-chat-20b",
+        path='internlm/internlm-chat-20b',
         tis_addr='0.0.0.0:33337',
         max_out_len=100,
         max_seq_len=2048,
......
@@ -29,7 +29,7 @@ models = [
     dict(
         type=TurboMindTisModel,
         abbr='internlm-chat-20b-turbomind',
-        path="internlm",
+        path='internlm',
         tis_addr='0.0.0.0:33337',
         max_out_len=100,
         max_seq_len=2048,
......
@@ -31,7 +31,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='internlm2-chat-7b-hf',
-        path="internlm/internlm2-chat-7b",
+        path='internlm/internlm2-chat-7b',
         tokenizer_path='internlm/internlm2-chat-7b',
         model_kwargs=dict(
             trust_remote_code=True,
@@ -49,7 +49,7 @@ models = [
         meta_template=_meta_template,
         run_cfg=dict(num_gpus=1, num_procs=1),
         end_str='<|im_end|>',
-        generation_kwargs = {"eos_token_id": [2, 92542], "do_sample": True},
+        generation_kwargs = {'eos_token_id': [2, 92542], 'do_sample': True},
         batch_padding=True,
     )
 ]
@@ -91,7 +91,7 @@ judge_models = [
             use_fast=False,
             trust_remote_code=True,
         ),
-        generation_kwargs = {"do_sample": True},
+        generation_kwargs = {'do_sample': True},
         max_out_len=512,
         max_seq_len=4096,
         batch_size=8,
@@ -122,4 +122,4 @@ summarizer = dict(
     type=FlamesSummarizer, judge_type = 'general'
 )
-work_dir = 'outputs/flames/'
\ No newline at end of file
+work_dir = 'outputs/flames/'
@@ -35,4 +35,3 @@ internlm_chat_7b = dict(
 )
 
 models = [internlm_chat_20b]
-
@@ -19,7 +19,7 @@ datasets = sum((v for k, v in locals().items() if k.endswith('_datasets')), [])
 internlm_7b = dict(
     type=TurboMindModel,
     abbr='internlm-7b-turbomind',
-    path="internlm/internlm-7b",
+    path='internlm/internlm-7b',
     engine_config=dict(session_len=2048,
                        max_batch_size=32,
                        rope_scaling_factor=1.0),
@@ -38,7 +38,7 @@ internlm_7b = dict(
 internlm_20b = dict(
     type=TurboMindModel,
     abbr='internlm-20b-turbomind',
-    path="internlm/internlm-20b",
+    path='internlm/internlm-20b',
     engine_config=dict(session_len=2048,
                        max_batch_size=8,
                        rope_scaling_factor=1.0),
......
@@ -18,7 +18,7 @@ models = [
     dict(
         type=TurboMindTisModel,
         abbr='internlm-chat-20b-turbomind',
-        path="internlm",
+        path='internlm',
         tis_addr='0.0.0.0:33337',
         max_out_len=100,
         max_seq_len=2048,
......
@@ -5,4 +5,4 @@ with read_base():
     from .models.llama.llama2_7b import models
 
-datasets = [*piqa_datasets, *siqa_datasets]
\ No newline at end of file
+datasets = [*piqa_datasets, *siqa_datasets]
@@ -6,11 +6,11 @@ with read_base():
     from .summarizers.lveval import summarizer
 
 models[0][
-    "path"
-] = "/path/to/your/huggingface_models/Llama-2-7b-chat-hf"
+    'path'
+] = '/path/to/your/huggingface_models/Llama-2-7b-chat-hf'
 models[0][
-    "tokenizer_path"
-] = "/path/to/your/huggingface_models/Llama-2-7b-chat-hf"
-models[0]["max_seq_len"] = 4096
-models[0]["generation_kwargs"] = dict(do_sample=False)
-models[0]["mode"] = "mid" # truncate in the middle
+    'tokenizer_path'
+] = '/path/to/your/huggingface_models/Llama-2-7b-chat-hf'
+models[0]['max_seq_len'] = 4096
+models[0]['generation_kwargs'] = dict(do_sample=False)
+models[0]['mode'] = 'mid' # truncate in the middle
@@ -10,7 +10,7 @@ with read_base():
 work_dir = 'outputs/debug/llama3-instruct'
 
-models = sum([v for k, v in locals().items() if k.endswith("_model")], [])
+models = sum([v for k, v in locals().items() if k.endswith('_model')], [])
 
 # dataset                              version    metric    mode      llama-3-8b-instruct-hf
 # --------------------                 ---------  ----------------------------  ------    ------------------------
......
@@ -41,7 +41,7 @@ for mdl in models:
 infer = dict(
     # The OpenCompass implementation of BPC currently only supports NaivePartitioner, as the sliding window approach requires the dataset to be loaded sequentially. Using other partitioner types may produce incorrect results.
-    partitioner=dict(type=NaivePartitioner), 
+    partitioner=dict(type=NaivePartitioner),
     runner=dict(
         type=LocalRunner,
         task=dict(type=OpenICLInferTask),
......
@@ -68,7 +68,7 @@ Examples:
 (give benefit of the doubt to units)
 
 Expression 1: 64
-Expression 2: 
+Expression 2:
 
 [No]
 (only mark as equivalent if both expressions are nonempty)
@@ -80,7 +80,7 @@ YOUR TASK
 Respond with only "[Yes]" or "[No]" (without quotes). Do not include a rationale.
 
 Expression 1: {obj_gold}
-Expression 2: {prediction}
+Expression 2: {prediction}
 """
@@ -99,7 +99,7 @@ for d in eng_datasets:
     d['eval_cfg']= dict(
         evaluator=dict(
             type=LMEvaluator,
-            # If you need to preprocess the prediction before judging, 
+            # If you need to preprocess the prediction before judging,
             # you can specify the pred_postprocessor function here
             pred_postprocessor=dict(type=math_judement_preprocess),
             prompt_template=dict(
@@ -112,7 +112,7 @@ for d in eng_datasets:
                 ]),
             ),
         ),
-        pred_role="BOT",
+        pred_role='BOT',
     )
 
 infer = dict(
......
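
The comment in the hunk above explains that pred_postprocessor lets you transform a prediction before the judge model sees it. The real math_judement_preprocess function lives in OpenCompass and is not shown here; a hypothetical stand-in illustrating the shape such a function takes (string in, string out):

import re

# Hypothetical example only; not OpenCompass's math_judement_preprocess.
def extract_boxed_answer(prediction: str) -> str:
    # Keep just the last \boxed{...} payload so the judge compares final
    # answers rather than the whole chain of thought.
    matches = re.findall(r'\\boxed\{([^{}]*)\}', prediction)
    return matches[-1] if matches else prediction

print(extract_boxed_answer(r'so the result is \boxed{64}.'))  # prints: 64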
@@ -18,7 +18,7 @@ models=[
     dict(
         type=HuggingFaceCausalLM,
         abbr='internlm-chat-7b-hf',
-        path="internlm/internlm-chat-7b",
+        path='internlm/internlm-chat-7b',
         tokenizer_path='internlm/internlm-chat-7b',
         tokenizer_kwargs=dict(
             padding_side='left',
@@ -45,4 +45,4 @@ summarizer = dict(
         {'name': 'winogrande', 'subsets': _winogrande_all},
         {'name': 'winogrande_std', 'subsets': _winogrande_all, 'std': True},
     ]
-)
\ No newline at end of file
+)
@@ -8,4 +8,4 @@ with read_base():
 datasets = lawbench_zero_shot_datasets + lawbench_one_shot_datasets
 for d in datasets:
-    d["infer_cfg"]["inferencer"]["save_every"] = 1
+    d['infer_cfg']['inferencer']['save_every'] = 1
@@ -90,7 +90,7 @@ judge_models = [dict(
 ## ------------- Evaluation Configuration
 eval = dict(
     partitioner=dict(
-        type=SubjectiveSizePartitioner, max_task_size=1000, mode='m2n', base_models=[gpt4], compare_models=models, 
+        type=SubjectiveSizePartitioner, max_task_size=1000, mode='m2n', base_models=[gpt4], compare_models=models,
         infer_order='random',
         judge_models=judge_models
     ),
@@ -101,4 +101,4 @@ work_dir = 'outputs/alpaca/'
-summarizer = dict(type=AlpacaSummarizer, judge_type='v2')
\ No newline at end of file
+summarizer = dict(type=AlpacaSummarizer, judge_type='v2')
@@ -60,7 +60,7 @@ gpt4_judge = dict(
     abbr='GPT4-Turbo',
     path='gpt-4-1106-preview',
     key='', # The key will be obtained from $OPENAI_API_KEY, but you can write down your key here as well
-    config='weighted_alpaca_eval_gpt4_turbo' 
+    config='weighted_alpaca_eval_gpt4_turbo'
 )
 ## ------------- Evaluation Configuration
 eval = dict(
......
@@ -25,20 +25,20 @@ api_meta_template = dict(
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin="<|begin_of_text|>user<|end_header_id|>\n\n", end="<|eot_id|>"),
-        dict(role="BOT", begin="<|begin_of_text|>assistant<|end_header_id|>\n\n", end="<|eot_id|>", generate=True),
+        dict(role='HUMAN', begin='<|begin_of_text|>user<|end_header_id|>\n\n', end='<|eot_id|>'),
+        dict(role='BOT', begin='<|begin_of_text|>assistant<|end_header_id|>\n\n', end='<|eot_id|>', generate=True),
     ],
 )
 
 models = [
     dict(
         type=HuggingFaceCausalLM,
-        abbr="llama-3-8b-instruct-hf",
-        path="meta-llama/Meta-Llama-3-8B-Instruct",
-        model_kwargs=dict(device_map="auto"),
+        abbr='llama-3-8b-instruct-hf',
+        path='meta-llama/Meta-Llama-3-8B-Instruct',
+        model_kwargs=dict(device_map='auto'),
         tokenizer_kwargs=dict(
-            padding_side="left",
-            truncation_side="left",
+            padding_side='left',
+            truncation_side='left',
             use_fast=False,
         ),
         meta_template=_meta_template,
@@ -46,7 +46,7 @@ models = [
         max_seq_len=2048,
         batch_size=8,
         run_cfg=dict(num_gpus=1, num_procs=1),
-        generation_kwargs={"eos_token_id": [128001, 128009]},
+        generation_kwargs={'eos_token_id': [128001, 128009]},
         batch_padding=True,
     )
 ]
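
For readers unfamiliar with meta_template: each entry in round wraps one chat turn in that role's begin/end strings, and generate=True marks the role whose completion the model produces. A simplified sketch of that assembly (OpenCompass's actual prompt builder handles more cases; this is an approximation for illustration):

_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='<|begin_of_text|>user<|end_header_id|>\n\n', end='<|eot_id|>'),
        dict(role='BOT', begin='<|begin_of_text|>assistant<|end_header_id|>\n\n', end='<|eot_id|>', generate=True),
    ],
)

def render(turns, template):
    # Wrap each (role, text) turn in its begin/end strings; the role
    # marked generate=True is left open for the model to continue.
    spec = {r['role']: r for r in template['round']}
    pieces = []
    for role, text in turns:
        r = spec[role]
        pieces.append(r['begin'] if r.get('generate') else r['begin'] + text + r['end'])
    return ''.join(pieces)

print(render([('HUMAN', 'Hello'), ('BOT', '')], _meta_template))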
@@ -69,7 +69,7 @@ judge_models = [dict(
     abbr='GPT4-Turbo',
     type=OpenAI,
     path='gpt-4-1106-preview',
-    key='', 
+    key='',
     meta_template=api_meta_template,
     query_per_second=1,
     max_out_len=1024,
@@ -101,4 +101,4 @@ eval = dict(
 
 summarizer = dict(
     type=ArenaHardSummarizer
-)
\ No newline at end of file
+)
@@ -25,8 +25,8 @@ api_meta_template = dict(
 _meta_template = dict(
     round=[
-        dict(role="HUMAN", begin='<|im_start|>user\n', end='<|im_end|>\n'),
-        dict(role="BOT", begin="<|im_start|>assistant\n", end='<|im_end|>\n', generate=True),
+        dict(role='HUMAN', begin='<|im_start|>user\n', end='<|im_end|>\n'),
+        dict(role='BOT', begin='<|im_start|>assistant\n', end='<|im_end|>\n', generate=True),
     ],
     eos_token_id=151645,
 )
@@ -35,7 +35,7 @@ models = [
     dict(
         type=HuggingFaceCausalLM,
         abbr='qwen1.5-7b-chat-hf',
-        path="Qwen/Qwen1.5-7B-Chat",
+        path='Qwen/Qwen1.5-7B-Chat',
         model_kwargs=dict(
             device_map='auto',
             trust_remote_code=True
@@ -79,7 +79,7 @@ judge_models = [dict(
     abbr='GPT4-Turbo',
     type=OpenAI,
     path='gpt-4-1106-preview',
-    key='', 
+    key='',
     meta_template=api_meta_template,
     query_per_second=1,
     max_out_len=1024,
@@ -108,4 +108,4 @@ eval = dict(
 
 summarizer = dict(
     type=MultiroundSummarizer
-)
\ No newline at end of file
+)