"""OpenCompass evaluation config: Llama-2-7b-chat on the LVEval long-context benchmark.

Composes the LVEval datasets, the HF Llama-2-7b-chat model, and the LVEval
summarizer via mmengine's lazy `read_base()` import mechanism, then overrides
the model entry with local paths and long-context-friendly settings.

NOTE(review): the scraped copy of this file had code-hosting UI artifacts
(gutter line numbers, "Newer Older" header) spliced into the source; they have
been removed to restore valid Python.
"""
from mmengine.config import read_base

with read_base():
    # Relative imports resolve against the surrounding config package;
    # each base config contributes a module-level variable picked up below.
    from .datasets.lveval.lveval import LVEval_datasets as datasets
    from .models.hf_llama.hf_llama2_7b_chat import models
    from .summarizers.lveval import summarizer

# Point the single model entry at a local HF checkout.
# TODO: replace the placeholder paths with a real local checkpoint directory.
models[0]['path'] = '/path/to/your/huggingface_models/Llama-2-7b-chat-hf'
models[0]['tokenizer_path'] = '/path/to/your/huggingface_models/Llama-2-7b-chat-hf'
models[0]['max_seq_len'] = 4096
# Greedy decoding for reproducible benchmark scores.
models[0]['generation_kwargs'] = dict(do_sample=False)
models[0]['mode'] = 'mid'  # truncate in the middle when input exceeds max_seq_len