# llama_7b.py
from opencompass.models import Llama2

# Please follow the instructions in the Meta AI repository
# https://github.com/facebookresearch/llama/tree/llama_v1 and download the
# LLaMA model weights and tokenizer to './models/llama/'.
#
# The `llama` package also needs to be installed. Note that the LLaMA-2
# codebase is fully compatible with LLaMA-1 checkpoints, so the LLaMA-2
# branch is used here:
#
# git clone https://github.com/facebookresearch/llama.git
# cd llama
# pip install -e .
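#
# After downloading, the directory layout is assumed to look roughly like the
# sketch below (the checkpoint file names are those Meta ships for the 7B
# release; adjust the paths in the config if yours differ):
#
#   ./models/llama/
#   |-- 7B/
#   |   |-- consolidated.00.pth
#   |   `-- params.json
#   `-- tokenizer.model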

models = [
    dict(
        abbr='llama-7b',    # abbreviation shown in result summaries
        type=Llama2,        # the Llama2 wrapper also loads LLaMA-1 checkpoints
        path='./models/llama/7B/',                        # downloaded 7B weights
        tokenizer_path='./models/llama/tokenizer.model',  # downloaded tokenizer
        max_out_len=100,
        max_seq_len=2048,
        batch_size=16,
        run_cfg=dict(num_gpus=1, num_procs=1),
    ),
]
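
# A minimal sketch of how this config might be launched with the standard
# OpenCompass `run.py` entry point (the dataset name below is illustrative
# and not part of this file; substitute the datasets you want to evaluate):
#
#   python run.py --models llama_7b --datasets piqa_gen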