"mmdet3d/datasets/vscode:/vscode.git/clone" did not exist on "ffcea26ccc8f19a2a3198b31a398825e4c3e8bba"
pod-llama.sh 8.17 KB
Newer Older
mashun1's avatar
v1  
mashun1 committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
#!/bin/bash
#
# Use this script only on fresh pods (runpod.io)!
# Otherwise, it can break your environment!
#

if [ -z "$1" ]; then
    echo "Usage: $0 <data>"
    echo "  0: no models"
    echo "  1: tinyllama-1b"
    echo "  2: codellama-7b"
    echo "  3: codellama-13b"
    echo "  4: codellama-34b"
    echo "  5: codellama-7b-instruct"
    echo "  6: codellama-13b-instruct"
    echo "  7: codellama-34b-instruct"

    exit 1
fi
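# example: bash pod-llama.sh 1   # build llama.cpp and prepare tinyllama-1b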

set -x

# setup deps
apt-get update
apt-get install -y git-lfs cmake cmake-curses-gui vim ruby
git-lfs install

if [ ! -d "/workspace" ]; then
    ln -sfn "$(pwd)" /workspace
fi

# download data
cd /workspace

# useful for cloning repos without doubling the disk size due to .git
git clone https://github.com/iboB/git-lfs-download
ln -sfn /workspace/git-lfs-download/git-lfs-download /usr/local/bin/git-lfs-download
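# invoked below as: git-lfs-download <huggingface-repo-url> [--without "<glob>"]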

# llama.cpp
cd /workspace
git clone https://github.com/ggerganov/llama.cpp

cd llama.cpp

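# the Makefile build places the CUDA-enabled binaries (llama-quantize, llama-batched, ...)
# in the repo root, which is where the ./llama-* invocations below expect them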
GGML_CUDA=1 make -j

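# pre-create the model symlinks; the targets are downloaded later, so some will dangle until then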
ln -sfn /workspace/TinyLlama-1.1B-Chat-v0.3  ./models/tinyllama-1b
ln -sfn /workspace/CodeLlama-7b-hf           ./models/codellama-7b
ln -sfn /workspace/CodeLlama-13b-hf          ./models/codellama-13b
ln -sfn /workspace/CodeLlama-34b-hf          ./models/codellama-34b
ln -sfn /workspace/CodeLlama-7b-Instruct-hf  ./models/codellama-7b-instruct
ln -sfn /workspace/CodeLlama-13b-Instruct-hf ./models/codellama-13b-instruct
ln -sfn /workspace/CodeLlama-34b-Instruct-hf ./models/codellama-34b-instruct

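# python deps for the gguf conversion scripts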
pip install -r requirements.txt

# cmake
cd /workspace/llama.cpp

mkdir build-cublas
cd build-cublas

cmake -DGGML_CUDA=1 ../
make -j
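# note: the CMake build puts its binaries under build-cublas/bin/ (see ./bin/llama-perplexity below)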

if [ "$1" -eq "0" ]; then
    exit 0
fi

# more models
if [ "$1" -eq "1" ]; then
    cd /workspace

    git-lfs-download https://huggingface.co/PY007/TinyLlama-1.1B-Chat-v0.3

    cd /workspace/llama.cpp

    python3 examples/convert_legacy_llama.py ./models/tinyllama-1b  --outfile ./models/tinyllama-1b/ggml-model-f16.gguf  --outtype f16

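    # llama-quantize arguments: <input gguf> <output gguf> <quant type>;
    # q4_0/q4_k trade some accuracy for size, q8_0 stays close to f16 quality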
    ./llama-quantize ./models/tinyllama-1b/ggml-model-f16.gguf ./models/tinyllama-1b/ggml-model-q4_0.gguf q4_0
    ./llama-quantize ./models/tinyllama-1b/ggml-model-f16.gguf ./models/tinyllama-1b/ggml-model-q4_k.gguf q4_k
    ./llama-quantize ./models/tinyllama-1b/ggml-model-f16.gguf ./models/tinyllama-1b/ggml-model-q8_0.gguf q8_0
fi

if [ "$1" -eq "2" ]; then
    cd /workspace

    git-lfs-download https://huggingface.co/codellama/CodeLlama-7b-hf  --without "*safetensors*"
    rm -v ./CodeLlama-7b-hf/*safetensors*

    cd /workspace/llama.cpp

    python3 examples/convert_legacy_llama.py ./models/codellama-7b  --outfile ./models/codellama-7b/ggml-model-f16.gguf  --outtype f16

    ./llama-quantize ./models/codellama-7b/ggml-model-f16.gguf ./models/codellama-7b/ggml-model-q4_0.gguf q4_0
    ./llama-quantize ./models/codellama-7b/ggml-model-f16.gguf ./models/codellama-7b/ggml-model-q4_k.gguf q4_k
    ./llama-quantize ./models/codellama-7b/ggml-model-f16.gguf ./models/codellama-7b/ggml-model-q8_0.gguf q8_0
fi

if [ "$1" -eq "3" ]; then
    cd /workspace

    git-lfs-download https://huggingface.co/codellama/CodeLlama-13b-hf --without "*safetensors*"
    rm -v ./CodeLlama-13b-hf/*safetensors*

    cd /workspace/llama.cpp

    python3 examples/convert_legacy_llama.py ./models/codellama-13b --outfile ./models/codellama-13b/ggml-model-f16.gguf --outtype f16

    ./llama-quantize ./models/codellama-13b/ggml-model-f16.gguf ./models/codellama-13b/ggml-model-q4_0.gguf q4_0
    ./llama-quantize ./models/codellama-13b/ggml-model-f16.gguf ./models/codellama-13b/ggml-model-q4_k.gguf q4_k
    ./llama-quantize ./models/codellama-13b/ggml-model-f16.gguf ./models/codellama-13b/ggml-model-q8_0.gguf q8_0
fi

if [ "$1" -eq "4" ]; then
    cd /workspace

    git-lfs-download https://huggingface.co/codellama/CodeLlama-34b-hf --without "*safetensors*"
    rm -v ./CodeLlama-34b-hf/*safetensors*

    cd /workspace/llama.cpp

    python3 examples/convert_legacy_llama.py ./models/codellama-34b --outfile ./models/codellama-34b/ggml-model-f16.gguf --outtype f16

    ./llama-quantize ./models/codellama-34b/ggml-model-f16.gguf ./models/codellama-34b/ggml-model-q4_0.gguf q4_0
    ./llama-quantize ./models/codellama-34b/ggml-model-f16.gguf ./models/codellama-34b/ggml-model-q4_k.gguf q4_k
    ./llama-quantize ./models/codellama-34b/ggml-model-f16.gguf ./models/codellama-34b/ggml-model-q8_0.gguf q8_0
fi

if [ "$1" -eq "5" ]; then
    cd /workspace

    git-lfs-download https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf  --without "*safetensors*"
    rm -v ./CodeLlama-7b-Instruct-hf/*safetensors*

    cd /workspace/llama.cpp

    python3 examples/convert_legacy_llama.py ./models/codellama-7b-instruct  --outfile ./models/codellama-7b-instruct/ggml-model-f16.gguf  --outtype f16

    ./llama-quantize ./models/codellama-7b-instruct/ggml-model-f16.gguf ./models/codellama-7b-instruct/ggml-model-q4_0.gguf q4_0
    ./llama-quantize ./models/codellama-7b-instruct/ggml-model-f16.gguf ./models/codellama-7b-instruct/ggml-model-q4_k.gguf q4_k
    ./llama-quantize ./models/codellama-7b-instruct/ggml-model-f16.gguf ./models/codellama-7b-instruct/ggml-model-q8_0.gguf q8_0
fi

if [ "$1" -eq "6" ]; then
    cd /workspace

    git-lfs-download https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf --without "*safetensors*"
    rm -v ./CodeLlama-13b-Instruct-hf/*safetensors*

    cd /workspace/llama.cpp

    python3 examples/convert_legacy_llama.py ./models/codellama-13b-instruct --outfile ./models/codellama-13b-instruct/ggml-model-f16.gguf --outtype f16

    ./llama-quantize ./models/codellama-13b-instruct/ggml-model-f16.gguf ./models/codellama-13b-instruct/ggml-model-q4_0.gguf q4_0
    ./llama-quantize ./models/codellama-13b-instruct/ggml-model-f16.gguf ./models/codellama-13b-instruct/ggml-model-q4_k.gguf q4_k
    ./llama-quantize ./models/codellama-13b-instruct/ggml-model-f16.gguf ./models/codellama-13b-instruct/ggml-model-q8_0.gguf q8_0
fi

if [ "$1" -eq "7" ]; then
    cd /workspace

    git-lfs-download https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf --without "*safetensors*"
    rm -v ./CodeLlama-34b-Instruct-hf/*safetensors*

    cd /workspace/llama.cpp

    python3 examples/convert_legacy_llama.py ./models/codellama-34b-instruct --outfile ./models/codellama-34b-instruct/ggml-model-f16.gguf --outtype f16

    ./llama-quantize ./models/codellama-34b-instruct/ggml-model-f16.gguf ./models/codellama-34b-instruct/ggml-model-q4_0.gguf q4_0
    ./llama-quantize ./models/codellama-34b-instruct/ggml-model-f16.gguf ./models/codellama-34b-instruct/ggml-model-q4_k.gguf q4_k
    ./llama-quantize ./models/codellama-34b-instruct/ggml-model-f16.gguf ./models/codellama-34b-instruct/ggml-model-q8_0.gguf q8_0
fi

if [ "$1" -eq "1" ]; then
    # perf + perplexity
    cd /workspace/llama.cpp/build-cublas

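    # run-all-perf.sh takes the model name, a list of quantizations, and extra CLI flags to pass through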
    make -j && ../scripts/run-all-perf.sh tinyllama-1b "f16" "-ngl 99 -t 1 -p 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,32,64,128,256,512,1024,2048 -n 128"

    ../scripts/get-wikitext-2.sh
    unzip wikitext-2-raw-v1.zip

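    # --chunks 32 limits the perplexity run to the first 32 context chunks for a quick estimate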
    make -j && ./bin/llama-perplexity -m ../models/tinyllama-1b/ggml-model-f16.gguf -f ./wikitext-2-raw/wiki.test.raw -ngl 100 --chunks 32

    # batched
    cd /workspace/llama.cpp

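    # llama-batched positional args: model, prompt, n_parallel, n_predict, n_gpu_layers
    # (so: 8 parallel sequences, 128 tokens each, all layers offloaded)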
    GGML_CUDA=1 make -j && ./llama-batched ./models/tinyllama-1b/ggml-model-f16.gguf "Hello, my name is" 8 128 999

    # batched-bench
    cd /workspace/llama.cpp

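    # positional args here follow the older batched-bench convention
    # (model, n_kv_max, is_pp_shared, ngl, mmq, then PP/TG/PL lists); verify with --help on newer builds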
    GGML_CUDA=1 make -j && ./llama-batched-bench ./models/tinyllama-1b/ggml-model-f16.gguf 4608 1 99 0 512 128 1,2,3,4,5,6,7,8,16,32

    # parallel
    cd /workspace/llama.cpp

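    # -np: number of parallel clients, -ns: total sequences to decode, -cb: continuous batching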
    GGML_CUDA=1 make -j && ./llama-parallel -m ./models/tinyllama-1b/ggml-model-f16.gguf -t 1 -ngl 100 -c 4096 -b 512 -s 1 -np 8 -ns 128 -n 100 -cb

fi

# speculative
#if [ "$1" -eq "7" ]; then
#    cd /workspace/llama.cpp
#
#    GGML_CUDA=1 make -j && ./llama-speculative -m ./models/codellama-34b-instruct/ggml-model-f16.gguf -md ./models/codellama-7b-instruct/ggml-model-q4_0.gguf -p "# Dijkstra's shortest path algorithm in Python (4 spaces indentation) + complexity analysis:\n\n" -e -ngl 999 -ngld 999 -t 4 -n 512 -c 4096 -s 21 --draft 16 -np 1 --temp 0.0
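#    # -md selects the draft model and --draft the number of tokens to draft per step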
#fi

# more benches
#GGML_CUDA=1 make -j && ./llama-batched-bench ./models/codellama-7b/ggml-model-q4_k.gguf  4096 1 99 1 512,3200 128,128,800 1
#GGML_CUDA=1 make -j && ./llama-batched-bench ./models/codellama-13b/ggml-model-q4_k.gguf 4096 1 99 1 512,3200 128,128,800 1