#!/bin/bash
#
# Launch a HuggingFace Text Generation Inference (TGI) server in Docker.
#
# Usage:
#   launch_tgi_server.sh <model-id> <max-batch-total-tokens>
#
# Arguments:
#   $1  Model ID to serve (e.g. "mistralai/Mistral-7B-v0.1").
#   $2  Value passed to TGI's --max-batch-total-tokens.
#
# Environment:
#   PORT  Host port to publish the server on (default: 8000).
#
# Models are cached under ./data on the host (mounted at /data in the
# container) so repeated launches do not re-download weights.

set -euo pipefail

# Host port; override with `PORT=9000 ./launch_tgi_server.sh ...`.
PORT="${PORT:-8000}"

if [[ $# -ne 2 ]]; then
  printf 'Usage: %s <model-id> <max-batch-total-tokens>\n' "${0##*/}" >&2
  exit 2
fi

MODEL=$1
TOKENS=$2

docker run --gpus all --shm-size 1g -p "${PORT}:80" \
           -v "${PWD}/data:/data" \
           ghcr.io/huggingface/text-generation-inference:1.4.0 \
           --model-id "${MODEL}" \
           --sharded false \
           --max-input-length 1024 \
           --max-total-tokens 2048 \
           --max-best-of 5 \
           --max-concurrent-requests 5000 \
           --max-batch-total-tokens "${TOKENS}"