#!/bin/bash
#
# launch_tgi_server.sh — run a HuggingFace text-generation-inference server
# in Docker, with all GPUs, serving on localhost:$PORT.
#
# Usage: launch_tgi_server.sh <model-id> <max-batch-total-tokens>
#   $1  model id passed to --model-id (e.g. "bigscience/bloom-560m")
#   $2  value passed to --max-batch-total-tokens
#
# Model weights are cached under ./data on the host (mounted at /data).

set -euo pipefail

PORT=8000

# Fail fast with a usage message if either required argument is missing.
MODEL=${1:?usage: $0 <model-id> <max-batch-total-tokens>}
TOKENS=${2:?usage: $0 <model-id> <max-batch-total-tokens>}

docker run --gpus all --shm-size 1g -p "${PORT}:80" \
           -v "${PWD}/data:/data" \
           ghcr.io/huggingface/text-generation-inference:0.8 \
           --model-id "$MODEL" \
           --sharded false \
           --max-input-length 1024 \
           --max-total-tokens 2048 \
           --max-best-of 5 \
           --max-concurrent-requests 5000 \
           --max-batch-total-tokens "$TOKENS"