include Makefile-flash-att
include Makefile-flash-att-v2
include Makefile-vllm
include Makefile-awq
include Makefile-eetq
include Makefile-selective-scan
include Makefile-lorax-punica
include Makefile-fbgemm
include Makefile-exllamav2
include Makefile-flashinfer

# Run the Python test suite, skipping tests marked "private"
# (those require gated models / credentials).
.PHONY: unit-tests
unit-tests:
	pytest -s -vv -m "not private" tests

# Generate the Python gRPC stubs from the protobuf definitions in ../proto/v3.
.PHONY: gen-server
gen-server:
	# Compile protos
	pip install grpcio-tools==1.62.2 mypy-protobuf==3.6.0 'types-protobuf' --no-cache-dir
	# mkdir -p succeeds if the directory already exists but still reports
	# real errors (permissions, read-only FS), unlike `mkdir ... || true`.
	mkdir -p text_generation_server/pb
	python -m grpc_tools.protoc -I../proto/v3 --python_out=text_generation_server/pb \
		--grpc_python_out=text_generation_server/pb --mypy_out=text_generation_server/pb ../proto/v3/generate.proto
	# Rewrite absolute "import ..._pb2" statements into relative imports so the
	# generated modules resolve inside the text_generation_server.pb package.
	find text_generation_server/pb/ -type f -name "*.py" -print0 -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \;
	touch text_generation_server/pb/__init__.py

# Install the Python server package and its CUDA requirements
# (stub generation must run first).
.PHONY: install-server
install-server: gen-server
	pip install pip --upgrade
	pip install -r requirements_cuda.txt
	pip install -e ".[accelerate, quantize, peft, outlines]"

# Default install entry point: CUDA build.
.PHONY: install
install: install-cuda
	echo "Installed server"
# Full CUDA install: server plus the CUDA-specific kernel builds
# (flash-attention v1/v2, vllm, fbgemm).
.PHONY: install-cuda
install-cuda: install-server install-flash-attention-v2-cuda install-vllm-cuda install-flash-attention install-fbgemm
	pip install -e ".[bnb]"
	pip install nvidia-nccl-cu12==2.22.3

# ROCm install: server plus the ROCm-specific kernel builds (no recipe of its own).
.PHONY: install-rocm
install-rocm: install-server install-flash-attention-v2-rocm install-vllm-rocm
# Launch a 2-shard development server on bloom-560m via torch.distributed.
.PHONY: run-dev
run-dev:
	SAFETENSORS_FAST_GPU=1 python -m torch.distributed.run --nproc_per_node=2 text_generation_server/cli.py serve bigscience/bloom-560m --sharded

# Regenerate the pinned requirements files from the poetry lockfile.
.PHONY: export-requirements
export-requirements:
	poetry export -o requirements_cuda.txt --without-hashes
	poetry export -o requirements_rocm.txt --without-hashes
	poetry export -o requirements_intel.txt --without-hashes