Makefile 1.51 KB
Newer Older
jixx's avatar
init  
jixx committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
include Makefile-awq
include Makefile-eetq
include Makefile-selective-scan
include Makefile-lorax-punica

# Run the Python test suite, skipping tests marked "private".
# Declared .PHONY so a stray file named `unit-tests` can't shadow the target.
.PHONY: unit-tests
unit-tests:
	pytest -s -vv -m "not private" tests

# Build and install the exllama CUDA kernels from the vendored sources.
# `cd … && …` keeps both commands in one shell (each recipe line gets its own).
.PHONY: install-exllama
install-exllama:
	cd exllama_kernels && python setup.py install

# Build and install the exllamav2 CUDA kernels from the vendored sources.
.PHONY: install-exllamav2
install-exllamav2:
	cd exllamav2_kernels && python setup.py install

# Generate the gRPC server stubs (plus mypy type stubs) from ../proto/v3
# into the text_generation_server/pb package.
.PHONY: gen-server
gen-server:
	# Compile protos
	pip install grpcio-tools==1.62.2 mypy-protobuf==3.6.0 'types-protobuf' --no-cache-dir
	# -p is idempotent and, unlike `mkdir … || true`, does not mask real
	# failures such as a permission error.
	mkdir -p text_generation_server/pb
	python -m grpc_tools.protoc -I../proto/v3 --python_out=text_generation_server/pb \
		--grpc_python_out=text_generation_server/pb --mypy_out=text_generation_server/pb ../proto/v3/generate.proto
	# protoc emits absolute `import *_pb2` statements; rewrite them to
	# relative imports so the generated modules resolve inside the package.
	find text_generation_server/pb/ -type f -name "*.py" -print0 -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \;
	touch text_generation_server/pb/__init__.py

# Install the Python server package (editable) and its pinned requirements.
# Depends on gen-server so the protobuf stubs exist before installation.
# NOTE(review): this unconditionally installs requirements_rocm.txt, yet it is
# also a prerequisite of install-cuda below — confirm that is intentional.
.PHONY: install-server
install-server: gen-server
	pip install pip --upgrade
	pip install -r requirements_rocm.txt
	# The `bnb` (bitsandbytes) extra is deliberately omitted here.
	pip install -e ".[accelerate, quantize, peft, outlines]"

# Default installation entry point; delegates to install-server.
# `@` keeps make from echoing the command, so the message prints once.
.PHONY: install
install: install-server
	@echo "Installed server"

# Backend-specific installation bundles. Both are prerequisite-only targets:
# all work happens in the listed sub-targets (attention/vllm kernel builds
# come from the included Makefile-* fragments above).
.PHONY: install-cuda install-rocm
install-cuda: install-server install-flash-attention-v2-cuda install-vllm-cuda install-flash-attention

install-rocm: install-server install-flash-attention-v2-rocm install-vllm-rocm

# Launch a 2-shard dev server on bloom-560m via torch.distributed.
.PHONY: run-dev
run-dev:
	SAFETENSORS_FAST_GPU=1 python -m torch.distributed.run --nproc_per_node=2 text_generation_server/cli.py serve bigscience/bloom-560m --sharded

# Regenerate the pinned requirements files from poetry's lock file.
.PHONY: export-requirements
export-requirements:
	poetry export -o requirements_cuda.txt --without-hashes
	poetry export -o requirements_rocm.txt --without-hashes