# Documentation:
# https://docs.sglang.ai/backend/server_arguments.html#common-launch-commands
services:
  mineru-sglang:
    image: mineru-sglang:latest
    container_name: mineru-sglang
    restart: always
    ports:
      - 30000:30000
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-sglang-server
    command:
      --host 0.0.0.0
      --port 30000
      # --enable-torch-compile  # Optionally enable torch.compile to accelerate inference by approximately 15%
      # --dp-size 2  # With multiple GPUs, use sglang's data-parallel (DP) mode to increase throughput (see the note after this block)
      # --tp-size 2  # With multiple GPUs, use tensor parallelism (TP) to expand the available VRAM
      # --mem-fraction-static 0.5  # If a single GPU runs short of VRAM, reduce the KV cache size with this parameter; if issues persist, lower it further to 0.4 or below
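      # Note (a sketch, not enabled by default): a two-GPU tensor-parallel launch would
      # combine the `--tp-size 2` flag above with a matching GPU reservation below,
      # e.g. device_ids: ["0", "1"] under deploy.resources.reservations.devices.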
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
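      # Optional tuning (a sketch; these values are assumptions, not part of the upstream file).
      # The check above uses Compose defaults; explicit settings would look like:
      #   interval: 30s
      #   timeout: 10s
      #   retries: 3
      #   start_period: 120s  # model loading can take a while before /health responds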
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]
              capabilities: [gpu]
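
# Usage (a sketch; assumes the mineru-sglang:latest image has been built locally and
# the NVIDIA Container Toolkit is installed on the host):
#   docker compose up -d
#   curl -f http://localhost:30000/health   # returns HTTP 200 once the server is ready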