Commit 497ff9fe authored by helloyongyang's avatar helloyongyang
Browse files

update docker

parent 793ec1db
[submodule "3rd/flash-attention"]
path = 3rd/flash-attention
url = https://github.com/Dao-AILab/flash-attention.git
Subproject commit a1be1cc38d18385fec82e2e1ee203d482c35c24c
# Build image for lightx2v on top of the official PyTorch CUDA 12.8 devel image
# (ships python + pip + torch 2.7.1, so no manual Python install is needed).
FROM pytorch/pytorch:2.7.1-cuda12.8-cudnn9-devel AS base

WORKDIR /app

# Non-interactive apt + UTF-8 locale for the build and runtime shell.
ENV DEBIAN_FRONTEND=noninteractive
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8

# Use the Tsinghua mirror for apt (faster/more reliable inside mainland China).
RUN sed -i 's|http://archive.ubuntu.com/ubuntu/|https://mirrors.tuna.tsinghua.edu.cn/ubuntu/|g' /etc/apt/sources.list \
    && sed -i 's|http://security.ubuntu.com/ubuntu/|https://mirrors.tuna.tsinghua.edu.cn/ubuntu/|g' /etc/apt/sources.list

# OS-level tooling: editors/shell utils, toolchain, RDMA headers (libibverbs-dev),
# video/image deps for ffmpeg/OpenCV, ccache + libnuma for the source builds below.
# update + install + cleanup stay in ONE layer so no stale apt lists are baked in.
RUN apt-get update && apt-get install -y vim tmux zip unzip wget git build-essential libibverbs-dev ca-certificates \
    curl iproute2 ffmpeg libsm6 libxext6 kmod ccache libnuma-dev \
    && apt-get clean && rm -rf /var/lib/apt/lists/*

# Use the Tsinghua PyPI mirror for all subsequent pip installs.
RUN pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple

# Build-time helpers (ninja/cmake/scikit-build-core) and dev tools (uv/ruff/pre-commit).
RUN pip install --no-cache-dir packaging ninja cmake scikit-build-core uv ruff pre-commit -U

# Build vLLM from source against the torch already in the base image
# (use_existing_torch.py strips the pinned torch requirement; --no-build-isolation
# keeps the build using that same torch).
RUN git clone https://github.com/vllm-project/vllm.git && cd vllm \
    && python use_existing_torch.py && pip install -r requirements/build.txt \
    && pip install --no-cache-dir --no-build-isolation -v -e .

# Build and install the sglang CUDA kernels; `make clean` drops build artifacts
# in the same layer to keep the image smaller.
RUN git clone https://github.com/sgl-project/sglang.git && cd sglang/sgl-kernel \
    && make build && make clean

# Python runtime dependencies for lightx2v inference.
RUN pip install --no-cache-dir diffusers transformers tokenizers accelerate safetensors opencv-python numpy imageio \
    imageio-ffmpeg einops loguru qtorch ftfy easydict

# flash-attention is now cloned directly (no longer a git submodule of lightx2v).
RUN git clone https://github.com/Dao-AILab/flash-attention.git --recursive

# Install flash-attention 2; remove the build tree in the same layer.
RUN cd flash-attention && python setup.py install && rm -rf build

# Install flash-attention 3 (hopper/) — only useful on Hopper-class GPUs.
RUN cd flash-attention/hopper && python setup.py install && rm -rf build

# Default working directory for users of the image.
WORKDIR /workspace
# Prepare Environment

We recommend using a docker environment. Here is the [dockerhub](https://hub.docker.com/r/lightx2v/lightx2v/tags) for lightx2v. Please select the tag with the latest date, for example, 25061301.

```shell
docker pull lightx2v/lightx2v:25061301
docker run --gpus all -itd --ipc=host --name [container_name] -v [mount_settings] --entrypoint /bin/bash [image_id]
```
...@@ -12,7 +12,6 @@ If you want to set up the environment yourself using conda, you can refer to the ...@@ -12,7 +12,6 @@ If you want to set up the environment yourself using conda, you can refer to the
```shell
# clone repo
git clone https://github.com/ModelTC/lightx2v.git lightx2v && cd lightx2v

conda create -n lightx2v python=3.11 && conda activate lightx2v
pip install -r requirements.txt
...@@ -22,10 +21,11 @@ pip install -r requirements.txt ...@@ -22,10 +21,11 @@ pip install -r requirements.txt
pip install transformers==4.45.2

# install flash-attention 2
git clone https://github.com/Dao-AILab/flash-attention.git --recursive
cd flash-attention && python setup.py install

# install flash-attention 3, only if hopper
cd flash-attention/hopper && python setup.py install
```
# Infer
......
# 准备环境

我们推荐使用docker环境,这是lightx2v的[dockerhub](https://hub.docker.com/r/lightx2v/lightx2v/tags),请选择一个最新日期的tag,比如25061301

```shell
docker pull lightx2v/lightx2v:25061301
docker run --gpus all -itd --ipc=host --name [容器名] -v [挂载设置] --entrypoint /bin/bash [镜像id]
```
对于中国大陆地区,若拉取镜像的时候,网络不稳定,可以从[渡渡鸟](https://docker.aityp.com/r/docker.io/lightx2v/lightx2v)上拉取
```shell
docker pull swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/lightx2v/lightx2v:25061301
```
如果你想使用conda自己搭建环境,可以参考如下步骤:

```shell
# 下载github代码
git clone https://github.com/ModelTC/lightx2v.git lightx2v && cd lightx2v

conda create -n lightx2v python=3.11 && conda activate lightx2v
pip install -r requirements.txt
...@@ -22,10 +28,11 @@ pip install -r requirements.txt ...@@ -22,10 +28,11 @@ pip install -r requirements.txt
pip install transformers==4.45.2

# 安装 flash-attention 2
git clone https://github.com/Dao-AILab/flash-attention.git --recursive
cd flash-attention && python setup.py install

# 安装 flash-attention 3, 用于 hopper 显卡
cd flash-attention/hopper && python setup.py install
```
# 推理
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment