Commit d948fb4e authored by chenpangpang

feat: fix a number of bugs

parent fdec5b2a
@@ -4,18 +4,23 @@ ARG IMAGE_UPPER=voicechat2
 ARG BRANCH=gpu
 RUN cd /root && git clone -b $BRANCH http://developer.hpccube.com/codes/chenpangpang/$IMAGE.git
 WORKDIR /root/$IMAGE/$IMAGE_UPPER
-RUN pip install -r requirements.txt
 #########
 # Prod #
 #########
 FROM image.sourcefind.cn:5000/gpu/admin/base/jupyterlab-pytorch:2.2.0-py3.10-cuda12.1-ubuntu22.04-devel
-COPY chenyh/$IMAGE/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf /root/$IMAGE/$IMAGE_UPPER/llama.cpp/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf
+ARG IMAGE=voicechat2
+ARG IMAGE_UPPER=voicechat2
+COPY chenyh/$IMAGE/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf /root/$IMAGE_UPPER/llama.cpp/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf
 COPY chenyh/$IMAGE/tts_models--en--vctk--vits /root/.local/share/tts/tts_models--en--vctk--vits
-COPY chenyh/$IMAGE/distil-whisper/large-v2 /root/$IMAGE/$IMAGE_UPPER/distil-whisper/large-v2
+COPY chenyh/$IMAGE/distil-whisper/large-v2 /root/$IMAGE_UPPER/distil-whisper/large-v2
 RUN apt-get update && apt-get install -y build-essential byobu curl wget espeak-ng ffmpeg libopus0 libopus-dev
-COPY --from=base /opt/conda/lib/python3.10/site-packages /opt/conda/lib/python3.10/site-packages
 COPY --from=base /root/$IMAGE/$IMAGE_UPPER /root/$IMAGE_UPPER
+WORKDIR /root/$IMAGE_UPPER
+# Copying the libraries alone leaves out the executables, so install them in this child image
+RUN pip install -r requirements.txt
 COPY --from=base /root/$IMAGE/启动器.ipynb /root/$IMAGE/start.sh /root/
+COPY --from=base /root/$IMAGE/assets /root/assets
 # Must be compiled on the same GPU model
 # RUN cd llama.cpp && make GGML_CUDA=1 -j
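Two details of the multi-stage build above are worth spelling out. First, `ARG` values declared before the first `FROM` do not carry into later stages, which is why the prod stage re-declares `IMAGE` and `IMAGE_UPPER` for its own `COPY` paths. Second, as the in-Dockerfile comment notes, a pip install puts importable code into `site-packages` but writes console-script launchers (such as `uvicorn`) into the interpreter's `bin` directory, so copying `site-packages` between stages is not enough; running `pip install -r requirements.txt` in the final stage restores those launchers. A minimal sanity check is sketched below, assuming the built image is tagged `voicechat2:latest` (the tag is an example, not something defined by this commit):

```bash
# Sketch only: "voicechat2:latest" is an assumed tag for the image built from this Dockerfile.
docker run --rm voicechat2:latest bash -c '
  # The library itself is importable from site-packages ...
  python -c "import uvicorn; print(uvicorn.__version__)"
  # ... and the console script exists because pip install ran in this final stage.
  which uvicorn && uvicorn --version
'
```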
 # voicechat2
+## Required models
 - https://huggingface.co/bartowski/Meta-Llama-3-8B-Instruct-GGUF/resolve/main/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf
 - distil-whisper/distil-large-v2
 - https://coqui.gateway.scarf.sh/v0.6.1_models/tts_models--en--vctk--vits.zip
+## How to build
+1. Build the image from the Dockerfile
+2. Compile on the platform: `cd llama.cpp && make GGML_CUDA=1 -j`
+3. Open the notebook page; once it has run successfully, clear the page and export the image
\ No newline at end of file
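The three README steps can be driven from a shell roughly as follows. The image tag, the assumption that the `chenyh/voicechat2/...` model files already sit in the build context next to the Dockerfile, and running step 2 from a terminal inside the running container are all illustrative choices, not something this commit prescribes:

```bash
# Step 1: build the image from the Dockerfile (the tag "voicechat2:dev" is an example).
# The COPY instructions expect the model files under chenyh/voicechat2/ in the build context.
docker build -t voicechat2:dev .

# Step 2: from a terminal inside the running container on the target platform,
# compile llama.cpp for the same GPU model that will serve the image
# (this is the commented-out make line from the Dockerfile).
# With the default build args the repo lands in /root/voicechat2.
cd /root/voicechat2/llama.cpp && make GGML_CUDA=1 -j

# Step 3 is interactive: run 启动器.ipynb, wait for the Uvicorn banner,
# clear the notebook page, then export the image from the platform.
```

Compiling in step 2 on the platform rather than at image-build time keeps the CUDA kernels matched to the GPU model that will actually run them, which is the reason the `make` line stays commented out in the Dockerfile.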
@@ -9,7 +9,7 @@
    "source": [
     "## Notes\n",
     "- Startup loads the models and takes about one minute\n",
-    "- To start or restart, click \"Restart and run all cells\" in the Notebook toolbar\n",
+    "- To start or restart, click \"Restart and run all cells\" in the Notebook toolbar; once `Uvicorn running on http://0.0.0.0:8003 (Press CTRL+C to quit)` appears, startup has succeeded and the page can be opened\n",
     "- Open the page as follows:\n",
     "  - Open \"Custom service\" in the console and set the custom-service access port to 8000\n",
     "\n",
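The updated notebook text tells the user to wait for the `Uvicorn running on http://0.0.0.0:8003` banner before opening the custom service on port 8000. If you prefer to script that wait instead of watching the cell output, a small polling loop is enough; this is only a sketch and is not part of the notebook or the repo:

```bash
# Poll the backend the notebook starts on port 8003; curl exits non-zero until
# the socket accepts connections, so the loop ends once Uvicorn is up.
until curl -s -o /dev/null http://127.0.0.1:8003; do
  echo "waiting for the backend on :8003 ..."
  sleep 5
done
echo "backend is up; open the platform's custom service on port 8000"
```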