Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
chenpangpang
voicechat2
Commits
fdec5b2a
Commit
fdec5b2a
authored
Aug 12, 2024
by
chenpangpang
Browse files
feat:解决了notebook启动uvicorn显示不正常的问题
parent
2b67188c
Changes
6
Hide whitespace changes
Inline
Side-by-side
Showing
6 changed files
with
67 additions
and
17 deletions
+67
-17
Dockerfile
Dockerfile
+15
-6
assets/示例.png
assets/示例.png
+0
-0
export-hf-model.py
export-hf-model.py
+25
-0
start.sh
start.sh
+1
-2
voicechat2/run.py
voicechat2/run.py
+19
-0
启动器.ipynb
启动器.ipynb
+7
-9
No files found.
Dockerfile
View file @
fdec5b2a
FROM
image.sourcefind.cn:5000/gpu/admin/base/jupyterlab-pytorch:2.2.0-py3.10-cuda12.1-ubuntu22.04-devel
FROM
image.sourcefind.cn:5000/gpu/admin/base/jupyterlab-pytorch:2.2.0-py3.10-cuda12.1-ubuntu22.04-devel
as base
ARG
IMAGE=voicechat2
ARG
IMAGE_UPPER=voicechat2
ARG
BRANCH=gpu
RUN
cd
/root
&&
git clone
-b
$BRANCH
http://developer.hpccube.com/codes/chenpangpang/
$IMAGE
.git
WORKDIR
/root/$IMAGE/$IMAGE_UPPER
RUN
apt-get update
&&
apt-get
install
-y
build-essential byobu curl wget espeak-ng ffmpeg libopus0 libopus-dev
RUN
pip
install
-r
requirements.txt
RUN
cd
llama.cpp
&&
make
GGML_CUDA
=
1
-j
COPY
chenyh/$IMAGE/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf /root/$IMAGE/$IMAGE_UPPER/llama.cpp/
COPY
chenyh/$IMAGE/tts_models--en--vctk--vits /root/.local/share/tts/
#COPY chenyh/$IMAGE/distil-whisper/large-v2 /root/$IMAGE/$IMAGE_UPPER/
#########
# Prod #
#########
FROM
image.sourcefind.cn:5000/gpu/admin/base/jupyterlab-pytorch:2.2.0-py3.10-cuda12.1-ubuntu22.04-devel
COPY
chenyh/$IMAGE/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf /root/$IMAGE/$IMAGE_UPPER/llama.cpp/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf
COPY
chenyh/$IMAGE/tts_models--en--vctk--vits /root/.local/share/tts/tts_models--en--vctk--vits
COPY
chenyh/$IMAGE/distil-whisper/large-v2 /root/$IMAGE/$IMAGE_UPPER/distil-whisper/large-v2
RUN
apt-get update
&&
apt-get
install
-y
build-essential byobu curl wget espeak-ng ffmpeg libopus0 libopus-dev
COPY
--from=base /opt/conda/lib/python3.10/site-packages /opt/conda/lib/python3.10/site-packages
COPY
--from=base /root/$IMAGE/$IMAGE_UPPER /root/$IMAGE_UPPER
COPY
--from=base /root/$IMAGE/启动器.ipynb /root/$IMAGE/start.sh /root/
# 需要在同种型号gpu上编译
# RUN cd llama.cpp && make GGML_CUDA=1 -j
assets/示例.png
0 → 100644
View file @
fdec5b2a
357 KB
export-hf-model.py
0 → 100644
View file @
fdec5b2a
import
os
import
shutil
MODEL_NAME
=
"distil-whisper/large-v2"
HF_CACHE_DIR
=
"/root/.cache/huggingface/hub/"
hash_code
=
None
# models--distil-whisper--large-v2/snapshots/66bb165856c86b9eae9dba7830c0cd7d859f4ef4/"
for
cache_model_name
in
os
.
listdir
(
HF_CACHE_DIR
):
flag
=
False
for
model_name_str_split
in
MODEL_NAME
.
split
(
"/"
):
if
model_name_str_split
in
cache_model_name
:
flag
=
True
else
:
flag
=
False
break
if
flag
:
if
hash_code
is
None
:
full_path
=
os
.
path
.
join
(
HF_CACHE_DIR
,
cache_model_name
,
"snapshots"
,
os
.
listdir
(
os
.
path
.
join
(
HF_CACHE_DIR
,
cache_model_name
,
"snapshots"
))[
0
])
else
:
full_path
=
os
.
path
.
join
(
HF_CACHE_DIR
,
cache_model_name
,
"snapshots"
,
hash_code
)
os
.
makedirs
(
MODEL_NAME
,
exist_ok
=
True
)
for
filename
in
os
.
listdir
(
full_path
):
shutil
.
copy
(
os
.
path
.
join
(
full_path
,
filename
),
os
.
path
.
join
(
MODEL_NAME
,
filename
))
print
(
"copy: "
,
os
.
path
.
join
(
full_path
,
filename
),
" to "
,
os
.
path
.
join
(
MODEL_NAME
,
filename
))
start.sh
View file @
fdec5b2a
#!/bin/bash
export
PATH
=
/opt/conda/lib/python3.10/site-packages/ninja/data/bin:
$PATH
cd
/root/voicechat2
python
app
.py
python
run
.py
\ No newline at end of file
voicechat2/run.py
0 → 100644
View file @
fdec5b2a
import
os
from
multiprocessing
import
Pool
cmd_list
=
[
"uvicorn voicechat2:app --host 0.0.0.0 --port 8000 --reload"
,
"uvicorn srt-server:app --host 0.0.0.0 --port 8001 --reload"
,
"llama.cpp/llama-server --host 127.0.0.1 --port 8002 -m llama.cpp/Meta-Llama-3-8B-Instruct-Q4_K_M.gguf
\
-ngl 99 -c 8192"
,
"uvicorn tts-server:app --host 0.0.0.0 --port 8003"
]
def
run
(
cmd
):
os
.
system
(
cmd
)
if
__name__
==
"__main__"
:
pool
=
Pool
(
4
)
# 创建拥有3个进程数量的进程池
pool
.
map
(
run
,
cmd_list
)
pool
.
close
()
# 关闭进程池,不再接受新的进程
pool
.
join
()
# 主进程阻塞等待子进程的退出
启动器.ipynb
View file @
fdec5b2a
...
...
@@ -8,17 +8,15 @@
},
"source": [
"## 说明\n",
"- 启动需要加载模型,需要2分钟左右的时间\n",
"- 启动和重启 Notebook 点上方工具栏中的「重启并运行所有单元格」。出现如下内容就算成功了:\n",
" - `Running on local URL: http://0.0.0.0:7860`\n",
" - `Running on public URL: https://xxxxxxxxxxxxxxx.gradio.live`\n",
"- 启动需要加载模型,需要1分钟左右的时间\n",
"- 启动和重启 Notebook 点上方工具栏中的「重启并运行所有单元格」\n",
"- 通过以下方式开启页面:\n",
" - 控制台打开「自定义服务」了,访问自定义服务端口号设置为7860\n",
" - 直接打开显示的公开链接`public URL`\n",
" - 控制台打开「自定义服务」,访问自定义服务端口号设置为8000\n",
"\n",
"## 功能介绍\n",
"- 原项目地址:https://github.com/lhl/voicechat2\n",
"- voicechat2:2D图片到3D模型转化工具,单张图片仅需10秒即可生成高质量3D模型"
"- voicechat2:一款可交互的AI语音聊天器\n",
""
]
},
{
...
...
@@ -26,8 +24,8 @@
"execution_count": null,
"id": "53a96614-e2d2-4710-a82b-0d5ca9cb9872",
"metadata": {
"
tags": []
,
"
is_executing": true
"
is_executing": true
,
"
tags": []
},
"outputs": [],
"source": [
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment