OPENBLAS_NUM_THREADS = 1
no_proxy = localhost, 127.0.0.1, ::1
# You can change the location of the models and other assets by editing the values below
weight_root = assets/weights
weight_uvr5_root = assets/uvr5_weights
index_root = logs
outside_index_root = assets/indices
rmvpe_root = assets/rmvpe
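These entries are plain environment variables; a minimal sketch of reading them at runtime with python-dotenv (an assumption — the project loads them through its own config code) could look like this:
```python
# Minimal sketch: read the model-location variables above, assuming they are saved as ".env".
import os
from dotenv import load_dotenv

load_dotenv()  # populate os.environ from .env in the working directory
weight_root = os.getenv("weight_root", "assets/weights")
index_root = os.getenv("index_root", "logs")
rmvpe_root = os.getenv("rmvpe_root", "assets/rmvpe")
print(weight_root, index_root, rmvpe_root)
```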
.DS_Store
__pycache__
/TEMP
*.pyd
.venv
/opt
tools/aria2c/
tools/flag.txt
# Imported from huggingface.co/lj1995/VoiceConversionWebUI
/pretrained
/pretrained_v2
/uvr5_weights
hubert_base.pt
rmvpe.onnx
rmvpe.pt
# Generated by RVC
/logs
/weights
# To set a Python version for the project
.tool-versions
/runtime
/assets/weights/*
ffmpeg.*
ffprobe.*
# Contributing Rules
1. Generally, the author `@RVC-Boss` will reject all algorithm changes unless they fix a code-level error or warning.
2. You can contribute to other parts of this repo, such as translations and the WebUI, but please keep your changes as small as possible.
3. All changes need to be approved by `@RVC-Boss`, so your PR may be put on hold.
4. Please accept our apologies for any inconvenience caused.
# syntax=docker/dockerfile:1
FROM nvidia/cuda:11.6.2-cudnn8-runtime-ubuntu20.04
EXPOSE 7865
WORKDIR /app
COPY . .
# Install dependencies needed to add PPAs
RUN apt-get update && \
apt-get install -y -qq ffmpeg aria2 && apt clean && \
apt-get install -y software-properties-common && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Add the deadsnakes PPA to get Python 3.9
RUN add-apt-repository ppa:deadsnakes/ppa
# Install Python 3.9 and pip
RUN apt-get update && \
apt-get install -y build-essential python-dev python3-dev python3.9-distutils python3.9-dev python3.9 curl && \
apt-get clean && \
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 && \
curl https://bootstrap.pypa.io/get-pip.py | python3.9
# Set Python 3.9 as the default
RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.9 1
RUN python3 -m pip install --no-cache-dir -r requirements.txt
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d assets/pretrained_v2/ -o D40k.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d assets/pretrained_v2/ -o G40k.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d assets/pretrained_v2/ -o f0D40k.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d assets/pretrained_v2/ -o f0G40k.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d assets/uvr5_weights/ -o HP2-人声vocals+非人声instrumentals.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d assets/uvr5_weights/ -o HP5-主旋律人声vocals+其他instrumentals.pth
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d assets/hubert -o hubert_base.pt
RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt -d assets/rmvpe -o rmvpe.pt
VOLUME [ "/app/weights", "/app/opt" ]
CMD ["python3", "infer-web.py"]
MIT License
Copyright (c) 2023 liujing04
Copyright (c) 2023 源文雨
Copyright (c) 2023 Ftps
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
This software and its related code are open-sourced under the MIT license. The author has no control over the software; users of the software and those who distribute audio produced with it bear full responsibility.
If you do not accept these terms, you may not use or reference any code or files inside the package. The permission and warranty terms are those of the MIT License reproduced above.
The licenses of the related libraries are as follows:
ContentVec
https://github.com/auspicious3000/contentvec/blob/main/LICENSE
MIT License
VITS
https://github.com/jaywalnut310/vits/blob/main/LICENSE
MIT License
HIFIGAN
https://github.com/jik876/hifi-gan/blob/master/LICENSE
MIT License
gradio
https://github.com/gradio-app/gradio/blob/main/LICENSE
Apache License 2.0
ffmpeg
https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3
https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2021-02-28-12-32/ffmpeg-n4.3.2-160-gfbb9368226-win64-lgpl-4.3.zip
LGPLv3 License
MIT License
ultimatevocalremovergui
https://github.com/Anjok07/ultimatevocalremovergui/blob/master/LICENSE
https://github.com/yang123qwe/vocal_separation_by_uvr5
MIT License
audio-slicer
https://github.com/openvpi/audio-slicer/blob/main/LICENSE
MIT License
PySimpleGUI
https://github.com/PySimpleGUI/PySimpleGUI/blob/master/license.txt
LGPLv3 License
# RVC
A simple, easy-to-use voice conversion framework based on VITS. Good results can be obtained even when training on small amounts of data, which makes it convenient for livestreaming and entertainment.
## Paper
`Conditional Variational Autoencoder with Adversarial Learning for End-to-End Text-to-Speech`
- https://proceedings.mlr.press/v139/kim21f/kim21f.pdf
## Model Structure
VITS is an end-to-end, flow-based TTS model that combines a VAE, normalizing flows, and a GAN. It introduces a stochastic duration predictor to handle varying speech rhythms and aligns text and audio with Monotonic Alignment Search; a sketch of the underlying objective follows the figures below.
<div align=center>
<img src="./doc/structure.png"/>
</div>
<div align=center>
<img src="./doc/structure_info.png"/>
</div>
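As a reminder (a standard formulation from the VITS paper, not code in this repository), the CVAE part of that objective maximizes an evidence lower bound conditioned on the text/content $c$; adversarial, feature-matching and duration losses are added on top:
```
\log p_\theta(x \mid c) \;\ge\;
\mathbb{E}_{q_\phi(z \mid x)}\big[\log p_\theta(x \mid z, c)\big]
\;-\; D_{\mathrm{KL}}\big(q_\phi(z \mid x)\,\|\,p_\theta(z \mid c)\big)
```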
## Algorithm
RVC trains an encoder-decoder pair and performs voice conversion by swapping in a different decoder (or fine-tuning the decoder's weights); a minimal sketch follows the figure below.
<div align=center>
<img src="./doc/algorithm.png"/>
</div>
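A toy sketch of this idea, assuming one shared content encoder and one decoder per voice (illustrative only, not the project's actual network):
```python
# Toy illustration: swapping the decoder converts the "voice" while the content stays fixed.
import torch
import torch.nn as nn

torch.manual_seed(0)
encoder = nn.Sequential(nn.Linear(256, 192), nn.ReLU())   # shared, speaker-agnostic content encoder
decoder_target = nn.Sequential(nn.Linear(192, 256))       # decoder trained on the target voice

frames = torch.randn(100, 256)        # stand-in for source audio features
content = encoder(frames)             # content representation of the source speech
converted = decoder_target(content)   # decoded with the target voice's decoder
print(converted.shape)                # torch.Size([100, 256])
```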
## Environment Setup
```
mv Retrieval-based-Voice-Conversion-WebUI_pytorch Retrieval-based-Voice-Conversion-WebUI # drop the framework-name suffix
```
### Docker (Method 1)
```
docker pull image.sourcefind.cn:5000/dcu/admin/base/pytorch:2.1.0-ubuntu20.04-dtk24.04.1-py3.10
# Replace <your IMAGE ID> with the ID of the image pulled above; for this image it is a4dd5be0ca23
docker run -p 7865:7865 -it --shm-size=64G -v $PWD/Retrieval-based-Voice-Conversion-WebUI:/home/Retrieval-based-Voice-Conversion-WebUI -v /opt/hyhal:/opt/hyhal:ro --privileged=true --device=/dev/kfd --device=/dev/dri/ --group-add video --name rvc <your IMAGE ID> bash
cd /home/Retrieval-based-Voice-Conversion-WebUI
pip install -r requirements.txt # requirements.txt
apt install ffmpeg # on other systems, adapt the Ubuntu installation method
pip install ffmpeg-python # ffmpeg must be installed before ffmpeg-python for it to work properly
```
`If ffmpeg cannot be installed, switch the apt sources to the Tsinghua mirror:`
```
cp /etc/apt/sources.list /etc/apt/sources.list.backup
vim /etc/apt/sources.list
# Tsinghua University mirror:
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal main restricted
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates main restricted
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal universe
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates universe
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-updates multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-backports main restricted universe multiverse
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security main restricted
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security universe
deb https://mirrors.tuna.tsinghua.edu.cn/ubuntu/ focal-security multiverse
# Append the Tsinghua mirror entries above to the end of sources.list
apt update
```
### Dockerfile (Method 2)
```
cd Retrieval-based-Voice-Conversion-WebUI/docker
docker build --no-cache -t rvc:latest .
docker run -p 7865:7865 --shm-size=64G -v $PWD/Retrieval-based-Voice-Conversion-WebUI:/home/Retrieval-based-Voice-Conversion-WebUI -v /opt/hyhal:/opt/hyhal:ro --privileged=true --device=/dev/kfd --device=/dev/dri/ --group-add video -v $PWD/../../Retrieval-based-Voice-Conversion-WebUI:/home/Retrieval-based-Voice-Conversion-WebUI -it rvc bash
# If installing the environment via the Dockerfile takes a long time, comment out the pip install step inside it and install the Python libraries after the container starts: pip install -r requirements.txt
apt install ffmpeg # on other systems, adapt the Ubuntu installation method
pip install ffmpeg-python # ffmpeg must be installed before ffmpeg-python for it to work properly
# If ffmpeg cannot be installed, add the Tsinghua mirror as in Method 1
```
### Anaconda (Method 3)
1. The DCU-specific deep-learning libraries required by this project can be downloaded and installed from the Guanghe (HPC Cube) developer community:
- https://developer.hpccube.com/tool/
```
DTK driver: dtk24.04.1
Python: python3.10
torch:2.1.0
torchvision:0.16.0
```
`Tips: the DTK driver, Python, torch and other DCU-related tool versions above must match one another exactly.`
2. Install the remaining, non-DCU-specific libraries from requirements.txt
```
pip install -r requirements.txt # requirements.txt
apt install ffmpeg # on other systems, adapt the Ubuntu installation method
pip install ffmpeg-python # ffmpeg must be installed before ffmpeg-python for it to work properly
# If ffmpeg cannot be installed, add the Tsinghua mirror as in Method 1
```
`Tips: chip version >= K100AI and dtk version >= 24.04.1 are recommended.`
## Dataset
Training this project requires clean, isolated source and target voices. The tools below can help with initial preparation; experienced users can ignore them and work directly in professional software:
Online audio editing/recording tool: [`喜马拉雅音剪`](https://yunjianji.ximalaya.com/); online YouTube audio extraction tool: [`YtMp3`](https://ytmp3s.nu/Ok4t/); vocal/background-music separation tool (bundled with this project's web version): `uvr5`, used as shown below:
<div align=center>
<img src="./doc/split.png"/>
</div>
This experiment has Dong Yuhui imitating Liu Dehua's (Andy Lau's) voice; all dry vocals were obtained with the tools above. The experiment is for entertainment and open-source algorithm research only, with no commercial use. The training-data directory structure is as follows:
```
assets/
├── vocal_liudehua.mp3_10.wav
├── source_audio/
│ ├── dongyuhui.mp3
```
## Training
### Single machine, single GPU
Prepare the pretrained weights as follows:
```
# Base pretrained weights
cp -r VoiceConversionWebUI/pretrained_v2/f0G40k.pth assets/pretrained_v2/
cp -r VoiceConversionWebUI/pretrained_v2/f0D40k.pth assets/pretrained_v2/
# Speech-recognition (HuBERT) pretrained weights
cp -r VoiceConversionWebUI/hubert_base.pt assets/hubert/
# Vocal/background separation pretrained weights
cp -r VoiceConversionWebUI/uvr5_weights/* assets/uvr5_weights/
# RMVPE vocal pitch extraction pretrained weights
cp -r VoiceConversionWebUI/rmvpe.pt assets/rmvpe/
```
```
export HIP_VISIBLE_DEVICES=0
python infer-web.py
# Then open a URL like the following in a local browser for the subsequent web operations: http://<server IP>:7865/ or http://localhost:7865/
# Fill in the required paths as shown below and click through the numbered steps in order to complete training, or simply use one-click training. If clicking has no effect, refreshing the page usually fixes it (the upstream project is not very stable and may print some errors, but training can still finish).
```
<div align=center>
<img src="./doc/train.png"/>
</div>
For more information, see the upstream project's [`README_origin`](./README_origin.md).
## Inference
### Single machine, single GPU
```
export HIP_VISIBLE_DEVICES=0
python infer-web.py
# Then open a URL like the following in a local browser for the subsequent web operations: http://<server IP>:7865/ or http://localhost:7865/
# Fill in the required paths as shown below. Fill in "Auto-detect index path and select from the dropdown:" first, otherwise errors are likely; finally click convert. If clicking has no effect or an error appears, refreshing the whole page usually fixes it (the upstream project is not very stable).
# In this experiment, RVC inference speed on the DCU K100AI is about 3.4x that of the GPU A800.
```
<div align=center>
<img src="./doc/infer.png"/>
</div>
## Results
Input and output behave as on the inference page above; click play or download to listen:
<div align=center>
<img src="./doc/result.png"/>
</div>
### Accuracy
The DCU K100AI matches the GPU A800 in accuracy. Training framework: PyTorch.
## Application Scenarios
### Algorithm Category
`Speech synthesis`
### Key Application Industries
`Livestreaming, film & TV, e-commerce`
## Pretrained Weights
Fast download center for pretrained weights: [SCNet AIModels](http://113.200.138.88:18080/aimodels). The pretrained weights used in this project can be downloaded from the fast-download channel: [VoiceConversionWebUI](http://113.200.138.88:18080/aimodels/lj1995/VoiceConversionWebUI)
Hugging Face pretrained weights: [lj1995/VoiceConversionWebUI](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main)
## Source Repository and Issue Feedback
- http://developer.hpccube.com/codes/modelzoo/retrieval-based-voice-conversion-webui_pytorch.git
## References
- https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git
<div align="center">
<h1>Retrieval-based-Voice-Conversion-WebUI</h1>
An easy-to-use voice conversion framework based on VITS<br><br>
[![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange
)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)
<img src="https://counter.seku.su/cmoe?name=rvc&theme=r34" /><br>
[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI.ipynb)
[![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/LICENSE)
[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/)
[![Discord](https://img.shields.io/badge/RVC%20Developers-Discord-7289DA?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/HcsmBBGyVk)
[**Changelog**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/docs/Changelog_CN.md) | [**FAQ**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98%E8%A7%A3%E7%AD%94) | [**AutoDL · Train an AI singer for 0.5 RMB**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/wiki/Autodl%E8%AE%AD%E7%BB%83RVC%C2%B7AI%E6%AD%8C%E6%89%8B%E6%95%99%E7%A8%8B) | [**Comparison experiment records**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/wiki/%E5%AF%B9%E7%85%A7%E5%AE%9E%E9%AA%8C%C2%B7%E5%AE%9E%E9%AA%8C%E8%AE%B0%E5%BD%95) | [**Online demo**](https://modelscope.cn/studios/FlowerCry/RVCv2demo)
[**English**](./docs/en/README.en.md) | [**中文简体**](./README.md) | [**日本語**](./docs/jp/README.ja.md) | [**한국어**](./docs/kr/README.ko.md) ([**韓國語**](./docs/kr/README.ko.han.md)) | [**Français**](./docs/fr/README.fr.md) | [**Türkçe**](./docs/tr/README.tr.md) | [**Português**](./docs/pt/README.pt.md)
</div>
> The base model is trained on nearly 50 hours of the open-source, high-quality VCTK training set, so there are no copyright concerns; feel free to use it.
> Look forward to the RVCv3 base model: more parameters, more data, better results, roughly the same inference speed, and less training data required.
<table>
<tr>
<td align="center">Training & inference UI</td>
<td align="center">Real-time voice conversion UI</td>
</tr>
<tr>
<td align="center"><img src="https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/assets/129054828/092e5c12-0d49-4168-a590-0b0ef6a4f630"></td>
<td align="center"><img src="https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/assets/129054828/730b4114-8805-44a1-ab1a-04668f3c30a6"></td>
</tr>
<tr>
<td align="center">go-web.bat</td>
<td align="center">go-realtime-gui.bat</td>
</tr>
<tr>
<td align="center">You can freely choose which operation to perform.</td>
<td align="center">We have achieved 170 ms end-to-end latency. With ASIO input/output devices, 90 ms end-to-end latency is possible, but it depends heavily on hardware driver support.</td>
</tr>
</table>
## Introduction
This repository has the following features:
+ Uses top-1 retrieval to replace the input source's features with training-set features, eliminating timbre leakage (see the sketch after this list)
+ Trains quickly even on relatively weak GPUs
+ Gets good results even with small amounts of training data (collecting at least 10 minutes of low-noise speech is recommended)
+ Timbre can be altered through model fusion (using ckpt-merge in the ckpt-processing tab)
+ Simple, easy-to-use web interface
+ Can invoke the UVR5 model to quickly separate vocals and accompaniment
+ Uses the state-of-the-art [vocal pitch extraction algorithm InterSpeech2023-RMVPE](#referenced-projects) to eliminate the muted-sound problem; gives the best results (noticeably so) while being faster and lighter on resources than crepe_full
+ Acceleration support for AMD and Intel GPUs
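A minimal sketch of that top-1 retrieval step, assuming a prebuilt FAISS index over the training-set content features (illustrative only; the `index_rate` blend mirrors the WebUI option of the same name):
```python
# Illustrative top-1 retrieval: swap each frame's feature for its nearest training-set
# feature, then blend with the original by index_rate (assumed behaviour, not the exact code).
import numpy as np
import faiss

dim = 256
train_feats = np.random.rand(10000, dim).astype("float32")  # stand-in for training-set features
index = faiss.IndexFlatL2(dim)
index.add(train_feats)

def retrieve_blend(source_feats: np.ndarray, index_rate: float = 0.75) -> np.ndarray:
    _, idx = index.search(source_feats.astype("float32"), 1)  # top-1 neighbour per frame
    retrieved = train_feats[idx[:, 0]]
    return index_rate * retrieved + (1.0 - index_rate) * source_feats

print(retrieve_blend(np.random.rand(100, dim).astype("float32")).shape)  # (100, 256)
```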
Check out our [demo video](https://www.bilibili.com/video/BV1pm4y1z7Gm/) here!
## Environment Setup
The following commands must be run in an environment with a Python version above 3.8.
### Universal method for Windows/Linux/MacOS and other platforms
Pick any one of the methods below.
#### 1. Install dependencies via pip
1. Install PyTorch and its core dependencies; skip if already installed. Reference: https://pytorch.org/get-started/locally/
```bash
pip install torch torchvision torchaudio
```
2. On Windows with an Nvidia Ampere (RTX 30xx) card, per the experience reported in #21, you need to specify the CUDA version matching PyTorch
```bash
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117
```
3. Install the dependencies matching your GPU
- Nvidia GPUs
```bash
pip install -r requirements.txt
```
- AMD/Intel GPUs
```bash
pip install -r requirements-dml.txt
```
- AMD GPUs with ROCm (Linux)
```bash
pip install -r requirements-amd.txt
```
- Intel GPUs with IPEX (Linux)
```bash
pip install -r requirements-ipex.txt
```
#### 2. Install dependencies via Poetry
Install the Poetry dependency manager; skip if already installed. Reference: https://python-poetry.org/docs/#installation
```bash
curl -sSL https://install.python-poetry.org | python3 -
```
When installing dependencies through Poetry, Python 3.7-3.10 is recommended; other versions will conflict when installing llvmlite==0.39.0
```bash
poetry init -n
poetry env use "path to your python.exe"
poetry run pip install -r requirements.txt
```
### MacOS
Dependencies can be installed via `run.sh`
```bash
sh ./run.sh
```
## Preparing Other Pre-models
RVC needs some other pre-models for inference and training.
You can download them from our [Hugging Face space](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/).
### 1. Download assets
Below is a checklist of all the pre-models and other files RVC needs. Scripts to download them are in the `tools` folder.
- ./assets/hubert/hubert_base.pt
- ./assets/pretrained
- ./assets/uvr5_weights
If you want to use v2 models, you additionally need to download
- ./assets/pretrained_v2
### 2. Install ffmpeg
Skip if ffmpeg and ffprobe are already installed.
#### Ubuntu/Debian users
```bash
sudo apt install ffmpeg
```
#### MacOS users
```bash
brew install ffmpeg
```
#### Windows users
Download the following and place them in the root directory.
- Download [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe)
- Download [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe)
### 3. Download the files needed by the RMVPE vocal pitch extraction algorithm
If you want to use the latest RMVPE vocal pitch extraction algorithm, download the pitch extraction model parameters and place them in the RVC root directory.
- Download [rmvpe.pt](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/rmvpe.pt)
#### Download the DML environment for rmvpe (optional, AMD/Intel GPU users)
- Download [rmvpe.onnx](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/rmvpe.onnx)
### 4. ROCm for AMD GPUs (optional, Linux only)
If you want to run RVC on Linux using AMD's ROCm, first install the required drivers as described [here](https://rocm.docs.amd.com/en/latest/deploy/linux/os-native/install.html).
On Arch Linux, you can install the required drivers with pacman:
````
pacman -S rocm-hip-sdk rocm-opencl-sdk
````
For some GPU models you may need to set the following additional environment variables (e.g. for the RX6700XT):
````
export ROCM_PATH=/opt/rocm
export HSA_OVERRIDE_GFX_VERSION=10.3.0
````
Also make sure your current user is in the `render` and `video` groups:
````
sudo usermod -aG render $USERNAME
sudo usermod -aG video $USERNAME
````
## Getting Started
### Direct launch
Start the WebUI with the following command
```bash
python infer-web.py
```
If you previously installed the dependencies with Poetry, you can start the WebUI as follows
```bash
poetry run python infer-web.py
```
### Using the integration package
Download and extract `RVC-beta.7z`
#### Windows users
Double-click `go-web.bat`
#### MacOS users
```bash
sh ./run.sh
```
### For Intel GPU users who need IPEX (Linux only)
```bash
source /opt/intel/oneapi/setvars.sh
```
## Referenced Projects
+ [ContentVec](https://github.com/auspicious3000/contentvec/)
+ [VITS](https://github.com/jaywalnut310/vits)
+ [HIFIGAN](https://github.com/jik876/hifi-gan)
+ [Gradio](https://github.com/gradio-app/gradio)
+ [FFmpeg](https://github.com/FFmpeg/FFmpeg)
+ [Ultimate Vocal Remover](https://github.com/Anjok07/ultimatevocalremovergui)
+ [audio-slicer](https://github.com/openvpi/audio-slicer)
+ [Vocal pitch extraction:RMVPE](https://github.com/Dream-High/RMVPE)
+ The pretrained model is trained and tested by [yxlllc](https://github.com/yxlllc/RMVPE) and [RVC-Boss](https://github.com/RVC-Boss).
## Thanks to all contributors for their efforts
<a href="https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/graphs/contributors" target="_blank">
<img src="https://contrib.rocks/image?repo=RVC-Project/Retrieval-based-Voice-Conversion-WebUI" />
</a>
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# [Retrieval-based-Voice-Conversion-WebUI](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) Training notebook"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "ZFFCx5J80SGa"
},
"source": [
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI.ipynb)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "GmFP6bN9dvOq"
},
"outputs": [],
"source": [
"# @title 查看显卡\n",
"!nvidia-smi"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "jwu07JgqoFON"
},
"outputs": [],
"source": [
"# @title 挂载谷歌云盘\n",
"\n",
"from google.colab import drive\n",
"\n",
"drive.mount(\"/content/drive\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "wjddIFr1oS3W"
},
"outputs": [],
"source": [
"# @title 安装依赖\n",
"!apt-get -y install build-essential python3-dev ffmpeg\n",
"!pip3 install --upgrade setuptools wheel\n",
"!pip3 install --upgrade pip\n",
"!pip3 install faiss-cpu==1.7.2 fairseq gradio==3.14.0 ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ge_97mfpgqTm"
},
"outputs": [],
"source": [
"# @title 克隆仓库\n",
"\n",
"!git clone --depth=1 -b stable https://github.com/fumiama/Retrieval-based-Voice-Conversion-WebUI\n",
"%cd /content/Retrieval-based-Voice-Conversion-WebUI\n",
"!mkdir -p pretrained uvr5_weights"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "BLDEZADkvlw1"
},
"outputs": [],
"source": [
"# @title 更新仓库(一般无需执行)\n",
"!git pull"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "pqE0PrnuRqI2"
},
"outputs": [],
"source": [
"# @title 安装aria2\n",
"!apt -y install -qq aria2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "UG3XpUwEomUz"
},
"outputs": [],
"source": [
"# @title 下载底模\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D32k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D40k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D48k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G32k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G40k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G48k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D32k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D40k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D48k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G32k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G40k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G48k.pth"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "HugjmZqZRuiF"
},
"outputs": [],
"source": [
"# @title 下载人声分离模型\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "2RCaT9FTR0ej"
},
"outputs": [],
"source": [
"# @title 下载hubert_base\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d /content/Retrieval-based-Voice-Conversion-WebUI -o hubert_base.pt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# @title #下载rmvpe模型\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt -d /content/Retrieval-based-Voice-Conversion-WebUI -o rmvpe.pt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Mwk7Q0Loqzjx"
},
"outputs": [],
"source": [
"# @title 从谷歌云盘加载打包好的数据集到/content/dataset\n",
"\n",
"# @markdown 数据集位置\n",
"DATASET = (\n",
" \"/content/drive/MyDrive/dataset/lulu20230327_32k.zip\" # @param {type:\"string\"}\n",
")\n",
"\n",
"!mkdir -p /content/dataset\n",
"!unzip -d /content/dataset -B {DATASET}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "PDlFxWHWEynD"
},
"outputs": [],
"source": [
"# @title 重命名数据集中的重名文件\n",
"!ls -a /content/dataset/\n",
"!rename 's/(\\w+)\\.(\\w+)~(\\d*)/$1_$3.$2/' /content/dataset/*.*~*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "7vh6vphDwO0b"
},
"outputs": [],
"source": [
"# @title 启动web\n",
"%cd /content/Retrieval-based-Voice-Conversion-WebUI\n",
"# %load_ext tensorboard\n",
"# %tensorboard --logdir /content/Retrieval-based-Voice-Conversion-WebUI/logs\n",
"!python3 infer-web.py --colab --pycmd python3"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "FgJuNeAwx5Y_"
},
"outputs": [],
"source": [
"# @title 手动将训练后的模型文件备份到谷歌云盘\n",
"# @markdown 需要自己查看logs文件夹下模型的文件名,手动修改下方命令末尾的文件名\n",
"\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 模型epoch\n",
"MODELEPOCH = 9600 # @param {type:\"integer\"}\n",
"\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/drive/MyDrive/{MODELNAME}_D_{MODELEPOCH}.pth\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/drive/MyDrive/{MODELNAME}_G_{MODELEPOCH}.pth\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/added_*.index /content/drive/MyDrive/\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/total_*.npy /content/drive/MyDrive/\n",
"\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODELNAME}.pth /content/drive/MyDrive/{MODELNAME}{MODELEPOCH}.pth"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "OVQoLQJXS7WX"
},
"outputs": [],
"source": [
"# @title 从谷歌云盘恢复pth\n",
"# @markdown 需要自己查看logs文件夹下模型的文件名,手动修改下方命令末尾的文件名\n",
"\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 模型epoch\n",
"MODELEPOCH = 7500 # @param {type:\"integer\"}\n",
"\n",
"!mkdir -p /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n",
"\n",
"!cp /content/drive/MyDrive/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n",
"!cp /content/drive/MyDrive/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n",
"!cp /content/drive/MyDrive/*.index /content/\n",
"!cp /content/drive/MyDrive/*.npy /content/\n",
"!cp /content/drive/MyDrive/{MODELNAME}{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODELNAME}.pth"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ZKAyuKb9J6dz"
},
"outputs": [],
"source": [
"# @title 手动预处理(不推荐)\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 采样率\n",
"BITRATE = 48000 # @param {type:\"integer\"}\n",
"# @markdown 使用的进程数\n",
"THREADCOUNT = 8 # @param {type:\"integer\"}\n",
"\n",
"!python3 trainset_preprocess_pipeline_print.py /content/dataset {BITRATE} {THREADCOUNT} logs/{MODELNAME} True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "CrxJqzAUKmPJ"
},
"outputs": [],
"source": [
"# @title 手动提取特征(不推荐)\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 使用的进程数\n",
"THREADCOUNT = 8 # @param {type:\"integer\"}\n",
"# @markdown 音高提取算法\n",
"ALGO = \"harvest\" # @param {type:\"string\"}\n",
"\n",
"!python3 extract_f0_print.py logs/{MODELNAME} {THREADCOUNT} {ALGO}\n",
"\n",
"!python3 extract_feature_print.py cpu 1 0 0 logs/{MODELNAME} True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "IMLPLKOaKj58"
},
"outputs": [],
"source": [
"# @title 手动训练(不推荐)\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 使用的GPU\n",
"USEGPU = \"0\" # @param {type:\"string\"}\n",
"# @markdown 批大小\n",
"BATCHSIZE = 32 # @param {type:\"integer\"}\n",
"# @markdown 停止的epoch\n",
"MODELEPOCH = 3200 # @param {type:\"integer\"}\n",
"# @markdown 保存epoch间隔\n",
"EPOCHSAVE = 100 # @param {type:\"integer\"}\n",
"# @markdown 采样率\n",
"MODELSAMPLE = \"48k\" # @param {type:\"string\"}\n",
"# @markdown 是否缓存训练集\n",
"CACHEDATA = 1 # @param {type:\"integer\"}\n",
"# @markdown 是否仅保存最新的ckpt文件\n",
"ONLYLATEST = 0 # @param {type:\"integer\"}\n",
"\n",
"!python3 train_nsf_sim_cache_sid_load_pretrain.py -e lulu -sr {MODELSAMPLE} -f0 1 -bs {BATCHSIZE} -g {USEGPU} -te {MODELEPOCH} -se {EPOCHSAVE} -pg pretrained/f0G{MODELSAMPLE}.pth -pd pretrained/f0D{MODELSAMPLE}.pth -l {ONLYLATEST} -c {CACHEDATA}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "haYA81hySuDl"
},
"outputs": [],
"source": [
"# @title 删除其它pth,只留选中的(慎点,仔细看代码)\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 选中模型epoch\n",
"MODELEPOCH = 9600 # @param {type:\"integer\"}\n",
"\n",
"!echo \"备份选中的模型。。。\"\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/{MODELNAME}_D_{MODELEPOCH}.pth\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/{MODELNAME}_G_{MODELEPOCH}.pth\n",
"\n",
"!echo \"正在删除。。。\"\n",
"!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n",
"!rm /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/*.pth\n",
"\n",
"!echo \"恢复选中的模型。。。\"\n",
"!mv /content/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n",
"!mv /content/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n",
"\n",
"!echo \"删除完成\"\n",
"!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "QhSiPTVPoIRh"
},
"outputs": [],
"source": [
"# @title 清除项目下所有文件,只留选中的模型(慎点,仔细看代码)\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 选中模型epoch\n",
"MODELEPOCH = 9600 # @param {type:\"integer\"}\n",
"\n",
"!echo \"备份选中的模型。。。\"\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/{MODELNAME}_D_{MODELEPOCH}.pth\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/{MODELNAME}_G_{MODELEPOCH}.pth\n",
"\n",
"!echo \"正在删除。。。\"\n",
"!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n",
"!rm -rf /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/*\n",
"\n",
"!echo \"恢复选中的模型。。。\"\n",
"!mv /content/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n",
"!mv /content/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n",
"\n",
"!echo \"删除完成\"\n",
"!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"private_outputs": true,
"provenance": []
},
"gpuClass": "standard",
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
{
"cells": [
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"# [Retrieval-based-Voice-Conversion-WebUI](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) Training notebook"
]
},
{
"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "ZFFCx5J80SGa"
},
"source": [
"[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI_v2.ipynb)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "GmFP6bN9dvOq"
},
"outputs": [],
"source": [
"# @title #查看显卡\n",
"!nvidia-smi"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "jwu07JgqoFON"
},
"outputs": [],
"source": [
"# @title 挂载谷歌云盘\n",
"\n",
"from google.colab import drive\n",
"\n",
"drive.mount(\"/content/drive\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "wjddIFr1oS3W"
},
"outputs": [],
"source": [
"# @title #安装依赖\n",
"!apt-get -y install build-essential python3-dev ffmpeg\n",
"!pip3 install --upgrade setuptools wheel\n",
"!pip3 install --upgrade pip\n",
"!pip3 install faiss-cpu==1.7.2 fairseq gradio==3.14.0 ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ge_97mfpgqTm"
},
"outputs": [],
"source": [
"# @title #克隆仓库\n",
"\n",
"!mkdir Retrieval-based-Voice-Conversion-WebUI\n",
"%cd /content/Retrieval-based-Voice-Conversion-WebUI\n",
"!git init\n",
"!git remote add origin https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git\n",
"!git fetch origin cfd984812804ddc9247d65b14c82cd32e56c1133 --depth=1\n",
"!git reset --hard FETCH_HEAD"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "BLDEZADkvlw1"
},
"outputs": [],
"source": [
"# @title #更新仓库(一般无需执行)\n",
"!git pull"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "pqE0PrnuRqI2"
},
"outputs": [],
"source": [
"# @title #安装aria2\n",
"!apt -y install -qq aria2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "UG3XpUwEomUz"
},
"outputs": [],
"source": [
"# @title 下载底模\n",
"\n",
"# v1\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D32k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D40k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D48k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G32k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G40k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G48k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D32k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D40k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D48k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G32k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G40k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G48k.pth\n",
"\n",
"# v2\n",
"# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o D32k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o D40k.pth\n",
"# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o D48k.pth\n",
"# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o G32k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o G40k.pth\n",
"# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o G48k.pth\n",
"# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0D32k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0D40k.pth\n",
"# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0D48k.pth\n",
"# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0G32k.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0G40k.pth\n",
"# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0G48k.pth"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "HugjmZqZRuiF"
},
"outputs": [],
"source": [
"# @title #下载人声分离模型\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "2RCaT9FTR0ej"
},
"outputs": [],
"source": [
"# @title #下载hubert_base\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d /content/Retrieval-based-Voice-Conversion-WebUI -o hubert_base.pt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# @title #下载rmvpe模型\n",
"!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt -d /content/Retrieval-based-Voice-Conversion-WebUI -o rmvpe.pt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Mwk7Q0Loqzjx"
},
"outputs": [],
"source": [
"# @title #从谷歌云盘加载打包好的数据集到/content/dataset\n",
"\n",
"# @markdown 数据集位置\n",
"DATASET = (\n",
" \"/content/drive/MyDrive/dataset/lulu20230327_32k.zip\" # @param {type:\"string\"}\n",
")\n",
"\n",
"!mkdir -p /content/dataset\n",
"!unzip -d /content/dataset -B {DATASET}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "PDlFxWHWEynD"
},
"outputs": [],
"source": [
"# @title #重命名数据集中的重名文件\n",
"!ls -a /content/dataset/\n",
"!rename 's/(\\w+)\\.(\\w+)~(\\d*)/$1_$3.$2/' /content/dataset/*.*~*"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "7vh6vphDwO0b"
},
"outputs": [],
"source": [
"# @title #启动webui\n",
"%cd /content/Retrieval-based-Voice-Conversion-WebUI\n",
"# %load_ext tensorboard\n",
"# %tensorboard --logdir /content/Retrieval-based-Voice-Conversion-WebUI/logs\n",
"!python3 infer-web.py --colab --pycmd python3"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "FgJuNeAwx5Y_"
},
"outputs": [],
"source": [
"# @title #手动将训练后的模型文件备份到谷歌云盘\n",
"# @markdown #需要自己查看logs文件夹下模型的文件名,手动修改下方命令末尾的文件名\n",
"\n",
"# @markdown #模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown #模型epoch\n",
"MODELEPOCH = 9600 # @param {type:\"integer\"}\n",
"\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/drive/MyDrive/{MODELNAME}_D_{MODELEPOCH}.pth\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/drive/MyDrive/{MODELNAME}_G_{MODELEPOCH}.pth\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/added_*.index /content/drive/MyDrive/\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/total_*.npy /content/drive/MyDrive/\n",
"\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODELNAME}.pth /content/drive/MyDrive/{MODELNAME}{MODELEPOCH}.pth"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "OVQoLQJXS7WX"
},
"outputs": [],
"source": [
"# @title 从谷歌云盘恢复pth\n",
"# @markdown 需要自己查看logs文件夹下模型的文件名,手动修改下方命令末尾的文件名\n",
"\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 模型epoch\n",
"MODELEPOCH = 7500 # @param {type:\"integer\"}\n",
"\n",
"!mkdir -p /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n",
"\n",
"!cp /content/drive/MyDrive/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n",
"!cp /content/drive/MyDrive/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n",
"!cp /content/drive/MyDrive/*.index /content/\n",
"!cp /content/drive/MyDrive/*.npy /content/\n",
"!cp /content/drive/MyDrive/{MODELNAME}{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODELNAME}.pth"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "ZKAyuKb9J6dz"
},
"outputs": [],
"source": [
"# @title 手动预处理(不推荐)\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 采样率\n",
"BITRATE = 48000 # @param {type:\"integer\"}\n",
"# @markdown 使用的进程数\n",
"THREADCOUNT = 8 # @param {type:\"integer\"}\n",
"\n",
"!python3 trainset_preprocess_pipeline_print.py /content/dataset {BITRATE} {THREADCOUNT} logs/{MODELNAME} True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "CrxJqzAUKmPJ"
},
"outputs": [],
"source": [
"# @title 手动提取特征(不推荐)\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 使用的进程数\n",
"THREADCOUNT = 8 # @param {type:\"integer\"}\n",
"# @markdown 音高提取算法\n",
"ALGO = \"harvest\" # @param {type:\"string\"}\n",
"\n",
"!python3 extract_f0_print.py logs/{MODELNAME} {THREADCOUNT} {ALGO}\n",
"\n",
"!python3 extract_feature_print.py cpu 1 0 0 logs/{MODELNAME} True"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "IMLPLKOaKj58"
},
"outputs": [],
"source": [
"# @title 手动训练(不推荐)\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 使用的GPU\n",
"USEGPU = \"0\" # @param {type:\"string\"}\n",
"# @markdown 批大小\n",
"BATCHSIZE = 32 # @param {type:\"integer\"}\n",
"# @markdown 停止的epoch\n",
"MODELEPOCH = 3200 # @param {type:\"integer\"}\n",
"# @markdown 保存epoch间隔\n",
"EPOCHSAVE = 100 # @param {type:\"integer\"}\n",
"# @markdown 采样率\n",
"MODELSAMPLE = \"48k\" # @param {type:\"string\"}\n",
"# @markdown 是否缓存训练集\n",
"CACHEDATA = 1 # @param {type:\"integer\"}\n",
"# @markdown 是否仅保存最新的ckpt文件\n",
"ONLYLATEST = 0 # @param {type:\"integer\"}\n",
"\n",
"!python3 train_nsf_sim_cache_sid_load_pretrain.py -e lulu -sr {MODELSAMPLE} -f0 1 -bs {BATCHSIZE} -g {USEGPU} -te {MODELEPOCH} -se {EPOCHSAVE} -pg pretrained/f0G{MODELSAMPLE}.pth -pd pretrained/f0D{MODELSAMPLE}.pth -l {ONLYLATEST} -c {CACHEDATA}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "haYA81hySuDl"
},
"outputs": [],
"source": [
"# @title 删除其它pth,只留选中的(慎点,仔细看代码)\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 选中模型epoch\n",
"MODELEPOCH = 9600 # @param {type:\"integer\"}\n",
"\n",
"!echo \"备份选中的模型。。。\"\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/{MODELNAME}_D_{MODELEPOCH}.pth\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/{MODELNAME}_G_{MODELEPOCH}.pth\n",
"\n",
"!echo \"正在删除。。。\"\n",
"!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n",
"!rm /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/*.pth\n",
"\n",
"!echo \"恢复选中的模型。。。\"\n",
"!mv /content/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n",
"!mv /content/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n",
"\n",
"!echo \"删除完成\"\n",
"!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "QhSiPTVPoIRh"
},
"outputs": [],
"source": [
"# @title 清除项目下所有文件,只留选中的模型(慎点,仔细看代码)\n",
"# @markdown 模型名\n",
"MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
"# @markdown 选中模型epoch\n",
"MODELEPOCH = 9600 # @param {type:\"integer\"}\n",
"\n",
"!echo \"备份选中的模型。。。\"\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/{MODELNAME}_D_{MODELEPOCH}.pth\n",
"!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/{MODELNAME}_G_{MODELEPOCH}.pth\n",
"\n",
"!echo \"正在删除。。。\"\n",
"!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n",
"!rm -rf /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/*\n",
"\n",
"!echo \"恢复选中的模型。。。\"\n",
"!mv /content/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n",
"!mv /content/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n",
"\n",
"!echo \"删除完成\"\n",
"!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"private_outputs": true,
"provenance": []
},
"gpuClass": "standard",
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
# API for the 231006 release version, by Xiaokai
import os
import sys
import json
import re
import time
import librosa
import torch
import numpy as np
import torch.nn.functional as F
import torchaudio.transforms as tat
import sounddevice as sd
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import threading
import uvicorn
import logging

# NOTE: the realtime conversion engine and the spectral gate used below come from the
# project itself; the exact module paths are assumed from the repository layout.
from infer.lib import rtrvc as rvc_for_realtime
from tools.torchgate import TorchGate
# Initialize the logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Define FastAPI app
app = FastAPI()
class GUIConfig:
def __init__(self) -> None:
self.pth_path: str = ""
self.index_path: str = ""
self.pitch: int = 0
self.samplerate: int = 40000
self.block_time: float = 1.0 # s
self.buffer_num: int = 1
self.threhold: int = -60
self.crossfade_time: float = 0.05
self.extra_time: float = 2.5
self.I_noise_reduce = False
self.O_noise_reduce = False
self.rms_mix_rate = 0.0
self.index_rate = 0.3
self.f0method = "rmvpe"
self.sg_input_device = ""
self.sg_output_device = ""
class ConfigData(BaseModel):
pth_path: str
index_path: str
sg_input_device: str
sg_output_device: str
threhold: int = -60
pitch: int = 0
index_rate: float = 0.3
rms_mix_rate: float = 0.0
block_time: float = 0.25
crossfade_length: float = 0.05
extra_time: float = 2.5
n_cpu: int = 4
I_noise_reduce: bool = False
O_noise_reduce: bool = False
class AudioAPI:
def __init__(self) -> None:
self.gui_config = GUIConfig()
self.config = None # Initialize Config object as None
self.flag_vc = False
self.function = "vc"
self.delay_time = 0
self.rvc = None # Initialize RVC object as None
def load(self):
input_devices, output_devices, _, _ = self.get_devices()
try:
with open("configs/config.json", "r", encoding='utf-8') as j:
data = json.load(j)
data["rmvpe"] = True # Ensure rmvpe is the only f0method
if data["sg_input_device"] not in input_devices:
data["sg_input_device"] = input_devices[sd.default.device[0]]
if data["sg_output_device"] not in output_devices:
data["sg_output_device"] = output_devices[sd.default.device[1]]
except Exception as e:
logger.error(f"Failed to load configuration: {e}")
with open("configs/config.json", "w", encoding='utf-8') as j:
data = {
"pth_path": " ",
"index_path": " ",
"sg_input_device": input_devices[sd.default.device[0]],
"sg_output_device": output_devices[sd.default.device[1]],
"threhold": "-60",
"pitch": "0",
"index_rate": "0",
"rms_mix_rate": "0",
"block_time": "0.25",
"crossfade_length": "0.05",
"extra_time": "2.5",
"f0method": "rmvpe",
"use_jit": False,
}
data["rmvpe"] = True # Ensure rmvpe is the only f0method
json.dump(data, j, ensure_ascii=False)
return data
def set_values(self, values):
logger.info(f"Setting values: {values}")
if not values.pth_path.strip():
raise HTTPException(status_code=400, detail="Please select a .pth file")
if not values.index_path.strip():
raise HTTPException(status_code=400, detail="Please select an index file")
self.set_devices(values.sg_input_device, values.sg_output_device)
self.config.use_jit = False
self.gui_config.pth_path = values.pth_path
self.gui_config.index_path = values.index_path
self.gui_config.threhold = values.threhold
self.gui_config.pitch = values.pitch
self.gui_config.block_time = values.block_time
self.gui_config.crossfade_time = values.crossfade_length
self.gui_config.extra_time = values.extra_time
self.gui_config.I_noise_reduce = values.I_noise_reduce
self.gui_config.O_noise_reduce = values.O_noise_reduce
self.gui_config.rms_mix_rate = values.rms_mix_rate
self.gui_config.index_rate = values.index_rate
self.gui_config.n_cpu = values.n_cpu
self.gui_config.f0method = "rmvpe"
return True
def start_vc(self):
torch.cuda.empty_cache()
self.flag_vc = True
self.rvc = rvc_for_realtime.RVC(
self.gui_config.pitch,
self.gui_config.pth_path,
self.gui_config.index_path,
self.gui_config.index_rate,
0,
0,
0,
self.config,
self.rvc if self.rvc else None,
)
self.gui_config.samplerate = self.rvc.tgt_sr
self.zc = self.rvc.tgt_sr // 100
self.block_frame = (
int(
np.round(
self.gui_config.block_time
* self.gui_config.samplerate
/ self.zc
)
)
* self.zc
)
self.block_frame_16k = 160 * self.block_frame // self.zc
self.crossfade_frame = (
int(
np.round(
self.gui_config.crossfade_time
* self.gui_config.samplerate
/ self.zc
)
)
* self.zc
)
self.sola_search_frame = self.zc
self.extra_frame = (
int(
np.round(
self.gui_config.extra_time
* self.gui_config.samplerate
/ self.zc
)
)
* self.zc
)
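# Pre-allocate rolling buffers: input_wav holds extra context + crossfade + SOLA search +
# one block at the model sample rate; input_wav_res is its 16 kHz counterpart.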
self.input_wav = torch.zeros(
self.extra_frame + self.crossfade_frame + self.sola_search_frame + self.block_frame,
device=self.config.device,
dtype=torch.float32,
)
self.input_wav_res = torch.zeros(
160 * self.input_wav.shape[0] // self.zc,
device=self.config.device,
dtype=torch.float32,
)
self.pitch = np.zeros(self.input_wav.shape[0] // self.zc, dtype="int32")
self.pitchf = np.zeros(self.input_wav.shape[0] // self.zc, dtype="float64")
self.sola_buffer = torch.zeros(self.crossfade_frame, device=self.config.device, dtype=torch.float32)
self.nr_buffer = self.sola_buffer.clone()
self.output_buffer = self.input_wav.clone()
self.res_buffer = torch.zeros(2 * self.zc, device=self.config.device, dtype=torch.float32)
self.valid_rate = 1 - (self.extra_frame - 1) / self.input_wav.shape[0]
self.fade_in_window = (
torch.sin(0.5 * np.pi * torch.linspace(0.0, 1.0, steps=self.crossfade_frame, device=self.config.device, dtype=torch.float32)) ** 2
)
self.fade_out_window = 1 - self.fade_in_window
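# Resampler from the model sample rate down to the 16 kHz expected by the content encoder.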
self.resampler = tat.Resample(
orig_freq=self.gui_config.samplerate,
new_freq=16000,
dtype=torch.float32,
).to(self.config.device)
self.tg = TorchGate(
sr=self.gui_config.samplerate, n_fft=4 * self.zc, prop_decrease=0.9
).to(self.config.device)
thread_vc = threading.Thread(target=self.soundinput)
thread_vc.start()
def soundinput(self):
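# Open a full-duplex stream; sounddevice invokes audio_callback once per block_frame
# samples until flag_vc is cleared.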
channels = 1 if sys.platform == "darwin" else 2
with sd.Stream(
channels=channels,
callback=self.audio_callback,
blocksize=self.block_frame,
samplerate=self.gui_config.samplerate,
dtype="float32",
) as stream:
global stream_latency
stream_latency = stream.latency[-1]
while self.flag_vc:
time.sleep(self.gui_config.block_time)
logger.info("Audio block passed.")
logger.info("Ending VC")
def audio_callback(self, indata: np.ndarray, outdata: np.ndarray, frames, times, status):
start_time = time.perf_counter()
indata = librosa.to_mono(indata.T)
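# Noise gate: silence 10 ms hops whose RMS level (in dB) falls below the configured threshold.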
if self.gui_config.threhold > -60:
rms = librosa.feature.rms(y=indata, frame_length=4 * self.zc, hop_length=self.zc)
db_threhold = (librosa.amplitude_to_db(rms, ref=1.0)[0] < self.gui_config.threhold)
for i in range(db_threhold.shape[0]):
if db_threhold[i]:
indata[i * self.zc : (i + 1) * self.zc] = 0
self.input_wav[: -self.block_frame] = self.input_wav[self.block_frame :].clone()
self.input_wav[-self.block_frame :] = torch.from_numpy(indata).to(self.config.device)
self.input_wav_res[: -self.block_frame_16k] = self.input_wav_res[self.block_frame_16k :].clone()
if self.gui_config.I_noise_reduce and self.function == "vc":
input_wav = self.input_wav[-self.crossfade_frame - self.block_frame - 2 * self.zc :]
input_wav = self.tg(input_wav.unsqueeze(0), self.input_wav.unsqueeze(0))[0, 2 * self.zc :]
input_wav[: self.crossfade_frame] *= self.fade_in_window
input_wav[: self.crossfade_frame] += self.nr_buffer * self.fade_out_window
self.nr_buffer[:] = input_wav[-self.crossfade_frame :]
input_wav = torch.cat((self.res_buffer[:], input_wav[: self.block_frame]))
self.res_buffer[:] = input_wav[-2 * self.zc :]
self.input_wav_res[-self.block_frame_16k - 160 :] = self.resampler(input_wav)[160:]
else:
self.input_wav_res[-self.block_frame_16k - 160 :] = self.resampler(self.input_wav[-self.block_frame - 2 * self.zc :])[160:]
if self.function == "vc":
f0_extractor_frame = self.block_frame_16k + 800
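# For rmvpe, pad the f0 extraction frame up to a whole multiple of 5120 samples (minus one
# 160-sample hop), matching the window size the extractor expects (assumed rationale).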
if self.gui_config.f0method == "rmvpe":
f0_extractor_frame = (5120 * ((f0_extractor_frame - 1) // 5120 + 1) - 160)
infer_wav = self.rvc.infer(
self.input_wav_res,
self.input_wav_res[-f0_extractor_frame:].cpu().numpy(),
self.block_frame_16k,
self.valid_rate,
self.pitch,
self.pitchf,
self.gui_config.f0method,
)
infer_wav = infer_wav[-self.crossfade_frame - self.sola_search_frame - self.block_frame :]
else:
infer_wav = self.input_wav[-self.crossfade_frame - self.sola_search_frame - self.block_frame :].clone()
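# Optional output denoising, gated against a rolling buffer of recent output when noise reduction is enabled for the current mode.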
if (self.gui_config.O_noise_reduce and self.function == "vc") or (self.gui_config.I_noise_reduce and self.function == "im"):
self.output_buffer[: -self.block_frame] = self.output_buffer[self.block_frame :].clone()
self.output_buffer[-self.block_frame :] = infer_wav[-self.block_frame :]
infer_wav = self.tg(infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0)).squeeze(0)
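# Envelope mixing: scale the output toward the input loudness contour according to rms_mix_rate.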
if self.gui_config.rms_mix_rate < 1 and self.function == "vc":
rms1 = librosa.feature.rms(y=self.input_wav_res[-160 * infer_wav.shape[0] // self.zc :].cpu().numpy(), frame_length=640, hop_length=160)
rms1 = torch.from_numpy(rms1).to(self.config.device)
rms1 = F.interpolate(rms1.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear", align_corners=True)[0, 0, :-1]
rms2 = librosa.feature.rms(y=infer_wav[:].cpu().numpy(), frame_length=4 * self.zc, hop_length=self.zc)
rms2 = torch.from_numpy(rms2).to(self.config.device)
rms2 = F.interpolate(rms2.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear", align_corners=True)[0, 0, :-1]
rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-3)
infer_wav *= torch.pow(rms1 / rms2, torch.tensor(1 - self.gui_config.rms_mix_rate))
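# SOLA alignment: normalized cross-correlation between the head of the new block and the stored crossfade tail picks the splice offset.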
conv_input = infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame]
cor_nom = F.conv1d(conv_input, self.sola_buffer[None, None, :])
cor_den = torch.sqrt(F.conv1d(conv_input**2, torch.ones(1, 1, self.crossfade_frame, device=self.config.device)) + 1e-8)
if sys.platform == "darwin":
# macOS (MPS) path avoids torch.argmax; torch.max needs dim=0 here so it returns (values, indices) and the index can be unpacked
_, sola_offset = torch.max(cor_nom[0, 0] / cor_den[0, 0], dim=0)
sola_offset = sola_offset.item()
else:
sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
logger.info(f"sola_offset = {sola_offset}")
infer_wav = infer_wav[sola_offset : sola_offset + self.block_frame + self.crossfade_frame]
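# Crossfade the aligned block with the stored tail, then remember the new tail for the next callback.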
infer_wav[: self.crossfade_frame] *= self.fade_in_window
infer_wav[: self.crossfade_frame] += self.sola_buffer * self.fade_out_window
self.sola_buffer[:] = infer_wav[-self.crossfade_frame :]
if sys.platform == "darwin":
outdata[:] = infer_wav[: -self.crossfade_frame].cpu().numpy()[:, np.newaxis]
else:
outdata[:] = infer_wav[: -self.crossfade_frame].repeat(2, 1).t().cpu().numpy()
total_time = time.perf_counter() - start_time
logger.info(f"Infer time: {total_time:.2f}")
def get_devices(self, update: bool = True):
if update:
sd._terminate()
sd._initialize()
devices = sd.query_devices()
hostapis = sd.query_hostapis()
for hostapi in hostapis:
for device_idx in hostapi["devices"]:
devices[device_idx]["hostapi_name"] = hostapi["name"]
input_devices = [
f"{d['name']} ({d['hostapi_name']})"
for d in devices
if d["max_input_channels"] > 0
]
output_devices = [
f"{d['name']} ({d['hostapi_name']})"
for d in devices
if d["max_output_channels"] > 0
]
input_devices_indices = [
d["index"] if "index" in d else d["name"]
for d in devices
if d["max_input_channels"] > 0
]
output_devices_indices = [
d["index"] if "index" in d else d["name"]
for d in devices
if d["max_output_channels"] > 0
]
return (
input_devices,
output_devices,
input_devices_indices,
output_devices_indices,
)
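# Resolve the selected device names to indices and make them the sounddevice defaults; unknown names raise HTTP 400.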
def set_devices(self, input_device, output_device):
(
input_devices,
output_devices,
input_device_indices,
output_device_indices,
) = self.get_devices()
logger.debug(f"Available input devices: {input_devices}")
logger.debug(f"Available output devices: {output_devices}")
logger.debug(f"Selected input device: {input_device}")
logger.debug(f"Selected output device: {output_device}")
if input_device not in input_devices:
logger.error(f"Input device '{input_device}' is not in the list of available devices")
raise HTTPException(status_code=400, detail=f"Input device '{input_device}' is not available")
if output_device not in output_devices:
logger.error(f"Output device '{output_device}' is not in the list of available devices")
raise HTTPException(status_code=400, detail=f"Output device '{output_device}' is not available")
sd.default.device[0] = input_device_indices[input_devices.index(input_device)]
sd.default.device[1] = output_device_indices[output_devices.index(output_device)]
logger.info(f"Input device set to {sd.default.device[0]}: {input_device}")
logger.info(f"Output device set to {sd.default.device[1]}: {output_device}")
audio_api = AudioAPI()
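# REST API: query input/output devices, push configuration, and start/stop the realtime conversion.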
@app.get("/inputDevices", response_model=list)
def get_input_devices():
try:
input_devices, _, _, _ = audio_api.get_devices()
return input_devices
except Exception as e:
logger.error(f"Failed to get input devices: {e}")
raise HTTPException(status_code=500, detail="Failed to get input devices")
@app.get("/outputDevices", response_model=list)
def get_output_devices():
try:
_, output_devices, _, _ = audio_api.get_devices()
return output_devices
except Exception as e:
logger.error(f"Failed to get output devices: {e}")
raise HTTPException(status_code=500, detail="Failed to get output devices")
@app.post("/config")
def configure_audio(config_data: ConfigData):
try:
logger.info(f"Configuring audio with data: {config_data}")
if audio_api.set_values(config_data):
settings = config_data.dict()
settings["use_jit"] = False
settings["f0method"] = "rmvpe"
with open("configs/config.json", "w", encoding='utf-8') as j:
json.dump(settings, j, ensure_ascii=False)
logger.info("Configuration set successfully")
return {"message": "Configuration set successfully"}
except HTTPException as e:
logger.error(f"Configuration error: {e.detail}")
raise
except Exception as e:
logger.error(f"Configuration failed: {e}")
raise HTTPException(status_code=400, detail=f"Configuration failed: {e}")
@app.post("/start")
def start_conversion():
try:
if not audio_api.flag_vc:
audio_api.start_vc()
return {"message": "Audio conversion started"}
else:
logger.warning("Audio conversion already running")
raise HTTPException(status_code=400, detail="Audio conversion already running")
except HTTPException as e:
logger.error(f"Start conversion error: {e.detail}")
raise
except Exception as e:
logger.error(f"Failed to start conversion: {e}")
raise HTTPException(status_code=500, detail=f"Failed to start conversion: {e}")
@app.post("/stop")
def stop_conversion():
try:
if audio_api.flag_vc:
audio_api.flag_vc = False
global stream_latency
stream_latency = -1
return {"message": "Audio conversion stopped"}
else:
logger.warning("Audio conversion not running")
raise HTTPException(status_code=400, detail="Audio conversion not running")
except HTTPException as e:
logger.error(f"Stop conversion error: {e.detail}")
raise
except Exception as e:
logger.error(f"Failed to stop conversion: {e}")
raise HTTPException(status_code=500, detail=f"Failed to stop conversion: {e}")
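# Entry point: heavy imports (TorchGate, rvc_for_realtime, Config) are deferred until the API is actually run.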
if __name__ == "__main__":
if sys.platform == "win32":
from multiprocessing import freeze_support
freeze_support()
load_dotenv()
os.environ["OMP_NUM_THREADS"] = "4"
if sys.platform == "darwin":
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from tools.torchgate import TorchGate
import tools.rvc_for_realtime as rvc_for_realtime
from configs.config import Config
audio_api.config = Config()
uvicorn.run(app, host="0.0.0.0", port=6242)
*
!.gitignore
!hubert_inputs.pth
\ No newline at end of file
*
!.gitignore
!rmvpe_inputs.pth
\ No newline at end of file