{
"1089_134686_000002_000000": [
"hello, my name is Jack. What is your name?"
]
}
# Set random seeds so that you can reproduce your results.
__set_seed1: !apply:random.seed [1986]
__set_seed2: !apply:numpy.random.seed [1986]
__set_seed3: !apply:torch.manual_seed [1986]
__set_seed4: !apply:torch.cuda.manual_seed_all [1986]
# fixed params
sample_rate: 22050
text_encoder_input_size: 512
llm_input_size: 1024
llm_output_size: 1024
spk_embed_dim: 192
# model params
# For every class/function included in this repo, we use !new: or !name: for initialization, so that users can find all corresponding classes/functions from this single yaml.
# For system/third-party classes/functions, this is not required.
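# A minimal sketch of the tag semantics assumed here (standard hyperpyyaml behavior):
#   foo: !new:pkg.mod.Class    # instantiates pkg.mod.Class(**nested_kwargs) at load time
#   bar: !name:pkg.mod.func    # binds pkg.mod.func(**nested_kwargs) without calling it
#   baz: !ref <foo>            # references the node named foo defined in this yaml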
llm: !new:cosyvoice.llm.llm.TransformerLM
text_encoder_input_size: !ref <text_encoder_input_size>
llm_input_size: !ref <llm_input_size>
llm_output_size: !ref <llm_output_size>
text_token_size: 51866
speech_token_size: 4096
length_normalized_loss: True
lsm_weight: 0
spk_embed_dim: !ref <spk_embed_dim>
text_encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder
input_size: !ref <text_encoder_input_size>
output_size: 1024
attention_heads: 8
linear_units: 2048
num_blocks: 3
dropout_rate: 0.1
positional_dropout_rate: 0.1
attention_dropout_rate: 0
normalize_before: True
input_layer: 'linear'
pos_enc_layer_type: 'rel_pos_espnet'
selfattention_layer_type: 'rel_selfattn'
use_cnn_module: False
macaron_style: False
use_dynamic_chunk: False
use_dynamic_left_chunk: False
static_chunk_size: 1
llm: !new:cosyvoice.transformer.encoder.TransformerEncoder
input_size: !ref <llm_input_size>
output_size: !ref <llm_output_size>
attention_heads: 8
linear_units: 2048
num_blocks: 7
dropout_rate: 0.1
positional_dropout_rate: 0.1
attention_dropout_rate: 0
input_layer: 'linear_legacy'
pos_enc_layer_type: 'rel_pos_espnet'
selfattention_layer_type: 'rel_selfattn'
static_chunk_size: 1
flow: !new:cosyvoice.flow.flow.MaskedDiffWithXvec
input_size: 512
output_size: 80
spk_embed_dim: !ref <spk_embed_dim>
output_type: 'mel'
vocab_size: 4096
input_frame_rate: 50
only_mask_loss: True
encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder
output_size: 512
attention_heads: 4
linear_units: 1024
num_blocks: 3
dropout_rate: 0.1
positional_dropout_rate: 0.1
attention_dropout_rate: 0.1
normalize_before: True
input_layer: 'linear'
pos_enc_layer_type: 'rel_pos_espnet'
selfattention_layer_type: 'rel_selfattn'
input_size: 512
use_cnn_module: False
macaron_style: False
length_regulator: !new:cosyvoice.flow.length_regulator.InterpolateRegulator
channels: 80
sampling_ratios: [1, 1, 1, 1]
decoder: !new:cosyvoice.flow.flow_matching.ConditionalCFM
in_channels: 240
n_spks: 1
spk_emb_dim: 80
cfm_params: !new:omegaconf.DictConfig
content:
sigma_min: 1e-06
solver: 'euler'
t_scheduler: 'cosine'
training_cfg_rate: 0.2
inference_cfg_rate: 0.7
reg_loss_type: 'l1'
estimator: !new:cosyvoice.flow.decoder.ConditionalDecoder
in_channels: 320
out_channels: 80
channels: [256, 256]
dropout: 0
attention_head_dim: 64
n_blocks: 4
num_mid_blocks: 8
num_heads: 8
act_fn: 'gelu'
hift: !new:cosyvoice.hifigan.generator.HiFTGenerator
in_channels: 80
base_channels: 512
nb_harmonics: 8
sampling_rate: !ref <sample_rate>
nsf_alpha: 0.1
nsf_sigma: 0.003
nsf_voiced_threshold: 10
upsample_rates: [8, 8]
upsample_kernel_sizes: [16, 16]
istft_params:
n_fft: 16
hop_len: 4
resblock_kernel_sizes: [3, 7, 11]
resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
source_resblock_kernel_sizes: [7, 11]
source_resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5]]
lrelu_slope: 0.1
audio_limit: 0.99
f0_predictor: !new:cosyvoice.hifigan.f0_predictor.ConvRNNF0Predictor
num_class: 1
in_channels: 80
cond_channels: 512
# processor functions
parquet_opener: !name:cosyvoice.dataset.processor.parquet_opener
get_tokenizer: !name:whisper.tokenizer.get_tokenizer
multilingual: True
num_languages: 100
language: 'en'
task: 'transcribe'
allowed_special: 'all'
tokenize: !name:cosyvoice.dataset.processor.tokenize
get_tokenizer: !ref <get_tokenizer>
allowed_special: !ref <allowed_special>
filter: !name:cosyvoice.dataset.processor.filter
max_length: 40960
min_length: 0
token_max_length: 200
token_min_length: 1
resample: !name:cosyvoice.dataset.processor.resample
resample_rate: !ref <sample_rate>
feat_extractor: !name:matcha.utils.audio.mel_spectrogram
n_fft: 1024
num_mels: 80
sampling_rate: !ref <sample_rate>
hop_size: 256
win_size: 1024
fmin: 0
fmax: 8000
center: False
compute_fbank: !name:cosyvoice.dataset.processor.compute_fbank
feat_extractor: !ref <feat_extractor>
parse_embedding: !name:cosyvoice.dataset.processor.parse_embedding
normalize: True
shuffle: !name:cosyvoice.dataset.processor.shuffle
shuffle_size: 1000
sort: !name:cosyvoice.dataset.processor.sort
sort_size: 500 # sort_size should be less than shuffle_size
batch: !name:cosyvoice.dataset.processor.batch
batch_type: 'dynamic'
max_frames_in_batch: 12000
padding: !name:cosyvoice.dataset.processor.padding
use_spk_embedding: False # change to True during sft
# dataset processor pipeline
data_pipeline: [
!ref <parquet_opener>,
!ref <tokenize>,
!ref <filter>,
!ref <resample>,
!ref <compute_fbank>,
!ref <parse_embedding>,
!ref <shuffle>,
!ref <sort>,
!ref <batch>,
!ref <padding>,
]
# train conf
train_conf:
optim: adam
optim_conf:
lr: 0.002 # change to 0.001 if you want to train flow from scratch
scheduler: warmuplr
scheduler_conf:
warmup_steps: 25000
max_epoch: 200
grad_clip: 5
accum_grad: 2
log_interval: 100
save_per_step: -1
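# A config like this is materialized with hyperpyyaml, which resolves the
# !new/!name/!ref/!apply tags at load time. A minimal loading sketch, assuming
# hyperpyyaml is installed and cosyvoice is on PYTHONPATH:
#
#   from hyperpyyaml import load_hyperpyyaml
#   with open('conf/cosyvoice.yaml', 'r') as f:
#       configs = load_hyperpyyaml(f)
#   llm = configs['llm']                  # an instantiated TransformerLM
#   pipeline = configs['data_pipeline']   # list of bound processor functions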
# Set random seeds so that you can reproduce your results.
__set_seed1: !apply:random.seed [1986]
__set_seed2: !apply:numpy.random.seed [1986]
__set_seed3: !apply:torch.manual_seed [1986]
__set_seed4: !apply:torch.cuda.manual_seed_all [1986]
# fixed params
sample_rate: 22050
text_encoder_input_size: 512
llm_input_size: 1024
llm_output_size: 1024
spk_embed_dim: 192
# model params
# For every class/function included in this repo, we use !new: or !name: for initialization, so that users can find all corresponding classes/functions from this single yaml.
# For system/third-party classes/functions, this is not required.
llm: !new:cosyvoice.llm.llm.TransformerLM
text_encoder_input_size: !ref <text_encoder_input_size>
llm_input_size: !ref <llm_input_size>
llm_output_size: !ref <llm_output_size>
text_token_size: 51866
speech_token_size: 4096
length_normalized_loss: True
lsm_weight: 0
spk_embed_dim: !ref <spk_embed_dim>
text_encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder
input_size: !ref <text_encoder_input_size>
output_size: 1024
attention_heads: 16
linear_units: 4096
num_blocks: 6
dropout_rate: 0.1
positional_dropout_rate: 0.1
attention_dropout_rate: 0
normalize_before: True
input_layer: 'linear'
pos_enc_layer_type: 'rel_pos_espnet'
selfattention_layer_type: 'rel_selfattn'
use_cnn_module: False
macaron_style: False
use_dynamic_chunk: False
use_dynamic_left_chunk: False
static_chunk_size: 1
llm: !new:cosyvoice.transformer.encoder.TransformerEncoder
input_size: !ref <llm_input_size>
output_size: !ref <llm_output_size>
attention_heads: 16
linear_units: 4096
num_blocks: 14
dropout_rate: 0.1
positional_dropout_rate: 0.1
attention_dropout_rate: 0
input_layer: 'linear_legacy'
pos_enc_layer_type: 'rel_pos_espnet'
selfattention_layer_type: 'rel_selfattn'
static_chunk_size: 1
flow: !new:cosyvoice.flow.flow.MaskedDiffWithXvec
input_size: 512
output_size: 80
spk_embed_dim: !ref <spk_embed_dim>
output_type: 'mel'
vocab_size: 4096
input_frame_rate: 50
only_mask_loss: True
encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder
output_size: 512
attention_heads: 8
linear_units: 2048
num_blocks: 6
dropout_rate: 0.1
positional_dropout_rate: 0.1
attention_dropout_rate: 0.1
normalize_before: True
input_layer: 'linear'
pos_enc_layer_type: 'rel_pos_espnet'
selfattention_layer_type: 'rel_selfattn'
input_size: 512
use_cnn_module: False
macaron_style: False
length_regulator: !new:cosyvoice.flow.length_regulator.InterpolateRegulator
channels: 80
sampling_ratios: [1, 1, 1, 1]
decoder: !new:cosyvoice.flow.flow_matching.ConditionalCFM
in_channels: 240
n_spks: 1
spk_emb_dim: 80
cfm_params: !new:omegaconf.DictConfig
content:
sigma_min: 1e-06
solver: 'euler'
t_scheduler: 'cosine'
training_cfg_rate: 0.2
inference_cfg_rate: 0.7
reg_loss_type: 'l1'
estimator: !new:cosyvoice.flow.decoder.ConditionalDecoder
in_channels: 320
out_channels: 80
channels: [256, 256]
dropout: 0
attention_head_dim: 64
n_blocks: 4
num_mid_blocks: 12
num_heads: 8
act_fn: 'gelu'
hift: !new:cosyvoice.hifigan.generator.HiFTGenerator
in_channels: 80
base_channels: 512
nb_harmonics: 8
sampling_rate: !ref <sample_rate>
nsf_alpha: 0.1
nsf_sigma: 0.003
nsf_voiced_threshold: 10
upsample_rates: [8, 8]
upsample_kernel_sizes: [16, 16]
istft_params:
n_fft: 16
hop_len: 4
resblock_kernel_sizes: [3, 7, 11]
resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]]
source_resblock_kernel_sizes: [7, 11]
source_resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5]]
lrelu_slope: 0.1
audio_limit: 0.99
f0_predictor: !new:cosyvoice.hifigan.f0_predictor.ConvRNNF0Predictor
num_class: 1
in_channels: 80
cond_channels: 512
# processor functions
parquet_opener: !name:cosyvoice.dataset.processor.parquet_opener
get_tokenizer: !name:whisper.tokenizer.get_tokenizer
multilingual: True
num_languages: 100
language: 'en'
task: 'transcribe'
allowed_special: 'all'
tokenize: !name:cosyvoice.dataset.processor.tokenize
get_tokenizer: !ref <get_tokenizer>
allowed_special: !ref <allowed_special>
filter: !name:cosyvoice.dataset.processor.filter
max_length: 40960
min_length: 0
token_max_length: 200
token_min_length: 1
resample: !name:cosyvoice.dataset.processor.resample
resample_rate: !ref <sample_rate>
feat_extractor: !name:matcha.utils.audio.mel_spectrogram
n_fft: 1024
num_mels: 80
sampling_rate: !ref <sample_rate>
hop_size: 256
win_size: 1024
fmin: 0
fmax: 8000
center: False
compute_fbank: !name:cosyvoice.dataset.processor.compute_fbank
feat_extractor: !ref <feat_extractor>
parse_embedding: !name:cosyvoice.dataset.processor.parse_embedding
normalize: True
shuffle: !name:cosyvoice.dataset.processor.shuffle
shuffle_size: 1000
sort: !name:cosyvoice.dataset.processor.sort
sort_size: 500 # sort_size should be less than shuffle_size
batch: !name:cosyvoice.dataset.processor.batch
batch_type: 'dynamic'
max_frames_in_batch: 2000
padding: !name:cosyvoice.dataset.processor.padding
use_spk_embedding: False # change to True during sft
# dataset processor pipeline
data_pipeline: [
!ref <parquet_opener>,
!ref <tokenize>,
!ref <filter>,
!ref <resample>,
!ref <compute_fbank>,
!ref <parse_embedding>,
!ref <shuffle>,
!ref <sort>,
!ref <batch>,
!ref <padding>,
]
# train conf
train_conf:
optim: adam
optim_conf:
lr: 0.001 # change to 1e-5 during sft
scheduler: warmuplr # change to constantlr during sft
scheduler_conf:
warmup_steps: 2500
max_epoch: 200
grad_clip: 5
accum_grad: 2
log_interval: 100
save_per_step: -1
{
"train_micro_batch_size_per_gpu": 1,
"gradient_accumulation_steps": 1,
"steps_per_print": 100,
"gradient_clipping": 5,
"fp16": {
"enabled": false,
"auto_cast": false,
"loss_scale": 0,
"initial_scale_power": 16,
"loss_scale_window": 256,
"hysteresis": 2,
"consecutive_hysteresis": false,
"min_loss_scale": 1
},
"bf16": {
"enabled": false
},
"zero_force_ds_cpu_optimizer": false,
"zero_optimization": {
"stage": 2,
"offload_optimizer": {
"device": "none",
"pin_memory": true
},
"allgather_partitions": true,
"allgather_bucket_size": 5e8,
"overlap_comm": false,
"reduce_scatter": true,
"reduce_bucket_size": 5e8,
"contiguous_gradients" : true
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": 0.001,
"weight_decay": 0.0001,
"torch_adam": true,
"adam_w_mode": true
}
}
}
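A DeepSpeed config like the one above is typically handed to deepspeed.initialize; a minimal sketch (the placeholder module here is illustrative, not the repo's train.py):

import deepspeed
import torch.nn as nn

model = nn.Linear(80, 80)  # placeholder; the real script wraps the CosyVoice model
# `config` accepts the JSON path above (or an equivalent dict); DeepSpeed then
# builds the AdamW optimizer and ZeRO stage-2 partitioning from it.
model_engine, optimizer, _, _ = deepspeed.initialize(
    model=model,
    model_parameters=model.parameters(),
    config='conf/ds_stage2.json')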
../../../cosyvoice
#!/bin/bash
# Copyright 2014 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0
remove_archive=false
if [ "$1" == --remove-archive ]; then
remove_archive=true
shift
fi
if [ $# -ne 3 ]; then
echo "Usage: $0 [--remove-archive] <data-base> <url-base> <corpus-part>"
echo "e.g.: $0 /export/a15/vpanayotov/data www.openslr.org/resources/11 dev-clean"
echo "With --remove-archive it will remove the archive after successfully un-tarring it."
echo "<corpus-part> can be one of: dev-clean, test-clean, dev-other, test-other,"
echo " train-clean-100, train-clean-360, train-other-500."
exit 1
fi
data=$1
url=$2
part=$3
if [ ! -d "$data" ]; then
echo "$0: no such directory $data"
exit 1
fi
part_ok=false
list="dev_set test_set train_set"
for x in $list; do
if [ "$part" == $x ]; then part_ok=true; fi
done
if ! $part_ok; then
echo "$0: expected <corpus-part> to be one of $list, but got '$part'"
exit 1
fi
if [ -z "$url" ]; then
echo "$0: empty URL base."
exit 1
fi
if [ -f $data/.$part.complete ]; then
echo "$0: data part $part was already successfully extracted, nothing to do."
exit 0
fi
# Sizes of the archive files in bytes, for some older releases.
sizes_old="1035537823 2201936013 52627842921"
# sizes_new holds the archive file sizes of the final release; some of these are
# for parts we probably won't download.
sizes_new="3886385"
if [ -f $data/$part.tar.gz ]; then
size=$(/bin/ls -l $data/$part.tar.gz | awk '{print $5}')
size_ok=false
for s in $sizes_old $sizes_new; do if [ $s == $size ]; then size_ok=true; fi; done
if ! $size_ok; then
echo "$0: removing existing file $data/$part.tar.gz because its size in bytes $size"
echo "does not equal the size of one of the archives."
rm $data/$part.tar.gz
else
echo "$data/$part.tar.gz exists and appears to be complete."
fi
fi
if [ ! -f $data/$part.tar.gz ]; then
if ! which wget >/dev/null; then
echo "$0: wget is not installed."
exit 1
fi
full_url=$url/$part.tar.gz
echo "$0: downloading data from $full_url. This may take some time, please be patient."
if ! wget -P $data --no-check-certificate $full_url; then
echo "$0: error executing wget $full_url"
exit 1
fi
fi
if ! tar -C $data -xvzf $data/$part.tar.gz; then
echo "$0: error un-tarring archive $data/$part.tar.gz"
exit 1
fi
touch $data/.$part.complete
echo "$0: Successfully downloaded and un-tarred $data/$part.tar.gz"
if $remove_archive; then
echo "$0: removing $data/$part.tar.gz file since --remove-archive option was supplied."
rm $data/$part.tar.gz
fi
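# A hypothetical invocation of the script above (the data path is illustrative),
# matching how run.sh calls it:
#   local/download_and_untar.sh --remove-archive /mnt/data/magicdata-read www.openslr.org/resources/68 train_set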
import argparse
import logging
import os
from tqdm import tqdm
logger = logging.getLogger()
def main():
utt2wav, utt2text, utt2spk, spk2utt = {}, {}, {}, {}
with open(os.path.join(args.src_dir, "TRANS.txt"), "r") as f:
lines = f.readlines()[1:]  # skip the header row
lines = [l.split('\t') for l in lines]  # each row: wav_name \t speaker \t transcript
for wav, spk, content in tqdm(lines):
wav, spk, content = wav.strip(), spk.strip(), content.strip()
content = content.replace('[FIL]', '')  # strip annotation tags from the transcript
content = content.replace('[SPK]', '')
wav = os.path.join(args.src_dir, spk, wav)
if not os.path.exists(wav):
continue
utt = os.path.basename(wav).replace('.wav', '')
utt2wav[utt] = wav
utt2text[utt] = content
utt2spk[utt] = spk
if spk not in spk2utt:
spk2utt[spk] = []
spk2utt[spk].append(utt)
with open('{}/wav.scp'.format(args.des_dir), 'w') as f:
for k, v in utt2wav.items():
f.write('{} {}\n'.format(k, v))
with open('{}/text'.format(args.des_dir), 'w') as f:
for k, v in utt2text.items():
f.write('{} {}\n'.format(k, v))
with open('{}/utt2spk'.format(args.des_dir), 'w') as f:
for k, v in utt2spk.items():
f.write('{} {}\n'.format(k, v))
with open('{}/spk2utt'.format(args.des_dir), 'w') as f:
for k, v in spk2utt.items():
f.write('{} {}\n'.format(k, ' '.join(v)))
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--src_dir',
type=str)
parser.add_argument('--des_dir',
type=str)
args = parser.parse_args()
main()
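For reference, a sketch of the formats involved (the paths below are made up): each TRANS.txt row after the header is tab-separated as <wav_name> <speaker> <transcript>, and for a row naming 38_5718_20170915093303.wav under speaker 38_5718 the script emits lines such as:

wav.scp: 38_5718_20170915093303 /path/to/src_dir/38_5718/38_5718_20170915093303.wav
utt2spk: 38_5718_20170915093303 38_5718
spk2utt: 38_5718 38_5718_20170915093303 ...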
# NOTE(kan-bayashi): Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C
export PYTHONIOENCODING=UTF-8
export PYTHONPATH=../../../:../../../third_party/Matcha-TTS:$PYTHONPATH
#!/bin/bash
# Copyright 2024 Alibaba Inc. All Rights Reserved.
. ./path.sh || exit 1;
stage=-1
stop_stage=3
data_url=www.openslr.org/resources/68
data_dir=/mnt/hengwu.zty/data/tts/openslr/magicdata-read
pretrained_model_dir=../../../pretrained_models/CosyVoice-300M
if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
echo "Data Download"
for part in dev_set test_set train_set; do
local/download_and_untar.sh ${data_dir} ${data_url} ${part}
done
fi
if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
echo "Data preparation, prepare wav.scp/text/utt2spk/spk2utt"
for x in dev test train; do
mkdir -p data/$x
python local/prepare_data.py --src_dir $data_dir/$x --des_dir data/$x
done
fi
if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
echo "Extract campplus speaker embedding, you will get spk2embedding.pt and utt2embedding.pt in data/$x dir"
for x in dev test train; do
tools/extract_embedding.py --dir data/$x \
--onnx_path $pretrained_model_dir/campplus.onnx
done
fi
if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
echo "Extract discrete speech token, you will get utt2speech_token.pt in data/$x dir"
for x in dev test train; do
tools/extract_speech_token.py --dir data/$x \
--onnx_path $pretrained_model_dir/speech_tokenizer_v1.onnx
done
fi
if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
echo "Prepare required parquet format data, you should have prepared wav.scp/text/utt2spk/spk2utt/utt2embedding.pt/spk2embedding.pt/utt2speech_token.pt"
for x in dev test train; do
mkdir -p data/$x/parquet
tools/make_parquet_list.py --num_utts_per_parquet 1000 \
--num_processes 10 \
--src_dir data/$x \
--des_dir data/$x/parquet
done
fi
# inference
if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
echo "Run inference. Please make sure utt in tts_text is in prompt_data"
for mode in sft zero_shot; do
python cosyvoice/bin/inference.py --mode $mode \
--gpu 0 \
--config conf/cosyvoice.yaml \
--prompt_data data/test/parquet/data.list \
--prompt_utt2data data/test/parquet/utt2data.list \
--tts_text `pwd`/tts_text.json \
--llm_model $pretrained_model_dir/llm.pt \
--flow_model $pretrained_model_dir/flow.pt \
--hifigan_model $pretrained_model_dir/hift.pt \
--result_dir `pwd`/exp/cosyvoice/test/$mode
done
fi
# train llm
export CUDA_VISIBLE_DEVICES="0,1,2,3"
num_gpus=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
job_id=1986
dist_backend="nccl"
num_workers=2
prefetch=100
train_engine=torch_ddp
if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
echo "Run train. We only support llm traning for now. If your want to train from scratch, please use conf/cosyvoice.fromscratch.yaml"
if [ $train_engine == 'deepspeed' ]; then
echo "Notice deepspeed has its own optimizer config. Modify conf/ds_stage2.json if necessary"
fi
cp data/train/parquet/data.list data/train.data.list
cp data/dev/parquet/data.list data/dev.data.list
for model in llm; do
torchrun --nnodes=1 --nproc_per_node=$num_gpus \
--rdzv_id=$job_id --rdzv_backend="c10d" --rdzv_endpoint="localhost:0" \
cosyvoice/bin/train.py \
--train_engine $train_engine \
--config conf/cosyvoice.yaml \
--train_data data/train.data.list \
--cv_data data/dev.data.list \
--model $model \
--checkpoint $pretrained_model_dir/$model.pt \
--model_dir `pwd`/exp/cosyvoice/$model/$train_engine \
--tensorboard_dir `pwd`/tensorboard/cosyvoice/$model/$train_engine \
--ddp.dist_backend $dist_backend \
--num_workers ${num_workers} \
--prefetch ${prefetch} \
--pin_memory \
--deepspeed_config ./conf/ds_stage2.json \
--deepspeed.save_states model+optimizer
done
fi
../../../tools
{
"38_5718_20170915093303": [
"我想这出最好歌曲把歌词发到网上请别人帮我作曲急急",
"叫他明天早上差五分儿九点去机场"
],
"38_5721_20170915091235": [
"变温室调到零下两度档",
"交谈中请勿轻信汇款信息陌生电话请勿使用外挂软件"
],
"38_5733_20170915130323": [
"这是老鹰乐队的一首经典歌曲",
"我急用这段音乐我自己找到一段但是有现场杂音"
],
"38_5836_20170916221414": [
"给我播一个陶喆的专辑",
"这套餐好贵呀我发这么多短信贵死了"
]
}
# Unique model identifier
modelCode=919
# Model name
modelName=cosyvoice_pytorch
# Model description
modelDescription=CosyVoice model with multilingual, timbre, and emotion control
# Application scenarios
appScenario=inference,speech generation,finance,education,transportation,government
# Framework type
frameType=pytorch
FROM pytorch/pytorch:2.0.1-cuda11.7-cudnn8-runtime
ENV DEBIAN_FRONTEND=noninteractive
WORKDIR /opt/CosyVoice
RUN sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list
RUN apt-get update -y
RUN apt-get -y install git unzip git-lfs
RUN git lfs install
RUN git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git
# here we use python==3.10 because we cannot find an image that has both python3.8 and torch2.0.1-cu118 installed
RUN cd CosyVoice && pip3 install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ --trusted-host=mirrors.aliyun.com
RUN cd CosyVoice/runtime/python/grpc && python3 -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. cosyvoice.proto
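# A hypothetical build-and-run sequence for this image (tag and port are illustrative):
#   docker build -t cosyvoice:latest .
#   docker run -it --gpus all -p 50000:50000 cosyvoice:latest /bin/bash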
import argparse
import logging
import requests
def saveResponse(path, response):
# Open the file in binary write mode
with open(path, 'wb') as file:
# Write the binary content of the response to the file
file.write(response.content)
def main():
api = args.api_base
if args.mode == 'sft':
url = api + "/api/inference/sft"
payload={
'tts': args.tts_text,
'role': args.spk_id
}
response = requests.request("POST", url, data=payload)
saveResponse(args.tts_wav, response)
elif args.mode == 'zero_shot':
url = api + "/api/inference/zero-shot"
payload={
'tts': args.tts_text,
'prompt': args.prompt_text
}
files=[('audio', ('prompt_audio.wav', open(args.prompt_wav,'rb'), 'application/octet-stream'))]
response = requests.request("POST", url, data=payload, files=files)
saveResponse(args.tts_wav, response)
elif args.mode == 'cross_lingual':
url = api + "/api/inference/cross-lingual"
payload={
'tts': args.tts_text,
}
files=[('audio', ('prompt_audio.wav', open(args.prompt_wav,'rb'), 'application/octet-stream'))]
response = requests.request("POST", url, data=payload, files=files)
saveResponse(args.tts_wav, response)
else:
url = api + "/api/inference/instruct"
payload = {
'tts': args.tts_text,
'role': args.spk_id,
'instruct': args.instruct_text
}
response = requests.request("POST", url, data=payload)
saveResponse(args.tts_wav, response)
logging.info("Response save to {}", args.tts_wav)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--api_base',
type=str,
default='http://127.0.0.1:6006')
parser.add_argument('--mode',
default='sft',
choices=['sft', 'zero_shot', 'cross_lingual', 'instruct'],
help='request mode')
parser.add_argument('--tts_text',
type=str,
default='你好,我是通义千问语音合成大模型,请问有什么可以帮您的吗?')
parser.add_argument('--spk_id',
type=str,
default='中文女')
parser.add_argument('--prompt_text',
type=str,
default='希望你以后能够做的比我还好呦。')
parser.add_argument('--prompt_wav',
type=str,
default='../../../zero_shot_prompt.wav')
parser.add_argument('--instruct_text',
type=str,
default='Theo \'Crimson\', is a fiery, passionate rebel leader. Fights with fervor for justice, but struggles with impulsiveness.')
parser.add_argument('--tts_wav',
type=str,
default='demo.wav')
args = parser.parse_args()
prompt_sr, target_sr = 16000, 22050
main()
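Since the payloads above are plain form fields, an equivalent raw request can be issued with curl; a sketch for the sft endpoint (address, text, role, and output path are illustrative):

curl -X POST http://127.0.0.1:6006/api/inference/sft --data-urlencode 'tts=你好' --data-urlencode 'role=中文女' --output demo.wav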
# Set inference model
# export MODEL_DIR=pretrained_models/CosyVoice-300M-Instruct
# For development
# fastapi dev --port 6006 fastapi_server.py
# For production deployment
# fastapi run --port 6006 fastapi_server.py
import os
import sys
import io
import time
from fastapi import FastAPI, Response, File, UploadFile, Form
from fastapi.responses import HTMLResponse
from fastapi.middleware.cors import CORSMiddleware  # import the CORS middleware module
from contextlib import asynccontextmanager
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append('{}/../../..'.format(ROOT_DIR))
sys.path.append('{}/../../../third_party/Matcha-TTS'.format(ROOT_DIR))
from cosyvoice.cli.cosyvoice import CosyVoice
from cosyvoice.utils.file_utils import load_wav
import numpy as np
import torch
import torchaudio
import logging
logging.getLogger('matplotlib').setLevel(logging.WARNING)
class LaunchFailed(Exception):
pass
@asynccontextmanager
async def lifespan(app: FastAPI):
model_dir = os.getenv("MODEL_DIR", "pretrained_models/CosyVoice-300M-SFT")
if model_dir:
logging.info("MODEL_DIR is {}", model_dir)
app.cosyvoice = CosyVoice(model_dir)
# sft usage
logging.info("Avaliable speakers {}", app.cosyvoice.list_avaliable_spks())
else:
raise LaunchFailed("MODEL_DIR environment variable must be set")
yield
app = FastAPI(lifespan=lifespan)
# Configure which origins are allowed to access the API
origins = ["*"]  # "*" allows all origins; change to a list of specific IPs to restrict access.
app.add_middleware(
CORSMiddleware,
allow_origins=origins,  # allowed origins
allow_credentials=True,
allow_methods=["*"],  # HTTP methods allowed for cross-origin requests, e.g. GET, POST, PUT.
allow_headers=["*"])  # headers allowed in cross-origin requests, e.g. for identifying the caller.
def buildResponse(output):
buffer = io.BytesIO()
torchaudio.save(buffer, output, 22050, format="wav")
buffer.seek(0)
return Response(content=buffer.read(-1), media_type="audio/wav")
@app.post("/api/inference/sft")
@app.get("/api/inference/sft")
async def sft(tts: str = Form(), role: str = Form()):
start = time.process_time()
output = app.cosyvoice.inference_sft(tts, role)
end = time.process_time()
logging.info("infer time is {} seconds", end-start)
return buildResponse(output['tts_speech'])
@app.post("/api/inference/zero-shot")
async def zeroShot(tts: str = Form(), prompt: str = Form(), audio: UploadFile = File()):
start = time.process_time()
prompt_speech = load_wav(audio.file, 16000)
prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes()  # float32 in [-1, 1] -> int16 PCM bytes
prompt_speech_16k = torch.from_numpy(np.array(np.frombuffer(prompt_audio, dtype=np.int16))).unsqueeze(dim=0)
prompt_speech_16k = prompt_speech_16k.float() / (2**15)  # int16 PCM -> float32 in [-1, 1]
output = app.cosyvoice.inference_zero_shot(tts, prompt, prompt_speech_16k)
end = time.process_time()
logging.info("infer time is {} seconds", end-start)
return buildResponse(output['tts_speech'])
@app.post("/api/inference/cross-lingual")
async def crossLingual(tts: str = Form(), audio: UploadFile = File()):
start = time.process_time()
prompt_speech = load_wav(audio.file, 16000)
prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes()
prompt_speech_16k = torch.from_numpy(np.array(np.frombuffer(prompt_audio, dtype=np.int16))).unsqueeze(dim=0)
prompt_speech_16k = prompt_speech_16k.float() / (2**15)
output = app.cosyvoice.inference_cross_lingual(tts, prompt_speech_16k)
end = time.process_time()
logging.info("infer time is {} seconds", end-start)
return buildResponse(output['tts_speech'])
@app.post("/api/inference/instruct")
@app.get("/api/inference/instruct")
async def instruct(tts: str = Form(), role: str = Form(), instruct: str = Form()):
start = time.process_time()
output = app.cosyvoice.inference_instruct(tts, role, instruct)
end = time.process_time()
logging.info("infer time is {} seconds", end-start)
return buildResponse(output['tts_speech'])
@app.get("/api/roles")
async def roles():
return {"roles": app.cosyvoice.list_avaliable_spks()}
@app.get("/", response_class=HTMLResponse)
async def root():
return """
<!DOCTYPE html>
<html lang=zh-cn>
<head>
<meta charset=utf-8>
<title>Api information</title>
</head>
<body>
Get the supported roles from the roles API first, then pass a role and the text content to the TTS API for synthesis. <a href='./docs'>API documentation</a>
</body>
</html>
"""
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append('{}/../../..'.format(ROOT_DIR))
sys.path.append('{}/../../../third_party/Matcha-TTS'.format(ROOT_DIR))
import logging
import argparse
import torchaudio
import cosyvoice_pb2
import cosyvoice_pb2_grpc
import grpc
import torch
import numpy as np
from cosyvoice.utils.file_utils import load_wav
def main():
with grpc.insecure_channel("{}:{}".format(args.host, args.port)) as channel:
stub = cosyvoice_pb2_grpc.CosyVoiceStub(channel)
request = cosyvoice_pb2.Request()
if args.mode == 'sft':
logging.info('send sft request')
sft_request = cosyvoice_pb2.sftRequest()
sft_request.spk_id = args.spk_id
sft_request.tts_text = args.tts_text
request.sft_request.CopyFrom(sft_request)
elif args.mode == 'zero_shot':
logging.info('send zero_shot request')
zero_shot_request = cosyvoice_pb2.zeroshotRequest()
zero_shot_request.tts_text = args.tts_text
zero_shot_request.prompt_text = args.prompt_text
prompt_speech = load_wav(args.prompt_wav, 16000)
zero_shot_request.prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes()
request.zero_shot_request.CopyFrom(zero_shot_request)
elif args.mode == 'cross_lingual':
logging.info('send cross_lingual request')
cross_lingual_request = cosyvoice_pb2.crosslingualRequest()
cross_lingual_request.tts_text = args.tts_text
prompt_speech = load_wav(args.prompt_wav, 16000)
cross_lingual_request.prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes()
request.cross_lingual_request.CopyFrom(cross_lingual_request)
else:
logging.info('send instruct request')
instruct_request = cosyvoice_pb2.instructRequest()
instruct_request.tts_text = args.tts_text
instruct_request.spk_id = args.spk_id
instruct_request.instruct_text = args.instruct_text
request.instruct_request.CopyFrom(instruct_request)
response = stub.Inference(request)
logging.info('save response to {}'.format(args.tts_wav))
tts_speech = torch.from_numpy(np.array(np.frombuffer(response.tts_audio, dtype=np.int16))).unsqueeze(dim=0)
torchaudio.save(args.tts_wav, tts_speech, target_sr)
logging.info('get response')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--host',
type=str,
default='0.0.0.0')
parser.add_argument('--port',
type=int,
default=50000)
parser.add_argument('--mode',
default='sft',
choices=['sft', 'zero_shot', 'cross_lingual', 'instruct'],
help='request mode')
parser.add_argument('--tts_text',
type=str,
default='你好,我是通义千问语音合成大模型,请问有什么可以帮您的吗?')
parser.add_argument('--spk_id',
type=str,
default='中文女')
parser.add_argument('--prompt_text',
type=str,
default='希望你以后能够做的比我还好呦。')
parser.add_argument('--prompt_wav',
type=str,
default='../../../zero_shot_prompt.wav')
parser.add_argument('--instruct_text',
type=str,
default='Theo \'Crimson\', is a fiery, passionate rebel leader. Fights with fervor for justice, but struggles with impulsiveness.')
parser.add_argument('--tts_wav',
type=str,
default='demo.wav')
args = parser.parse_args()
prompt_sr, target_sr = 16000, 22050
main()
syntax = "proto3";
package cosyvoice;
option go_package = "protos/";
service CosyVoice{
rpc Inference(Request) returns (Response) {}
}
message Request{
oneof RequestPayload {
sftRequest sft_request = 1;
zeroshotRequest zero_shot_request = 2;
crosslingualRequest cross_lingual_request = 3;
instructRequest instruct_request = 4;
}
}
message sftRequest{
string spk_id = 1;
string tts_text = 2;
}
message zeroshotRequest{
string tts_text = 1;
string prompt_text = 2;
bytes prompt_audio = 3;
}
message crosslingualRequest{
string tts_text = 1;
bytes prompt_audio = 2;
}
message instructRequest{
string tts_text = 1;
string spk_id = 2;
string instruct_text = 3;
}
message Response{
bytes tts_audio = 1;
}
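The cosyvoice_pb2 / cosyvoice_pb2_grpc modules imported by the client and server are generated from this file, as done in the Dockerfile above:

python3 -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. cosyvoice.proto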
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append('{}/../../..'.format(ROOT_DIR))
sys.path.append('{}/../../../third_party/Matcha-TTS'.format(ROOT_DIR))
from concurrent import futures
import argparse
import cosyvoice_pb2
import cosyvoice_pb2_grpc
import logging
logging.getLogger('matplotlib').setLevel(logging.WARNING)
import grpc
import torch
import numpy as np
from cosyvoice.cli.cosyvoice import CosyVoice
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s')
class CosyVoiceServiceImpl(cosyvoice_pb2_grpc.CosyVoiceServicer):
def __init__(self, args):
self.cosyvoice = CosyVoice(args.model_dir)
logging.info('grpc service initialized')
def Inference(self, request, context):
if request.HasField('sft_request'):
logging.info('get sft inference request')
model_output = self.cosyvoice.inference_sft(request.sft_request.tts_text, request.sft_request.spk_id)
elif request.HasField('zero_shot_request'):
logging.info('get zero_shot inference request')
prompt_speech_16k = torch.from_numpy(np.array(np.frombuffer(request.zero_shot_request.prompt_audio, dtype=np.int16))).unsqueeze(dim=0)
prompt_speech_16k = prompt_speech_16k.float() / (2**15)
model_output = self.cosyvoice.inference_zero_shot(request.zero_shot_request.tts_text, request.zero_shot_request.prompt_text, prompt_speech_16k)
elif request.HasField('cross_lingual_request'):
logging.info('get cross_lingual inference request')
prompt_speech_16k = torch.from_numpy(np.array(np.frombuffer(request.cross_lingual_request.prompt_audio, dtype=np.int16))).unsqueeze(dim=0)
prompt_speech_16k = prompt_speech_16k.float() / (2**15)
model_output = self.cosyvoice.inference_cross_lingual(request.cross_lingual_request.tts_text, prompt_speech_16k)
else:
logging.info('get instruct inference request')
model_output = self.cosyvoice.inference_instruct(request.instruct_request.tts_text, request.instruct_request.spk_id, request.instruct_request.instruct_text)
logging.info('send inference response')
response = cosyvoice_pb2.Response()
response.tts_audio = (model_output['tts_speech'].numpy() * (2 ** 15)).astype(np.int16).tobytes()
return response
def main():
grpcServer = grpc.server(futures.ThreadPoolExecutor(max_workers=args.max_conc), maximum_concurrent_rpcs=args.max_conc)
cosyvoice_pb2_grpc.add_CosyVoiceServicer_to_server(CosyVoiceServiceImpl(args), grpcServer)
grpcServer.add_insecure_port('0.0.0.0:{}'.format(args.port))
grpcServer.start()
logging.info("server listening on 0.0.0.0:{}".format(args.port))
grpcServer.wait_for_termination()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--port',
type=int,
default=50000)
parser.add_argument('--max_conc',
type=int,
default=4)
parser.add_argument('--model_dir',
type=str,
default='iic/CosyVoice-300M',
help='local path or modelscope repo id')
args = parser.parse_args()
main()
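A minimal launch sketch, assuming the server and client above are saved as server.py and client.py (the model path is the argparse default):

python3 server.py --port 50000 --max_conc 4 --model_dir iic/CosyVoice-300M
python3 client.py --host localhost --port 50000 --mode sft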
#!/usr/bin/env python3
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
import torchaudio
from tqdm import tqdm
import onnxruntime
import torchaudio.compliance.kaldi as kaldi
def main(args):
utt2wav, utt2spk = {}, {}
with open('{}/wav.scp'.format(args.dir)) as f:
for l in f:
l = l.replace('\n', '').split()
utt2wav[l[0]] = l[1]
with open('{}/utt2spk'.format(args.dir)) as f:
for l in f:
l = l.replace('\n', '').split()
utt2spk[l[0]] = l[1]
option = onnxruntime.SessionOptions()
option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
option.intra_op_num_threads = 1
providers = ["CPUExecutionProvider"]
ort_session = onnxruntime.InferenceSession(args.onnx_path, sess_options=option, providers=providers)
utt2embedding, spk2embedding = {}, {}
for utt in tqdm(utt2wav.keys()):
audio, sample_rate = torchaudio.load(utt2wav[utt])
if sample_rate != 16000:
audio = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(audio)
feat = kaldi.fbank(audio,
num_mel_bins=80,
dither=0,
sample_frequency=16000)
feat = feat - feat.mean(dim=0, keepdim=True)  # cepstral mean normalization over the time axis
embedding = ort_session.run(None, {ort_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist()
utt2embedding[utt] = embedding
spk = utt2spk[utt]
if spk not in spk2embedding:
spk2embedding[spk] = []
spk2embedding[spk].append(embedding)
for k, v in spk2embedding.items():
spk2embedding[k] = torch.tensor(v).mean(dim=0).tolist()  # speaker embedding = mean over its utterances
torch.save(utt2embedding, '{}/utt2embedding.pt'.format(args.dir))
torch.save(spk2embedding, '{}/spk2embedding.pt'.format(args.dir))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dir',
type=str)
parser.add_argument('--onnx_path',
type=str)
args = parser.parse_args()
main(args)
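The saved dictionaries can be inspected directly; a quick sketch (paths are illustrative):

import torch
utt2embedding = torch.load('data/train/utt2embedding.pt')  # {utt: embedding as a flat list}
spk2embedding = torch.load('data/train/spk2embedding.pt')  # {spk: mean of its utterances' embeddings}
print(len(next(iter(utt2embedding.values()))))  # embedding dim, e.g. 192 (matches spk_embed_dim)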