Unverified commit 8f5ef3a2, authored by Yoach Lacombe, committed by GitHub

Update training guide (#102)

* Update README.md

* Update README.md

* Update README.md

* update configs and readme

* fix training and eval single gpus and long audios errors

* fix error transcriptions none

* fix transcription null wer

---------

Co-authored-by: yoach@huggingface.co <Yoach Lacombe>
parent 9f34c1b8
@@ -118,8 +118,6 @@ We've set up an [inference guide](INFERENCE.md) to make generation faster. Think
https://github.com/huggingface/parler-tts/assets/52246514/251e2488-fe6e-42c1-81cd-814c5b7795b0

## Training

-> [!WARNING]
-> The training guide has yet to be adapted to the newest checkpoints.

<a target="_blank" href="https://colab.research.google.com/github/ylacombe/scripts_and_notebooks/blob/main/Finetuning_Parler_TTS_on_a_single_speaker_dataset.ipynb">
  <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
@@ -131,12 +129,15 @@ The [training folder](/training/) contains all the information to train or fine-
- [3. A training guide](/training/README.md#3-training)

> [!IMPORTANT]
-> **TL;DR:** After having followed the [installation steps](/training/README.md#requirements), you can reproduce the Parler-TTS Mini v0.1 training recipe with the following command line:
+> **TL;DR:** After having followed the [installation steps](/training/README.md#requirements), you can reproduce the Parler-TTS Mini v1 training recipe with the following command line:

```sh
-accelerate launch ./training/run_parler_tts_training.py ./helpers/training_configs/starting_point_0.01.json
+accelerate launch ./training/run_parler_tts_training.py ./helpers/training_configs/starting_point_v1.json
```

+> [!IMPORTANT]
+> You can also follow [this fine-tuning guide](https://colab.research.google.com/github/ylacombe/scripts_and_notebooks/blob/main/Finetuning_Parler_TTS_on_a_single_speaker_dataset.ipynb) on a mono-speaker dataset example.

## Acknowledgements

This library builds on top of a number of open-source giants, to whom we'd like to extend our warmest thanks for providing these tools!
......
@@ -61,7 +61,7 @@ if __name__ == "__main__":
    # set other default generation config params
    model.generation_config.max_length = int(30 * model.audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True  # True
-   model.generation_config.guidance_scale = 1  # 3.0
    model.config.pad_token_id = encodec_vocab_size
    model.config.decoder_start_token_id = encodec_vocab_size + 1
......
@@ -59,7 +59,7 @@ if __name__ == "__main__":
    # set other default generation config params
    model.generation_config.max_length = int(30 * model.audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True  # True
-   model.generation_config.guidance_scale = 1  # 3.0
    model.config.pad_token_id = encodec_vocab_size
    model.config.decoder_start_token_id = encodec_vocab_size + 1
......
Added file: initialization script for a larger Parler-TTS model (30 decoder layers, hidden size 1536):

```python
from parler_tts import ParlerTTSForCausalLM, ParlerTTSForConditionalGeneration, ParlerTTSDecoderConfig
from transformers import AutoConfig
import os
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("save_directory", type=str, help="Directory where to save the model and the decoder.")
    parser.add_argument("--text_model", type=str, help="Repository id or path to the text encoder.")
    parser.add_argument("--audio_model", type=str, help="Repository id or path to the audio encoder.")
    args = parser.parse_args()

    text_model = args.text_model
    encodec_version = args.audio_model

    t5 = AutoConfig.from_pretrained(text_model)
    encodec = AutoConfig.from_pretrained(encodec_version)

    encodec_vocab_size = encodec.codebook_size
    num_codebooks = encodec.num_codebooks
    print("num_codebooks", num_codebooks)

    decoder_config = ParlerTTSDecoderConfig(
        vocab_size=encodec_vocab_size + 64,  # + 64 instead of +1 to have a multiple of 64
        max_position_embeddings=4096,  # 30 s = 2580
        num_hidden_layers=30,
        ffn_dim=6144,
        num_attention_heads=24,
        num_key_value_heads=24,
        layerdrop=0.0,
        use_cache=True,
        activation_function="gelu",
        hidden_size=1536,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        pad_token_id=encodec_vocab_size,
        eos_token_id=encodec_vocab_size,
        bos_token_id=encodec_vocab_size + 1,
        num_codebooks=num_codebooks,
    )

    decoder = ParlerTTSForCausalLM(decoder_config)
    decoder.save_pretrained(os.path.join(args.save_directory, "decoder"))

    model = ParlerTTSForConditionalGeneration.from_sub_models_pretrained(
        text_encoder_pretrained_model_name_or_path=text_model,
        audio_encoder_pretrained_model_name_or_path=encodec_version,
        decoder_pretrained_model_name_or_path=os.path.join(args.save_directory, "decoder"),
        vocab_size=t5.vocab_size,
    )

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = encodec_vocab_size + 1
    model.generation_config.pad_token_id = encodec_vocab_size
    model.generation_config.eos_token_id = encodec_vocab_size

    # set other default generation config params
    model.generation_config.max_length = int(30 * model.audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True  # True

    model.config.pad_token_id = encodec_vocab_size
    model.config.decoder_start_token_id = encodec_vocab_size + 1

    model.save_pretrained(os.path.join(args.save_directory, "parler-tts-untrained-larger/"))
```
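Once this script has run, a quick sanity check is to load the untrained checkpoint back and inspect the decoder configuration. This is a minimal sketch, not part of the commit: the path assumes the default sub-folder name written by the script's last line, and the `model.decoder` attribute is assumed to mirror the `model.audio_encoder` access the script already uses.

```python
from parler_tts import ParlerTTSForConditionalGeneration

# <save_directory> is whatever was passed to the script above;
# "parler-tts-untrained-larger" is the sub-folder name from its final save_pretrained call.
model = ParlerTTSForConditionalGeneration.from_pretrained("./my_save_dir/parler-tts-untrained-larger")

print(model.decoder.config.num_hidden_layers)  # expected: 30
print(model.decoder.config.hidden_size)        # expected: 1536
print(model.generation_config.max_length)      # 30 s worth of audio-codec frames
```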
@@ -61,7 +61,6 @@ if __name__ == "__main__":
    # set other default generation config params
    model.generation_config.max_length = int(30 * model.audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True  # True
-   model.generation_config.guidance_scale = 1  # 3.0
    model.config.pad_token_id = encodec_vocab_size
    model.config.decoder_start_token_id = encodec_vocab_size + 1
......
Added training config (Mini run):

```json
{
    "model_name_or_path": "./parler-tts-untrained-600M/parler-tts-untrained-600M/",
    "save_to_disk": "./tmp_dataset_audio/",
    "temporary_save_to_disk": "./audio_code_tmp/",
    "wandb_project": "parler-tts-50k-hours",
    "wandb_run_name": "Mini",
    "feature_extractor_name": "ylacombe/dac_44khZ_8kbps",
    "description_tokenizer_name": "google/flan-t5-large",
    "prompt_tokenizer_name": "google/flan-t5-large",
    "report_to": ["wandb"],
    "overwrite_output_dir": true,
    "output_dir": "./output_dir_training",
    "train_dataset_name": "ylacombe/libritts_r_filtered+ylacombe/libritts_r_filtered+ylacombe/libritts_r_filtered+parler-tts/mls_eng",
    "train_metadata_dataset_name": "ylacombe/libritts-r-filtered-descriptions-10k-v5-without-accents+ylacombe/libritts-r-filtered-descriptions-10k-v5-without-accents+ylacombe/libritts-r-filtered-descriptions-10k-v5-without-accents+ylacombe/mls-eng-descriptions-v4",
    "train_dataset_config_name": "clean+clean+other+default",
    "train_split_name": "train.clean.360+train.clean.100+train.other.500+train",
    "eval_dataset_name": "ylacombe/libritts_r_filtered+parler-tts/mls_eng",
    "eval_metadata_dataset_name": "ylacombe/libritts-r-filtered-descriptions-10k-v5-without-accents+ylacombe/mls-eng-descriptions-v4",
    "eval_dataset_config_name": "other+default",
    "eval_split_name": "test.other+test",
    "target_audio_column_name": "audio",
    "description_column_name": "text_description",
    "prompt_column_name": "text",
    "max_eval_samples": 96,
    "max_duration_in_seconds": 30,
    "min_duration_in_seconds": 2.0,
    "max_text_length": 600,
    "group_by_length": true,
    "add_audio_samples_to_wandb": true,
    "id_column_name": "id",
    "preprocessing_num_workers": 8,
    "do_train": true,
    "num_train_epochs": 4,
    "gradient_accumulation_steps": 4,
    "gradient_checkpointing": false,
    "per_device_train_batch_size": 6,
    "learning_rate": 0.00095,
    "adam_beta1": 0.9,
    "adam_beta2": 0.99,
    "weight_decay": 0.01,
    "lr_scheduler_type": "constant_with_warmup",
    "warmup_steps": 20000,
    "logging_steps": 1000,
    "freeze_text_encoder": true,
    "do_eval": true,
    "predict_with_generate": true,
    "include_inputs_for_metrics": true,
    "evaluation_strategy": "steps",
    "eval_steps": 10000,
    "save_steps": 10000,
    "per_device_eval_batch_size": 4,
    "audio_encoder_per_device_batch_size": 24,
    "dtype": "bfloat16",
    "seed": 456,
    "dataloader_num_workers": 8,
    "attn_implementation": "sdpa"
}
```
Added training config (Large run):

```json
{
    "model_name_or_path": "./parler-tts-untrained-large/parler-tts-untrained-large",
    "save_to_disk": "./tmp_dataset_audio/",
    "temporary_save_to_disk": "./audio_code_tmp/",
    "wandb_project": "parler-tts-50k-hours",
    "wandb_run_name": "Large",
    "feature_extractor_name": "ylacombe/dac_44khZ_8kbps",
    "description_tokenizer_name": "google/flan-t5-large",
    "prompt_tokenizer_name": "google/flan-t5-large",
    "report_to": ["wandb"],
    "overwrite_output_dir": true,
    "output_dir": "./output_dir_training",
    "train_dataset_name": "ylacombe/libritts_r_filtered+ylacombe/libritts_r_filtered+ylacombe/libritts_r_filtered+parler-tts/mls_eng",
    "train_metadata_dataset_name": "ylacombe/libritts-r-filtered-descriptions-10k-v5-without-accents+ylacombe/libritts-r-filtered-descriptions-10k-v5-without-accents+ylacombe/libritts-r-filtered-descriptions-10k-v5-without-accents+ylacombe/mls-eng-descriptions-v4",
    "train_dataset_config_name": "clean+clean+other+default",
    "train_split_name": "train.clean.360+train.clean.100+train.other.500+train",
    "eval_dataset_name": "ylacombe/libritts_r_filtered+parler-tts/mls_eng",
    "eval_metadata_dataset_name": "ylacombe/libritts-r-filtered-descriptions-10k-v5-without-accents+ylacombe/mls-eng-descriptions-v4",
    "eval_dataset_config_name": "other+default",
    "eval_split_name": "test.other+test",
    "target_audio_column_name": "audio",
    "description_column_name": "text_description",
    "prompt_column_name": "text",
    "max_eval_samples": 96,
    "max_duration_in_seconds": 30,
    "min_duration_in_seconds": 2.0,
    "max_text_length": 600,
    "group_by_length": true,
    "add_audio_samples_to_wandb": true,
    "id_column_name": "id",
    "preprocessing_num_workers": 8,
    "do_train": true,
    "num_train_epochs": 4,
    "gradient_accumulation_steps": 4,
    "gradient_checkpointing": false,
    "per_device_train_batch_size": 3,
    "learning_rate": 0.0015,
    "adam_beta1": 0.9,
    "adam_beta2": 0.99,
    "weight_decay": 0.01,
    "lr_scheduler_type": "constant_with_warmup",
    "warmup_steps": 10000,
    "logging_steps": 1000,
    "freeze_text_encoder": true,
    "do_eval": true,
    "predict_with_generate": true,
    "include_inputs_for_metrics": true,
    "evaluation_strategy": "steps",
    "eval_steps": 10000,
    "save_steps": 10000,
    "save_total_limit": 10,
    "per_device_eval_batch_size": 6,
    "audio_encoder_per_device_batch_size": 24,
    "dtype": "bfloat16",
    "seed": 738,
    "dataloader_num_workers": 8,
    "attn_implementation": "sdpa"
}
```
@@ -4,13 +4,10 @@
  <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>

-> [!WARNING]
-> The training guide has yet to be adapted to the newest checkpoints.
-**TL;DR:** After having followed the [installation steps](#requirements), you can reproduce the [Parler-TTS Mini v0.1](https://huggingface.co/parler-tts/parler_tts_mini_v0.1) training recipe with the following command line:
+**TL;DR:** After having followed the [installation steps](#requirements), you can reproduce the [Parler-TTS Mini v1](https://huggingface.co/parler-tts/parler-tts-mini-v1) training recipe with the following command line:

```sh
-accelerate launch ./training/run_parler_tts_training.py ./helpers/training_configs/starting_point_0.01.json
+accelerate launch ./training/run_parler_tts_training.py ./helpers/training_configs/starting_point_v1.json
```

-------------
@@ -25,10 +22,10 @@ This sub-folder contains all the information to train or fine-tune your own Parl
## 1. Architecture

-At the moment, Parler-TTS architecture is a carbon copy of the [MusicGen architecture](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/musicgen#model-structure) and can be decomposed into three distinct stages:
+At the moment, the Parler-TTS architecture is almost a carbon copy of the [MusicGen architecture](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/musicgen#model-structure) and can be decomposed into three distinct stages:
1. Text encoder: maps the text descriptions to a sequence of hidden-state representations. Parler-TTS uses a frozen text encoder initialised entirely from Flan-T5
2. Parler-TTS decoder: a language model (LM) that auto-regressively generates audio tokens (or codes) conditional on the encoder hidden-state representations
-3. Audio codec: used to recover the audio waveform from the audio tokens predicted by the decoder. We use the [DAC model](https://github.com/descriptinc/descript-audio-codec) from Descript, although other codec models, such as [EnCodec](https://huggingface.co/facebook/encodec_48khz), can also be used
+3. Audio codec: used to recover the audio waveform from the audio tokens predicted by the decoder. We use the [DAC model](https://github.com/descriptinc/descript-audio-codec) from Descript, although other codec models, such as [EnCodec](https://huggingface.co/facebook/encodec_48khz), can also be used.

Parler-TTS, however, introduces some small tweaks:
- The text **description** is passed through the text encoder and used in the cross-attention layers of the decoder.
@@ -80,7 +77,7 @@ And then enter an authentication token from https://huggingface.co/settings/toke
Depending on your compute resources and your dataset, you need to choose between fine-tuning a pre-trained model and training a new model from scratch.

-In that sense, we released a 600M checkpoint trained on 10.5K hours of annotated data under the repository id: [`parler-tts/parler_tts_mini_v0.1`](https://huggingface.co/parler-tts/parler_tts_mini_v0.1), that you can fine-tune for your own use-case.
+In that sense, we released an 880M checkpoint trained on 45K hours of annotated data under the repository id [`parler-tts/parler-tts-mini-v1`](https://huggingface.co/parler-tts/parler-tts-mini-v1), which you can fine-tune for your own use-case.

You can also train your own model from scratch. You can find [here](/helpers/model_init_scripts/) examples of how to initialize a model from scratch. For example, you can initialize a dummy model with:
@@ -88,10 +85,10 @@ You can also train you own model from scratch. You can find [here](/helpers/mode
python helpers/model_init_scripts/init_dummy_model.py ./parler-tts-untrained-dummy --text_model "google-t5/t5-small" --audio_model "parler-tts/dac_44khZ_8kbps"
```

-In the rest of this guide, and to reproduce the Parler-TTS Mini v0.1 training recipe, we'll use a 600M parameters model that we'll initialize with:
+In the rest of this guide, and to reproduce the Parler-TTS Mini v1 training recipe, we'll use an 880M-parameter model that we'll initialize with:

```sh
-python helpers/model_init_scripts/init_model_600M.py ./parler-tts-untrained-600M --text_model "google/flan-t5-base" --audio_model "parler-tts/dac_44khZ_8kbps"
+python helpers/model_init_scripts/init_model_600M.py ./parler-tts-untrained-600M --text_model "google/flan-t5-large" --audio_model "parler-tts/dac_44khZ_8kbps"
```
@@ -104,11 +101,11 @@ To train your own Parler-TTS, you need datasets with 3 main features:
Note that we made the choice to use descriptions of the main speech characteristics (speaker pitch, speaking rate, level of noise, etc.), but you are free to use any handmade or generated text description that makes sense.

-To train Parler-TTS Mini v0.1, we used:
-* The full [LibriTTS-R dataset](https://huggingface.co/datasets/blabble-io/libritts_r), a 1K hours high-quality speech dataset.
-* A [10K hours subset](https://huggingface.co/datasets/parler-tts/mls_eng_10k) of [Multilingual LibriSpeech](https://huggingface.co/datasets/facebook/multilingual_librispeech).
-Both datasets have been annotated using the [Data-Speech](https://github.com/huggingface/dataspeech) recipe, respectively [here](https://huggingface.co/datasets/parler-tts/libritts_r_tags_tagged_10k_generated) and [here](https://huggingface.co/datasets/parler-tts/mls-eng-10k-tags_tagged_10k_generated).
+To train Parler-TTS Mini v1, we used:
+* A [filtered version](https://huggingface.co/datasets/parler-tts/libritts_r_filtered) of the [LibriTTS-R dataset](https://huggingface.co/datasets/blabble-io/libritts_r), a 1K-hour high-quality speech dataset.
+* The [English subset](https://huggingface.co/datasets/parler-tts/mls_eng) of [Multilingual LibriSpeech](https://huggingface.co/datasets/facebook/multilingual_librispeech).
+Both datasets have been annotated using the [Data-Speech](https://github.com/huggingface/dataspeech) recipe, respectively [here](https://huggingface.co/datasets/parler-tts/libritts-r-filtered-speaker-descriptions) and [here](https://huggingface.co/datasets/parler-tts/mls-eng-speaker-descriptions).
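To get a feel for these datasets before training, you can load one audio configuration next to its speaker-description (metadata) dataset. This is a minimal sketch, not part of the commit: the dataset ids, configuration, split and column names come from the training arguments in this diff, and the metadata dataset is assumed to expose the same configurations and splits as the audio dataset.

```python
from datasets import load_dataset

# Audio dataset: provides the "audio" target and the "text" transcription prompt.
audio_ds = load_dataset("parler-tts/libritts_r_filtered", "clean", split="train.clean.100")

# Data-Speech annotations: no audio column, but the "text_description" conditioning text.
desc_ds = load_dataset("parler-tts/libritts-r-filtered-speaker-descriptions", "clean", split="train.clean.100")

print(audio_ds.column_names)  # expected to include "audio", "text" and "id"
print(desc_ds.column_names)   # expected to include "text_description" and "id"
assert len(audio_ds) == len(desc_ds), "rows should align one-to-one"
```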
## 3. Training

@@ -118,22 +115,22 @@ The script [`run_parler_tts_training.py`](/training/run_parler_tts_training.py)
2. pre-compute audio tokens
3. train Parler-TTS

-To train Parler-TTS Mini v0.1, we roughly used:
+To train Parler-TTS Mini v1, we roughly used:

```sh
accelerate launch ./training/run_parler_tts_training.py \
    --model_name_or_path "./parler-tts-untrained-600M/parler-tts-untrained-600M/" \
    --feature_extractor_name "parler-tts/dac_44khZ_8kbps" \
-   --description_tokenizer_name "google/flan-t5-base" \
+   --description_tokenizer_name "google/flan-t5-large" \
-   --prompt_tokenizer_name "google/flan-t5-base" \
+   --prompt_tokenizer_name "google/flan-t5-large" \
    --report_to "wandb" \
    --overwrite_output_dir true \
-   --train_dataset_name "blabble-io/libritts_r+blabble-io/libritts_r+blabble-io/libritts_r+parler-tts/mls_eng_10k" \
+   --train_dataset_name "parler-tts/libritts_r_filtered+parler-tts/libritts_r_filtered+parler-tts/libritts_r_filtered+parler-tts/mls_eng" \
-   --train_metadata_dataset_name "parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/mls-eng-10k-tags_tagged_10k_generated" \
+   --train_metadata_dataset_name "parler-tts/libritts-r-filtered-speaker-descriptions+parler-tts/libritts-r-filtered-speaker-descriptions+parler-tts/libritts-r-filtered-speaker-descriptions+parler-tts/mls-eng-speaker-descriptions" \
    --train_dataset_config_name "clean+clean+other+default" \
    --train_split_name "train.clean.360+train.clean.100+train.other.500+train" \
-   --eval_dataset_name "blabble-io/libritts_r+parler-tts/mls_eng_10k" \
+   --eval_dataset_name "parler-tts/libritts_r_filtered+parler-tts/mls_eng" \
-   --eval_metadata_dataset_name "parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/mls-eng-10k-tags_tagged_10k_generated" \
+   --eval_metadata_dataset_name "parler-tts/libritts-r-filtered-speaker-descriptions+parler-tts/mls-eng-speaker-descriptions" \
    --eval_dataset_config_name "other+default" \
    --eval_split_name "test.other+test" \
    --target_audio_column_name "audio" \
@@ -141,15 +138,15 @@ accelerate launch ./training/run_parler_tts_training.py \
    --prompt_column_name "text" \
    --max_duration_in_seconds 30 \
    --min_duration_in_seconds 2.0 \
-   --max_text_length 400 \
+   --max_text_length 600 \
    --add_audio_samples_to_wandb true \
    --id_column_name "id" \
    --preprocessing_num_workers 8 \
    --do_train true \
-   --num_train_epochs 40 \
+   --num_train_epochs 4 \
-   --gradient_accumulation_steps 8 \
+   --gradient_accumulation_steps 6 \
    --gradient_checkpointing false \
-   --per_device_train_batch_size 3 \
+   --per_device_train_batch_size 4 \
    --learning_rate 0.00095 \
    --adam_beta1 0.9 \
    --adam_beta2 0.99 \
@@ -164,8 +161,8 @@ accelerate launch ./training/run_parler_tts_training.py \
    --evaluation_strategy steps \
    --eval_steps 10000 \
    --save_steps 10000 \
-   --per_device_eval_batch_size 12 \
+   --per_device_eval_batch_size 4 \
-   --audio_encoder_per_device_batch_size 20 \
+   --audio_encoder_per_device_batch_size 24 \
    --dtype "bfloat16" \
    --seed 456 \
    --output_dir "./output_dir_training/" \
@@ -173,33 +170,34 @@ accelerate launch ./training/run_parler_tts_training.py \
    --save_to_disk "./tmp_dataset_audio/" \
    --max_eval_samples 96 \
    --dataloader_num_workers 8 \
-   --group_by_length true
+   --group_by_length true \
+   --attn_implementation "sdpa"
```
In particular, note how multiple training datasets, metadata datasets, configurations and splits can be loaded by separating the dataset arguments with "+" symbols:

```sh
-"train_dataset_name": "blabble-io/libritts_r+blabble-io/libritts_r+blabble-io/libritts_r+parler-tts/mls_eng_10k",
-"train_metadata_dataset_name": "parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/mls-eng-10k-tags_tagged_10k_generated",
+"train_dataset_name": "parler-tts/libritts_r_filtered+parler-tts/libritts_r_filtered+parler-tts/libritts_r_filtered+parler-tts/mls_eng",
+"train_metadata_dataset_name": "parler-tts/libritts-r-filtered-speaker-descriptions+parler-tts/libritts-r-filtered-speaker-descriptions+parler-tts/libritts-r-filtered-speaker-descriptions+parler-tts/mls-eng-speaker-descriptions",
"train_dataset_config_name": "clean+clean+other+default",
"train_split_name": "train.clean.360+train.clean.100+train.other.500+train",
```
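The "+"-separated strings are purely positional: the i-th dataset name goes with the i-th configuration and the i-th split. A minimal illustration of the convention (not the training script's actual parsing code):

```python
# Split each "+"-separated argument and pair the entries positionally.
dataset_names = "parler-tts/libritts_r_filtered+parler-tts/libritts_r_filtered+parler-tts/libritts_r_filtered+parler-tts/mls_eng".split("+")
dataset_configs = "clean+clean+other+default".split("+")
dataset_splits = "train.clean.360+train.clean.100+train.other.500+train".split("+")

for name, config, split in zip(dataset_names, dataset_configs, dataset_splits):
    print(f"{name:<40} config={config:<8} split={split}")
```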
-Additionally, you can also write a JSON config file. Here, [starting_point_0.01.json](helpers/training_configs/starting_point_0.01.json) contains the exact same hyper-parameters than above and can be launched like that:
+Additionally, you can also write a JSON config file. Here, [starting_point_v1.json](helpers/training_configs/starting_point_v1.json) contains the exact same hyper-parameters as above and can be launched like this:

```sh
-accelerate launch ./training/run_parler_tts_training.py ./helpers/training_configs/starting_point_0.01.json
+accelerate launch ./training/run_parler_tts_training.py ./helpers/training_configs/starting_point_v1.json
```
-Training logs will be reported to wandb, provided that you passed `--report_to "wandb"` to the arguments. An example of what a training log from the above training looks like can be found [here](https://wandb.ai/ylacombe/parler-tts-300M-punctuated/runs/q6h7hspc?nw=nwuserylacombe).
+Training logs will be reported to wandb, provided that you passed `--report_to "wandb"` to the arguments.

> [!TIP]
-> Starting training a new model from scratch can easily be overwhelming, so here's what training looked like for v0.1: [logs](https://api.wandb.ai/links/ylacombe/ea449l81)
+> Starting training a new model from scratch can easily be overwhelming, so here's what training looked like for v1: [logs](https://api.wandb.ai/links/ylacombe/j7g8isjn)

-Scaling to multiple GPUs using [distributed data parallelism (DDP)](https://pytorch.org/tutorials/beginner/ddp_series_theory.html) is trivial: simply run `accelerate config` and select the multi-GPU option, specifying the IDs of the GPUs you wish to use. The above script can then be run using DDP with no code changes. In our case, we used a node of 8 H100 80GB to train Parler-TTS v0.1 for around 4 days.
+Scaling to multiple GPUs using [distributed data parallelism (DDP)](https://pytorch.org/tutorials/beginner/ddp_series_theory.html) is trivial: simply run `accelerate config` and select the multi-GPU option, specifying the IDs of the GPUs you wish to use. The above script can then be run using DDP with no code changes. In our case, we used 4 nodes of 8 H100 80GB to train Parler-TTS Mini for around 1.5 days.

There are a few other noteworthy arguments:
-1. `train_metadata_dataset_name` and `eval_metadata_dataset_name` specify, if necessary, the names of the dataset(s) that contain(s) the conditionning text descriptions. For example, this [dataset resulting from the Data-Speech annotation process](https://huggingface.co/datasets/parler-tts/libritts_r_tags_tagged_10k_generated) is saved without the audio column, as it's costly to write and push audio data, so it needs to be concatenated back to the original LibriTTS-R dataset.
+1. `train_metadata_dataset_name` and `eval_metadata_dataset_name` specify, if necessary, the names of the dataset(s) that contain(s) the conditioning text descriptions. For example, this [dataset resulting from the Data-Speech annotation process](https://huggingface.co/datasets/parler-tts/libritts-r-filtered-speaker-descriptions) is saved without the audio column, as it's costly to write and push audio data, so it needs to be concatenated back to the original LibriTTS-R dataset (see the sketch after this list).
2. As noted above, the script pre-computes audio tokens, since computing audio codes is costly and only needs to be done once given that we're freezing the audio encoder. `audio_encoder_per_device_batch_size` is used to specify the per-device batch size for this pre-processing step.
3. Additionally, when scaling up the training data and iterating on the hyper-parameters or the model architecture, we might want to avoid recomputing the audio tokens at each training run. That's why we introduced two additional parameters, `save_to_disk` and `temporary_save_to_disk`, that serve as temporary buffers to save intermediary datasets. Note that processed data is made of text and audio tokens, which are much more memory-efficient, so the additional required space is negligible.
4. `predict_with_generate` and `add_audio_samples_to_wandb` are required to store generated audio and to compute WER and CLAP similarity.
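To make point 1 concrete, here is a minimal sketch of gluing a description-only metadata dataset back onto its audio dataset column-wise with `datasets.concatenate_datasets`. It is an illustration under the assumption that both datasets share the same row order and length; it is not the code path used by `run_parler_tts_training.py`.

```python
from datasets import load_dataset, concatenate_datasets

audio_ds = load_dataset("parler-tts/libritts_r_filtered", "other", split="test.other")
desc_ds = load_dataset("parler-tts/libritts-r-filtered-speaker-descriptions", "other", split="test.other")

# Drop any columns the two datasets share (e.g. "id") so the horizontal concatenation doesn't clash.
desc_ds = desc_ds.remove_columns([col for col in desc_ds.column_names if col in audio_ds.column_names])

# axis=1 concatenates column-wise: the audio rows gain the "text_description" column.
full_ds = concatenate_datasets([audio_ds, desc_ds], axis=1)
print(full_ds.column_names)
```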
@@ -211,4 +209,4 @@ And finally, two additional comments:

> [!TIP]
> Fine-tuning is as easy as modifying `model_name_or_path` to a pre-trained model.
-> For example: `--model_name_or_path parler-tts/parler_tts_mini_v0.1`.
+> For example: `--model_name_or_path parler-tts/parler-tts-mini-v1`.
@@ -114,22 +114,28 @@ def wer(
        normalized_predictions.append(norm_pred)
        normalized_references.append(norm_ref)

-   word_error = 100 * metric.compute(predictions=normalized_predictions, references=normalized_references)
+   word_error = 100
    clean_word_error = None
    noisy_word_error = None
    percent_clean_samples = 0
-   if noise_level_to_compute_clean_wer and si_sdr_measures:
-       si_sdr_measures = np.array(si_sdr_measures)
-       mask = si_sdr_measures >= noise_level_to_compute_clean_wer
-       if mask.any():
-           clean_word_error = 100 * metric.compute(
-               predictions=np.array(normalized_predictions)[mask], references=np.array(normalized_references)[mask]
-           )
-           noisy_word_error = 100 * metric.compute(
-               predictions=np.array(normalized_predictions)[~mask], references=np.array(normalized_references)[~mask]
-           )
-           percent_clean_samples = mask.sum() / len(mask)
+   if len(normalized_references) > 0:
+       word_error = 100 * metric.compute(predictions=normalized_predictions, references=normalized_references)
+
+       if noise_level_to_compute_clean_wer and si_sdr_measures:
+           si_sdr_measures = np.array(si_sdr_measures)
+           mask = si_sdr_measures >= noise_level_to_compute_clean_wer
+           if mask.any():
+               clean_word_error = 100 * metric.compute(
+                   predictions=np.array(normalized_predictions)[mask], references=np.array(normalized_references)[mask]
+               )
+               if not mask.all():
+                   noisy_word_error = 100 * metric.compute(
+                       predictions=np.array(normalized_predictions)[~mask], references=np.array(normalized_references)[~mask]
+                   )
+               else:
+                   noisy_word_error = 0
+               percent_clean_samples = mask.sum() / len(mask)

    asr_pipeline.model.to("cpu")
    asr_pipeline = release_memory(asr_pipeline)
......
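For context on the `len(normalized_references) > 0` guard added above: the WER metric cannot be computed over an empty list, which is what happens when every transcription comes back empty or `None`, and which is the "null WER" failure this commit fixes. A minimal reproduction sketch, assuming the `evaluate` library's `wer` metric, i.e. the same interface as `metric.compute` above:

```python
import evaluate

wer_metric = evaluate.load("wer")
predictions, references = [], []  # e.g. every ASR transcription was filtered out

# Without the length guard, metric.compute fails on empty inputs;
# with it, WER falls back to the sentinel value of 100.
word_error = 100
if len(references) > 0:
    word_error = 100 * wer_metric.compute(predictions=predictions, references=references)
print(word_error)  # 100
```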
@@ -344,8 +344,8 @@ def main():
    # derive max & min input length for sample rate & max duration
    sampling_rate = feature_extractor.sampling_rate
-   max_target_length = data_args.max_duration_in_seconds * sampling_rate
-   min_target_length = data_args.min_duration_in_seconds * sampling_rate
+   max_target_length = int(data_args.max_duration_in_seconds * sampling_rate)
+   min_target_length = int(data_args.min_duration_in_seconds * sampling_rate)
    target_audio_column_name = data_args.target_audio_column_name
    description_column_name = data_args.description_column_name
    prompt_column_name = data_args.prompt_column_name
@@ -1069,6 +1069,7 @@ def main():
                # Model forward
                eval_metric = eval_step(batch, accelerator, autocast_kwargs)
                eval_metric = accelerator.gather_for_metrics(eval_metric)
+               eval_metric = {key: val.unsqueeze(0) if val.ndim == 0 else val for (key, val) in eval_metric.items()}
                eval_metrics.append(eval_metric)
                if training_args.predict_with_generate:
......
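The `unsqueeze(0)` added in the second hunk matters because `accelerator.gather_for_metrics` collects the per-batch metric tensors from every process, and a 0-dimensional scalar (for instance a reduced loss on a single GPU) can trip up the downstream stacking of those metrics. A minimal sketch of the shape fix, independent of the training script (the metric names here are illustrative):

```python
import torch

eval_metric = {"loss": torch.tensor(0.73), "per_codebook_loss": torch.tensor([0.5, 0.9])}

# Promote 0-dim scalars to shape (1,) so every metric can be concatenated along dim 0 later.
eval_metric = {key: val.unsqueeze(0) if val.ndim == 0 else val for key, val in eval_metric.items()}
print({key: tuple(val.shape) for key, val in eval_metric.items()})  # {'loss': (1,), 'per_codebook_loss': (2,)}
```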