init_model_300M.py 2.53 KB
Newer Older
Yoach Lacombe's avatar
Yoach Lacombe committed
1
from parler_tts import ParlerTTSForCausalLM, ParlerTTSForConditionalGeneration, ParlerTTSDecoderConfig
Yoach Lacombe's avatar
Yoach Lacombe committed
2
from transformers import AutoConfig
Yoach Lacombe's avatar
Yoach Lacombe committed
3
import os
Yoach Lacombe's avatar
Yoach Lacombe committed
4
import argparse
Yoach Lacombe's avatar
Yoach Lacombe committed
5

6

Yoach Lacombe's avatar
Yoach Lacombe committed
7
8
9
10
11
12
13
if __name__ == "__main__":
    """Initialize an untrained ~300M-parameter ParlerTTS model.

    Builds a randomly-initialized ParlerTTS decoder from a hand-picked
    config, saves it, then composes it with a pretrained text encoder and
    audio codec into a full ParlerTTSForConditionalGeneration model and
    saves the result under ``<save_directory>/stable-speech-untrained-300M``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("save_directory", type=str, help="Directory where to save the model and the decoder.")
    parser.add_argument("text_model", type=str, help="Repository id or path to the text encoder.")
    parser.add_argument("audio_model", type=str, help="Repository id or path to the audio encoder.")

    args = parser.parse_args()

    text_model = args.text_model
    encodec_version = args.audio_model

    # Only the configs are needed at this point; the sub-model weights are
    # pulled in later by `from_sub_models_pretrained`.
    t5 = AutoConfig.from_pretrained(text_model)
    encodec = AutoConfig.from_pretrained(encodec_version)

    encodec_vocab_size = encodec.codebook_size
    num_codebooks = encodec.num_codebooks
    print("num_codebooks", num_codebooks)

    # Special audio-token ids live just past the codec vocabulary:
    # pad and eos share the first extra id, bos takes the next one.
    # Named once here so the decoder config and the generation config
    # below cannot drift apart.
    pad_token_id = encodec_vocab_size
    eos_token_id = encodec_vocab_size
    bos_token_id = encodec_vocab_size + 1

    decoder_config = ParlerTTSDecoderConfig(
        vocab_size=encodec_vocab_size + 64,  # + 64 instead of +1 to have a multiple of 64
        max_position_embeddings=4096,  # 30 s = 2580
        num_hidden_layers=24,
        ffn_dim=4096,
        num_attention_heads=16,
        layerdrop=0.0,
        use_cache=True,
        activation_function="gelu",
        hidden_size=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        pad_token_id=pad_token_id,
        eos_token_id=eos_token_id,
        bos_token_id=bos_token_id,
        num_codebooks=num_codebooks,
    )

    # Save the randomly-initialized decoder first so it can be re-loaded
    # below as a sub-model of the full conditional-generation model.
    decoder = ParlerTTSForCausalLM(decoder_config)
    decoder_path = os.path.join(args.save_directory, "decoder")
    decoder.save_pretrained(decoder_path)

    model = ParlerTTSForConditionalGeneration.from_sub_models_pretrained(
        text_encoder_pretrained_model_name_or_path=text_model,
        audio_encoder_pretrained_model_name_or_path=encodec_version,
        decoder_pretrained_model_name_or_path=decoder_path,
        vocab_size=t5.vocab_size,
    )

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = bos_token_id
    model.generation_config.pad_token_id = pad_token_id
    model.generation_config.eos_token_id = eos_token_id

    # set other default generation config params
    # max_length caps generation at 30 s of audio frames.
    model.generation_config.max_length = int(30 * model.audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True  # True
    model.generation_config.guidance_scale = 1  # 3.0

    model.save_pretrained(os.path.join(args.save_directory, "stable-speech-untrained-300M"))