"""Create a tiny, randomly initialised Parler-TTS checkpoint backed by a DAC codec.

The script builds a small decoder config sized to the DAC codebook, combines it
with a T5 text encoder and the DAC audio encoder into a full
``ParlerTTSForConditionalGeneration`` model, fills in generation defaults, and
saves the result under ``TMP_DIR``.
"""

import os

from parler_tts import (
    ParlerTTSDecoderConfig,
    ParlerTTSForCausalLM,
    ParlerTTSForConditionalGeneration,
)
from transformers import AutoConfig

TMP_DIR = "./tmp/artefacts/"

# Sub-model checkpoints the dummy model is assembled from.
text_model = "google-t5/t5-small"
encodec_version = "ylacombe/dac_44khZ_8kbps"
num_codebooks = 9

text_cfg = AutoConfig.from_pretrained(text_model)
audio_cfg = AutoConfig.from_pretrained(encodec_version)

codebook_size = audio_cfg.codebook_size

# The decoder vocabulary is the codec codebook plus one extra id used for
# pad/eos; bos sits one past that.
decoder_config = ParlerTTSDecoderConfig(
    vocab_size=codebook_size + 1,
    max_position_embeddings=2048,
    num_hidden_layers=4,
    ffn_dim=512,
    num_attention_heads=8,
    layerdrop=0.0,
    use_cache=True,
    activation_function="gelu",
    hidden_size=512,
    dropout=0.0,
    attention_dropout=0.0,
    activation_dropout=0.0,
    pad_token_id=codebook_size,
    eos_token_id=codebook_size,
    bos_token_id=codebook_size + 1,
    num_codebooks=num_codebooks,
)

# Save a randomly initialised decoder so it can be re-loaded as a sub-model.
decoder_dir = os.path.join(TMP_DIR, "decoder")
ParlerTTSForCausalLM(decoder_config).save_pretrained(decoder_dir)

model = ParlerTTSForConditionalGeneration.from_sub_models_pretrained(
    text_encoder_pretrained_model_name_or_path=text_model,
    audio_encoder_pretrained_model_name_or_path=encodec_version,
    decoder_pretrained_model_name_or_path=decoder_dir,
    vocab_size=text_cfg.vocab_size,
)

# Generation-time token ids must agree with the decoder config above.
generation_config = model.generation_config
generation_config.decoder_start_token_id = codebook_size + 1
generation_config.pad_token_id = codebook_size
generation_config.eos_token_id = codebook_size

# Other generation defaults: cap output length at ~30 s of audio frames
# and sample rather than decode greedily.
generation_config.max_length = int(30 * model.audio_encoder.config.frame_rate)
generation_config.do_sample = True
generation_config.guidance_scale = 1

model.save_pretrained(os.path.join(TMP_DIR, "tiny-model"))