{
    "model_name_or_path": "/home/yoach/dataspeech/artefacts/tiny-model/",
    "feature_extractor_name": "facebook/encodec_32khz",
    "description_tokenizer_name":"t5-base",
    "prompt_tokenizer_name":"t5-base",

    "push_to_hub": false,
    "hub_model_id": "stable-speech-mini",
    "report_to": ["wandb"],
    "overwrite_output_dir": true,
    "output_dir": "/home/yoach/dataspeech/artefacts/training/",

    "train_dataset_name": "blabble-io/libritts_r",
    "train_metadata_dataset_name": "stable-speech/libritts-r-tags-and-text-generated",
    "train_dataset_config_name": "clean",
    "train_split_name": "train.clean.360",

    "eval_dataset_name": "blabble-io/libritts_r",
    "eval_metadata_dataset_name": "stable-speech/libritts-r-tags-and-text-generated",
    "eval_dataset_config_name": "clean",
    "eval_split_name": "train.clean.360",

    "target_audio_column_name": "audio", 
    "description_column_name": "text_description",
    "prompt_column_name": "text",

    "max_train_samples": 12,
    "max_eval_samples": 12,

    
    "max_duration_in_seconds": 30,
    "min_duration_in_seconds": 1.0,

    "add_audio_samples_to_wandb": true,
    "id_column_name": "id",

    "preprocessing_num_workers": 1,

    "pad_token_id": 2050,
    "decoder_start_token_id": 2048,

    "do_train": true,
    "num_train_epochs": 20,
    "gradient_accumulation_steps": 1,
    "gradient_checkpointing": false,
    "per_device_train_batch_size": 3,
    "learning_rate": 1e-3,
    "adam_beta1": 0.9,
    "adam_beta2": 0.999,
    "weight_decay": 0.1,

    "lr_scheduler_type": "cosine",
    "warmup_ratio":  0.1,


    "freeze_text_encoder": true,


    "do_eval": true, 
    "predict_with_generate": true,
    "include_inputs_for_metrics": true,
    "evaluation_strategy": "steps",
    "eval_steps": 10,
    "per_device_eval_batch_size": 3,
    "generation_max_length": 400,
    "do_sample": true,

    "logging_steps": 15,

    "dtype": "float32",
    "seed": 456,

    "dataloader_num_workers": 8
}