{
    "model_name_or_path": "/raid/yoach/tmp/artefacts/tiny-dac-model/",
    "save_to_disk":  "/raid/yoach/tmp/artefacts/small_experiment_dataset/",

    "feature_extractor_name":"ylacombe/dac_44khZ_8kbps",
    "description_tokenizer_name":"google-t5/t5-small",
    "prompt_tokenizer_name":"google-t5/t5-small",

    "push_to_hub": false,
    "hub_model_id": "stable-speech-mini",
    "report_to": ["wandb"],
    "overwrite_output_dir": true,
    "output_dir": "/raid/yoach/tmp/artefacts/training/",

    "train_dataset_name": "blabble-io/libritts_r",
    "train_metadata_dataset_name": "stable-speech/libritts-r-tags-and-text-generated",
    "train_dataset_config_name": "clean",
    "train_split_name": "train.clean.360",

    "eval_dataset_name": "blabble-io/libritts_r",
    "eval_metadata_dataset_name": "stable-speech/libritts-r-tags-and-text-generated",
    "eval_dataset_config_name": "clean",
    "eval_split_name": "train.clean.360",

    "target_audio_column_name": "audio", 
    "description_column_name": "text_description",
    "prompt_column_name": "text",

    "max_train_samples": 4,
    "max_eval_samples": 4,

    
    "max_duration_in_seconds": 30,
    "min_duration_in_seconds": 1.0,

    "add_audio_samples_to_wandb": true,
    "id_column_name": "id",

    "preprocessing_num_workers": 1,

    "pad_token_id": 1024,
    "decoder_start_token_id": 1025,

    "do_train": true,
    "num_train_epochs": 180,
    "gradient_accumulation_steps": 1,
    "gradient_checkpointing": false,
    "per_device_train_batch_size": 2,
    "learning_rate": 1e-3,
    "adam_beta1": 0.9,
    "adam_beta2": 0.999,
    "weight_decay": 0.1,

    "lr_scheduler_type": "cosine",
    "warmup_ratio":  0.1,


    "freeze_text_encoder": true,


    "do_eval": true, 
    "predict_with_generate": true,
    "include_inputs_for_metrics": true,
    "evaluation_strategy": "steps",
    "eval_steps": 30,
    "per_device_eval_batch_size": 2,
    "generation_max_length": 800,
    "do_sample": false,

    "logging_steps": 15,

    "dtype": "float32",
    "seed": 456,

    "dataloader_num_workers":8
}
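Since the file is a flat JSON mapping of argument names to values, it can be consumed the way Hugging Face-style training scripts usually ingest JSON configs, via HfArgumentParser. Below is a minimal sketch assuming transformers is installed; the standalone check script and the choice to parse only Seq2SeqTrainingArguments (ignoring project-specific keys such as "freeze_text_encoder" or "train_dataset_name") are illustrative assumptions, not this project's actual training entrypoint.

# check_config.py -- hypothetical helper, not part of the repository.
# Sketch of how a flat JSON config like the one above is typically loaded
# in Hugging Face-style training scripts.
import sys

from transformers import HfArgumentParser, Seq2SeqTrainingArguments

# parse_json_file maps top-level JSON keys onto dataclass fields;
# allow_extra_keys=True skips keys that Seq2SeqTrainingArguments does not
# define (e.g. "freeze_text_encoder", "train_dataset_name").
parser = HfArgumentParser((Seq2SeqTrainingArguments,))
(training_args,) = parser.parse_json_file(json_file=sys.argv[1], allow_extra_keys=True)

print(training_args.output_dir)                    # /raid/yoach/tmp/artefacts/training/
print(training_args.learning_rate)                 # 0.001
print(training_args.per_device_train_batch_size)   # 2

Run as `python check_config.py librispeech_tts_r_dummy_dac.json` (script name assumed); a real training entrypoint would additionally define dataclasses covering the model- and data-related keys in this config.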