import gradio as gr
import torch
from transformers.models.speecht5.number_normalizer import EnglishNumberNormalizer
from string import punctuation
import re

from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed

device = "cuda:0" if torch.cuda.is_available() else "cpu"

repo_id = "parler-tts/parler-tts-mini-v1"
repo_id_large = "ylacombe/parler-large-v1-og"

# Load both checkpoints up front so the UI can switch between the Mini and
# Large models without reloading at request time.
model = ParlerTTSForConditionalGeneration.from_pretrained(repo_id).to(device)
model_large = ParlerTTSForConditionalGeneration.from_pretrained(repo_id_large).to(device)
tokenizer = AutoTokenizer.from_pretrained(repo_id)
feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)

SAMPLE_RATE = feature_extractor.sampling_rate
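# set_seed(SEED) is called before every generation (see gen_tts below), so
# identical inputs reproduce the same audio across requests.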
SEED = 42

default_text = "All of the data, pre-processing, training code, and weights are released publicly under a permissive license, enabling the community to build on our work and develop their own powerful models."
default_description = "Laura's voice is monotone yet slightly fast in delivery, with a very close recording that almost has no background noise."
examples = [
    [
        "This version introduces speaker consistency across generations, characterized by their name. For example, Jon, Lea, Gary, Jenna, Mike and Laura.",
        "Gary's voice is monotone yet slightly fast in delivery, with a very close recording that has no background noise.",
        None,
    ],
    [
        '''There are 34 speakers. To take advantage of this, simply adapt your text description to specify which speaker to use: "Mike speaks animatedly...".''',
        "Gary speaks slightly animatedly and slightly slowly in delivery, with a very close recording that has no background noise.",
        None
    ],
    [
        "'This is the best time of my life, Bartley,' she said happily.",
        "A female speaker delivers a slightly expressive and animated speech with a moderate speed. The recording features a low-pitch voice and slight background noise, creating a close-sounding audio experience.",
        None,
    ],
    [
        "Montrose also, after having experienced still more variety of good and bad fortune, threw down his arms, and retired out of the kingdom.",
        "A man voice speaks slightly slowly with very noisy background, carrying a low-pitch tone and displaying a touch of expressiveness and animation. The sound is very distant, adding an air of intrigue.",
        None
    ],
    [
        "Once upon a time, in the depth of winter, when the flakes of snow fell like feathers from the clouds, a queen sat sewing at her pal-ace window, which had a carved frame of black wood.",
        "In a very poor recording quality, a female speaker delivers her slightly expressive and animated words with a fast pace. There's high level of background noise and a very distant-sounding reverberation. Her voice is slightly higher pitched than average.",
        None,
    ],
]
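# Each example row lines up with the `inputs` list defined in the Blocks below:
# (text, description, use_large); the trailing None is the value for the
# Large-model checkbox.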

number_normalizer = EnglishNumberNormalizer()


def preprocess(text):
    # Spell out numbers, drop hyphens, and make sure the text ends in punctuation.
    text = number_normalizer(text).strip()
    text = text.replace("-", " ")
    if text and text[-1] not in punctuation:
        text = f"{text}."

    abbreviations_pattern = r'\b[A-Z][A-Z\.]+\b'

    def separate_abb(chunk):
        # "A.B.C" / "ABC" -> "A B C" so each letter is pronounced individually.
        chunk = chunk.replace(".", "")
        return " ".join(chunk)

    abbreviations = re.findall(abbreviations_pattern, text)
    for abv in abbreviations:
        if abv in text:
            text = text.replace(abv, separate_abb(abv))
    return text
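
# Illustrative behavior (assuming EnglishNumberNormalizer spells digits out):
#   preprocess("The 2 ABC") -> "The two A B C."
# i.e. numbers are verbalized and all-caps abbreviations are read letter by letter.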


def gen_tts(text, description, use_large=False):
    # The description conditions the voice; the preprocessed text is the spoken prompt.
    inputs = tokenizer(description.strip(), return_tensors="pt").to(device)
    prompt = tokenizer(preprocess(text), return_tensors="pt").to(device)

    # Fixed seed so the same inputs always yield the same audio.
    set_seed(SEED)
    tts_model = model_large if use_large else model
    generation = tts_model.generate(
        input_ids=inputs.input_ids, prompt_input_ids=prompt.input_ids, attention_mask=inputs.attention_mask,
        prompt_attention_mask=prompt.attention_mask, do_sample=True, temperature=1.0
    )
    audio_arr = generation.cpu().numpy().squeeze()

    return SAMPLE_RATE, audio_arr
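
# Minimal sketch of a direct (non-UI) call, assuming the checkpoints above are loaded:
#   sr, wav = gen_tts(default_text, default_description)
# `wav` is a 1-D float numpy array sampled at `sr` Hz.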


# Custom CSS for a share button; note that nothing below creates a #share-btn
# element, so these rules are currently unused.
css = """
        #share-btn-container {
            display: flex;
            padding-left: 0.5rem !important;
            padding-right: 0.5rem !important;
            background-color: #000000;
            justify-content: center;
            align-items: center;
            border-radius: 9999px !important; 
            width: 13rem;
            margin-top: 10px;
            margin-left: auto;
            flex: unset !important;
        }
        #share-btn {
            all: initial;
            color: #ffffff;
            font-weight: 600;
            cursor: pointer;
            font-family: 'IBM Plex Sans', sans-serif;
            margin-left: 0.5rem !important;
            padding-top: 0.25rem !important;
            padding-bottom: 0.25rem !important;
            right:0;
        }
        #share-btn * {
            all: unset !important;
        }
        #share-btn-container div:nth-child(-n+2){
            width: auto !important;
            min-height: 0px !important;
        }
        #share-btn-container .wrap {
            display: none !important;
        }
"""
with gr.Blocks(css=css) as block:
    gr.HTML(
        """
            <div style="text-align: center; max-width: 700px; margin: 0 auto;">
              <div
                style="
                  display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;
                "
              >
                <h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
                  Parler-TTS 🗣️
                </h1>
              </div>
            </div>
        """
    )
    gr.HTML(
        f"""
        <p><a href="https://github.com/huggingface/parler-tts">Parler-TTS</a> is a training and inference library for high-fidelity text-to-speech (TTS) models.</p>
        <p>The models demonstrated here, Parler-TTS <a href="https://huggingface.co/parler-tts/parler-tts-mini-v1">Mini v1</a> and <a href="https://huggingface.co/parler-tts/parler-tts-large-v1">Large v1</a>,
        were trained on 45,000 hours of English audiobooks. They generate high-quality speech whose characteristics (e.g. gender, background noise, speaking rate, pitch and reverberation) can be controlled with a simple text prompt.</p>

        <p>By default, Parler-TTS generates a 🎲 random voice. To ensure 🎯 <b>speaker consistency across generations</b>, the models were also trained on 34 speakers, characterized by name (e.g. Jon, Lea, Gary, Jenna, Mike, Laura).</p>

        <p>To take advantage of this feature, simply adapt the text description to specify which speaker to use: `Jon's voice is monotone...`</p>
        """
    )
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(label="Input Text", lines=2, value=default_text, elem_id="input_text")
            description = gr.Textbox(label="Description", lines=2, value=default_description,
                                     elem_id="input_description")
            use_large = gr.Checkbox(value=False, label="Use Large model",
                                    info="Parler-TTS Large v1 produces better-sounding audio; Mini v1 generates faster")
            run_button = gr.Button("Generate Audio", variant="primary")
        with gr.Column():
            audio_out = gr.Audio(label="Parler-TTS generation", type="numpy", elem_id="audio_out")

    inputs = [input_text, description, use_large]
    outputs = [audio_out]
    run_button.click(fn=gen_tts, inputs=inputs, outputs=outputs, queue=True)
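    # cache_examples=True runs gen_tts on every example at startup, so clicking an
    # example replays pre-generated audio instead of re-running the model.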
    gr.Examples(examples=examples, fn=gen_tts, inputs=inputs, outputs=outputs, cache_examples=True)
    gr.HTML(
        """
        <p>Tips:</p>
        <ul>
            <li>Include "very clear audio" in the description to generate the highest-quality audio, and "very noisy audio" for a high level of background noise</li>
            <li>Punctuation can be used to control the prosody of the generation, e.g. commas can be used to add small pauses to the speech</li>
            <li>The remaining speech characteristics (gender, speaking rate, pitch and reverberation) can be controlled directly through the text description</li>
        </ul>

        <p>Parler-TTS can be made faster. The <a href="https://github.com/huggingface/parler-tts/blob/main/INFERENCE.md">inference guide</a> covers several ways to speed up generation, including SDPA, torch.compile, batching and streaming!</p>

        <p>To find out more about how this model was trained, or even to fine-tune it yourself, check out the
        <a href="https://github.com/huggingface/parler-tts">Parler-TTS</a> repository on GitHub.</p>
        """
    )

# Queue incoming requests and serve on all network interfaces; share=True also
# creates a temporary public link.
block.queue()
block.launch(server_name='0.0.0.0', share=True)