# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch ParlerTTS model."""
import copy
import inspect
import math
import random
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModel, AutoModelForTextEncoding
from transformers.activations import ACT2FN
from transformers.cache_utils import (
    Cache,
    DynamicCache,
    EncoderDecoderCache,
    SlidingWindowCache,
    StaticCache,
)
from transformers.generation.configuration_utils import GenerationConfig
from transformers.generation.logits_process import LogitsProcessorList
from transformers.generation.stopping_criteria import StoppingCriteriaList
from transformers.modeling_attn_mask_utils import (
    AttentionMaskConverter,
    _prepare_4d_attention_mask,
    _prepare_4d_attention_mask_for_sdpa,
)
from transformers.modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    ModelOutput,
    Seq2SeqLMOutput,
)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from transformers.utils.import_utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10

from .configuration_parler_tts import ParlerTTSConfig, ParlerTTSDecoderConfig
from .dac_wrapper import DACConfig, DACModel


AutoConfig.register("dac", DACConfig)
AutoModel.register(DACConfig, DACModel)

if TYPE_CHECKING:
    from transformers.generation.streamers import BaseStreamer

logger = logging.get_logger(__name__)


if is_flash_attn_2_available():
    from flash_attn import flash_attn_func, flash_attn_varlen_func
    from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input  # noqa
else:
    logger.warning("Flash attention 2 is not installed")

_CONFIG_FOR_DOC = "ParlerTTSConfig"
_CHECKPOINT_FOR_DOC = "facebook/parler_tts-small"

MUSICGEN_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/parler_tts-small",
    # See all ParlerTTS models at https://huggingface.co/models?filter=parler_tts
]


NEED_SETUP_CACHE_CLASSES_MAPPING = {"static": StaticCache, "sliding_window": SlidingWindowCache}


def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask):
    """Apply a delay pattern mask to the decoder input ids, only preserving predictions where
    the mask is set to -1, and otherwise setting to the value detailed in the mask."""
    seq_len = input_ids.shape[-1]
    decoder_pad_token_mask = decoder_pad_token_mask[..., :seq_len]
    input_ids = torch.where(decoder_pad_token_mask == -1, input_ids, decoder_pad_token_mask)
    return input_ids


def build_delay_pattern_mask(
    input_ids: torch.LongTensor, bos_token_id: int, pad_token_id: int, max_length: int, num_codebooks: int
):
    """Build a delayed pattern mask to the input_ids. Each codebook is offset by the previous codebook by
    one, giving a delayed pattern mask at the start of sequence and end of sequence. Take the example where there
    are 4 codebooks and a max sequence length of 8, we have the delayed pattern mask of shape `(codebooks,
    seq_len)`:
    - [B, -1, -1, -1, -1, P, P, P]
    - [B, B, -1, -1, -1, -1, P, P]
    - [B, B, B, -1, -1, -1, -1, P]
    - [B, B, B, B, -1, -1, -1, -1]
    where P is the special padding token id and -1 indicates that the token is valid for prediction. If we include
    a prompt (decoder input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the
    mask is set to the value in the prompt:
    - [B, a, b, -1, -1, P, P, P]
    - [B, B, c, d, -1, -1, P, P]
    - [B, B, B, e, f, -1, -1, P]
    - [B, B, B, B, g, h, -1, -1]
    where a-h indicate the input prompt (decoder input ids) that are offset by 1. Now, we only override the -1
    tokens in our prediction.
    """
    # (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len)
    input_ids = input_ids.reshape(-1, num_codebooks, input_ids.shape[-1])
    bsz, num_codebooks, seq_len = input_ids.shape

    input_ids_shifted = torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1

    # we only apply the mask if we have a large enough seq len - otherwise we return as is
    if max_length < 2 * num_codebooks - 1:
        return input_ids.reshape(bsz * num_codebooks, -1), input_ids_shifted.reshape(bsz * num_codebooks, -1)

    # fill the shifted ids with the prompt entries, offset by the codebook idx
    for codebook in range(num_codebooks):
        # mono channel - loop over the codebooks one-by-one
        input_ids_shifted[:, codebook, codebook : seq_len + codebook] = input_ids[:, codebook]

    # construct a pattern mask that indicates the positions of padding tokens for each codebook
    # first fill the upper triangular part (the EOS padding)
    eos_delay_pattern = torch.triu(
        torch.ones((num_codebooks, max_length), dtype=torch.bool), diagonal=max_length - num_codebooks + 1
    )
    # then fill the lower triangular part (the BOS padding)
    bos_delay_pattern = torch.tril(torch.ones((num_codebooks, max_length), dtype=torch.bool))

    bos_mask = ~(bos_delay_pattern).to(input_ids.device)
    eos_mask = ~(eos_delay_pattern).to(input_ids.device)
    mask = ~(bos_delay_pattern + eos_delay_pattern).to(input_ids.device)
    input_ids = mask * input_ids_shifted + ~bos_mask * bos_token_id + ~eos_mask * pad_token_id

    # find the first position to start generating - this is the first place we have the -1 token
    # and will always be in the first codebook (since it has no codebook offset)
    first_codebook_ids = input_ids[:, 0, :]
    start_ids = (first_codebook_ids == -1).nonzero()[:, 1]
    if len(start_ids) > 0:
        first_start_id = min(start_ids)
    else:
        # we have no tokens that need to be filled - return entire matrix of input ids
        first_start_id = seq_len

    # (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len)
    pattern_mask = input_ids.reshape(bsz * num_codebooks, -1)
    input_ids = input_ids[..., :first_start_id].reshape(bsz * num_codebooks, -1)
    return input_ids, pattern_mask
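

# Illustrative sketch of how the two delay-pattern helpers above fit together (toy shapes and token
# ids are assumptions, not real model values): `build_delay_pattern_mask` pre-computes the BOS/PAD
# pattern plus the ids to start generation from, and `apply_delay_pattern_mask` re-imposes that
# pattern on newly generated ids.
#
#     >>> prompt = torch.zeros((1 * 2, 1), dtype=torch.long)  # (bsz * num_codebooks, seq_len), BOS-only prompt
#     >>> ids, pattern_mask = build_delay_pattern_mask(
#     ...     prompt, bos_token_id=0, pad_token_id=1, max_length=6, num_codebooks=2
#     ... )
#     >>> generated = torch.randint(2, 10, (2, 6))  # stand-in for decoder predictions
#     >>> generated = apply_delay_pattern_mask(generated, pattern_mask)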


# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
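

# Illustrative sketch (toy tensor, assumed sizes): `repeat_kv` expands grouped key/value heads so that,
# e.g., 2 key/value heads can be matched against 8 query heads with `n_rep=4`.
#
#     >>> kv = torch.randn(1, 2, 10, 64)  # (batch, num_key_value_heads, seq_len, head_dim)
#     >>> repeat_kv(kv, n_rep=4).shape
#     torch.Size([1, 8, 10, 64])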


@dataclass
class ParlerTTSUnconditionalInput(ModelOutput):
    """
    Args:
        encoder_outputs (`Tuple[torch.FloatTensor]` of length 1, with tensor shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the text encoder model.
        attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Encoder attention mask to avoid performing attention on padding token indices. Mask values selected in `[0,
            1]`: 1 for tokens that are **not masked**, 0 for tokens that are **masked**.
    """

    encoder_outputs: Tuple[torch.FloatTensor] = None
    attention_mask: torch.LongTensor = None


# Copied from transformers.models.encoder_decoder.modeling_encoder_decoder.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.
    """
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
    if decoder_start_token_id is None:
        raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
    shifted_input_ids[:, 0] = decoder_start_token_id

    if pad_token_id is None:
        raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

    return shifted_input_ids
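

# Illustrative sketch (toy label ids are assumptions): labels are shifted one position to the right to
# form decoder inputs, the first position is filled with `decoder_start_token_id`, and any remaining
# -100 label positions are replaced by the pad token.
#
#     >>> labels = torch.tensor([[5, 6, 7, -100]])
#     >>> shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=2)
#     tensor([[2, 5, 6, 7]])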


# Copied from transformers.models.musicgen.modeling_musicgen.MusicgenSinusoidalPositionalEmbedding with Musicgen->ParlerTTS
class ParlerTTSSinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.make_weights(num_positions, embedding_dim)

    def make_weights(self, num_embeddings: int, embedding_dim: int):
        emb_weights = self.get_embedding(num_embeddings, embedding_dim)
        if hasattr(self, "weights"):
            # in forward put the weights on the correct dtype and device of the param
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)

        self.weights = nn.Parameter(emb_weights)
        self.weights.requires_grad = False
        self.weights.detach_()

    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int):
        """
        Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
        description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.cos(emb), torch.sin(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        return emb.to(torch.get_default_dtype())

    @torch.no_grad()
    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
        bsz, seq_len, _ = input_ids.size()
        # Create the position ids from the input token ids.
        position_ids = torch.arange(seq_len, device=input_ids.device) + past_key_values_length
        # expand embeddings if needed
        if seq_len > self.weights.size(0):
            self.make_weights(seq_len + self.offset, self.embedding_dim)
        return self.weights.index_select(0, position_ids.view(-1)).detach()
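

# Illustrative sketch (toy sizes are assumptions): `get_embedding` builds a fixed sinusoidal table of
# shape (num_embeddings, embedding_dim); `forward` then simply indexes into it with the running
# position ids.
#
#     >>> table = ParlerTTSSinusoidalPositionalEmbedding.get_embedding(num_embeddings=16, embedding_dim=8)
#     >>> table.shape
#     torch.Size([16, 8])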


# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->ParlerTTS
class ParlerTTSRotaryEmbedding(nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0):
        super().__init__()
        self.scaling_factor = scaling_factor
        self.dim = dim
        self.max_position_embeddings = max_position_embeddings
        self.base = base
        inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        # For BC we register cos and sin cached
        self.max_seq_len_cached = max_position_embeddings
        t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
        t = t / self.scaling_factor
        freqs = torch.outer(t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("_cos_cached", emb.cos().to(torch.get_default_dtype()), persistent=False)
        self.register_buffer("_sin_cached", emb.sin().to(torch.get_default_dtype()), persistent=False)

    # Ignore copy
    @torch.no_grad()
    def forward(self, device_type, position_ids):
        # x: [bs, num_attention_heads, seq_len, head_size]
        inv_freq_expanded = self.inv_freq[None, :, None].expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :]
        # Force float32 since bfloat16 loses precision on long contexts
        # See https://github.com/huggingface/transformers/pull/29285
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()
        return cos, sin


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(x, cos, sin, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        x (`torch.Tensor`): The tensor over which to apply the rope embeddings
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    x_embed = (x * cos) + (rotate_half(x) * sin)
    return x_embed
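

# Illustrative sketch (toy shapes are assumptions): `ParlerTTSRotaryEmbedding` returns cos/sin of shape
# (batch, seq_len, head_dim); with q/k laid out as (batch, heads, seq_len, head_dim), the default
# `unsqueeze_dim=1` broadcasts them across the head dimension.
#
#     >>> rope = ParlerTTSRotaryEmbedding(dim=64)
#     >>> position_ids = torch.arange(10)[None, :]  # (batch, seq_len)
#     >>> cos, sin = rope("cpu", position_ids)  # each of shape (1, 10, 64)
#     >>> q = torch.randn(1, 8, 10, 64)  # (batch, heads, seq_len, head_dim)
#     >>> apply_rotary_pos_emb(q, cos, sin).shape
#     torch.Size([1, 8, 10, 64])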


class ParlerTTSAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper. Modified to use GQA and MQA."""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        num_key_value_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        rope_embeddings: bool = False,
        layer_idx: Optional[int] = None,
        config: Optional[ParlerTTSDecoderConfig] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.num_key_value_heads = num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.config = config

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        if layer_idx is None and is_decoder:
            logger.warning_once(
                f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
                "will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )
        self.layer_idx = layer_idx

        self.k_proj = nn.Linear(embed_dim, self.num_key_value_heads * self.head_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, self.num_key_value_heads * self.head_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

        self.rope_embeddings = rope_embeddings

    def _shape_query(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def _shape_key_value(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[EncoderDecoderCache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cos: Optional[torch.LongTensor] = None,
        sin: Optional[torch.LongTensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        bsz, tgt_len = hidden_states.shape[:2]

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scaling
        query_states = self._shape_query(query_states, tgt_len, bsz)
        if self.rope_embeddings:
            query_states = apply_rotary_pos_emb(query_states, cos, sin)

        if past_key_value is not None:
            is_updated = past_key_value.is_updated.get(self.layer_idx)
            if is_cross_attention:
                # after the first generated id, we can subsequently re-use all key/value_states from cache
                past_key_value.is_updated[self.layer_idx] = True
                past_key_value = past_key_value.cross_attention_cache
            else:
                past_key_value = past_key_value.self_attention_cache

        # use key_value_states if cross attention
        current_states = key_value_states if key_value_states is not None else hidden_states
        if is_cross_attention and past_key_value and is_updated:
            # reuse k,v, cross_attentions
            key_states = past_key_value.key_cache[self.layer_idx]
            value_states = past_key_value.value_cache[self.layer_idx]
        else:
            key_states = self._shape_key_value(self.k_proj(current_states), -1, bsz)
            value_states = self._shape_key_value(self.v_proj(current_states), -1, bsz)

            if not is_cross_attention:
                # cached key states already have rope applied - only apply to new state
                key_states = apply_rotary_pos_emb(key_states, cos, sin) if self.rope_embeddings else key_states

            if past_key_value is not None:
                # save all key/value_states to cache to be re-used for fast auto-regressive generation
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = past_key_value.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3))

        if attention_mask is not None:  # no matter the length, we just slice it
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
            attn_weights = attn_weights + causal_mask

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.matmul(attn_probs, value_states)

        if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2)
        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights, past_key_value


def _get_unpad_data(attention_mask):
    seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
    indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
    max_seqlen_in_batch = seqlens_in_batch.max().item()
    cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
    return (
        indices,
        cu_seqlens,
        max_seqlen_in_batch,
    )
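

# Illustrative sketch (toy mask is an assumption): for the flash-attention varlen kernels,
# `_get_unpad_data` turns a padding mask into the flat indices of non-padded tokens, the cumulative
# sequence lengths, and the longest sequence length in the batch.
#
#     >>> mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
#     >>> indices, cu_seqlens, max_seqlen = _get_unpad_data(mask)
#     >>> indices.tolist(), cu_seqlens.tolist(), max_seqlen
#     ([0, 1, 2, 4, 5], [0, 3, 5], 3)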


# Copied from transformers.models.musicgen.modeling_musicgen.MusicgenFlashAttention2 with Musicgen->ParlerTTS
class ParlerTTSFlashAttention2(ParlerTTSAttention):
    """
    ParlerTTS flash attention module. This module inherits from `ParlerTTSAttention` as the weights of the module stay
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[EncoderDecoderCache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cos: Optional[torch.LongTensor] = None,
        sin: Optional[torch.LongTensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # ParlerTTSFlashAttention2 attention does not support output_attentions
        if isinstance(past_key_value, StaticCache):
            raise ValueError(
                "The `static` cache implementation is not compatible with `attn_implementation='flash_attention_2'`. "
                "Use `attn_implementation='sdpa'` in the meantime, and open an issue at https://github.com/huggingface/transformers"
            )
        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len = hidden_states.shape[:2]

        # get query proj
        query_states = self.q_proj(hidden_states).view(bsz, tgt_len, self.num_heads, self.head_dim)

        if self.rope_embeddings:
            query_states = apply_rotary_pos_emb(query_states, cos, sin, unsqueeze_dim=2)

        if past_key_value is not None:
            is_updated = past_key_value.is_updated.get(self.layer_idx)
            if is_cross_attention:
                # after the first generated id, we can subsequently re-use all key/value_states from cache
                past_key_value.is_updated[self.layer_idx] = True
                past_key_value = past_key_value.cross_attention_cache
            else:
                past_key_value = past_key_value.self_attention_cache

        # use key_value_states if cross attention
        current_states = key_value_states if key_value_states is not None else hidden_states
        if is_cross_attention and past_key_value and is_updated:
            # reuse k,v, cross_attentions
            key_states = past_key_value.key_cache[self.layer_idx]
            value_states = past_key_value.value_cache[self.layer_idx]
        else:
            key_states = self._shape_key_value(self.k_proj(current_states), -1, bsz)
            value_states = self._shape_key_value(self.v_proj(current_states), -1, bsz)

            if not is_cross_attention and self.rope_embeddings:
                # cached key states already have rope applied - only apply to new state
                key_states = apply_rotary_pos_emb(key_states, cos, sin)

            if past_key_value is not None:
                # save all key/value_states to cache to be re-used for fast auto-regressive generation
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = past_key_value.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )

        # TODO: These transposes are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim].
        #  We would need to refactor the KV cache to be able to avoid many of these transpose/reshape/view.
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons,
        # therefore the input hidden states get silently cast to float32. Hence, we need to
        # cast them back to the correct dtype just to be sure everything works as expected.
        # This might slow down training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)

        if query_states.dtype == torch.float32 or value_states.dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = self._flash_attention_forward(
            query_states, key_states, value_states, attention_mask, tgt_len, dropout=self.dropout
        )

        attn_output = attn_output.reshape(bsz, tgt_len, -1)
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
    def _flash_attention_forward(
        self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
    ):
        """
        Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
        first unpad the input, then computes the attention scores and pad the final attention scores.
        Args:
            query_states (`torch.Tensor`):
                Input query states to be passed to Flash Attention API
            key_states (`torch.Tensor`):
                Input key states to be passed to Flash Attention API
            value_states (`torch.Tensor`):
                Input value states to be passed to Flash Attention API
            attention_mask (`torch.Tensor`):
                The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
                position of padding tokens and 1 for the position of non-padding tokens.
            dropout (`float`):
                Attention dropout
            softmax_scale (`float`, *optional*):
                The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
        """
        if not self._flash_attn_uses_top_left_mask:
            causal = self.is_causal
        else:
            # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
            causal = self.is_causal and query_length != 1

        # Contains at least one padding token in the sequence
        if attention_mask is not None:
            batch_size = query_states.shape[0]
            query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
                query_states, key_states, value_states, attention_mask, query_length
            )

            cu_seqlens_q, cu_seqlens_k = cu_seq_lens
            max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens

            attn_output_unpad = flash_attn_varlen_func(
                query_states,
                key_states,
                value_states,
                cu_seqlens_q=cu_seqlens_q,
                cu_seqlens_k=cu_seqlens_k,
                max_seqlen_q=max_seqlen_in_batch_q,
                max_seqlen_k=max_seqlen_in_batch_k,
                dropout_p=dropout,
                softmax_scale=softmax_scale,
                causal=causal,
            )

            attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
        else:
            attn_output = flash_attn_func(
                query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
            )

        return attn_output

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
    def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
        indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
        batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape

        key_layer = index_first_axis(
            key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        value_layer = index_first_axis(
            value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
        )
        if query_length == kv_seq_len:
            query_layer = index_first_axis(
                query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
            )
            cu_seqlens_q = cu_seqlens_k
            max_seqlen_in_batch_q = max_seqlen_in_batch_k
            indices_q = indices_k
        elif query_length == 1:
            max_seqlen_in_batch_q = 1
            cu_seqlens_q = torch.arange(
                batch_size + 1, dtype=torch.int32, device=query_layer.device
            )  # There is a memcpy here, that is very bad.
            indices_q = cu_seqlens_q[:-1]
            query_layer = query_layer.squeeze(1)
        else:
            # The -q_len: slice assumes left padding.
            attention_mask = attention_mask[:, -query_length:]
            query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)

        return (
            query_layer,
            key_layer,
            value_layer,
            indices_q,
            (cu_seqlens_q, cu_seqlens_k),
            (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
        )


# Copied from transformers.models.bart.modeling_bart.BartSdpaAttention with Bart->Musicgen
class ParlerTTSSdpaAttention(ParlerTTSAttention):
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[EncoderDecoderCache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cos: Optional[torch.LongTensor] = None,
        sin: Optional[torch.LongTensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        if output_attentions or layer_head_mask is not None:
            # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "ParlerTTSModel is using ParlerTTSSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention"
                ' implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states,
                key_value_states=key_value_states,
                past_key_value=past_key_value,
                attention_mask=attention_mask,
                layer_head_mask=layer_head_mask,
                output_attentions=output_attentions,
                cache_position=cache_position,
            )

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len = hidden_states.shape[:2]

        # get query proj
        query_states = self.q_proj(hidden_states)
        query_states = self._shape_query(query_states, tgt_len, bsz)

        if self.rope_embeddings:
            query_states = apply_rotary_pos_emb(query_states, cos, sin)

        if past_key_value is not None:
            is_updated = past_key_value.is_updated.get(self.layer_idx)
            if is_cross_attention:
                # after the first generated id, we can subsequently re-use all key/value_states from cache
                past_key_value.is_updated[self.layer_idx] = True
                past_key_value = past_key_value.cross_attention_cache
            else:
                past_key_value = past_key_value.self_attention_cache

        # use key_value_states if cross attention
        current_states = key_value_states if key_value_states is not None else hidden_states
        if is_cross_attention and past_key_value and is_updated:
            # reuse k,v, cross_attentions
            key_states = past_key_value.key_cache[self.layer_idx]
            value_states = past_key_value.value_cache[self.layer_idx]
        else:
            key_states = self._shape_key_value(self.k_proj(current_states), -1, bsz)
            value_states = self._shape_key_value(self.v_proj(current_states), -1, bsz)

            if not is_cross_attention and self.rope_embeddings:
                # cached key states already have rope applied - only apply to new state
                key_states = apply_rotary_pos_emb(key_states, cos, sin)

            if past_key_value is not None:
                # save all key/value_states to cache to be re-used for fast auto-regressive generation
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = past_key_value.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )

        causal_mask = attention_mask
        if attention_mask is not None:  # no matter the length, we just slice it
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
        # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
        # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case tgt_len == 1.
        is_causal = True if self.is_causal and causal_mask is None and tgt_len > 1 else False

        # NOTE: SDPA with memory-efficient backend is currently (torch==2.1.2) bugged when using non-contiguous inputs and a custom attn_mask,
        # but we are fine here as `_shape` do call `.contiguous()`. Reference: https://github.com/pytorch/pytorch/issues/112577
        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=causal_mask,
            dropout_p=self.dropout if self.training else 0.0,
            # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case tgt_len == 1.
            is_causal=is_causal,
        )

        if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, None, past_key_value


PARLERTTS_ATTENTION_CLASSES = {
    "eager": ParlerTTSAttention,
    "sdpa": ParlerTTSSdpaAttention,
    "flash_attention_2": ParlerTTSFlashAttention2,
}


class ParlerTTSDecoderLayer(nn.Module):
    def __init__(self, config: ParlerTTSDecoderConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.embed_dim = config.hidden_size

        self.self_attn = PARLERTTS_ATTENTION_CLASSES[config._attn_implementation](
            embed_dim=self.embed_dim,
            num_heads=config.num_attention_heads,
            num_key_value_heads=config.num_key_value_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            is_causal=True,
            bias=False,
            rope_embeddings=config.rope_embeddings,
            layer_idx=layer_idx,
            config=config,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        cross_attn_implementation = config._attn_implementation
        if config.cross_attention_implementation_strategy == "always_eager":
            cross_attn_implementation = "eager"
        elif config.cross_attention_implementation_strategy == "always_sdpa":
            cross_attn_implementation = "sdpa"
        self.encoder_attn = PARLERTTS_ATTENTION_CLASSES[cross_attn_implementation](
            self.embed_dim,
            config.num_attention_heads,
            num_key_value_heads=config.num_cross_attention_key_value_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            bias=False,
            rope_embeddings=config.rope_embeddings,
            layer_idx=layer_idx,
            config=config,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim, bias=False)
        self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim, bias=False)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        cos: Optional[torch.LongTensor] = None,
        sin: Optional[torch.LongTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[EncoderDecoderCache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
                config.n_positions - 1]`.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=past_key_value,
            attention_mask=attention_mask,
            cos=cos,
            sin=sin,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
            cache_position=cache_position,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # Cross-Attention Block
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                cos=cos,
                sin=sin,
                layer_head_mask=cross_attn_layer_head_mask,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states

            # add cross-attn to position 1 of present_key_value tuple
            present_key_value = (present_key_value, cross_attn_present_key_value)

        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


# Copied from transformers.models.musicgen.modeling_musicgen.MusicgenPreTrainedModel with Musicgen->ParlerTTS
class ParlerTTSPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ParlerTTSDecoderConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _no_split_modules = ["ParlerTTSDecoderLayer", "ParlerTTSAttention"]
    _supports_cache_class = True
    _supports_static_cache = True

    def _init_weights(self, module):
        std = self.config.initializer_factor
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


MUSICGEN_START_DOCSTRING = r"""

    The ParlerTTS model was proposed in [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by
    Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi, Alexandre Défossez. It is an
    encoder-decoder transformer trained on the task of conditional music generation.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`ParlerTTSConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MUSICGEN_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary, corresponding to the sequence of audio codes.

            Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes,
            such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            <Tip warning={true}>

            The `decoder_input_ids` will automatically be converted from shape `(batch_size * num_codebooks,
            target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If
            you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of
            frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks,
            target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as
            `decoder_input_ids`.

            </Tip>
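
            As a rough, hypothetical sketch (the `audio_codes` tensor name is illustrative, not part of the API),
            the reshape described above could look like:

            ```python
            # audio_codes: (frames, batch_size, num_codebooks, target_sequence_length), with frames == 1
            frames, bsz, num_codebooks, seq_len = audio_codes.shape
            decoder_input_ids = audio_codes[0].reshape(bsz * num_codebooks, seq_len)
            ```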

        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
            1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
            TODO: it's passed through enc_to_dec_proj and optionally we concatenate the prompt hidden states in certain cases.
        past_key_values (`EncoderDecoderCache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states that can be used to speed up auto-regressive (sequential) decoding. There are
            four sets of pre-computed hidden-states: key and values states in the self-attention blocks (2) and
            in the cross-attention blocks (2). The `past_key_values` are returned when `use_cache=True` is passed or
            when `config.use_cache=True`.

            Two formats are allowed:
            - An [`~cache_utils.EncoderDecoderCache`] instance;
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
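
            As a minimal sketch (assuming the legacy tuple format described above is what you have at hand), a legacy
            cache can be wrapped into the new cache class before calling the model:

            ```python
            from transformers.cache_utils import EncoderDecoderCache

            past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
            ```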
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
            input (see `past_key_values`). This is useful if you want more control over how to convert
            `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.

            If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
            of `inputs_embeds`.
        prompt_input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input prompt sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        prompt_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding prompt token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        prompt_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `prompt_input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `prompt_input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache
            in the correct position and to infer the complete sequence length.
"""

MUSICGEN_DECODER_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`):
            Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes.

            Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes,
            such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details.

            [What are input IDs?](../glossary#input-ids)

            <Tip warning={true}>

            The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks,
            target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If
            you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of
            frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks,
            target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as
            `input_ids`.

            </Tip>

        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of
            the decoder.
        encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
            Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
            selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        prompt_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
            Sequence of prompt hidden-states at the output of the initial embedding layer. Concatenated to the input embeds.
        prompt_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
            Mask to avoid performing cross-attention on padding tokens indices of prompt input_ids. Mask values
            selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
            cross-attention on hidden heads. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


class ParlerTTSDecoder(ParlerTTSPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`ParlerTTSDecoderLayer`]
    """

    def __init__(self, config: ParlerTTSDecoderConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.layerdrop
        self.max_target_positions = config.max_position_embeddings
        self.d_model = config.hidden_size
        self.num_codebooks = config.num_codebooks
        self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0

        # TODO(YL): actually doesn't need the +1 if initialized correctly. Too late to change now.
        embed_dim = config.vocab_size + 1  # + 1 for pad token id
        self.embed_tokens = nn.ModuleList(
            [nn.Embedding(embed_dim, config.hidden_size) for _ in range(config.num_codebooks)]
        )

        self.rope_embeddings = config.rope_embeddings
        if not config.rope_embeddings:
            self.embed_positions = ParlerTTSSinusoidalPositionalEmbedding(
                config.max_position_embeddings,
                config.hidden_size,
            )
        else:
            self.rotary_emb = ParlerTTSRotaryEmbedding(
                config.hidden_size // config.num_attention_heads,
                max_position_embeddings=config.max_position_embeddings,
                base=config.rope_theta,
            )
        self.layers = nn.ModuleList(
            [ParlerTTSDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.layer_norm = nn.LayerNorm(config.hidden_size)
        self.attn_implementation = config._attn_implementation
        encoder_attn_implementation = config._attn_implementation
        if config.cross_attention_implementation_strategy is not None:
            encoder_attn_implementation = (
                "sdpa" if config.cross_attention_implementation_strategy == "always_sdpa" else "eager"
            )
        self.encoder_attn_implementation = encoder_attn_implementation
        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(MUSICGEN_DECODER_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        prompt_hidden_states: Optional[torch.FloatTensor] = None,
        prompt_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            # (bsz * codebooks, seq_len) -> (bsz, codebooks, seq_len)
            input = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1])
            bsz, num_codebooks, seq_len = input.shape
            input_shape = (bsz, seq_len)
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            input = inputs_embeds[:, :, -1:]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = sum([self.embed_tokens[codebook](input[:, codebook]) for codebook in range(num_codebooks)])

        prepended_sequence_length = 0
        # if prompt_hidden_states, fuse to inputs_embeds and update input shape
        if prompt_hidden_states is not None:
            prepended_sequence_length = prompt_hidden_states.shape[-2]
            inputs_embeds = torch.cat([prompt_hidden_states, inputs_embeds], dim=1)

        return_legacy_cache = False
        return_self_attention_cache = False
        if use_cache or past_key_values is not None:
            if isinstance(past_key_values, Cache) and not isinstance(past_key_values, EncoderDecoderCache):
                return_self_attention_cache = True
                past_key_values = EncoderDecoderCache(past_key_values, DynamicCache())
            elif not isinstance(past_key_values, EncoderDecoderCache):
                return_legacy_cache = True
                logger.warning_once(
                    "Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.43.0. "
                    "You should pass an instance of `EncoderDecoderCache` instead, e.g. "
                    "`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`."
                )
                past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)

        past_key_values_length = 0
        if cache_position is not None:
            past_key_values_length = cache_position[0]
        elif past_key_values is not None:
            past_key_values_length = past_key_values.get_seq_length()

        if cache_position is None:
            cache_position = torch.arange(
                past_key_values_length,
                past_key_values_length + input_shape[1] + prepended_sequence_length,
                device=inputs_embeds.device,
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # NOTE: 1. As it is, the masked ids from the prompt will still count in the position embeddings.
        # NOTE: 2. We want to concatenate the prompt attention mask and the decoder attention mask if and only
        # if `prompt_cross_attention=False`; ParlerTTSForConditionalGeneration takes care of setting
        # `prompt_attention_mask=None` otherwise.
        if prompt_attention_mask is not None and attention_mask is not None:
            attention_mask = torch.cat([prompt_attention_mask, attention_mask], dim=1)
        elif prompt_attention_mask is not None:
            logger.warning_once(
                "`prompt_attention_mask` is specified but `attention_mask` is not. A full `attention_mask` will be created. Make sure this is the intended behaviour."
            )
            if past_key_values_length == 0:
                attention_mask = torch.cat(
                    [
                        prompt_attention_mask,
                        torch.ones(input_shape, device=self.device, dtype=prompt_attention_mask.dtype),
                    ],
                    dim=1,
                )
            else:
                # In the generation case of `prompt_cross_attention=True`, we need to recreate an attention mask from scratch
                # to be able to prepend the prompt attention mask.
                # Since we generate token by token, we can recompute the generated length from the information we have.
                generated_length = past_key_values_length - prompt_attention_mask.shape[1] + 1
                attention_mask = torch.cat(
                    [
                        prompt_attention_mask,
                        torch.ones(
                            (input_shape[0], generated_length), device=self.device, dtype=prompt_attention_mask.dtype
                        ),
                    ],
                    dim=1,
                )

        input_shape = inputs_embeds.size()[:-1]
        cos, sin = None, None

        if not self.rope_embeddings:
            # embed positions
            # TODO: As it is, the masked ids from the prompt will still count in the position embeddings;
            # maybe we should modify the position embeddings accordingly.
            positions = self.embed_positions(inputs_embeds, past_key_values_length)
            hidden_states = inputs_embeds + positions.to(inputs_embeds.device)
        else:
            hidden_states = inputs_embeds

            if position_ids is None:
                if attention_mask is not None:
                    # masked ids will **not** count in the position embeddings
                    position_ids = attention_mask.long().cumsum(-1) - 1
                    position_ids.masked_fill_(attention_mask == 0, 1)
                else:
                    position_ids = torch.arange(
                        past_key_values_length,
                        input_shape[1] + past_key_values_length,
                        dtype=torch.long,
                        device=inputs_embeds.device,
                    )
                    position_ids = position_ids.unsqueeze(0)

                # Some generation methods already pass only the last input ID
                if position_ids.shape[1] > input_shape[1]:
                    position_ids = position_ids[:, -input_shape[1] :]

            cos, sin = self.rotary_emb(hidden_states.device.type, position_ids)
            cos, sin = cos.to(hidden_states.dtype), sin.to(hidden_states.dtype)

        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        causal_mask = self._update_causal_mask(
            attention_mask,
            inputs_embeds,
            cache_position,
            past_key_values.self_attention_cache if past_key_values is not None else None,
            output_attentions,
        )

        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            if self.encoder_attn_implementation == "flash_attention_2":
                encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
            elif self.encoder_attn_implementation == "sdpa" and cross_attn_head_mask is None and not output_attentions:
                # output_attentions=True & cross_attn_head_mask can not be supported when using SDPA, and we fall back on
                # the manual implementation that requires a 4D causal mask in all cases.
                # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
                encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
                    encoder_attention_mask,
                    inputs_embeds.dtype,
                    tgt_len=input_shape[-1],
                )
            else:
                # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
                encoder_attention_mask = _prepare_4d_attention_mask(
                    encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
                )

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`..."
                )
                use_cache = False
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None

        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                if attn_mask.size()[0] != len(self.layers):
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        f" {attn_mask.size()[0]}."
                    )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            dropout_probability = random.uniform(0, 1)
            if self.training and (dropout_probability < self.layerdrop):
                continue

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.forward,
                    hidden_states,
                    causal_mask,
                    cos,
                    sin,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                    None,
                    output_attentions,
                    use_cache,
                    cache_position,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    cos=cos,
                    sin=sin,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    cross_attn_layer_head_mask=(
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                    ),
                    past_key_value=past_key_values if use_cache else None,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        hidden_states = self.layer_norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = past_key_values if use_cache else None
        if return_self_attention_cache:
            next_cache = past_key_values.self_attention_cache
        if return_legacy_cache:
            next_cache = past_key_values.to_legacy_cache()
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )

    # Copied from transformers.models.llama.modeling_llama.LlamaModel._update_causal_mask
    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
        # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
        # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
        # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114

        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_length()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        if attention_mask is not None and attention_mask.dim() == 4:
            # in this case we assume that the mask comes already in inverted form and requires no inversion or slicing
            if attention_mask.max() != 0:
                raise ValueError("Custom 4D attention mask should be passed in inverted form with max==0`")
            causal_mask = attention_mask
        else:
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )
        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask


@add_start_docstrings(
    "The bare ParlerTTS decoder model outputting raw hidden-states without any specific head on top.",
    MUSICGEN_START_DOCSTRING,
)
# Copied from transformers.models.musicgen.modeling_musicgen.MusicgenModel with Musicgen->ParlerTTS
class ParlerTTSModel(ParlerTTSPreTrainedModel):
    def __init__(self, config: ParlerTTSDecoderConfig):
        super().__init__(config)
        self.decoder = ParlerTTSDecoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.decoder.embed_tokens

    def set_input_embeddings(self, value):
        self.decoder.embed_tokens = value

    def get_decoder(self):
        return self.decoder

    @add_start_docstrings_to_model_forward(MUSICGEN_DECODER_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        prompt_hidden_states: Optional[torch.FloatTensor] = None,
        prompt_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Union[EncoderDecoderCache, Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            encoder_attention_mask=encoder_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            prompt_hidden_states=prompt_hidden_states,
            prompt_attention_mask=prompt_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        if not return_dict:
            return decoder_outputs

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            hidden_states=decoder_outputs.hidden_states,
            attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
        )


@add_start_docstrings(
    "The Parler-TTS decoder model with a language modelling head on top.",
    MUSICGEN_START_DOCSTRING,
)
class ParlerTTSForCausalLM(ParlerTTSPreTrainedModel):
    def __init__(self, config: ParlerTTSDecoderConfig):
        super().__init__(config)

        self.model = ParlerTTSModel(config)

        self.num_codebooks = config.num_codebooks
        self.lm_heads = nn.ModuleList(
            [nn.Linear(config.hidden_size, config.vocab_size, bias=False) for _ in range(config.num_codebooks)]
        )

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens

    def set_input_embeddings(self, value):
        self.model.decoder.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_heads

    def set_output_embeddings(self, new_embeddings):
        self.lm_heads = new_embeddings

    def set_decoder(self, decoder):
        self.model.decoder = decoder

    def get_decoder(self):
        return self.model.decoder

    @add_start_docstrings_to_model_forward(MUSICGEN_DECODER_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        prompt_hidden_states: Optional[torch.FloatTensor] = None,
        prompt_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        Returns:
        """

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            prompt_hidden_states=prompt_hidden_states,
            prompt_attention_mask=prompt_attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = outputs[0]

        lm_logits = torch.stack([head(hidden_states) for head in self.lm_heads], dim=1)

        loss = None
        if labels is not None:
            # since prompt/encoder hidden states may have been concatenated to the decoder hidden states,
            # take only the last hidden states corresponding to the labels
            logits = lm_logits[:, :, -labels.shape[1] :]

            loss_fct = CrossEntropyLoss()
            loss = torch.zeros([], device=self.device)

            # (bsz, vocab_size, seq_len, num_codebooks), (bsz, seq_len, num_codebooks)
            labels = labels.masked_fill(labels == self.config.bos_token_id, -100)

            # we use every codebook token AND one single EOS at the end of each codebook
            mask = (input_ids.transpose(1, 2) != self.config.eos_token_id) & (labels != -100)

            # per codebook cross-entropy
            for codebook in range(self.config.num_codebooks):
                codebook_logits = logits[:, codebook].contiguous().view(-1, logits.shape[-1])
                codebook_mask = mask[..., codebook].contiguous().view(-1)
                codebook_labels = labels[..., codebook].contiguous().view(-1)

                codebook_loss = loss_fct(codebook_logits[codebook_mask], codebook_labels[codebook_mask])
                loss += codebook_loss

            loss = loss / self.config.num_codebooks

        # (bsz, num_codebooks, seq_len, vocab_size) -> (bsz * num_codebooks, seq_len, vocab_size)
        lm_logits = lm_logits.reshape(-1, *lm_logits.shape[2:])

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        prompt_hidden_states=None,
        prompt_attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        use_cache=True,
        delay_pattern_mask=None,
        cache_position=None,
        inputs_embeds=None,
        **kwargs,
    ):
        if delay_pattern_mask is None:
            input_ids, delay_pattern_mask = self.build_delay_pattern_mask(
                input_ids,
                bos_token_id=self.generation_config.bos_token_id,
                pad_token_id=self.generation_config.pad_token_id,
                max_length=self.generation_config.max_length,
            )

        # apply the delay pattern mask
        input_ids = self.apply_delay_pattern_mask(input_ids, delay_pattern_mask)

        position_ids = kwargs.get("position_ids", None)
        if attention_mask is not None and position_ids is None:
            # create position_ids on the fly for batch generation
            position_ids = attention_mask.long().cumsum(-1) - 1
            position_ids.masked_fill_(attention_mask == 0, 1)

        if past_key_values is not None:
            input_ids = input_ids[:, -1:]
            if position_ids is not None:
                position_ids = position_ids[:, -input_ids.shape[1] :]
Yoach Lacombe's avatar
Yoach Lacombe committed
1881

1882
1883
            # we only want to use prompt signal in the 1st generation step but keeping the attention mask
            prompt_hidden_states = None

        return {
            "input_ids": input_ids.contiguous(), # `contiguous()` needed for compilation use cases
            "attention_mask": attention_mask,
            "position_ids": position_ids,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
            "prompt_hidden_states": prompt_hidden_states,
            "prompt_attention_mask": prompt_attention_mask,
            "head_mask": head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
            "cache_position": cache_position,
            "inputs_embeds": inputs_embeds,
        }

    # Ignore copy
    def build_delay_pattern_mask(
        self, input_ids: torch.LongTensor, bos_token_id: int, pad_token_id: int, max_length: int = None
    ):
        """Build a delayed pattern mask to the input_ids. Each codebook is offset by the previous codebook by
        one, giving a delayed pattern mask at the start of sequence and end of sequence. Take the example where there
        are 4 codebooks and a max sequence length of 8, we have the delayed pattern mask of shape `(codebooks,
        seq_len)`:
        - [B, -1, -1, -1, -1, P, P, P]
        - [B, B, -1, -1, -1, -1, P, P]
        - [B, B, B, -1, -1, -1, -1, P]
        - [B, B, B, B, -1, -1, -1, -1]
        where P is the special padding token id and -1 indicates that the token is valid for prediction. If we include
        a prompt (decoder input ids), the -1 positions indicate where new tokens should be predicted, while the
        remaining positions of the mask keep the corresponding values from the prompt:
        - [B, a, b, -1, -1, P, P, P]
        - [B, B, c, d, -1, -1, P, P]
        - [B, B, B, e, f, -1, -1, P]
        - [B, B, B, B, g, h, -1, -1]
        where a-h indicate the input prompt (decoder input ids) that are offset by 1. Now, we only override the -1
        tokens in our prediction.
        """
        max_length = max_length if max_length is not None else self.generation_config.max_length
        return build_delay_pattern_mask(input_ids, bos_token_id, pad_token_id, max_length, self.num_codebooks)

    @staticmethod
    def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask):
        """Apply a delay pattern mask to the decoder input ids, only preserving predictions where
        the mask is set to -1, and otherwise setting to the value detailed in the mask."""
        return apply_delay_pattern_mask(input_ids, decoder_pad_token_mask)

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        synced_gpus: Optional[bool] = None,
        streamer: Optional["BaseStreamer"] = None,
        **kwargs,
    ):
        """
        Generates sequences of token ids for models with a language modeling head.

        <Tip warning={true}>

        Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
        model's default generation configuration. You can override any `generation_config` by passing the corresponding
        parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.

        For an overview of generation strategies and code examples, check out the [following
        guide](./generation_strategies).

        </Tip>

        Parameters:
            inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
                The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
                method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
                should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of
                `input_ids`, `input_values`, `input_features`, or `pixel_values`.
            generation_config (`~generation.GenerationConfig`, *optional*):
                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
                passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which has the following loading
                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
                default values, whose documentation should be checked to parameterize generation.
            logits_processor (`LogitsProcessorList`, *optional*):
                Custom logits processors that complement the default logits processors built from arguments and
                generation config. If a logit processor is passed that is already created with the arguments or a
                generation config an error is thrown. This feature is intended for advanced users.
            stopping_criteria (`StoppingCriteriaList`, *optional*):
                Custom stopping criteria that complement the default stopping criteria built from arguments and a
                generation config. If a stopping criteria is passed that is already created with the arguments or a
                generation config an error is thrown. This feature is intended for advanced users.
            synced_gpus (`bool`, *optional*, defaults to `False`):
                Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
            streamer (`BaseStreamer`, *optional*):
                Streamer object that will be used to stream the generated sequences. Generated tokens are passed
                through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
            kwargs (`Dict[str, Any]`, *optional*):
                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
                forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
                specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.

        Return:
            [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
            or when `config.return_dict_in_generate=True`) or a `torch.LongTensor`.

                If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
                [`~utils.ModelOutput`] types are:

                    - [`~generation.GenerateDecoderOnlyOutput`],
                    - [`~generation.GenerateBeamDecoderOnlyOutput`]

                If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
                [`~utils.ModelOutput`] types are:

                    - [`~generation.GenerateEncoderDecoderOutput`],
                    - [`~generation.GenerateBeamEncoderDecoderOutput`]
        """
        # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects
        if generation_config is None:
            generation_config = self.generation_config

        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)  # All unused kwargs must be model kwargs
        generation_config.validate()
        self._validate_model_kwargs(model_kwargs.copy())

        # 2. Set generation parameters if not already defined
        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

        if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
            if model_kwargs.get("attention_mask", None) is None:
                logger.warning(
                    "The attention mask and the pad token id were not set. As a consequence, you may observe "
                    "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
                )
            eos_token_id = generation_config.eos_token_id
            if isinstance(eos_token_id, list):
                eos_token_id = eos_token_id[0]
            logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
            generation_config.pad_token_id = eos_token_id

        # 3. Define model inputs
        # inputs_tensor has to be defined
        # model_input_name is defined if model-specific keyword input is passed
        # otherwise model_input_name is None
        # all model-specific keyword inputs are removed from `model_kwargs`
        input_ids, model_input_name, model_kwargs = self._prepare_model_inputs(
            inputs, generation_config.bos_token_id, model_kwargs
        )
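        # decoder input ids are flattened across codebooks, i.e. of shape (bsz * num_codebooks, seq_len),
        # so the true batch size is recovered by dividing out `num_codebooks`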
        batch_size = input_ids.shape[0] // self.num_codebooks
        kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None
        self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=input_ids.device)

        # 4. Define other model kwargs
        model_kwargs["use_cache"] = generation_config.use_cache

        requires_attention_mask = "encoder_outputs" not in model_kwargs
        if model_kwargs.get("attention_mask", None) is None and requires_attention_mask:
            model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
                input_ids, generation_config.pad_token_id, generation_config.eos_token_id
            )

        # 5. Prepare `max_length` depending on other stopping criteria.
        input_ids_seq_length = input_ids.shape[-1]
        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
        if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
            logger.warning(
                f"Using the model-agnostic default `max_length` (={generation_config.max_length}) "
                "to control the generation length.  recommend setting `max_new_tokens` to control the maximum length of the generation."
            )
        elif generation_config.max_new_tokens is not None:
            if not has_default_max_length:
                logger.warning(
                    f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
                    f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                    "Please refer to the documentation for more information. "
                    "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
                )
            generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length

        if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
            raise ValueError(
                f"Unfeasible length constraints: the minimum length ({generation_config.min_length}) is larger than"
                f" the maximum length ({generation_config.max_length})"
            )
        if input_ids_seq_length >= generation_config.max_length:
            logger.warning(
                f"Input length of decoder_input_ids is {input_ids_seq_length}, but `max_length` is set to"
                f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
                " increasing `max_new_tokens`."
            )

        # 6. Prepare `input_ids` which will be used for auto-regressive generation
        # Build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to Parler-TTS)
        input_ids, delay_pattern_mask = self.build_delay_pattern_mask(
            input_ids,
            bos_token_id=generation_config.bos_token_id,
            pad_token_id=generation_config.pad_token_id,
            max_length=generation_config.max_length,
        )

        if streamer is not None:
            streamer.put(input_ids.cpu())

        # stash the delay mask so that we don't have to recompute it in each forward pass
        model_kwargs["delay_pattern_mask"] = delay_pattern_mask

        # 7. determine generation mode
        is_greedy_gen_mode = (
            (generation_config.num_beams == 1)
            and (generation_config.num_beam_groups == 1)
            and generation_config.do_sample is False
        )
        is_sample_gen_mode = (
            (generation_config.num_beams == 1)
            and (generation_config.num_beam_groups == 1)
            and generation_config.do_sample is True
        )
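        # only greedy search and sampling are supported; beam-search settings are rejected further below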

        # 8. prepare distribution pre_processing samplers
        logits_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_seq_length,
            encoder_input_ids=input_ids,
            prefix_allowed_tokens_fn=None,
            logits_processor=logits_processor,
            device=input_ids.device,
        )

        # 9. prepare stopping criteria
        stopping_criteria = self._get_stopping_criteria(
            generation_config=generation_config, stopping_criteria=stopping_criteria
        )

        if is_greedy_gen_mode:
            if generation_config.num_return_sequences > 1:
                raise ValueError(
                    "num_return_sequences has to be 1 when doing greedy search, "
                    f"but is {generation_config.num_return_sequences}."
                )

            # 10. run greedy search
            outputs = self._sample(
                input_ids,
                logits_processor=logits_processor,
                stopping_criteria=stopping_criteria,
                generation_config=generation_config,
                synced_gpus=synced_gpus,
                streamer=streamer,
                **model_kwargs,
            )

        elif is_sample_gen_mode:
            # 10. prepare logits warper
            logits_warper = self._get_logits_warper(generation_config, device=input_ids.device)

            # expand input_ids with `num_return_sequences` additional sequences per batch
            input_ids, model_kwargs = self._expand_inputs_for_generation(
                input_ids=input_ids,
                expand_size=generation_config.num_return_sequences,
                **model_kwargs,
            )

            # 11. run sample
            outputs = self._sample(
                input_ids,
                logits_processor=logits_processor,
                logits_warper=logits_warper,
                stopping_criteria=stopping_criteria,
                generation_config=generation_config,
                synced_gpus=synced_gpus,
                streamer=streamer,
                **model_kwargs,
            )

        else:
            raise ValueError(
                "Got incompatible mode for generation, should be one of greedy or sampling. "
                "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`."
            )

        if generation_config.return_dict_in_generate:
            output_ids = outputs.sequences
        else:
            output_ids = outputs

        # apply the pattern mask to the final ids
        output_ids = self.apply_delay_pattern_mask(output_ids, model_kwargs["delay_pattern_mask"])

        # revert the delay pattern by filtering the bos and pad token ids from the delay pattern mask
        _, mask = self.build_delay_pattern_mask(
            input_ids,
            bos_token_id=generation_config.bos_token_id,
            pad_token_id=generation_config.pad_token_id,
            max_length=output_ids.shape[1],
        )

        mask = (mask != generation_config.bos_token_id) & (mask != generation_config.pad_token_id)
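        # drop the BOS/pad positions of the delay pattern and un-flatten the codebooks:
        # (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, -1)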
        output_ids = output_ids[mask].reshape(batch_size, self.num_codebooks, -1)

        if generation_config.return_dict_in_generate:
            outputs.sequences = output_ids
            return outputs
        else:
            return output_ids


@add_start_docstrings(
    "The composite Parler-TTS model with a text encoder, audio encoder and ParlerTTS decoder, "
    "for music generation tasks with one or both of text and audio prompts.",
    MUSICGEN_START_DOCSTRING,
)
class ParlerTTSForConditionalGeneration(PreTrainedModel):
    config_class = ParlerTTSConfig
    base_model_prefix = "encoder_decoder"
    main_input_name = "input_ids"
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True
    _supports_static_cache = True

    def __init__(
        self,
        config: Optional[ParlerTTSConfig] = None,
        text_encoder: Optional[PreTrainedModel] = None,
        audio_encoder: Optional[PreTrainedModel] = None,
        decoder: Optional[ParlerTTSForCausalLM] = None,
    ):
        if config is None and (text_encoder is None or audio_encoder is None or decoder is None):
            raise ValueError(
                "Either a configuration has to be provided, or all three of text encoder, audio encoder and Parler-TTS decoder."
            )
        if config is None:
            config = ParlerTTSConfig.from_sub_models_config(text_encoder.config, audio_encoder.config, decoder.config)
        else:
            if not isinstance(config, self.config_class):
                raise ValueError(f"Config: {config} has to be of type {self.config_class}")

        if config.decoder.cross_attention_hidden_size is not None:
            if config.decoder.cross_attention_hidden_size != config.text_encoder.hidden_size:
                raise ValueError(
                    "If `cross_attention_hidden_size` is specified in the Parler-TTS decoder's configuration, it has to be equal"
                    f" to the text encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size} for"
                    f" `config.decoder.cross_attention_hidden_size` and {config.text_encoder.hidden_size} for"
                    " `config.text_encoder.hidden_size`."
                )

        # initialize with config
        super().__init__(config)

        if text_encoder is None:
            from transformers.models.auto.modeling_auto import AutoModelForTextEncoding

            text_encoder = AutoModelForTextEncoding.from_config(config.text_encoder)

        if audio_encoder is None:
            from transformers.models.auto.modeling_auto import AutoModel

            audio_encoder = AutoModel.from_config(config.audio_encoder)

        if decoder is None:
            decoder = ParlerTTSForCausalLM(config.decoder)

        self.text_encoder = text_encoder
        self.audio_encoder = audio_encoder
        self.decoder = decoder

        if self.text_encoder.config.to_dict() != self.config.text_encoder.to_dict():
            logger.warning(
                f"Config of the text_encoder: {self.text_encoder.__class__} is overwritten by shared text_encoder config:"
                f" {self.config.text_encoder}"
            )
        if self.audio_encoder.config.to_dict() != self.config.audio_encoder.to_dict():
            logger.warning(
                f"Config of the audio_encoder: {self.audio_encoder.__class__} is overwritten by shared audio_encoder config:"
                f" {self.config.audio_encoder}"
            )
        if self.decoder.config.to_dict() != self.config.decoder.to_dict():
            logger.warning(
                f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
                f" {self.config.decoder}"
            )

        # make sure that the individual model's config refers to the shared config
        # so that the updates to the config will be synced
        self.text_encoder.config = self.config.text_encoder
        self.audio_encoder.config = self.config.audio_encoder
        self.decoder.config = self.config.decoder

        # text encoder outputs might need to be projected to different dimension for decoder
        if (
            self.text_encoder.config.hidden_size != self.decoder.config.hidden_size
            and self.decoder.config.cross_attention_hidden_size is None
        ):
            self.enc_to_dec_proj = nn.Linear(self.text_encoder.config.hidden_size, self.decoder.config.hidden_size)

        # prompt embeddings
        self.embed_prompts = nn.Embedding(config.vocab_size, self.decoder.config.hidden_size)
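
        # when `prompt_cross_attention=True`, the embedded prompt is appended to the text-description encoder states
        # and attended to via cross-attention (hence the sinusoidal positions below); otherwise the prompt embeddings
        # are passed to the decoder through `prompt_hidden_states` (see `forward`)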
        self.prompt_cross_attention = config.prompt_cross_attention
        if config.prompt_cross_attention:
            self.embed_positions = ParlerTTSSinusoidalPositionalEmbedding(
                config.decoder.max_position_embeddings,
                config.decoder.hidden_size,
            )

        if self.text_encoder.get_output_embeddings() is not None:
            raise ValueError(
                f"The encoder {self.text_encoder} should not have a LM Head. Please use a model without and LM Head"
            )

        decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys())
        if "encoder_hidden_states" not in decoder_signature:
            raise ValueError(
                "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the "
                "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350"
            )

        # Initialize projection and embedding layers and tie text encoder and decoder weights if set accordingly
        self.post_init()

    def _init_weights(self, module):
        std = self.decoder.config.initializer_factor
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    def tie_weights(self):
        # tie text encoder & decoder if needed
        if self.config.tie_encoder_decoder:
            # tie text encoder and decoder base model
            decoder_base_model_prefix = self.decoder.base_model_prefix
            self._tie_encoder_decoder_weights(
                self.text_encoder, self.decoder._modules[decoder_base_model_prefix], self.decoder.base_model_prefix
            )

    def get_audio_encoder(self):
        return self.audio_encoder

    def get_text_encoder(self):
        return self.text_encoder

    def get_encoder(self):
        # get the text encoder to compute the encoder hidden-states for generation
        return self.get_text_encoder()

    def get_decoder(self):
        return self.decoder

    def get_input_embeddings(self):
        return self.text_encoder.get_input_embeddings()

    def get_output_embeddings(self):
        return self.decoder.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        return self.decoder.set_output_embeddings(new_embeddings)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Example:

        ```python
        >>> from parler_tts import ParlerTTSForConditionalGeneration

        >>> model = ParlerTTSForConditionalGeneration.from_pretrained("facebook/parler_tts-small")
        ```"""

        # At the moment fast initialization is not supported for composite models
        if kwargs.get("_fast_init", False):
            logger.warning(
                "Fast initialization is currently not supported for ParlerTTSForConditionalGeneration. "
                "Falling back to slow initialization..."
            )
        kwargs["_fast_init"] = False

        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)

    @classmethod
    def from_sub_models_pretrained(
        cls,
        text_encoder_pretrained_model_name_or_path: str = None,
        audio_encoder_pretrained_model_name_or_path: str = None,
        decoder_pretrained_model_name_or_path: str = None,
        *model_args,
        **kwargs,
    ) -> PreTrainedModel:
        r"""
        Instantiate a text encoder, an audio encoder, and a Parler-TTS decoder from one, two or three base classes of the
        library from pretrained model checkpoints.


        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
        the model, you need to first set it back in training mode with `model.train()`.

        Params:
            text_encoder_pretrained_model_name_or_path (`str`, *optional*):
                Information necessary to initiate the text encoder. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like `t5-base`, or namespaced under a user or
                      organization name, like `google/flan-t5-base`.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.

            audio_encoder_pretrained_model_name_or_path (`str`, *optional*):
                Information necessary to initiate the audio encoder. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
                      user or organization name, like `facebook/encodec_24khz`.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.

            decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
                Information necessary to initiate the decoder. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like `gpt2`, or namespaced under a user or
                      organization name, like `facebook/parler_tts-small`.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.

            model_args (remaining positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.

            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
                `output_attentions=True`).

                - To update the text encoder configuration, use the prefix *text_encoder_* for each configuration
                  parameter.
                - To update the audio encoder configuration, use the prefix *audio_encoder_* for each configuration
                  parameter.
                - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
                - To update the parent model configuration, do not use a prefix for each configuration parameter.

                Behaves differently depending on whether a `config` is provided or automatically loaded.

        Example:

        ```python
        >>> from parler_tts import ParlerTTSForConditionalGeneration

        >>> # initialize a parler_tts model from a t5 text encoder, encodec audio encoder, and parler_tts decoder
        >>> model = ParlerTTSForConditionalGeneration.from_sub_models_pretrained(
        ...     text_encoder_pretrained_model_name_or_path="t5-base",
        ...     audio_encoder_pretrained_model_name_or_path="facebook/encodec_24khz",
        ...     decoder_pretrained_model_name_or_path="facebook/parler_tts-small",
        ... )
        >>> # saving model after fine-tuning
        >>> model.save_pretrained("./parler_tts-ft")
        >>> # load fine-tuned model
        >>> model = ParlerTTSForConditionalGeneration.from_pretrained("./parler_tts-ft")
        ```"""

        kwargs_text_encoder = {
            argument[len("text_encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("text_encoder_")
        }

        kwargs_audio_encoder = {
            argument[len("audio_encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("audio_encoder_")
        }

        kwargs_decoder = {
            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }

        # remove text encoder, audio encoder and decoder kwargs from kwargs
        for key in kwargs_text_encoder.keys():
            del kwargs["text_encoder_" + key]
        for key in kwargs_audio_encoder.keys():
            del kwargs["audio_encoder_" + key]
        for key in kwargs_decoder.keys():
            del kwargs["decoder_" + key]

        # Load and initialize the encoder and decoder
        # The distinction between encoder and decoder at the model level is made
        # by the value of the flag `is_decoder` that we need to set correctly.
        text_encoder = kwargs_text_encoder.pop("model", None)
        if text_encoder is None:
            if text_encoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `text_encoder_model` is not defined as an argument, a `text_encoder_pretrained_model_name_or_path` has "
                    "to be defined."
                )

            if "config" not in kwargs_text_encoder:
                encoder_config, kwargs_text_encoder = AutoConfig.from_pretrained(
                    text_encoder_pretrained_model_name_or_path, **kwargs_text_encoder, return_unused_kwargs=True
                )

                if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
                    logger.info(
                        f"Initializing {text_encoder_pretrained_model_name_or_path} as a text_encoder model "
                        "from a decoder model. Cross-attention and casual mask are disabled."
                    )
                    encoder_config.is_decoder = False
                    encoder_config.add_cross_attention = False

                kwargs_text_encoder["config"] = encoder_config

            text_encoder = AutoModelForTextEncoding.from_pretrained(
                text_encoder_pretrained_model_name_or_path, *model_args, **kwargs_text_encoder
            )

        audio_encoder = kwargs_audio_encoder.pop("model", None)
        if audio_encoder is None:
            if audio_encoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `audio_encoder_model` is not defined as an argument, an `audio_encoder_pretrained_model_name_or_path` has "
                    "to be defined."
                )

            if "config" not in kwargs_audio_encoder:
                encoder_config, kwargs_audio_encoder = AutoConfig.from_pretrained(
                    audio_encoder_pretrained_model_name_or_path, **kwargs_audio_encoder, return_unused_kwargs=True
                )

                if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
                    logger.info(
                        f"Initializing {audio_encoder_pretrained_model_name_or_path} as an audio_encoder model "
                        "from a decoder model. Cross-attention and casual mask are disabled."
                    )
                    encoder_config.is_decoder = False
                    encoder_config.add_cross_attention = False

                kwargs_audio_encoder["config"] = encoder_config

            audio_encoder = AutoModel.from_pretrained(
                audio_encoder_pretrained_model_name_or_path, *model_args, **kwargs_audio_encoder
            )

        decoder = kwargs_decoder.pop("model", None)
        if decoder is None:
            if decoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` has "
                    "to be defined."
                )

            if "config" not in kwargs_decoder:
                decoder_config, kwargs_decoder = ParlerTTSDecoderConfig.from_pretrained(
                    decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
                )

                if isinstance(decoder_config, ParlerTTSConfig):
                    decoder_config = decoder_config.decoder

                if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
                    logger.info(
                        f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
                        f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
                        f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
                    )
                    decoder_config.is_decoder = True
                    decoder_config.add_cross_attention = True

                kwargs_decoder["config"] = decoder_config

            if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
                logger.warning(
                    f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
                    f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, "
                    "make sure that the attributes `is_decoder` and `add_cross_attention` of `decoder_config` "
                    "passed to `.from_sub_models_pretrained(...)` are set to `True` or do not pass a "
                    "`decoder_config` to `.from_sub_models_pretrained(...)`"
                )

            decoder = ParlerTTSForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)

        # instantiate config with corresponding kwargs
        config = ParlerTTSConfig.from_sub_models_config(
            text_encoder.config, audio_encoder.config, decoder.config, **kwargs
        )
        return cls(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder, config=config)

    @add_start_docstrings_to_model_forward(MUSICGEN_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        input_values: Optional[torch.FloatTensor] = None,
        padding_mask: Optional[torch.BoolTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
        past_key_values: Optional[Union[EncoderDecoderCache, Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        prompt_input_ids: Optional[torch.FloatTensor] = None,
        prompt_attention_mask: Optional[torch.LongTensor] = None,
        prompt_hidden_states: Optional[torch.FloatTensor] = None,
        decoder_position_ids: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Union[Tuple, Seq2SeqLMOutput]:
        r"""
        Returns:

        Examples:
        ```python
        >>> from transformers import AutoProcessor, ParlerTTSForConditionalGeneration
        >>> import torch

        >>> processor = AutoProcessor.from_pretrained("facebook/parler_tts-small")
        >>> model = ParlerTTSForConditionalGeneration.from_pretrained("facebook/parler_tts-small")

        >>> inputs = processor(
        ...     text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"],
        ...     padding=True,
        ...     return_tensors="pt",
        ... )

        >>> pad_token_id = model.generation_config.pad_token_id
        >>> decoder_input_ids = (
        ...     torch.ones((inputs.input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long)
        ...     * pad_token_id
        ... )

        >>> logits = model(**inputs, decoder_input_ids=decoder_input_ids).logits
        >>> logits.shape  # (bsz * num_codebooks, tgt_len, vocab_size)
        torch.Size([8, 1, 2048])
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        kwargs_text_encoder = {
            argument[len("text_encoder_")]: value
            for argument, value in kwargs.items()
            if argument.startswith("text_encoder_")
        }

        kwargs_audio_encoder = {
            argument[len("audio_encoder_")]: value
            for argument, value in kwargs.items()
            if argument.startswith("audio_encoder_")
        }

        kwargs_decoder = {
            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }

        if prompt_hidden_states is None:
            if prompt_input_ids is not None:
                prompt_hidden_states = self.embed_prompts(prompt_input_ids)
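                # `embed_prompts` maps the prompt token ids (the transcript to be spoken) into the decoder's hidden size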

        if encoder_outputs is None:
            encoder_outputs = self.text_encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                inputs_embeds=inputs_embeds,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                **kwargs_text_encoder,
            )
            encoder_hidden_states = encoder_outputs[0]

            # optionally project encoder_hidden_states
            if (
                self.text_encoder.config.hidden_size != self.decoder.config.hidden_size
                and self.decoder.config.cross_attention_hidden_size is None
            ):
                encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)

            if attention_mask is not None:
                encoder_hidden_states = encoder_hidden_states * attention_mask[..., None]

            if prompt_hidden_states is not None and self.prompt_cross_attention:
                # add sinusoidal positional embedding
                positions = self.embed_positions(prompt_hidden_states, 0)
                prompt_hidden_states = prompt_hidden_states + positions.to(prompt_hidden_states.device)

                if prompt_attention_mask is not None and attention_mask is None:
                    attention_mask = torch.ones(
                        encoder_hidden_states.shape[:2], device=self.device, dtype=prompt_attention_mask.dtype
                    )
                elif attention_mask is not None and prompt_attention_mask is None:
                    prompt_attention_mask = torch.ones(
                        prompt_hidden_states.shape[:2], device=self.device, dtype=attention_mask.dtype
                    )

                # concatenate text description states with prompt description states
                encoder_hidden_states = torch.cat([encoder_hidden_states, prompt_hidden_states], dim=1)
                if prompt_attention_mask is not None:
                    attention_mask = torch.cat([attention_mask, prompt_attention_mask], dim=1)

                prompt_hidden_states = None
                prompt_attention_mask = None

            encoder_outputs["last_hidden_state"] = encoder_hidden_states

        elif isinstance(encoder_outputs, tuple):
            encoder_outputs = BaseModelOutput(*encoder_outputs)

        encoder_hidden_states = encoder_outputs.last_hidden_state

        if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
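            # teacher forcing: build `decoder_input_ids` by shifting the label codes one position to the right,
            # then swap the time and codebook axes into the layout expected by the decoder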
            decoder_input_ids = shift_tokens_right(
                labels, self.config.pad_token_id, self.config.decoder_start_token_id
            ).transpose(1, 2)

        elif decoder_input_ids is None and decoder_inputs_embeds is None:
            audio_encoder_outputs = self.audio_encoder(
                input_values=input_values,
                padding_mask=padding_mask,
                **kwargs_audio_encoder,
            )
            audio_codes = audio_encoder_outputs.audio_codes
            frames, bsz, codebooks, seq_len = audio_codes.shape
            if frames != 1:
                raise ValueError(
                    f"Expected 1 frame in the audio code outputs, got {frames} frames. Ensure chunking is "
                    "disabled by setting `chunk_length=None` in the audio encoder."
                )

            if self.config.decoder.audio_channels == 2 and audio_codes.shape[2] == self.decoder.num_codebooks // 2:
                # mono input through encodec that we convert to stereo
                audio_codes = audio_codes.repeat_interleave(2, dim=2)

            decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len)

        # Decode
        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=attention_mask,
            prompt_hidden_states=prompt_hidden_states,
            prompt_attention_mask=prompt_attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            use_cache=use_cache,
            past_key_values=past_key_values,
            return_dict=return_dict,
            labels=labels,
            cache_position=cache_position,
            **kwargs_decoder,
        )

        if not return_dict:
            return decoder_outputs + (encoder_hidden_states,)

        return Seq2SeqLMOutput(
            loss=decoder_outputs.loss,
            logits=decoder_outputs.logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_attention_mask=None,
        decoder_head_mask=None,
        prompt_hidden_states=None,
        prompt_attention_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        decoder_delay_pattern_mask=None,
        cache_position=None,
        inputs_embeds=None,
        **kwargs,
    ):
        if decoder_delay_pattern_mask is None:
            decoder_input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(
                decoder_input_ids,
                bos_token_id=self.generation_config.bos_token_id,
                pad_token_id=self.generation_config.pad_token_id,
                max_length=self.generation_config.max_length,
            )

        # apply the delay pattern mask
        decoder_input_ids = self.decoder.apply_delay_pattern_mask(decoder_input_ids, decoder_delay_pattern_mask)

        past_length = 0
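        # when values are already cached, work out how many positions they cover and trim `decoder_input_ids`
        # down to the tokens that still need to be processed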
        if past_key_values is not None:
            if isinstance(past_key_values, EncoderDecoderCache):
                past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length()
                if past_key_values.get_seq_length() > 0:
                    # we only want to use prompt signal in the 1st generation step
                    prompt_hidden_states = None
            else:
                past_length = past_key_values[0][0].shape[2]
                # we only want to use prompt signal in the 1st generation step
                prompt_hidden_states = None

            # Some generation methods already pass only the last input ID
            if decoder_input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # Default to old behavior: keep only final ID
                remove_prefix_length = decoder_input_ids.shape[1] - 1

            decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]

        if cache_position is None:
            cache_position = torch.arange(
                past_length, past_length + decoder_input_ids.shape[1], device=decoder_input_ids.device
            )
        elif use_cache:
            cur_len = decoder_input_ids.shape[1]
            if prompt_hidden_states is not None and not self.prompt_cross_attention:
                # meaning we are in 1st generation step and prompt_hidden_state will be prepended
                cur_len += prompt_hidden_states.shape[1]

            cache_position = cache_position[-cur_len:]

        if decoder_attention_mask is None and prompt_attention_mask is not None:
            input = decoder_input_ids.reshape(-1, self.decoder.num_codebooks, decoder_input_ids.shape[-1])
            bsz, _, seq_len = input.shape
            input_shape = (bsz, seq_len)

            past_key_values_length = 0
            if cache_position is not None:
                past_key_values_length = cache_position[0]
            elif past_key_values is not None:
                past_key_values_length = past_key_values.get_seq_length()

            logger.warning_once(
                "`prompt_attention_mask` is specified but `attention_mask` is not. A full `attention_mask` will be created. Make sure this is the intended behaviour."
            )
            if past_key_values is None or (
                isinstance(past_key_values, EncoderDecoderCache) and past_key_values.get_seq_length() == 0
            ):
                decoder_attention_mask = torch.ones(input_shape, device=self.device, dtype=decoder_input_ids.dtype)
            elif prompt_attention_mask is not None:
                # In the generation case of `prompt_cross_attention=True`, we need to recreate an attention mask from scratch
                # to be able to prepend the prompt attention mask.
                # Since we generate token per token, we can recompute the generated length from the information we have.
                generated_length = past_key_values_length - prompt_attention_mask.shape[1] + 1
                decoder_attention_mask = torch.ones(
                    (input_shape[0], generated_length), device=self.device, dtype=prompt_attention_mask.dtype
                )

        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids.contiguous(),
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "prompt_hidden_states": prompt_hidden_states,
            "prompt_attention_mask": prompt_attention_mask,
            "use_cache": use_cache,
            "cache_position": cache_position,
            "inputs_embeds": inputs_embeds,
        }

    def _prepare_decoder_input_ids_for_generation(
        self,
        batch_size: int,
        model_input_name: str,
        model_kwargs: Dict[str, torch.Tensor],
        decoder_start_token_id: int = None,
        bos_token_id: int = None,
        device: torch.device = None,
    ) -> Tuple[torch.LongTensor, Dict[str, torch.Tensor]]:
        """Prepares `decoder_input_ids` for generation with encoder-decoder models"""

        # 1. Check whether the user has defined `decoder_input_ids` manually. To ease input naming, we also allow the
        # user to pass it under `input_ids`, if the encoder does not use it as the main input.
        if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
            decoder_input_ids = model_kwargs.pop("decoder_input_ids")
        elif "input_ids" in model_kwargs and model_input_name != "input_ids":
            decoder_input_ids = model_kwargs.pop("input_ids")
        else:
            decoder_input_ids = None

        # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that.
        decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
        if device is None:
            device = self.device
        decoder_input_ids_start = (
            torch.ones((batch_size * self.decoder.num_codebooks, 1), dtype=torch.long, device=device)
            * decoder_start_token_id
        )

        # no user input -> use decoder_start_token_id as decoder_input_ids
        if decoder_input_ids is None:
            decoder_input_ids = decoder_input_ids_start

        # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust
        # decoder_attention_mask if provided)
        elif (decoder_input_ids[..., 0] != decoder_start_token_id).all().item():
            decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1)
            if "decoder_attention_mask" in model_kwargs:
                decoder_attention_mask = model_kwargs["decoder_attention_mask"]
                decoder_attention_mask = torch.cat(
                    (torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask),
                    dim=-1,
                )
                model_kwargs["decoder_attention_mask"] = decoder_attention_mask

        if not self.prompt_cross_attention:
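            # embed the (multi-codebook) decoder start ids and prepend the prompt hidden states, so that the first
            # forward pass receives the prompt through `inputs_embeds` rather than through cross-attention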
            prompt_hidden_states = model_kwargs["prompt_hidden_states"]
            num_codebooks = self.decoder.num_codebooks
            input = decoder_input_ids.reshape(-1, num_codebooks, decoder_input_ids.shape[-1])
            inputs_embeds = sum(
                [
                    self.decoder.model.decoder.embed_tokens[codebook](input[:, codebook])
                    for codebook in range(num_codebooks)
                ]
            )
            inputs_embeds = torch.cat([prompt_hidden_states, inputs_embeds], dim=1)
            model_kwargs["inputs_embeds"] = inputs_embeds

        return decoder_input_ids, model_kwargs

    def _prepare_text_encoder_kwargs_for_generation(
        self,
        inputs_tensor: torch.Tensor,
        model_kwargs,
        model_input_name: Optional[str],
        generation_config: GenerationConfig,
    ) -> Dict[str, Any]:
        # 1. get text encoder
        encoder = self.get_text_encoder()
        # Compatibility with Accelerate big model inference: we need the encoder to output results on the same device
        # as the inputs.
        if hasattr(encoder, "_hf_hook"):
            encoder._hf_hook.io_same_device = True

        # 2. Prepare encoder args and encoder kwargs from model kwargs.
        irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
        encoder_kwargs = {
            argument: value
            for argument, value in model_kwargs.items()
            if not any(argument.startswith(p) for p in irrelevant_prefix)
        }
        encoder_signature = set(inspect.signature(encoder.forward).parameters)
        encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
        if not encoder_accepts_wildcard:
            encoder_kwargs = {
                argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
            }
        encoder_kwargs["output_attentions"] = generation_config.output_attentions
        encoder_kwargs["output_hidden_states"] = generation_config.output_hidden_states

        # 3. make sure that encoder returns `ModelOutput`
        model_input_name = model_input_name if model_input_name is not None else self.text_encoder.main_input_name
        encoder_kwargs["return_dict"] = True
        encoder_kwargs[model_input_name] = inputs_tensor
        last_hidden_state = encoder(**encoder_kwargs).last_hidden_state

        # we optionally project last_hidden_state to avoid recomputing it every time
        encoder_hidden_states = last_hidden_state
        if (
            self.text_encoder.config.hidden_size != self.decoder.config.hidden_size
            and self.decoder.config.cross_attention_hidden_size is None
        ):
            encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)

        if model_kwargs["attention_mask"] is not None:
            encoder_hidden_states = encoder_hidden_states * model_kwargs["attention_mask"][..., None]

        model_kwargs["encoder_outputs"] = BaseModelOutput(last_hidden_state=encoder_hidden_states)

        return model_kwargs

    def _prepare_prompt_kwargs_for_generation(self, prompt_input_ids, model_kwargs):
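        # two conditioning modes are handled below:
        # - `prompt_cross_attention=True`: the prompt hidden states (plus positional embeddings) are concatenated to
        #   the text-encoder hidden states and consumed through cross-attention, so they are dropped from the kwargs
        # - `prompt_cross_attention=False`: the prompt hidden states are kept in the kwargs and later prepended to
        #   the decoder `inputs_embeds` on the first generation step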
        prompt_hidden_states = self.embed_prompts(prompt_input_ids)

        if self.prompt_cross_attention:
            # add sinusoidal positional embedding
            positions = self.embed_positions(prompt_hidden_states, 0)
            prompt_hidden_states = prompt_hidden_states + positions.to(prompt_hidden_states.device)

            attention_mask = model_kwargs.get("attention_mask", None)
            prompt_attention_mask = model_kwargs.get("prompt_attention_mask", None)
            encoder_hidden_states = model_kwargs["encoder_outputs"].last_hidden_state

            if prompt_attention_mask is not None and attention_mask is None:
                attention_mask = torch.ones(
                    encoder_hidden_states.shape[:2], device=self.device, dtype=prompt_attention_mask.dtype
                )
            elif attention_mask is not None and prompt_attention_mask is None:
                prompt_attention_mask = torch.ones(
                    prompt_hidden_states.shape[:2], device=self.device, dtype=attention_mask.dtype
                )

            # concatenate text description states with prompt description states
            encoder_hidden_states = torch.cat([encoder_hidden_states, prompt_hidden_states], dim=1)
            if prompt_attention_mask is not None:
                attention_mask = torch.cat([attention_mask, prompt_attention_mask], dim=1)

            model_kwargs["encoder_outputs"].last_hidden_state = encoder_hidden_states
            model_kwargs["attention_mask"] = attention_mask

            # in this case, since we already concatenated the prompt hidden states and attention mask, we don't need them anymore.
            model_kwargs["prompt_hidden_states"] = None
            model_kwargs["prompt_attention_mask"] = None
        else:
            model_kwargs["prompt_hidden_states"] = prompt_hidden_states
            # we're keeping the prompt attention mask because it has to be prepended to the decoder attention mask on the fly
        return model_kwargs

    def _prepare_audio_encoder_kwargs_for_generation(
        self, input_values, model_kwargs, model_input_name: Optional[str] = None
    ):
        # 1. get audio encoder
        encoder = self.get_audio_encoder()
        # Compatibility with Accelerate big model inference: we need the encoder to output results on the same device
        # as the inputs.
        if hasattr(encoder, "_hf_hook"):
            encoder._hf_hook.io_same_device = True

        # 2. Prepare encoder args and encoder kwargs from model kwargs.
        irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
        encoder_kwargs = {
            argument: value
            for argument, value in model_kwargs.items()
            if not any(argument.startswith(p) for p in irrelevant_prefix)
        }
        encoder_signature = set(inspect.signature(encoder.forward).parameters)
        encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
        if not encoder_accepts_wildcard:
            encoder_kwargs = {
                argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
            }

        # 3. make sure that encoder returns `ModelOutput`
        model_input_name = model_input_name if model_input_name is not None else self.audio_encoder.main_input_name
        encoder_kwargs["return_dict"] = True

        encoder_kwargs[model_input_name] = input_values
        audio_encoder_outputs = encoder.encode(**encoder_kwargs)
        audio_codes = audio_encoder_outputs.audio_codes
        audio_scales = audio_encoder_outputs.audio_scales

        frames, bsz, codebooks, seq_len = audio_codes.shape

        if frames != 1:
            raise ValueError(
                f"Expected 1 frame in the audio code outputs, got {frames} frames. Ensure chunking is "
                "disabled by setting `chunk_length=None` in the audio encoder."
            )

        decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len)

        model_kwargs["decoder_input_ids"] = decoder_input_ids
        model_kwargs["audio_scales"] = audio_scales
        return model_kwargs

    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        return shift_tokens_right(
            labels, self.config.decoder.pad_token_id, self.config.decoder.bos_token_id
        ).transpose(1, 2)

    def resize_token_embeddings(self, *args, **kwargs):
        raise NotImplementedError(
            "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the"
            " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
            " model.decoder.resize_token_embeddings(...))"
        )

    def _maybe_initialize_input_ids_for_generation(
        self,
        inputs: Optional[torch.Tensor] = None,
        bos_token_id: Optional[int] = None,
        model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.LongTensor:
        """Initializes input ids for generation, if necessary."""
        if inputs is not None:
            return inputs

        encoder_outputs = model_kwargs.get("encoder_outputs")
        if encoder_outputs is not None:
            # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding
            shape = encoder_outputs[0].size()[:-1]
            return torch.ones(shape, dtype=torch.long, device=self.device) * -100

        if bos_token_id is None:
            raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")

        # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with
        # soft-prompting or in multimodal implementations built on top of decoder-only language models.
        batch_size = 1
        for value in model_kwargs.values():
            if isinstance(value, torch.Tensor):
                batch_size = value.shape[0]
                break
        return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id

    def _get_decoder_start_token_id(
        self, decoder_start_token_id: Union[int, List[int]] = None, bos_token_id: int = None
    ) -> int:
        decoder_start_token_id = (
            decoder_start_token_id
            if decoder_start_token_id is not None
            else self.generation_config.decoder_start_token_id
        )
        bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id

        if decoder_start_token_id is not None:
            return decoder_start_token_id
        elif bos_token_id is not None:
            return bos_token_id
        raise ValueError(
            "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
        )

    def _get_cache(self, cache_implementation: str, max_batch_size: int, max_cache_len: int, model_kwargs) -> Cache:
        """
        Sets a cache for `generate` that will persist across calls. A new cache will only be initialized if a
        new `generate` call requires a larger cache.

        Returns the resulting cache object.
        """
        cache_cls: Cache = NEED_SETUP_CACHE_CLASSES_MAPPING[cache_implementation]
        requires_cross_attention_cache = (
            self.config.is_encoder_decoder or model_kwargs.get("encoder_outputs") is not None
        )

        if hasattr(self, "_cache"):
            cache_to_check = self._cache.self_attention_cache if requires_cross_attention_cache else self._cache

        if cache_implementation == "sliding_window":
            max_cache_len = min(self.config.sliding_window, max_cache_len)

        need_new_cache = (
            not hasattr(self, "_cache")
            or (not isinstance(cache_to_check, cache_cls))
            or cache_to_check.max_batch_size != max_batch_size
            or cache_to_check.max_cache_len < max_cache_len
        )

        if requires_cross_attention_cache and hasattr(self, "_cache"):
            need_new_cache = (
                need_new_cache
                or self._cache.cross_attention_cache.max_cache_len != model_kwargs["encoder_outputs"][0].shape[1]
            )

        if need_new_cache:
            if hasattr(self.config, "_pre_quantization_dtype"):
                cache_dtype = self.config._pre_quantization_dtype
            else:
                cache_dtype = self.dtype
            cache_kwargs = {
                "config": self.config.decoder,
                "max_batch_size": max_batch_size,
                "max_cache_len": max_cache_len,
                "device": self.device,
                "dtype": cache_dtype,
            }
            self._cache = cache_cls(**cache_kwargs)
            if requires_cross_attention_cache:
                encoder_kwargs = cache_kwargs.copy()
                encoder_kwargs["max_cache_len"] = model_kwargs["encoder_outputs"][0].shape[1]
                config_cross_attention_cache = copy.deepcopy(self.config.decoder)
                config_cross_attention_cache.update(
                    {"num_key_value_heads": self.config.decoder.num_cross_attention_key_value_heads}
                )
                encoder_kwargs["config"] = config_cross_attention_cache
                self._cache = EncoderDecoderCache(self._cache, cache_cls(**encoder_kwargs))
        else:
            self._cache.reset()
        return self._cache

    def freeze_encoders(self, freeze_text_encoder=True):
        if freeze_text_encoder:
            for param in self.text_encoder.parameters():
                param.requires_grad = False
            self.text_encoder._requires_grad = False

        for param in self.audio_encoder.parameters():
            param.requires_grad = False
        self.audio_encoder._requires_grad = False

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        synced_gpus: Optional[bool] = None,
        streamer: Optional["BaseStreamer"] = None,
        **kwargs,
    ):
        """

        Generates sequences of token ids for models with a language modeling head.

        <Tip warning={true}>

        Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
        model's default generation configuration. You can override any `generation_config` by passing the corresponding
        parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.

        For an overview of generation strategies and code examples, check out the [following
        guide](./generation_strategies).

        </Tip>

        Parameters:
            inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
                The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
                method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
                should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of
                `input_ids`, `input_values`, `input_features`, or `pixel_values`.
            generation_config (`~generation.GenerationConfig`, *optional*):
                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
                passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which has the following loading
                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
                default values, whose documentation should be checked to parameterize generation.
            logits_processor (`LogitsProcessorList`, *optional*):
                Custom logits processors that complement the default logits processors built from arguments and
                generation config. If a logit processor is passed that is already created with the arguments or a
                generation config an error is thrown. This feature is intended for advanced users.
            stopping_criteria (`StoppingCriteriaList`, *optional*):
                Custom stopping criteria that complement the default stopping criteria built from arguments and a
                generation config. If a stopping criteria is passed that is already created with the arguments or a
                generation config an error is thrown. This feature is intended for advanced users.
            synced_gpus (`bool`, *optional*, defaults to `False`):
                Whether to continue running the while loop until max_length (needed for ZeRO stage 3)
            streamer (`BaseStreamer`, *optional*):
                Streamer object that will be used to stream the generated sequences. Generated tokens are passed
                through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
            kwargs (`Dict[str, Any]`, *optional*):
                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
                forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
                specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.

        Return:
            [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
            or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.

                If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
                [`~utils.ModelOutput`] types are:

                    - [`~generation.GenerateDecoderOnlyOutput`],
                    - [`~generation.GenerateBeamDecoderOnlyOutput`]

                If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
                [`~utils.ModelOutput`] types are:

                    - [`~generation.GenerateEncoderDecoderOutput`],
                    - [`~generation.GenerateBeamEncoderDecoderOutput`]
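
        Example (a minimal usage sketch rather than a doctest; the checkpoint name and import path below are
        illustrative assumptions, not pinned by this file):

        ```python
        >>> from transformers import AutoTokenizer
        >>> from parler_tts import ParlerTTSForConditionalGeneration

        >>> # `input_ids` hold the voice/style description, `prompt_input_ids` hold the text to be spoken
        >>> model = ParlerTTSForConditionalGeneration.from_pretrained("parler-tts/parler-tts-mini-v1")
        >>> tokenizer = AutoTokenizer.from_pretrained("parler-tts/parler-tts-mini-v1")

        >>> input_ids = tokenizer("A calm female voice, recorded in a quiet studio.", return_tensors="pt").input_ids
        >>> prompt_input_ids = tokenizer("Hey, how are you doing today?", return_tensors="pt").input_ids

        >>> audio_values = model.generate(input_ids=input_ids, prompt_input_ids=prompt_input_ids)
        ```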
        """
        # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects
        if generation_config is None:
            generation_config = self.generation_config

        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)  # All unused kwargs must be model kwargs
        generation_config.validate()
        self._validate_model_kwargs(model_kwargs.copy())

        if model_kwargs.get("encoder_outputs") is not None and type(model_kwargs["encoder_outputs"]) == tuple:
            # wrap the unconditional outputs as a BaseModelOutput for compatibility with the rest of generate
            model_kwargs["encoder_outputs"] = BaseModelOutput(last_hidden_state=model_kwargs["encoder_outputs"][0])

        # 2. Set generation parameters if not already defined
        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

        requires_attention_mask = "encoder_outputs" not in model_kwargs
        kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None

        # 3. Define model inputs
        inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
            inputs, generation_config.bos_token_id, model_kwargs
        )
        batch_size = inputs_tensor.shape[0]
        self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=inputs_tensor.device)

        # 4. Define other model kwargs
        model_kwargs["use_cache"] = generation_config.use_cache

        if model_kwargs.get("attention_mask", None) is None and requires_attention_mask:
            model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
                inputs_tensor, generation_config._pad_token_tensor, generation_config._eos_token_tensor
            )

        if "encoder_outputs" not in model_kwargs:
            # encoder_outputs are created and added to `model_kwargs`
            model_kwargs = self._prepare_text_encoder_kwargs_for_generation(
                inputs_tensor, model_kwargs, model_input_name, generation_config
            )

        if "prompt_hidden_states" not in model_kwargs and "prompt_input_ids" in model_kwargs:
            # `prompt_hidden_states` are created and added to `model_kwargs`
            model_kwargs = self._prepare_prompt_kwargs_for_generation(
                model_kwargs["prompt_input_ids"],
                model_kwargs,
            )

        if "decoder_input_ids" not in model_kwargs and "input_values" in model_kwargs:
            model_kwargs = self._prepare_audio_encoder_kwargs_for_generation(
                model_kwargs["input_values"],
                model_kwargs,
            )

        # 5. Prepare `input_ids` which will be used for auto-regressive generation
        input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
            batch_size=batch_size,
            model_input_name=model_input_name,
            model_kwargs=model_kwargs,
            decoder_start_token_id=generation_config._decoder_start_token_tensor,
            bos_token_id=generation_config._bos_token_tensor,
            device=inputs_tensor.device,
        )

        # 6. Prepare `max_length` depending on other stopping criteria.
        input_ids_length = input_ids.shape[-1]
        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
        has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
        generation_config = self._prepare_generated_length(
            generation_config=generation_config,
            has_default_max_length=has_default_max_length,
            has_default_min_length=has_default_min_length,
            model_input_name=model_input_name,
            inputs_tensor=inputs_tensor,
            input_ids_length=input_ids_length,
        )

        if generation_config.cache_implementation is not None and model_kwargs.get("past_key_values") is not None:
            raise ValueError(
                "Passing both `cache_implementation` (used to initialize certain caches) and `past_key_values` (a "
                "Cache object) is unsupported. Please use only one of the two."
            )
        elif generation_config.cache_implementation is not None:
            if generation_config.cache_implementation in NEED_SETUP_CACHE_CLASSES_MAPPING:
                if generation_config.cache_implementation == "static" and not self._supports_static_cache:
                    raise ValueError(
                        "This model does not support `cache_implementation='static'`. Please check the following "
                        "issue: https://github.com/huggingface/transformers/issues/28981"
                    )
                if not self.prompt_cross_attention:
                    # when we prepend prompt_hidden_state to inputs_embeds, max_cache_len needs to be updated accordingly
                    # generation_config.max_length has already been increased by input_ids_length which is
                    # already counted in input_embeds_seq_length so we remove it
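                    # (illustrative arithmetic, not values from this file: with a prompt of length P and a single
                    #  start token, inputs_embeds has length P + 1 and input_ids_length is 1, so max_cache_len
                    #  becomes generation_config.max_length + P)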
                    input_embeds_seq_length = model_kwargs["inputs_embeds"].shape[1]
                    max_cache_len = generation_config.max_length + input_embeds_seq_length - input_ids_length
                else:
                    max_cache_len = self.generation_config.max_length
                model_kwargs["past_key_values"] = self._get_cache(
                    generation_config.cache_implementation,
                    getattr(generation_config, "num_beams", 1) * batch_size,
                    max_cache_len,
                    model_kwargs,
                )
            elif generation_config.cache_implementation == "quantized":
                raise ValueError(
                    "This model does not support the quantized cache. If you want your model to support quantized "
                    "cache, please open an issue on the Parler-TTS repository https://github.com/huggingface/parler-tts"
                )
        # Use DynamicCache() instance by default. This will avoid back and forth from legacy format that
        # keeps copying the cache thus using much more memory
        elif generation_config.cache_implementation is None and self._supports_default_dynamic_cache():
            past = model_kwargs.get("past_key_values", None)
            requires_cross_attention_cache = (
                self.config.is_encoder_decoder or model_kwargs.get("encoder_outputs") is not None
            )
            if past is None:
                model_kwargs["past_key_values"] = (
                    DynamicCache()
                    if not requires_cross_attention_cache
                    else EncoderDecoderCache(DynamicCache(), DynamicCache())
                )
            elif isinstance(past, tuple):
                model_kwargs["past_key_values"] = (
                    DynamicCache.from_legacy_cache(past)
                    if not requires_cross_attention_cache
                    else EncoderDecoderCache.from_legacy_cache(past)
                )

        # build the delay pattern mask for offsetting each codebook prediction by 1 (this behaviour is specific to Parler-TTS)
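        # illustrative sketch (not an exact trace) for 4 codebooks, with B the bos id and t_kj the j-th token of codebook k:
        #   codebook 0: B t00 t01 t02 ...
        #   codebook 1: B  B  t10 t11 ...
        #   codebook 2: B  B   B  t20 ...
        #   codebook 3: B  B   B   B  ...
        # i.e. codebook k starts k steps after codebook 0; the mask records which positions are forced to bos/pad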
        input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(
            input_ids,
            bos_token_id=generation_config._bos_token_tensor,
            pad_token_id=generation_config._pad_token_tensor,
            max_length=generation_config.max_length,
        )
        # stash the delay mask so that we don't have to recompute in each forward pass
        model_kwargs["decoder_delay_pattern_mask"] = decoder_delay_pattern_mask

        # input_ids are ready to be placed on the streamer (if used)
        if streamer is not None:
            streamer.put(input_ids.cpu())

        # 7. determine generation mode
        is_greedy_gen_mode = (
            (generation_config.num_beams == 1)
            and (generation_config.num_beam_groups == 1)
            and generation_config.do_sample is False
        )
        is_sample_gen_mode = (
            (generation_config.num_beams == 1)
            and (generation_config.num_beam_groups == 1)
            and generation_config.do_sample is True
        )

        # 8. prepare distribution pre_processing samplers
        logits_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_length,
            encoder_input_ids=inputs_tensor,
            prefix_allowed_tokens_fn=None,
            logits_processor=logits_processor,
            device=input_ids.device,
        )

        # 9. prepare stopping criteria
        stopping_criteria = self._get_stopping_criteria(
            generation_config=generation_config, stopping_criteria=stopping_criteria
        )

        if is_greedy_gen_mode:
            if generation_config.num_return_sequences > 1:
                raise ValueError(
                    "num_return_sequences has to be 1 when doing greedy search, "
                    f"but is {generation_config.num_return_sequences}."
                )

            # 10. run greedy search
            outputs = self._sample(
                input_ids,
                logits_processor=logits_processor,
                stopping_criteria=stopping_criteria,
                generation_config=generation_config,
                synced_gpus=synced_gpus,
                streamer=streamer,
                **model_kwargs,
            )

        elif is_sample_gen_mode:
            # 10. prepare logits warper
            logits_warper = self._get_logits_warper(generation_config, device=input_ids.device)

            # expand input_ids with `num_return_sequences` additional sequences per batch
            input_ids, model_kwargs = self._expand_inputs_for_generation(
                input_ids=input_ids,
                expand_size=generation_config.num_return_sequences,
                is_encoder_decoder=self.config.is_encoder_decoder,
                **model_kwargs,
            )

            # 11. run sample
            outputs = self._sample(
                input_ids,
                logits_processor=logits_processor,
                logits_warper=logits_warper,
                stopping_criteria=stopping_criteria,
                generation_config=generation_config,
                synced_gpus=synced_gpus,
                streamer=streamer,
                **model_kwargs,
            )

        else:
            raise ValueError(
                "Got incompatible mode for generation; it should be either greedy or sampling. "
                "Ensure that beam search is deactivated by setting `num_beams=1` and `num_beam_groups=1`."
            )

        if generation_config.return_dict_in_generate:
            output_ids = outputs.sequences
        else:
            output_ids = outputs

        # Apply the pattern mask to the final ids
        output_ids = self.decoder.apply_delay_pattern_mask(output_ids, model_kwargs["decoder_delay_pattern_mask"])

        # Revert the delay pattern mask by filtering the bos and pad token ids from the delay pattern mask
        _, mask = self.decoder.build_delay_pattern_mask(
            input_ids,
            bos_token_id=generation_config._bos_token_tensor,
            pad_token_id=generation_config._pad_token_tensor,
            max_length=output_ids.shape[1],
        )

        mask = (mask != generation_config.bos_token_id) & (mask != generation_config.pad_token_id)
        output_ids = output_ids[mask].reshape(batch_size, self.decoder.num_codebooks, -1)

        # append the frame dimension back to the audio codes
        output_ids = output_ids[None, ...]

        audio_scales = model_kwargs.get("audio_scales")
        if audio_scales is None:
            audio_scales = [None] * batch_size

        decode_sequentially = (
            generation_config.bos_token_id in output_ids
            or generation_config.pad_token_id in output_ids
            or generation_config.eos_token_id in output_ids
        )
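        # if special ids (bos/pad/eos) remain in the generated codes, the usable length can differ per sample, so each
        # sample is decoded separately and padded afterwards; otherwise the whole batch is decoded in one call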
        if not decode_sequentially:
            output_values = self.audio_encoder.decode(
                output_ids,
                audio_scales=audio_scales,
            ).audio_values.squeeze(1)
        else:
            output_values = []
            for sample_id in range(batch_size):
                sample = output_ids[:, sample_id]
                sample_mask = (sample >= self.audio_encoder.config.codebook_size).sum(dim=(0, 1)) == 0
                if sample_mask.sum() > 0:
                    sample = sample[:, :, sample_mask]
                    sample = self.audio_encoder.decode(sample[None, ...], [audio_scales[sample_id]]).audio_values
                    output_values.append(sample.transpose(0, 2))
                else:
                    output_values.append(torch.zeros((1, 1, 1)).to(self.device))
            # TODO: we should also keep track of the output lengths; this is not straightforward to do here
            output_values = (
                torch.nn.utils.rnn.pad_sequence(output_values, batch_first=True, padding_value=0)
                .squeeze(-1)
                .squeeze(-1)
            )
        if generation_config.return_dict_in_generate:
            outputs.sequences = output_values
            return outputs
        else:
            return output_values