# Copyright (c) 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.
"""JAX multi-head attention modules"""
5
from __future__ import annotations
6
7
from enum import Enum
from functools import partial
8
9
10
from typing import Optional, Tuple, Union
import warnings

11
from jax.ad_checkpoint import checkpoint_name
12
13
import jax
import jax.numpy as jnp
14
from flax.linen import make_attention_mask
15

16
17
18
19
20
from transformer_engine_jax import NVTE_Bias_Type
from transformer_engine_jax import NVTE_Mask_Type
from transformer_engine_jax import NVTE_QKV_Layout
from transformer_engine_jax import NVTE_QKV_Format
from transformer_engine_jax import nvte_get_qkv_format
21
from transformer_engine_jax import NVTE_Softmax_Type
22

23
from . import cpp_extensions as tex
24
25
26


class AttnBiasType(Enum):
    """Bias handling mode for the attention softmax.

    NO_BIAS: Softmax is performed as softmax(scale * qk)
    PRE_SCALE_BIAS: Softmax is performed as softmax(scale * (qk + bias))
    POST_SCALE_BIAS: Softmax is performed as softmax(scale * qk + bias)
    """

    NO_BIAS = NVTE_Bias_Type.NVTE_NO_BIAS
    PRE_SCALE_BIAS = NVTE_Bias_Type.NVTE_PRE_SCALE_BIAS
    POST_SCALE_BIAS = NVTE_Bias_Type.NVTE_POST_SCALE_BIAS


class AttnMaskType(Enum):
    """Attention mask variants.

    NO_MASK: No attention mask is applied.
    PADDING_MASK: Indicates the presence of paddings at the end of each sequence.
    CAUSAL_MASK: An upper triangular mask is applied to the softmax inputs.
    PADDING_CAUSAL_MASK: A combination of both causal and padding masks.
    CAUSAL_BOTTOM_RIGHT_MASK: Causal mask anchored at the bottom-right corner.
    PADDING_CAUSAL_BOTTOM_RIGHT_MASK: Bottom-right causal mask combined with padding.
    """

    NO_MASK = NVTE_Mask_Type.NVTE_NO_MASK
    PADDING_MASK = NVTE_Mask_Type.NVTE_PADDING_MASK
    CAUSAL_MASK = NVTE_Mask_Type.NVTE_CAUSAL_MASK
    PADDING_CAUSAL_MASK = NVTE_Mask_Type.NVTE_PADDING_CAUSAL_MASK
    CAUSAL_BOTTOM_RIGHT_MASK = NVTE_Mask_Type.NVTE_CAUSAL_BOTTOM_RIGHT_MASK
    PADDING_CAUSAL_BOTTOM_RIGHT_MASK = NVTE_Mask_Type.NVTE_PADDING_CAUSAL_BOTTOM_RIGHT_MASK

    def is_causal(self):
        """Returns True if the mask is a causal mask"""
        return self in (
            AttnMaskType.CAUSAL_MASK,
            AttnMaskType.PADDING_CAUSAL_MASK,
            AttnMaskType.CAUSAL_BOTTOM_RIGHT_MASK,
            AttnMaskType.PADDING_CAUSAL_BOTTOM_RIGHT_MASK,
        )

    def is_padding(self):
        """Returns True if the mask includes padding"""
        return self in (
            AttnMaskType.PADDING_MASK,
            AttnMaskType.PADDING_CAUSAL_MASK,
            AttnMaskType.PADDING_CAUSAL_BOTTOM_RIGHT_MASK,
        )

    def is_bottom_right(self):
        """Returns True if the causal mask is calculated from the bottom-right section"""
        return self in (
            AttnMaskType.CAUSAL_BOTTOM_RIGHT_MASK,
            AttnMaskType.PADDING_CAUSAL_BOTTOM_RIGHT_MASK,
        )


78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
class AttnSoftmaxType(Enum):
    """
    VANILLA_SOFTMAX: S[:,:,:,i] = exp(S[:,:,:,i])/sum(exp(S[:,:,:,:]), dim=-1),
    OFF_BY_ONE_SOFTMAX: S[:,:,:,i] = exp(S[:,:,:,i])/(1 + sum(exp(S[:,:,:,:]), dim=-1)),
    LEARNABLE_SOFTMAX: S[:,j,:,i] = exp(S[:,j,:,i])/(exp(alpha[j]) + sum(exp(S[:,j,:,:]), dim=-1)),
    where alpha is a learnable parameter in shape [H].
    """

    VANILLA_SOFTMAX = NVTE_Softmax_Type.NVTE_VANILLA_SOFTMAX
    OFF_BY_ONE_SOFTMAX = NVTE_Softmax_Type.NVTE_OFF_BY_ONE_SOFTMAX
    LEARNABLE_SOFTMAX = NVTE_Softmax_Type.NVTE_LEARNABLE_SOFTMAX

    @classmethod
    def from_str(cls, softmax_type: str) -> "AttnSoftmaxType":
        """Convert string to AttnSoftmaxType: 'vanilla', 'off_by_one', or 'learnable'."""
        mapping = {
            "vanilla": cls.VANILLA_SOFTMAX,
            "off_by_one": cls.OFF_BY_ONE_SOFTMAX,
            "learnable": cls.LEARNABLE_SOFTMAX,
        }
        if softmax_type not in mapping:
            raise ValueError(
                f"Unknown softmax_type: {softmax_type}. "
                "Valid options: 'vanilla', 'off_by_one', 'learnable'"
            )
        return mapping[softmax_type]


107
108
109
110
111
112
113
114
115
116
117
class QKVFormat(Enum):
    """Memory format of the q, k, v tensors.

    SBHD: q,k,v memory layout with [s, b, ..., h, d]
    BSHD: q,k,v memory layout with [b, s, ..., h, d]
    THD: q,k,v memory layout is same as BSHD, but allows multiple segments packed in a sequence.
    """

    SBHD = NVTE_QKV_Format.NVTE_SBHD
    BSHD = NVTE_QKV_Format.NVTE_BSHD
    THD = NVTE_QKV_Format.NVTE_THD

118

119
class QKVLayout(Enum):
    """Packing layout of the q, k, v tensors.

    BSHD Format:
        - BS3HD: q,k,v are interleave packed as a tensor with shape [b, s, 3, h, d].
        - BSHD_BS2HD: q with shape [b, s, h, d] and kv are interleaved with shape [b, s, 2, h, d].
        - BSHD_BSHD_BSHD: q,k,v are separate tensors with shape [b, s, h, d]
    THD Format: Shape is same as BSHD layout but allows multiple segments packed in a sequence.
        - T3HD: q,k,v are interleave packed as a tensor with shape [b, s, 3, h, d].
        - THD_T2HD: q with shape [b, s, h, d] and kv are interleaved with shape [b, s, 2, h, d].
        - THD_THD_THD: q,k,v are separate tensors with shape [b, s, h, d]
    """

    BS3HD = NVTE_QKV_Layout.NVTE_BS3HD
    BSHD_BS2HD = NVTE_QKV_Layout.NVTE_BSHD_BS2HD
    BSHD_BSHD_BSHD = NVTE_QKV_Layout.NVTE_BSHD_BSHD_BSHD
    T3HD = NVTE_QKV_Layout.NVTE_T3HD
    THD_T2HD = NVTE_QKV_Layout.NVTE_THD_T2HD
    THD_THD_THD = NVTE_QKV_Layout.NVTE_THD_THD_THD

    def get_qkv_format(self):
        """
        Return the corresponding qkv_format (BSHD, SBHD, THD)
        """
        return QKVFormat(nvte_get_qkv_format(self.value))

    def is_qkvpacked(self):
        """
        Return True if the query, key, value are packed into one tensor
        """
        return self in (QKVLayout.BS3HD, QKVLayout.T3HD)

    def is_kvpacked(self):
        """
        Return True if the key, value are packed into one tensor
        """
        return self in (QKVLayout.BSHD_BS2HD, QKVLayout.THD_T2HD)

    def is_separate(self):
        """
        Return True if the query, key, value are three separate tensors
        """
        return self in (QKVLayout.BSHD_BSHD_BSHD, QKVLayout.THD_THD_THD)

    def is_thd(self):
        """
        Return True if the layout belongs to the THD family
        """
        return self in (QKVLayout.T3HD, QKVLayout.THD_T2HD, QKVLayout.THD_THD_THD)

    def to_qkvpacked(self):
        """
        Return the corresponding qkvpacked layout, useful when adjusting q, k, v layout
        """
        qkv_format = self.get_qkv_format()
        if qkv_format == QKVFormat.BSHD:
            return QKVLayout.BS3HD
        if qkv_format == QKVFormat.THD:
            return QKVLayout.T3HD
        raise ValueError(f"Unsupported {qkv_format=}")

    def to_kvpacked(self):
        """
        Return the corresponding kvpacked layout, useful when adjusting q, k, v layout
        """
        qkv_format = self.get_qkv_format()
        if qkv_format == QKVFormat.BSHD:
            return QKVLayout.BSHD_BS2HD
        if qkv_format == QKVFormat.THD:
            return QKVLayout.THD_T2HD
        raise ValueError(f"Unsupported {qkv_format=}")

    def to_separate(self):
        """
        Return the corresponding separate layout, useful when adjusting q, k, v layout
        """
        qkv_format = self.get_qkv_format()
        if qkv_format == QKVFormat.BSHD:
            return QKVLayout.BSHD_BSHD_BSHD
        if qkv_format == QKVFormat.THD:
            return QKVLayout.THD_THD_THD
        raise ValueError(f"Unsupported {qkv_format=}")

201

202
203
204
205
206
207
208
209
210
211
212
213
214
class CPStrategy(Enum):
    """Context-parallel strategies for JAX fused attention.

    DEFAULT: Default strategy will choose automatically if context parallel axis is sharded.
    ALL_GATHER: All-gather/reduce scatter implementation.
    RING: Ring attention implementation (https://arxiv.org/abs/2310.01889).
    """

    DEFAULT = 0
    ALL_GATHER = 1
    RING = 2


Reese Wang's avatar
Reese Wang committed
215
216
217
218
219
220
class ReorderStrategy(Enum):
    """
    Defines the tokens re-order strategy for context parallel load balancing for causal mask.

    - DualChunkSwap: This strategy splits each query into two chunks and does a mirror swap
      between GPUs. This is currently used for non-THD load balance. It requires the
      max_seqlens to be a multiple of 2 * cp_size.
      Example: Consider 4 GPUs with seqlens=16.
      - Before reorder: GPU0: [0, 1, 2, 3]; GPU1: [4, 5, 6, 7]; GPU2: [8, 9, 10, 11]; GPU3: [12, 13, 14, 15];
      - After reorder: GPU0: [0, 1, 14, 15]; GPU1: [4, 5, 10, 11]; GPU2: [8, 9, 6, 7]; GPU3: [12, 13, 2, 3]

    - Striped: This strategy distributes the tokens in a striped (interleaved) manner across
      the sequence. This is currently used for THD load balance.
      Example: Consider 4 GPUs with seqlens=16.
      - Before reorder: GPU0: [0, 1, 2, 3]; GPU1: [4, 5, 6, 7]; ...; GPU3: [12, 13, 14, 15]
      - After reorder: GPU0: [0, 4, 8, 12]; GPU1: [1, 5, 9, 13]; ...; GPU3: [3, 7, 11, 15]
    """

    DualChunkSwap = 0
    Striped = 1


237
def make_swa_mask(
    segment_pos_q: jnp.ndarray,
    segment_pos_kv: jnp.ndarray,
    window_size: Optional[Tuple[int, int]] = None,
    dtype: jax.typing.DTypeLike = jnp.float32,
    segment_ids_q: jnp.ndarray = None,
    segment_ids_kv: jnp.ndarray = None,
):
    """
    Generate a sliding window mask (1 = attend, 0 = masked).

    Args:
        segment_pos_q (jnp.ndarray):
            Query positions within each segment. For example, a batch with segment_ids =
            [[1, 1, 1, 2, 2, 2, 2, 2]] yields segment_pos = [[0, 1, 2, 0, 1, 2, 3, 4]].
        segment_pos_kv (jnp.ndarray):
            Key/value positions within each segment.
        window_size (Optional[Tuple[int, int]], optional):
            Sliding window size for local attention, where query at position i attends to keys
            in [i - window_size[0], i + window_size[1]] inclusive. A negative number means an
            infinite window; None means no sliding window. Defaults to None.
        dtype (jax.typing.DTypeLike, optional):
            Mask data type. Defaults to jnp.float32.
        segment_ids_q (jnp.ndarray):
            Query segment id that each token belongs to
        segment_ids_kv (jnp.ndarray):
            Key/value segment id that each token belongs to

    Returns:
        jnp.ndarray:
            The mask with shape [b, 1, max_seqlen_q, max_seqlen_kv].
    """
    if window_size is None:
        left_window = right_window = jnp.inf
    else:
        left_window, right_window = window_size
    # Negative window entries mean "unbounded" on that side.
    left_window = jnp.inf if left_window < 0 else left_window
    right_window = jnp.inf if right_window < 0 else right_window

    pos_q = jnp.expand_dims(segment_pos_q, axis=-1)
    pos_kv = jnp.expand_dims(segment_pos_kv, axis=-2)

    if segment_ids_q is not None and segment_ids_kv is not None:
        # Bottom Right Causal Mask (BRCM): anchor the window at the segment ends by
        # measuring positions relative to each token's own segment length.
        seg_len_q = jnp.expand_dims(run_length_fill(segment_ids_q), axis=-1)
        seg_len_kv = jnp.expand_dims(run_length_fill(segment_ids_kv), axis=-2)
        brcm_inv_swa_mask = seg_len_q - pos_q + left_window >= seg_len_kv - pos_kv
        return jnp.expand_dims(brcm_inv_swa_mask, axis=-3).astype(dtype)

    # All other cases other than BRCM
    inv_swa_mask = (pos_kv >= pos_q - left_window) & (pos_kv <= pos_q + right_window)
    return jnp.expand_dims(inv_swa_mask, axis=-3).astype(dtype)
294
295


296
297
298
299
300
301
def canonicalize_attn_mask_type(attn_mask_type: str):
    """Convert a string attn_mask_type to its AttnMaskType enum value.

    TE-JAX currently falls back to the padding version kernels for the libraries integration.
    The overhead between padding and non-padding version should be small.
    However, we will relax this limitation in the near future.
    """
    # Several aliases map to the same enum member (word order is not significant).
    canonical = {
        "no_mask": AttnMaskType.NO_MASK,
        "padding": AttnMaskType.PADDING_MASK,
        "causal": AttnMaskType.CAUSAL_MASK,
        "causal_bottom_right": AttnMaskType.CAUSAL_BOTTOM_RIGHT_MASK,
        "bottom_right_causal": AttnMaskType.CAUSAL_BOTTOM_RIGHT_MASK,
        "padding_causal": AttnMaskType.PADDING_CAUSAL_MASK,
        "causal_padding": AttnMaskType.PADDING_CAUSAL_MASK,
        "padding_causal_bottom_right": AttnMaskType.PADDING_CAUSAL_BOTTOM_RIGHT_MASK,
        "causal_padding_bottom_right": AttnMaskType.PADDING_CAUSAL_BOTTOM_RIGHT_MASK,
        "bottom_right_causal_padding": AttnMaskType.PADDING_CAUSAL_BOTTOM_RIGHT_MASK,
        "bottom_right_padding_causal": AttnMaskType.PADDING_CAUSAL_BOTTOM_RIGHT_MASK,
    }
    if attn_mask_type in canonical:
        return canonical[attn_mask_type]
    raise ValueError(
        f"Unsupported {attn_mask_type=}, supported attn_mask_type={{'no_mask', 'padding', 'causal',"
        " 'padding_causal', 'causal_padding', 'causal_bottom_right',"
        " 'padding_causal_bottom_right'}"
    )


def is_fused_attn_kernel_available(
    is_training,
    q_dtype,
    kv_dtype,
    qkv_layout,
    attn_bias_type,
    attn_mask_type,
    softmax_type,
    dropout_probability,
    q_num_heads,
    kv_num_heads,
    q_max_seqlen,
    kv_max_seqlen,
    head_dim_qk,
    head_dim_v,
    window_size: Optional[Tuple[int, int]] = None,
):
    """
    To check whether the fused attention kernel is supported
    """
    # (-1, -1) is the backend's encoding for "no sliding window".
    helper = tex.FusedAttnHelper(
        is_training,
        q_dtype,
        kv_dtype,
        qkv_layout,
        attn_bias_type,
        attn_mask_type,
        softmax_type,
        dropout_probability,
        q_num_heads,
        kv_num_heads,
        q_max_seqlen,
        kv_max_seqlen,
        head_dim_qk,
        head_dim_v,
        (-1, -1) if window_size is None else window_size,
    )
    return helper.is_fused_attn_kernel_available()
369
370


371
def _obtain_batch_and_max_seqlen(qkv, qkv_layout):
372
373
374
375
376
377
378
379
380
381
382
383
384
385
    if qkv_layout.is_qkvpacked():
        assert len(qkv) == 1, f"qkv must be (qkvpacked,) with {qkv_layout=}"
        batch, q_max_seqlen, *_ = qkv[0].shape
        kv_max_seqlen = q_max_seqlen
    elif qkv_layout.is_kvpacked():
        assert len(qkv) == 2, f"qkv must be (query, kvpacked) with {qkv_layout=}"
        batch, q_max_seqlen, *_ = qkv[0].shape
        kv_max_seqlen = qkv[1].shape[1]
    elif qkv_layout.is_separate():
        assert len(qkv) == 3, f"qkv must be (query, key, value) with {qkv_layout=}"
        batch, q_max_seqlen, *_ = qkv[0].shape
        kv_max_seqlen = qkv[1].shape[1]
    else:
        raise ValueError(f"Unsupported {qkv_layout=}")
386
    return batch, q_max_seqlen, kv_max_seqlen
387

388

389
390
391
def reorder_causal_load_balancing(
    tensor, strategy: ReorderStrategy, cp_size: int, seq_dim: int, stripe_size: int | None = None
):
    """Reorders a tensor for load balancing the compute of causal attention."""
    if strategy == ReorderStrategy.DualChunkSwap:
        # Dual-chunk swap takes no stripe width.
        if stripe_size is not None:
            raise ValueError(
                f"Incorrect value for CP dual chunk reordering {stripe_size=}. stripe_size must be"
                " None"
            )
        return tex.attention.reorder_causal_dual_chunk_swap(tensor, cp_size, seq_dim, False)

    if strategy == ReorderStrategy.Striped:
        # stripe_size > 1 is only supported for CP+THD+AG+Striped>1+SWA
        # stripe_size = 128 is recommended for CP+THD+AG+Striped>1+SWA
        if stripe_size is not None and stripe_size <= 0:
            raise ValueError(
                f"Incorrect value for CP striped reordering {stripe_size=}. stripe_size must be a"
                " positive integer"
            )
        # Supporting old API defaults of stripe_size=1
        width = 1 if stripe_size is None else stripe_size
        return tex.attention.reorder_causal_striped(tensor, cp_size, seq_dim, False, width)

    raise ValueError(f"Unsupported {strategy=}")
414
415


Reese Wang's avatar
Reese Wang committed
416
def inverse_reorder_causal_load_balancing(
    tensor, strategy: ReorderStrategy, cp_size: int, seq_dim: int, stripe_size: int | None = None
):
    """Inverse operation of `reorder_causal_load_balancing`.

    Args:
        tensor: The tensor whose original (unreordered) token order is to be restored.
        strategy: The reorder strategy that was used to reorder the tensor.
        cp_size: Context-parallel world size used for the reordering.
        seq_dim: Index of the sequence dimension of `tensor`.
        stripe_size: Stripe width for ReorderStrategy.Striped; must be None for
            ReorderStrategy.DualChunkSwap. None defaults to a stripe width of 1
            (the old API behavior).

    Raises:
        ValueError: If `stripe_size` is invalid for the chosen strategy, or the
            strategy is unsupported.
    """
    if strategy == ReorderStrategy.DualChunkSwap:
        if stripe_size is not None:
            raise ValueError(
                f"Incorrect value for CP dual chunk reordering {stripe_size=}. stripe_size must be"
                " None"
            )
        return tex.attention.reorder_causal_dual_chunk_swap(tensor, cp_size, seq_dim, True)
    if strategy == ReorderStrategy.Striped:
        # stripe_size > 1 is only supported for CP+THD+AG+Striped>1+SWA
        # stripe_size = 128 is recommended for CP+THD+AG+Striped>1+SWA
        if stripe_size is not None and stripe_size <= 0:
            # Message matches reorder_causal_load_balancing for consistency
            # (previously said "CP reordering" here).
            raise ValueError(
                f"Incorrect value for CP striped reordering {stripe_size=}. stripe_size must be a"
                " positive integer"
            )
        # Supporting old API defaults of stripe_size=1
        effective_stripe_size = 1 if stripe_size is None else stripe_size
        return tex.attention.reorder_causal_striped(
            tensor, cp_size, seq_dim, True, effective_stripe_size
        )
    raise ValueError(f"Unsupported {strategy=}")
441
442


443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
def _get_seqlens_and_offsets(segment_ids, max_segments_per_seq):
    # bincount map with 0s
    bincount_vmap = jax.vmap(partial(jnp.bincount, length=max_segments_per_seq + 1))
    seqlens_with_zero = bincount_vmap(segment_ids.astype(jnp.int32))
    seqlens = seqlens_with_zero[..., 1:]

    def _find_offsets(x):
        same_as_previous = jnp.logical_and(x[..., 1:] != x[..., :-1], x[..., 1:] != 0)
        first_column = x[..., :1] != 0
        same_as_previous = jnp.hstack((first_column, same_as_previous))
        return jax.vmap(partial(jnp.argwhere, size=(max_segments_per_seq + 1), fill_value=-1))(
            same_as_previous
        ).squeeze(-1)

    offsets = _find_offsets(segment_ids)
    return seqlens, offsets


def _mask_to_seqlens_offset(mask, max_segments_per_seq):
    """Reduce an id-carrying [b, 1, sq, skv] mask to per-segment seqlens/offsets.

    Row-wise (q) and column-wise (kv) maxima recover the segment id of every
    attending token, which are then converted to lengths and start offsets.
    """
    assert mask.shape[1] == 1
    mask_2d = mask.squeeze(axis=1)
    q_seqlen, q_offset = _get_seqlens_and_offsets(mask_2d.max(axis=-1), max_segments_per_seq)
    kv_seqlen, kv_offset = _get_seqlens_and_offsets(mask_2d.max(axis=-2), max_segments_per_seq)
    return q_seqlen, q_offset, kv_seqlen, kv_offset


470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
def _fast_causal_adjust_seqlen_and_offsets(
    segment_pos_q, q_len, q_offset, segment_pos_kv, kv_len, kv_offset
):
    """Adjust per-segment seqlens/offsets for causal masking on the fast path.

    Returns the adjusted tuple (q_len, kv_len, q_offset, kv_offset).
    """
    # The assumption is that for any segment tokens respect causal ordering except at the ends
    # of the segment. This allows us to tweak the length and offset by only looking at the start
    # and end tokens between segments.
    # Segments with no query or no key/value tokens need no adjustment.
    is_active_segment = jnp.logical_and(q_len > 0, kv_len > 0)

    # Intra-segment position of the first token of each segment; fill_value=-1
    # covers padded (offset == -1) segment slots.
    q_seq_id_start = jnp.take(segment_pos_q, q_offset[..., :-1], fill_value=-1)
    kv_seq_id_start = jnp.take(segment_pos_kv, kv_offset[..., :-1], fill_value=-1)
    # When the kv side starts ahead of the q side, one leading q token is skipped
    # (violates causal order per the assumption above).
    skip_start_token = jnp.logical_and(kv_seq_id_start > q_seq_id_start, is_active_segment).astype(
        jnp.int32
    )

    # Shorten the q segment from the front and shift its start offset to match;
    # the trailing 0 keeps q_offset one element longer than q_len.
    q_len -= skip_start_token
    q_offset += jnp.insert(skip_start_token, skip_start_token.shape[-1], 0, axis=-1)

    # Intra-segment position of the last token of each segment (offset of the
    # next segment minus one).
    q_seq_id_end = jnp.take(segment_pos_q, q_offset[..., 1:] - 1, fill_value=-1)
    kv_seq_id_end = jnp.take(segment_pos_kv, kv_offset[..., 1:] - 1, fill_value=-1)
    # When the kv side ends ahead of the q side, one trailing kv token is dropped.
    skip_end_token = jnp.logical_and(kv_seq_id_end > q_seq_id_end, is_active_segment).astype(
        jnp.int32
    )

    kv_len -= skip_end_token

    return q_len, kv_len, q_offset, kv_offset


def _segment_ids_pos_to_seqlens_offsets_fast_causal_path(
    segment_ids_q, segment_ids_kv, segment_pos_q, segment_pos_kv, max_segments_per_seq
):
    """Fast O(Q+KV) seqlens/offsets computation for the plain causal mask."""
    q_len, q_offset = _get_seqlens_and_offsets(segment_ids_q, max_segments_per_seq)
    kv_len, kv_offset = _get_seqlens_and_offsets(segment_ids_kv, max_segments_per_seq)
    # Trim segment boundaries that violate the causal order.
    return _fast_causal_adjust_seqlen_and_offsets(
        segment_pos_q, q_len, q_offset, segment_pos_kv, kv_len, kv_offset
    )


508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
def run_length_fill_flattened(segment_ids_flattened) -> jnp.ndarray:
    """
    Returns an array of run-lengths of the flattened segment ids
    """
    # Example:
    # Input segment_ids_flattened:       [[1 1 2 2 2 0 3 0 4 4 4 4 4 0 0 0], [1 0 0 2 2 2 0 0 3 3 4 4 4 4 0 0]]
    # run_ids:                           [[0 0 1 1 1 2 3 4 5 5 5 5 5 6 6 6], [0 1 1 2 2 2 3 3 4 4 5 5 5 5 6 6]]
    # counts:                            [[2 3 1 1 1 5 3 0 0 0 0 0 0 0 0 0], [1 2 3 2 2 4 2 0 0 0 0 0 0 0 0 0]]
    # Returns segment_ids_run_length_1d: [[2 2 3 3 3 0 1 0 5 5 5 5 5 0 0 0], [1 0 0 3 3 3 0 0 2 2 4 4 4 4 0 0]]
    # True wherever a new run begins (position 0 always does).
    starts = jnp.concatenate(
        [jnp.broadcast_to(True, (1,)), segment_ids_flattened[1:] != segment_ids_flattened[:-1]]
    )
    run_index = jnp.cumsum(starts) - 1
    # In the worst case every element starts its own run.
    max_runs = segment_ids_flattened.shape[-1]
    run_lengths = jnp.bincount(run_index, length=max_runs)
    # Broadcast each run's length back over its members.
    filled = run_lengths[run_index]
    # Padding tokens (id 0) report a run length of 0.
    return jnp.where(segment_ids_flattened == 0, 0, filled)


def run_length_fill(segment_ids) -> jnp.ndarray:
    """
    Returns an array of run-lengths of the segment ids, with shape preserved
    """
    # Example:
    # Input segment_ids:  [[1 1 2 2 2 0 3 0 4 4 4 4 4 0 0 0], [1 0 0 2 2 2 0 0 3 3 4 4 4 4 0 0]]
    # Returns run length: [[2 2 3 3 3 0 1 0 5 5 5 5 5 0 0 0], [1 0 0 3 3 3 0 0 2 2 4 4 4 4 0 0]]
    # Collapse every leading dimension so the per-row kernel can be vmapped,
    # then restore the original shape.
    shape = segment_ids.shape
    flattened = segment_ids.reshape(-1, shape[-1])
    per_row = jax.vmap(run_length_fill_flattened, in_axes=0)(flattened)
    return per_row.reshape(shape)


544
545
546
547
548
549
550
551
552
def _segment_ids_pos_to_seqlens_offsets(
    segment_ids_q,
    segment_ids_kv,
    segment_pos_q,
    segment_pos_kv,
    attn_mask_type,
    window_size,
    max_segments_per_seq,
):
    """Convert segment ids and positions into per-segment seqlens and offsets.

    Returns (q_seqlen, kv_seqlen, q_offset, kv_offset).
    """
    # TODO(mgoldfarb-nvidia): Consider an opt-in for arbitrary masking if needed here.
    # Computing the full mask is expensive due to quadratic expansion of Q * KV masking.

    # Assumptions for cudnn causal mask correctness.
    # 1. Segments are monotonic [4 4 4 0 0 5 5 5 6 6 0 0]
    # 2. No intra-segment padding, only inter-segment padding allowed
    # 3. Only start or end token within a segment may violate the causal order relationship
    #        1 5 9     0 4 8 10    0 4 8
    #    0             x           x
    #    4   x         x x         x x
    #    8   x x       x x x       x x x
    #
    # The fast path avoids expanding the mask to a Q * KV matrix and instead
    # examines only O(Q+KV) elements.
    #
    # For seqlens and seqoffsets calculations, the intermediate(temp) attn_mask creation
    # using the segment ids and pos along with mask type (causal or brcm) is sufficient.
    # It does not need to involve SWA for this mask's creation.
    # TODO(KshitijLakhani): Try exercising the fast path for BRCM as well
    if (attn_mask_type.is_causal() and window_size is None) or (
        window_size == (-1, -1) and not attn_mask_type.is_bottom_right()
    ):
        return _segment_ids_pos_to_seqlens_offsets_fast_causal_path(
            segment_ids_q, segment_ids_kv, segment_pos_q, segment_pos_kv, max_segments_per_seq
        )

    # Slow path: materialize the [b, 1, sq, skv] mask (1 = attend, 0 = masked).
    segment_mask = make_attention_mask(
        segment_ids_q,
        segment_ids_kv,
        jnp.equal,
    )
    # Same mask, but each attending position carries its segment id.
    segment_mask_with_id = make_attention_mask(
        segment_ids_q,
        segment_ids_kv,
        lambda x, y: jnp.equal(x, y) * x,
    )
    # TE JAX Attn expects the THD segments to have q_token <= kv_tokens so that a correct cross-attn type BRCM can be applied
    attn_mask = segment_mask
    if attn_mask_type.is_bottom_right():
        # Anchor causality at the bottom-right corner by comparing each token's
        # distance to the end of its own segment.
        # Example:
        # run_length_out_q:  [3 3 3 0 4 4 4 4]
        # segment_pos_q:     [0 1 2 3 0 1 2 3]
        # segment_ids_q:     [1 1 1 0 2 2 2 2]
        # run_length_out_kv: [4 4 4 4 0 0 10 10 10 10 10 10 10 10 10 10]
        # segment_pos_kv:    [0 1 2 3 4 5 0 1 2 3 4 5 6 7 8 9]
        # segment_ids_kv:    [1 1 1 1 0 0 2 2 2 2 2 2 2 2 2 2]
        # attn_mask:       [[[1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
        #                    [1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0]
        #                    [1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0]
        #                    [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
        #                    [0 0 0 0 0 0 1 1 1 1 1 1 1 0 0 0]
        #                    [0 0 0 0 0 0 1 1 1 1 1 1 1 1 0 0]
        #                    [0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 0]
        #                    [0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1]]]
        run_length_out_q = run_length_fill(segment_ids_q)
        run_length_out_kv = run_length_fill(segment_ids_kv)
        bottom_right_causal_mask = make_attention_mask(
            run_length_out_q - segment_pos_q,
            run_length_out_kv - segment_pos_kv,
            jnp.less_equal,
        )
        attn_mask = jnp.logical_and(segment_mask, bottom_right_causal_mask)
    elif attn_mask_type.is_causal():
        # Top-left causal: q attends to kv positions at or before its own.
        causal_mask = make_attention_mask(
            segment_pos_q,
            segment_pos_kv,
            jnp.greater_equal,
        )
        attn_mask = jnp.logical_and(segment_mask, causal_mask)

    attn_mask_with_id = jnp.where(attn_mask, segment_mask_with_id, 0)
    q_seqlen, q_offset, kv_seqlen, kv_offset = _mask_to_seqlens_offset(
        attn_mask_with_id, max_segments_per_seq
    )
    return q_seqlen, kv_seqlen, q_offset, kv_offset


def _segment_ids_to_seqlens(segment_ids_q, segment_ids_kv, attn_mask_type):
    # convert the mask to seqlens, mask doesn't support ragged offsets
    if not attn_mask_type.is_padding():
        q_max_seqlen = segment_ids_q.shape[-1]
        kv_max_seqlen = segment_ids_kv.shape[-1]
        q_seq_lens = jnp.full_like(q_max_seqlen, q_max_seqlen, dtype=jnp.int32)
        kv_seq_lens = jnp.full_like(kv_max_seqlen, kv_max_seqlen, dtype=jnp.int32)
    else:
        q_seq_lens = jnp.sum(segment_ids_q, axis=-1).astype(jnp.int32)
        kv_seq_lens = jnp.sum(segment_ids_kv, axis=-1).astype(jnp.int32)
    return q_seq_lens, kv_seq_lens


@jax.tree_util.register_pytree_node_class
class SequenceDescriptor:
655
    """A class to describe the sequences with flexible initialization.
656
657
658
659
660
    - SequenceDescriptor.from_seqlens
      For non-THD (non-packed) cases, where each batch has only 1 sequence.
    - SequenceDescriptor.from_seqlens_and_offsets
      For THD (packed) cases, where each batch may have not only 1 sequence.
    - SequenceDescriptor.from_segment_ids_and_pos
661
      Experimental feature for BSHD (with and without reordering) and THD (packed) cases without reordering
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
    """

    seqlens: Optional[Tuple[jnp.ndarray, jnp.ndarray]]
    seq_offsets: Optional[Tuple[jnp.ndarray, jnp.ndarray]]
    segment_ids: Optional[Tuple[jnp.ndarray, jnp.ndarray]]
    segment_pos: Optional[Tuple[jnp.ndarray, jnp.ndarray]]

    def __init__(self, seqlens=None, seq_offsets=None, segment_ids=None, segment_pos=None):
        """
        Initialize to Tuple(jnp.zeros, jnp.zeros) because the primitive only accepts pure jax array
        """
        self.seqlens = (jnp.zeros(0), jnp.zeros(0)) if seqlens is None else seqlens
        self.seq_offsets = (jnp.zeros(0), jnp.zeros(0)) if seq_offsets is None else seq_offsets
        self.segment_ids = (jnp.zeros(0), jnp.zeros(0)) if segment_ids is None else segment_ids
        self.segment_pos = (jnp.zeros(0), jnp.zeros(0)) if segment_pos is None else segment_pos

    def tree_flatten(self):
        """
        Flatten method to register as a pytree node
        """
        return ((self.seqlens, self.seq_offsets, self.segment_ids, self.segment_pos), None)

    @classmethod
    def tree_unflatten(cls, aux_data, children):
        """
        Unflatten method to register as a pytree node
        """
        del aux_data
        return cls(*children)

    def get_seqlens_and_offsets(
        self, attn_mask_type, qkv_layout, window_size, max_segments_per_seq
    ):
        """
        Acquire the seqlens/offsets for cuDNN backend
        """
        q_segment_ids, kv_segment_ids = self.segment_ids
        q_segment_pos, kv_segment_pos = self.segment_pos
        assert q_segment_ids.shape == q_segment_pos.shape
        assert kv_segment_ids.shape == kv_segment_pos.shape
        # No segment_ids/segment_pos
        if q_segment_ids.size + kv_segment_ids.size == 0:
            return self.seqlens, self.seq_offsets

        if qkv_layout.is_thd():
            q_seqlens, kv_seqlens, q_offsets, kv_offsets = _segment_ids_pos_to_seqlens_offsets(
                q_segment_ids,
                kv_segment_ids,
                q_segment_pos,
                kv_segment_pos,
                attn_mask_type,
                window_size,
                max_segments_per_seq,
            )
        else:
            q_seqlens, kv_seqlens = _segment_ids_to_seqlens(
                q_segment_ids,
                kv_segment_ids,
                attn_mask_type,
            )
            q_offsets = kv_offsets = jnp.zeros(0)
        return (q_seqlens, kv_seqlens), (q_offsets, kv_offsets)

    @classmethod
    def _expand_to_pair(
        cls, value: Union[jnp.ndarray, Tuple[jnp.ndarray, jnp.ndarray]]
    ) -> Tuple[jnp.ndarray, jnp.ndarray]:
        """
        Internal helper to ensure a single value expands into a pair (q, kv).
        """
        if isinstance(value, tuple):
            if len(value) != 2:
                raise ValueError("Input tuple must have exactly 2 elements.")
            return value

        if isinstance(value, jnp.ndarray):
            return value, value  # Duplicate for q=kv case

        raise TypeError(
            "Expected a jax.numpy.ndarray or a tuple of two jax.numpy.ndarray, "
            f"but got {type(value).__name__}."
        )

    @classmethod
    def from_seqlens(
        cls,
        seqlens: Union[jnp.ndarray, Tuple[jnp.ndarray, jnp.ndarray]],
    ) -> SequenceDescriptor:
        """
        Factory method for inputs with sequence lengths only (non-THD).
        Args:
            seqlens(Tuple(jnp.ndarray, jnp.ndarray)) = (q_seqlens, kv_seqlens):
                - q_seqlens (jnp.ndarray):
                  Sequence lengths for the query, with shape [batch].
                - kv_seqlen (jnp.ndarray):
                  Sequence lengths for the key and value, with shape [batch].
        Return:
            A SequenceDescriptor with only seqlens initialized.
        """
        q_seqlens, kv_seqlens = cls._expand_to_pair(seqlens)
        return cls(seqlens=(q_seqlens, kv_seqlens))

    @classmethod
    def from_seqlens_and_offsets(
        cls,
        seqlens: Union[jnp.ndarray, Tuple[jnp.ndarray, jnp.ndarray]],
        seq_offsets: Union[jnp.ndarray, Tuple[jnp.ndarray, jnp.ndarray]],
    ) -> SequenceDescriptor:
        """
        Factory method for inputs with sequence lengths and offsets (THD).
        Args:
            seqlens(Tuple(jnp.ndarray, jnp.ndarray)) = (q_seqlens, kv_seqlens):
                - q_seqlens (jnp.ndarray):
                  Sequence lengths for the query, with shape [batch, max_seqlen].
                  Unused positions are padded with -1.
                - kv_seqlen (jnp.ndarray):
                  Sequence lengths for the key and value, with shape [batch, max_seqlen].
                  Unused positions are padded with -1.
            seq_offsets(Tuple(jnp.ndarray, jnp.ndarray)) = (q_offsets, kv_offsets)
                - q_seq_offsets (jnp.ndarray):
                  The offsets in the sequence dim for the query, with shape [batch, max_seqlen + 1].
                  Unused positions are padded with -1.
                - kv_seq_offsets (jnp.ndarray):
                  The offsets in the sequence dim for the query, with shape [batch, max_seqlen + 1].
                  Unused positions are padded with -1.
        Return:
            A SequenceDescriptor with seqlens/seq_offsets initialized.
        """
        q_seqlens, kv_seqlens = cls._expand_to_pair(seqlens)
        q_offsets, kv_offsets = cls._expand_to_pair(seq_offsets)
        return cls(seqlens=(q_seqlens, kv_seqlens), seq_offsets=(q_offsets, kv_offsets))

    @classmethod
    def from_segment_ids_and_pos(
        cls,
        segment_ids: Union[jnp.ndarray, Tuple[jnp.ndarray, jnp.ndarray]],
        segment_pos: Optional[Union[jnp.ndarray, Tuple[jnp.ndarray, jnp.ndarray]]] = None,
799
800
801
        *,
        is_thd: bool,
        is_segment_ids_reordered: bool,
802
803
    ) -> SequenceDescriptor:
        """
804
805
806
        Experimental factory method for inputs with segment IDs and optional positions.
        segment_pos = None to be used only for: BSHD with or without load balancing and,
                                                THD without load balancing
807
808
809
810
811
812
813
814
815
816
817
818
819
        Args:
            segment_ids(Tuple(jnp.ndarray, jnp.ndarray)) = (q_segment_ids, kv_segment_ids):
                - q_segment_ids (jnp.ndarray):
                  Query segment ids start with 1, with shape [batch, max_seqlen].
                  0s are treated as paddings.
                - kv_segment_ids (jnp.ndarray):
                  Key, value segment ids start with 1, with shape [batch, max_seqlen].
                  0s are treated as paddings.
            segment_pos(Tuple(jnp.ndarray, jnp.ndarray)) = (q_segment_pos, kv_segment_pos)
                - q_segment_pos (jnp.ndarray):
                  The position inside each segment for query, with shape [batch, max_seqlen].
                - kv_segment_pos (jnp.ndarray):
                  The position inside each segment for key, value, with shape [batch, max_seqlen].
820
821
822
            is_thd(bool): If True, QKVLayout is of type THD, else it is BSHD
            is_segment_ids_reordered(bool): If True, the segment ids have been reordered for load balancing.
            Only THD with load balancing is expected to have this flag set to True
823
824
825
826
827
        Return:
            A SequenceDescriptor with segment_ids/segment_pos initialized.
        """
        q_seg_ids, kv_seg_ids = cls._expand_to_pair(segment_ids)

828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
        # Using defaults : segment pos has to be generated.
        if segment_pos is None:
            # THD + load balanced segment_ids are not supported in this function
            # BSHD + load balanced segment_ids are incorrect as BSHD handles reordering within the primitive itself
            if is_segment_ids_reordered:
                assert not is_thd, (
                    f"{segment_pos=} default arg is not supported for load balanced reordered"
                    " (Striped) THD inputs. Please pass the load balanced reordered segment_pos"
                    " and segment_ids explicitly to {from_segment_ids_and_pos.__qualname__}"
                    " using convenience function reorder_causal_load_balancing()"
                )
                assert is_thd, (
                    f"{segment_pos=} default arg is not supported for load balanced reordered (Dual"
                    " Chunk) BSHD inputs. BSHD segment_pos and segment_ids do not need to be load"
                    " balanced reordered. The reordering for these is performed within the"
                    " primitive"
                )

            # Generate the default pos for THD and BSHD non-reordered segment_ids
            def generate_default_pos(seg_ids):
                if is_thd:
                    batch_size, seq_size = seg_ids.shape
                    # Assume that the first token belongs to a segment and is not a padded token
                    first_is_segment = jnp.full((batch_size, 1), True, dtype=bool)
                    # Get segment start positions
                    segment_start = jnp.concatenate(
                        [
                            first_is_segment,
                            (seg_ids[..., 1:] != seg_ids[..., :-1]) & (seg_ids[..., 1:] != 0),
                        ],
                        axis=-1,
                    )
                    # Get offset for location where new segment starts
                    segment_start_idx = jax.vmap(lambda row: jnp.arange(row.size) * row)(
                        segment_start
                    )
                    segment_start_offsets = jax.vmap(jnp.maximum.accumulate)(segment_start_idx)

                    # Get the last non-zero index - after this everything is padding
                    # (B,)
                    last_nonzero_idx = jax.vmap(
                        lambda segids_row: jnp.max(
                            jnp.where(segids_row != 0, jnp.arange(seq_size), -1)
                        )
                    )(seg_ids)
                    seg_pos_no_thd = jnp.arange(seq_size)
                    # Get a mask which can be used to zero out all the padding at the end (after the non-zero index)
                    mask = seg_pos_no_thd <= last_nonzero_idx[:, None]

                    # Get the unmasked seg_pos for the THD sequence
                    seg_pos = (
                        jnp.broadcast_to(jnp.arange(seq_size), seg_ids.shape)
                        - segment_start_offsets
                    )

                    # Use the mask to zero out the padding at the end (after the non-zero index)
                    segment_pos = jax.vmap(
                        lambda pos_row, mask_row: jnp.where(mask_row, pos_row, 0)
                    )(seg_pos, mask)
                    return segment_pos

                seqlen = seg_ids.shape[-1]
                return jnp.broadcast_to(jnp.arange(seqlen), seg_ids.shape)
891
892
893
894

            q_seg_pos = generate_default_pos(q_seg_ids)
            kv_seg_pos = generate_default_pos(kv_seg_ids)
            segment_pos = (q_seg_pos, kv_seg_pos)
895
896
897
        # Explicitly passed segment_pos
        else:
            segment_pos = cls._expand_to_pair(segment_pos)
898
899
900
901
902
903
904
905

        return cls(
            segment_ids=(q_seg_ids, kv_seg_ids),
            segment_pos=segment_pos,
        )


def _legacy_fused_attn(
906
907
908
909
    qkv: Tuple[jnp.ndarray, ...],
    bias: Optional[jnp.ndarray],
    mask: Optional[jnp.ndarray],
    seed: Optional[jnp.ndarray],
910
911
    attn_bias_type: AttnBiasType,
    attn_mask_type: AttnMaskType,
912
    qkv_layout: QKVLayout,
913
    softmax_type: AttnSoftmaxType,
914
915
916
    scaling_factor: float,
    dropout_probability: float,
    is_training: bool,
917
    window_size: Optional[Tuple[int, int]] = None,
918
    context_parallel_strategy: CPStrategy = CPStrategy.DEFAULT,
919
920
    context_parallel_causal_load_balanced: bool = False,
    context_parallel_axis: str = "",
921
    softmax_offset: Optional[jnp.ndarray] = None,
922
):
923
    """
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
    Perform non-THD (non-packed) cuDNN fused attention.

    This function implements the following formula:
        BMM1 -> (PreBias) -> ScaleMaskSoftmax -> (PostBias) -> (Dropout) -> BMM2
    Args:
        qkv (Tuple[jnp.ndarray, ...]): A tuple containing query, key, and value tensors.
        It supports three formats:
            - `(qkv_packed,)`: For interleaved QKV packed format, typically used when query, key,
              and value have the same shape (e.g., self-attention).
            - `(query, kv_packed)`: For separate query and KV packed format, typically used when
              query has a different shape (e.g., cross-attention).
            - `(query, key, value)`: For separate query, key, and value tensors.
        bias (Optional[jnp.ndarray]): An optional bias tensor to be added to the attention scores.
        mask (Optional[jnp.ndarray]):
            An optional mask tensor to mask out the attention scores, `True` means mask out.
            Intra-sequence padding is not valid. The padded tokens can only on the right-most.
            Otherwise the results will be wrong.
        seed (Optional[jnp.ndarray]): Optional random seed for dropout.
Reese Wang's avatar
Reese Wang committed
942
943
        attn_bias_type (AttnBiasType): Type of attention bias.
        attn_mask_type (AttnMaskType): Type of attention mask.
944
        softmax_type (AttnSoftmaxType): Type of attention softmax.
Reese Wang's avatar
Reese Wang committed
945
        qkv_layout (QKVLayout): Layout of the QKV tensors.
946
947
948
        scaling_factor (float): Scaling factor for the attention scores.
        dropout_probability (float): Dropout probability to apply during attention.
        is_training (bool): Flag indicating whether the model is in training mode.
949
        window_size (Optional[Tuple[int, int]]): Sliding window size.
950
951
952
        context_parallel_causal_load_balanced (bool):
            Indicates the sequences are ordered for causal mask load balancing when running context parallelism.
        context_parallel_axis (str): The name of the context parallel axis.
953
954
    Returns:
        (jnp.ndarray): The output tensor from the fused attention.
955
    """
956
    assert (
957
        not qkv_layout.is_thd()
958
959
960
961
    ), "Please use transformer_engine.jax.attention.fused_attn_thd for THD format."

    # Check inputs qkv
    match qkv_layout:
Reese Wang's avatar
Reese Wang committed
962
        case QKVLayout.BS3HD:
963
            assert len(qkv) == 1, f"qkv=(packed_qkv,) is expected with {qkv_layout=} but got {qkv=}"
Reese Wang's avatar
Reese Wang committed
964
        case QKVLayout.BSHD_BS2HD:
965
966
967
            assert (
                len(qkv) == 2
            ), f"qkv=(query, packed_kv) is expected with {qkv_layout=} but got {qkv=}"
Reese Wang's avatar
Reese Wang committed
968
        case QKVLayout.BSHD_BSHD_BSHD:
969
970
971
            assert (
                len(qkv) == 3
            ), f"qkv=(query, key, value) is expected with {qkv_layout=} but got {qkv=}"
Reese Wang's avatar
Reese Wang committed
972
973
        case _:
            raise ValueError(f"Unknown {qkv_layout=}")
974
975

    # convert the mask to seqlens, mask doesn't support ragged offsets
976
    if not attn_mask_type.is_padding():
977
978
979
        batch, q_max_seqlen, kv_max_seqlen = _obtain_batch_and_max_seqlen(qkv, qkv_layout)
        q_seq_lens = jnp.full((batch,), q_max_seqlen, dtype=jnp.int32)
        kv_seq_lens = jnp.full((batch,), kv_max_seqlen, dtype=jnp.int32)
zlsh80826's avatar
zlsh80826 committed
980
    else:
981
        assert mask is not None
982
        mask = jnp.logical_not(mask)
983
        q_seq_lens = jnp.sum(mask, axis=-2, dtype=jnp.int32)[..., 0, 0]
984
        if attn_mask_type == AttnMaskType.PADDING_MASK:
985
            kv_seq_lens = jnp.sum(mask, axis=-1, dtype=jnp.int32)[..., 0, 0]
986
987
        else:
            # When mask is causal, the actual seqlen is not the last row, use max to find it
988
            kv_seq_lens = jnp.max(jnp.sum(mask, axis=-1, dtype=jnp.int32), axis=(-1, -2))
989

990
991
    output = _fused_attn(
        qkv,
992
        bias,
993
        softmax_offset,
994
        SequenceDescriptor.from_seqlens((q_seq_lens, kv_seq_lens)),
995
        seed,
996
997
        attn_bias_type=attn_bias_type,
        attn_mask_type=attn_mask_type,
998
        softmax_type=softmax_type,
999
        qkv_layout=qkv_layout,
1000
1001
1002
        scaling_factor=scaling_factor,
        dropout_probability=dropout_probability,
        is_training=is_training,
1003
        max_segments_per_seq=1,
1004
        window_size=window_size,
1005
        context_parallel_strategy=context_parallel_strategy,
1006
1007
        context_parallel_causal_load_balanced=context_parallel_causal_load_balanced,
        context_parallel_axis=context_parallel_axis,
1008
    )
1009

1010
    return output
1011
1012


1013
1014
1015
1016
1017
1018
1019
1020
def fused_attn_thd(
    qkv: Tuple[jnp.ndarray, ...],
    bias: Optional[jnp.ndarray],
    q_seq_lens: jnp.ndarray,
    kv_seq_lens: jnp.ndarray,
    q_seq_offsets: jnp.ndarray,
    kv_seq_offsets: jnp.ndarray,
    seed: Optional[jnp.ndarray],
1021
1022
    attn_bias_type: AttnBiasType,
    attn_mask_type: AttnMaskType,
1023
    qkv_layout: QKVLayout,
1024
1025
1026
    scaling_factor: float,
    dropout_probability: float,
    is_training: bool,
1027
    max_segments_per_seq: int = 1,
1028
    window_size: Optional[Tuple[int, int]] = None,
1029
    context_parallel_strategy: CPStrategy = CPStrategy.DEFAULT,
1030
1031
    context_parallel_causal_load_balanced: bool = False,
    context_parallel_axis: str = "",
1032
    softmax_offset: Optional[jnp.ndarray] = None,
1033
):
1034
    """
1035
    Deprecated THD fused attn, please use fusd_attn with SequenceDescriptor
1036
    """
1037
1038
1039
1040
1041
    warnings.warn(
        "fused_attn_thd is deprecated, please use fused_attn with SequenceDescriptor",
        DeprecationWarning,
    )

1042
    assert (
1043
        qkv_layout.is_thd()
1044
1045
1046
1047
    ), "Please use transformer_engine.jax.attention.fused_attn for non-THD format."

    # Check inputs qkv
    match qkv_layout:
Reese Wang's avatar
Reese Wang committed
1048
        case QKVLayout.T3HD:
1049
            assert len(qkv) == 1, f"qkv=(packed_qkv,) is expected with {qkv_layout=} but got {qkv=}"
Reese Wang's avatar
Reese Wang committed
1050
        case QKVLayout.THD_T2HD:
1051
1052
1053
            assert (
                len(qkv) == 2
            ), f"qkv=(query, packed_kv) is expected with {qkv_layout=} but got {qkv=}"
Reese Wang's avatar
Reese Wang committed
1054
        case QKVLayout.THD_THD_THD:
1055
1056
1057
            assert (
                len(qkv) == 3
            ), f"qkv=(query, key, value) is expected with {qkv_layout=} but got {qkv=}"
Reese Wang's avatar
Reese Wang committed
1058
1059
        case _:
            raise ValueError(f"Unknown {qkv_layout=}")
1060
1061
1062
1063
1064
1065

    batch, q_max_seqlen, kv_max_seqlen = _obtain_batch_and_max_seqlen(qkv, qkv_layout)
    assert q_seq_lens.shape == (batch, q_max_seqlen)
    assert kv_seq_lens.shape == (batch, kv_max_seqlen)
    assert q_seq_offsets.shape == (batch, q_max_seqlen + 1)
    assert kv_seq_offsets.shape == (batch, kv_max_seqlen + 1)
1066

1067
    output = _fused_attn(
1068
        qkv,
1069
        bias,
1070
        softmax_offset,
1071
1072
1073
        SequenceDescriptor.from_seqlens_and_offsets(
            (q_seq_lens, kv_seq_lens), (q_seq_offsets, kv_seq_offsets)
        ),
1074
1075
1076
        seed,
        attn_bias_type=attn_bias_type,
        attn_mask_type=attn_mask_type,
1077
        qkv_layout=qkv_layout,
1078
        scaling_factor=scaling_factor,
1079
        softmax_type=AttnSoftmaxType.VANILLA_SOFTMAX,
1080
1081
        dropout_probability=dropout_probability,
        is_training=is_training,
1082
        max_segments_per_seq=max_segments_per_seq,
1083
        window_size=window_size,
1084
        context_parallel_strategy=context_parallel_strategy,
1085
1086
        context_parallel_causal_load_balanced=context_parallel_causal_load_balanced,
        context_parallel_axis=context_parallel_axis,
1087
    )
1088
1089
1090
1091

    return output


1092
@partial(jax.custom_vjp, nondiff_argnums=(5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18))
def _fused_attn(
    qkv: Tuple[jnp.ndarray, ...],
    bias: Optional[jnp.ndarray],
    softmax_offset: Optional[jnp.ndarray],
    sequence_descriptor: SequenceDescriptor,
    seed: Optional[jnp.ndarray],
    attn_bias_type: AttnBiasType,
    attn_mask_type: AttnMaskType,
    qkv_layout: QKVLayout,
    softmax_type: AttnSoftmaxType,
    scaling_factor: float,
    dropout_probability: float,
    is_training: bool,
    max_segments_per_seq: int,
    window_size: Optional[Tuple[int, int]],
    context_parallel_strategy: CPStrategy,
    context_parallel_causal_load_balanced: bool,
    context_parallel_axis: str,
    context_checkpoint_name: str = "context",
    stripe_size: int | None = None,
):
    """Custom-VJP entry point for fused attention.

    The primal simply runs the forward rule and discards the residuals;
    `defvjp` below wires `_fused_attn_fwd_rule`/`_fused_attn_bwd_rule` in for
    differentiation. Arguments 5-18 (all configuration) are non-differentiable.
    """
    output, _ = _fused_attn_fwd_rule(
        qkv,
        bias,
        softmax_offset,
        sequence_descriptor,
        seed,
        attn_bias_type,
        attn_mask_type,
        qkv_layout,
        softmax_type,
        scaling_factor,
        dropout_probability,
        is_training,
        max_segments_per_seq,
        window_size,
        context_parallel_strategy,
        context_parallel_causal_load_balanced,
        context_parallel_axis,
        context_checkpoint_name=context_checkpoint_name,
        stripe_size=stripe_size,
    )
    return output
def _fused_attn_fwd_rule(
    qkv,
    bias,
    softmax_offset,
    sequence_descriptor,
    seed,
    attn_bias_type,
    attn_mask_type,
    qkv_layout,
    softmax_type,
    scaling_factor,
    dropout_probability,
    is_training,
    max_segments_per_seq,
    window_size,
    context_parallel_strategy,
    context_parallel_causal_load_balanced,
    context_parallel_axis,
    context_checkpoint_name,
    stripe_size,
):
    """Forward rule of the `_fused_attn` custom VJP.

    Runs the cuDNN forward primitive, tags the outputs with a checkpoint name
    (so rematerialization policies can target them), and returns the output
    together with the residuals needed by `_fused_attn_bwd_rule`.
    """
    output, softmax_aux, rng_state = tex.fused_attn_fwd(
        qkv,
        bias,
        softmax_offset,
        sequence_descriptor,
        seed,
        attn_bias_type=attn_bias_type,
        attn_mask_type=attn_mask_type,
        softmax_type=softmax_type,
        qkv_layout=qkv_layout,
        scaling_factor=scaling_factor,
        dropout_probability=dropout_probability,
        is_training=is_training,
        max_segments_per_seq=max_segments_per_seq,
        window_size=window_size,
        context_parallel_strategy=context_parallel_strategy,
        context_parallel_causal_load_balanced=context_parallel_causal_load_balanced,
        context_parallel_axis=context_parallel_axis,
        stripe_size=stripe_size,
    )
    output = checkpoint_name(output, context_checkpoint_name)
    softmax_aux = checkpoint_name(softmax_aux, context_checkpoint_name)
    rng_state = checkpoint_name(rng_state, context_checkpoint_name)
    # Residuals: the order here must match the unpacking in _fused_attn_bwd_rule.
    return output, (
        qkv,
        bias,
        sequence_descriptor,
        softmax_aux,
        rng_state,
        softmax_offset,
        output,
    )
def _fused_attn_bwd_rule(
    attn_bias_type,
    attn_mask_type,
    qkv_layout,
    softmax_type,
    scaling_factor,
    dropout_probability,
    is_training,
    max_segments_per_seq,
    window_size,
    context_parallel_strategy,
    context_parallel_causal_load_balanced,
    context_parallel_axis,
    context_checkpoint_name,
    stripe_size,
    ctx,
    dz,
):
    """Backward rule of the `_fused_attn` custom VJP.

    Receives the non-differentiable configuration arguments first (matching
    `nondiff_argnums`), then the residuals `ctx` from the forward rule and the
    incoming cotangent `dz`. Returns one gradient per differentiable primal
    argument: (qkv, bias, softmax_offset, sequence_descriptor, seed).
    """
    del context_checkpoint_name
    (
        qkv,
        bias,
        sequence_descriptor,
        softmax_aux,
        rng_state,
        softmax_offset,
        output,
    ) = ctx
    grad_qkv, grad_bias, grad_softmax_offset = tex.fused_attn_bwd(
        qkv,
        bias,
        softmax_offset,
        softmax_aux,
        rng_state,
        output,
        dz,
        sequence_descriptor,
        attn_bias_type=attn_bias_type,
        attn_mask_type=attn_mask_type,
        softmax_type=softmax_type,
        qkv_layout=qkv_layout,
        scaling_factor=scaling_factor,
        dropout_probability=dropout_probability,
        is_training=is_training,
        max_segments_per_seq=max_segments_per_seq,
        window_size=window_size,
        context_parallel_strategy=context_parallel_strategy,
        context_parallel_causal_load_balanced=context_parallel_causal_load_balanced,
        context_parallel_axis=context_parallel_axis,
        stripe_size=stripe_size,
    )
    # Suppress gradients for inputs that were not actually used.
    if attn_bias_type == AttnBiasType.NO_BIAS:
        grad_bias = None
    if softmax_type != AttnSoftmaxType.LEARNABLE_SOFTMAX:
        grad_softmax_offset = None
    # sequence_descriptor and seed carry no gradients.
    return (
        grad_qkv,
        grad_bias,
        grad_softmax_offset,
        None,
        None,
    )
# Wire the forward/backward rules into the custom VJP of `_fused_attn`.
_fused_attn.defvjp(_fused_attn_fwd_rule, _fused_attn_bwd_rule)
def fused_attn(
    qkv: Tuple[jnp.ndarray, ...],
    bias: Optional[jnp.ndarray],
    sequence_descriptor: SequenceDescriptor,
    seed: Optional[jnp.ndarray],
    attn_bias_type: AttnBiasType,
    attn_mask_type: AttnMaskType,
    qkv_layout: QKVLayout,
    softmax_type: AttnSoftmaxType,
    scaling_factor: float,
    dropout_probability: float,
    is_training: bool,
    max_segments_per_seq: int = 1,
    window_size: Optional[Tuple[int, int]] = None,
    context_parallel_strategy: CPStrategy = CPStrategy.DEFAULT,
    context_parallel_causal_load_balanced: bool = False,
    context_parallel_axis: str = "",
    context_checkpoint_name: str = "context",
    softmax_offset: Optional[jnp.ndarray] = None,
    stripe_size: int | None = None,
):
    """
    Perform cuDNN fused attention.

    This function implements the following formula:
        BMM1 -> (PreBias) -> ScaleMaskSoftmax -> (PostBias) -> (Dropout) -> BMM2
    Args:
        qkv (Tuple[jnp.ndarray, ...]): A tuple containing query, key, and value tensors.
        It supports three formats:
            - `(qkv_packed,)`: For interleaved QKV packed format, typically used when query, key,
              and value have the same shape (e.g., self-attention).
            - `(query, kv_packed)`: For separate query and KV packed format, typically used when
              query has a different shape (e.g., cross-attention).
            - `(query, key, value)`: For separate query, key, and value tensors.
        bias (Optional[jnp.ndarray]): An optional bias tensor to be added to the attention scores.
        sequence_descriptor (SequenceDescriptor): Descriptor for how to describe the sequence.
        seed (Optional[jnp.ndarray]): Optional random seed for dropout.
        attn_bias_type (AttnBiasType): Type of attention bias.
        attn_mask_type (AttnMaskType): Type of attention mask.
        qkv_layout (QKVLayout): Layout of the QKV tensors.
        softmax_type (AttnSoftmaxType): Type of attention softmax.
        scaling_factor (float): Scaling factor for the attention scores.
        dropout_probability (float): Dropout probability to apply during attention.
        is_training (bool): Flag indicating whether the model is in training mode.
        max_segments_per_seq (int):
            Indicating the maximum number of segments inside a sequence. This parameter is to
            constrain the limit usage and need to be static during the e2e training. The XLA compile
            time and memory consumption is proportional to `max_segments_per_seq`.
        window_size (Optional[Tuple[int, int]]):
            Sliding window size.
        context_parallel_strategy (CPStrategy):
            Strategy to use when running context parallelism.
        context_parallel_causal_load_balanced (bool):
            Indicates the sequences are ordered for causal mask load balancing when running context parallelism.
        context_parallel_axis (str): The name of the context parallel axis.
        context_checkpoint_name (str): The name of the context checkpoint for the custom VJP forward pass.
        softmax_offset (Optional[jnp.ndarray]): An optional learnable softmax offset tensor with shape
            [1, num_heads, 1, 1]. Used when softmax_type is AttnSoftmaxType.LEARNABLE_SOFTMAX.
            If provided, this parameter will receive gradients during backpropagation.
        stripe_size (int | None):
            Indicates the striping size to be used when using ReorderStrategy.Striped.
            Currently, a stripe_size > 1 is only supported for CP + THD + Striped + AG, whereas a stripe_size=1
            is supported for both, CP + THD + Striped + AG and CP + THD + Striped + P2P(Ring)
            None indicates no striping strategy
    Returns:
        (jnp.ndarray): The output tensor from the fused attention.

    Examples (non-THD, also known as non-packed):
        >>> #  q_segment_ids = [[1, 1, 1, 0], [1, 1, 0, 0]], 0 means padded tokens
        >>> # kv_segment_ids = [[1, 0, 0, 0], [1, 1, 0, 0]], 0 means padded tokens
        >>> b, s, h, d = 2, 4, 12, 64
        >>> qkv = jnp.zeros((b, s, 3, h, d), dtype=jnp.bfloat16)
        >>> q_seq_lens = jnp.asarray([3, 2])
        >>> kv_seq_lens = jnp.asarray([1, 2])
        >>> sequence_desc = SequenceDescriptor.from_seqlens(
                seqlens=(q_seq_lens, kv_seq_lens))
        >>> out = fused_attn((qkv,), None, sequence_desc, None,
                             AttnBiasType.NO_BIAS, AttnMaskType.PADDING_CAUSAL_MASK,
                             QKVLayout.BS3HD, 0.125, 0, True, 3)

    Examples (THD, also known as packed):
        >>> # segment_ids = [[1, 1, 2, 3], [1, 1, 2, 0]], 0 means padded tokens
        >>> # segment_pos = [[0, 1, 0, 0], [0, 1, 0, 1]]
        >>> b, s, h, d = 2, 4, 12, 64
        >>> qkv = jnp.zeros((b, s, 3, h, d), dtype=jnp.bfloat16)
        >>> # 3 segments in first seq, 2 segments in second seq
        >>> q_seq_lens = kv_seq_lens = jnp.asarray([[2, 1, 1, -1], [2, 1, -1, -1]])
        >>> # seq_offsets need to include the end offset of the last segments
        >>> q_seq_offsets = kv_seq_offsets = jnp.asarray([[0, 2, 3, 4, -1], [0, 2, 3, -1, -1]])
        >>> sequence_desc = SequenceDescriptor.from_seqlens_and_offsets(
                seqlens=(q_seq_lens, kv_seq_lens),
                seq_offsets=(q_seq_offsets, kv_seq_offsets))
        >>> out = fused_attn((qkv,), None, sequence_desc, None,
                             AttnBiasType.NO_BIAS, AttnMaskType.PADDING_CAUSAL_MASK,
                             QKVLayout.T3HD, 0.125, 0, True, 3)
    """
    # Backward-compatible path: callers may still pass a raw mask ndarray (or
    # None) instead of a SequenceDescriptor. Warn and delegate to the legacy
    # implementation, which does not support THD/segmented sequences.
    if sequence_descriptor is None or isinstance(sequence_descriptor, jnp.ndarray):
        warnings.warn(
            "Pass mask to fused_attn is deprecated, please use SequenceDescriptor instead. "
            + "See help(transformer_engine.jax.attention.SequenceDescriptor) for details.",
            DeprecationWarning,
        )
        if max_segments_per_seq != 1:
            raise ValueError("Passing mask is only supported for non-THD case.")
        return _legacy_fused_attn(
            qkv,
            bias,
            sequence_descriptor,
            seed,
            attn_bias_type=attn_bias_type,
            attn_mask_type=attn_mask_type,
            softmax_type=softmax_type,
            qkv_layout=qkv_layout,
            scaling_factor=scaling_factor,
            dropout_probability=dropout_probability,
            is_training=is_training,
            window_size=window_size,
            context_parallel_strategy=context_parallel_strategy,
            context_parallel_causal_load_balanced=context_parallel_causal_load_balanced,
            context_parallel_axis=context_parallel_axis,
            softmax_offset=softmax_offset,
        )
    # Standard path: dispatch to the custom-VJP fused attention primitive.
    output = _fused_attn(
        qkv,
        bias,
        softmax_offset,
        sequence_descriptor,
        seed,
        attn_bias_type=attn_bias_type,
        attn_mask_type=attn_mask_type,
        qkv_layout=qkv_layout,
        softmax_type=softmax_type,
        scaling_factor=scaling_factor,
        dropout_probability=dropout_probability,
        is_training=is_training,
        max_segments_per_seq=max_segments_per_seq,
        window_size=window_size,
        context_parallel_strategy=context_parallel_strategy,
        context_parallel_causal_load_balanced=context_parallel_causal_load_balanced,
        context_parallel_axis=context_parallel_axis,
        context_checkpoint_name=context_checkpoint_name,
        stripe_size=stripe_size,
    )
    return output