# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# See LICENSE for license information.

from typing import Optional
import math

import jax
import jax.numpy as jnp
import numpy as np
import pytest

from flax.linen import combine_masks
from flax.linen import dot_product_attention
from flax.linen import make_attention_mask
from flax.linen import make_causal_mask
from jax import lax
from jax import nn as jax_nn
from jax import value_and_grad, jit

from transformer_engine.jax.fused_attn import AttnBiasType, AttnMaskType
from transformer_engine.jax.fused_attn import is_fused_attn_kernel_available
from transformer_engine.jax.fused_attn import self_fused_attn, cross_fused_attn

# Type annotations
Array = jnp.ndarray

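# Test case shapes: SELF_CASES entries are (batch, seqlen, heads, head_dim),
# CROSS_CASES entries are (batch, q_seqlen, kv_seqlen, heads, head_dim).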
SELF_CASES = [(32, 512, 16, 64), (32, 128, 16, 64)]
CROSS_CASES = [(32, 128, 512, 16, 64)]
DTYPES = [jnp.bfloat16, jnp.float16]
PAD_RATIO = [0.3]


def make_decoder_mask(tokens: Array) -> Array:
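    """Combine a causal mask with a padding mask derived from non-zero tokens."""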
    causal_mask = make_causal_mask(tokens)
    padding_mask = make_attention_mask(tokens > 0, tokens > 0)
    return combine_masks(causal_mask, padding_mask)


def jax_self_fused_attn(qkv, bias, q_token, kv_token, dropout_rng, **kwargs):
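    """Reference self-attention using flax's dot_product_attention on a packed (b, s, 3, h, d) qkv tensor."""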
    attn_mask_type = kwargs['attn_mask_type']
    if attn_mask_type == AttnMaskType.CAUSAL_MASK:
        mask = make_decoder_mask(q_token)
    else:
        mask = make_attention_mask(q_token > 0, kv_token > 0)

    query, key, value = jnp.split(qkv, [1, 2], axis=-3)
    query = jnp.squeeze(query)
    key = jnp.squeeze(key)
    value = jnp.squeeze(value)

    output = dot_product_attention(query,
                                   key,
                                   value,
                                   bias=bias,
                                   mask=mask,
                                   deterministic=not kwargs['is_training'],
                                   dropout_rate=kwargs['dropout_probability'],
                                   dropout_rng=dropout_rng,
                                   dtype=qkv.dtype)
    return output


def jax_cross_fused_attn(q, kv, q_token, kv_token, dropout_rng, **kwargs):
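    """Reference cross-attention using flax's dot_product_attention with separate q and packed (b, s_kv, 2, h, d) kv tensors."""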
    assert q.dtype == kv.dtype

    attn_mask_type = kwargs['attn_mask_type']
    if attn_mask_type == AttnMaskType.CAUSAL_MASK:
        raise NotImplementedError
    mask = make_attention_mask(q_token > 0, kv_token > 0)

    query = q
    key, value = jnp.split(kv, [1], axis=-3)
    key = jnp.squeeze(key)
    value = jnp.squeeze(value)

    output = dot_product_attention(query,
                                   key,
                                   value,
                                   bias=None,
                                   mask=mask,
                                   deterministic=not kwargs['is_training'],
                                   dropout_rate=kwargs['dropout_probability'],
                                   dropout_rng=dropout_rng,
                                   dtype=q.dtype)
    return output


def customcall_self_fused_attn(qkv, bias, q_token, kv_token, dropout_rng, **kwargs):
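    """Self-attention through the TransformerEngine fused-attention custom call."""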
    if kwargs['attn_mask_type'] == AttnMaskType.CAUSAL_MASK:
        mask = make_decoder_mask(q_token)
    else:
        mask = make_attention_mask(q_token > 0, kv_token > 0)

    # Invert the mask: flax marks attendable positions with 1, while the fused kernel expects 1 for masked-out positions
    mask = (mask == 0)

    return self_fused_attn(qkv, bias, mask, dropout_rng, **kwargs)


def customcall_cross_fused_attn(q, kv, q_token, kv_token, dropout_rng, **kwargs):
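    """Cross-attention through the TransformerEngine fused-attention custom call."""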
    assert q.dtype == kv.dtype

    if kwargs['attn_mask_type'] == AttnMaskType.CAUSAL_MASK:
        raise NotImplementedError
    mask = make_attention_mask(q_token > 0, kv_token > 0)

    # Invert the mask: flax marks attendable positions with 1, while the fused kernel expects 1 for masked-out positions
    mask = (mask == 0)

    return cross_fused_attn(q, kv, mask, dropout_rng, **kwargs)


@pytest.mark.skipif(not is_fused_attn_kernel_available(),
                    reason="Fused attention kernel is not supported.")
class TestSelfFusedAttnMax512():
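    """Tests for self_fused_attn (the max-512-sequence-length fused attention kernel) against the flax reference."""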

    def set_input(self, b, s, h, d, *, attn_bias_type, attn_mask_type, dropout_probability, dtype,
                  is_training, pad_ratio):
        key = jax.random.PRNGKey(0)
        subkeys = jax.random.split(key, 2)

        qkv_shape = (b, s, 3, h, d)
        bias_shape = (1, h, s, s)
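        # qkv packs query, key and value along axis -3; the bias broadcasts over the batch dimension.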

        pad_len = int(s * pad_ratio)
        self.valid_len = s - pad_len

        min_val, max_val = -1, 1
        self.qkv = jax.random.uniform(subkeys[0], qkv_shape, dtype, min_val, max_val)

        with_bias = attn_bias_type != AttnBiasType.NO_BIAS
        self.bias = jax.random.uniform(subkeys[1], bias_shape, dtype, min_val,
                                       max_val) if with_bias else None

        self.q_token = jnp.concatenate((jnp.ones((b, self.valid_len)), jnp.zeros((b, pad_len))),
                                       axis=-1)
        self.kv_token = self.q_token
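        # Token values of 1 mark valid positions and 0 marks padding; self-attention shares one token mask for Q and KV.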

        self.scaling_factor = 1. / math.sqrt(d)
        self.dropout_probability = dropout_probability
        self.dropout_rng = jax.random.PRNGKey(0) if self.dropout_probability > 0 else None
        self.attn_bias_type = attn_bias_type
        self.is_training = is_training

    @pytest.mark.parametrize('b, s, h, d', SELF_CASES)
    @pytest.mark.parametrize('attn_bias_type', [AttnBiasType.NO_BIAS, AttnBiasType.POST_SCALE_BIAS])
    @pytest.mark.parametrize('attn_mask_type',
                             [AttnMaskType.PADDING_MASK, AttnMaskType.CAUSAL_MASK])
    @pytest.mark.parametrize('dropout_probability', [0., 0.1])
    @pytest.mark.parametrize('dtype', DTYPES)
    @pytest.mark.parametrize('is_training', [True, False])
    @pytest.mark.parametrize('pad_ratio', PAD_RATIO)
    def test_sanity(self, b, s, h, d, attn_bias_type, attn_mask_type, dropout_probability, dtype,
                    is_training, pad_ratio):

        def grad_func(func, *args, **kwargs):
            # Keep only the valid (non-padded) part of the output for the gradient
            # fused_attn_max_512 output has shape (b, s, h, d)
            valid_ret, _ = jnp.split(func(*args, **kwargs), (self.valid_len,), axis=1)
            return jnp.mean(valid_ret, dtype=jnp.float32).astype(dtype)

        self.set_input(b,
                       s,
                       h,
                       d,
                       attn_bias_type=attn_bias_type,
                       attn_mask_type=attn_mask_type,
                       dropout_probability=dropout_probability,
                       dtype=dtype,
                       is_training=is_training,
                       pad_ratio=pad_ratio)

        kwargs = {
            'attn_bias_type': self.attn_bias_type,
            'attn_mask_type': attn_mask_type,
            'scaling_factor': self.scaling_factor,
            'dropout_probability': self.dropout_probability,
            'is_training': self.is_training
        }

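        # Sanity check: ensure the jitted custom call and its gradients w.r.t. qkv and bias run without error.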
        jitted_primitive = jit(
            value_and_grad(
                lambda qkv, bias, q_token, kv_token, dropout_rng: grad_func(
                    customcall_self_fused_attn, qkv, bias, q_token, kv_token, dropout_rng, **kwargs
                ), (0, 1)))

        primitive_out, (primitive_dqkv,
                        primitive_dbias) = jitted_primitive(self.qkv, self.bias, self.q_token,
                                                            self.kv_token, self.dropout_rng)

    @pytest.mark.parametrize('b, s, h, d', SELF_CASES)
    @pytest.mark.parametrize('attn_bias_type', [AttnBiasType.NO_BIAS, AttnBiasType.POST_SCALE_BIAS])
    @pytest.mark.parametrize('attn_mask_type',
                             [AttnMaskType.PADDING_MASK, AttnMaskType.CAUSAL_MASK])
    @pytest.mark.parametrize('dropout_probability', [0., 0.1])
    @pytest.mark.parametrize('dtype', DTYPES)
    @pytest.mark.parametrize('is_training', [True, False])
    @pytest.mark.parametrize('pad_ratio', PAD_RATIO)
    def test_forward(self, b, s, h, d, attn_bias_type, attn_mask_type, dropout_probability, dtype,
                     is_training, pad_ratio):
        # Dropout results are not bit-exact reproducible between implementations, so skip those cases
        if is_training and dropout_probability > 0.:
            return

        self.set_input(b,
                       s,
                       h,
                       d,
                       attn_bias_type=attn_bias_type,
                       attn_mask_type=attn_mask_type,
                       dropout_probability=dropout_probability,
                       dtype=dtype,
                       is_training=is_training,
                       pad_ratio=pad_ratio)

        primitive_out = customcall_self_fused_attn(self.qkv,
                                                   self.bias,
                                                   self.q_token,
                                                   self.kv_token,
                                                   self.dropout_rng,
                                                   attn_bias_type=self.attn_bias_type,
                                                   attn_mask_type=attn_mask_type,
                                                   scaling_factor=self.scaling_factor,
                                                   dropout_probability=self.dropout_probability,
                                                   is_training=self.is_training)

        reference_out = jax_self_fused_attn(self.qkv,
                                            self.bias,
                                            self.q_token,
                                            self.kv_token,
                                            self.dropout_rng,
                                            attn_mask_type=attn_mask_type,
                                            scaling_factor=self.scaling_factor,
                                            dropout_probability=self.dropout_probability,
                                            is_training=self.is_training)

        ref_valid, _ = jnp.split(reference_out, (self.valid_len,), axis=1)
        pri_valid, pri_invalid = jnp.split(primitive_out, (self.valid_len,), axis=1)

        np.testing.assert_allclose(jnp.asarray(pri_valid, np.float32),
                                   jnp.asarray(ref_valid, np.float32),
                                   rtol=1e-4,
                                   atol=1e-2)

        np.testing.assert_allclose(jnp.asarray(pri_invalid, jnp.float32),
                                   jnp.zeros_like(pri_invalid, jnp.float32))

    @pytest.mark.parametrize('b, s, h, d', SELF_CASES)
    @pytest.mark.parametrize('attn_bias_type', [AttnBiasType.NO_BIAS, AttnBiasType.POST_SCALE_BIAS])
    @pytest.mark.parametrize('attn_mask_type',
                             [AttnMaskType.PADDING_MASK, AttnMaskType.CAUSAL_MASK])
    @pytest.mark.parametrize('dropout_probability', [0.])    # dropout cannot produce bit-exact results
    @pytest.mark.parametrize('dtype', DTYPES)
    @pytest.mark.parametrize('is_training', [True])    # the backward pass only runs when is_training
    @pytest.mark.parametrize('pad_ratio', PAD_RATIO)
    def test_forward_backward(self, b, s, h, d, attn_bias_type, attn_mask_type, dropout_probability,
                              dtype, is_training, pad_ratio):
        self.set_input(b,
                       s,
                       h,
                       d,
                       attn_bias_type=attn_bias_type,
                       attn_mask_type=attn_mask_type,
                       dropout_probability=dropout_probability,
                       dtype=dtype,
                       is_training=is_training,
                       pad_ratio=pad_ratio)

        def grad_func(fused_attn_max_512_func, *args, **kwargs):
            # The gradient is small, so use a gradient multiplier to amplify it
            gradient_multiplier = 1000 if dtype == jnp.bfloat16 else 10000
            if attn_mask_type == AttnMaskType.CAUSAL_MASK:
                gradient_multiplier = gradient_multiplier / 10
            # Keep only the valid (non-padded) part of the output for the gradient
            # fused_attn_max_512 output has shape (b, s, h, d)
            valid_fused_attn_max_512_ret, _ = jnp.split(fused_attn_max_512_func(*args, **kwargs),
                                                        (self.valid_len,),
                                                        axis=1)
            return (jnp.mean(valid_fused_attn_max_512_ret, dtype=jnp.float32) *
                    gradient_multiplier).astype(dtype)

        kwargs = {
            'attn_bias_type': self.attn_bias_type,
            'attn_mask_type': attn_mask_type,
            'scaling_factor': self.scaling_factor,
            'dropout_probability': self.dropout_probability,
            'is_training': self.is_training
        }

        # Summing the results in FP16/BF16 may overflow, so use FP32 for the summation
        jitted_primitive = jit(
            value_and_grad(
                lambda qkv, bias, q_token, kv_token, dropout_rng: grad_func(
                    customcall_self_fused_attn, qkv, bias, q_token, kv_token, dropout_rng, **kwargs
                ), (0, 1)))

        jitted_reference = jit(
            value_and_grad(
                lambda qkv, bias, q_token, kv_token, dropout_rng: grad_func(
                    jax_self_fused_attn, qkv, bias, q_token, kv_token, dropout_rng, **kwargs),
                (0, 1)))

        primitive_out, (primitive_dqkv,
                        primitive_dbias) = jitted_primitive(self.qkv, self.bias, self.q_token,
                                                            self.kv_token, self.dropout_rng)

        reference_out, (reference_dqkv,
                        reference_dbias) = jitted_reference(self.qkv, self.bias, self.q_token,
                                                            self.kv_token, self.dropout_rng)

        np.testing.assert_allclose(jnp.asarray(primitive_out, np.float32),
                                   jnp.asarray(reference_out, np.float32),
                                   rtol=1e-4,
                                   atol=1e-5)

        valid_primitive_dqkv, invalid_primitive_dqkv = jnp.split(primitive_dqkv, (self.valid_len,),
                                                                 axis=1)
        valid_reference_dqkv, invalid_reference_dqkv = jnp.split(reference_dqkv, (self.valid_len,),
                                                                 axis=1)

        # dQ
        np.testing.assert_allclose(jnp.asarray(valid_primitive_dqkv[:, :, 0], np.float32),
                                   jnp.asarray(valid_reference_dqkv[:, :, 0], np.float32),
                                   rtol=1e-4,
                                   atol=1e-5)

        # dK
        np.testing.assert_allclose(jnp.asarray(valid_primitive_dqkv[:, :, 1], np.float32),
                                   jnp.asarray(valid_reference_dqkv[:, :, 1], np.float32),
                                   rtol=1e-4,
                                   atol=1e-5)

        # dV
        np.testing.assert_allclose(jnp.asarray(valid_primitive_dqkv[:, :, 2], np.float32),
                                   jnp.asarray(valid_reference_dqkv[:, :, 2], np.float32),
                                   rtol=1e-4,
                                   atol=1e-5)

        assert jnp.allclose(invalid_primitive_dqkv, invalid_reference_dqkv)

        # Padded part should be 0s
        assert jnp.allclose(invalid_primitive_dqkv, jnp.zeros_like(invalid_primitive_dqkv))

        if self.attn_bias_type != AttnBiasType.NO_BIAS:
            # dbias valid part
            np.testing.assert_allclose(
                jnp.asarray(primitive_dbias[:, :, :self.valid_len, :self.valid_len], np.float32),
                jnp.asarray(reference_dbias[:, :, :self.valid_len, :self.valid_len], np.float32),
                rtol=1e-4,
                atol=3e-5)

            # dbias padded part
            np.testing.assert_allclose(
                jnp.asarray(primitive_dbias[:, :, self.valid_len:, self.valid_len:], np.float32),
                jnp.asarray(reference_dbias[:, :, self.valid_len:, self.valid_len:], np.float32))

            assert jnp.allclose(
                primitive_dbias[:, :, self.valid_len:, self.valid_len:],
                jnp.zeros_like(primitive_dbias[:, :, self.valid_len:, self.valid_len:]))


@pytest.mark.skipif(not is_fused_attn_kernel_available(),
                    reason="Fused attention kernel is not supported.")
class TestCrossFusedAttnMax512():
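    """Tests for cross_fused_attn (the max-512-sequence-length fused attention kernel) against the flax reference."""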

    def set_input(self, b, s_q, s_kv, h, d, *, attn_mask_type, dropout_probability, dtype,
                  is_training, pad_ratio):
        key = jax.random.PRNGKey(0)
        subkeys = jax.random.split(key, 2)

        q_shape = (b, s_q, h, d)
        kv_shape = (b, s_kv, 2, h, d)
        q_pad_len = int(s_q * pad_ratio)
        kv_pad_len = int(s_kv * pad_ratio)
        self.q_valid_len = s_q - q_pad_len
        self.kv_valid_len = s_kv - kv_pad_len

        min_val, max_val = -1, 1
        self.q = jax.random.uniform(subkeys[0], q_shape, dtype, min_val, max_val)
        self.kv = jax.random.uniform(subkeys[1], kv_shape, dtype, min_val, max_val)

        self.q_token = jnp.concatenate((jnp.ones((b, self.q_valid_len)), jnp.zeros((b, q_pad_len))),
                                       axis=-1)
        self.kv_token = jnp.concatenate((jnp.ones((b, self.kv_valid_len)), jnp.zeros(
            (b, kv_pad_len))),
                                        axis=-1)
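        # Token values of 1 mark valid positions and 0 marks padding; query and key/value have separately padded lengths.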
        self.scaling_factor = 1. / math.sqrt(d)
        self.dropout_probability = dropout_probability
        self.dropout_rng = jax.random.PRNGKey(0) if self.dropout_probability > 0 else None
        self.attn_bias_type = AttnBiasType.NO_BIAS
        self.is_training = is_training

    @pytest.mark.parametrize('b, s_q, s_kv, h, d', CROSS_CASES)
    @pytest.mark.parametrize('attn_mask_type', [AttnMaskType.PADDING_MASK])
    @pytest.mark.parametrize('dropout_probability', [0., 0.1])
    @pytest.mark.parametrize('dtype', DTYPES)
    @pytest.mark.parametrize('is_training', [True, False])
    @pytest.mark.parametrize('pad_ratio', PAD_RATIO)
    def test_forward(self, b, s_q, s_kv, h, d, attn_mask_type, dropout_probability, dtype,
                     is_training, pad_ratio):
        # Dropout results are not bit-exact reproducible between implementations, so skip those cases
        if is_training and dropout_probability > 0.:
            return

        self.set_input(b,
                       s_q,
                       s_kv,
                       h,
                       d,
                       attn_mask_type=attn_mask_type,
                       dropout_probability=dropout_probability,
                       dtype=dtype,
                       is_training=is_training,
                       pad_ratio=pad_ratio)

        primitive_out = customcall_cross_fused_attn(self.q,
                                                    self.kv,
                                                    self.q_token,
                                                    self.kv_token,
                                                    self.dropout_rng,
                                                    attn_bias_type=self.attn_bias_type,
                                                    attn_mask_type=attn_mask_type,
                                                    scaling_factor=self.scaling_factor,
                                                    dropout_probability=self.dropout_probability,
                                                    is_training=self.is_training)

        reference_out = jax_cross_fused_attn(self.q,
                                             self.kv,
                                             self.q_token,
                                             self.kv_token,
                                             self.dropout_rng,
                                             attn_mask_type=attn_mask_type,
                                             scaling_factor=self.scaling_factor,
                                             dropout_probability=self.dropout_probability,
                                             is_training=self.is_training)

        ref_valid, _ = jnp.split(reference_out, (self.q_valid_len,), axis=1)
        pri_valid, pri_invalid = jnp.split(primitive_out, (self.q_valid_len,), axis=1)

        np.testing.assert_allclose(jnp.asarray(pri_valid, np.float32),
                                   jnp.asarray(ref_valid, np.float32),
                                   rtol=1e-4,
                                   atol=2e-3)

        np.testing.assert_allclose(jnp.asarray(pri_invalid, jnp.float32),
                                   jnp.zeros_like(pri_invalid, jnp.float32))

    @pytest.mark.parametrize('b, s_q, s_kv, h, d', CROSS_CASES)
    @pytest.mark.parametrize('attn_mask_type', [AttnMaskType.PADDING_MASK])
    @pytest.mark.parametrize('dropout_probability', [0.])    # dropout cannot produce bit-exact results
    @pytest.mark.parametrize('dtype', DTYPES)
    @pytest.mark.parametrize('is_training', [True])    # the backward pass only runs when is_training
    @pytest.mark.parametrize('pad_ratio', PAD_RATIO)
    def test_forward_backward(self, b, s_q, s_kv, h, d, attn_mask_type, dropout_probability, dtype,
                              is_training, pad_ratio):
        self.set_input(b,
                       s_q,
                       s_kv,
                       h,
                       d,
                       attn_mask_type=attn_mask_type,
                       dropout_probability=dropout_probability,
                       dtype=dtype,
                       is_training=is_training,
                       pad_ratio=pad_ratio)

        def grad_func(fused_attn_max_512_func, *args, **kwargs):
            # The gradient is small, so use a gradient multiplier to amplify it
            gradient_multiplier = 10000
            if attn_mask_type == AttnMaskType.CAUSAL_MASK:
                gradient_multiplier = gradient_multiplier / 10
            # Keep only the valid (non-padded) part of the output for the gradient
            # fused_attn_max_512 output has shape (b, s_q, h, d)
            valid_fused_attn_max_512_ret, _ = jnp.split(fused_attn_max_512_func(*args, **kwargs),
                                                        (self.q_valid_len,),
                                                        axis=1)
            return (jnp.mean(valid_fused_attn_max_512_ret, dtype=jnp.float32) *
                    gradient_multiplier).astype(dtype)

        kwargs = {
            'attn_bias_type': self.attn_bias_type,
            'attn_mask_type': attn_mask_type,
            'scaling_factor': self.scaling_factor,
            'dropout_probability': self.dropout_probability,
            'is_training': self.is_training
        }

        # Summing the results in FP16/BF16 may overflow, so use FP32 for the summation
        jitted_primitive = jit(
            value_and_grad(
                lambda q, kv, q_token, kv_token, dropout_rng: grad_func(
                    customcall_cross_fused_attn, q, kv, q_token, kv_token, dropout_rng, **kwargs),
                (0, 1)))

        jitted_reference = jit(
            value_and_grad(
                lambda q, kv, q_token, kv_token, dropout_rng: grad_func(
                    jax_cross_fused_attn, q, kv, q_token, kv_token, dropout_rng, **kwargs), (0, 1)))

        primitive_out, (primitive_dq,
                        primitive_dkv) = jitted_primitive(self.q, self.kv, self.q_token,
                                                          self.kv_token, self.dropout_rng)

        reference_out, (reference_dq,
                        reference_dkv) = jitted_reference(self.q, self.kv, self.q_token,
                                                          self.kv_token, self.dropout_rng)

        np.testing.assert_allclose(jnp.asarray(primitive_out, np.float32),
                                   jnp.asarray(reference_out, np.float32),
                                   rtol=1e-4,
                                   atol=1e-5)

        valid_primitive_dq, invalid_primitive_dq = jnp.split(primitive_dq, (self.q_valid_len,),
                                                             axis=1)
        valid_reference_dq, invalid_reference_dq = jnp.split(reference_dq, (self.q_valid_len,),
                                                             axis=1)

        valid_primitive_dkv, invalid_primitive_dkv = jnp.split(primitive_dkv, (self.kv_valid_len,),
                                                               axis=1)
        valid_reference_dkv, invalid_reference_dkv = jnp.split(reference_dkv, (self.kv_valid_len,),
                                                               axis=1)

        # dQ
        np.testing.assert_allclose(jnp.asarray(valid_primitive_dq, np.float32),
                                   jnp.asarray(valid_reference_dq, np.float32),
                                   rtol=1e-4,
                                   atol=1e-5)

        # dK
        np.testing.assert_allclose(jnp.asarray(valid_primitive_dkv[:, :, 0], np.float32),
                                   jnp.asarray(valid_reference_dkv[:, :, 0], np.float32),
                                   rtol=1e-4,
                                   atol=1e-5)

        # dV
        np.testing.assert_allclose(jnp.asarray(valid_primitive_dkv[:, :, 1], np.float32),
                                   jnp.asarray(valid_reference_dkv[:, :, 1], np.float32),
                                   rtol=1e-4,
                                   atol=1e-5)

        assert jnp.allclose(invalid_primitive_dq, invalid_reference_dq)
        assert jnp.allclose(invalid_primitive_dkv, invalid_reference_dkv)

        # Padded part should be 0s
        assert jnp.allclose(invalid_primitive_dq, jnp.zeros_like(invalid_primitive_dq))
        assert jnp.allclose(invalid_primitive_dkv, jnp.zeros_like(invalid_primitive_dkv))