"pretrain_gpt.py" did not exist on "34be7dd3377c501803b888aa0637cf06dd06b8d8"
Commit 98957dd7 authored by luopl's avatar luopl
Browse files

init

parents
Pipeline #1625 canceled with stages
# Copyright (c) 2024, Tri Dao, Albert Gu.
"""We want triton==2.1.0 or 2.2.0 for this
"""
from typing import Optional
import math
from packaging import version
import torch
import torch.nn.functional as F
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd
import triton
import triton.language as tl
from einops import rearrange, repeat
try:
from causal_conv1d import causal_conv1d_fn
import causal_conv1d_cuda
except ImportError:
causal_conv1d_fn, causal_conv1d_cuda = None, None
from mamba_ssm.ops.triton.ssd_bmm import _bmm_chunk_fwd, _bmm_chunk_bwd
from mamba_ssm.ops.triton.ssd_chunk_state import _chunk_cumsum_fwd, _chunk_cumsum_bwd
from mamba_ssm.ops.triton.ssd_chunk_state import _chunk_state_fwd, _chunk_state_bwd_db
from mamba_ssm.ops.triton.ssd_chunk_state import _chunk_state_bwd_ddAcs_stable
from mamba_ssm.ops.triton.ssd_chunk_state import chunk_state, chunk_state_ref
from mamba_ssm.ops.triton.ssd_chunk_state import chunk_state_varlen
from mamba_ssm.ops.triton.ssd_state_passing import _state_passing_fwd, _state_passing_bwd
from mamba_ssm.ops.triton.ssd_state_passing import state_passing, state_passing_ref
from mamba_ssm.ops.triton.ssd_chunk_scan import _chunk_scan_fwd, _chunk_scan_bwd_dz, _chunk_scan_bwd_dstates
from mamba_ssm.ops.triton.ssd_chunk_scan import _chunk_scan_bwd_dC, _chunk_scan_bwd_dcb
from mamba_ssm.ops.triton.ssd_chunk_scan import _chunk_scan_bwd_ddAcs_stable
from mamba_ssm.ops.triton.ssd_chunk_scan import chunk_scan, chunk_scan_ref
from mamba_ssm.ops.triton.ssd_chunk_scan import _chunk_scan_bwd_ddAcs_prev
from mamba_ssm.ops.triton.layernorm_gated import rmsnorm_fn, _layer_norm_fwd, _layer_norm_bwd
from mamba_ssm.ops.triton.k_activations import _swiglu_fwd, _swiglu_bwd
TRITON_22 = version.parse(triton.__version__) >= version.parse('2.2.0')
def init_to_zero(names):
return lambda nargs: [nargs[name].zero_() for name in names if nargs[name] is not None]
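# Note: ddt is allocated with torch.empty and accumulated via tl.atomic_add across the
# pid_n grid, so each autotune config zeroes ddt_ptr in its pre_hook before launching.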
@triton.autotune(
configs=[
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64}, num_stages=3, num_warps=8, pre_hook=init_to_zero(["ddt_ptr"])),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4, pre_hook=init_to_zero(["ddt_ptr"])),
],
key=['chunk_size', 'hdim', 'dstate'],
)
@triton.jit
def _chunk_scan_chunk_state_bwd_dx_kernel(
# Pointers to matrices
x_ptr, cb_ptr, dout_ptr, dt_ptr, dA_cumsum_ptr, seq_idx_ptr, D_ptr,
b_ptr, dstates_ptr,
dx_ptr, ddt_ptr, dD_ptr,
# Matrix dimensions
chunk_size, hdim, dstate,
batch, seqlen, nheads_ngroups_ratio,
# Strides
stride_x_batch, stride_x_seqlen, stride_x_head, stride_x_hdim,
stride_cb_batch, stride_cb_chunk, stride_cb_head, stride_cb_csize_m, stride_cb_csize_k,
stride_dout_batch, stride_dout_seqlen, stride_dout_head, stride_dout_hdim,
stride_dt_batch, stride_dt_chunk, stride_dt_head, stride_dt_csize,
stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head, stride_dA_cs_csize,
stride_seq_idx_batch, stride_seq_idx_seqlen,
stride_D_head,
stride_b_batch, stride_b_seqlen, stride_b_head, stride_b_dstate,
stride_dstates_batch, stride_dstates_chunk, stride_dstates_head, stride_dstates_hdim, stride_dstates_dstate,
stride_dx_batch, stride_dx_seqlen, stride_dx_head, stride_dx_hdim,
stride_ddt_batch, stride_ddt_chunk, stride_ddt_head, stride_ddt_csize,
stride_dD_batch, stride_dD_chunk, stride_dD_head, stride_dD_csize, stride_dD_hdim,
# Meta-parameters
HAS_D: tl.constexpr,
D_HAS_HDIM: tl.constexpr,
HAS_SEQ_IDX: tl.constexpr,
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
BLOCK_SIZE_DSTATE: tl.constexpr,
IS_TRITON_22: tl.constexpr,
):
pid_bc = tl.program_id(axis=1)
pid_c = pid_bc // batch
pid_b = pid_bc - pid_c * batch
pid_h = tl.program_id(axis=2)
num_pid_n = tl.cdiv(hdim, BLOCK_SIZE_N)
pid_m = tl.program_id(axis=0) // num_pid_n
pid_n = tl.program_id(axis=0) % num_pid_n
x_ptr += pid_b * stride_x_batch + pid_c * chunk_size * stride_x_seqlen + pid_h * stride_x_head
cb_ptr += pid_b * stride_cb_batch + pid_c * stride_cb_chunk + (pid_h // nheads_ngroups_ratio) * stride_cb_head
dout_ptr += pid_b * stride_dout_batch + pid_c * chunk_size * stride_dout_seqlen + pid_h * stride_dout_head
dt_ptr += pid_b * stride_dt_batch + pid_c * stride_dt_chunk + pid_h * stride_dt_head
ddt_ptr += pid_b * stride_ddt_batch + pid_c * stride_ddt_chunk + pid_h * stride_ddt_head
dA_cumsum_ptr += pid_b * stride_dA_cs_batch + pid_c * stride_dA_cs_chunk + pid_h * stride_dA_cs_head
b_ptr += pid_b * stride_b_batch + pid_c * chunk_size * stride_b_seqlen + (pid_h // nheads_ngroups_ratio) * stride_b_head
dstates_ptr += pid_b * stride_dstates_batch + pid_c * stride_dstates_chunk + pid_h * stride_dstates_head
if HAS_SEQ_IDX:
seq_idx_ptr += pid_b * stride_seq_idx_batch + pid_c * chunk_size * stride_seq_idx_seqlen
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
chunk_size_limit = min(chunk_size, seqlen - pid_c * chunk_size)
acc = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
dA_cs_m = tl.load(dA_cumsum_ptr + offs_m * stride_dA_cs_csize, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32)
dA_cs_last = tl.load(dA_cumsum_ptr + (chunk_size - 1) * stride_dA_cs_csize).to(tl.float32)
if not HAS_SEQ_IDX:
scale = tl.exp(dA_cs_last - dA_cs_m)
else:
seq_idx_m = tl.load(seq_idx_ptr + offs_m * stride_seq_idx_seqlen, mask=offs_m < chunk_size_limit, other=-1)
seq_idx_last = tl.load(seq_idx_ptr + (chunk_size_limit - 1) * stride_seq_idx_seqlen)
scale = tl.where(seq_idx_m == seq_idx_last, tl.exp(dA_cs_last - dA_cs_m), 0.0)
# Might be faster to just do 1 iteration with larger BLOCK_SIZE_K, up to block size 128
    # However, we're getting an error with the Triton compiler 2.1.0 for that code path:
# Unexpected mma -> mma layout conversion
# Triton 2.2.0 fixes this
offs_dstate = tl.arange(0, BLOCK_SIZE_DSTATE if IS_TRITON_22 and BLOCK_SIZE_DSTATE <= 128 else BLOCK_SIZE_K)
b_ptrs = b_ptr + (offs_m[:, None] * stride_b_seqlen + offs_dstate[None, :] * stride_b_dstate)
dstates_ptrs = dstates_ptr + (offs_n[None, :] * stride_dstates_hdim + offs_dstate[:, None] * stride_dstates_dstate)
if IS_TRITON_22 and BLOCK_SIZE_DSTATE <= 128:
b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_dstate[None, :] < dstate), other=0.0)
dstates = tl.load(dstates_ptrs, mask=(offs_dstate[:, None] < dstate) & (offs_n[None, :] < hdim), other=0.0)
dstates = dstates.to(b_ptr.dtype.element_ty)
acc = tl.dot(b, dstates) * scale[:, None]
else:
for k in range(0, dstate, BLOCK_SIZE_K):
b = tl.load(b_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_dstate[None, :] < dstate - k), other=0.0)
dstates = tl.load(dstates_ptrs, mask=(offs_dstate[:, None] < dstate - k) & (offs_n[None, :] < hdim), other=0.0)
dstates = dstates.to(b_ptr.dtype.element_ty)
acc += tl.dot(b, dstates)
b_ptrs += BLOCK_SIZE_K * stride_b_dstate
dstates_ptrs += BLOCK_SIZE_K * stride_dstates_dstate
acc *= scale[:, None]
# x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim)
# x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32)
# dt_ptrs = dt_ptr + offs_m * stride_dt_csize
# dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32)
# ddt = tl.sum(acc * x, axis=1) * dt_m
# ddt_ptrs = ddt_ptr + offs_m * stride_ddt_csize
# tl.atomic_add(ddt_ptrs, ddt, mask=offs_m < chunk_size)
offs_k = tl.arange(0, BLOCK_SIZE_K)
cb_ptrs = cb_ptr + (offs_m[:, None] * stride_cb_csize_m + offs_k[None, :] * stride_cb_csize_k)
dout_ptrs = dout_ptr + (offs_k[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim)
dA_cumsum_ptrs = dA_cumsum_ptr + offs_k * stride_dA_cs_csize
K_MAX = chunk_size_limit
K_MIN = pid_m * BLOCK_SIZE_M
cb_ptrs += K_MIN * stride_cb_csize_k
dout_ptrs += K_MIN * stride_dout_seqlen
dA_cumsum_ptrs += K_MIN * stride_dA_cs_csize
for k in range(K_MIN, K_MAX, BLOCK_SIZE_K):
k = tl.multiple_of(k, BLOCK_SIZE_K)
# For some reason setting mask to (offs_m[:, None] < chunk_size_limit) is much slower
cb = tl.load(cb_ptrs, mask=(offs_m[:, None] < chunk_size) & (offs_k[None, :] < K_MAX - k), other=0.0)
dout = tl.load(dout_ptrs, mask=(offs_k[:, None] < K_MAX - k) & (offs_n[None, :] < hdim), other=0.0)
dA_cs_k = tl.load(dA_cumsum_ptrs, mask=offs_k < K_MAX - k, other=0.0).to(tl.float32)
cb *= tl.exp(dA_cs_k[None, :] - dA_cs_m[:, None])
# If we don't have the (k + offs_k[None, :] < K_MAX) mask, for indices outside this range,
# we might have dA_cs_m = 0.0 and dA_cs_k very negative, and tl.exp will return inf.
# Multiplying with cb, which is 0.0 outside the range, will make the result NaN.
# This will cause NaN in acc, and hence NaN in dx and ddt.
mask = (k + offs_k[None, :] >= offs_m[:, None]) & (k + offs_k[None, :] < K_MAX)
cb = tl.where(mask, cb, 0.0)
cb = cb.to(dout_ptr.dtype.element_ty)
acc += tl.dot(cb, dout)
cb_ptrs += BLOCK_SIZE_K * stride_cb_csize_k
dout_ptrs += BLOCK_SIZE_K * stride_dout_seqlen
dA_cumsum_ptrs += BLOCK_SIZE_K * stride_dA_cs_csize
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
dt_ptrs = dt_ptr + offs_m * stride_dt_csize
dt_m = tl.load(dt_ptrs, mask=offs_m < chunk_size_limit, other=0.0).to(tl.float32)
dx = acc * dt_m[:, None]
dx_ptr += pid_b * stride_dx_batch + pid_c * chunk_size * stride_dx_seqlen + pid_h * stride_dx_head
dx_ptrs = dx_ptr + (offs_m[:, None] * stride_dx_seqlen + offs_n[None, :] * stride_dx_hdim)
if HAS_D:
dout_res_ptrs = dout_ptr + (offs_m[:, None] * stride_dout_seqlen + offs_n[None, :] * stride_dout_hdim)
dout_res = tl.load(dout_res_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32)
if D_HAS_HDIM:
D = tl.load(D_ptr + pid_h * stride_D_head + offs_n, mask=offs_n < hdim, other=0.0).to(tl.float32)
else:
D = tl.load(D_ptr + pid_h * stride_D_head).to(tl.float32)
dx += dout_res * D
tl.store(dx_ptrs, dx, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim))
x_ptrs = x_ptr + (offs_m[:, None] * stride_x_seqlen + offs_n[None, :] * stride_x_hdim)
x = tl.load(x_ptrs, mask=(offs_m[:, None] < chunk_size_limit) & (offs_n[None, :] < hdim), other=0.0).to(tl.float32)
if HAS_D:
dD_ptr += pid_b * stride_dD_batch + pid_c * stride_dD_chunk + pid_h * stride_dD_head + pid_m * stride_dD_csize
if D_HAS_HDIM:
dD_ptrs = dD_ptr + offs_n * stride_dD_hdim
dD = tl.sum(dout_res * x, axis=0)
tl.store(dD_ptrs, dD, mask=offs_n < hdim)
else:
dD = tl.sum(dout_res * x)
tl.store(dD_ptr, dD)
ddt = tl.sum(acc * x, axis=1)
ddt_ptrs = ddt_ptr + offs_m * stride_ddt_csize
tl.atomic_add(ddt_ptrs, ddt, mask=offs_m < chunk_size)
def _chunk_scan_chunk_state_bwd_dx(x, dt, dA_cumsum, B, CB, dout, dstates, D=None, seq_idx=None, dx=None):
batch, seqlen, nheads, headdim = x.shape
_, _, nchunks, chunk_size = dt.shape
_, _, ngroups, dstate = B.shape
assert nheads % ngroups == 0
assert B.shape == (batch, seqlen, ngroups, dstate)
assert CB.shape == (batch, nchunks, ngroups, chunk_size, chunk_size)
assert dt.shape == (batch, nheads, nchunks, chunk_size)
assert dA_cumsum.shape == dt.shape
assert dout.shape == x.shape
assert dstates.shape == (batch, nchunks, nheads, headdim, dstate)
if seq_idx is not None:
assert seq_idx.shape == (batch, seqlen)
if D is not None:
assert D.shape == (nheads, headdim) or D.shape == (nheads,)
assert D.stride(-1) == 1
BLOCK_SIZE_min = 32
dD = torch.empty(triton.cdiv(chunk_size, BLOCK_SIZE_min), batch, nchunks, nheads,
headdim if D.dim() == 2 else 1, device=D.device, dtype=torch.float32)
else:
dD = None
dD_strides = ((dD.stride(0), dD.stride(1), dD.stride(2), dD.stride(3), dD.stride(4))
if D is not None else (0, 0, 0, 0, 0))
if dx is None:
dx = torch.empty_like(x)
else:
assert dx.shape == x.shape
ddt = torch.empty(batch, nheads, nchunks, chunk_size, device=dout.device, dtype=torch.float32)
grid_dx = lambda META: (triton.cdiv(chunk_size, META['BLOCK_SIZE_M']) * triton.cdiv(headdim, META['BLOCK_SIZE_N']),
batch * nchunks, nheads)
with torch.cuda.device(x.device.index):
_chunk_scan_chunk_state_bwd_dx_kernel[grid_dx](
x, CB, dout, dt, dA_cumsum, seq_idx, D, B, dstates, dx, ddt, dD,
chunk_size, headdim, dstate,
batch, seqlen, nheads // ngroups,
x.stride(0), x.stride(1), x.stride(2), x.stride(3),
CB.stride(0), CB.stride(1), CB.stride(2), CB.stride(-1), CB.stride(-2),
dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3),
dt.stride(0), dt.stride(2), dt.stride(1), dt.stride(3),
dA_cumsum.stride(0), dA_cumsum.stride(2), dA_cumsum.stride(1), dA_cumsum.stride(3),
*((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)),
D.stride(0) if D is not None else 0,
B.stride(0), B.stride(1), B.stride(2), B.stride(3),
dstates.stride(0), dstates.stride(1), dstates.stride(2), dstates.stride(3), dstates.stride(4),
dx.stride(0), dx.stride(1), dx.stride(2), dx.stride(3),
ddt.stride(0), ddt.stride(2), ddt.stride(1), ddt.stride(3),
dD_strides[1], dD_strides[2], dD_strides[3], dD_strides[0], dD_strides[4],
D is not None,
D.dim() == 2 if D is not None else True,
HAS_SEQ_IDX=seq_idx is not None,
BLOCK_SIZE_DSTATE=max(triton.next_power_of_2(dstate), 16),
IS_TRITON_22=TRITON_22
)
if D is not None:
BLOCK_SIZE_actual = _chunk_scan_chunk_state_bwd_dx_kernel.best_config.kwargs["BLOCK_SIZE_M"]
n_valid_blocks = (chunk_size + BLOCK_SIZE_actual - 1) // BLOCK_SIZE_actual
dD = dD[:n_valid_blocks].sum(dim=(0, 1, 2)).to(dtype=D.dtype)
if D.dim() == 1:
dD = rearrange(dD, "h 1 -> h")
return dx, ddt.to(dtype=dt.dtype), dD
def _mamba_chunk_scan_combined_fwd(x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, initial_states=None, seq_idx=None, cu_seqlens=None, dt_softplus=False, dt_limit=(0.0, float("inf"))):
batch, seqlen, nheads, headdim = x.shape
_, _, ngroups, dstate = B.shape
assert nheads % ngroups == 0
assert B.shape == (batch, seqlen, ngroups, dstate)
assert x.shape == (batch, seqlen, nheads, headdim)
assert dt.shape == (batch, seqlen, nheads)
assert A.shape == (nheads,)
assert C.shape == B.shape
if z is not None:
assert z.shape == x.shape
if D is not None:
assert D.shape == (nheads, headdim) or D.shape == (nheads,)
if seq_idx is not None:
assert seq_idx.shape == (batch, seqlen)
if B.stride(-1) != 1:
B = B.contiguous()
if C.stride(-1) != 1:
C = C.contiguous()
if x.stride(-1) != 1 and x.stride(1) != 1: # Either M or K dimension should be contiguous
x = x.contiguous()
if z is not None and z.stride(-1) != 1 and z.stride(1) != 1: # Either M or K dimension should be contiguous
z = z.contiguous()
if D is not None and D.stride(-1) != 1:
D = D.contiguous()
if initial_states is not None:
assert initial_states.shape == (batch, nheads, headdim, dstate)
# # (batch, nchunks, chunk_size, chunk_size) or (batch, nchunks, nheads, chunk_size, chunk_size)
# dA_cumsum_tmp0, dt_tmp0 = _chunk_cumsum_fwd(dt[:, :147], A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus)
# dA_cumsum_tmp1, dt_tmp1 = _chunk_cumsum_fwd(dt[:, 147:], A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus)
# dA_cumsum_tmp2, dt_tmp2 = _chunk_cumsum_fwd(dt[:, 147:256], A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus)
dA_cumsum, dt = _chunk_cumsum_fwd(dt, A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus, dt_limit=dt_limit)
states = _chunk_state_fwd(B, x, dt, dA_cumsum, seq_idx=seq_idx, states_in_fp32=True)
# states_tmp0 = _chunk_state_fwd(B[:, :147], x[:, :147], dt_tmp0, dA_cumsum_tmp0, states_in_fp32=True)
# states_tmp1 = _chunk_state_fwd(B[:, 147:], x[:, 147:], dt_tmp1, dA_cumsum_tmp1, states_in_fp32=True)
# states_tmp2 = _chunk_state_fwd(B[:, 147:256], x[:, 147:256], dt_tmp2, dA_cumsum_tmp2, states_in_fp32=True)
states, final_states = _state_passing_fwd(rearrange(states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1],
initial_states=rearrange(initial_states, "... p n -> ... (p n)") if initial_states is not None else None,
seq_idx=seq_idx, chunk_size=chunk_size, out_dtype=C.dtype)
states, final_states = [rearrange(t, "... (p n) -> ... p n", n=dstate) for t in [states, final_states]]
# states_tmp0 = rearrange(_state_passing_fwd(rearrange(states_tmp0, "... p n -> ... (p n)"), dA_cumsum_tmp0[:, :, :, -1], chunk_size=chunk_size), "... (p n) -> ... p n", n=dstate)
# states_tmp1 = rearrange(_state_passing_fwd(rearrange(states_tmp1, "... p n -> ... (p n)"), dA_cumsum_tmp1[:, :, :, -1], chunk_size=chunk_size), "... (p n) -> ... p n", n=dstate)
CB = _bmm_chunk_fwd(C, B, chunk_size, seq_idx=seq_idx, output_dtype=torch.float32)
out, out_x = _chunk_scan_fwd(CB, x, dt, dA_cumsum, C, states, D=D, z=z, seq_idx=seq_idx)
if cu_seqlens is None:
return out, out_x, dt, dA_cumsum, states, final_states
else:
assert batch == 1, "passing cu_seqlens to get the varlen states is only supported if batch dimension is 1"
varlen_states = chunk_state_varlen(B.squeeze(0), x.squeeze(0), dt.squeeze(0), dA_cumsum.squeeze(0),
cu_seqlens, states.squeeze(0))
return out, out_x, dt, dA_cumsum, states, final_states, varlen_states
def _mamba_chunk_scan_combined_bwd(dout, x, dt, A, B, C, out, chunk_size, D=None, z=None,
dt_bias=None, initial_states=None, dfinal_states=None, seq_idx=None, dt_softplus=False,
dt_limit=(0.0, float("inf")),
dx=None, ddt=None, dB=None, dC=None, dz=None, recompute_output=False):
if dout.stride(-1) != 1:
dout = dout.contiguous()
batch, seqlen, nheads, headdim = x.shape
nchunks = math.ceil(seqlen / chunk_size)
_, _, ngroups, dstate = B.shape
assert dout.shape == (batch, seqlen, nheads, headdim)
assert dt.shape == (batch, seqlen, nheads)
assert A.shape == (nheads,)
assert nheads % ngroups == 0
assert B.shape == (batch, seqlen, ngroups, dstate)
assert C.shape == B.shape
assert out.shape == x.shape
if initial_states is not None:
assert initial_states.shape == (batch, nheads, headdim, dstate)
if seq_idx is not None:
assert seq_idx.shape == (batch, seqlen)
if dx is not None:
assert dx.shape == x.shape
if dB is not None:
assert dB.shape == B.shape
dB_given = dB
else:
dB_given = torch.empty_like(B)
if dC is not None:
assert dC.shape == C.shape
dC_given = dC
else:
dC_given = torch.empty_like(C)
if dz is not None:
assert z is not None
assert dz.shape == z.shape
if ddt is not None:
assert ddt.shape == dt.shape
ddt_given = ddt
else:
ddt_given = torch.empty_like(dt)
# TD: For some reason Triton (2.1.0 and 2.2.0) errors with
# "[CUDA]: invalid device context" (e.g. during varlne test), and cloning makes it work. Idk why.
dt_in = dt.clone()
dA_cumsum, dt = _chunk_cumsum_fwd(dt_in, A, chunk_size, dt_bias=dt_bias, dt_softplus=dt_softplus,
dt_limit=dt_limit)
CB = _bmm_chunk_fwd(C, B, chunk_size, seq_idx=seq_idx, output_dtype=torch.float32)
states = _chunk_state_fwd(B, x, dt, dA_cumsum, seq_idx=seq_idx, states_in_fp32=True)
states, _ = _state_passing_fwd(rearrange(states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1],
initial_states=rearrange(initial_states, "... p n -> ... (p n)") if initial_states is not None else None,
seq_idx=seq_idx, chunk_size=chunk_size)
states = rearrange(states, "... (p n) -> ... p n", n=dstate)
if z is not None:
dz, dout, dD, *rest = _chunk_scan_bwd_dz(x, z, out, dout, chunk_size=chunk_size, has_ddAcs=False, D=D, dz=dz, recompute_output=recompute_output)
outz = rest[0] if recompute_output else out
else:
dz = None
outz = out
dstates = _chunk_scan_bwd_dstates(C, dA_cumsum, dout, seq_idx=seq_idx, dtype=states.dtype)
# dstates has length nchunks, containing the gradient to initial states at index 0 and
# gradient to the states of chunk (nchunks - 2) at index (nchunks - 1)
# Do computation in fp32 but convert dstates and states to fp16/bf16 since dstates and states
# will be used in matmul in the next kernels.
dstates, ddA_chunk_cumsum, dinitial_states, states = _state_passing_bwd(
rearrange(states, "... p n -> ... (p n)"),
dA_cumsum[:, :, :, -1],
rearrange(dstates, "... p n -> ... (p n)"),
dfinal_states=rearrange(dfinal_states, "... p n -> ... (p n)") if dfinal_states is not None else None,
seq_idx=seq_idx,
has_initial_states=initial_states is not None,
dstates_dtype=x.dtype,
states_dtype=x.dtype,
chunk_size=chunk_size,
)
# dstates has length nchunks, containing the gradient to states of chunk 0 at index 0 and
# gradient to the final states at index (nchunks - 1)
# states has length nchunks, containing the initial states at index 0 and the state for chunk (nchunks - 2) at index (nchunks - 1)
# The final states is not stored.
states = rearrange(states, "... (p n) -> ... p n", n=dstate)
dstates = rearrange(dstates, "... (p n) -> ... p n", n=dstate)
dinitial_states = rearrange(dinitial_states, "... (p n) -> ... p n", n=dstate) if dinitial_states is not None else None
dx, ddt, dD_from_x = _chunk_scan_chunk_state_bwd_dx(x, dt, dA_cumsum, B, CB, dout, dstates, D=D, seq_idx=seq_idx, dx=dx)
# dB = _chunk_state_bwd_db(x, dt, dA_cumsum, dstates, seq_idx=seq_idx, ngroups=ngroups)
dB, ddA_next = _chunk_state_bwd_db(x, dt, dA_cumsum, dstates, seq_idx=seq_idx, B=B, ngroups=ngroups)
# dC = _chunk_scan_bwd_dC(states[:, :-1].to(x.dtype), dA_cumsum, dout, seq_idx=seq_idx, ngroups=ngroups)
dC, ddA_cumsum_prev = _chunk_scan_bwd_dC(states.to(x.dtype), dA_cumsum, dout, seq_idx=seq_idx, C=C, ngroups=ngroups)
# Computing ddA with the dcb kernel is much slower, so we're not using it for now
dCB = _chunk_scan_bwd_dcb(x, dt, dA_cumsum, dout, seq_idx=seq_idx, ngroups=ngroups)
# dCB, ddA_tmp = _chunk_scan_bwd_dcb(x, dt, dA_cumsum, dout, seq_idx=seq_idx, CB=CB, ngroups=ngroups)
dCB = dCB.to(CB.dtype)
_bmm_chunk_bwd(C, dCB, residual=dB, out=dB_given)
_bmm_chunk_bwd(B, rearrange(dCB, "... l s -> ... s l"), residual=dC, out=dC_given)
# If we have z, then dout_x is recomputed in fp32 so dD = (dout_x * x).sum() is more accurate
# than dD_from_x = (dout_x * x).sum() where dout_x is in fp16/bf16
if z is None:
dD = dD_from_x
# Formula for ddA_cumsum, assuming out is the output of the forward pass before adding x * D.
# ddA_cumsum = torch.einsum("bclhp,bclhp->bhcl", out.float(), dout.float()) - ddt * dt
# However, this is numerically unstable: when we do the reverse cumsum on ddA_cumsum, there might
# be a lot of underflow.
# This is already done as part of bwd_dC kernel
# ddA_cumsum_prev = _chunk_scan_bwd_ddAcs_prev(states[:, :-1], C, dout, dA_cumsum, seq_idx=seq_idx)
ddA_cumsum_prev[..., -1] += ddA_chunk_cumsum
ddA_prev = ddA_cumsum_prev.flip([-1]).cumsum(dim=-1).flip([-1])
# This is already done as part of bwd_dB kernel
# ddA_next = _chunk_state_bwd_ddAcs_stable(B, x, dt, dA_cumsum, dstates, seq_idx=seq_idx)
# We don't need to pass in seq_idx because CB also zeros out entries where seq_idx[i] != seq_idx[j]
ddA = _chunk_scan_bwd_ddAcs_stable(x, dt, dA_cumsum, dout, CB)
ddA += ddA_next + ddA_prev
ddt_given, dA, ddt_bias = _chunk_cumsum_bwd(ddA, ddt, dt_in, A, dt_bias=dt_bias, dt_softplus=dt_softplus, dt_limit=dt_limit, ddt=ddt_given)
# These 2 lines are just to test ddt and dA being computed by old code
# _, dA = selective_scan_bwd(dout, x, dt, A, B, C, D=D.float(), z=z)
# ddt_given.copy_(ddt)
return_vals = (dx, ddt_given, dA, dB_given, dC_given, dD, dz, ddt_bias, dinitial_states)
return return_vals if not recompute_output else (*return_vals, outz)
def selective_scan_bwd(dout, x, dt, A, B, C, D=None, z=None):
"""
Argument:
dout: (batch, seqlen, nheads, headdim)
x: (batch, seqlen, nheads, headdim)
dt: (batch, nheads, nchunks, chunk_size) or (batch, nheads, headdim, nchunks, chunk_size)
A: (nheads) or (dim, dstate)
B: (batch, seqlen, ngroups, dstate)
C: (batch, seqlen, ngroups, dstate)
D: (nheads, headdim) or (nheads,)
z: (batch, seqlen, nheads, headdim)
    Return:
        ddt: (batch, nheads, nchunks, chunk_size) or (batch, nheads, headdim, nchunks, chunk_size)
        dA: (nheads) or (dim, dstate)
    """
import selective_scan
batch, seqlen, nheads, headdim = x.shape
chunk_size = dt.shape[-1]
_, _, ngroups, dstate = B.shape
assert nheads % ngroups == 0
x = rearrange(x, "b l h p -> b (h p) l")
squeeze_dt = dt.dim() == 4
if dt.dim() == 4:
dt = repeat(dt, "b h c l -> b h p c l", p=headdim)
dt = rearrange(dt, "b h p c l -> b (h p) (c l)", p=headdim)
squeeze_A = A.dim() == 1
if A.dim() == 1:
A = repeat(A, "h -> (h p) n", p=headdim, n=dstate).to(dtype=torch.float32)
else:
A = A.to(dtype=torch.float32)
B = rearrange(B, "b l g n -> b g n l")
C = rearrange(C, "b l g n -> b g n l")
if D is not None:
if D.dim() == 2:
D = rearrange(D, "h p -> (h p)")
else:
D = repeat(D, "h -> (h p)", p=headdim)
if z is not None:
z = rearrange(z, "b l h p -> b (h p) l")
if x.stride(-1) != 1:
x = x.contiguous()
if dt.stride(-1) != 1:
dt = dt.contiguous()
if D is not None:
D = D.contiguous()
if B.stride(-1) != 1:
B = B.contiguous()
if C.stride(-1) != 1:
C = C.contiguous()
if z is not None and z.stride(-1) != 1:
z = z.contiguous()
_, intermediate, *rest = selective_scan.fwd(x, dt.to(dtype=x.dtype), A, B, C, D, z, None, False)
if z is not None:
out = rest[0]
else:
out = None
dout = rearrange(dout, "b l h p -> b (h p) l")
if dout.stride(-1) != 1:
dout = dout.contiguous()
# The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
# backward of selective_scan with the backward of chunk).
# Here we just pass in None and dz will be allocated in the C++ code.
_, ddt, dA, *rest = selective_scan.bwd(
x, dt.to(dtype=x.dtype), A, B, C, D, z, None, dout, intermediate, out, None, False,
False # option to recompute out_z, not used here
)
ddt = rearrange(ddt, "b (h p) (c l) -> b h p c l", p=headdim, l=chunk_size)
if squeeze_dt:
ddt = ddt.float().sum(dim=2)
if squeeze_A:
dA = rearrange(dA, "(h p) n -> h p n", p=headdim).sum(dim=(1, 2))
return ddt, dA
class MambaChunkScanCombinedFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, initial_states=None, seq_idx=None, cu_seqlens=None, dt_softplus=False, dt_limit=(0.0, float("inf")), return_final_states=False, return_varlen_states=False):
ctx.dt_dtype = dt.dtype
if not return_varlen_states:
cu_seqlens = None
else:
assert cu_seqlens is not None, "cu_seqlens must be provided if return_varlen_states is True"
out, out_x, dt_out, dA_cumsum, states, final_states, *rest = _mamba_chunk_scan_combined_fwd(x, dt, A, B, C, chunk_size, D=D, z=z, dt_bias=dt_bias, initial_states=initial_states, seq_idx=seq_idx, cu_seqlens=cu_seqlens, dt_softplus=dt_softplus, dt_limit=dt_limit)
ctx.save_for_backward(out if z is None else out_x, x, dt, dA_cumsum, A, B, C, D, z, dt_bias, initial_states, seq_idx)
ctx.dt_softplus = dt_softplus
ctx.chunk_size = chunk_size
ctx.dt_limit = dt_limit
ctx.return_final_states = return_final_states
ctx.return_varlen_states = return_varlen_states
if not return_varlen_states:
return out if not return_final_states else (out, final_states)
else:
varlen_states = rest[0]
return (out, varlen_states) if not return_final_states else (out, final_states, varlen_states)
@staticmethod
def backward(ctx, dout, *args):
out, x, dt, dA_cumsum, A, B, C, D, z, dt_bias, initial_states, seq_idx = ctx.saved_tensors
assert not ctx.return_varlen_states, "return_varlen_states is not supported in backward"
dfinal_states = args[0] if ctx.return_final_states else None
dx, ddt, dA, dB, dC, dD, dz, ddt_bias, dinitial_states = _mamba_chunk_scan_combined_bwd(dout, x, dt, A, B, C, out, ctx.chunk_size, D=D, z=z, dt_bias=dt_bias, initial_states=initial_states, dfinal_states=dfinal_states, seq_idx=seq_idx, dt_softplus=ctx.dt_softplus, dt_limit=ctx.dt_limit)
return dx, ddt, dA, dB, dC, None, dD, dz, ddt_bias, dinitial_states, None, None, None, None, None, None
def mamba_chunk_scan_combined(x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, initial_states=None, seq_idx=None, cu_seqlens=None, dt_softplus=False, dt_limit=(0.0, float("inf")), return_final_states=False, return_varlen_states=False):
"""
Argument:
x: (batch, seqlen, nheads, headdim)
dt: (batch, seqlen, nheads)
A: (nheads)
B: (batch, seqlen, ngroups, dstate)
C: (batch, seqlen, ngroups, dstate)
chunk_size: int
D: (nheads, headdim) or (nheads,)
z: (batch, seqlen, nheads, headdim)
dt_bias: (nheads,)
initial_states: (batch, nheads, headdim, dstate)
seq_idx: (batch, seqlen)
cu_seqlens: (num_sequences + 1) or None, only used if return_varlen_states is True
dt_softplus: Whether to apply softplus to dt
Return:
out: (batch, seqlen, nheads, headdim)
"""
return MambaChunkScanCombinedFn.apply(x, dt, A, B, C, chunk_size, D, z, dt_bias, initial_states, seq_idx, cu_seqlens, dt_softplus, dt_limit, return_final_states, return_varlen_states)
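# Illustrative usage (a minimal sketch, not part of the library API; the sizes, dtype and
# device below are assumptions for a typical GPU run):
#
#     import torch
#     batch, seqlen, nheads, headdim, ngroups, dstate, chunk_size = 2, 512, 8, 64, 1, 16, 256
#     device, dtype = "cuda", torch.bfloat16
#     x = torch.randn(batch, seqlen, nheads, headdim, device=device, dtype=dtype)
#     dt = torch.rand(batch, seqlen, nheads, device=device, dtype=dtype)
#     A = -torch.rand(nheads, device=device, dtype=torch.float32)  # negative so the state decays
#     B = torch.randn(batch, seqlen, ngroups, dstate, device=device, dtype=dtype)
#     C = torch.randn(batch, seqlen, ngroups, dstate, device=device, dtype=dtype)
#     out = mamba_chunk_scan_combined(x, dt, A, B, C, chunk_size, dt_softplus=True)
#     # out: (batch, seqlen, nheads, headdim)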
def mamba_chunk_scan(x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, dt_softplus=False):
"""
Argument:
x: (batch, seqlen, nheads, headdim)
dt: (batch, seqlen, nheads)
A: (nheads)
B: (batch, seqlen, ngroups, dstate)
C: (batch, seqlen, ngroups, dstate)
D: (nheads, headdim) or (nheads,)
z: (batch, seqlen, nheads, headdim)
dt_bias: (nheads,)
Return:
out: (batch, seqlen, nheads, headdim)
"""
batch, seqlen, nheads, headdim = x.shape
dstate = B.shape[-1]
if seqlen % chunk_size != 0:
dt = F.pad(dt, (0, 0, 0, chunk_size - seqlen % chunk_size))
dt = rearrange(dt, "b (c l) h -> b h c l", l=chunk_size)
dt = dt.float() # We want high precision for this before cumsum
if dt_bias is not None:
dt = dt + rearrange(dt_bias, "h -> h 1 1")
if dt_softplus:
dt = F.softplus(dt)
    dA = dt * rearrange(A, "h -> h 1 1")
dA_cumsum = torch.cumsum(dA, dim=-1)
# 1. Compute the state for each chunk
states = chunk_state(B, x, dt, dA_cumsum, states_in_fp32=True)
# 2. Pass the state to all the chunks by weighted cumsum.
states = rearrange(state_passing(rearrange(states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1])[0],
"... (p n) -> ... p n", n=dstate)
# 3. Compute the output for each chunk
out = chunk_scan(B, C, x, dt, dA_cumsum, states, D=D, z=z)
return out
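# The three numbered steps above mirror the fused Triton path in _mamba_chunk_scan_combined_fwd:
# _chunk_state_fwd (per-chunk states), _state_passing_fwd (carrying states across chunks), and
# _chunk_scan_fwd (per-chunk outputs), with the C @ B matmul handled by _bmm_chunk_fwd.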
def ssd_chunk_scan_combined_ref(x, dt, A, B, C, chunk_size, D=None, z=None, dt_bias=None, dt_softplus=False):
"""
Argument:
x: (batch, seqlen, nheads, headdim)
dt: (batch, seqlen, nheads)
A: (nheads)
B: (batch, seqlen, ngroups, dstate)
C: (batch, seqlen, ngroups, dstate)
D: (nheads, headdim) or (nheads,)
z: (batch, seqlen, nheads, headdim)
dt_bias: (nheads,)
Return:
out: (batch, seqlen, nheads, headdim)
"""
batch, seqlen, nheads, headdim = x.shape
dstate = B.shape[-1]
if seqlen % chunk_size != 0:
dt = F.pad(dt, (0, 0, 0, chunk_size - seqlen % chunk_size))
dt = rearrange(dt, "b (c l) h -> b h c l", l=chunk_size)
dt = dt.float() # We want high precision for this before cumsum
if dt_bias is not None:
dt = dt + rearrange(dt_bias, "h -> h 1 1")
if dt_softplus:
dt = F.softplus(dt)
dA = dt * rearrange(A, "h -> h 1 1")
dA_cumsum = torch.cumsum(dA, dim=-1)
# 1. Compute the state for each chunk
states = chunk_state_ref(B, x, dt, dA_cumsum)
states_dtype = states.dtype
if states.dtype not in [torch.float32, torch.float64]:
states = states.to(torch.float32)
# 2. Pass the state to all the chunks by weighted cumsum.
# state_passing_ref is much less numerically stable
states = rearrange(state_passing_ref(rearrange(states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1])[0],
"... (p n) -> ... p n", n=dstate)
states = states.to(states_dtype)
# 3. Compute the output for each chunk
out = chunk_scan_ref(B, C, x, dt, dA_cumsum, states, D=D, z=z)
return out
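# Illustrative correctness check against the Triton path (a sketch; the tolerances are
# assumptions and depend on dtype):
#
#     out_triton = mamba_chunk_scan_combined(x, dt, A, B, C, chunk_size, D=D, dt_softplus=True)
#     out_ref = ssd_chunk_scan_combined_ref(x.float(), dt.float(), A, B.float(), C.float(),
#                                           chunk_size, D=D, dt_softplus=True)
#     assert torch.allclose(out_triton.float(), out_ref, rtol=1e-2, atol=1e-2)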
def ssd_selective_scan(x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False, dt_limit=(0.0, float("inf"))):
"""
Argument:
x: (batch, seqlen, nheads, headdim)
dt: (batch, seqlen, nheads) or (batch, seqlen, nheads, headdim)
A: (nheads) or (dim, dstate)
B: (batch, seqlen, ngroups, dstate)
C: (batch, seqlen, ngroups, dstate)
D: (nheads, headdim) or (nheads,)
z: (batch, seqlen, nheads, headdim)
dt_bias: (nheads,) or (nheads, headdim)
Return:
out: (batch, seqlen, nheads, headdim)
"""
from mamba_ssm.ops.selective_scan_interface import selective_scan_fn
batch, seqlen, nheads, headdim = x.shape
_, _, ngroups, dstate = B.shape
x = rearrange(x, "b l h p -> b (h p) l")
if dt.dim() == 3:
dt = repeat(dt, "b l h -> b l h p", p=headdim)
dt = rearrange(dt, "b l h p -> b (h p) l")
if A.dim() == 1:
A = repeat(A, "h -> (h p) n", p=headdim, n=dstate).to(dtype=torch.float32)
else:
A = A.to(dtype=torch.float32)
B = rearrange(B, "b l g n -> b g n l")
C = rearrange(C, "b l g n -> b g n l")
if D is not None:
if D.dim() == 2:
D = rearrange(D, "h p -> (h p)")
else:
D = repeat(D, "h -> (h p)", p=headdim)
if z is not None:
z = rearrange(z, "b l h p -> b (h p) l")
if dt_bias is not None:
if dt_bias.dim() == 1:
dt_bias = repeat(dt_bias, "h -> h p", p=headdim)
dt_bias = rearrange(dt_bias, "h p -> (h p)")
if dt_limit != (0.0, float("inf")):
if dt_bias is not None:
dt = dt + rearrange(dt_bias, "d -> d 1")
if dt_softplus:
dt = F.softplus(dt)
dt = dt.clamp(min=dt_limit[0], max=dt_limit[1]).to(x.dtype)
dt_bias = None
dt_softplus = None
out = selective_scan_fn(x, dt, A, B, C, D=D, z=z, delta_bias=dt_bias, delta_softplus=dt_softplus)
return rearrange(out, "b (h p) l -> b l h p", p=headdim)
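# Layout note: this reference flattens the multi-head SSD layout into the classic
# selective-scan layout, i.e. x/z go from (batch, seqlen, nheads, headdim) to
# (batch, nheads * headdim, seqlen) and a per-head A of shape (nheads,) is broadcast to
# (nheads * headdim, dstate), so selective_scan_fn can serve as a slow reference for the
# chunked kernels above.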
def mamba_conv1d_scan_ref(xBC, conv1d_weight, conv1d_bias, dt, A, chunk_size, D=None, z=None,
dt_bias=None, dt_softplus=False, dt_limit=(0.0, float("inf")),
activation="silu", headdim=None, ngroups=1):
"""
Argument:
xBC: (batch, seqlen, dim + 2 * ngroups * dstate) where dim == nheads * headdim
conv1d_weight: (dim + 2 * ngroups * dstate, width)
conv1d_bias: (dim + 2 * ngroups * dstate,)
dt: (batch, seqlen, nheads) or (batch, seqlen, nheads, headdim)
A: (nheads)
D: (nheads, headdim) or (nheads,)
z: (batch, seqlen, dim)
dt_bias: (nheads) or (nheads, headdim)
headdim: if D is 1D and z is None, headdim must be passed in
Return:
out: (batch, seqlen, dim)
"""
batch, seqlen, nheads = dt.shape[:3]
assert nheads % ngroups == 0
if z is not None:
dim = z.shape[-1]
assert dim % nheads == 0
headdim = dim // nheads
else:
if D.dim() == 1:
assert headdim is not None
else:
headdim = D.shape[1]
dim = nheads * headdim
xBC = rearrange(causal_conv1d_fn(rearrange(xBC, "b s d -> b d s"), conv1d_weight, conv1d_bias, activation=activation),
"b d s -> b s d")
dstate = (xBC.shape[-1] - dim) // ngroups // 2
x, B, C = torch.split(xBC, [dim, ngroups * dstate, ngroups * dstate], dim=-1)
x = rearrange(x, "b l (h p) -> b l h p", h=nheads)
B = rearrange(B, "b l (g n) -> b l g n", g=ngroups)
C = rearrange(C, "b l (g n) -> b l g n", g=ngroups)
z = rearrange(z, "b l (h p) -> b l h p", h=nheads) if z is not None else None
out = ssd_selective_scan(x, dt.to(x.dtype), A, B, C, D=D.float(), z=z, dt_bias=dt_bias, dt_softplus=dt_softplus, dt_limit=dt_limit)
return rearrange(out, "b s h p -> b s (h p)")
class MambaSplitConv1dScanCombinedFn(torch.autograd.Function):
@staticmethod
@custom_fwd
def forward(ctx, zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, initial_states=None, seq_idx=None, dt_limit=(0.0, float("inf")), return_final_states=False, activation="silu",
rmsnorm_weight=None, rmsnorm_eps=1e-6, outproj_weight=None, outproj_bias=None, headdim=None,
ngroups=1, norm_before_gate=True):
assert activation in [None, "silu", "swish"]
if D.dim() == 1:
assert headdim is not None
nheads, = D.shape
else:
nheads, headdim = D.shape
batch, seqlen, _ = zxbcdt.shape
dim = nheads * headdim
assert nheads % ngroups == 0
dstate = (conv1d_weight.shape[0] - dim) // ngroups // 2
d_nonssm = (zxbcdt.shape[-1] - 2 * dim - 2 * ngroups * dstate - nheads) // 2
assert d_nonssm >= 0
assert zxbcdt.shape == (batch, seqlen, 2 * d_nonssm + 2 * dim + 2 * ngroups * dstate + nheads)
assert dt_bias.shape == (nheads,)
assert A.shape == (nheads,)
zx0, z, xBC, dt = torch.split(zxbcdt, [2 * d_nonssm, dim, dim + ngroups * dstate * 2, nheads], dim=-1)
seq_idx = seq_idx.contiguous() if seq_idx is not None else None
xBC_conv = rearrange(
causal_conv1d_cuda.causal_conv1d_fwd(rearrange(xBC, "b s d -> b d s"),
conv1d_weight, conv1d_bias, seq_idx, None, None, activation in ["silu", "swish"]),
"b d s -> b s d"
)
x, B, C = torch.split(xBC_conv, [dim, ngroups * dstate, ngroups * dstate], dim=-1)
x = rearrange(x, "b l (h p) -> b l h p", h=nheads)
B = rearrange(B, "b l (g n) -> b l g n", g=ngroups)
C = rearrange(C, "b l (g n) -> b l g n", g=ngroups)
z = rearrange(z, "b l (h p) -> b l h p", h=nheads) if z is not None else None
if rmsnorm_weight is None:
out, out_x, dt_out, dA_cumsum, states, final_states = _mamba_chunk_scan_combined_fwd(x, dt, A, B, C, chunk_size=chunk_size, D=D, z=z, dt_bias=dt_bias, initial_states=initial_states, seq_idx=seq_idx, dt_softplus=True, dt_limit=dt_limit)
out = rearrange(out, "b s h p -> b s (h p)")
rstd = None
if d_nonssm > 0:
out = torch.cat([_swiglu_fwd(zx0), out], dim=-1)
else:
out_x, _, dt_out, dA_cumsum, states, final_states = _mamba_chunk_scan_combined_fwd(x, dt, A, B, C, chunk_size=chunk_size, D=D, z=None, dt_bias=dt_bias, initial_states=initial_states, seq_idx=seq_idx, dt_softplus=True, dt_limit=dt_limit)
# reshape input data into 2D tensor
x_rms = rearrange(out_x, "b s h p -> (b s) (h p)")
z_rms = rearrange(z, "b s h p -> (b s) (h p)")
rmsnorm_weight = rmsnorm_weight.contiguous()
if d_nonssm == 0:
out = None
else:
out01 = torch.empty((batch, seqlen, d_nonssm + dim), dtype=x_rms.dtype, device=x_rms.device)
out = rearrange(out01[..., d_nonssm:], "b s d -> (b s) d")
_swiglu_fwd(zx0, out=out01[..., :d_nonssm])
out, _, rstd = _layer_norm_fwd(x_rms, rmsnorm_weight, None, rmsnorm_eps, z_rms, out=out,
group_size=dim // ngroups,
norm_before_gate=norm_before_gate, is_rms_norm=True)
if d_nonssm == 0:
out = rearrange(out, "(b s) d -> b s d", b=batch)
else:
out = out01
ctx.outproj_weight_dtype = outproj_weight.dtype if outproj_weight is not None else None
if outproj_weight is not None:
if torch.is_autocast_enabled():
dtype = torch.get_autocast_gpu_dtype()
out, outproj_weight = out.to(dtype), outproj_weight.to(dtype)
outproj_bias = outproj_bias.to(dtype) if outproj_bias is not None else None
out = F.linear(out, outproj_weight, outproj_bias)
else:
assert outproj_bias is None
ctx.save_for_backward(zxbcdt, conv1d_weight, conv1d_bias,
out_x, A, D, dt_bias, initial_states, seq_idx, rmsnorm_weight, rstd, outproj_weight, outproj_bias)
ctx.dt_limit = dt_limit
ctx.return_final_states = return_final_states
ctx.activation = activation
ctx.rmsnorm_eps = rmsnorm_eps
ctx.norm_before_gate = norm_before_gate
ctx.chunk_size = chunk_size
ctx.headdim = headdim
ctx.ngroups = ngroups
return out if not return_final_states else (out, final_states)
@staticmethod
@custom_bwd
def backward(ctx, dout, *args):
zxbcdt, conv1d_weight, conv1d_bias, out, A, D, dt_bias, initial_states, seq_idx, rmsnorm_weight, rstd, outproj_weight, outproj_bias = ctx.saved_tensors
dfinal_states = args[0] if ctx.return_final_states else None
headdim = ctx.headdim
nheads = D.shape[0]
dim = nheads * headdim
assert nheads % ctx.ngroups == 0
dstate = (conv1d_weight.shape[0] - dim) // ctx.ngroups // 2
d_nonssm = (zxbcdt.shape[-1] - 2 * dim - 2 * ctx.ngroups * dstate - nheads) // 2
assert d_nonssm >= 0
recompute_output = outproj_weight is not None
if recompute_output:
out_recompute = torch.empty(*out.shape[:2], d_nonssm + dim, device=out.device, dtype=out.dtype)
out0_recompute, out1_recompute = out_recompute.split([d_nonssm, dim], dim=-1)
zx0, z, xBC, dt = torch.split(zxbcdt, [2 * d_nonssm, dim, dim + 2 * ctx.ngroups * dstate, nheads], dim=-1)
# Recompute x, B, C
xBC_conv = rearrange(
causal_conv1d_cuda.causal_conv1d_fwd(rearrange(xBC, "b s d -> b d s"),
conv1d_weight, conv1d_bias, seq_idx, None, None, ctx.activation in ["silu", "swish"]),
"b d s -> b s d"
)
x, B, C = torch.split(xBC_conv, [dim, ctx.ngroups * dstate, ctx.ngroups * dstate], dim=-1)
x = rearrange(x, "b l (h p) -> b l h p", h=nheads)
B = rearrange(B, "b l (g n) -> b l g n", g=ctx.ngroups)
C = rearrange(C, "b l (g n) -> b l g n", g=ctx.ngroups)
dzxbcdt = torch.empty_like(zxbcdt)
dzx0, dz, dxBC_given, ddt_given = torch.split(dzxbcdt, [2 * d_nonssm, dim, dim + 2 * ctx.ngroups * dstate, nheads], dim=-1)
dxBC = torch.empty_like(xBC)
dx, dB, dC = torch.split(dxBC, [dim, ctx.ngroups * dstate, ctx.ngroups * dstate], dim=-1)
z = rearrange(z, "b l (h p) -> b l h p", h=nheads)
dx = rearrange(dx, "b l (h p) -> b l h p", h=nheads)
dB = rearrange(dB, "b l (g n) -> b l g n", g=ctx.ngroups)
dC = rearrange(dC, "b l (g n) -> b l g n", g=ctx.ngroups)
if outproj_weight is not None:
dout_og = dout
dout = F.linear(dout, outproj_weight.t())
if d_nonssm > 0:
dout0, dout = dout.split([d_nonssm, dim], dim=-1)
_swiglu_bwd(zx0, dout0, dxy=dzx0, recompute_output=True, out=out0_recompute)
dout = rearrange(dout, "b s (h p) -> b s h p", p=headdim)
if rmsnorm_weight is None:
dz = rearrange(dz, "b l (h p) -> b l h p", h=nheads)
dx, ddt, dA, dB, dC, dD, dz, ddt_bias, dinitial_states, *rest = _mamba_chunk_scan_combined_bwd(
dout, x, dt, A, B, C, out, ctx.chunk_size, D=D, z=z, dt_bias=dt_bias, initial_states=initial_states, dfinal_states=dfinal_states, seq_idx=seq_idx, dt_softplus=True, dt_limit=ctx.dt_limit, dx=dx, ddt=ddt_given, dB=dB, dC=dC, dz=dz, recompute_output=recompute_output
)
out_for_linear = rearrange(rest[0], "b s h p -> b s (h p)") if recompute_output else None
drmsnorm_weight = None
else:
batch = dout.shape[0]
dy_rms = rearrange(dout, "b s h p -> (b s) (h p)")
dz = rearrange(dz, "b l d -> (b l) d")
x_rms = rearrange(out, "b s h p -> (b s) (h p)")
z_rms = rearrange(z, "b s h p -> (b s) (h p)")
out1_recompute = rearrange(out1_recompute, "b s d -> (b s) d") if recompute_output else None
dout, drmsnorm_weight, _, dz, *rest = _layer_norm_bwd(dy_rms, x_rms, rmsnorm_weight, None, ctx.rmsnorm_eps, None, rstd, z_rms, norm_before_gate=ctx.norm_before_gate, is_rms_norm=True, recompute_output=recompute_output, dz=dz, out=out1_recompute if recompute_output else None)
out_for_linear = out_recompute if recompute_output else None
dout = rearrange(dout, "(b s) (h p) -> b s h p", b=batch, p=headdim)
dx, ddt, dA, dB, dC, dD, _, ddt_bias, dinitial_states = _mamba_chunk_scan_combined_bwd(
dout, x, dt, A, B, C, out, ctx.chunk_size, D=D, z=None, dt_bias=dt_bias, initial_states=initial_states, dfinal_states=dfinal_states, seq_idx=seq_idx, dt_softplus=True, dt_limit=ctx.dt_limit, dx=dx, ddt=ddt_given, dB=dB, dC=dC
)
if outproj_weight is not None:
doutproj_weight = torch.einsum("bso,bsd->od", dout_og, out_for_linear)
doutproj_bias = dout_og.sum(dim=(0, 1)) if outproj_bias is not None else None
else:
doutproj_weight, doutproj_bias = None, None
dxBC_given = rearrange(dxBC_given, "b s d -> b d s")
dxBC_given, dweight, dbias, *_ = causal_conv1d_cuda.causal_conv1d_bwd(
rearrange(xBC, "b s d -> b d s"), conv1d_weight, conv1d_bias,
rearrange(dxBC, "b s d -> b d s"), seq_idx, None, None, dxBC_given, False, ctx.activation in ["silu", "swish"]
)
dxBC_given = rearrange(dxBC_given, "b d s -> b s d")
return dzxbcdt, dweight, dbias, ddt_bias, dA, dD, None, dinitial_states, None, None, None, None, drmsnorm_weight, None, doutproj_weight, doutproj_bias, None, None, None
def mamba_split_conv1d_scan_combined(zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, initial_states=None, seq_idx=None, dt_limit=(0.0, float("inf")), return_final_states=False, activation="silu", rmsnorm_weight=None, rmsnorm_eps=1e-6, outproj_weight=None, outproj_bias=None, headdim=None, ngroups=1, norm_before_gate=True):
"""
Argument:
zxbcdt: (batch, seqlen, 2 * dim + 2 * ngroups * dstate + nheads) where dim == nheads * headdim
conv1d_weight: (dim + 2 * ngroups * dstate, width)
conv1d_bias: (dim + 2 * ngroups * dstate,)
dt_bias: (nheads,)
A: (nheads)
D: (nheads, headdim) or (nheads,)
initial_states: (batch, nheads, headdim, dstate)
seq_idx: (batch, seqlen), int32
rmsnorm_weight: (dim,)
outproj_weight: (out_dim, dim)
outproj_bias: (out_dim,)
headdim: if D is 1D, headdim must be passed in
norm_before_gate: if True, we do RMSNorm(x) * F.silu(z). If False, we do RMSNorm(x * F.silu(z))
Return:
out: (batch, seqlen, dim)
"""
return MambaSplitConv1dScanCombinedFn.apply(zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, initial_states, seq_idx, dt_limit, return_final_states, activation, rmsnorm_weight, rmsnorm_eps, outproj_weight, outproj_bias, headdim, ngroups, norm_before_gate)
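# Illustrative usage (a minimal sketch; the sizes below are assumptions, and the fused path
# requires the causal_conv1d CUDA extension to be installed):
#
#     import torch
#     batch, seqlen, nheads, headdim, ngroups, dstate, width, chunk_size = 2, 512, 8, 64, 1, 16, 4, 256
#     dim = nheads * headdim
#     conv_dim = dim + 2 * ngroups * dstate
#     device = "cuda"
#     zxbcdt = torch.randn(batch, seqlen, 2 * dim + 2 * ngroups * dstate + nheads, device=device)
#     conv1d_weight = torch.randn(conv_dim, width, device=device)
#     conv1d_bias = torch.randn(conv_dim, device=device)
#     dt_bias = torch.randn(nheads, device=device)
#     A = -torch.rand(nheads, device=device)
#     D = torch.randn(nheads, device=device)
#     out = mamba_split_conv1d_scan_combined(zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D,
#                                            chunk_size, headdim=headdim, ngroups=ngroups)
#     # out: (batch, seqlen, dim) since outproj_weight is None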
def mamba_split_conv1d_scan_ref(zxbcdt, conv1d_weight, conv1d_bias, dt_bias, A, D, chunk_size, dt_limit=(0.0, float("inf")), activation="silu", rmsnorm_weight=None, rmsnorm_eps=1e-6, outproj_weight=None, outproj_bias=None, headdim=None, ngroups=1, norm_before_gate=True):
"""
Argument:
zxbcdt: (batch, seqlen, 2 * dim + 2 * ngroups * dstate + nheads) where dim == nheads * headdim
conv1d_weight: (dim + 2 * ngroups * dstate, width)
conv1d_bias: (dim + 2 * ngroups * dstate,)
dt_bias: (nheads,)
A: (nheads)
D: (nheads, headdim) or (nheads,)
rmsnorm_weight: (dim,)
outproj_weight: (out_dim, dim)
outproj_bias: (out_dim,)
headdim: if D is 1D, headdim must be passed in
norm_before_gate: if True, we do RMSNorm(x) * F.silu(z). If False, we do RMSNorm(x * F.silu(z))
Return:
out: (batch, seqlen, dim)
"""
if D.dim() == 1:
assert headdim is not None
nheads, = D.shape
else:
nheads, headdim = D.shape
assert nheads % ngroups == 0
batch, seqlen, _ = zxbcdt.shape
dim = nheads * headdim
dstate = (zxbcdt.shape[-1] - 2 * dim - nheads) // ngroups // 2
assert zxbcdt.shape == (batch, seqlen, 2 * dim + 2 * ngroups * dstate + nheads)
assert dt_bias.shape == (nheads,)
assert A.shape == (nheads,)
if rmsnorm_weight is not None:
assert rmsnorm_weight.shape == (dim,)
z, xBC, dt = torch.split(zxbcdt, [dim, dim + 2 * ngroups * dstate, nheads], dim=-1)
xBC = rearrange(causal_conv1d_fn(rearrange(xBC, "b s d -> b d s"), conv1d_weight, conv1d_bias, activation=activation),
"b d s -> b s d")
x, B, C = torch.split(xBC, [dim, ngroups * dstate, ngroups * dstate], dim=-1)
x = rearrange(x, "b l (h p) -> b l h p", h=nheads)
B = rearrange(B, "b l (g n) -> b l g n", g=ngroups)
C = rearrange(C, "b l (g n) -> b l g n", g=ngroups)
z = rearrange(z, "b l (h p) -> b l h p", h=nheads)
out = ssd_selective_scan(x, dt.to(x.dtype), A, B, C, D=D.float(),
z=z if rmsnorm_weight is None else None, dt_bias=dt_bias, dt_softplus=True, dt_limit=dt_limit)
out = rearrange(out, "b s h p -> b s (h p)")
if rmsnorm_weight is not None:
out = rmsnorm_fn(out, rmsnorm_weight, None, z=rearrange(z, "b l h p -> b l (h p)"), eps=rmsnorm_eps,
norm_before_gate=norm_before_gate)
if outproj_weight is not None:
out = F.linear(out, outproj_weight, outproj_bias)
return out
# Copyright (c) 2024, Tri Dao, Albert Gu.
"""We want triton==2.1.0 or 2.2.0 for this
"""
import math
import torch
import torch.nn.functional as F
import triton
import triton.language as tl
from einops import rearrange, repeat
@triton.autotune(
configs=[
triton.Config({'BLOCK_SIZE': 64}),
triton.Config({'BLOCK_SIZE': 128}),
triton.Config({'BLOCK_SIZE': 256}),
triton.Config({'BLOCK_SIZE': 512}),
triton.Config({'BLOCK_SIZE': 1024}),
triton.Config({'BLOCK_SIZE': 2048}),
],
key=['dim'],
)
@triton.jit
def _state_passing_fwd_kernel(
# Pointers to matrices
states_ptr, out_ptr, final_states_ptr, dA_cs_ptr, initstates_ptr, seq_idx_ptr,
# Matrix dimensions
dim, nchunks, seqlen, chunk_size,
# Strides
stride_states_batch, stride_states_chunk, stride_states_head, stride_states_dim,
stride_out_batch, stride_out_chunk, stride_out_head, stride_out_dim,
stride_final_states_batch, stride_final_states_head, stride_final_states_dim,
stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head,
stride_initstates_batch, stride_initstates_head, stride_initstates_dim,
stride_seq_idx_batch, stride_seq_idx_seqlen,
# Meta-parameters
HAS_INITSTATES: tl.constexpr,
HAS_SEQ_IDX: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
):
pid_b = tl.program_id(axis=1)
pid_h = tl.program_id(axis=2)
pid_m = tl.program_id(axis=0)
states_ptr += pid_b * stride_states_batch + pid_h * stride_states_head
dA_cs_ptr += pid_b * stride_dA_cs_batch + pid_h * stride_dA_cs_head
out_ptr += pid_b * stride_out_batch + pid_h * stride_out_head
final_states_ptr += pid_b * stride_final_states_batch + pid_h * stride_final_states_head
if HAS_INITSTATES:
initstates_ptr += pid_b * stride_initstates_batch + pid_h * stride_initstates_head
if HAS_SEQ_IDX:
seq_idx_ptr += pid_b * stride_seq_idx_batch
offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
states_ptrs = states_ptr + offs_m * stride_states_dim
out_ptrs = out_ptr + offs_m * stride_out_dim
final_states_ptrs = final_states_ptr + offs_m * stride_final_states_dim
if not HAS_INITSTATES:
states = tl.zeros((BLOCK_SIZE, ), dtype=tl.float32)
else:
initstates_ptrs = initstates_ptr + offs_m * stride_initstates_dim
states = tl.load(initstates_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
tl.store(out_ptrs, states, mask=offs_m < dim)
out_ptrs += stride_out_chunk
seq_idx = 0
for c in range(nchunks):
new_states = tl.load(states_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
dA_cs = tl.load(dA_cs_ptr).to(tl.float32)
scale = tl.exp(dA_cs)
if HAS_SEQ_IDX:
seq_idx_new = tl.load(seq_idx_ptr + (min((c + 1) * chunk_size, seqlen) - 1) * stride_seq_idx_seqlen)
scale = tl.where(seq_idx_new == seq_idx, scale, 0.0)
seq_idx = seq_idx_new
states = scale * states + new_states
if c < nchunks - 1:
tl.store(out_ptrs, states, mask=offs_m < dim)
else:
tl.store(final_states_ptrs, states, mask=offs_m < dim)
states_ptrs += stride_states_chunk
dA_cs_ptr += stride_dA_cs_chunk
out_ptrs += stride_out_chunk
@triton.autotune(
configs=[
triton.Config({'BLOCK_SIZE': 64}),
triton.Config({'BLOCK_SIZE': 128}),
triton.Config({'BLOCK_SIZE': 256}),
triton.Config({'BLOCK_SIZE': 512}),
triton.Config({'BLOCK_SIZE': 1024}),
triton.Config({'BLOCK_SIZE': 2048}),
],
key=['dim'],
)
@triton.jit
def _state_passing_bwd_kernel(
# Pointers to matrices
dout_ptr, out_ptr, dA_cs_ptr, dfinal_states_ptr, seq_idx_ptr,
dstates_ptr, ddA_cs_ptr, dinitstates_ptr, states_converted_ptr,
# Matrix dimensions
dim, nchunks, seqlen, chunk_size,
# Strides
stride_dout_batch, stride_dout_chunk, stride_dout_head, stride_dout_dim,
stride_out_batch, stride_out_chunk, stride_out_head, stride_out_dim,
stride_dA_cs_batch, stride_dA_cs_chunk, stride_dA_cs_head,
stride_dfinal_states_batch, stride_dfinal_states_head, stride_dfinal_states_dim,
stride_seq_idx_batch, stride_seq_idx_seqlen,
stride_dstates_batch, stride_dstates_chunk, stride_dstates_head, stride_dstates_dim,
stride_ddA_cs_batch, stride_ddA_cs_chunk, stride_ddA_cs_head,
stride_dinitstates_batch, stride_dinitstates_head, stride_dinitstates_dim,
# Meta-parameters
CONVERT_STATES: tl.constexpr,
HAS_DFINAL_STATES: tl.constexpr,
HAS_DINITSTATES: tl.constexpr,
HAS_SEQ_IDX: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
):
pid_b = tl.program_id(axis=1)
pid_h = tl.program_id(axis=2)
pid_m = tl.program_id(axis=0)
dstates_ptr += pid_b * stride_dstates_batch + pid_h * stride_dstates_head + (nchunks - 1) * stride_dstates_chunk
dA_cs_ptr += pid_b * stride_dA_cs_batch + pid_h * stride_dA_cs_head + (nchunks - 1) * stride_dA_cs_chunk
ddA_cs_ptr += pid_b * stride_ddA_cs_batch + pid_h * stride_ddA_cs_head + (nchunks - 1) * stride_ddA_cs_chunk + pid_m
out_ptr += pid_b * stride_out_batch + pid_h * stride_out_head + (nchunks - 1) * stride_out_chunk
dout_ptr += pid_b * stride_dout_batch + pid_h * stride_dout_head + (nchunks - 1) * stride_dout_chunk
if CONVERT_STATES:
states_converted_ptr += pid_b * stride_out_batch + pid_h * stride_out_head + (nchunks - 1) * stride_out_chunk
if HAS_DFINAL_STATES:
dfinal_states_ptr += pid_b * stride_dfinal_states_batch + pid_h * stride_dfinal_states_head
if HAS_DINITSTATES:
dinitstates_ptr += pid_b * stride_dinitstates_batch + pid_h * stride_dinitstates_head
if HAS_SEQ_IDX:
seq_idx_ptr += pid_b * stride_seq_idx_batch
offs_m = pid_m * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
dstates_ptrs = dstates_ptr + offs_m * stride_dstates_dim
out_ptrs = out_ptr + offs_m * stride_out_dim
dout_ptrs = dout_ptr + offs_m * stride_dout_dim
if CONVERT_STATES:
states_converted_ptrs = states_converted_ptr + offs_m * stride_out_dim
if HAS_DFINAL_STATES:
dstates = tl.load(dfinal_states_ptr + offs_m * stride_dfinal_states_dim, mask=offs_m < dim, other=0.0).to(tl.float32)
else:
dstates = tl.zeros((BLOCK_SIZE, ), dtype=tl.float32)
tl.store(dstates_ptrs, dstates, mask=offs_m < dim)
if HAS_SEQ_IDX:
seq_idx = tl.load(seq_idx_ptr + (seqlen - 1) * stride_seq_idx_seqlen)
dstates_ptrs -= stride_dstates_chunk
for c in range(nchunks - 1):
dA_cs = tl.load(dA_cs_ptr).to(tl.float32)
scale = tl.exp(dA_cs)
if HAS_SEQ_IDX:
seq_idx_new = tl.load(seq_idx_ptr + (((nchunks - c - 1) * chunk_size - 1) * stride_seq_idx_seqlen))
scale = tl.where(seq_idx_new == seq_idx, scale, 0.0)
seq_idx = seq_idx_new
out = tl.load(out_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
if CONVERT_STATES:
tl.store(states_converted_ptrs, out, mask=offs_m < dim)
ddA = tl.sum(out * dstates) * scale
tl.store(ddA_cs_ptr, ddA)
dout = tl.load(dout_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
dstates = scale * dstates + dout
tl.store(dstates_ptrs, dstates, mask=offs_m < dim)
dout_ptrs -= stride_dout_chunk
dstates_ptrs -= stride_dstates_chunk
dA_cs_ptr -= stride_dA_cs_chunk
ddA_cs_ptr -= stride_ddA_cs_chunk
out_ptrs -= stride_out_chunk
if CONVERT_STATES:
states_converted_ptrs -= stride_out_chunk
if CONVERT_STATES:
out = tl.load(out_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
tl.store(states_converted_ptrs, out, mask=offs_m < dim)
if not HAS_DINITSTATES:
tl.store(ddA_cs_ptr, 0.0)
else:
dA_cs = tl.load(dA_cs_ptr).to(tl.float32)
scale = tl.exp(dA_cs)
if HAS_SEQ_IDX:
scale = tl.where(seq_idx == 0, scale, 0.0)
out = tl.load(out_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
ddA = tl.sum(out * dstates) * scale
tl.store(ddA_cs_ptr, ddA)
dout = tl.load(dout_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32)
dstates = scale * dstates + dout
tl.store(dinitstates_ptr + offs_m * stride_dinitstates_dim, dstates, mask=offs_m < dim)
def _state_passing_fwd(states, dA_chunk_cumsum, initial_states=None, seq_idx=None, chunk_size=None,
out_dtype=None):
batch, nchunks, nheads, dim = states.shape
assert dA_chunk_cumsum.shape == (batch, nheads, nchunks)
if initial_states is not None:
assert initial_states.shape == (batch, nheads, dim)
if seq_idx is not None:
assert chunk_size is not None
seqlen = seq_idx.shape[-1]
assert seq_idx.shape == (batch, seqlen)
out_dtype = states.dtype if out_dtype is None else out_dtype
out = torch.empty((batch, nchunks, nheads, dim), device=states.device, dtype=out_dtype)
final_states = torch.empty((batch, nheads, dim), device=states.device, dtype=torch.float32)
grid = lambda META: (triton.cdiv(dim, META['BLOCK_SIZE']), batch, nheads)
with torch.cuda.device(states.device.index):
_state_passing_fwd_kernel[grid](
states, out, final_states, dA_chunk_cumsum, initial_states, seq_idx,
dim, nchunks, seqlen if seq_idx is not None else 0, chunk_size if seq_idx is not None else 0,
states.stride(0), states.stride(1), states.stride(2), states.stride(3),
out.stride(0), out.stride(1), out.stride(2), out.stride(3),
final_states.stride(0), final_states.stride(1), final_states.stride(2),
dA_chunk_cumsum.stride(0), dA_chunk_cumsum.stride(2), dA_chunk_cumsum.stride(1),
*((initial_states.stride(0), initial_states.stride(1), initial_states.stride(2))
if initial_states is not None else (0, 0, 0)),
*((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)),
HAS_INITSTATES=initial_states is not None,
HAS_SEQ_IDX=seq_idx is not None,
)
return out, final_states
def _state_passing_bwd(
states, dA_chunk_cumsum, dout, dfinal_states=None, seq_idx=None, has_initial_states=None,
dstates_dtype=None, states_dtype=None, chunk_size=None
):
"""
states contains the initial_states at index 0. The final states are not included in states.
"""
batch, nchunks, nheads, dim = states.shape
assert dA_chunk_cumsum.shape == (batch, nheads, nchunks)
assert dout.shape == (batch, nchunks, nheads, dim)
if seq_idx is not None:
assert chunk_size is not None
seqlen = seq_idx.shape[-1]
assert seq_idx.shape == (batch, seqlen)
dstates = torch.empty_like(dout, dtype=dstates_dtype if dstates_dtype is not None else dout.dtype)
if states_dtype is not None and states_dtype != states.dtype:
states_converted = torch.empty_like(states, dtype=dstates_dtype if dstates_dtype is not None else dout.dtype)
assert states_converted.stride() == states.stride()
else:
states_converted = None
if has_initial_states:
dinitstates = torch.empty_like(dstates[:, 0])
else:
dinitstates = None
if dfinal_states is not None:
assert dfinal_states.shape == (batch, nheads, dim)
BLOCK_SIZE_min = 64
n_blocks = (dim + BLOCK_SIZE_min - 1) // BLOCK_SIZE_min
ddA_chunk_cumsum = torch.empty(batch, nheads, nchunks, n_blocks,
dtype=torch.float32, device=dA_chunk_cumsum.device)
grid = lambda META: (triton.cdiv(dim, META['BLOCK_SIZE']), batch, nheads)
with torch.cuda.device(dout.device.index):
_state_passing_bwd_kernel[grid](
dout, states, dA_chunk_cumsum, dfinal_states, seq_idx,
dstates, ddA_chunk_cumsum, dinitstates, states_converted,
dim, nchunks, seqlen if seq_idx is not None else 0, chunk_size if seq_idx is not None else 0,
dout.stride(0), dout.stride(1), dout.stride(2), dout.stride(3),
states.stride(0), states.stride(1), states.stride(2), states.stride(3),
dA_chunk_cumsum.stride(0), dA_chunk_cumsum.stride(2), dA_chunk_cumsum.stride(1),
*((dfinal_states.stride(0), dfinal_states.stride(1), dfinal_states.stride(2))
if dfinal_states is not None else (0, 0, 0)),
*((seq_idx.stride(0), seq_idx.stride(1)) if seq_idx is not None else (0, 0)),
dstates.stride(0), dstates.stride(1), dstates.stride(2), dstates.stride(3),
ddA_chunk_cumsum.stride(0), ddA_chunk_cumsum.stride(2), ddA_chunk_cumsum.stride(1),
*((dinitstates.stride(0), dinitstates.stride(1), dinitstates.stride(2))
if dinitstates is not None else (0, 0, 0)),
CONVERT_STATES=states_converted is not None,
HAS_DFINAL_STATES=dfinal_states is not None,
HAS_DINITSTATES=dinitstates is not None,
HAS_SEQ_IDX=seq_idx is not None,
)
BLOCK_SIZE_actual = _state_passing_bwd_kernel.best_config.kwargs["BLOCK_SIZE"]
n_valid_blocks = (dim + BLOCK_SIZE_actual - 1) // BLOCK_SIZE_actual
ddA_chunk_cumsum = ddA_chunk_cumsum[..., :n_valid_blocks].sum(dim=-1).to(dtype=dA_chunk_cumsum.dtype)
if states_dtype is not None and states_dtype == states.dtype:
states_converted = states
return (dstates, ddA_chunk_cumsum, dinitstates) if states_dtype is None else (dstates, ddA_chunk_cumsum, dinitstates, states_converted)
class StatePassingFn(torch.autograd.Function):
@staticmethod
def forward(ctx, states, dA_chunk_cumsum, initial_states=None):
batch, nchunks, nheads, dim = states.shape
assert dA_chunk_cumsum.shape == (batch, nheads, nchunks)
if states.stride(-1) != 1:
states = states.contiguous()
out, final_states = _state_passing_fwd(states, dA_chunk_cumsum, initial_states)
ctx.save_for_backward(out, dA_chunk_cumsum)
ctx.has_initial_states = initial_states is not None
return out, final_states
@staticmethod
def backward(ctx, dout, dfinal_states):
out, dA_chunk_cumsum = ctx.saved_tensors
batch, nchunks, nheads, dim = out.shape
assert dout.shape == (batch, nchunks, nheads, dim)
assert dA_chunk_cumsum.shape == (batch, nheads, nchunks)
assert dfinal_states.shape == (batch, nheads, dim)
if dout.stride(-1) != 1:
dout = dout.contiguous()
dstates, ddA_chunk_cumsum, dinitstates = _state_passing_bwd(
            out, dA_chunk_cumsum, dout, dfinal_states=dfinal_states, has_initial_states=ctx.has_initial_states
)
return dstates, ddA_chunk_cumsum, dinitstates
def state_passing(states, dA_chunk_cumsum, initial_states=None):
"""
Argument:
states: (batch, nchunks, nheads, dim)
dA_chunk_cumsum: (batch, nheads, nchunks)
initial_states: (batch, nheads, dim)
Return:
out: (batch, nchunks, nheads, dim)
final_states: (batch, nheads, dim)
"""
return StatePassingFn.apply(states, dA_chunk_cumsum, initial_states)
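# A minimal usage sketch of state_passing (illustrative only, not part of the library;
# the shapes below are hypothetical and a CUDA device is assumed since the Triton
# kernels run on GPU):
def _example_state_passing():
    import torch
    batch, nchunks, nheads, dim = 2, 4, 8, 64
    states = torch.randn(batch, nchunks, nheads, dim, device="cuda")
    dA_chunk_cumsum = torch.randn(batch, nheads, nchunks, device="cuda")
    initial_states = torch.zeros(batch, nheads, dim, device="cuda")
    out, final_states = state_passing(states, dA_chunk_cumsum, initial_states)
    # out: (batch, nchunks, nheads, dim); final_states: (batch, nheads, dim)
    return out, final_states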
def state_passing_ref(states, dA_chunk_cumsum, initial_states=None):
"""
Argument:
states: (batch, nchunks, nheads, dim)
dA_chunk_cumsum: (batch, nheads, nchunks)
initial_states: (batch, nheads, dim)
Return:
out: (batch, nchunks, nheads, dim)
final_states: (batch, nheads, dim)
"""
if initial_states is None:
initial_states = torch.zeros_like(states[:, 0])
states = torch.cat([rearrange(initial_states, "b h d -> b 1 h d"), states], dim=1)
dA_chunk_cumsum = F.pad(dA_chunk_cumsum, (1, 0))
dA_chunk_cumsum = torch.cumsum(dA_chunk_cumsum, dim=-1)
nchunks = dA_chunk_cumsum.shape[-1]
# (batch, nheads, nchunks, nchunks)
dt_chunk_segment_sum = dA_chunk_cumsum[:, :, :, None] - dA_chunk_cumsum[:, :, None, :]
# (batch, nheads, nchunks, nchunks)
decay_chunk = torch.exp(dt_chunk_segment_sum)
causal_mask = torch.tril(torch.ones(nchunks, nchunks, device=states.device, dtype=bool), diagonal=0)
decay_chunk = decay_chunk.masked_fill(~causal_mask, 0)
out = torch.einsum("bhzc,bchd->bzhd", decay_chunk.to(dtype=states.dtype), states)
return out[:, :-1], out[:, -1]
# Copyright (c) 2023, Albert Gu, Tri Dao.
import gc
import time
from collections import namedtuple
from dataclasses import dataclass, field
from functools import partial
from typing import Callable, Optional, Sequence, Union
import torch
import torch.nn.functional as F
from einops import rearrange, repeat
from torch import Tensor
from torch.profiler import ProfilerActivity, profile, record_function
from transformers.generation import GreedySearchDecoderOnlyOutput, SampleDecoderOnlyOutput, TextStreamer
@dataclass
class InferenceParams:
"""Inference parameters that are passed to the main model in order
to efficienly calculate and store the context during inference."""
max_seqlen: int
max_batch_size: int
seqlen_offset: int = 0
batch_size_offset: int = 0
key_value_memory_dict: dict = field(default_factory=dict)
lengths_per_sample: Optional[Tensor] = None
def reset(self, max_seqlen, max_batch_size):
self.max_seqlen = max_seqlen
self.max_batch_size = max_batch_size
self.seqlen_offset = 0
if self.lengths_per_sample is not None:
self.lengths_per_sample.zero_()
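# Illustrative sketch (not part of the library): constructing InferenceParams for a
# decoding run and reusing it for a second run via reset(). Sizes are hypothetical.
def _example_inference_params():
    params = InferenceParams(max_seqlen=512, max_batch_size=4)
    # ... prefill / decode here, advancing params.seqlen_offset as tokens are generated ...
    params.reset(max_seqlen=1024, max_batch_size=2)
    return params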
def modify_logits_for_min_p_filtering(logits, min_p):
"""Set the logits for none min_p values to -inf. Done in-place."""
if min_p <= 0.0 or min_p >= 1.0:
return
indices_to_remove = logits < min_p
logits.masked_fill_(indices_to_remove, float("-Inf"))
# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py
# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L231
def modify_logits_for_top_k_filtering(logits, top_k):
"""Set the logits for none top-k values to -inf. Done in-place."""
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits.masked_fill_(indices_to_remove, float("-Inf"))
# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py
# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L170
def modify_logits_for_top_p_filtering(logits, top_p):
"""Set the logits for none top-p values to -inf. Done in-place."""
if top_p <= 0.0 or top_p >= 1.0:
return
# First sort and calculate cumulative sum of probabilities.
sorted_logits, sorted_indices = torch.sort(logits, descending=False)
cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
    # Remove tokens with cumulative top_p above the threshold (tokens with 0 are kept)
sorted_indices_to_remove = cumulative_probs <= (1 - top_p)
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(
1, sorted_indices, sorted_indices_to_remove
)
logits.masked_fill_(indices_to_remove, float("-inf"))
def modify_logit_for_repetition_penalty(logits, prev_output_tokens, repetition_penalty=1.0):
"""Apply repetition penalty. See https://arxiv.org/abs/1909.05858
logits: (batch_size, vocab_size)
prev_output_tokens: (batch_size, seq_len)
"""
if repetition_penalty == 1.0:
return logits
score = torch.gather(logits, 1, prev_output_tokens)
    # if score < 0, multiply by the repetition penalty to reduce the previous token's probability
score = torch.where(score < 0, score * repetition_penalty, score / repetition_penalty)
logits.scatter_(1, prev_output_tokens, score)
return logits
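# Worked example (sketch with hypothetical values): previously generated tokens have their
# logits pushed down, dividing positive scores and multiplying negative ones by the penalty.
def _example_repetition_penalty():
    import torch
    logits = torch.tensor([[2.0, -1.0, 0.5]])        # (batch_size=1, vocab_size=3)
    prev_output_tokens = torch.tensor([[0, 1]])      # tokens 0 and 1 were generated before
    out = modify_logit_for_repetition_penalty(logits.clone(), prev_output_tokens, repetition_penalty=2.0)
    # token 0: 2.0 -> 1.0, token 1: -1.0 -> -2.0, token 2 unchanged at 0.5
    return out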
def sample(logits, top_k=1, top_p=0.0, min_p=0.0, temperature=1.0):
"""Sample from top-k logits.
Arguments:
logits: Tensor of shape (batch_size, vocab_size)
"""
if top_k == 1: # Short-circuit for greedy decoding
return logits.argmax(dim=-1)
else:
if top_p > 0.0:
assert top_p <= 1.0, "top-p should be in (0, 1]."
if top_k > 0:
top_k = min(top_k, logits.size(-1)) # Safety check
logits_top, indices = torch.topk(logits, top_k, dim=-1)
if temperature != 1.0:
logits_top /= temperature
modify_logits_for_top_p_filtering(logits_top, top_p)
return indices[
torch.arange(indices.shape[0], device=indices.device),
torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze(dim=-1),
]
else:
if min_p > 0.0:
logits_top = logits.clone()
max_prob = logits_top[..., 0].item()
min_prob = max_prob * min_p
modify_logits_for_min_p_filtering(logits_top, min_prob)
if temperature != 1.0:
logits_top /= temperature
return torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze(dim=-1)
# Clone so that when we modify for top_p we don't change the original logits
logits_top = logits / temperature if temperature != 1.0 else logits.clone()
modify_logits_for_top_p_filtering(logits_top, top_p)
return torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze(
dim=-1
)
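# Minimal sampling sketch (illustrative only; real callers pass model logits on GPU).
# top_k=1 is greedy decoding, top_k=0 with top_p>0 is nucleus sampling, and setting both
# applies top-k first and then top-p, matching the logic above.
def _example_sample():
    import torch
    torch.manual_seed(0)
    logits = torch.randn(2, 10)                          # (batch_size=2, vocab_size=10)
    greedy = sample(logits, top_k=1)                     # argmax per row
    nucleus = sample(logits, top_k=0, top_p=0.9, temperature=0.8)
    topk_then_topp = sample(logits, top_k=5, top_p=0.9)
    return greedy, nucleus, topk_then_topp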
@torch.inference_mode()
def decode(
input_ids,
model,
max_length,
top_k=1,
top_p=0.0,
min_p=0.0,
temperature=1.0,
repetition_penalty=1.0,
eos_token_id=None,
teacher_outputs=None,
vocab_size=None,
cg=False,
enable_timing=False,
streamer: Optional[TextStreamer] = None
):
"""Decoding, either greedy or with top-k or top-p sampling.
If top-k = 0, don't limit the number of candidates (pure sampling).
Top-k and top-p can be used together. If top_k > 0 and top_p > 0, then top-k is applied first,
then top-p.
We assume that all sequences in the same batch have the same length.
Arguments:
input_ids: (batch, seq_len)
max_length: int
teacher_outputs (optional): (batch, seq_len). If provided, instead of sampling from the
logits, the next token is taken from the teacher_outputs. Useful for testing.
Returns: GreedySearchDecoderOnlyOutput or SampleDecoderOnlyOutput, with the following fields:
sequences: (batch, max_length)
scores: tuples of (batch, vocab_size)
"""
if streamer is not None:
streamer.put(input_ids.cpu())
batch_size, seqlen_og = input_ids.shape
teacher_output_len = teacher_outputs.shape[1] if teacher_outputs is not None else 0
if cg:
if not hasattr(model, "_decoding_cache"):
model._decoding_cache = None
model._decoding_cache = update_graph_cache(
model,
model._decoding_cache,
batch_size,
seqlen_og,
max_length,
)
inference_params = model._decoding_cache.inference_params
inference_params.reset(max_length, batch_size)
else:
inference_params = InferenceParams(max_seqlen=max_length, max_batch_size=batch_size)
def get_logits(input_ids, inference_params):
decoding = inference_params.seqlen_offset > 0
if decoding:
position_ids = torch.full(
(batch_size, 1),
inference_params.seqlen_offset,
dtype=torch.long,
device=input_ids.device,
)
else:
position_ids = None
if not cg or not decoding:
logits = model(
input_ids,
position_ids=position_ids,
inference_params=inference_params,
num_last_tokens=1,
).logits.squeeze(dim=1)
else:
logits = model._decoding_cache.run(
input_ids, position_ids, inference_params.seqlen_offset
).squeeze(dim=1)
return logits[..., :vocab_size] if vocab_size is not None else logits
def sample_tokens(logits, inference_params):
if teacher_outputs is None or teacher_output_len <= inference_params.seqlen_offset:
token = sample(logits, top_k=top_k, top_p=top_p, min_p=min_p, temperature=temperature)
else:
token = teacher_outputs[:, inference_params.seqlen_offset]
# return rearrange(token, "b -> b 1")
return token.unsqueeze(1)
def should_stop(current_token, inference_params):
if inference_params.seqlen_offset == 0:
return False
if eos_token_id is not None and (current_token == eos_token_id).all():
return True
if inference_params.seqlen_offset >= max_length - 1:
return True
return False
start = torch.cuda.Event(enable_timing=enable_timing)
end = torch.cuda.Event(enable_timing=enable_timing)
if enable_timing:
start.record()
scores, sequences = [], [input_ids]
sequences_cat = input_ids
while not should_stop(sequences[-1], inference_params):
scores.append(get_logits(sequences[-1], inference_params))
inference_params.seqlen_offset += sequences[-1].shape[1]
if repetition_penalty == 1.0:
sampled_tokens = sample_tokens(scores[-1], inference_params)
else:
logits = modify_logit_for_repetition_penalty(
scores[-1].clone(), sequences_cat, repetition_penalty
)
sampled_tokens = sample_tokens(logits, inference_params)
sequences_cat = torch.cat([sequences_cat, sampled_tokens], dim=1)
sequences.append(sampled_tokens)
if streamer is not None:
streamer.put(sampled_tokens.cpu())
if streamer is not None:
streamer.end()
if enable_timing:
end.record()
torch.cuda.synchronize()
print(f"Prompt processing + decoding time: {(start.elapsed_time(end)):.0f}ms")
output_cls = GreedySearchDecoderOnlyOutput if top_k == 1 else SampleDecoderOnlyOutput
return output_cls(sequences=torch.cat(sequences, dim=1), scores=tuple(scores))
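# End-to-end usage sketch (hypothetical: `model` stands for any Mamba-style model exposing
# the interface used above, and a CUDA device is assumed; cg=True additionally requires the
# model to implement allocate_inference_cache):
def _example_decode(model, input_ids):
    out = decode(
        input_ids, model, max_length=input_ids.shape[1] + 50,
        top_k=50, top_p=0.9, temperature=0.8, repetition_penalty=1.1,
        cg=False,  # set cg=True to decode with CUDA graphs
    )
    return out.sequences  # (batch, max_length), prompt tokens included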
class GenerationMixin:
def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
raise NotImplementedError
def generate(
self,
input_ids,
max_length,
top_k=1,
top_p=0.0,
min_p=0.0,
temperature=1.0,
return_dict_in_generate=False,
output_scores=False,
**kwargs,
):
output = decode(
            input_ids, self, max_length, top_k=top_k, top_p=top_p, min_p=min_p, temperature=temperature, **kwargs
)
if not output_scores:
output.scores = None
return output if return_dict_in_generate else output.sequences
@dataclass
class DecodingCGCache:
max_batch_size: int = 0
max_seqlen: int = 0
device = None
dtype = None
callables: dict = field(default_factory=dict)
mempool = None
inference_params: Optional[InferenceParams] = None
run: Optional[Callable] = None
@torch.inference_mode()
def update_graph_cache(
model,
cache,
batch_size,
seqlen_og,
max_seqlen,
decoding_seqlens=(1,),
dtype=None,
n_warmups=2,
):
if cache is None:
cache = DecodingCGCache()
param_example = next(iter(model.parameters()))
device = param_example.device
if dtype is None:
dtype = param_example.dtype
if (
(device, dtype) != (cache.device, cache.dtype)
or batch_size > cache.max_batch_size
or max_seqlen > cache.max_seqlen
): # Invalidate the cache
cache.callables = {}
cache.mempool = None
cache.inference_params = None
gc.collect()
cache.device, cache.dtype = device, dtype
cache.max_batch_size, cache.max_seqlen = batch_size, max_seqlen
assert hasattr(model, "allocate_inference_cache"), "CUDA graph decoding requires that the model has a method allocate_inference_cache"
inf_cache = model.allocate_inference_cache(batch_size, max_seqlen, dtype)
lengths_per_sample = torch.full((batch_size,), seqlen_og, dtype=torch.int32, device=device)
cache.inference_params = InferenceParams(
max_seqlen=max_seqlen,
max_batch_size=batch_size,
seqlen_offset=seqlen_og,
key_value_memory_dict=inf_cache,
lengths_per_sample=lengths_per_sample,
)
cache.mempool = torch.cuda.graphs.graph_pool_handle()
for decoding_seqlen in decoding_seqlens:
if (batch_size, decoding_seqlen) not in cache.callables:
cache.callables[batch_size, decoding_seqlen] = capture_graph(
model,
cache.inference_params,
batch_size,
max_seqlen,
decoding_seqlen=decoding_seqlen,
mempool=cache.mempool,
n_warmups=n_warmups,
)
def dispatch(input_ids, position_ids, seqlen):
batch_size, decoding_seqlen = input_ids.shape[:2]
return cache.callables[batch_size, decoding_seqlen](input_ids, position_ids, seqlen)
cache.run = dispatch
cache.inference_params.seqlen_offset = 0 # Reset so it's not confusing
return cache
def capture_graph(
model, inference_params, batch_size, max_seqlen, decoding_seqlen=1, mempool=None, n_warmups=2
):
device = next(iter(model.parameters())).device
input_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device)
position_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device)
seqlen_offset_og = inference_params.seqlen_offset
inference_params.seqlen_offset = max_seqlen - decoding_seqlen
inference_params.lengths_per_sample[:] = inference_params.seqlen_offset
# Warmup before capture
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
for _ in range(n_warmups):
logits = model(
input_ids,
position_ids=position_ids,
inference_params=inference_params,
num_last_tokens=decoding_seqlen,
).logits
s.synchronize()
# This might be needed for correctness if we run with NCCL_GRAPH_MIXING_SUPPORT=0,
    # which requires that graph launches and non-captured launches do not overlap (I think
    # that's how I interpret the documentation). I'm not sure if this is required.
if torch.distributed.is_initialized():
torch.distributed.barrier()
torch.cuda.current_stream().wait_stream(s)
# Captures the graph
# To allow capture, automatically sets a side stream as the current stream in the context
graph = torch.cuda.CUDAGraph()
with torch.cuda.graph(graph, pool=mempool):
logits = model(
input_ids,
position_ids=position_ids,
inference_params=inference_params,
num_last_tokens=decoding_seqlen,
).logits
def run(new_input_ids, new_position_ids, seqlen):
inference_params.lengths_per_sample[:] = seqlen
input_ids.copy_(new_input_ids)
position_ids.copy_(new_position_ids)
graph.replay()
return logits.clone()
inference_params.seqlen_offset = seqlen_offset_og
return run
import json
import torch
from transformers.utils import WEIGHTS_NAME, CONFIG_NAME
from transformers.utils.hub import cached_file
def load_config_hf(model_name):
resolved_archive_file = cached_file(model_name, CONFIG_NAME, _raise_exceptions_for_missing_entries=False)
return json.load(open(resolved_archive_file))
def load_state_dict_hf(model_name, device=None, dtype=None):
# If not fp32, then we don't want to load directly to the GPU
mapped_device = "cpu" if dtype not in [torch.float32, None] else device
resolved_archive_file = cached_file(model_name, WEIGHTS_NAME, _raise_exceptions_for_missing_entries=False)
    state_dict = torch.load(resolved_archive_file, map_location=mapped_device)
# Convert dtype before moving to GPU to save memory
if dtype is not None:
state_dict = {k: v.to(dtype=dtype) for k, v in state_dict.items()}
state_dict = {k: v.to(device=device) for k, v in state_dict.items()}
return state_dict
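# Usage sketch (the repo id below is only an example; any Hugging Face repo that hosts a
# config.json and pytorch_model.bin works):
def _example_load_hf(model_name="state-spaces/mamba-130m"):
    config = load_config_hf(model_name)
    state_dict = load_state_dict_hf(model_name, device="cuda", dtype=torch.float16)
    return config, state_dict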
# Unique model identifier
modelCode=943
# Model name
modelName=mamba2_pytorch
# Model description
modelDescription=Transformers are SSMs: Generalized Models and Efficient Algorithms Through Structured State Space Duality
# Application scenarios
appScenario=inference,research,manufacturing,healthcare,home,education
# Framework type
frameType=pytorch
[project]
name = "mamba_ssm"
description = "Mamba state-space model"
readme = "README.md"
authors = [
{ name = "Tri Dao", email = "tri@tridao.me" },
{ name = "Albert Gu", email = "agu@cs.cmu.edu" }
]
requires-python = ">= 3.7"
dynamic = ["version"]
license = { file = "LICENSE" } # Include a LICENSE file in your repo
keywords = ["cuda", "pytorch", "state-space model"]
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: Unix"
]
dependencies = [
"ninja",
"einops",
"triton",
"transformers",
"packaging",
"setuptools>=61.0.0",
]
urls = { name = "Repository", url = "https://github.com/state-spaces/mamba"}
[project.optional-dependencies]
causal-conv1d = [
"causal-conv1d>=1.2.0"
]
dev = [
"pytest"
]
[build-system]
requires = [
"setuptools>=61.0.0",
"wheel",
"packaging",
"ninja",
]
build-backend = "setuptools.build_meta"
--- /opt/rocm/include/hip/amd_detail/amd_hip_bf16.h 2023-12-12 20:11:48.000000000 +0000
+++ rocm_update_files/amd_hip_bf16.h 2024-05-20 17:40:26.983349079 +0000
@@ -137,7 +137,7 @@
* \ingroup HIP_INTRINSIC_BFLOAT16_CONV
* \brief Converts float to bfloat16
*/
-__HOST_DEVICE__ __hip_bfloat16 __float2bfloat16(float f) {
+__HOST_DEVICE__ static inline __hip_bfloat16 __float2bfloat16(float f) {
__hip_bfloat16 ret;
union {
float fp32;
@@ -181,7 +181,7 @@
* \ingroup HIP_INTRINSIC_BFLOAT162_CONV
* \brief Converts and moves bfloat162 to float2
*/
-__HOST_DEVICE__ float2 __bfloat1622float2(const __hip_bfloat162 a) {
+__HOST_DEVICE__ static inline float2 __bfloat1622float2(const __hip_bfloat162 a) {
return float2{__bfloat162float(a.x), __bfloat162float(a.y)};
}
@@ -209,7 +209,7 @@
* \ingroup HIP_INTRINSIC_BFLOAT162_CONV
* \brief Convert double to __hip_bfloat16
*/
-__HOST_DEVICE__ __hip_bfloat16 __double2bfloat16(const double a) {
+__HOST_DEVICE__ static inline __hip_bfloat16 __double2bfloat16(const double a) {
return __float2bfloat16((float)a);
}
@@ -217,7 +217,7 @@
* \ingroup HIP_INTRINSIC_BFLOAT162_CONV
* \brief Convert float2 to __hip_bfloat162
*/
-__HOST_DEVICE__ __hip_bfloat162 __float22bfloat162_rn(const float2 a) {
+__HOST_DEVICE__ static inline __hip_bfloat162 __float22bfloat162_rn(const float2 a) {
return __hip_bfloat162{__float2bfloat16(a.x), __float2bfloat16(a.y)};
}
@@ -247,7 +247,7 @@
* \ingroup HIP_INTRINSIC_BFLOAT162_CONV
* \brief Converts high 16 bits of __hip_bfloat162 to float and returns the result
*/
-__HOST_DEVICE__ float __high2float(const __hip_bfloat162 a) { return __bfloat162float(a.y); }
+__HOST_DEVICE__ static inline float __high2float(const __hip_bfloat162 a) { return __bfloat162float(a.y); }
/**
* \ingroup HIP_INTRINSIC_BFLOAT162_CONV
@@ -275,7 +275,7 @@
* \ingroup HIP_INTRINSIC_BFLOAT162_CONV
* \brief Converts low 16 bits of __hip_bfloat162 to float and returns the result
*/
-__HOST_DEVICE__ float __low2float(const __hip_bfloat162 a) { return __bfloat162float(a.x); }
+__HOST_DEVICE__ static inline float __low2float(const __hip_bfloat162 a) { return __bfloat162float(a.x); }
/**
* \ingroup HIP_INTRINSIC_BFLOAT162_CONV
# Copyright (c) 2023, Albert Gu, Tri Dao.
import sys
import warnings
import os
import re
import ast
from pathlib import Path
from packaging.version import parse, Version
import platform
import shutil
from setuptools import setup, find_packages
import subprocess
import urllib.request
import urllib.error
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
import torch
from torch.utils.cpp_extension import (
BuildExtension,
CUDAExtension,
CUDA_HOME,
HIP_HOME
)
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
# ninja build does not work unless include_dirs are absolute paths
this_dir = os.path.dirname(os.path.abspath(__file__))
PACKAGE_NAME = "mamba_ssm"
BASE_WHEEL_URL = "https://github.com/state-spaces/mamba/releases/download/{tag_name}/{wheel_name}"
# FORCE_BUILD: Force a fresh build locally, instead of attempting to find prebuilt wheels
# SKIP_CUDA_BUILD: Intended to allow CI to use a simple `python setup.py sdist` run to copy over raw files, without any cuda compilation
FORCE_BUILD = os.getenv("MAMBA_FORCE_BUILD", "FALSE") == "TRUE"
SKIP_CUDA_BUILD = os.getenv("MAMBA_SKIP_CUDA_BUILD", "FALSE") == "TRUE"
# For CI, we want the option to build with C++11 ABI since the nvcr images use C++11 ABI
FORCE_CXX11_ABI = os.getenv("MAMBA_FORCE_CXX11_ABI", "FALSE") == "TRUE"
def get_platform():
"""
Returns the platform name as used in wheel filenames.
"""
if sys.platform.startswith("linux"):
return "linux_x86_64"
elif sys.platform == "darwin":
mac_version = ".".join(platform.mac_ver()[0].split(".")[:2])
return f"macosx_{mac_version}_x86_64"
elif sys.platform == "win32":
return "win_amd64"
else:
raise ValueError("Unsupported platform: {}".format(sys.platform))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output(
[cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True
)
output = raw_output.split()
release_idx = output.index("release") + 1
bare_metal_ver = parse(output[release_idx].split(",")[0])
return raw_output, bare_metal_ver
def get_hip_version(rocm_dir):
hipcc_bin = "hipcc" if rocm_dir is None else os.path.join(rocm_dir, "bin", "hipcc")
try:
raw_output = subprocess.check_output(
[hipcc_bin, "--version"], universal_newlines=True
)
except Exception as e:
print(
f"hip installation not found: {e} ROCM_PATH={os.environ.get('ROCM_PATH')}"
)
return None, None
for line in raw_output.split("\n"):
if "HIP version" in line:
rocm_version = parse(line.split()[-1].rstrip('-').replace('-', '+')) # local version is not parsed correctly
return line, rocm_version
return None, None
def get_torch_hip_version():
if torch.version.hip:
return parse(torch.version.hip.split()[-1].rstrip('-').replace('-', '+'))
else:
return None
def check_if_hip_home_none(global_option: str) -> None:
if HIP_HOME is not None:
return
# warn instead of error because user could be downloading prebuilt wheels, so hipcc won't be necessary
# in that case.
warnings.warn(
f"{global_option} was requested, but hipcc was not found. Are you sure your environment has hipcc available?"
)
def check_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
# warn instead of error because user could be downloading prebuilt wheels, so nvcc won't be necessary
# in that case.
warnings.warn(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
return nvcc_extra_args + ["--threads", "4"]
cmdclass = {}
ext_modules = []
HIP_BUILD = bool(torch.version.hip)
if not SKIP_CUDA_BUILD:
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cc_flag = []
if HIP_BUILD:
check_if_hip_home_none(PACKAGE_NAME)
rocm_home = os.getenv("ROCM_PATH")
_, hip_version = get_hip_version(rocm_home)
if HIP_HOME is not None:
if hip_version < Version("6.0"):
raise RuntimeError(
f"{PACKAGE_NAME} is only supported on ROCm 6.0 and above. "
"Note: make sure HIP has a supported version by running hipcc --version."
)
if hip_version == Version("6.0"):
warnings.warn(
f"{PACKAGE_NAME} requires a patch to be applied when running on ROCm 6.0. "
"Refer to the README.md for detailed instructions.",
UserWarning
)
cc_flag.append("-DBUILD_PYTHON_PACKAGE")
else:
check_if_cuda_home_none(PACKAGE_NAME)
        # Check if CUDA 11 is installed for compute capability 8.0
if CUDA_HOME is not None:
_, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
if bare_metal_version < Version("11.6"):
raise RuntimeError(
f"{PACKAGE_NAME} is only supported on CUDA 11.6 and above. "
"Note: make sure nvcc has a supported version by running nvcc -V."
)
cc_flag.append("-gencode")
cc_flag.append("arch=compute_53,code=sm_53")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_62,code=sm_62")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_72,code=sm_72")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_87,code=sm_87")
if bare_metal_version >= Version("11.8"):
cc_flag.append("-gencode")
cc_flag.append("arch=compute_90,code=sm_90")
# HACK: The compiler flag -D_GLIBCXX_USE_CXX11_ABI is set to be the same as
# torch._C._GLIBCXX_USE_CXX11_ABI
# https://github.com/pytorch/pytorch/blob/8472c24e3b5b60150096486616d98b7bea01500b/torch/utils/cpp_extension.py#L920
if FORCE_CXX11_ABI:
torch._C._GLIBCXX_USE_CXX11_ABI = True
if HIP_BUILD:
extra_compile_args = {
"cxx": ["-O3", "-std=c++17"],
"nvcc": [
"-O3",
"-std=c++17",
f"--offload-arch={os.getenv('HIP_ARCHITECTURES', 'native')}",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-fgpu-flush-denormals-to-zero",
]
+ cc_flag,
}
else:
extra_compile_args = {
"cxx": ["-O3", "-std=c++17"],
"nvcc": append_nvcc_threads(
[
"-O3",
"-std=c++17",
"-U__CUDA_NO_HALF_OPERATORS__",
"-U__CUDA_NO_HALF_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT16_OPERATORS__",
"-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
"-U__CUDA_NO_BFLOAT162_OPERATORS__",
"-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
"--expt-relaxed-constexpr",
"--expt-extended-lambda",
"--use_fast_math",
"--ptxas-options=-v",
"-lineinfo",
]
+ cc_flag
),
}
ext_modules.append(
CUDAExtension(
name="selective_scan_cuda",
sources=[
"csrc/selective_scan/selective_scan.cpp",
"csrc/selective_scan/selective_scan_fwd_fp32.cu",
"csrc/selective_scan/selective_scan_fwd_fp16.cu",
"csrc/selective_scan/selective_scan_fwd_bf16.cu",
"csrc/selective_scan/selective_scan_bwd_fp32_real.cu",
"csrc/selective_scan/selective_scan_bwd_fp32_complex.cu",
"csrc/selective_scan/selective_scan_bwd_fp16_real.cu",
"csrc/selective_scan/selective_scan_bwd_fp16_complex.cu",
"csrc/selective_scan/selective_scan_bwd_bf16_real.cu",
"csrc/selective_scan/selective_scan_bwd_bf16_complex.cu",
],
extra_compile_args=extra_compile_args,
include_dirs=[Path(this_dir) / "csrc" / "selective_scan"],
)
)
def get_package_version():
with open(Path(this_dir) / PACKAGE_NAME / "__init__.py", "r") as f:
version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE)
public_version = ast.literal_eval(version_match.group(1))
local_version = os.environ.get("MAMBA_LOCAL_VERSION")
if local_version:
return f"{public_version}+{local_version}"
else:
return str(public_version)
def get_wheel_url():
    # Determine the version numbers that will be used to select the correct wheel
torch_version_raw = parse(torch.__version__)
if HIP_BUILD:
# We're using the HIP version used to build torch, not the one currently installed
torch_hip_version = get_torch_hip_version()
hip_ver = f"{torch_hip_version.major}{torch_hip_version.minor}"
else:
# We're using the CUDA version used to build torch, not the one currently installed
# _, cuda_version_raw = get_cuda_bare_metal_version(CUDA_HOME)
torch_cuda_version = parse(torch.version.cuda)
# For CUDA 11, we only compile for CUDA 11.8, and for CUDA 12 we only compile for CUDA 12.2
# to save CI time. Minor versions should be compatible.
torch_cuda_version = parse("11.8") if torch_cuda_version.major == 11 else parse("12.2")
cuda_version = f"{torch_cuda_version.major}{torch_cuda_version.minor}"
gpu_compute_version = hip_ver if HIP_BUILD else cuda_version
cuda_or_hip = "hip" if HIP_BUILD else "cu"
python_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
platform_name = get_platform()
mamba_ssm_version = get_package_version()
torch_version = f"{torch_version_raw.major}.{torch_version_raw.minor}"
cxx11_abi = str(torch._C._GLIBCXX_USE_CXX11_ABI).upper()
# Determine wheel URL based on CUDA version, torch version, python version and OS
wheel_filename = f"{PACKAGE_NAME}-{mamba_ssm_version}+{cuda_or_hip}{gpu_compute_version}torch{torch_version}cxx11abi{cxx11_abi}-{python_version}-{python_version}-{platform_name}.whl"
wheel_url = BASE_WHEEL_URL.format(
tag_name=f"v{mamba_ssm_version}", wheel_name=wheel_filename
)
return wheel_url, wheel_filename
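# Example of the resulting filename/URL pair (hypothetical version numbers, shown only to
# illustrate the naming scheme assembled above):
#   mamba_ssm-2.0.0+cu122torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
#   https://github.com/state-spaces/mamba/releases/download/v2.0.0/mamba_ssm-2.0.0+cu122torch2.2cxx11abiFALSE-cp310-cp310-linux_x86_64.whl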
class CachedWheelsCommand(_bdist_wheel):
"""
    The CachedWheelsCommand plugs into the default bdist_wheel command, which is run by pip when it cannot
    find an existing wheel (which is currently the case for all installs). We use
    the environment parameters to detect whether there is already a pre-built version of a compatible
    wheel available and short-circuit the standard full build pipeline.
"""
def run(self):
if FORCE_BUILD:
return super().run()
wheel_url, wheel_filename = get_wheel_url()
print("Guessing wheel URL: ", wheel_url)
try:
urllib.request.urlretrieve(wheel_url, wheel_filename)
# Make the archive
# Lifted from the root wheel processing command
# https://github.com/pypa/wheel/blob/cf71108ff9f6ffc36978069acb28824b44ae028e/src/wheel/bdist_wheel.py#LL381C9-L381C85
if not os.path.exists(self.dist_dir):
os.makedirs(self.dist_dir)
impl_tag, abi_tag, plat_tag = self.get_tag()
archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}"
wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl")
print("Raw wheel path", wheel_path)
shutil.move(wheel_filename, wheel_path)
except urllib.error.HTTPError:
print("Precompiled wheel not found. Building from source...")
# If the wheel could not be downloaded, build from source
super().run()
setup(
name=PACKAGE_NAME,
version=get_package_version(),
packages=find_packages(
exclude=(
"build",
"csrc",
"include",
"tests",
"dist",
"docs",
"benchmarks",
"mamba_ssm.egg-info",
)
),
long_description=long_description,
long_description_content_type="text/markdown",
ext_modules=ext_modules,
cmdclass={"bdist_wheel": CachedWheelsCommand, "build_ext": BuildExtension}
if ext_modules
else {
"bdist_wheel": CachedWheelsCommand,
}
)
# Copyright (C) 2023, Tri Dao.
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange
from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, selective_scan_ref
from mamba_ssm.ops.selective_scan_interface import mamba_inner_fn, mamba_inner_ref
# @pytest.mark.parametrize('wtype', [torch.float32, torch.complex64])
@pytest.mark.parametrize('wtype', [torch.float32])
# @pytest.mark.parametrize('itype', [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize('itype', [torch.float32])
# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 372, 512, 784, 1024, 1134, 2048, 4096])
@pytest.mark.parametrize('seqlen', [128, 256, 512, 1024, 2048, 4096])
# @pytest.mark.parametrize('seqlen', [128])
# @pytest.mark.parametrize("return_last_state", [False, True])
@pytest.mark.parametrize("return_last_state", [True])
# @pytest.mark.parametrize('has_delta_bias', [False, True])
@pytest.mark.parametrize('has_delta_bias', [True])
# @pytest.mark.parametrize('delta_softplus', [False, True])
@pytest.mark.parametrize('delta_softplus', [True])
# @pytest.mark.parametrize('has_z', [False, True])
@pytest.mark.parametrize('has_z', [True])
# @pytest.mark.parametrize('has_D', [False, True])
@pytest.mark.parametrize('has_D', [True])
@pytest.mark.parametrize("varBC_groups", [1, 2])
# @pytest.mark.parametrize("varBC_groups", [1])
# @pytest.mark.parametrize("is_variable_C", [False, True])
@pytest.mark.parametrize("is_variable_C", [True])
# @pytest.mark.parametrize("is_variable_B", [False, True])
@pytest.mark.parametrize("is_variable_B", [True])
def test_selective_scan(is_variable_B, is_variable_C, varBC_groups, has_D, has_z, has_delta_bias,
delta_softplus, return_last_state, seqlen, itype, wtype):
if varBC_groups > 1 and (not is_variable_B or not is_variable_C):
pytest.skip() # This config is not applicable
device = 'cuda'
rtol, atol = (6e-4, 2e-3) if itype == torch.float32 else (3e-3, 5e-3)
if itype == torch.bfloat16:
rtol, atol = 3e-2, 5e-2
rtolw, atolw = (1e-3, 1e-3)
if has_z: # If we have z, the errors on the weights seem higher
rtolw = max(rtolw, rtol)
atolw = max(atolw, atol)
# set seed
torch.random.manual_seed(0)
batch_size = 2
dim = 4
dstate = 8
is_complex = wtype == torch.complex64
A = (-0.5 * torch.rand(dim, dstate, device=device, dtype=wtype)).requires_grad_()
if not is_variable_B:
B_shape = (dim, dstate)
elif varBC_groups == 1:
B_shape = (batch_size, dstate, seqlen if not is_complex else seqlen * 2)
else:
B_shape = (batch_size, varBC_groups, dstate, seqlen if not is_complex else seqlen * 2)
B = torch.randn(*B_shape, device=device, dtype=wtype if not is_variable_B else itype,
requires_grad=True)
if not is_variable_C:
C_shape = (dim, dstate)
elif varBC_groups == 1:
C_shape = (batch_size, dstate, seqlen if not is_complex else seqlen * 2)
else:
C_shape = (batch_size, varBC_groups, dstate, seqlen if not is_complex else seqlen * 2)
C = torch.randn(*C_shape, device=device, dtype=wtype if not is_variable_C else itype,
requires_grad=True)
if has_D:
D = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True)
else:
D = None
if has_z:
z = torch.randn(batch_size, dim, seqlen, device=device, dtype=itype, requires_grad=True)
else:
z = None
if has_delta_bias:
delta_bias = (0.5 * torch.rand(dim, device=device, dtype=torch.float32)).requires_grad_()
else:
delta_bias = None
u = torch.randn(batch_size, dim, seqlen, device=device, dtype=itype, requires_grad=True)
delta = (0.5 * torch.rand(batch_size, dim, seqlen, device=device, dtype=itype)).requires_grad_()
A_ref = A.detach().clone().requires_grad_()
B_ref = B.detach().clone().requires_grad_()
C_ref = C.detach().clone().requires_grad_()
D_ref = D.detach().clone().requires_grad_() if D is not None else None
z_ref = z.detach().clone().requires_grad_() if z is not None else None
u_ref = u.detach().clone().requires_grad_()
delta_ref = delta.detach().clone().requires_grad_()
delta_bias_ref = delta_bias.detach().clone().requires_grad_() if delta_bias is not None else None
out, *rest = selective_scan_fn(
u, delta, A, B, C, D, z=z,
delta_bias=delta_bias, delta_softplus=delta_softplus,
return_last_state=return_last_state
)
if return_last_state:
state = rest[0]
out_ref, *rest = selective_scan_ref(
u_ref, delta_ref, A_ref, B_ref, C_ref, D_ref, z=z_ref,
delta_bias=delta_bias_ref, delta_softplus=delta_softplus,
return_last_state=return_last_state
)
if return_last_state:
state_ref = rest[0]
# dA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A))
# dt_u = delta * u
print(f'Output max diff: {(out - out_ref).abs().max().item()}')
print(f'Output mean diff: {(out - out_ref).abs().mean().item()}')
assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)
if return_last_state:
print(f'State max diff: {(state - state_ref).abs().max().item()}')
assert torch.allclose(state, state_ref, rtol=rtol, atol=atol)
g = torch.randn_like(out)
out_ref.backward(g)
out.backward(g)
print(f'du max diff: {(u.grad - u_ref.grad).abs().max().item()}')
print(f'ddelta max diff: {(delta.grad - delta_ref.grad).abs().max().item()}')
print(f'dA max diff: {(A.grad - A_ref.grad).abs().max().item()}')
print(f'dB max diff: {(B.grad - B_ref.grad).abs().max().item()}')
print(f'dC max diff: {(C.grad - C_ref.grad).abs().max().item()}')
if has_D:
print(f'dD max diff: {(D.grad - D_ref.grad).abs().max().item()}')
if has_z:
print(f'dz max diff: {(z.grad - z_ref.grad).abs().max().item()}')
if has_delta_bias:
print(f'ddelta_bias max diff: {(delta_bias.grad - delta_bias_ref.grad).abs().max().item()}')
assert torch.allclose(u.grad, u_ref.grad.to(dtype=itype), rtol=rtol * 2, atol=atol * 2)
assert torch.allclose(delta.grad, delta_ref.grad.to(dtype=itype), rtol=rtol * 5, atol=atol * 10)
assert torch.allclose(A.grad, A_ref.grad, rtol=rtolw, atol=atolw * 5)
assert torch.allclose(B.grad, B_ref.grad, rtol=rtolw if not is_variable_B else rtol,
atol=atolw if not is_variable_B else atol)
assert torch.allclose(C.grad, C_ref.grad, rtol=rtolw if not is_variable_C else rtol,
atol=atolw if not is_variable_C else atol)
if has_D:
assert torch.allclose(D.grad, D_ref.grad, rtol=rtolw, atol=atolw)
if has_z:
assert torch.allclose(z.grad, z_ref.grad, rtol=rtolw, atol=atolw)
if has_delta_bias:
assert torch.allclose(delta_bias.grad, delta_bias_ref.grad, rtol=rtolw, atol=atolw)
@pytest.mark.parametrize('wtype', [torch.float32, torch.complex64])
# @pytest.mark.parametrize('wtype', [torch.complex64])
# @pytest.mark.parametrize('itype', [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize('itype', [torch.float32])
# @pytest.mark.parametrize('seqlen', [8, 16, 32, 64, 128, 256, 372, 512, 784, 1024, 1134, 2048, 4096])
@pytest.mark.parametrize('seqlen', [128])
@pytest.mark.parametrize("is_variable_C", [False, True])
# @pytest.mark.parametrize("is_variable_C", [False])
@pytest.mark.parametrize("is_variable_B", [False, True])
# @pytest.mark.parametrize("is_variable_B", [True])
def test_mamba_inner_fn(is_variable_B, is_variable_C, seqlen, itype, wtype):
device = 'cuda'
rtol, atol = (6e-4, 2e-3) if itype == torch.float32 else (3e-3, 5e-3)
if itype == torch.bfloat16:
rtol, atol = 3e-2, 5e-2
rtolw, atolw = (1e-3, 1e-3)
# If we have z, the errors on the weights seem higher
rtolw = max(rtolw, rtol)
atolw = max(atolw, atol)
# set seed
torch.random.manual_seed(0)
batch_size = 2
dim = 768
dstate = 8
dt_rank = 48
is_complex = wtype == torch.complex64
xz = torch.randn(batch_size, 2 * dim, seqlen, device=device, dtype=itype, requires_grad=True)
conv1d_weight = torch.randn(dim, 1, 3, device=device, dtype=torch.float32, requires_grad=True)
conv1d_bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True)
x_proj_weight = torch.randn(dt_rank + (bool(is_variable_B) + bool(is_variable_C)) * dstate
* (1 if not is_complex else 2),
dim, device=device, dtype=itype, requires_grad=True)
delta_proj_weight = torch.randn(dim, dt_rank, device=device, dtype=itype, requires_grad=True)
out_proj_weight = torch.randn(dim // 2, dim, device=device, dtype=itype, requires_grad=True)
out_proj_bias = None
A = (-0.5 * torch.rand(dim, dstate, device=device, dtype=wtype)).requires_grad_()
B = (torch.randn(dim, dstate, device=device, dtype=wtype, requires_grad=True)
if not is_variable_B else None)
C = (torch.randn(dim, dstate, device=device, dtype=wtype, requires_grad=True)
if not is_variable_C else None)
D = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True)
delta_bias = (0.5 * torch.rand(dim, device=device, dtype=torch.float32)).requires_grad_()
B_proj_bias = None
C_proj_bias = None
xz_ref = xz.detach().clone().requires_grad_()
conv1d_weight_ref = conv1d_weight.detach().clone().requires_grad_()
conv1d_bias_ref = conv1d_bias.detach().clone().requires_grad_()
x_proj_weight_ref = x_proj_weight.detach().clone().requires_grad_()
delta_proj_weight_ref = delta_proj_weight.detach().clone().requires_grad_()
out_proj_weight_ref = out_proj_weight.detach().clone().requires_grad_()
out_proj_bias_ref = (out_proj_bias.detach().clone().requires_grad_()
if out_proj_bias is not None else None)
A_ref = A.detach().clone().requires_grad_()
B_ref = B.detach().clone().requires_grad_() if B is not None else None
C_ref = C.detach().clone().requires_grad_() if C is not None else None
D_ref = D.detach().clone().requires_grad_()
delta_bias_ref = delta_bias.detach().clone().requires_grad_() if delta_bias is not None else None
out = mamba_inner_fn(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
out_proj_weight, out_proj_bias,
A, B, C, D, delta_bias=delta_bias, delta_softplus=True)
out_ref = mamba_inner_ref(xz_ref, conv1d_weight_ref, conv1d_bias_ref, x_proj_weight_ref,
delta_proj_weight_ref, out_proj_weight_ref, out_proj_bias_ref,
A_ref, B_ref, C_ref, D_ref,
delta_bias=delta_bias_ref, delta_softplus=True)
# dA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A))
# dt_u = delta * u
print(f'Output max diff: {(out - out_ref).abs().max().item()}')
print(f'Output mean diff: {(out - out_ref).abs().mean().item()}')
assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)
g = torch.randn_like(out)
out_ref.backward(g)
out.backward(g)
print(f'dxz max diff: {(xz.grad - xz_ref.grad).abs().max().item()}')
print(f'dA max diff: {(A.grad - A_ref.grad).abs().max().item()}')
if not is_variable_B:
print(f'dB max diff: {(B.grad - B_ref.grad).abs().max().item()}')
if not is_variable_C:
print(f'dC max diff: {(C.grad - C_ref.grad).abs().max().item()}')
print(f'dD max diff: {(D.grad - D_ref.grad).abs().max().item()}')
print(f'ddelta_bias max diff: {(delta_bias.grad - delta_bias_ref.grad).abs().max().item()}')
print(f'dout_proj_weight max diff: {(out_proj_weight.grad - out_proj_weight_ref.grad).abs().max().item()}')
print(f'ddelta_proj_weight max diff: {(delta_proj_weight.grad - delta_proj_weight_ref.grad).abs().max().item()}')
print(f'dx_proj_weight max diff: {(x_proj_weight.grad - x_proj_weight_ref.grad).abs().max().item()}')
print(f'dconv1d_weight max diff: {(conv1d_weight.grad - conv1d_weight_ref.grad).abs().max().item()}')
print(f'dconv1d_bias max diff: {(conv1d_bias.grad - conv1d_bias_ref.grad).abs().max().item()}')
# assert torch.allclose(xz.grad, xz_ref.grad.to(dtype=itype), rtol=rtol * 2, atol=atol * 2)
# assert torch.allclose(delta.grad, delta_ref.grad.to(dtype=itype), rtol=rtol * 5, atol=atol * 10)
# assert torch.allclose(A.grad, A_ref.grad, rtol=rtolw, atol=atolw * 5)
# assert torch.allclose(B.grad, B_ref.grad, rtol=rtolw if not is_variable_B else rtol,
# atol=atolw if not is_variable_B else atol)
# assert torch.allclose(C.grad, C_ref.grad, rtol=rtolw if not is_variable_C else rtol,
# atol=atolw if not is_variable_C else atol)
# assert torch.allclose(D.grad, D_ref.grad, rtol=rtolw, atol=atolw)
# assert torch.allclose(delta_bias.grad, delta_bias_ref.grad, rtol=rtolw, atol=atolw)
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange, repeat
from mamba_ssm.ops.triton.layernorm_gated import layernorm_fn, rms_norm_ref
@pytest.mark.parametrize("norm_before_gate", [True, False])
# @pytest.mark.parametrize("norm_before_gate", [False])
@pytest.mark.parametrize("has_group", [False, True])
# @pytest.mark.parametrize("has_group", [False])
@pytest.mark.parametrize("is_rms_norm", [False, True])
# @pytest.mark.parametrize("is_rms_norm", [True])
@pytest.mark.parametrize("has_z", [False, True])
# @pytest.mark.parametrize("has_z", [True])
@pytest.mark.parametrize("has_bias", [False, True])
# @pytest.mark.parametrize("has_bias", [False])
# @pytest.mark.parametrize('dtype', [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize('dtype', [torch.float16])
# @pytest.mark.parametrize("wtype", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("wtype", [torch.float32])
@pytest.mark.parametrize('d', [2048, 4096])
# @pytest.mark.parametrize('d', [4096])
def test_layer_norm_gated(d, dtype, wtype, has_bias, has_z, is_rms_norm, has_group, norm_before_gate):
if not has_z and not norm_before_gate:
pytest.skip()
if not norm_before_gate and not is_rms_norm: # Reference LN isn't implemented for this case yet
pytest.skip()
device = 'cuda'
rtol, atol = (1e-5, 1e-5) if dtype == torch.float32 else (1e-2, 8e-3)
group_size = None if not has_group else 64
# set seed
torch.random.manual_seed(0)
batch = 16
seqlen = 1024
x = torch.randn(batch, seqlen, d, dtype=dtype, device=device, requires_grad=True)
if has_z:
z = torch.randn(batch, seqlen, d, dtype=dtype, device=device, requires_grad=True)
else:
z = None
weight = torch.randn(d, dtype=wtype, device=device, requires_grad=True)
if has_bias:
bias = torch.randn(d, dtype=wtype, device=device, requires_grad=True)
else:
bias = None
x_ref = x.detach().clone().requires_grad_()
x_pt = x.detach().clone().requires_grad_()
z_ref = z.detach().clone().requires_grad_() if z is not None else None
z_pt = z.detach().clone().requires_grad_() if z is not None else None
weight_ref = weight.detach().clone().requires_grad_()
weight_pt = weight.detach().clone().requires_grad_()
bias_ref = bias.detach().clone().requires_grad_() if bias is not None else None
bias_pt = bias.detach().clone().requires_grad_() if bias is not None else None
out = layernorm_fn(x, weight, bias, z=z, eps=1e-5, group_size=group_size, norm_before_gate=norm_before_gate,
is_rms_norm=is_rms_norm)
if not is_rms_norm:
if not has_group:
out_ref = F.layer_norm(x_ref.float(), (d,), weight=weight_ref.float(), bias=bias_ref.float() if bias_ref is not None else None, eps=1e-5)
out_pt = F.layer_norm(x_pt.to(wtype), (d,), weight=weight_pt, bias=bias_pt, eps=1e-5)
else:
out_ref = rearrange(F.layer_norm(rearrange(x_ref, "... (g d) -> ... g d", d=group_size).float(), (group_size,), eps=1e-5), "... g d -> ... (g d)") * weight_ref.float()
if has_bias:
out_ref = out_ref + bias_ref.float()
out_pt = rearrange(F.layer_norm(rearrange(x_pt, "... (g d) -> ... g d", d=group_size), (group_size,), eps=1e-5), "... g d -> ... (g d)") * weight_pt
if has_bias:
out_pt = out_pt + bias_pt
if has_z and norm_before_gate:
out_ref = out_ref * F.silu(z_ref.float())
out_pt = out_pt * F.silu(z_pt)
else:
out_ref = rms_norm_ref(x_ref, weight_ref, bias_ref, z=z_ref, eps=1e-5, group_size=group_size,
norm_before_gate=norm_before_gate)
out_pt = rms_norm_ref(x_pt, weight_pt, bias_pt, z=z_pt, eps=1e-5, group_size=group_size,
norm_before_gate=norm_before_gate, upcast=False)
print(f"Max diff = {(out - out_ref).abs().max().item()}")
print(f"Max diff Pytorch = {(out_pt - out_ref).abs().max().item()}")
assert (out - out_ref).abs().max().item() <= 2 * (out_pt - out_ref).abs().max().item() + atol
g = torch.randn_like(out)
out.backward(g)
out_ref.backward(g)
out_pt.backward(g)
print(f"Max dx diff = {(x.grad - x_ref.grad).abs().max().item()}")
print(f"Max dx diff Pytorch = {(x_pt.grad - x_ref.grad).abs().max().item()}")
if has_z:
print(f"Max dz diff = {(z.grad - z_ref.grad).abs().max().item()}")
print(f"Max dz diff Pytorch = {(z_pt.grad - z_ref.grad).abs().max().item()}")
print(f"Max dw diff = {(weight.grad - weight_ref.grad).abs().max().item()}")
print(f"Max dw diff Pytorch = {(weight_pt.grad - weight_ref.grad).abs().max().item()}")
if has_bias:
print(f"Max db diff = {(bias.grad - bias_ref.grad).abs().max().item()}")
print(f"Max db diff Pytorch = {(bias_pt.grad - bias_ref.grad).abs().max().item()}")
assert (x.grad - x_ref.grad).abs().max().item() <= 2 * (x_pt.grad - x_ref.grad).abs().max().item() + atol
if has_z:
assert (z.grad - z_ref.grad).abs().max().item() <= 2 * (z_pt.grad - z_ref.grad).abs().max().item() + atol
assert (weight.grad - weight_ref.grad).abs().max().item() <= 2 * (weight_pt.grad - weight_ref.grad).abs().max().item() + atol
if has_bias:
assert (bias.grad - bias_ref.grad).abs().max().item() <= 2 * (bias_pt.grad - bias_ref.grad).abs().max().item() + atol
# Copyright (C) 2023, Tri Dao.
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange, repeat
from mamba_ssm.ops.triton.selective_state_update import selective_state_update, selective_state_update_ref
@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
# @pytest.mark.parametrize('itype', [torch.float16])
@pytest.mark.parametrize("has_z", [False, True])
# @pytest.mark.parametrize('has_z', [True])
@pytest.mark.parametrize("dstate", [16, 32, 64])
# @pytest.mark.parametrize("dstate", [16])
@pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096])
# @pytest.mark.parametrize("dim", [2048])
def test_selective_state_update(dim, dstate, has_z, itype):
device = "cuda"
rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (5e-3, 1e-2)
if itype == torch.bfloat16:
rtol, atol = 1e-2, 5e-2
if torch.version.hip:
atol *= 2
# set seed
torch.random.manual_seed(0)
batch_size = 2
state = torch.randn(batch_size, dim, dstate, dtype=itype, device=device)
x = torch.randn(batch_size, dim, device=device, dtype=itype)
dt = torch.randn(batch_size, dim, device=device, dtype=itype)
dt_bias = torch.rand(dim, device=device) - 4.0
A = -torch.rand(dim, dstate, device=device) - 1.0
B = torch.randn(batch_size, dstate, device=device)
C = torch.randn(batch_size, dstate, device=device)
D = torch.randn(dim, device=device)
if has_z:
z = torch.randn_like(x)
else:
z = None
state_ref = state.detach().clone()
out = selective_state_update(state, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True)
out_ref = selective_state_update_ref(state_ref, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True)
print(f"Output max diff: {(out - out_ref).abs().max().item()}")
print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
assert torch.allclose(state, state_ref, rtol=rtol, atol=atol)
assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)
@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
# @pytest.mark.parametrize('itype', [torch.float16])
@pytest.mark.parametrize("has_z", [False, True])
# @pytest.mark.parametrize('has_z', [True])
@pytest.mark.parametrize("tie_hdim", [False, True])
# @pytest.mark.parametrize('tie_hdim', [True])
@pytest.mark.parametrize("ngroups", [1, 2, 4])
# @pytest.mark.parametrize("ngroups", [2])
@pytest.mark.parametrize("dstate", [16, 32, 64])
# @pytest.mark.parametrize("dstate", [16])
@pytest.mark.parametrize("dim", [2048, 4096])
# @pytest.mark.parametrize("dim", [2048])
def test_selective_state_update_with_heads(dim, dstate, ngroups, has_z, tie_hdim, itype):
device = "cuda"
rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (5e-3, 3e-2)
if itype == torch.bfloat16:
rtol, atol = 1e-2, 1e-1
# set seed
torch.random.manual_seed(0)
batch_size = 2
headdim = 64
nheads = dim // headdim
state = torch.randn(batch_size, nheads, headdim, dstate, dtype=itype, device=device)
x = torch.randn(batch_size, nheads, headdim, device=device, dtype=itype)
if not tie_hdim:
dt = torch.randn(batch_size, nheads, headdim, device=device, dtype=itype)
dt_bias = torch.rand(nheads, headdim, device=device) - 4.0
A = -torch.rand(nheads, headdim, dstate, device=device) - 1.0
D = torch.randn(nheads, headdim, device=device)
else:
dt = repeat(torch.randn(batch_size, nheads, device=device, dtype=itype), "b h -> b h p", p=headdim)
dt_bias = repeat(torch.rand(nheads, device=device) - 4.0, "h -> h p", p=headdim)
A = repeat(-torch.rand(nheads, device=device) - 1.0, "h -> h p n", p=headdim, n=dstate)
D = repeat(torch.randn(nheads, device=device), "h -> h p", p=headdim)
B = torch.randn(batch_size, ngroups, dstate, device=device)
C = torch.randn(batch_size, ngroups, dstate, device=device)
if has_z:
z = torch.randn_like(x)
else:
z = None
state_ref = state.detach().clone()
state_og = state.detach().clone()
out = selective_state_update(state, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True)
out_ref = selective_state_update_ref(state_ref, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True)
print(f"Output max diff: {(out - out_ref).abs().max().item()}")
print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
assert torch.allclose(state, state_ref, rtol=rtol, atol=atol)
assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange, repeat
from mamba_ssm.ops.triton.ssd_chunk_state import chunk_state, chunk_state_ref
from mamba_ssm.ops.triton.ssd_chunk_state import _chunk_cumsum_fwd, _chunk_state_fwd
from mamba_ssm.ops.triton.ssd_chunk_state import chunk_state_varlen
from mamba_ssm.ops.triton.ssd_state_passing import state_passing, state_passing_ref
from mamba_ssm.ops.triton.ssd_state_passing import _state_passing_fwd
from mamba_ssm.ops.triton.ssd_chunk_scan import chunk_scan, chunk_scan_ref
from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_chunk_scan, ssd_chunk_scan_combined_ref, ssd_selective_scan
from mamba_ssm.ops.triton.ssd_combined import mamba_split_conv1d_scan_combined, mamba_split_conv1d_scan_ref
def detach_clone(*args):
return tuple([arg.detach().clone().requires_grad_() if arg is not None else None for arg in args])
@pytest.mark.parametrize('dtype', [torch.float32, torch.float16, torch.bfloat16])
# @pytest.mark.parametrize('dtype', [torch.bfloat16])
@pytest.mark.parametrize('ngroups', [1, 2, 8, "max"])
# @pytest.mark.parametrize('ngroups', [1])
@pytest.mark.parametrize('chunk_size', [64, 128])
# @pytest.mark.parametrize('chunk_size', [128])
def test_chunk_state_varlen(chunk_size, ngroups, dtype):
device = 'cuda'
rtol, atol = (1e-2, 3e-3)
# set seed
torch.random.manual_seed(chunk_size + (ngroups if ngroups != "max" else 64))
batch = 300
seqlens = torch.randint(1, 200, (batch,), device=device)
# batch = 3
# seqlens = torch.tensor([201, 56, 5], device=device)
cu_seqlens = F.pad(seqlens.cumsum(0), (1, 0))
total_seqlen = seqlens.sum().item()
seq_idx = torch.cat([torch.full((s,), i, dtype=torch.int32, device=device) for i, s in enumerate(seqlens)], dim=0).unsqueeze(0)
dim = 4096
# dim = 64
headdim = 64
# dim = 32
dstate = 32
assert dim % headdim == 0
nheads = dim // headdim
if ngroups == "max":
ngroups = nheads
assert nheads % ngroups == 0
B = torch.randn(total_seqlen, ngroups, dstate, dtype=dtype, device=device) / 5
x = torch.randn(total_seqlen, nheads, headdim, dtype=dtype, device=device)
A = -0.1 * (torch.rand(nheads, device=device))
dt = F.softplus(torch.randn(total_seqlen, nheads, device=device, dtype=torch.float32) - 4)
dA_cumsum, dt_rounded = _chunk_cumsum_fwd(dt.unsqueeze(0), A, chunk_size)
chunk_states = _chunk_state_fwd(B.unsqueeze(0), x.unsqueeze(0), dt_rounded, dA_cumsum, seq_idx=seq_idx)
chunk_states, _ = _state_passing_fwd(rearrange(chunk_states, "... p n -> ... (p n)"), dA_cumsum[:, :, :, -1],
seq_idx=seq_idx, chunk_size=chunk_size)
chunk_states = rearrange(chunk_states, "... (p n) -> ... p n", n=dstate)
chunk_states = chunk_states.squeeze(0)
dA_cumsum = dA_cumsum.squeeze(0)
dt_rounded = dt_rounded.squeeze(0)
out = chunk_state_varlen(B, x, dt_rounded, dA_cumsum, cu_seqlens, chunk_states)
out_ref = []
for b in range(batch):
x_s = x[cu_seqlens[b]:cu_seqlens[b + 1]].unsqueeze(0)
B_s = B[cu_seqlens[b]:cu_seqlens[b + 1]].unsqueeze(0)
dt_s = dt[cu_seqlens[b]:cu_seqlens[b + 1]].unsqueeze(0)
dA_cumsum_s, dt_rounded_s = _chunk_cumsum_fwd(dt_s, A, chunk_size)
states = chunk_state(B_s, x_s, dt_rounded_s, dA_cumsum_s)
_, final_states = _state_passing_fwd(rearrange(states, "... p n -> ... (p n)"), dA_cumsum_s[:, :, :, -1],
chunk_size=chunk_size)
final_states = rearrange(final_states, "... (p n) -> ... p n", n=dstate)
out_ref.append(final_states)
out_ref = torch.cat(out_ref, dim=0)
print(f"Max diff = {(out - out_ref).abs().max().item()}")
assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)
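
# Generation tests: exercise Mamba2 decoding (single-prompt and variable-length prefill)
# against logits from a full forward pass.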
import torch
import torch.nn.functional as F
from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel
from mamba_ssm.models.config_mamba import MambaConfig
from mamba_ssm.utils.generation import InferenceParams
import pytest
from einops import rearrange, repeat

def test_generation():
batch = 3
seqlen = 20
device = "cuda"
dtype = torch.float16
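# Small 4-layer Mamba2 LM in fp16; the same config is reused by the varlen test below.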
config = MambaConfig(
d_model=1024,
n_layer=4,
vocab_size=50277,
ssm_cfg=dict(layer="Mamba2"),
rms_norm=True,
residual_in_fp32=True,
fused_add_norm=True,
pad_vocab_size_multiple=16,
)
torch.manual_seed(2357)
model = MambaLMHeadModel(config, device=device, dtype=dtype)
x = torch.randint(0, 1000, (batch, seqlen), device=device, dtype=torch.long)
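# Reference: logits from a single forward pass over the full sequence x.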
out_ref = model(x).logits
prompt_len = seqlen // 2
out = model.generate(
input_ids=x[:, :prompt_len], max_length=seqlen, output_scores=True, return_dict_in_generate=True,
cg=True, # Can turn off CUDA graph for easier debugging
# instead of sampling, we take output tokens from x, to get logits for testing
# For actual generation, don't pass in teacher_outputs
teacher_outputs=x,
)
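# The score at each generation step is the logit vector that predicts that step's token,
# i.e. out_ref shifted by one position, hence the prompt_len - 1 : -1 slice below.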
out_scores = torch.stack(out.scores, dim=1)
print(f"Max diff: {(out_scores - out_ref[:, prompt_len - 1: -1]).abs().max()}")
assert torch.allclose(out_scores, out_ref[:, prompt_len - 1: -1], rtol=1e-3, atol=1e-2)
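
# Variable-length generation: prefill several prompts packed into a single row
# (via seq_idx and cu_seqlens), decode them jointly as a batch, and compare against
# per-sequence generation and the packed forward pass.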
def test_generation_varlen():
seqlens = [170, 65, 100]
genlen = 20
total_seqlen = sum(seqlens)
device = "cuda"
dtype = torch.float16
config = MambaConfig(
d_model=1024,
n_layer=4,
vocab_size=50277,
ssm_cfg=dict(layer="Mamba2"),
rms_norm=True,
residual_in_fp32=True,
fused_add_norm=True,
pad_vocab_size_multiple=16,
)
torch.manual_seed(2357)
model = MambaLMHeadModel(config, device=device, dtype=dtype)
xs = [torch.randint(0, 1000, (1, seqlen), device=device, dtype=torch.long) for seqlen in seqlens]
# Reference 1: Forward pass with seq_idx
x = torch.cat(xs, dim=1)
seq_idx = torch.cat([torch.full((ids.shape[1],), i, dtype=torch.int32, device=device)
for i, ids in enumerate(xs)], dim=0).unsqueeze(0)
cu_seqlens = F.pad(torch.tensor(seqlens, device=device, dtype=torch.int32).cumsum(dim=0), (1, 0))
out_ref = model(x, seq_idx=seq_idx).logits
# Only take the last @genlen logits of each sequence
out_ref = torch.cat([out_ref[:, cu_seqlens[i + 1] - genlen - 1:cu_seqlens[i + 1] - 1]
for i in range(len(seqlens))], dim=0)
# Reference 2: Generate the last @genlen tokens of each sequence in a for loop
out_loop = []
for input_ids in xs:
out = model.generate(
input_ids=input_ids[:, :-genlen], max_length=input_ids.shape[1], output_scores=True,
return_dict_in_generate=True, cg=True, teacher_outputs=input_ids,
).scores
out_loop.append(torch.stack(out, dim=1))
out_loop = torch.cat(out_loop, dim=0)
print(f"Max diff between ref1 and ref2: {(out_loop - out_ref).abs().max()}")
# Varlen generation
input_ids = torch.cat([ids[:, :-genlen] for ids in xs], dim=1)
prompt_seqlens = [seqlen - genlen for seqlen in seqlens]
cu_seqlens = F.pad(torch.tensor(prompt_seqlens, device=device, dtype=torch.int32).cumsum(dim=0), (1, 0))
seq_idx = torch.cat([torch.full((seqlen,), i, dtype=torch.int32, device=device)
for i, seqlen in enumerate(prompt_seqlens)], dim=0).unsqueeze(0)
inference_params = InferenceParams(max_seqlen=2048, max_batch_size=len(seqlens))
scores, sequences = [], []
# Both seq_idx and cu_seqlens must be passed in for varlen generation
logits = model(input_ids, inference_params=inference_params, seq_idx=seq_idx, cu_seqlens=cu_seqlens).logits
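# Keep only the logits at each prompt's last position (cu_seqlens[1:] - 1); these score the first generated token of every sequence.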
logits = rearrange(logits[0, cu_seqlens[1:] - 1], "b d -> b 1 d")
scores.append(logits)
# In practice we should sample. In this case we take from the teacher_output for testing
sampled_tokens = rearrange(torch.stack([ids[0, -genlen] for ids in xs], dim=0), "b -> b 1")
sequences.append(sampled_tokens)
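# Decode loop: all sequences step together, one token each, reusing the states cached in inference_params.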
for i in range(1, genlen):
inference_params.seqlen_offset += 1
logits = model(sampled_tokens, inference_params=inference_params, num_last_tokens=1).logits
scores.append(logits)
# In practice we should sample. In this case we take from the teacher_output for testing
sampled_tokens = rearrange(torch.stack([ids[0, -genlen + i] for ids in xs], dim=0), "b -> b 1")
sequences.append(sampled_tokens)
out_varlen = torch.cat(scores, dim=1)
print(f"Max diff: {(out_varlen - out_ref).abs().max()}")
assert (out_varlen - out_ref).abs().max() < 2 * (out_loop - out_ref).abs().max()