Unverified Commit bd405e03 authored by drbh, committed by GitHub

Impl simple mamba model (#1480)

This draft PR is a work-in-progress implementation of the mamba model. It currently loads weights and produces correct logits after a single pass.

It still needs to integrate the model so that it produces tokens as expected, and to apply optimizations that avoid unnecessary copies and operations at runtime.

#### Helpful resources

- [Mamba: Linear-Time Sequence Modeling with Selective State Spaces (Albert Gu and Tri Dao)](https://arxiv.org/abs/2312.00752)
- https://github.com/johnma2006/mamba-minimal
- https://github.com/huggingface/candle/blob/main/candle-examples/examples/mamba-minimal/model.rs
- https://github.com/huggingface/transformers/pull/28094



Notes: this dev work currently targets `state-spaces/mamba-130m`, so if you want to test, please use that model. Additionally, when starting the router, the prefill needs to be limited: `cargo run -- --max-batch-prefill-tokens 768 --max-input-length 768`.


## Update / Current State

Integration tests have been added, and basic functionality such as model loading is supported.

```bash
cd integration-tests
pytest -vv models/test_fused_kernel_mamba.py
```
- [x] add tests
- [x] load model
- [x] make a simple request
- [ ] resolve warmup issue
- [ ] resolve output issues


Fetch the models tested during development:
```bash
text-generation-server download-weights state-spaces/mamba-130m
text-generation-server download-weights state-spaces/mamba-1.4b
text-generation-server download-weights state-spaces/mamba-2.8b
```

The server can be run with:
```bash
cd server
MASTER_ADDR=127.0.0.1 MASTER_PORT=5555 python text_generation_server/cli.py serve state-spaces/mamba-2.8b
```

Start the router:
```bash
cargo run
```

Make a request:
```bash
curl -s localhost:3000/generate \
    -X POST \
    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
    -H 'Content-Type: application/json' | jq
```

Response:
```json
{
  "generated_text": "\n\nDeep learning is a machine learning technique that uses a deep neural network to learn from data."
}
```
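
The same request can be scripted. Below is a minimal Python sketch using the `requests` library; the endpoint and payload mirror the `curl` call above, and the port assumes the router's default of 3000:

```python
import requests

# Mirror of the curl request above against a locally running router.
response = requests.post(
    "http://localhost:3000/generate",
    json={
        "inputs": "What is Deep Learning?",
        "parameters": {"max_new_tokens": 20},
    },
    headers={"Content-Type": "application/json"},
)
print(response.json()["generated_text"])
```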

---------
Co-authored-by: Nicolas Patry <patry.nicolas@protonmail.com>
---

@@ -154,6 +154,12 @@ COPY server/Makefile-vllm Makefile
# Build specific version of vllm
RUN make build-vllm-cuda
# Build mamba kernels
FROM kernel-builder as mamba-builder
WORKDIR /usr/src
COPY server/Makefile-selective-scan Makefile
RUN make build-all
# Build megablocks
FROM kernel-builder as megablocks-builder
@@ -205,6 +211,10 @@ COPY --from=eetq-kernels-builder /usr/src/eetq/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy builds artifacts from vllm builder
COPY --from=vllm-builder /usr/src/vllm/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from mamba builder
COPY --from=mamba-builder /usr/src/mamba/build/lib.linux-x86_64-cpython-310/ /opt/conda/lib/python3.10/site-packages
COPY --from=mamba-builder /usr/src/causal-conv1d/build/lib.linux-x86_64-cpython-310/ /opt/conda/lib/python3.10/site-packages
# Install flash-attention dependencies
RUN pip install einops --no-cache-dir
---
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 187,
"logprob": -0.3552246,
"special": false,
"text": "\n"
},
{
"id": 187,
"logprob": -0.38378906,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.140625,
"special": false,
"text": "Deep"
},
{
"id": 4715,
"logprob": -0.5551758,
"special": false,
"text": " learning"
},
{
"id": 310,
"logprob": -0.59033203,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.70654297,
"special": false,
"text": " a"
},
{
"id": 747,
"logprob": -2.0410156,
"special": false,
"text": " new"
},
{
"id": 1511,
"logprob": -2.3789062,
"special": false,
"text": " type"
},
{
"id": 273,
"logprob": -0.0026435852,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.2841797,
"special": false,
"text": " machine"
}
],
"top_tokens": null
},
"generated_text": "\n\nDeep learning is a new type of machine"
}
---

{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 2502,
"logprob": null,
"text": " red"
},
{
"id": 13,
"logprob": -2.5234375,
"text": ","
},
{
"id": 8862,
"logprob": -3.4433594,
"text": " yellow"
},
{
"id": 13,
"logprob": -0.43017578,
"text": ","
},
{
"id": 209,
"logprob": -8.21875,
"text": " "
}
],
"seed": 0,
"tokens": [
{
"id": 187,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 395,
"logprob": -0.46411133,
"special": false,
"text": "and"
},
{
"id": 13735,
"logprob": -2.1132812,
"special": false,
"text": " orange"
},
{
"id": 313,
"logprob": -1.2128906,
"special": false,
"text": " ("
},
{
"id": 249,
"logprob": -2.3671875,
"special": false,
"text": "in"
},
{
"id": 253,
"logprob": 0.0,
"special": false,
"text": " the"
},
{
"id": 1340,
"logprob": -1.640625,
"special": false,
"text": " order"
},
{
"id": 597,
"logprob": -0.5488281,
"special": false,
"text": " they"
},
{
"id": 3176,
"logprob": -0.48608398,
"special": false,
"text": " appear"
},
{
"id": 275,
"logprob": 0.0,
"special": false,
"text": " in"
}
],
"top_tokens": null
},
"generated_text": "blue, red, yellow, \nand orange (in the order they appear in"
}
---

[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -0.8125,
"text": " is"
},
{
"id": 18147,
"logprob": -12.828125,
"text": " Deep"
},
{
"id": 20727,
"logprob": -3.0,
"text": " Learning"
},
{
"id": 32,
"logprob": -1.1484375,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 187,
"logprob": -0.3552246,
"special": false,
"text": "\n"
},
{
"id": 187,
"logprob": -0.38378906,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.1279297,
"special": false,
"text": "Deep"
},
{
"id": 4715,
"logprob": -0.5595703,
"special": false,
"text": " learning"
},
{
"id": 310,
"logprob": -0.60253906,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.7050781,
"special": false,
"text": " a"
},
{
"id": 747,
"logprob": -2.0488281,
"special": false,
"text": " new"
},
{
"id": 1511,
"logprob": -2.3808594,
"special": false,
"text": " type"
},
{
"id": 273,
"logprob": -0.0026416779,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.2851562,
"special": false,
"text": " machine"
}
],
"top_tokens": null
},
"generated_text": "\n\nDeep learning is a new type of machine"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -0.78027344,
"text": " is"
},
{
"id": 18147,
"logprob": -12.8203125,
"text": " Deep"
},
{
"id": 20727,
"logprob": -2.9902344,
"text": " Learning"
},
{
"id": 32,
"logprob": -1.1523438,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 187,
"logprob": -0.35351562,
"special": false,
"text": "\n"
},
{
"id": 187,
"logprob": -0.38256836,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.1269531,
"special": false,
"text": "Deep"
},
{
"id": 4715,
"logprob": -0.54541016,
"special": false,
"text": " learning"
},
{
"id": 310,
"logprob": -0.59765625,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.7001953,
"special": false,
"text": " a"
},
{
"id": 747,
"logprob": -2.0585938,
"special": false,
"text": " new"
},
{
"id": 1511,
"logprob": -2.3789062,
"special": false,
"text": " type"
},
{
"id": 273,
"logprob": -0.0027446747,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.2851562,
"special": false,
"text": " machine"
}
],
"top_tokens": null
},
"generated_text": "\n\nDeep learning is a new type of machine"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -0.78027344,
"text": " is"
},
{
"id": 18147,
"logprob": -12.8203125,
"text": " Deep"
},
{
"id": 20727,
"logprob": -2.9902344,
"text": " Learning"
},
{
"id": 32,
"logprob": -1.1523438,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 187,
"logprob": -0.35351562,
"special": false,
"text": "\n"
},
{
"id": 187,
"logprob": -0.38256836,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.1269531,
"special": false,
"text": "Deep"
},
{
"id": 4715,
"logprob": -0.54541016,
"special": false,
"text": " learning"
},
{
"id": 310,
"logprob": -0.59765625,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.7001953,
"special": false,
"text": " a"
},
{
"id": 747,
"logprob": -2.0585938,
"special": false,
"text": " new"
},
{
"id": 1511,
"logprob": -2.3789062,
"special": false,
"text": " type"
},
{
"id": 273,
"logprob": -0.0027446747,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.2851562,
"special": false,
"text": " machine"
}
],
"top_tokens": null
},
"generated_text": "\n\nDeep learning is a new type of machine"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -0.78027344,
"text": " is"
},
{
"id": 18147,
"logprob": -12.8203125,
"text": " Deep"
},
{
"id": 20727,
"logprob": -2.9902344,
"text": " Learning"
},
{
"id": 32,
"logprob": -1.1523438,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 187,
"logprob": -0.35351562,
"special": false,
"text": "\n"
},
{
"id": 187,
"logprob": -0.38256836,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.1269531,
"special": false,
"text": "Deep"
},
{
"id": 4715,
"logprob": -0.54541016,
"special": false,
"text": " learning"
},
{
"id": 310,
"logprob": -0.59765625,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.7001953,
"special": false,
"text": " a"
},
{
"id": 747,
"logprob": -2.0585938,
"special": false,
"text": " new"
},
{
"id": 1511,
"logprob": -2.3789062,
"special": false,
"text": " type"
},
{
"id": 273,
"logprob": -0.0027446747,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.2851562,
"special": false,
"text": " machine"
}
],
"top_tokens": null
},
"generated_text": "\n\nDeep learning is a new type of machine"
}
]
---

import pytest


@pytest.fixture(scope="module")
def fused_kernel_mamba_handle(launcher):
    with launcher("state-spaces/mamba-130m", num_shard=1) as handle:
        yield handle


@pytest.fixture(scope="module")
async def fused_kernel_mamba(fused_kernel_mamba_handle):
    await fused_kernel_mamba_handle.health(300)
    return fused_kernel_mamba_handle.client


@pytest.mark.asyncio
@pytest.mark.private
async def test_mamba(fused_kernel_mamba, response_snapshot):
    response = await fused_kernel_mamba.generate(
        "What is Deep Learning?", max_new_tokens=10
    )

    assert response.details.generated_tokens == 10
    assert response.generated_text == "\n\nDeep learning is a new type of machine"
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_mamba_all_params(fused_kernel_mamba, response_snapshot):
    response = await fused_kernel_mamba.generate(
        "blue, red, yellow, ",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert (
        response.generated_text
        == "blue, red, yellow, \nand orange (in the order they appear in"
    )
    assert response == response_snapshot


@pytest.mark.asyncio
@pytest.mark.private
async def test_mamba_load(fused_kernel_mamba, generate_load, response_snapshot):
    responses = await generate_load(
        fused_kernel_mamba, "What is Deep Learning?", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])
    assert responses[0].generated_text == "\n\nDeep learning is a new type of machine"

    assert responses == response_snapshot
---

@@ -161,3 +161,4 @@ flash-attention-v2/
vllm/
llm-awq/
eetq/
mamba/
---

@@ -3,6 +3,7 @@ include Makefile-flash-att-v2
include Makefile-vllm
include Makefile-awq
include Makefile-eetq
include Makefile-selective-scan
unit-tests:
	pytest -s -vv -m "not private" tests
---
selective_scan_commit := 2a3704fd47ba817b415627b06fd796b971fdc137

causal-conv1d:
	rm -rf causal-conv1d
	git clone https://github.com/Dao-AILab/causal-conv1d.git

build-causal-conv1d: causal-conv1d
	cd causal-conv1d/ && git checkout v1.1.1 # known latest working version tag
	cd causal-conv1d/ && CAUSAL_CONV1D_FORCE_BUILD=TRUE python setup.py build

install-causal-conv1d: build-causal-conv1d
	pip uninstall causal-conv1d -y || true
	cd causal-conv1d/ && pip install .

# selective-scan depends on causal-conv1d
selective-scan:
	rm -rf mamba
	git clone https://github.com/state-spaces/mamba.git mamba

build-selective-scan: selective-scan
	cd mamba/ && git fetch && git checkout $(selective_scan_commit)
	cd mamba && python setup.py build

install-selective-scan: install-causal-conv1d build-selective-scan
	pip uninstall selective-scan-cuda -y || true
	cd mamba && pip install .

build-all: build-causal-conv1d build-selective-scan
\ No newline at end of file
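
After `make install-selective-scan` (which also builds and installs `causal-conv1d`), the kernels can be smoke-tested with a quick import check. This is a minimal sketch; the module paths simply mirror the imports used by the model code below:

```python
# Verify that the kernels built by the Makefile targets above are importable.
import torch
from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
from mamba_ssm.ops.selective_scan_interface import selective_scan_fn
from mamba_ssm.ops.triton.selective_state_update import selective_state_update

print("kernels importable; CUDA available:", torch.cuda.is_available())
```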
---

@@ -76,6 +76,15 @@ if FLASH_ATTENTION:
    __all__.append(FlashMixtral)
    __all__.append(FlashPhi)

MAMBA_AVAILABLE = True
try:
    from text_generation_server.models.mamba import Mamba
except ImportError as e:
    logger.warning(f"Could not import Mamba: {e}")
    MAMBA_AVAILABLE = False

if MAMBA_AVAILABLE:
    __all__.append(Mamba)
def get_model(
    model_id: str,
@@ -164,7 +173,25 @@ def get_model(
    if speculate > 0:
        logger.info(f"Using speculation {method} with {speculate} input ids.")

    model_type = config_dict.get("model_type", None)
    if model_type is None:
        # TODO: fix how we determine model type for Mamba
        if "ssm_cfg" in config_dict:
            # *only happens in Mamba case
            model_type = "ssm"
        else:
            raise RuntimeError(
                f"Could not determine model type for {model_id} revision {revision}"
            )

    if model_type == "ssm":
        return Mamba(
            model_id,
            revision,
            quantize=quantize,
            dtype=dtype,
            trust_remote_code=trust_remote_code,
        )

    if model_type == "gpt_bigcode":
        if FLASH_ATTENTION:
---
import torch
import torch.distributed
from mamba_ssm.ops.triton.selective_state_update import selective_state_update
from mamba_ssm.ops.selective_scan_interface import selective_scan_fn
from mamba_ssm.utils.generation import InferenceParams
from torch import nn
from typing import Optional, Tuple, Any
from transformers.configuration_utils import PretrainedConfig
import torch.nn.functional as F
from text_generation_server.utils.layers import (
TensorParallelEmbedding,
FastRMSNorm,
FastLinear,
)
from einops import rearrange
from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
import math
class MambaConfig(PretrainedConfig):
    def __init__(
        self,
        vocab_size=50280,
        d_model=768,
        d_state=16,
        n_layer=32,
        layer_norm_epsilon=1e-5,
        tie_word_embeddings=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        expand=2,
        dt_rank="auto",
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_layer = n_layer
        self.layer_norm_epsilon = layer_norm_epsilon
        self.d_model = d_model
        self.d_inner = d_model * 2
        self.d_conv = 4
        self.d_state = d_state
        self.expand = expand
        self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
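# Note (illustration, not in the original file): with the default d_model=768
# used by state-spaces/mamba-130m, dt_rank resolves to math.ceil(768 / 16) = 48.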
class MambaBlock(nn.Module):
    def __init__(self, prefix, config, weights):
        super().__init__()
        self.layer_idx = int(prefix.split(".")[2])
        self.in_proj = FastLinear.load(config, f"{prefix}.in_proj", weights, bias=False)
        self.x_proj = FastLinear.load(config, f"{prefix}.x_proj", weights, bias=False)
        self.dt_proj = FastLinear.load(config, f"{prefix}.dt_proj", weights, bias=True)
        self.dt_proj_no_bias = FastLinear.load(
            config, f"{prefix}.dt_proj", weights, bias=False
        )
        self.out_proj = FastLinear.load(config, f"{prefix}.out_proj", weights, bias=False)
        self.conv1d = FastLinear.load(config, f"{prefix}.conv1d", weights, bias=True)
        self.negA = -torch.exp(weights.get_tensor(f"{prefix}.A_log").float())
        self.D = weights.get_tensor(f"{prefix}.D")
        self.activation = "silu"
        self.dt_rank = config.dt_rank
        self.d_state = config.d_state
        self.d_conv = config.d_conv
        self.act = nn.SiLU()

    def forward(self, hidden_states: torch.Tensor, inference_params=None):
        _, seqlen, _ = hidden_states.shape
        conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx]

        # Decode path: past the prompt, generate one token at a time.
        if inference_params.seqlen_offset > 0:
            out, conv_state, ssm_state = self.step(hidden_states, conv_state, ssm_state)
            return out, conv_state, ssm_state

        # Prefill path: run the fused kernels over the whole prompt.
        projected_states = self.in_proj(hidden_states).transpose(1, 2)
        x, z = projected_states.chunk(2, dim=1)
        conv_state = F.pad(x, (self.d_conv - seqlen, 0))
        x = causal_conv1d_fn(
            x=x,
            weight=self.conv1d.weight.view(
                self.conv1d.weight.size(0), self.conv1d.weight.size(2)
            ),
            bias=self.conv1d.bias,
            activation=self.activation,
        )

        # We're careful here about the layout, to avoid extra transposes.
        # We want dt to have d as the slowest moving dimension
        # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
        x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d"))  # (bl d)
        dt, B, C = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1)
        dt = self.dt_proj.weight @ dt.t()
        dt = rearrange(dt, "d (b l) -> b d l", l=seqlen)
        B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous()
        C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous()
        y, last_state = selective_scan_fn(
            x,
            dt,
            self.negA,
            B,
            C,
            self.D.float(),
            z=z,
            delta_bias=self.dt_proj.bias.float(),
            delta_softplus=True,
            return_last_state=True,
        )
        y = rearrange(y, "b d l -> b l d")
        attn_outputs = self.out_proj(y)
        return attn_outputs, conv_state, last_state

    def step(self, hidden_states, conv_state, ssm_state):
        _xz = self.in_proj(hidden_states)
        _x, _z = _xz.chunk(2, dim=-1)  # (B D)
        conv_state_new = torch.cat([conv_state, _x.transpose(1, 2)], dim=-1)
        conv_out = causal_conv1d_fn(
            x=conv_state_new,
            weight=self.conv1d.weight.view(
                self.conv1d.weight.size(0), self.conv1d.weight.size(2)
            ),
            bias=self.conv1d.bias,
            activation=self.activation,
        )
        # Slide the convolution window forward by one token.
        conv_state = conv_state_new[:, :, 1:]
        bsz, seqlen, dim = hidden_states.shape
        output_tensor = torch.zeros(
            (bsz, seqlen, dim), device=hidden_states.device, dtype=hidden_states.dtype
        )
        for i in range(0, bsz):
            x = conv_out[i : i + 1, :, -1]
            z = _z[i : i + 1, -1, :]
            x_db = self.x_proj(x)
            dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1)
            dt = F.linear(dt, self.dt_proj.weight)
            # Advance this sequence's SSM state by one step.
            y = selective_state_update(
                ssm_state[i : i + 1, :, :],
                x,
                dt,
                self.negA,
                B,
                C,
                self.D,
                z=z,
                dt_bias=self.dt_proj.bias,
                dt_softplus=True,
            )
            out = self.out_proj(y)
            output_tensor[i] = out
        return output_tensor, conv_state, ssm_state
class ResidualBlock(nn.Module):
    def __init__(self, layer_id, config, weights):
        super().__init__()
        self.mamba_block = MambaBlock(
            prefix=f"{layer_id}.mixer", config=config, weights=weights
        )
        self.layer_norm = FastRMSNorm.load(
            prefix=f"{layer_id}.norm", weights=weights, eps=config.layer_norm_epsilon
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        residual: Optional[torch.Tensor] = None,
        inference_params: Optional[Any] = None,
    ):
        residual = (hidden_states + residual) if residual is not None else hidden_states
        shape = residual.shape
        hidden_states, _ = self.layer_norm(residual.view(-1, shape[-1]))
        hidden_states, conv_state, last_ssm_state = self.mamba_block(
            hidden_states.view(*shape), inference_params
        )
        return hidden_states, residual, conv_state, last_ssm_state
class MambaModel(nn.Module):
    def __init__(self, config, weights):
        super().__init__()
        prefix = "backbone"
        self.embed_tokens = TensorParallelEmbedding(f"{prefix}.embedding", weights)
        self.blocks = nn.ModuleList(
            [
                ResidualBlock(f"{prefix}.layers.{i}", config, weights)
                for i in range(config.n_layer)
            ]
        )
        self.norm_f = FastRMSNorm.load(
            f"{prefix}.norm_f", weights, eps=config.layer_norm_epsilon
        )
        # Tied embeddings: the lm_head reuses the embedding weights.
        self.lm_head = FastLinear.load(config, f"{prefix}.embedding", weights, bias=False)
        self.config = config

    def forward(
        self, input_ids: torch.Tensor, inference_params=None, residual=None
    ) -> Tuple[torch.Tensor, torch.Tensor, InferenceParams]:
        hidden_states = self.embed_tokens(input_ids)
        for block in self.blocks:
            hidden_states, residual, conv_state, ssm_state = block(
                hidden_states, residual, inference_params
            )
            # Cache each layer's conv and ssm states for the next decode step.
            inference_params.key_value_memory_dict[block.mamba_block.layer_idx] = (
                conv_state,
                ssm_state,
            )

        hidden_states = hidden_states + residual if residual is not None else hidden_states
        hidden_states, _ = self.norm_f(hidden_states.view(-1, hidden_states.size(-1)))
        hidden_states = hidden_states.view(residual.shape)
        logits = self.lm_head(hidden_states)

        # update the offset for the next inference using these params
        inference_params.seqlen_offset += input_ids.size(1)
        return logits, input_ids, inference_params
\ No newline at end of file
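
For intuition about what the fused `selective_scan_fn` call computes during prefill, here is a naive per-timestep reference in the spirit of the mamba-minimal repository linked above. This is an illustrative sketch, not the kernel's implementation; `selective_scan_ref` and its exact argument handling are assumptions for readability:

```python
import torch
import torch.nn.functional as F

def selective_scan_ref(x, dt, A, B, C, D, z, delta_bias):
    # Shapes: x, dt, z: (b, d, l); A: (d, n); B, C: (b, n, l); D, delta_bias: (d,)
    b, d, l = x.shape
    dt = F.softplus(dt + delta_bias[None, :, None])  # delta_softplus=True with delta_bias
    h = x.new_zeros(b, d, A.shape[1])                # SSM hidden state
    ys = []
    for t in range(l):
        dA = torch.exp(dt[:, :, t, None] * A)                          # discretized A
        dBx = dt[:, :, t, None] * B[:, None, :, t] * x[:, :, t, None]  # input injection
        h = dA * h + dBx                                               # state recurrence
        ys.append((h @ C[:, :, t].unsqueeze(-1)).squeeze(-1))          # readout (b, d)
    y = torch.stack(ys, dim=-1) + D[None, :, None] * x                 # skip connection
    return y * F.silu(z)                                               # gated output
```

The fused CUDA kernel produces the same result in a single pass, and additionally returns the final `h` as `last_state`, which seeds the decode path in `step`.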