# This workflow will:
# - Create a new Github release
# - Build wheels for supported architectures
# - Deploy the wheels to the Github release
# - Release the static code to PyPi
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
name: Build wheels and deploy
on:
create:
tags:
- v*
jobs:
setup_release:
name: Create Release
runs-on: ubuntu-latest
steps:
- name: Get the tag version
id: extract_branch
run: echo ::set-output name=branch::${GITHUB_REF#refs/tags/}
shell: bash
- name: Create Release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ steps.extract_branch.outputs.branch }}
release_name: ${{ steps.extract_branch.outputs.branch }}
build_wheels:
name: Build Wheel
needs: setup_release
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
# Using ubuntu-20.04 instead of 22.04 for more compatibility (glibc). Ideally we'd use the
# manylinux docker image, but I haven't figured out how to install CUDA on manylinux.
os: [ubuntu-20.04]
python-version: ['3.8', '3.9', '3.10', '3.11', '3.12']
torch-version: ['2.0.1', '2.1.2', '2.2.2', '2.3.1', '2.4.0']
cuda-version: ['11.8.0', '12.2.2']
# We need separate wheels that either use the C++11 ABI (-D_GLIBCXX_USE_CXX11_ABI) or not.
# Pytorch wheels currently don't use it, but nvcr images have Pytorch compiled with the C++11 ABI.
# Without this we get import error (undefined symbol: _ZN3c105ErrorC2ENS_14SourceLocationESs)
# when building without C++11 ABI and using it on nvcr images.
cxx11_abi: ['FALSE', 'TRUE']
exclude:
# Pytorch < 2.2 does not support Python 3.12
- torch-version: '2.0.1'
python-version: '3.12'
- torch-version: '2.1.2'
python-version: '3.12'
# Pytorch <= 2.0 only supports CUDA <= 11.8
- torch-version: '2.0.1'
cuda-version: '12.2.2'
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Set CUDA and PyTorch versions
run: |
echo "MATRIX_CUDA_VERSION=$(echo ${{ matrix.cuda-version }} | awk -F \. {'print $1 $2'})" >> $GITHUB_ENV
echo "MATRIX_TORCH_VERSION=$(echo ${{ matrix.torch-version }} | awk -F \. {'print $1 "." $2'})" >> $GITHUB_ENV
- name: Free up disk space
if: ${{ runner.os == 'Linux' }}
# https://github.com/easimon/maximize-build-space/blob/master/action.yml
# https://github.com/easimon/maximize-build-space/tree/test-report
run: |
sudo rm -rf /usr/share/dotnet
sudo rm -rf /opt/ghc
sudo rm -rf /opt/hostedtoolcache/CodeQL
- name: Set up swap space
if: runner.os == 'Linux'
uses: pierotofy/set-swap-space@v1.0
with:
swap-size-gb: 10
- name: Install CUDA ${{ matrix.cuda-version }}
if: ${{ matrix.cuda-version != 'cpu' }}
uses: Jimver/cuda-toolkit@v0.2.14
id: cuda-toolkit
with:
cuda: ${{ matrix.cuda-version }}
linux-local-args: '["--toolkit"]'
# default method is "local", and we're hitting some error with caching for CUDA 11.8 and 12.1
# method: ${{ (matrix.cuda-version == '11.8.0' || matrix.cuda-version == '12.1.0') && 'network' || 'local' }}
method: 'network'
# We need the cuda libraries (e.g. cuSparse, cuSolver) for compiling PyTorch extensions,
# not just nvcc
# sub-packages: '["nvcc"]'
- name: Install PyTorch ${{ matrix.torch-version }}+cu${{ matrix.cuda-version }}
run: |
pip install --upgrade pip
# If we don't install lit before installing Pytorch, we get an error for torch 2.0.1:
# ERROR: Could not find a version that satisfies the requirement setuptools>=40.8.0 (from versions: none)
pip install lit
# For some reason torch 2.2.0 on python 3.12 errors saying no setuptools
pip install setuptools
# We want to figure out the CUDA version to download pytorch
# e.g. we can have system CUDA version being 11.7 but if torch==1.12 then we need to download the wheel from cu116
# This code is ugly, maybe there's a better way to do this.
export TORCH_CUDA_VERSION=$(python -c "from os import environ as env; \
minv = {'2.0': 117, '2.1': 118, '2.2': 118, '2.3': 118, '2.4': 118}[env['MATRIX_TORCH_VERSION']]; \
maxv = {'2.0': 118, '2.1': 121, '2.2': 121, '2.3': 121, '2.4': 124}[env['MATRIX_TORCH_VERSION']]; \
print(max(min(int(env['MATRIX_CUDA_VERSION']), maxv), minv))" \
)
if [[ ${{ matrix.torch-version }} == *"dev"* ]]; then
pip install --no-cache-dir --pre torch==${{ matrix.torch-version }} --index-url https://download.pytorch.org/whl/nightly/cu${TORCH_CUDA_VERSION}
else
pip install --no-cache-dir torch==${{ matrix.torch-version }} --index-url https://download.pytorch.org/whl/cu${TORCH_CUDA_VERSION}
fi
nvcc --version
python --version
python -c "import torch; print('PyTorch:', torch.__version__)"
python -c "import torch; print('CUDA:', torch.version.cuda)"
python -c "from torch.utils import cpp_extension; print (cpp_extension.CUDA_HOME)"
shell:
bash
- name: Build wheel
run: |
# We want setuptools >= 49.6.0 otherwise we can't compile the extension if system CUDA version is 11.7 and pytorch cuda version is 11.6
# https://github.com/pytorch/pytorch/blob/664058fa83f1d8eede5d66418abff6e20bd76ca8/torch/utils/cpp_extension.py#L810
# However this still fails so I'm using a newer version of setuptools
pip install setuptools==68.0.0
pip install ninja packaging wheel
export PATH=/usr/local/nvidia/bin:/usr/local/nvidia/lib64:$PATH
export LD_LIBRARY_PATH=/usr/local/nvidia/lib64:/usr/local/cuda/lib64:$LD_LIBRARY_PATH
# Limit MAX_JOBS otherwise the github runner goes OOM
MAX_JOBS=2 MAMBA_FORCE_BUILD="TRUE" MAMBA_FORCE_CXX11_ABI=${{ matrix.cxx11_abi}} python setup.py bdist_wheel --dist-dir=dist
tmpname=cu${MATRIX_CUDA_VERSION}torch${MATRIX_TORCH_VERSION}cxx11abi${{ matrix.cxx11_abi }}
wheel_name=$(ls dist/*whl | xargs -n 1 basename | sed "s/-/+$tmpname-/2")
ls dist/*whl |xargs -I {} mv {} dist/${wheel_name}
echo "wheel_name=${wheel_name}" >> $GITHUB_ENV
- name: Log Built Wheels
run: |
ls dist
- name: Get the tag version
id: extract_branch
run: echo ::set-output name=branch::${GITHUB_REF#refs/tags/}
- name: Get Release with tag
id: get_current_release
uses: joutvhu/get-release@v1
with:
tag_name: ${{ steps.extract_branch.outputs.branch }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Upload Release Asset
id: upload_release_asset
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.get_current_release.outputs.upload_url }}
asset_path: ./dist/${{env.wheel_name}}
asset_name: ${{env.wheel_name}}
asset_content_type: application/*
publish_package:
name: Publish package
needs: [build_wheels]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: '3.10'
- name: Install dependencies
run: |
pip install ninja packaging setuptools wheel twine
# We don't want to download anything CUDA-related here
pip install torch --index-url https://download.pytorch.org/whl/cpu
- name: Build core package
env:
MAMBA_SKIP_CUDA_BUILD: "TRUE"
run: |
python setup.py sdist --dist-dir=dist
- name: Deploy
env:
TWINE_USERNAME: "__token__"
TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
run: |
python -m twine upload dist/*
*__pycache__/
*.egg-info/
build/
**.so
*.hip
*_hip.*
[submodule "3rdparty/lm-evaluation-harness"]
path = 3rdparty/lm-evaluation-harness
url = https://github.com/EleutherAI/lm-evaluation-harness/
Tri Dao, tri@tridao.me
Albert Gu, agu@andrew.cmu.edu
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2023 Tri Dao, Albert Gu
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# Mamba2
## Papers
Mamba
`Mamba: Linear-Time Sequence Modeling with Selective State Spaces`
- https://arxiv.org/abs/2312.00752
Mamba2
`Transformers are SSMs: Generalized Models and Efficient Algorithms Through Structured State Space Duality`
- https://arxiv.org/abs/2405.21060
## Model Architecture
Many subquadratic-time architectures, such as linear attention, gated convolutions and recurrent models, and structured state space models (SSMs), have been developed to address Transformers' computational inefficiency on long sequences,
but they have not performed as well as attention on important modalities such as language. The authors identify a key weakness of such models, their inability to perform content-based reasoning, and make several improvements.
First, simply letting the SSM parameters be functions of the input addresses their weakness on discrete modalities, allowing the model to selectively propagate or forget information along the sequence-length dimension depending on the current token. Second, even though this change prevents the use of efficient convolutions,
the paper designs a hardware-aware parallel algorithm in recurrent mode and integrates these selective SSMs into a simplified end-to-end neural network architecture
without attention or even MLP blocks (Mamba). Mamba enjoys fast inference (5x higher throughput than Transformers) and linear scaling in sequence length, and its performance improves on real data up to million-length sequences.
<div align=center>
<img src="./assets/selection.png"/>
</div>
While Transformers have been the main architecture behind deep learning's success in language modeling, state space models (SSMs) such as Mamba have recently been shown to match or outperform Transformers at small to medium scale.
The authors show that these families of models are in fact closely related, and develop a rich framework of theoretical connections between SSMs and variants of attention through various decompositions of a well-studied class of structured semiseparable matrices.
The state space duality (SSD) framework lets the authors design a new architecture (Mamba-2) whose core layer is a refinement of Mamba's selective SSM that is 2-8x faster, while remaining competitive with Transformers on language modeling.
<div align=center>
<img src="./assets/ssd_algorithm.png"/>
</div>
## Algorithm
An SSM is a model that describes such state representations and predicts their next state given some input; its architecture is shown below. In general, an SSM maps an input sequence x(t) to a latent state representation h(t), from which a predicted output sequence y(t) is derived.
<div align=center>
<img src="./assets/ssm.png"/>
</div>
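To make the mapping x(t) → h(t) → y(t) concrete, below is a minimal sketch of the discretized SSM recurrence in plain PyTorch; the shapes, the zero-order-hold style discretization, and the per-channel state are illustrative assumptions, not the repository's optimized selective-scan kernels.
```python
import torch

def naive_ssm_scan(x, A, B, C, dt):
    """Minimal discretized SSM: h_t = A_bar * h_{t-1} + B_bar * x_t, y_t = C . h_t (per channel)."""
    # x: (length, dim); A, B, C: (dim, state); dt: (dim,)
    A_bar = torch.exp(dt[:, None] * A)          # zero-order-hold style discretization of A
    B_bar = dt[:, None] * B                     # simple Euler-style discretization of B
    h = torch.zeros_like(A)                     # latent state h(t), one small state per channel
    ys = []
    for x_t in x:                               # recurrent (sequential) mode
        h = A_bar * h + B_bar * x_t[:, None]
        ys.append((h * C).sum(-1))              # project the state back to one output per channel
    return torch.stack(ys)                      # (length, dim)

x = torch.randn(64, 16)                         # toy input sequence x(t)
y = naive_ssm_scan(x, A=-torch.rand(16, 4), B=torch.randn(16, 4),
                   C=torch.randn(16, 4), dt=0.1 * torch.rand(16))
print(y.shape)  # torch.Size([64, 16])
```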
Combining the basic block of most SSM architectures, such as H3, with the gated MLP that is ubiquitous in modern neural networks such as Transformers yields the new Mamba block; repeating this block, interleaved with normalization and residual connections, forms the Mamba architecture.
<div align=center>
<img src="./assets/mamba.png"/>
</div>
In Mamba-2, the SSD layer is viewed as a map from A, X, B, C → Y. It is therefore natural to produce A, X, B, C in parallel with a single projection at the beginning of the block.
<div align=center>
<img src="./assets/mamba2.png"/>
</div>
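A minimal sketch of this "single projection at the start of the block" idea; the layer sizes and the exact split below are illustrative assumptions rather than the real Mamba-2 hyperparameters.
```python
import torch
import torch.nn as nn

d_model, d_inner, d_state, nheads = 256, 512, 64, 8   # illustrative sizes

# One input projection produces everything the SSD layer consumes; it is then split.
in_proj = nn.Linear(d_model, d_inner + 2 * d_state + nheads)

u = torch.randn(2, 32, d_model)                       # (batch, length, d_model)
x, B, C, dt = torch.split(in_proj(u), [d_inner, d_state, d_state, nheads], dim=-1)
# x feeds the SSM input X, B/C are the input/output maps, dt parameterizes A per head.
print(x.shape, B.shape, C.shape, dt.shape)
```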
## Environment Setup
### Docker (Option 1)
The docker image can be pulled from [光源](https://www.sourcefind.cn/#/service-details) as shown below; the deep learning libraries can be downloaded from the [光合](https://developer.hpccube.com/tool/) developer community.
```
docker pull image.sourcefind.cn:5000/dcu/admin/base/pytorch:2.1.0-ubuntu20.04-dtk24.04.1-py3.10
docker run -it --shm-size=128G -v /path/your_code_data/:/path/your_code_data/ -v /opt/hyhal:/opt/hyhal:ro --privileged=true --device=/dev/kfd --device=/dev/dri/ --group-add video --name mamba2 <your IMAGE ID> bash # replace <your IMAGE ID> with the ID of the image pulled above; for this image it is a4dd5be0ca23
pip install wheel -i https://mirrors.aliyun.com/pypi/simple/
cd /path/your_code_data/mamba2_pytorch
pip install . --no-build-isolation --no-deps
pip install lm-eval==0.4.2 -i https://mirrors.aliyun.com/pypi/simple/
# mamba2 requires causal-conv1d
git clone https://github.com/Dao-AILab/causal-conv1d.git
cd causal-conv1d
pip install -e .
```
### Dockerfile (Option 2)
How to use the Dockerfile:
```
docker build --no-cache -t mamba2:latest .
docker run -it --shm-size=128G -v /path/your_code_data/:/path/your_code_data/ -v /opt/hyhal:/opt/hyhal:ro --privileged=true --device=/dev/kfd --device=/dev/dri/ --group-add video --name mamba2 mamba2 bash
cd /path/your_code_data/mamba2_pytorch
pip install . --no-build-isolation --no-deps
pip install lm-eval==0.4.2 -i https://mirrors.aliyun.com/pypi/simple/
# mamba2 requires causal-conv1d
git clone https://github.com/Dao-AILab/causal-conv1d.git
cd causal-conv1d
pip install -e .
```
### Anaconda (Option 3)
Detailed steps for local configuration and compilation are provided here, for example:
The DCU-specific deep learning libraries required by this project can be downloaded from the [光合](https://developer.hpccube.com/tool/) developer community.
```
# DTK driver: dtk24.04
# python:python3.10
# torch: 2.1.0
# torchvision: 0.16.0
# triton
conda create -n mamba python=3.10
conda activate mamba
```
`Tip: the DTK driver, python, torch and other DCU-related tool versions above must match exactly.`
Install the remaining dependencies as follows:
```
cd /path/your_code_data/mamba2_pytorch
pip install . --no-build-isolation --no-deps
pip install lm-eval==0.4.2 -i https://mirrors.aliyun.com/pypi/simple/
# mamba2 requires causal-conv1d
git clone https://github.com/Dao-AILab/causal-conv1d.git
cd causal-conv1d
pip install -e .
```
## Datasets
Running inference automatically connects to Hugging Face and downloads the required datasets into the cache directory.
```
# dataset layout
piqa/
└── plain_text
└── 1.1.0
├── 6c611c1a9bf220943c4174e117d3b660859665baf1d43156230116185312d011
│ ├── dataset_info.json
│ ├── piqa-test.arrow
│ ├── piqa-train.arrow
│ └── piqa-validation.arrow
├── 6c611c1a9bf220943c4174e117d3b660859665baf1d43156230116185312d011_builder.lock
└── 6c611c1a9bf220943c4174e117d3b660859665baf1d43156230116185312d011.incomplete_info.lock
...
```
You can also download the data offline and place it in the cache directory ~/.cache/huggingface/hub/datasets/ (adjust to your own cache location):
SCNet fast download link for the datasets: [datasets](http://113.200.138.88:18080/aidatasets/mamba2_data_test)
Alternatively, download the datasets with modelscope:
```
# dataset download example
# requires: pip install modelscope
from modelscope.msdatasets import MsDataset
ds = MsDataset.load('opencompass/piqa')
```
## Training
## Inference
Running inference automatically downloads model files from Hugging Face; you can also pre-download them with modelscope into the cache directory, or point to a local copy with pretrained=/path_to_model/model_name.
SCNet download link for the model weights: [models](http://113.200.138.88:18080/aimodels/mamba2)
### Single node, single card
Evaluate:
To run evaluations on Mamba-1 models:
```
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-130m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
```
To run evaluations on Mamba-2 models:
```
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba2-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/transformerpp-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba2attn-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
```
Inference:
To test generation latency (e.g. batch size = 1) with mamba:
```
python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2
```
With Mamba-2:
```
python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba2-2.7b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2
```
### Multi-card inference
Multi-card inference uses accelerate; for example:
```
HIP_VISIBLE_DEVICES=0,1 accelerate launch -m lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba2-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
```
## Results
state-spaces/mamba-2.8b result:
<div align=center>
<img src="./assets/mamba_result.png"/>
</div>
state-spaces/mamba2-2.7b result:
<div align=center>
<img src="./assets/mamba2_result.png"/>
</div>
### Accuracy
Inference with 2 DCU-K100 AI cards
mamba_ssm (pretrained=state-spaces/mamba-130m), gen_kwargs: (None), limit: None, num_fewshot: None, batch_size: 256
| Tasks |Version|Filter|n-shot| Metric | Value | |Stderr|
|--------------|------:|------|-----:|----------|------:|---|-----:|
|winogrande | 1|none | 0|acc | 0.5217|± |0.0140|
|piqa | 1|none | 0|acc | 0.6458|± |0.0112|
| | |none | 0|acc_norm | 0.6306|± |0.0113|
|openbookqa | 1|none | 0|acc | 0.1700|± |0.0168|
| | |none | 0|acc_norm | 0.2880|± |0.0203|
|lambada_openai| 1|none | 0|perplexity|16.0456|± |0.5091|
| | |none | 0|acc | 0.4428|± |0.0069|
|hellaswag | 1|none | 0|acc | 0.3079|± |0.0046|
| | |none | 0|acc_norm | 0.3522|± |0.0048|
|arc_easy | 1|none | 0|acc | 0.4794|± |0.0103|
| | |none | 0|acc_norm | 0.4205|± |0.0101|
|arc_challenge | 1|none | 0|acc | 0.1988|± |0.0117|
| | |none | 0|acc_norm | 0.2457|± |0.0126|
mamba_ssm (pretrained=state-spaces/mamba2-2.7b)
| Tasks |Version|Filter|n-shot| Metric |Value | |Stderr|
|--------------|------:|------|-----:|----------|-----:|---|-----:|
|winogrande | 1|none | 0|acc |0.6385|± |0.0135|
|piqa | 1|none | 0|acc |0.7628|± |0.0099|
| | |none | 0|acc_norm |0.7617|± |0.0099|
|openbookqa | 1|none | 0|acc |0.2940|± |0.0204|
| | |none | 0|acc_norm |0.3880|± |0.0218|
|lambada_openai| 1|none | 0|perplexity|4.0934|± |0.0888|
| | |none | 0|acc |0.6951|± |0.0064|
|hellaswag | 1|none | 0|acc |0.4961|± |0.0050|
| | |none | 0|acc_norm |0.6660|± |0.0047|
|arc_easy | 1|none | 0|acc |0.6957|± |0.0094|
| | |none | 0|acc_norm |0.6481|± |0.0098|
|arc_challenge | 1|none | 0|acc |0.3328|± |0.0138|
| | |none | 0|acc_norm |0.3626|± |0.0140|
mamba_ssm (pretrained=state-spaces/mamba2attn-2.7b), gen_kwargs: (None), limit: None, num_fewshot: None, batch_size: 256
| Tasks |Version|Filter|n-shot| Metric |Value | |Stderr|
|--------------|------:|------|-----:|----------|-----:|---|-----:|
|winogrande | 1|none | 0|acc |0.6519|± |0.0134|
|piqa | 1|none | 0|acc |0.7573|± |0.0100|
| | |none | 0|acc_norm |0.7584|± |0.0100|
|openbookqa | 1|none | 0|acc |0.3040|± |0.0206|
| | |none | 0|acc_norm |0.3900|± |0.0218|
|lambada_openai| 1|none | 0|perplexity|3.8497|± |0.0810|
| | |none | 0|acc |0.7105|± |0.0063|
|hellaswag | 1|none | 0|acc |0.5029|± |0.0050|
| | |none | 0|acc_norm |0.6776|± |0.0047|
|arc_easy | 1|none | 0|acc |0.6987|± |0.0094|
| | |none | 0|acc_norm |0.6633|± |0.0097|
|arc_challenge | 1|none | 0|acc |0.3447|± |0.0139|
| | |none | 0|acc_norm |0.3797|± |0.0142|
## Application Scenarios
### Algorithm Category
`Question Answering`
### Key Application Industries
`Research, manufacturing, healthcare, smart home, education`
## Source Repository and Issue Reporting
- https://developer.hpccube.com/codes/modelzoo/mamba2_pytorch
## References
- https://github.com/state-spaces/mamba
# Mamba
![Mamba](assets/selection.png "Selective State Space")
> **Mamba: Linear-Time Sequence Modeling with Selective State Spaces**\
> Albert Gu*, Tri Dao*\
> Paper: https://arxiv.org/abs/2312.00752
![Mamba-2](assets/ssd_algorithm.png "State Space Dual Model")
> **Transformers are SSMs: Generalized Models and Efficient Algorithms**\
> **Through Structured State Space Duality**\
> Tri Dao*, Albert Gu*\
> Paper: https://arxiv.org/abs/2405.21060
## About
Mamba is a new state space model architecture showing promising performance on information-dense data such as language modeling, where previous subquadratic models fall short of Transformers.
It is based on the line of progress on [structured state space models](https://github.com/state-spaces/s4),
with an efficient hardware-aware design and implementation in the spirit of [FlashAttention](https://github.com/Dao-AILab/flash-attention).
## Installation
- [Optional] `pip install causal-conv1d>=1.4.0`: an efficient implementation of a simple causal Conv1d layer used inside the Mamba block.
- `pip install mamba-ssm`: the core Mamba package.
- `pip install mamba-ssm[causal-conv1d]`: to install the core Mamba package and causal-conv1d.
- `pip install mamba-ssm[dev]`: to install the core Mamba package and dev dependencies.
It can also be built from source with `pip install .` from this repository.
If `pip` complains about PyTorch versions, try passing `--no-build-isolation` to `pip`.
Other requirements:
- Linux
- NVIDIA GPU
- PyTorch 1.12+
- CUDA 11.6+
For AMD cards, see additional prerequisites below.
## Usage
We expose several levels of interface with the Mamba model.
### Selective SSM
Mamba is based on a selective SSM layer, which is the focus of the paper (Section 3; Algorithm 2).
Source: [ops/selective_scan_interface.py](mamba_ssm/ops/selective_scan_interface.py).
### Mamba Block
The main module of this repository is the Mamba architecture block wrapping the selective SSM.
Source: [modules/mamba_simple.py](mamba_ssm/modules/mamba_simple.py).
Usage:
``` python
import torch
from mamba_ssm import Mamba
batch, length, dim = 2, 64, 16
x = torch.randn(batch, length, dim).to("cuda")
model = Mamba(
# This module uses roughly 3 * expand * d_model^2 parameters
d_model=dim, # Model dimension d_model
d_state=16, # SSM state expansion factor
d_conv=4, # Local convolution width
expand=2, # Block expansion factor
).to("cuda")
y = model(x)
assert y.shape == x.shape
```
### Mamba-2
The Mamba-2 block is implemented at [modules/mamba2.py](mamba_ssm/modules/mamba2.py).
A simpler version is at [modules/mamba2_simple.py](mamba_ssm/modules/mamba2_simple.py).
The usage is similar to Mamba(-1):
``` python
from mamba_ssm import Mamba2
model = Mamba2(
# This module uses roughly 3 * expand * d_model^2 parameters
d_model=dim, # Model dimension d_model
d_state=64, # SSM state expansion factor, typically 64 or 128
d_conv=4, # Local convolution width
expand=2, # Block expansion factor
).to("cuda")
y = model(x)
assert y.shape == x.shape
```
#### SSD
A minimal version of the inner SSD module (Listing 1 from the Mamba-2 paper) with conversion between "discrete" and "continuous" SSM versions
is at [modules/ssd_minimal.py](mamba_ssm/modules/ssd_minimal.py).
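For intuition about what this minimal SSD module computes, here is a rough, naive quadratic-time sketch of the "matrix form" Y = (L ∘ C Bᵀ) X, where the causal mask L is built from cumulative products of a per-step scalar decay a_t; the shapes and the single shared decay per step are illustrative assumptions, and this is not the repository's chunked implementation.
``` python
import torch

def ssd_naive(x, a, B, C):
    # x: (length, dim), a: (length,) per-step decay in (0, 1), B, C: (length, state)
    # y_t = sum_{s<=t} (prod_{r=s+1..t} a_r) * (C_t . B_s) * x_s
    cum = torch.cumsum(torch.log(a), dim=0)                 # cumulative log-decay
    L = torch.tril(torch.exp(cum[:, None] - cum[None, :]))  # 1-semiseparable causal mask
    return ((C @ B.T) * L) @ x                              # attention-like (length, length) map

x = torch.randn(32, 16)
y = ssd_naive(x, a=0.05 + 0.9 * torch.rand(32), B=torch.randn(32, 4), C=torch.randn(32, 4))
print(y.shape)  # torch.Size([32, 16])
```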
### Mamba Language Model
Finally, we provide an example of a complete language model: a deep sequence model backbone (with repeating Mamba blocks) + language model head.
Source: [models/mixer_seq_simple.py](mamba_ssm/models/mixer_seq_simple.py).
This is an example of how to integrate Mamba into an end-to-end neural network.
This example is used in the generation scripts below.
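As a quick sketch of loading this language model and generating from it (a trimmed-down version of the benchmark script's invocation; keyword arguments may differ across versions):
``` python
import torch
from transformers import AutoTokenizer
from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel

device, dtype = "cuda", torch.float16
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
model = MambaLMHeadModel.from_pretrained("state-spaces/mamba-130m", device=device, dtype=dtype)
model.eval()

input_ids = tokenizer("Mamba is a state space model that", return_tensors="pt").input_ids.to(device)
out = model.generate(
    input_ids=input_ids,
    max_length=input_ids.shape[1] + 50,
    cg=True,                          # CUDA-graph cached decoding, as in the benchmark script
    return_dict_in_generate=True,
)
print(tokenizer.batch_decode(out.sequences.tolist())[0])
```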
## Pretrained Models
Pretrained models are uploaded to
[Hugging Face](https://huggingface.co/state-spaces): `mamba-130m`, `mamba-370m`,
`mamba-790m`, `mamba-1.4b`, `mamba-2.8b`, `mamba2-130m`, `mamba2-370m`,
`mamba2-780m`, `mamba2-1.3b`, `mamba2-2.7b`, `transformerpp-2.7b`, `mamba2attn-2.7b`, trained on 300B tokens on the Pile, as well as `mamba-2.8b-slimpj`
(trained on 600B tokens on the SlimPajama dataset).
The models will be autodownloaded by the generation script below.
These models were trained on the [Pile](https://huggingface.co/datasets/EleutherAI/pile), and follow the standard model dimensions described by GPT-3 and followed by many open source models:
| Parameters | Layers | Model dim. |
|------------|--------|------------|
| 130M | 24 | 768 |
| 370M | 48 | 1024 |
| 790M | 48 | 1536 |
| 1.4B | 48 | 2048 |
| 2.8B | 64 | 2560 |
(The layer count of Mamba doubles that of a Transformer with similar size, as two Mamba blocks are needed for each "layer" (MHA block + MLP block) of a Transformer.)
Note: these are base models trained only for 300B tokens, without any form of downstream modification (instruction tuning, etc.).
Performance is expected to be comparable or better than other architectures trained on similar data, but not to match larger or fine-tuned models.
## Evaluations
To run zero-shot evaluations of models (corresponding to Table 3 of the paper),
we use the
[lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness)
library.
1. Install `lm-evaluation-harness` by `pip install lm-eval==0.4.2`.
2. Run evaluation with (more documentation at the [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) repo):
``` sh
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-130m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
python evals/lm_harness_eval.py --model hf --model_args pretrained=EleutherAI/pythia-160m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64
```
To reproduce the results on the `mamba-2.8b-slimpj` model reported in the blogposts:
``` sh
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-2.8b-slimpj --tasks boolq,piqa,hellaswag,winogrande,arc_easy,arc_challenge,openbookqa,race,truthfulqa_mc2 --device cuda --batch_size 256
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-2.8b-slimpj --tasks mmlu --num_fewshot 5 --device cuda --batch_size 256
```
To run evaluations on Mamba-2 models, simply replace the model names:
``` sh
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba2-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/transformerpp-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba2attn-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
```
Note that the result of each task might differ from reported values by 0.1-0.3 due to noise in the evaluation process.
## Inference
The script [benchmarks/benchmark_generation_mamba_simple.py](benchmarks/benchmark_generation_mamba_simple.py)
1. autoloads a model from the Hugging Face Hub,
2. generates completions of a user-specified prompt,
3. benchmarks the inference speed of this generation.
Other configurable options include the top-p (nucleus sampling) probability, and the softmax temperature.
### Examples
To test generation latency (e.g. batch size = 1) with different sampling strategies:
``` sh
python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2
python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2
python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --minp 0.05 --topk 0 --temperature 0.7 --repetition-penalty 1.2
```
To test generation throughput with random prompts (e.g. large batch size):
``` sh
python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --batch 64
python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --batch 64
```
With Mamba-2, you just need to change the model name:
``` sh
python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba2-2.7b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2
```
## Troubleshooting
### Precision
Our models were trained using PyTorch [AMP](https://pytorch.org/docs/stable/amp.html) for mixed precision. AMP keeps model parameters in float32 and casts to half precision when necessary.
On the other hand, other frameworks like DeepSpeed store parameters in float16 and upcast when necessary (e.g. for optimizer accumulation).
We've observed that higher precision for the main model parameters may be necessary, because SSMs are sensitive to their recurrent dynamics. If you are experiencing instabilities,
as a first step please try a framework storing parameters in fp32 (such as AMP).
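A minimal sketch of that recommended setup (parameters kept in fp32 while compute runs under autocast); the model and loss here are placeholders, not a Mamba training loop.
``` python
import torch

model = torch.nn.Linear(1024, 1024).cuda()            # stand-in for the model; weights stay in fp32
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scaler = torch.cuda.amp.GradScaler()                   # scales the loss to avoid fp16 gradient underflow

x = torch.randn(8, 1024, device="cuda")
with torch.cuda.amp.autocast(dtype=torch.float16):     # matmuls/activations run in half precision
    loss = model(x).pow(2).mean()
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
```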
### Initialization
Some parts of the model have initializations inherited from prior work on S4 models.
For [example](https://github.com/state-spaces/mamba/blob/f0affcf69f06d1d06cef018ff640bf080a11c421/mamba_ssm/modules/mamba_simple.py#L102), the $\Delta$ parameter has a targeted range by initializing the bias of its linear projection.
However, some frameworks may have post-initialization hooks (e.g. setting all bias terms in `nn.Linear` modules to zero).
If this is the case, you may have to add custom logic (e.g. this [line](https://github.com/state-spaces/mamba/blob/f0affcf69f06d1d06cef018ff640bf080a11c421/mamba_ssm/modules/mamba_simple.py#L104) turns off re-initializing in our trainer, but would be a no-op in any other framework)
that is specific to the training framework.
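A sketch of the kind of Δ initialization those lines implement, paraphrased rather than copied (the range values, sizes, and the `_no_reinit` marker are assumptions taken from the linked code and may differ across versions):
``` python
import math
import torch
import torch.nn as nn

d_inner, dt_min, dt_max = 512, 1e-3, 1e-1             # illustrative sizes and target range for dt
dt_proj = nn.Linear(16, d_inner, bias=True)

# Sample dt log-uniformly in [dt_min, dt_max], then apply the inverse of softplus so that
# softplus(bias) lands back inside the targeted range at initialization.
dt = torch.exp(torch.rand(d_inner) * (math.log(dt_max) - math.log(dt_min)) + math.log(dt_min))
inv_dt = dt + torch.log(-torch.expm1(-dt))             # inverse softplus
with torch.no_grad():
    dt_proj.bias.copy_(inv_dt)
# A framework hook that zeroes every nn.Linear bias after init would silently undo this,
# hence a marker on the bias that the trainer checks to skip re-initialization.
dt_proj.bias._no_reinit = True
```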
## Additional Prerequisites for AMD cards
### Patching ROCm
If you are on ROCm 6.0, run the following steps to avoid errors during compilation. This is not required for ROCm 6.1 onwards.
1. Locate your ROCm installation directory. This is typically found at `/opt/rocm/`, but may vary depending on your installation.
2. Apply the Patch. Run with `sudo` in case you encounter permission issues.
```bash
patch /opt/rocm/include/hip/amd_detail/amd_hip_bf16.h < rocm_patch/rocm6_0.patch
```
## Citation
If you use this codebase, or otherwise find our work valuable, please cite Mamba:
```
@article{mamba,
title={Mamba: Linear-Time Sequence Modeling with Selective State Spaces},
author={Gu, Albert and Dao, Tri},
journal={arXiv preprint arXiv:2312.00752},
year={2023}
}
@inproceedings{mamba2,
title={Transformers are {SSM}s: Generalized Models and Efficient Algorithms Through Structured State Space Duality},
author={Dao, Tri and Gu, Albert},
booktitle={International Conference on Machine Learning (ICML)},
year={2024}
}
```
# Copyright (c) 2023, Tri Dao, Albert Gu.
import argparse
import time
import json
import torch
import torch.nn.functional as F
from einops import rearrange
from transformers import AutoTokenizer, AutoModelForCausalLM
from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel
parser = argparse.ArgumentParser(description="Generation benchmarking")
parser.add_argument("--model-name", type=str, default="state-spaces/mamba-130m")
parser.add_argument("--prompt", type=str, default=None)
parser.add_argument("--promptlen", type=int, default=100)
parser.add_argument("--genlen", type=int, default=100)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--topk", type=int, default=1)
parser.add_argument("--topp", type=float, default=1.0)
parser.add_argument("--minp", type=float, default=0.0)
parser.add_argument("--repetition-penalty", type=float, default=1.0)
parser.add_argument("--batch", type=int, default=1)
args = parser.parse_args()
repeats = 3
device = "cuda"
dtype = torch.float16
print(f"Loading model {args.model_name}")
is_mamba = args.model_name.startswith("state-spaces/mamba") or args.model_name.startswith("state-spaces/transformerpp")
if is_mamba:
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
model = MambaLMHeadModel.from_pretrained(args.model_name, device=device, dtype=dtype)
else:
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
model = AutoModelForCausalLM.from_pretrained(args.model_name, device_map={"": device}, torch_dtype=dtype)
model.eval()
print(f"Number of parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")
torch.random.manual_seed(0)
if args.prompt is None:
input_ids = torch.randint(1, 1000, (args.batch, args.promptlen), dtype=torch.long, device="cuda")
attn_mask = torch.ones_like(input_ids, dtype=torch.long, device="cuda")
else:
tokens = tokenizer(args.prompt, return_tensors="pt")
input_ids = tokens.input_ids.to(device=device)
attn_mask = tokens.attention_mask.to(device=device)
max_length = input_ids.shape[1] + args.genlen
if is_mamba:
fn = lambda: model.generate(
input_ids=input_ids,
max_length=max_length,
cg=True,
return_dict_in_generate=True,
output_scores=True,
enable_timing=False,
temperature=args.temperature,
top_k=args.topk,
top_p=args.topp,
min_p=args.minp,
repetition_penalty=args.repetition_penalty,
)
else:
fn = lambda: model.generate(
input_ids=input_ids,
attention_mask=attn_mask,
max_length=max_length,
return_dict_in_generate=True,
pad_token_id=tokenizer.eos_token_id,
do_sample=True,
temperature=args.temperature,
top_k=args.topk,
top_p=args.topp,
repetition_penalty=args.repetition_penalty,
)
out = fn()
if args.prompt is not None:
print(tokenizer.batch_decode(out.sequences.tolist()))
torch.cuda.synchronize()
start = time.time()
for _ in range(repeats):
fn()
torch.cuda.synchronize()
print(f"Prompt length: {len(input_ids[0])}, generation length: {len(out.sequences[0]) - len(input_ids[0])}")
print(f"{args.model_name} prompt processing + decoding time: {(time.time() - start) / repeats * 1000:.0f}ms")
/******************************************************************************
* Copyright (c) 2023, Tri Dao.
******************************************************************************/
#pragma once
#ifndef USE_ROCM
#include <cub/config.cuh>
#include <cub/util_ptx.cuh>
#include <cub/util_type.cuh>
#include <cub/block/block_raking_layout.cuh>
// #include <cub/detail/uninitialized_copy.cuh>
#else
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "uninitialized_copy.cuh"
/**
* Perform a reverse sequential reduction over \p LENGTH elements of the \p input array. The aggregate is returned.
*/
template <
int LENGTH,
typename T,
typename ReductionOp>
__device__ __forceinline__ T ThreadReverseReduce(const T (&input)[LENGTH], ReductionOp reduction_op) {
static_assert(LENGTH > 0);
T retval = input[LENGTH - 1];
#pragma unroll
for (int i = LENGTH - 2; i >= 0; --i) { retval = reduction_op(retval, input[i]); }
return retval;
}
/**
* Perform a sequential inclusive postfix reverse scan over the statically-sized \p input array, seeded with the specified \p postfix. The aggregate is returned.
*/
template <
int LENGTH,
typename T,
typename ScanOp>
__device__ __forceinline__ T ThreadReverseScanInclusive(
const T (&input)[LENGTH],
T (&output)[LENGTH],
ScanOp scan_op,
const T postfix)
{
T inclusive = postfix;
#pragma unroll
for (int i = LENGTH - 1; i >= 0; --i) {
inclusive = scan_op(inclusive, input[i]);
output[i] = inclusive;
}
return inclusive;
}
/**
* Perform a sequential exclusive postfix reverse scan over the statically-sized \p input array, seeded with the specified \p postfix. The aggregate is returned.
*/
template <
int LENGTH,
typename T,
typename ScanOp>
__device__ __forceinline__ T ThreadReverseScanExclusive(
const T (&input)[LENGTH],
T (&output)[LENGTH],
ScanOp scan_op,
const T postfix)
{
// Careful, output may be aliased to input
T exclusive = postfix;
T inclusive;
#pragma unroll
for (int i = LENGTH - 1; i >= 0; --i) {
inclusive = scan_op(exclusive, input[i]);
output[i] = exclusive;
exclusive = inclusive;
}
return inclusive;
}
/**
* \brief WarpReverseScan provides SHFL-based variants of parallel postfix scan of items partitioned across a CUDA thread warp.
*
* LOGICAL_WARP_THREADS must be a power-of-two
*/
template <
typename T, ///< Data type being scanned
int LOGICAL_WARP_THREADS ///< Number of threads per logical warp
>
struct WarpReverseScan {
//---------------------------------------------------------------------
// Constants and type definitions
//---------------------------------------------------------------------
/// Whether the logical warp size and the PTX warp size coincide
// In hipcub, warp_threads is defined as HIPCUB_WARP_THREADS ::rocprim::warp_size()
// While in cub, it's defined as a macro that takes a redundant unused argument.
#ifndef USE_ROCM
#define WARP_THREADS CUB_WARP_THREADS(0)
#else
#define WARP_THREADS HIPCUB_WARP_THREADS
#endif
static constexpr bool IS_ARCH_WARP = (LOGICAL_WARP_THREADS == WARP_THREADS);
/// The number of warp scan steps
static constexpr int STEPS = cub::Log2<LOGICAL_WARP_THREADS>::VALUE;
static_assert(LOGICAL_WARP_THREADS == 1 << STEPS);
//---------------------------------------------------------------------
// Thread fields
//---------------------------------------------------------------------
/// Lane index in logical warp
unsigned int lane_id;
/// Logical warp index in 32-thread physical warp
unsigned int warp_id;
/// 32-thread physical warp member mask of logical warp
unsigned int member_mask;
//---------------------------------------------------------------------
// Construction
//---------------------------------------------------------------------
/// Constructor
explicit __device__ __forceinline__
WarpReverseScan()
: lane_id(cub::LaneId())
, warp_id(IS_ARCH_WARP ? 0 : (lane_id / LOGICAL_WARP_THREADS))
, member_mask(cub::WarpMask<LOGICAL_WARP_THREADS>(warp_id))
{
if (!IS_ARCH_WARP) {
lane_id = lane_id % LOGICAL_WARP_THREADS;
}
}
/// Broadcast
__device__ __forceinline__ T Broadcast(
T input, ///< [in] The value to broadcast
int src_lane) ///< [in] Which warp lane is to do the broadcasting
{
return cub::ShuffleIndex<LOGICAL_WARP_THREADS>(input, src_lane, member_mask);
}
/// Inclusive scan
template <typename ScanOpT>
__device__ __forceinline__ void InclusiveReverseScan(
T input, ///< [in] Calling thread's input item.
T &inclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input.
ScanOpT scan_op) ///< [in] Binary scan operator
{
inclusive_output = input;
#pragma unroll
for (int STEP = 0; STEP < STEPS; STEP++) {
int offset = 1 << STEP;
T temp = cub::ShuffleDown<LOGICAL_WARP_THREADS>(
inclusive_output, offset, LOGICAL_WARP_THREADS - 1, member_mask
);
// Perform scan op if from a valid peer
inclusive_output = static_cast<int>(lane_id) >= LOGICAL_WARP_THREADS - offset
? inclusive_output : scan_op(temp, inclusive_output);
}
}
/// Exclusive scan
// Get exclusive from inclusive
template <typename ScanOpT>
__device__ __forceinline__ void ExclusiveReverseScan(
T input, ///< [in] Calling thread's input item.
T &exclusive_output, ///< [out] Calling thread's output item. May be aliased with \p input.
ScanOpT scan_op, ///< [in] Binary scan operator
T &warp_aggregate) ///< [out] Warp-wide aggregate reduction of input items.
{
T inclusive_output;
InclusiveReverseScan(input, inclusive_output, scan_op);
warp_aggregate = cub::ShuffleIndex<LOGICAL_WARP_THREADS>(inclusive_output, 0, member_mask);
// initial value unknown
exclusive_output = cub::ShuffleDown<LOGICAL_WARP_THREADS>(
inclusive_output, 1, LOGICAL_WARP_THREADS - 1, member_mask
);
}
/**
* \brief Computes both inclusive and exclusive reverse scans using the specified binary scan functor across the calling warp. Because no initial value is supplied, the \p exclusive_output computed for the last <em>warp-lane</em> is undefined.
*/
template <typename ScanOpT>
__device__ __forceinline__ void ReverseScan(
T input, ///< [in] Calling thread's input item.
T &inclusive_output, ///< [out] Calling thread's inclusive-scan output item.
T &exclusive_output, ///< [out] Calling thread's exclusive-scan output item.
ScanOpT scan_op) ///< [in] Binary scan operator
{
InclusiveReverseScan(input, inclusive_output, scan_op);
// initial value unknown
exclusive_output = cub::ShuffleDown<LOGICAL_WARP_THREADS>(
inclusive_output, 1, LOGICAL_WARP_THREADS - 1, member_mask
);
}
};
/**
* \brief BlockReverseScan provides variants of raking-based parallel postfix scan across a CUDA thread block.
*/
template <
typename T, ///< Data type being scanned
int BLOCK_DIM_X, ///< The thread block length in threads along the X dimension
bool MEMOIZE=false ///< Whether or not to buffer outer raking scan partials to incur fewer shared memory reads at the expense of higher register pressure
>
struct BlockReverseScan {
//---------------------------------------------------------------------
// Types and constants
//---------------------------------------------------------------------
/// Constants
/// The thread block size in threads
static constexpr int BLOCK_THREADS = BLOCK_DIM_X;
/// Layout type for padded thread block raking grid
using BlockRakingLayout = cub::BlockRakingLayout<T, BLOCK_THREADS>;
// The number of reduction elements is not a multiple of the number of raking threads for now
static_assert(BlockRakingLayout::UNGUARDED);
/// Number of raking threads
static constexpr int RAKING_THREADS = BlockRakingLayout::RAKING_THREADS;
/// Number of raking elements per warp synchronous raking thread
static constexpr int SEGMENT_LENGTH = BlockRakingLayout::SEGMENT_LENGTH;
/// Cooperative work can be entirely warp synchronous
static constexpr bool WARP_SYNCHRONOUS = (int(BLOCK_THREADS) == int(RAKING_THREADS));
/// WarpReverseScan utility type
using WarpReverseScan = WarpReverseScan<T, RAKING_THREADS>;
/// Shared memory storage layout type
struct _TempStorage {
typename BlockRakingLayout::TempStorage raking_grid; ///< Padded thread block raking grid
};
/// Alias wrapper allowing storage to be unioned
struct TempStorage : cub::Uninitialized<_TempStorage> {};
//---------------------------------------------------------------------
// Per-thread fields
//---------------------------------------------------------------------
// Thread fields
_TempStorage &temp_storage;
unsigned int linear_tid;
T cached_segment[SEGMENT_LENGTH];
//---------------------------------------------------------------------
// Utility methods
//---------------------------------------------------------------------
/// Performs upsweep raking reduction, returning the aggregate
template <typename ScanOp>
__device__ __forceinline__ T Upsweep(ScanOp scan_op) {
T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid);
// Read data into registers
#pragma unroll
for (int i = 0; i < SEGMENT_LENGTH; ++i) { cached_segment[i] = smem_raking_ptr[i]; }
T raking_partial = cached_segment[SEGMENT_LENGTH - 1];
#pragma unroll
for (int i = SEGMENT_LENGTH - 2; i >= 0; --i) {
raking_partial = scan_op(raking_partial, cached_segment[i]);
}
return raking_partial;
}
/// Performs exclusive downsweep raking scan
template <typename ScanOp>
__device__ __forceinline__ void ExclusiveDownsweep(
ScanOp scan_op,
T raking_partial)
{
T *smem_raking_ptr = BlockRakingLayout::RakingPtr(temp_storage.raking_grid, linear_tid);
// Read data back into registers
if (!MEMOIZE) {
#pragma unroll
for (int i = 0; i < SEGMENT_LENGTH; ++i) { cached_segment[i] = smem_raking_ptr[i]; }
}
ThreadReverseScanExclusive(cached_segment, cached_segment, scan_op, raking_partial);
// Write data back to smem
#pragma unroll
for (int i = 0; i < SEGMENT_LENGTH; ++i) { smem_raking_ptr[i] = cached_segment[i]; }
}
//---------------------------------------------------------------------
// Constructors
//---------------------------------------------------------------------
/// Constructor
__device__ __forceinline__ BlockReverseScan(
TempStorage &temp_storage)
:
temp_storage(temp_storage.Alias()),
linear_tid(cub::RowMajorTid(BLOCK_DIM_X, 1, 1))
{}
/// Computes an exclusive thread block-wide postfix scan using the specified binary \p scan_op functor. Each thread contributes one input element. the call-back functor \p block_postfix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically postfixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs.
template <
typename ScanOp,
typename BlockPostfixCallbackOp>
__device__ __forceinline__ void ExclusiveReverseScan(
T input, ///< [in] Calling thread's input item
T &exclusive_output, ///< [out] Calling thread's output item (may be aliased to \p input)
ScanOp scan_op, ///< [in] Binary scan operator
BlockPostfixCallbackOp &block_postfix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a thread block-wide postfix to be applied to all inputs.
{
if (WARP_SYNCHRONOUS) {
// Short-circuit directly to warp-synchronous scan
T block_aggregate;
WarpReverseScan warp_scan;
warp_scan.ExclusiveReverseScan(input, exclusive_output, scan_op, block_aggregate);
// Obtain warp-wide postfix in lane0, then broadcast to other lanes
T block_postfix = block_postfix_callback_op(block_aggregate);
block_postfix = warp_scan.Broadcast(block_postfix, 0);
exclusive_output = linear_tid == BLOCK_THREADS - 1 ? block_postfix : scan_op(block_postfix, exclusive_output);
} else {
// Place thread partial into shared memory raking grid
T *placement_ptr = BlockRakingLayout::PlacementPtr(temp_storage.raking_grid, linear_tid);
detail::uninitialized_copy(placement_ptr, input);
cub::CTA_SYNC();
// Reduce parallelism down to just raking threads
if (linear_tid < RAKING_THREADS) {
WarpReverseScan warp_scan;
// Raking upsweep reduction across shared partials
T upsweep_partial = Upsweep(scan_op);
// Warp-synchronous scan
T exclusive_partial, block_aggregate;
warp_scan.ExclusiveReverseScan(upsweep_partial, exclusive_partial, scan_op, block_aggregate);
// Obtain block-wide postfix in lane0, then broadcast to other lanes
T block_postfix = block_postfix_callback_op(block_aggregate);
block_postfix = warp_scan.Broadcast(block_postfix, 0);
// Update postfix with warpscan exclusive partial
T downsweep_postfix = linear_tid == RAKING_THREADS - 1
? block_postfix : scan_op(block_postfix, exclusive_partial);
// Exclusive raking downsweep scan
ExclusiveDownsweep(scan_op, downsweep_postfix);
}
cub::CTA_SYNC();
// Grab thread postfix from shared memory
exclusive_output = *placement_ptr;
// // Compute warp scan in each warp.
// // The exclusive output from the last lane in each warp is invalid.
// T inclusive_output;
// WarpReverseScan warp_scan;
// warp_scan.ReverseScan(input, inclusive_output, exclusive_output, scan_op);
// // Compute the warp-wide postfix and block-wide aggregate for each warp. Warp postfix for the last warp is invalid.
// T block_aggregate;
// T warp_postfix = ComputeWarpPostfix(scan_op, inclusive_output, block_aggregate);
// // Apply warp postfix to our lane's partial
// if (warp_id != 0) {
// exclusive_output = scan_op(warp_postfix, exclusive_output);
// if (lane_id == 0) { exclusive_output = warp_postfix; }
// }
// // Use the first warp to determine the thread block postfix, returning the result in lane0
// if (warp_id == 0) {
// T block_postfix = block_postfix_callback_op(block_aggregate);
// if (lane_id == 0) {
// // Share the postfix with all threads
// detail::uninitialized_copy(&temp_storage.block_postfix,
// block_postfix);
// exclusive_output = block_postfix; // The block postfix is the exclusive output for tid0
// }
// }
// cub::CTA_SYNC();
// // Incorporate thread block postfix into outputs
// T block_postfix = temp_storage.block_postfix;
// if (linear_tid > 0) { exclusive_output = scan_op(block_postfix, exclusive_output); }
}
}
/**
* \brief Computes an inclusive block-wide postfix scan using the specified binary \p scan_op functor. Each thread contributes an array of consecutive input elements. the call-back functor \p block_postfix_callback_op is invoked by the first warp in the block, and the value returned by <em>lane</em><sub>0</sub> in that warp is used as the "seed" value that logically postfixes the thread block's scan inputs. Also provides every thread with the block-wide \p block_aggregate of all inputs.
*/
template <
int ITEMS_PER_THREAD,
typename ScanOp,
typename BlockPostfixCallbackOp>
__device__ __forceinline__ void InclusiveReverseScan(
T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items
T (&output)[ITEMS_PER_THREAD], ///< [out] Calling thread's output items (may be aliased to \p input)
ScanOp scan_op, ///< [in] Binary scan functor
BlockPostfixCallbackOp &block_postfix_callback_op) ///< [in-out] <b>[<em>warp</em><sub>0</sub> only]</b> Call-back functor for specifying a block-wide postfix to be applied to the logical input sequence.
{
// Reduce consecutive thread items in registers
T thread_postfix = ThreadReverseReduce(input, scan_op);
// Exclusive thread block-scan
ExclusiveReverseScan(thread_postfix, thread_postfix, scan_op, block_postfix_callback_op);
// Inclusive scan in registers with postfix as seed
ThreadReverseScanInclusive(input, output, scan_op, thread_postfix);
}
};
/******************************************************************************
* Copyright (c) 2023, Tri Dao.
******************************************************************************/
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <torch/extension.h>
#include <vector>
#include "selective_scan.h"
#define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")")
#define DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(ITYPE, NAME, ...) \
if (ITYPE == at::ScalarType::Half) { \
using input_t = at::Half; \
__VA_ARGS__(); \
} else if (ITYPE == at::ScalarType::BFloat16) { \
using input_t = at::BFloat16; \
__VA_ARGS__(); \
} else if (ITYPE == at::ScalarType::Float) { \
using input_t = float; \
__VA_ARGS__(); \
} else { \
AT_ERROR(#NAME, " not implemented for input type '", toString(ITYPE), "'"); \
}
#define DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(WTYPE, NAME, ...) \
if (WTYPE == at::ScalarType::Half) { \
using weight_t = at::Half; \
__VA_ARGS__(); \
} else if (WTYPE == at::ScalarType::BFloat16) { \
using weight_t = at::BFloat16; \
__VA_ARGS__(); \
} else if (WTYPE == at::ScalarType::Float) { \
using weight_t = float; \
__VA_ARGS__(); \
} else { \
AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \
}
#define DISPATCH_WTYPE_FLOAT_AND_COMPLEX(WTYPE, NAME, ...) \
if (WTYPE == at::ScalarType::Float) { \
using weight_t = float; \
__VA_ARGS__(); \
} else if (WTYPE == at::ScalarType::ComplexFloat) { \
using weight_t = c10::complex<float>; \
__VA_ARGS__(); \
} else { \
AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \
}
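// These dispatch macros are nested at the call sites below: the outer one binds input_t to the
// runtime input dtype, the inner one binds weight_t to the weight dtype, and the lambda body then
// instantiates the matching templated launcher, e.g. (as done in selective_scan_fwd below):
//   DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(u.scalar_type(), "selective_scan_fwd", [&] {
//       DISPATCH_WTYPE_FLOAT_AND_COMPLEX(A.scalar_type(), "selective_scan_fwd", [&] {
//           selective_scan_fwd_cuda<input_t, weight_t>(params, stream);
//       });
//   });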
template<typename input_t, typename weight_t>
void selective_scan_fwd_cuda(SSMParamsBase &params, cudaStream_t stream);
template <typename input_t, typename weight_t>
void selective_scan_bwd_cuda(SSMParamsBwd &params, cudaStream_t stream);
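// Tensor layouts assumed by the parameter setters (matching the shape checks in
// selective_scan_fwd/selective_scan_bwd below); all strides recorded here are in elements:
//   u, delta, z, out, out_z : (batch, dim, seqlen)
//   A                       : (dim, dstate)
//   B, C (fixed)            : (dim, dstate)
//   B, C (variable)         : (batch, n_groups, dstate, seqlen)  // last dim is 2*seqlen for complex weights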
void set_ssm_params_fwd(SSMParamsBase &params,
// sizes
const size_t batch,
const size_t dim,
const size_t seqlen,
const size_t dstate,
const size_t n_groups,
const size_t n_chunks,
const bool is_variable_B,
const bool is_variable_C,
// device pointers
const at::Tensor u,
const at::Tensor delta,
const at::Tensor A,
const at::Tensor B,
const at::Tensor C,
const at::Tensor out,
const at::Tensor z,
const at::Tensor out_z,
void* D_ptr,
void* delta_bias_ptr,
void* x_ptr,
bool has_z,
bool delta_softplus) {
// Reset the parameters
memset(&params, 0, sizeof(params));
params.batch = batch;
params.dim = dim;
params.seqlen = seqlen;
params.dstate = dstate;
params.n_groups = n_groups;
params.n_chunks = n_chunks;
params.dim_ngroups_ratio = dim / n_groups;
params.delta_softplus = delta_softplus;
params.is_variable_B = is_variable_B;
params.is_variable_C = is_variable_C;
// Set the pointers and strides.
params.u_ptr = u.data_ptr();
params.delta_ptr = delta.data_ptr();
params.A_ptr = A.data_ptr();
params.B_ptr = B.data_ptr();
params.C_ptr = C.data_ptr();
params.D_ptr = D_ptr;
params.delta_bias_ptr = delta_bias_ptr;
params.out_ptr = out.data_ptr();
params.x_ptr = x_ptr;
params.z_ptr = has_z ? z.data_ptr() : nullptr;
params.out_z_ptr = has_z ? out_z.data_ptr() : nullptr;
    // All strides are in elements, not bytes.
params.A_d_stride = A.stride(0);
params.A_dstate_stride = A.stride(1);
if (!is_variable_B) {
params.B_d_stride = B.stride(0);
} else {
params.B_batch_stride = B.stride(0);
params.B_group_stride = B.stride(1);
}
params.B_dstate_stride = !is_variable_B ? B.stride(1) : B.stride(2);
if (!is_variable_C) {
params.C_d_stride = C.stride(0);
} else {
params.C_batch_stride = C.stride(0);
params.C_group_stride = C.stride(1);
}
params.C_dstate_stride = !is_variable_C ? C.stride(1) : C.stride(2);
params.u_batch_stride = u.stride(0);
params.u_d_stride = u.stride(1);
params.delta_batch_stride = delta.stride(0);
params.delta_d_stride = delta.stride(1);
if (has_z) {
params.z_batch_stride = z.stride(0);
params.z_d_stride = z.stride(1);
params.out_z_batch_stride = out_z.stride(0);
params.out_z_d_stride = out_z.stride(1);
}
params.out_batch_stride = out.stride(0);
params.out_d_stride = out.stride(1);
}
void set_ssm_params_bwd(SSMParamsBwd &params,
// sizes
const size_t batch,
const size_t dim,
const size_t seqlen,
const size_t dstate,
const size_t n_groups,
const size_t n_chunks,
const bool is_variable_B,
const bool is_variable_C,
// device pointers
const at::Tensor u,
const at::Tensor delta,
const at::Tensor A,
const at::Tensor B,
const at::Tensor C,
const at::Tensor z,
const at::Tensor out,
const at::Tensor out_z,
void* D_ptr,
void* delta_bias_ptr,
void* x_ptr,
const at::Tensor dout,
const at::Tensor du,
const at::Tensor ddelta,
const at::Tensor dA,
const at::Tensor dB,
const at::Tensor dC,
const at::Tensor dz,
void* dD_ptr,
void* ddelta_bias_ptr,
bool has_z,
bool delta_softplus,
bool recompute_out_z) {
// Pass in "dout" instead of "out", we're not gonna use "out" unless we have z
set_ssm_params_fwd(params, batch, dim, seqlen, dstate, n_groups, n_chunks, is_variable_B, is_variable_C,
u, delta, A, B, C, has_z ? out : dout,
has_z ? z : dout,
// If not recompute_out_z, pass dout instead of out_z.
// This won't be used by the bwd kernel
recompute_out_z ? out_z : dout,
D_ptr, delta_bias_ptr, x_ptr, has_z, delta_softplus);
if (!recompute_out_z) { params.out_z_ptr = nullptr; }
// Set the pointers and strides.
params.dout_ptr = dout.data_ptr();
params.du_ptr = du.data_ptr();
params.dA_ptr = dA.data_ptr();
params.dB_ptr = dB.data_ptr();
params.dC_ptr = dC.data_ptr();
params.dD_ptr = dD_ptr;
params.ddelta_ptr = ddelta.data_ptr();
params.ddelta_bias_ptr = ddelta_bias_ptr;
params.dz_ptr = has_z ? dz.data_ptr() : nullptr;
    // All strides are in elements, not bytes.
params.dout_batch_stride = dout.stride(0);
params.dout_d_stride = dout.stride(1);
params.dA_d_stride = dA.stride(0);
params.dA_dstate_stride = dA.stride(1);
if (!is_variable_B) {
params.dB_d_stride = dB.stride(0);
} else {
params.dB_batch_stride = dB.stride(0);
params.dB_group_stride = dB.stride(1);
}
params.dB_dstate_stride = !is_variable_B ? dB.stride(1) : dB.stride(2);
if (!is_variable_C) {
params.dC_d_stride = dC.stride(0);
} else {
params.dC_batch_stride = dC.stride(0);
params.dC_group_stride = dC.stride(1);
}
params.dC_dstate_stride = !is_variable_C ? dC.stride(1) : dC.stride(2);
params.du_batch_stride = du.stride(0);
params.du_d_stride = du.stride(1);
params.ddelta_batch_stride = ddelta.stride(0);
params.ddelta_d_stride = ddelta.stride(1);
if (has_z) {
params.dz_batch_stride = dz.stride(0);
params.dz_d_stride = dz.stride(1);
}
}
std::vector<at::Tensor>
selective_scan_fwd(const at::Tensor &u, const at::Tensor &delta,
const at::Tensor &A, const at::Tensor &B, const at::Tensor &C,
const c10::optional<at::Tensor> &D_,
const c10::optional<at::Tensor> &z_,
const c10::optional<at::Tensor> &delta_bias_,
bool delta_softplus) {
auto input_type = u.scalar_type();
auto weight_type = A.scalar_type();
TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::ComplexFloat);
const bool is_variable_B = B.dim() >= 3;
const bool is_variable_C = C.dim() >= 3;
const bool is_complex = weight_type == at::ScalarType::ComplexFloat;
TORCH_CHECK(delta.scalar_type() == input_type);
TORCH_CHECK(B.scalar_type() == (!is_variable_B ? weight_type : input_type));
TORCH_CHECK(C.scalar_type() == (!is_variable_C ? weight_type : input_type));
TORCH_CHECK(u.is_cuda());
TORCH_CHECK(delta.is_cuda());
TORCH_CHECK(A.is_cuda());
TORCH_CHECK(B.is_cuda());
TORCH_CHECK(C.is_cuda());
TORCH_CHECK(u.stride(-1) == 1 || u.size(-1) == 1);
TORCH_CHECK(delta.stride(-1) == 1 || delta.size(-1) == 1);
const auto sizes = u.sizes();
const int batch_size = sizes[0];
const int dim = sizes[1];
const int seqlen = sizes[2];
const int dstate = A.size(1);
const int n_groups = is_variable_B ? B.size(1) : 1;
TORCH_CHECK(dstate <= 256, "selective_scan only supports state dimension <= 256");
CHECK_SHAPE(u, batch_size, dim, seqlen);
CHECK_SHAPE(delta, batch_size, dim, seqlen);
CHECK_SHAPE(A, dim, dstate);
if (!is_variable_B) {
CHECK_SHAPE(B, dim, dstate);
} else {
CHECK_SHAPE(B, batch_size, n_groups, dstate, !is_complex ? seqlen : seqlen * 2);
TORCH_CHECK(B.stride(-1) == 1 || B.size(-1) == 1);
}
if (!is_variable_C) {
CHECK_SHAPE(C, dim, dstate);
} else {
        CHECK_SHAPE(C, batch_size, n_groups, dstate, !is_complex ? seqlen : seqlen * 2);
TORCH_CHECK(C.stride(-1) == 1 || C.size(-1) == 1);
}
if (D_.has_value()) {
auto D = D_.value();
TORCH_CHECK(D.scalar_type() == at::ScalarType::Float);
TORCH_CHECK(D.is_cuda());
TORCH_CHECK(D.stride(-1) == 1 || D.size(-1) == 1);
CHECK_SHAPE(D, dim);
}
if (delta_bias_.has_value()) {
auto delta_bias = delta_bias_.value();
TORCH_CHECK(delta_bias.scalar_type() == at::ScalarType::Float);
TORCH_CHECK(delta_bias.is_cuda());
TORCH_CHECK(delta_bias.stride(-1) == 1 || delta_bias.size(-1) == 1);
CHECK_SHAPE(delta_bias, dim);
}
at::Tensor z, out_z;
const bool has_z = z_.has_value();
if (has_z) {
z = z_.value();
TORCH_CHECK(z.scalar_type() == input_type);
TORCH_CHECK(z.is_cuda());
TORCH_CHECK(z.stride(-1) == 1 || z.size(-1) == 1);
CHECK_SHAPE(z, batch_size, dim, seqlen);
out_z = torch::empty_like(z);
}
const int n_chunks = (seqlen + 2048 - 1) / 2048;
// const int n_chunks = (seqlen + 1024 - 1) / 1024;
// at::Tensor out = torch::empty_like(u);
// Right now u has BHL layout and delta has HBL layout, and we want out to have HBL layout
at::Tensor out = torch::empty_like(delta);
at::Tensor x;
x = torch::empty({batch_size, dim, n_chunks, dstate * 2}, u.options().dtype(weight_type));
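    // Presumably x stores the intermediate scan state at each chunk boundary (two values per
    // state, hence the trailing 2 * dstate), which lets chunks be stitched together and is
    // reused by the backward pass when n_chunks > 1.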
SSMParamsBase params;
set_ssm_params_fwd(params, batch_size, dim, seqlen, dstate, n_groups, n_chunks, is_variable_B, is_variable_C,
u, delta, A, B, C, out, z, out_z,
D_.has_value() ? D_.value().data_ptr() : nullptr,
delta_bias_.has_value() ? delta_bias_.value().data_ptr() : nullptr,
x.data_ptr(),
has_z,
delta_softplus);
    // Set the device guard below; otherwise the kernel would be launched on the cuda:0 device
// Cast to char to avoid compiler warning about narrowing
at::cuda::CUDAGuard device_guard{(char)u.get_device()};
auto stream = at::cuda::getCurrentCUDAStream().stream();
DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(u.scalar_type(), "selective_scan_fwd", [&] {
DISPATCH_WTYPE_FLOAT_AND_COMPLEX(A.scalar_type(), "selective_scan_fwd", [&] {
selective_scan_fwd_cuda<input_t, weight_t>(params, stream);
});
});
std::vector<at::Tensor> result = {out, x};
if (has_z) { result.push_back(out_z); }
return result;
}
std::vector<at::Tensor>
selective_scan_bwd(const at::Tensor &u, const at::Tensor &delta,
const at::Tensor &A, const at::Tensor &B, const at::Tensor &C,
const c10::optional<at::Tensor> &D_,
const c10::optional<at::Tensor> &z_,
const c10::optional<at::Tensor> &delta_bias_,
const at::Tensor &dout,
const c10::optional<at::Tensor> &x_,
const c10::optional<at::Tensor> &out_,
c10::optional<at::Tensor> &dz_,
bool delta_softplus,
bool recompute_out_z) {
auto input_type = u.scalar_type();
auto weight_type = A.scalar_type();
TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16);
TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::ComplexFloat);
const bool is_variable_B = B.dim() >= 3;
const bool is_variable_C = C.dim() >= 3;
const bool is_complex = weight_type == at::ScalarType::ComplexFloat;
TORCH_CHECK(delta.scalar_type() == input_type);
TORCH_CHECK(B.scalar_type() == (!is_variable_B ? weight_type : input_type));
TORCH_CHECK(C.scalar_type() == (!is_variable_C ? weight_type : input_type));
TORCH_CHECK(dout.scalar_type() == input_type);
TORCH_CHECK(u.is_cuda());
TORCH_CHECK(delta.is_cuda());
TORCH_CHECK(A.is_cuda());
TORCH_CHECK(B.is_cuda());
TORCH_CHECK(C.is_cuda());
TORCH_CHECK(dout.is_cuda());
TORCH_CHECK(u.stride(-1) == 1 || u.size(-1) == 1);
TORCH_CHECK(delta.stride(-1) == 1 || delta.size(-1) == 1);
TORCH_CHECK(dout.stride(-1) == 1 || dout.size(-1) == 1);
const auto sizes = u.sizes();
const int batch_size = sizes[0];
const int dim = sizes[1];
const int seqlen = sizes[2];
const int dstate = A.size(1);
const int n_groups = is_variable_B ? B.size(1) : 1;
TORCH_CHECK(dstate <= 256, "selective_scan only supports state dimension <= 256");
CHECK_SHAPE(u, batch_size, dim, seqlen);
CHECK_SHAPE(delta, batch_size, dim, seqlen);
CHECK_SHAPE(A, dim, dstate);
if (!is_variable_B) {
CHECK_SHAPE(B, dim, dstate);
} else {
CHECK_SHAPE(B, batch_size, n_groups, dstate, !is_complex ? seqlen : seqlen * 2);
TORCH_CHECK(B.stride(-1) == 1 || B.size(-1) == 1);
}
if (!is_variable_C) {
CHECK_SHAPE(C, dim, dstate);
} else {
        CHECK_SHAPE(C, batch_size, n_groups, dstate, !is_complex ? seqlen : seqlen * 2);
TORCH_CHECK(C.stride(-1) == 1 || C.size(-1) == 1);
}
CHECK_SHAPE(dout, batch_size, dim, seqlen);
if (D_.has_value()) {
auto D = D_.value();
TORCH_CHECK(D.scalar_type() == at::ScalarType::Float);
TORCH_CHECK(D.is_cuda());
TORCH_CHECK(D.stride(-1) == 1 || D.size(-1) == 1);
CHECK_SHAPE(D, dim);
}
if (delta_bias_.has_value()) {
auto delta_bias = delta_bias_.value();
TORCH_CHECK(delta_bias.scalar_type() == at::ScalarType::Float);
TORCH_CHECK(delta_bias.is_cuda());
TORCH_CHECK(delta_bias.stride(-1) == 1 || delta_bias.size(-1) == 1);
CHECK_SHAPE(delta_bias, dim);
}
at::Tensor z, out, dz, out_z;
const bool has_z = z_.has_value();
if (has_z) {
z = z_.value();
TORCH_CHECK(z.scalar_type() == input_type);
TORCH_CHECK(z.is_cuda());
TORCH_CHECK(z.stride(-1) == 1 || z.size(-1) == 1);
CHECK_SHAPE(z, batch_size, dim, seqlen);
TORCH_CHECK(out_.has_value());
out = out_.value();
TORCH_CHECK(out.scalar_type() == input_type);
TORCH_CHECK(out.is_cuda());
TORCH_CHECK(out.stride(-1) == 1 || out.size(-1) == 1);
CHECK_SHAPE(out, batch_size, dim, seqlen);
if (dz_.has_value()) {
dz = dz_.value();
TORCH_CHECK(dz.scalar_type() == input_type);
TORCH_CHECK(dz.is_cuda());
TORCH_CHECK(dz.stride(-1) == 1 || dz.size(-1) == 1);
CHECK_SHAPE(dz, batch_size, dim, seqlen);
} else {
dz = torch::empty_like(z);
}
if (recompute_out_z) {
out_z = torch::empty_like(out);
}
}
const int n_chunks = (seqlen + 2048 - 1) / 2048;
// const int n_chunks = (seqlen + 1024 - 1) / 1024;
if (n_chunks > 1) { TORCH_CHECK(x_.has_value()); }
if (x_.has_value()) {
auto x = x_.value();
TORCH_CHECK(x.scalar_type() == weight_type);
TORCH_CHECK(x.is_cuda());
TORCH_CHECK(x.is_contiguous());
CHECK_SHAPE(x, batch_size, dim, n_chunks, 2 * dstate);
}
at::Tensor du = torch::empty_like(u);
at::Tensor ddelta = torch::empty_like(delta);
at::Tensor dA = torch::zeros_like(A);
at::Tensor dB = !is_variable_B ? torch::zeros_like(B) : torch::zeros_like(B, B.options().dtype(torch::kFloat32));
at::Tensor dC = !is_variable_C ? torch::zeros_like(C) : torch::zeros_like(C, C.options().dtype(torch::kFloat32));
at::Tensor dD;
if (D_.has_value()) { dD = torch::zeros_like(D_.value()); }
at::Tensor ddelta_bias;
if (delta_bias_.has_value()) { ddelta_bias = torch::zeros_like(delta_bias_.value()); }
SSMParamsBwd params;
set_ssm_params_bwd(params, batch_size, dim, seqlen, dstate, n_groups, n_chunks, is_variable_B, is_variable_C,
u, delta, A, B, C, z, out, out_z,
D_.has_value() ? D_.value().data_ptr() : nullptr,
delta_bias_.has_value() ? delta_bias_.value().data_ptr() : nullptr,
x_.has_value() ? x_.value().data_ptr() : nullptr,
dout, du, ddelta, dA, dB, dC, dz,
D_.has_value() ? dD.data_ptr() : nullptr,
delta_bias_.has_value() ? ddelta_bias.data_ptr() : nullptr,
has_z, delta_softplus, recompute_out_z);
    // Set the device guard below; otherwise the kernel would be launched on the cuda:0 device
// Cast to char to avoid compiler warning about narrowing
at::cuda::CUDAGuard device_guard{(char)u.get_device()};
auto stream = at::cuda::getCurrentCUDAStream().stream();
DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(u.scalar_type(), "selective_scan_bwd", [&] {
DISPATCH_WTYPE_FLOAT_AND_COMPLEX(A.scalar_type(), "selective_scan_bwd", [&] {
selective_scan_bwd_cuda<input_t, weight_t>(params, stream);
});
});
std::vector<at::Tensor> result = {du, ddelta, dA, dB.to(B.dtype()), dC.to(C.dtype()), dD, ddelta_bias};
if (has_z) { result.push_back(dz); }
if (recompute_out_z) { result.push_back(out_z); }
return result;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("fwd", &selective_scan_fwd, "Selective scan forward");
m.def("bwd", &selective_scan_bwd, "Selective scan backward");
}
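// Python-side call convention, as exposed by the bindings above:
//   fwd(u, delta, A, B, C, D?, z?, delta_bias?, delta_softplus)
//       -> [out, x] (+ out_z when z is provided)
//   bwd(u, delta, A, B, C, D?, z?, delta_bias?, dout, x?, out?, dz?, delta_softplus, recompute_out_z)
//       -> [du, ddelta, dA, dB, dC, dD, ddelta_bias] (+ dz when z is provided, + out_z when recompute_out_z)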
/******************************************************************************
* Copyright (c) 2023, Tri Dao.
******************************************************************************/
#pragma once
////////////////////////////////////////////////////////////////////////////////////////////////////
struct SSMScanParamsBase {
using index_t = uint32_t;
int batch, seqlen, n_chunks;
index_t a_batch_stride;
index_t b_batch_stride;
index_t out_batch_stride;
// Common data pointers.
void *__restrict__ a_ptr;
void *__restrict__ b_ptr;
void *__restrict__ out_ptr;
void *__restrict__ x_ptr;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
struct SSMParamsBase {
using index_t = uint32_t;
int batch, dim, seqlen, dstate, n_groups, n_chunks;
int dim_ngroups_ratio;
bool is_variable_B;
bool is_variable_C;
bool delta_softplus;
index_t A_d_stride;
index_t A_dstate_stride;
index_t B_batch_stride;
index_t B_d_stride;
index_t B_dstate_stride;
index_t B_group_stride;
index_t C_batch_stride;
index_t C_d_stride;
index_t C_dstate_stride;
index_t C_group_stride;
index_t u_batch_stride;
index_t u_d_stride;
index_t delta_batch_stride;
index_t delta_d_stride;
index_t z_batch_stride;
index_t z_d_stride;
index_t out_batch_stride;
index_t out_d_stride;
index_t out_z_batch_stride;
index_t out_z_d_stride;
// Common data pointers.
void *__restrict__ A_ptr;
void *__restrict__ B_ptr;
void *__restrict__ C_ptr;
void *__restrict__ D_ptr;
void *__restrict__ u_ptr;
void *__restrict__ delta_ptr;
void *__restrict__ delta_bias_ptr;
void *__restrict__ out_ptr;
void *__restrict__ x_ptr;
void *__restrict__ z_ptr;
void *__restrict__ out_z_ptr;
};
struct SSMParamsBwd: public SSMParamsBase {
index_t dout_batch_stride;
index_t dout_d_stride;
index_t dA_d_stride;
index_t dA_dstate_stride;
index_t dB_batch_stride;
index_t dB_group_stride;
index_t dB_d_stride;
index_t dB_dstate_stride;
index_t dC_batch_stride;
index_t dC_group_stride;
index_t dC_d_stride;
index_t dC_dstate_stride;
index_t du_batch_stride;
index_t du_d_stride;
index_t dz_batch_stride;
index_t dz_d_stride;
index_t ddelta_batch_stride;
index_t ddelta_d_stride;
// Common data pointers.
void *__restrict__ dout_ptr;
void *__restrict__ dA_ptr;
void *__restrict__ dB_ptr;
void *__restrict__ dC_ptr;
void *__restrict__ dD_ptr;
void *__restrict__ du_ptr;
void *__restrict__ dz_ptr;
void *__restrict__ ddelta_ptr;
void *__restrict__ ddelta_bias_ptr;
};
/******************************************************************************
* Copyright (c) 2023, Tri Dao.
******************************************************************************/
// Split into multiple files to compile in parallel
#include "selective_scan_bwd_kernel.cuh"
template void selective_scan_bwd_cuda<at::BFloat16, complex_t>(SSMParamsBwd &params, cudaStream_t stream);
/******************************************************************************
* Copyright (c) 2023, Tri Dao.
******************************************************************************/
// Split into multiple files to compile in parallel
#include "selective_scan_bwd_kernel.cuh"
template void selective_scan_bwd_cuda<at::BFloat16, float>(SSMParamsBwd &params, cudaStream_t stream);