Unverified Commit 57e50f8d authored by Muyang Li, committed by GitHub

style: upgrade the linter (#339)

* style: reformatted code

* style: reformatted code
parent b737368d
......@@ -8,11 +8,9 @@ import numpy as np
import torch
from diffusers import FluxPipeline
from diffusers.image_processor import PipelineImageInput
from diffusers.pipelines.flux.pipeline_flux import calculate_shift, EXAMPLE_DOC_STRING, retrieve_timesteps
from diffusers.pipelines.flux.pipeline_flux import EXAMPLE_DOC_STRING, calculate_shift, retrieve_timesteps
from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput
from diffusers.utils import (
replace_example_docstring,
)
from diffusers.utils import replace_example_docstring
from facexlib.parsing import init_parsing_model
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
from huggingface_hub import hf_hub_download, snapshot_download
......
[build-system]
requires = [
"setuptools",
"torch>=2.5",
"wheel",
"ninja",
]
build-backend = "setuptools.build_meta"
[tool.isort]
profile = "black"
known_first_party = ["nunchaku"]
line_length = 120
[tool.setuptools.packages.find]
include = ["nunchaku"]
[tool.black]
line-length = 120
target-version = ['py311']
[tool.ruff]
line-length = 140
[tool.ruff.lint]
select = ["E", "W", "F"]
ignore = ["F401"]
line-length = 120
[project]
dynamic = ["version"]
......@@ -29,3 +22,15 @@ dependencies = [
"huggingface_hub",
]
requires-python = ">=3.10"
[build-system]
requires = [
"setuptools",
"torch>=2.5",
"wheel",
"ninja",
]
build-backend = "setuptools.build_meta"
[tool.setuptools.packages.find]
include = ["nunchaku"]
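For context, the isort/black settings added above are what drive the import reordering seen at the top of this diff (e.g. `EXAMPLE_DOC_STRING` moving ahead of `calculate_shift`, and `CUDA_HOME` ahead of `BuildExtension` in setup.py). Below is a minimal sketch, not part of this commit, of how those settings could be exercised programmatically; it assumes the `isort` and `black` packages are installed and uses one import line taken from this diff as input.

```python
# Minimal sketch (illustrative, not part of this commit): apply the isort + black
# pass configured in pyproject.toml to one import line from this diff.
# Assumes the `isort` and `black` packages are installed.
import black
import isort

src = (
    "from diffusers.pipelines.flux.pipeline_flux import "
    "calculate_shift, EXAMPLE_DOC_STRING, retrieve_timesteps\n"
)

# isort's default order_by_type puts ALL_CAPS names before functions, which is
# why EXAMPLE_DOC_STRING now precedes calculate_shift in the reformatted import.
sorted_src = isort.code(src, profile="black", line_length=120)

# black then reflows the result under the configured 120-character line length.
formatted = black.format_str(sorted_src, mode=black.Mode(line_length=120))
print(formatted, end="")
```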
......@@ -6,7 +6,7 @@ import sys
import setuptools
import torch
from packaging import version as packaging_version
from torch.utils.cpp_extension import BuildExtension, CUDA_HOME, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension
class CustomBuildExtension(BuildExtension):
......
......@@ -13,12 +13,9 @@
using spdlog::fmt_lib::format;
using namespace nunchaku;
Tensor forward_mlp(GEMM_W4A4 &fc1, GEMM_W4A4 &fc2, Tensor norm_hidden_states) {
Tensor ff_output = fc2.forward_quant(
std::get<GEMM_W4A4::QuantizedActivation>(fc1.forward(norm_hidden_states, GEMM_W4A4::FuseOptions::GELU_QUANT, &fc2))
);
Tensor ff_output = fc2.forward_quant(std::get<GEMM_W4A4::QuantizedActivation>(
fc1.forward(norm_hidden_states, GEMM_W4A4::FuseOptions::GELU_QUANT, &fc2)));
return ff_output;
}
......@@ -27,7 +24,6 @@ Tensor forward_mlp(GEMM_W4A4 &fc1, GEMM_W4A4 &fc2, Tensor norm_hidden_states) {
// return ff_output;
// }
Tensor forward_fc(GEMM_W4A4 &fc, Tensor x) {
return fc.forward(x);
// return std::get<Tensor>(fc.forward(x));
......@@ -37,16 +33,9 @@ Tensor forward_fc(GEMM_W4A4 &fc, Tensor x) {
// return fc.forward(x);
// }
AdaLayerNormZeroSingle::AdaLayerNormZeroSingle(int dim, Tensor::ScalarType dtype, Device device) :
dim(dim),
linear(dim, 3 * dim, true, dtype, device),
norm(dim, 1e-6, false, dtype, device)
{
registerChildren
(linear, "linear")
(norm, "norm")
;
AdaLayerNormZeroSingle::AdaLayerNormZeroSingle(int dim, Tensor::ScalarType dtype, Device device)
: dim(dim), linear(dim, 3 * dim, true, dtype, device), norm(dim, 1e-6, false, dtype, device) {
registerChildren(linear, "linear")(norm, "norm");
}
AdaLayerNormZeroSingle::Output AdaLayerNormZeroSingle::forward(Tensor x, Tensor emb) {
......@@ -66,15 +55,10 @@ AdaLayerNormZeroSingle::Output AdaLayerNormZeroSingle::forward(Tensor x, Tensor
return Output{norm_x, gate_msa};
}
AdaLayerNormZero::AdaLayerNormZero(int dim, bool pre_only, Tensor::ScalarType dtype, Device device) :
dim(dim), pre_only(pre_only),
linear(dim, pre_only ? 2 * dim : 6 * dim, true, dtype, device),
norm(dim, 1e-6, false, dtype, device)
{
registerChildren
(linear, "linear")
(norm, "norm")
;
AdaLayerNormZero::AdaLayerNormZero(int dim, bool pre_only, Tensor::ScalarType dtype, Device device)
: dim(dim), pre_only(pre_only), linear(dim, pre_only ? 2 * dim : 6 * dim, true, dtype, device),
norm(dim, 1e-6, false, dtype, device) {
registerChildren(linear, "linear")(norm, "norm");
}
AdaLayerNormZero::Output AdaLayerNormZero::forward(Tensor x, Tensor emb) {
......@@ -111,10 +95,8 @@ AdaLayerNormZero::Output AdaLayerNormZero::forward(Tensor x, Tensor emb) {
}
}
Attention::Attention(int num_heads, int dim_head, Device device) :
num_heads(num_heads), dim_head(dim_head), force_fp16(false)
{
Attention::Attention(int num_heads, int dim_head, Device device)
: num_heads(num_heads), dim_head(dim_head), force_fp16(false) {
headmask_type = Tensor::allocate({num_heads}, Tensor::INT32, Device::cpu());
for (int i = 0; i < num_heads; i++) {
headmask_type.data_ptr<int32_t>()[i] = i + 1;
......@@ -135,11 +117,7 @@ Tensor Attention::forward(Tensor qkv) {
Tensor k = reshaped.slice(2, num_heads, num_heads * 2);
Tensor v = reshaped.slice(2, num_heads * 2, num_heads * 3);
Tensor raw_attn_output = mha_fwd(q, k, v,
0.0f,
pow(q.shape[-1], (-0.5)),
false, -1, -1, false
).front();
Tensor raw_attn_output = mha_fwd(q, k, v, 0.0f, pow(q.shape[-1], (-0.5)), false, -1, -1, false).front();
assert(raw_attn_output.shape[0] == batch_size);
assert(raw_attn_output.shape[1] == num_tokens);
......@@ -176,9 +154,9 @@ Tensor Attention::forward(Tensor qkv, Tensor pool_qkv, float sparsityRatio) {
pool_qkv = pool_qkv.view({batch_size, pool_tokens, 3, num_heads, dim_head});
pool_qkv = pool_qkv.transpose(1, 2).transpose(2, 3); // [batch_size, 3, num_heads, poolTokens, dim_head]
for (int i = 0; i < batch_size; i++) {
Tensor pool_q = pool_qkv.slice(0, i, i+1).slice(1, 0, 1);
Tensor pool_k = pool_qkv.slice(0, i, i+1).slice(1, 1, 2);
Tensor pool_s = pool_score.slice(0, i, i+1);
Tensor pool_q = pool_qkv.slice(0, i, i + 1).slice(1, 0, 1);
Tensor pool_k = pool_qkv.slice(0, i, i + 1).slice(1, 1, 2);
Tensor pool_s = pool_score.slice(0, i, i + 1);
gemm_batched_fp16(pool_q, pool_k, pool_s);
}
}
......@@ -222,10 +200,13 @@ Tensor Attention::forward(Tensor qkv, Tensor pool_qkv, float sparsityRatio) {
spdlog::debug("q,k,v={}", q.shape.str());
Tensor raw_attn_output = mha_fwd_block(
q, k, v,
cu_seqlens, cu_seqlens,
POOL_SIZE, POOL_SIZE,
Tensor raw_attn_output = mha_fwd_block(q,
k,
v,
cu_seqlens,
cu_seqlens,
POOL_SIZE,
POOL_SIZE,
headmask_type,
{},
blockmask,
......@@ -233,8 +214,12 @@ Tensor Attention::forward(Tensor qkv, Tensor pool_qkv, float sparsityRatio) {
num_tokens,
0.0f,
pow(q.shape[-1], (-0.5)),
false, false, false, -1, -1
).front();
false,
false,
false,
-1,
-1)
.front();
debug("raw_attn_output", raw_attn_output);
......@@ -291,30 +276,22 @@ void Attention::setForceFP16(Module *module, bool value) {
});
}
FluxSingleTransformerBlock::FluxSingleTransformerBlock(int dim, int num_attention_heads, int attention_head_dim, int mlp_ratio, bool use_fp4, Tensor::ScalarType dtype, Device device) :
dim(dim),
dim_head(attention_head_dim / num_attention_heads),
num_heads(num_attention_heads),
mlp_hidden_dim(dim * mlp_ratio),
norm(dim, dtype, device),
FluxSingleTransformerBlock::FluxSingleTransformerBlock(int dim,
int num_attention_heads,
int attention_head_dim,
int mlp_ratio,
bool use_fp4,
Tensor::ScalarType dtype,
Device device)
: dim(dim), dim_head(attention_head_dim / num_attention_heads), num_heads(num_attention_heads),
mlp_hidden_dim(dim * mlp_ratio), norm(dim, dtype, device),
mlp_fc1(dim, mlp_hidden_dim, true, use_fp4, dtype, device),
mlp_fc2(mlp_hidden_dim, dim, true, use_fp4, dtype, device),
qkv_proj(dim, dim * 3, true, use_fp4, dtype, device),
norm_q(dim_head, 1e-6, false, dtype, device),
norm_k(dim_head, 1e-6, false, dtype, device),
mlp_fc2(mlp_hidden_dim, dim, true, use_fp4, dtype, device), qkv_proj(dim, dim * 3, true, use_fp4, dtype, device),
norm_q(dim_head, 1e-6, false, dtype, device), norm_k(dim_head, 1e-6, false, dtype, device),
attn(num_attention_heads, attention_head_dim / num_attention_heads, device),
out_proj(dim, dim, true, use_fp4, dtype, device)
{
registerChildren
(norm, "norm")
(mlp_fc1, "mlp_fc1")
(mlp_fc2, "mlp_fc2")
(qkv_proj, "qkv_proj")
(norm_q, "norm_q")
(norm_k, "norm_k")
(attn, "attn")
(out_proj, "out_proj")
;
out_proj(dim, dim, true, use_fp4, dtype, device) {
registerChildren(norm, "norm")(mlp_fc1, "mlp_fc1")(mlp_fc2, "mlp_fc2")(qkv_proj, "qkv_proj")(norm_q, "norm_q")(
norm_k, "norm_k")(attn, "attn")(out_proj, "out_proj");
}
Tensor FluxSingleTransformerBlock::forward(Tensor hidden_states, Tensor temb, Tensor rotary_emb) {
......@@ -335,12 +312,18 @@ Tensor FluxSingleTransformerBlock::forward(Tensor hidden_states, Tensor temb, Te
debug("rotary_emb", rotary_emb);
if (attnImpl == AttentionImpl::FlashAttention2) {
Tensor qkv = Tensor::allocate({batch_size, num_tokens, dim * 3}, norm_hidden_states.scalar_type(), norm_hidden_states.device());
Tensor qkv = Tensor::allocate(
{batch_size, num_tokens, dim * 3}, norm_hidden_states.scalar_type(), norm_hidden_states.device());
// qkv_proj.forward(norm_hidden_states, qkv, {});
// debug("qkv_raw", qkv);
for (int i = 0; i < batch_size; i++) {
qkv_proj.forward(norm_hidden_states.slice(0, i, i+1), qkv.slice(0, i, i+1), {}, norm_q.weight, norm_k.weight, rotary_emb);
qkv_proj.forward(norm_hidden_states.slice(0, i, i + 1),
qkv.slice(0, i, i + 1),
{},
norm_q.weight,
norm_k.weight,
rotary_emb);
}
debug("qkv", qkv);
// Tensor qkv = forward_fc(qkv_proj, norm_hidden_states);
......@@ -353,16 +336,23 @@ Tensor FluxSingleTransformerBlock::forward(Tensor hidden_states, Tensor temb, Te
const int num_tokens_pad = ceilDiv(num_tokens, 256) * 256;
Tensor q = Tensor::allocate({batch_size, num_heads, num_tokens_pad, dim_head}, Tensor::FP16, norm_hidden_states.device());
Tensor k = Tensor::allocate({batch_size, num_heads, num_tokens_pad, dim_head}, Tensor::FP16, norm_hidden_states.device());
Tensor v = Tensor::allocate({batch_size, num_heads, num_tokens_pad, dim_head}, Tensor::FP16, norm_hidden_states.device());
Tensor q = Tensor::allocate(
{batch_size, num_heads, num_tokens_pad, dim_head}, Tensor::FP16, norm_hidden_states.device());
Tensor k = Tensor::allocate(
{batch_size, num_heads, num_tokens_pad, dim_head}, Tensor::FP16, norm_hidden_states.device());
Tensor v = Tensor::allocate(
{batch_size, num_heads, num_tokens_pad, dim_head}, Tensor::FP16, norm_hidden_states.device());
for (int i = 0; i < batch_size; i++) {
qkv_proj.forward(
norm_hidden_states.slice(0, i, i+1), {}, {}, norm_q.weight, norm_k.weight, rotary_emb,
q.slice(0, i, i+1),
k.slice(0, i, i+1),
v.slice(0, i, i+1),
qkv_proj.forward(norm_hidden_states.slice(0, i, i + 1),
{},
{},
norm_q.weight,
norm_k.weight,
rotary_emb,
q.slice(0, i, i + 1),
k.slice(0, i, i + 1),
v.slice(0, i, i + 1),
num_tokens);
}
......@@ -370,7 +360,9 @@ Tensor FluxSingleTransformerBlock::forward(Tensor hidden_states, Tensor temb, Te
debug("packed_k", k);
debug("packed_v", v);
Tensor o = Tensor::allocate({batch_size, num_tokens_pad, num_heads * dim_head}, norm_hidden_states.scalar_type(), norm_hidden_states.device());
Tensor o = Tensor::allocate({batch_size, num_tokens_pad, num_heads * dim_head},
norm_hidden_states.scalar_type(),
norm_hidden_states.device());
kernels::attention_fp16(q, k, v, o, pow(dim_head, (-0.5)));
......@@ -378,16 +370,14 @@ Tensor FluxSingleTransformerBlock::forward(Tensor hidden_states, Tensor temb, Te
attn_output = o.slice(1, 0, num_tokens);
} else {
attn_output = Tensor::allocate({batch_size, num_tokens, num_heads * dim_head}, o.scalar_type(), o.device());
checkCUDA(cudaMemcpy2DAsync(
attn_output.data_ptr(),
checkCUDA(cudaMemcpy2DAsync(attn_output.data_ptr(),
attn_output.stride(0) * attn_output.scalar_size(),
o.data_ptr(),
o.stride(0) * o.scalar_size(),
attn_output.stride(0) * attn_output.scalar_size(),
batch_size,
cudaMemcpyDeviceToDevice,
getCurrentCUDAStream()
));
getCurrentCUDAStream()));
}
} else {
assert(false);
......@@ -395,8 +385,6 @@ Tensor FluxSingleTransformerBlock::forward(Tensor hidden_states, Tensor temb, Te
debug("raw_attn_output", attn_output);
attn_output = forward_fc(out_proj, attn_output);
debug("attn_output", attn_output);
......@@ -414,54 +402,40 @@ Tensor FluxSingleTransformerBlock::forward(Tensor hidden_states, Tensor temb, Te
return hidden_states;
}
JointTransformerBlock::JointTransformerBlock(int dim, int num_attention_heads, int attention_head_dim, bool context_pre_only, bool use_fp4, Tensor::ScalarType dtype, Device device) :
dim(dim),
dim_head(attention_head_dim / num_attention_heads),
num_heads(num_attention_heads),
context_pre_only(context_pre_only),
norm1(dim, false, dtype, device),
norm1_context(dim, context_pre_only, dtype, device),
qkv_proj(dim, dim * 3, true, use_fp4, dtype, device),
qkv_proj_context(dim, dim * 3, true, use_fp4, dtype, device),
norm_q(dim_head, 1e-6, false, dtype, device),
norm_k(dim_head, 1e-6, false, dtype, device),
norm_added_q(dim_head, 1e-6, false, dtype, device),
JointTransformerBlock::JointTransformerBlock(int dim,
int num_attention_heads,
int attention_head_dim,
bool context_pre_only,
bool use_fp4,
Tensor::ScalarType dtype,
Device device)
: dim(dim), dim_head(attention_head_dim / num_attention_heads), num_heads(num_attention_heads),
context_pre_only(context_pre_only), norm1(dim, false, dtype, device),
norm1_context(dim, context_pre_only, dtype, device), qkv_proj(dim, dim * 3, true, use_fp4, dtype, device),
qkv_proj_context(dim, dim * 3, true, use_fp4, dtype, device), norm_q(dim_head, 1e-6, false, dtype, device),
norm_k(dim_head, 1e-6, false, dtype, device), norm_added_q(dim_head, 1e-6, false, dtype, device),
norm_added_k(dim_head, 1e-6, false, dtype, device),
attn(num_attention_heads, attention_head_dim / num_attention_heads, device),
out_proj(dim, dim, true, use_fp4, dtype, device),
out_proj_context(dim, dim, true, use_fp4, dtype, device),
norm2(dim, 1e-6, false, dtype, device),
norm2_context(dim, 1e-6, false, dtype, device),
mlp_fc1(dim, dim * 4, true, use_fp4, dtype, device),
mlp_fc2(dim * 4, dim, true, use_fp4, dtype, device),
out_proj(dim, dim, true, use_fp4, dtype, device), out_proj_context(dim, dim, true, use_fp4, dtype, device),
norm2(dim, 1e-6, false, dtype, device), norm2_context(dim, 1e-6, false, dtype, device),
mlp_fc1(dim, dim * 4, true, use_fp4, dtype, device), mlp_fc2(dim * 4, dim, true, use_fp4, dtype, device),
mlp_context_fc1(dim, dim * 4, true, use_fp4, dtype, device),
mlp_context_fc2(dim * 4, dim, true, use_fp4, dtype, device)
{
registerChildren
(norm1, "norm1")
(norm1_context, "norm1_context")
(qkv_proj, "qkv_proj")
(qkv_proj_context, "qkv_proj_context")
(norm_q, "norm_q")
(norm_k, "norm_k")
(norm_added_q, "norm_added_q")
(norm_added_k, "norm_added_k")
(attn, "attn")
(out_proj, "out_proj")
(out_proj_context, "out_proj_context")
(norm2, "norm2")
(norm2_context, "norm2_context")
(mlp_fc1, "mlp_fc1")
(mlp_fc2, "mlp_fc2")
(mlp_context_fc1, "mlp_context_fc1")
(mlp_context_fc2, "mlp_context_fc2")
;
mlp_context_fc2(dim * 4, dim, true, use_fp4, dtype, device) {
registerChildren(norm1, "norm1")(norm1_context, "norm1_context")(qkv_proj, "qkv_proj")(qkv_proj_context,
"qkv_proj_context")(
norm_q, "norm_q")(norm_k, "norm_k")(norm_added_q, "norm_added_q")(norm_added_k, "norm_added_k")(attn, "attn")(
out_proj, "out_proj")(out_proj_context, "out_proj_context")(norm2, "norm2")(norm2_context, "norm2_context")(
mlp_fc1, "mlp_fc1")(mlp_fc2, "mlp_fc2")(mlp_context_fc1, "mlp_context_fc1")(mlp_context_fc2, "mlp_context_fc2");
}
// hidden_states: [Batch, Width * Height, dim]
// encoder_hidden_states: [Batch, Token, dim]
std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states, Tensor encoder_hidden_states, Tensor temb, Tensor rotary_emb, Tensor rotary_emb_context, float sparsityRatio) {
std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
Tensor encoder_hidden_states,
Tensor temb,
Tensor rotary_emb,
Tensor rotary_emb_context,
float sparsityRatio) {
int batch_size = hidden_states.shape[0];
assert(encoder_hidden_states.shape[0] == batch_size);
......@@ -469,14 +443,16 @@ std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
nvtxRangePushA("AdaNorm");
int num_tokens_img = hidden_states.shape[1];
int num_tokens_txt = encoder_hidden_states.shape[1];
assert(hidden_states.shape[2] == dim);
assert(encoder_hidden_states.shape[2] == dim);
spdlog::debug("hidden_states={} encoder_hidden_states={} temb={}", hidden_states.shape.str(), encoder_hidden_states.shape.str(), temb.shape.str());
spdlog::debug("hidden_states={} encoder_hidden_states={} temb={}",
hidden_states.shape.str(),
encoder_hidden_states.shape.str(),
temb.shape.str());
spdlog::debug("batch_size={} num_tokens_img={} num_tokens_txt={}", batch_size, num_tokens_img, num_tokens_txt);
auto norm1_output = norm1.forward(hidden_states, temb);
......@@ -512,22 +488,28 @@ std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
const bool blockSparse = sparsityRatio > 0;
const int poolTokens = num_tokens_img / POOL_SIZE + num_tokens_txt / POOL_SIZE;
concat = Tensor::allocate({batch_size, num_tokens_img + num_tokens_txt, dim * 3}, norm1_output.x.scalar_type(), norm1_output.x.device());
concat = Tensor::allocate({batch_size, num_tokens_img + num_tokens_txt, dim * 3},
norm1_output.x.scalar_type(),
norm1_output.x.device());
pool = blockSparse
? Tensor::allocate({batch_size, poolTokens, dim * 3}, norm1_output.x.scalar_type(), norm1_output.x.device())
pool = blockSparse ? Tensor::allocate({batch_size, poolTokens, dim * 3},
norm1_output.x.scalar_type(),
norm1_output.x.device())
: Tensor{};
for (int i = 0; i < batch_size; i++) {
// img first
Tensor qkv = concat.slice(0, i, i + 1).slice(1, 0, num_tokens_img);
Tensor qkv_context = concat.slice(0, i, i + 1).slice(1, num_tokens_img, num_tokens_img + num_tokens_txt);
Tensor qkv_context =
concat.slice(0, i, i + 1).slice(1, num_tokens_img, num_tokens_img + num_tokens_txt);
Tensor pool_qkv = pool.valid()
? pool.slice(0, i, i + 1).slice(1, 0, num_tokens_img / POOL_SIZE)
: Tensor{};
Tensor pool_qkv =
pool.valid() ? pool.slice(0, i, i + 1).slice(1, 0, num_tokens_img / POOL_SIZE) : Tensor{};
Tensor pool_qkv_context = pool.valid()
? pool.slice(0, i, i + 1).slice(1, num_tokens_img / POOL_SIZE, num_tokens_img / POOL_SIZE + num_tokens_txt / POOL_SIZE)
? pool.slice(0, i, i + 1)
.slice(1,
num_tokens_img / POOL_SIZE,
num_tokens_img / POOL_SIZE + num_tokens_txt / POOL_SIZE)
: Tensor{};
// qkv_proj.forward(norm1_output.x.slice(0, i, i + 1), qkv);
......@@ -535,7 +517,8 @@ std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
debug("rotary_emb", rotary_emb);
qkv_proj.forward(norm1_output.x.slice(0, i, i + 1), qkv, pool_qkv, norm_q.weight, norm_k.weight, rotary_emb);
qkv_proj.forward(
norm1_output.x.slice(0, i, i + 1), qkv, pool_qkv, norm_q.weight, norm_k.weight, rotary_emb);
debug("qkv", qkv);
// qkv_proj_context.forward(norm1_context_output.x.slice(0, i, i + 1), qkv_context);
......@@ -543,7 +526,12 @@ std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
debug("rotary_emb_context", rotary_emb_context);
qkv_proj_context.forward(norm1_context_output.x.slice(0, i, i + 1), qkv_context, pool_qkv_context, norm_added_q.weight, norm_added_k.weight, rotary_emb_context);
qkv_proj_context.forward(norm1_context_output.x.slice(0, i, i + 1),
qkv_context,
pool_qkv_context,
norm_added_q.weight,
norm_added_k.weight,
rotary_emb_context);
debug("qkv_context", qkv_context);
}
......@@ -578,28 +566,40 @@ std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
{
nvtxRangePushA("qkv_proj");
concat_q = Tensor::allocate({batch_size, num_heads, num_tokens_img_pad + num_tokens_txt_pad, dim_head}, Tensor::FP16, norm1_output.x.device());
concat_q = Tensor::allocate({batch_size, num_heads, num_tokens_img_pad + num_tokens_txt_pad, dim_head},
Tensor::FP16,
norm1_output.x.device());
concat_k = Tensor::empty_like(concat_q);
concat_v = Tensor::empty_like(concat_q);
for (int i = 0; i < batch_size; i++) {
// img first
auto sliceImg = [&](Tensor x) {
return x.slice(0, i, i+1).slice(2, 0, num_tokens_img_pad);
};
auto sliceImg = [&](Tensor x) { return x.slice(0, i, i + 1).slice(2, 0, num_tokens_img_pad); };
auto sliceTxt = [&](Tensor x) {
return x.slice(0, i, i+1).slice(2, num_tokens_img_pad, num_tokens_img_pad + num_tokens_txt_pad);
return x.slice(0, i, i + 1).slice(2, num_tokens_img_pad, num_tokens_img_pad + num_tokens_txt_pad);
};
qkv_proj.forward(
norm1_output.x.slice(0, i, i + 1), {}, {}, norm_q.weight, norm_k.weight, rotary_emb,
sliceImg(concat_q), sliceImg(concat_k), sliceImg(concat_v), num_tokens_img
);
qkv_proj_context.forward(
norm1_context_output.x.slice(0, i, i + 1), {}, {}, norm_added_q.weight, norm_added_k.weight, rotary_emb_context,
sliceTxt(concat_q), sliceTxt(concat_k), sliceTxt(concat_v), num_tokens_txt
);
qkv_proj.forward(norm1_output.x.slice(0, i, i + 1),
{},
{},
norm_q.weight,
norm_k.weight,
rotary_emb,
sliceImg(concat_q),
sliceImg(concat_k),
sliceImg(concat_v),
num_tokens_img);
qkv_proj_context.forward(norm1_context_output.x.slice(0, i, i + 1),
{},
{},
norm_added_q.weight,
norm_added_k.weight,
rotary_emb_context,
sliceTxt(concat_q),
sliceTxt(concat_k),
sliceTxt(concat_v),
num_tokens_txt);
}
debug("concat_q", concat_q);
......@@ -609,7 +609,9 @@ std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
nvtxRangePop();
}
raw_attn_output = Tensor::allocate({batch_size, num_tokens_img_pad + num_tokens_txt_pad, num_heads * dim_head}, norm1_output.x.scalar_type(), norm1_output.x.device());
raw_attn_output = Tensor::allocate({batch_size, num_tokens_img_pad + num_tokens_txt_pad, num_heads * dim_head},
norm1_output.x.scalar_type(),
norm1_output.x.device());
nvtxRangePushA("Attention");
......@@ -617,7 +619,8 @@ std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
nvtxRangePop();
raw_attn_output = raw_attn_output.view({batch_size, num_tokens_img_pad + num_tokens_txt_pad, num_heads, dim_head});
raw_attn_output =
raw_attn_output.view({batch_size, num_tokens_img_pad + num_tokens_txt_pad, num_heads, dim_head});
} else {
assert(false);
}
......@@ -633,25 +636,28 @@ std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
Tensor raw_attn_output_split;
if (batch_size == 1) {
raw_attn_output_split = raw_attn_output.slice(1, 0, num_tokens_img).reshape({batch_size, num_tokens_img, num_heads * dim_head});
raw_attn_output_split =
raw_attn_output.slice(1, 0, num_tokens_img).reshape({batch_size, num_tokens_img, num_heads * dim_head});
} else {
raw_attn_output_split = Tensor::allocate({batch_size, num_tokens_img, num_heads * dim_head}, raw_attn_output.scalar_type(), raw_attn_output.device());
checkCUDA(cudaMemcpy2DAsync(
raw_attn_output_split.data_ptr(),
raw_attn_output_split = Tensor::allocate({batch_size, num_tokens_img, num_heads * dim_head},
raw_attn_output.scalar_type(),
raw_attn_output.device());
checkCUDA(cudaMemcpy2DAsync(raw_attn_output_split.data_ptr(),
num_tokens_img * num_heads * dim_head * raw_attn_output_split.scalar_size(),
raw_attn_output.data_ptr(),
(num_tokens_img_pad + num_tokens_txt_pad) * num_heads * dim_head * raw_attn_output.scalar_size(),
(num_tokens_img_pad + num_tokens_txt_pad) * num_heads * dim_head *
raw_attn_output.scalar_size(),
num_tokens_img * num_heads * dim_head * raw_attn_output_split.scalar_size(),
batch_size,
cudaMemcpyDeviceToDevice,
stream));
}
spdlog::debug("raw_attn_output_split={}", raw_attn_output_split.shape.str());
debug("img.raw_attn_output_split", raw_attn_output_split);
Tensor attn_output = forward_fc(out_proj, raw_attn_output_split); // std::get<Tensor>(out_proj.forward(raw_attn_output_split));
Tensor attn_output =
forward_fc(out_proj, raw_attn_output_split); // std::get<Tensor>(out_proj.forward(raw_attn_output_split));
debug("img.attn_output", attn_output);
#if 1
......@@ -691,7 +697,7 @@ std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
}
if (context_pre_only) {
return { hidden_states, encoder_hidden_states };
return {hidden_states, encoder_hidden_states};
}
{
......@@ -701,25 +707,30 @@ std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
Tensor raw_attn_output_split;
if (batch_size == 1) {
raw_attn_output_split = raw_attn_output.slice(1, num_tokens_img_pad, num_tokens_img_pad + num_tokens_txt).reshape({batch_size, num_tokens_txt, num_heads * dim_head});
raw_attn_output_split = raw_attn_output.slice(1, num_tokens_img_pad, num_tokens_img_pad + num_tokens_txt)
.reshape({batch_size, num_tokens_txt, num_heads * dim_head});
} else {
raw_attn_output_split = Tensor::allocate({batch_size, num_tokens_txt, num_heads * dim_head}, raw_attn_output.scalar_type(), raw_attn_output.device());
checkCUDA(cudaMemcpy2DAsync(
raw_attn_output_split.data_ptr(),
raw_attn_output_split = Tensor::allocate({batch_size, num_tokens_txt, num_heads * dim_head},
raw_attn_output.scalar_type(),
raw_attn_output.device());
checkCUDA(cudaMemcpy2DAsync(raw_attn_output_split.data_ptr(),
num_tokens_txt * num_heads * dim_head * raw_attn_output_split.scalar_size(),
raw_attn_output.data_ptr<char>() + num_tokens_img_pad * num_heads * dim_head * raw_attn_output_split.scalar_size(),
(num_tokens_img_pad + num_tokens_txt_pad) * num_heads * dim_head * raw_attn_output.scalar_size(),
raw_attn_output.data_ptr<char>() + num_tokens_img_pad * num_heads * dim_head *
raw_attn_output_split.scalar_size(),
(num_tokens_img_pad + num_tokens_txt_pad) * num_heads * dim_head *
raw_attn_output.scalar_size(),
num_tokens_txt * num_heads * dim_head * raw_attn_output_split.scalar_size(),
batch_size,
cudaMemcpyDeviceToDevice,
stream));
}
spdlog::debug("raw_attn_output_split={}", raw_attn_output_split.shape.str());
debug("context.raw_attn_output_split", raw_attn_output_split);
Tensor attn_output = forward_fc(out_proj_context, raw_attn_output_split); // std::get<Tensor>(out_proj_context.forward(raw_attn_output_split));
Tensor attn_output =
forward_fc(out_proj_context,
raw_attn_output_split); // std::get<Tensor>(out_proj_context.forward(raw_attn_output_split));
debug("context.attn_output", attn_output);
#if 1
......@@ -743,9 +754,9 @@ std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
auto norm_hidden_states = encoder_hidden_states;
#endif
// Tensor ff_output = mlp_context_fc2.forward(GELU::forward(mlp_context_fc1.forward(norm_hidden_states)));
// Tensor ff_output = mlp_context_fc2.forward_quant(quant_static_fuse_gelu(mlp_context_fc1.forward(norm_hidden_states), 1.0));
// Tensor ff_output =
// mlp_context_fc2.forward_quant(quant_static_fuse_gelu(mlp_context_fc1.forward(norm_hidden_states), 1.0));
debug("context.ff_input", norm_hidden_states);
Tensor ff_output = forward_mlp(mlp_context_fc1, mlp_context_fc2, norm_hidden_states);
debug("context.ff_output", ff_output);
......@@ -762,12 +773,14 @@ std::tuple<Tensor, Tensor> JointTransformerBlock::forward(Tensor hidden_states,
nvtxRangePop();
return { hidden_states, encoder_hidden_states };
return {hidden_states, encoder_hidden_states};
}
FluxModel::FluxModel(bool use_fp4, bool offload, Tensor::ScalarType dtype, Device device) : dtype(dtype), offload(offload) {
FluxModel::FluxModel(bool use_fp4, bool offload, Tensor::ScalarType dtype, Device device)
: dtype(dtype), offload(offload) {
for (int i = 0; i < 19; i++) {
transformer_blocks.push_back(std::make_unique<JointTransformerBlock>(3072, 24, 3072, false, use_fp4, dtype, device));
transformer_blocks.push_back(
std::make_unique<JointTransformerBlock>(3072, 24, 3072, false, use_fp4, dtype, device));
registerChildren(*transformer_blocks.back(), format("transformer_blocks.{}", i));
if (offload && i > 0) { // don't offload first block
transformer_blocks.back()->setLazyLoad(true);
......@@ -775,7 +788,8 @@ FluxModel::FluxModel(bool use_fp4, bool offload, Tensor::ScalarType dtype, Devic
}
}
for (int i = 0; i < 38; i++) {
single_transformer_blocks.push_back(std::make_unique<FluxSingleTransformerBlock>(3072, 24, 3072, 4, use_fp4, dtype, device));
single_transformer_blocks.push_back(
std::make_unique<FluxSingleTransformerBlock>(3072, 24, 3072, 4, use_fp4, dtype, device));
registerChildren(*single_transformer_blocks.back(), format("single_transformer_blocks.{}", i));
if (offload) {
single_transformer_blocks.back()->setLazyLoad(true);
......@@ -784,8 +798,7 @@ FluxModel::FluxModel(bool use_fp4, bool offload, Tensor::ScalarType dtype, Devic
}
}
Tensor FluxModel::forward(
Tensor hidden_states,
Tensor FluxModel::forward(Tensor hidden_states,
Tensor encoder_hidden_states,
Tensor temb,
Tensor rotary_emb_img,
......@@ -806,14 +819,17 @@ Tensor FluxModel::forward(
Tensor concat;
auto compute = [&](int layer) {
if (skip_first_layer && size_t(layer) == 0) return;
if (skip_first_layer && size_t(layer) == 0)
return;
if (size_t(layer) < transformer_blocks.size()) {
auto &block = transformer_blocks.at(layer);
std::tie(hidden_states, encoder_hidden_states) = block->forward(hidden_states, encoder_hidden_states, temb, rotary_emb_img, rotary_emb_context, 0.0f);
std::tie(hidden_states, encoder_hidden_states) =
block->forward(hidden_states, encoder_hidden_states, temb, rotary_emb_img, rotary_emb_context, 0.0f);
if (controlnet_block_samples.valid()) {
const int num_controlnet_block_samples = controlnet_block_samples.shape[0];
int interval_control = ceilDiv(transformer_blocks.size(), static_cast<size_t>(num_controlnet_block_samples));
int interval_control =
ceilDiv(transformer_blocks.size(), static_cast<size_t>(num_controlnet_block_samples));
int block_index = layer / interval_control;
// Xlabs ControlNet
// block_index = layer % num_controlnet_block_samples;
......@@ -833,11 +849,12 @@ Tensor FluxModel::forward(
concat = Tensor::allocate({batch_size, txt_tokens + img_tokens, 3072}, dtype, device);
for (int i = 0; i < batch_size; i++) {
concat.slice(0, i, i + 1).slice(1, 0, txt_tokens).copy_(encoder_hidden_states.slice(0, i, i + 1));
concat.slice(0, i, i + 1).slice(1, txt_tokens, txt_tokens + img_tokens).copy_(hidden_states.slice(0, i, i + 1));
concat.slice(0, i, i + 1)
.slice(1, txt_tokens, txt_tokens + img_tokens)
.copy_(hidden_states.slice(0, i, i + 1));
}
hidden_states = concat;
encoder_hidden_states = {};
}
auto &block = single_transformer_blocks.at(layer - transformer_blocks.size());
......@@ -845,7 +862,8 @@ Tensor FluxModel::forward(
if (controlnet_single_block_samples.valid()) {
const int num_controlnet_single_block_samples = controlnet_single_block_samples.shape[0];
int interval_control = ceilDiv(single_transformer_blocks.size(), static_cast<size_t>(num_controlnet_single_block_samples));
int interval_control =
ceilDiv(single_transformer_blocks.size(), static_cast<size_t>(num_controlnet_single_block_samples));
int block_index = (layer - transformer_blocks.size()) / interval_control;
// Xlabs ControlNet
// block_index = layer % num_controlnet_single_block_samples
......@@ -892,8 +910,7 @@ Tensor FluxModel::forward(
return hidden_states;
}
std::tuple<Tensor, Tensor> FluxModel::forward_layer(
size_t layer,
std::tuple<Tensor, Tensor> FluxModel::forward_layer(size_t layer,
Tensor hidden_states,
Tensor encoder_hidden_states,
Tensor temb,
......@@ -902,21 +919,13 @@ std::tuple<Tensor, Tensor> FluxModel::forward_layer(
Tensor controlnet_block_samples,
Tensor controlnet_single_block_samples) {
if (layer < transformer_blocks.size()){
if (layer < transformer_blocks.size()) {
std::tie(hidden_states, encoder_hidden_states) = transformer_blocks.at(layer)->forward(
hidden_states,
encoder_hidden_states,
temb,
rotary_emb_img,
rotary_emb_context, 0.0f);
}
else {
std::tie(hidden_states, encoder_hidden_states) = transformer_blocks.at(layer - transformer_blocks.size())->forward(
hidden_states,
encoder_hidden_states,
temb,
rotary_emb_img,
rotary_emb_context, 0.0f);
hidden_states, encoder_hidden_states, temb, rotary_emb_img, rotary_emb_context, 0.0f);
} else {
std::tie(hidden_states, encoder_hidden_states) =
transformer_blocks.at(layer - transformer_blocks.size())
->forward(hidden_states, encoder_hidden_states, temb, rotary_emb_img, rotary_emb_context, 0.0f);
}
const int txt_tokens = encoder_hidden_states.shape[1];
......@@ -934,7 +943,8 @@ std::tuple<Tensor, Tensor> FluxModel::forward_layer(
} else if (layer >= transformer_blocks.size() && controlnet_single_block_samples.valid()) {
const int num_controlnet_single_block_samples = controlnet_single_block_samples.shape[0];
int interval_control = ceilDiv(single_transformer_blocks.size(), static_cast<size_t>(num_controlnet_single_block_samples));
int interval_control =
ceilDiv(single_transformer_blocks.size(), static_cast<size_t>(num_controlnet_single_block_samples));
int block_index = (layer - transformer_blocks.size()) / interval_control;
// Xlabs ControlNet
// block_index = layer % num_controlnet_single_block_samples
......@@ -944,7 +954,7 @@ std::tuple<Tensor, Tensor> FluxModel::forward_layer(
hidden_states.slice(1, txt_tokens, txt_tokens + img_tokens).copy_(slice);
}
return { hidden_states, encoder_hidden_states };
return {hidden_states, encoder_hidden_states};
}
void FluxModel::setAttentionImpl(AttentionImpl impl) {
......@@ -955,6 +965,6 @@ void FluxModel::setAttentionImpl(AttentionImpl impl) {
block->attnImpl = impl;
}
}
void FluxModel::set_residual_callback(std::function<Tensor(const Tensor&)> cb) {
void FluxModel::set_residual_callback(std::function<Tensor(const Tensor &)> cb) {
residual_callback = std::move(cb);
}
......@@ -7,7 +7,7 @@
#include "layernorm.h"
#include <pybind11/functional.h>
namespace pybind11 {
class function;
class function;
}
enum class AttentionImpl {
......@@ -49,6 +49,7 @@ public:
Tensor scale_mlp;
Tensor gate_mlp;
};
public:
AdaLayerNormZero(int dim, bool pre_only, Tensor::ScalarType dtype, Device device);
Output forward(Tensor x, Tensor emb);
......@@ -87,7 +88,13 @@ public:
static constexpr bool USE_4BIT = true;
using GEMM = std::conditional_t<USE_4BIT, GEMM_W4A4, GEMM_W8A8>;
FluxSingleTransformerBlock(int dim, int num_attention_heads, int attention_head_dim, int mlp_ratio, bool use_fp4, Tensor::ScalarType dtype, Device device);
FluxSingleTransformerBlock(int dim,
int num_attention_heads,
int attention_head_dim,
int mlp_ratio,
bool use_fp4,
Tensor::ScalarType dtype,
Device device);
Tensor forward(Tensor hidden_states, Tensor temb, Tensor rotary_emb);
public:
......@@ -113,8 +120,19 @@ public:
static constexpr bool USE_4BIT = true;
using GEMM = std::conditional_t<USE_4BIT, GEMM_W4A4, GEMM_W8A8>;
JointTransformerBlock(int dim, int num_attention_heads, int attention_head_dim, bool context_pre_only, bool use_fp4, Tensor::ScalarType dtype, Device device);
std::tuple<Tensor, Tensor> forward(Tensor hidden_states, Tensor encoder_hidden_states, Tensor temb, Tensor rotary_emb, Tensor rotary_emb_context, float sparsityRatio);
JointTransformerBlock(int dim,
int num_attention_heads,
int attention_head_dim,
bool context_pre_only,
bool use_fp4,
Tensor::ScalarType dtype,
Device device);
std::tuple<Tensor, Tensor> forward(Tensor hidden_states,
Tensor encoder_hidden_states,
Tensor temb,
Tensor rotary_emb,
Tensor rotary_emb_context,
float sparsityRatio);
public:
const int dim;
......@@ -143,8 +161,7 @@ private:
class FluxModel : public Module {
public:
FluxModel(bool use_fp4, bool offload, Tensor::ScalarType dtype, Device device);
Tensor forward(
Tensor hidden_states,
Tensor forward(Tensor hidden_states,
Tensor encoder_hidden_states,
Tensor temb,
Tensor rotary_emb_img,
......@@ -153,8 +170,7 @@ public:
Tensor controlnet_block_samples,
Tensor controlnet_single_block_samples,
bool skip_first_layer = false);
std::tuple<Tensor, Tensor> forward_layer(
size_t layer,
std::tuple<Tensor, Tensor> forward_layer(size_t layer,
Tensor hidden_states,
Tensor encoder_hidden_states,
Tensor temb,
......@@ -164,14 +180,16 @@ public:
Tensor controlnet_single_block_samples);
void setAttentionImpl(AttentionImpl impl);
void set_residual_callback(std::function<Tensor(const Tensor&)> cb);
void set_residual_callback(std::function<Tensor(const Tensor &)> cb);
public:
const Tensor::ScalarType dtype;
std::vector<std::unique_ptr<JointTransformerBlock>> transformer_blocks;
std::vector<std::unique_ptr<FluxSingleTransformerBlock>> single_transformer_blocks;
std::function<Tensor(const Tensor&)> residual_callback;
std::function<Tensor(const Tensor &)> residual_callback;
private:
bool offload;
};
......@@ -9,16 +9,12 @@
using namespace nunchaku;
GEMM_F16::GEMM_F16(int in_features, int out_features, bool use_bias, Tensor::ScalarType dtype, Device device) :
in_features(in_features), out_features(out_features)
{
GEMM_F16::GEMM_F16(int in_features, int out_features, bool use_bias, Tensor::ScalarType dtype, Device device)
: in_features(in_features), out_features(out_features) {
this->weight = Tensor::allocate({out_features, in_features}, dtype, device);
this->bias = use_bias ? Tensor::allocate({out_features}, dtype, device) : Tensor{};
registerParams
(weight, "weight", ParamFlags::LazyLoad)
(bias, "bias")
;
registerParams(weight, "weight", ParamFlags::LazyLoad)(bias, "bias");
}
Tensor GEMM_F16::forward(Tensor x) {
......@@ -26,9 +22,9 @@ Tensor GEMM_F16::forward(Tensor x) {
return out;
}
GEMV_AWQ::GEMV_AWQ(int in_features, int out_features, bool use_bias, Tensor::ScalarType dtype, Device device) :
in_features(in_features), out_features(out_features), group_size(64), lora_rank(0), lora_scale(1.0f), device(device)
{
GEMV_AWQ::GEMV_AWQ(int in_features, int out_features, bool use_bias, Tensor::ScalarType dtype, Device device)
: in_features(in_features), out_features(out_features), group_size(64), lora_rank(0), lora_scale(1.0f),
device(device) {
this->qweight = Tensor::allocate({out_features / 4, ceilDiv(in_features, 8) * 4}, Tensor::INT32, device);
this->wscales = Tensor::allocate({ceilDiv(in_features, group_size), out_features}, dtype, device);
this->wzeros = Tensor::allocate({ceilDiv(in_features, group_size), out_features}, dtype, device);
......@@ -38,14 +34,8 @@ GEMV_AWQ::GEMV_AWQ(int in_features, int out_features, bool use_bias, Tensor::Sca
this->lora_down = Tensor::allocate({lora_rank, in_features}, dtype, device, true);
this->lora_up = Tensor::allocate({out_features, lora_rank}, dtype, device, true);
registerParams
(qweight, "qweight", ParamFlags::LazyLoad)
(wscales, "wscales")
(wzeros, "wzeros")
(bias, "bias")
(lora_down, "lora_down", ParamFlags::Optional)
(lora_up, "lora_up", ParamFlags::Optional)
;
registerParams(qweight, "qweight", ParamFlags::LazyLoad)(wscales, "wscales")(wzeros, "wzeros")(bias, "bias")(
lora_down, "lora_down", ParamFlags::Optional)(lora_up, "lora_up", ParamFlags::Optional);
}
void GEMV_AWQ::loadParam(std::string key, Tensor &dst, Tensor src) {
......@@ -95,15 +85,12 @@ Tensor GEMV_AWQ::forward(Tensor x) {
return out;
}
#define NO_LORA_FUSION 0
GEMM_W4A4::GEMM_W4A4(int in_features, int out_features, bool bias, bool use_fp4, Tensor::ScalarType dtype, Device device) :
in_features(in_features), out_features(out_features),
in_features_pad(ceilDiv(in_features, 128) * 128), out_features_pad(ceilDiv(out_features, 128) * 128),
use_fp4(use_fp4),
lora_rank(0), dtype(dtype), device(device)
{
GEMM_W4A4::GEMM_W4A4(
int in_features, int out_features, bool bias, bool use_fp4, Tensor::ScalarType dtype, Device device)
: in_features(in_features), out_features(out_features), in_features_pad(ceilDiv(in_features, 128) * 128),
out_features_pad(ceilDiv(out_features, 128) * 128), use_fp4(use_fp4), lora_rank(0), dtype(dtype), device(device) {
this->qweight = Tensor::allocate({out_features_pad, in_features_pad / 2}, Tensor::INT8, device, true);
if (use_fp4) {
this->wscales = Tensor::allocate({in_features_pad / 16, out_features_pad}, Tensor::FP8_E4M3, device, true);
......@@ -125,16 +112,9 @@ GEMM_W4A4::GEMM_W4A4(int in_features, int out_features, bool bias, bool use_fp4,
this->wcscales = Tensor::allocate({0}, dtype, device, true);
registerParams
(qweight, "qweight", ParamFlags::LazyLoad)
(wscales, "wscales")
(this->bias, "bias")
(lora_down, "lora_down", ParamFlags::Optional)
(lora_up, "lora_up", ParamFlags::Optional)
(smooth, "smooth")
(wtscale, "wtscale", ParamFlags::Optional)
(wcscales, "wcscales", ParamFlags::Optional)
;
registerParams(qweight, "qweight", ParamFlags::LazyLoad)(wscales, "wscales")(this->bias, "bias")(
lora_down, "lora_down", ParamFlags::Optional)(lora_up, "lora_up", ParamFlags::Optional)(smooth, "smooth")(
wtscale, "wtscale", ParamFlags::Optional)(wcscales, "wcscales", ParamFlags::Optional);
#if NO_LORA_FUSION
checkCUBLAS(cublasCreate(&handle));
......@@ -181,11 +161,21 @@ Tensor GEMM_W4A4::forward_silu(Tensor x) {
return std::get<Tensor>(this->forward(x, FuseOptions::SILU, nullptr));
}
std::variant<Tensor, GEMM_W4A4::QuantizedActivation> GEMM_W4A4::forward(Tensor x, FuseOptions fuse, GEMM_W4A4 *nextGEMM) {
std::variant<Tensor, GEMM_W4A4::QuantizedActivation>
GEMM_W4A4::forward(Tensor x, FuseOptions fuse, GEMM_W4A4 *nextGEMM) {
return forward_quant(quantize(x, false), fuse, nextGEMM);
}
void GEMM_W4A4::forward(Tensor x, Tensor out, Tensor pool, Tensor norm_q, Tensor norm_k, Tensor rotary_emb, Tensor out_q, Tensor out_k, Tensor out_v, int numTokens) {
void GEMM_W4A4::forward(Tensor x,
Tensor out,
Tensor pool,
Tensor norm_q,
Tensor norm_k,
Tensor rotary_emb,
Tensor out_q,
Tensor out_k,
Tensor out_v,
int numTokens) {
QuantizedActivation qact = quantize(x, false);
#if !NO_LORA_FUSION
......@@ -198,17 +188,59 @@ void GEMM_W4A4::forward(Tensor x, Tensor out, Tensor pool, Tensor norm_q, Tensor
debug("gemm.nolora.out", out);
#endif
kernels::gemm_w4a4(
qact.act, qweight, out, {}, qact.ascales, wscales, {}, pool, qact.lora_act, this->lora_up, {}, {}, norm_q, norm_k, rotary_emb, this->bias, {}, {}, {}, qact.is_unsigned, this->lora_scales, false,
use_fp4, *this->wtscale.data_ptr<float>(), wcscales.numel() > 0 ? wcscales: Tensor{},
out_q, out_k, out_v, numTokens
);
kernels::gemm_w4a4(qact.act,
qweight,
out,
{},
qact.ascales,
wscales,
{},
pool,
qact.lora_act,
this->lora_up,
{},
{},
norm_q,
norm_k,
rotary_emb,
this->bias,
{},
{},
{},
qact.is_unsigned,
this->lora_scales,
false,
use_fp4,
*this->wtscale.data_ptr<float>(),
wcscales.numel() > 0 ? wcscales : Tensor{},
out_q,
out_k,
out_v,
numTokens);
debug("gemm.out", out);
#else
const int M = (int)qact.act.numel() / qact.act.shape[-1];
kernels::gemm_w4a4(qact.act, qweight, out, {}, qact.ascales, wscales, {}, pool, {}, {}, {}, {}, norm_q, norm_k, rotary_emb, this->bias, {}, qact.is_unsigned, this->lora_scales);
kernels::gemm_w4a4(qact.act,
qweight,
out,
{},
qact.ascales,
wscales,
{},
pool,
{},
{},
{},
{},
norm_q,
norm_k,
rotary_emb,
this->bias,
{},
qact.is_unsigned,
this->lora_scales);
nvtxRangePushA("LoraUp");
......@@ -216,10 +248,12 @@ void GEMM_W4A4::forward(Tensor x, Tensor out, Tensor pool, Tensor norm_q, Tensor
static const half zero = 0.0;
// lora_up: [M, R] * [OC, R] => [M, OC]
// cublas view: [OC, R] * [M, R]^T
checkCUBLAS(cublasHgemm(
handle,
CUBLAS_OP_T, CUBLAS_OP_N,
this->out_features, M, this->lora_rank,
checkCUBLAS(cublasHgemm(handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
this->out_features,
M,
this->lora_rank,
&one,
this->lora_up.data_ptr<half>(),
this->lora_rank,
......@@ -233,7 +267,8 @@ void GEMM_W4A4::forward(Tensor x, Tensor out, Tensor pool, Tensor norm_q, Tensor
#endif
}
std::variant<Tensor, GEMM_W4A4::QuantizedActivation> GEMM_W4A4::forward_quant(QuantizedActivation qact, FuseOptions fuse, GEMM_W4A4 *nextGEMM) {
std::variant<Tensor, GEMM_W4A4::QuantizedActivation>
GEMM_W4A4::forward_quant(QuantizedActivation qact, FuseOptions fuse, GEMM_W4A4 *nextGEMM) {
Tensor out;
QuantizedActivation qout;
......@@ -280,11 +315,35 @@ std::variant<Tensor, GEMM_W4A4::QuantizedActivation> GEMM_W4A4::forward_quant(Qu
}
#endif
kernels::gemm_w4a4(
qact.act, qweight, out, qout.act, qact.ascales, wscales, qout.ascales, {}, qact.lora_act, this->lora_up, next_lora, qout.lora_act, {}, {}, {}, this->bias, next_smooth, {}, {}, qact.is_unsigned, this->lora_scales, fuse == FuseOptions::SILU,
use_fp4, *this->wtscale.data_ptr<float>(), wcscales.numel() > 0 ? wcscales: Tensor{},
{}, {}, {}, 0
);
kernels::gemm_w4a4(qact.act,
qweight,
out,
qout.act,
qact.ascales,
wscales,
qout.ascales,
{},
qact.lora_act,
this->lora_up,
next_lora,
qout.lora_act,
{},
{},
{},
this->bias,
next_smooth,
{},
{},
qact.is_unsigned,
this->lora_scales,
fuse == FuseOptions::SILU,
use_fp4,
*this->wtscale.data_ptr<float>(),
wcscales.numel() > 0 ? wcscales : Tensor{},
{},
{},
{},
0);
if (fuse == FuseOptions::EMPTY || fuse == FuseOptions::SILU) {
debug("gemm.out", out);
......@@ -294,7 +353,6 @@ std::variant<Tensor, GEMM_W4A4::QuantizedActivation> GEMM_W4A4::forward_quant(Qu
debug("gemm.lora_act_out", qout.lora_act);
}
#else
if (!out.valid()) {
auto shape = TensorShape(qact.act.shape.dataExtent);
......@@ -302,7 +360,25 @@ std::variant<Tensor, GEMM_W4A4::QuantizedActivation> GEMM_W4A4::forward_quant(Qu
out = Tensor::allocate(shape, Tensor::FP16, qweight.device());
}
kernels::gemm_w4a4(qact.act, qweight, out, qout.act, qact.ascales, wscales, qout.ascales, {}, {}, {}, {}, {}, {}, {}, {}, this->bias, next_smooth, qact.is_unsigned, this->lora_scales);
kernels::gemm_w4a4(qact.act,
qweight,
out,
qout.act,
qact.ascales,
wscales,
qout.ascales,
{},
{},
{},
{},
{},
{},
{},
{},
this->bias,
next_smooth,
qact.is_unsigned,
this->lora_scales);
nvtxRangePushA("LoraUp");
......@@ -312,10 +388,12 @@ std::variant<Tensor, GEMM_W4A4::QuantizedActivation> GEMM_W4A4::forward_quant(Qu
// lora_up: [M, R] * [OC, R]^T => [M, OC]
// cublas view: [R, OC]^T * [R, M] => [OC, M]
// lora_up layout wrong?
checkCUBLAS(cublasHgemm(
handle,
CUBLAS_OP_T, CUBLAS_OP_N,
this->out_features, M, this->lora_rank,
checkCUBLAS(cublasHgemm(handle,
CUBLAS_OP_T,
CUBLAS_OP_N,
this->out_features,
M,
this->lora_rank,
&one,
this->lora_up.data_ptr<half>(),
this->lora_rank,
......@@ -332,10 +410,12 @@ std::variant<Tensor, GEMM_W4A4::QuantizedActivation> GEMM_W4A4::forward_quant(Qu
// IC is for next lora (OC of this layer)
// lora_down: [M, IC] * [IC, R] => [M, R]
// cublas view: [R, IC] * [IC, M] => [R, M]
checkCUBLAS(cublasHgemm(
handle,
CUBLAS_OP_N, CUBLAS_OP_N,
this->lora_rank, M, this->out_features,
checkCUBLAS(cublasHgemm(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
this->lora_rank,
M,
this->out_features,
&one,
next_lora.data_ptr<half>(),
this->lora_rank,
......@@ -383,7 +463,8 @@ GEMM_W4A4::QuantizedActivation GEMM_W4A4::quantize(Tensor x, bool fuse_glu) {
debug("quantize.x", x);
debug("quantize.smooth", this->smooth);
kernels::quantize_w4a4_act_fuse_lora(x, qact.act, qact.ascales, this->lora_down, qact.lora_act, this->smooth, fuse_glu, use_fp4);
kernels::quantize_w4a4_act_fuse_lora(
x, qact.act, qact.ascales, this->lora_down, qact.lora_act, this->smooth, fuse_glu, use_fp4);
debug("quantize.qact", qact.act);
debug("quantize.ascales", qact.ascales);
......@@ -396,10 +477,12 @@ GEMM_W4A4::QuantizedActivation GEMM_W4A4::quantize(Tensor x, bool fuse_glu) {
// lora_down: [M, IC] * [IC, R] => [M, R]
// cublas view: [R, IC] * [IC, M]
checkCUBLAS(cublasHgemm(
handle,
CUBLAS_OP_N, CUBLAS_OP_N,
this->lora_rank, M, this->in_features,
checkCUBLAS(cublasHgemm(handle,
CUBLAS_OP_N,
CUBLAS_OP_N,
this->lora_rank,
M,
this->in_features,
&one,
lora_down.data_ptr<half>(),
this->lora_rank,
......@@ -418,18 +501,13 @@ GEMM_W4A4::QuantizedActivation GEMM_W4A4::quantize(Tensor x, bool fuse_glu) {
return qact;
}
GEMM_W8A8::GEMM_W8A8(int in_features, int out_features, bool bias, Tensor::ScalarType dtype, Device device) :
in_features(in_features), out_features(out_features), dtype(dtype)
{
GEMM_W8A8::GEMM_W8A8(int in_features, int out_features, bool bias, Tensor::ScalarType dtype, Device device)
: in_features(in_features), out_features(out_features), dtype(dtype) {
this->qweight = Tensor::allocate({out_features, in_features}, Tensor::INT8, device);
this->wscales = Tensor::allocate({out_features}, dtype, device);
this->bias = bias ? Tensor::allocate({out_features}, dtype, device, true) : Tensor{};
registerParams
(qweight, "qweight", ParamFlags::LazyLoad)
(wscales, "wscales")
(this->bias, "bias")
;
registerParams(qweight, "qweight", ParamFlags::LazyLoad)(wscales, "wscales")(this->bias, "bias");
}
GEMM_W8A8::QuantizedActivation GEMM_W8A8::quantize(Tensor x, bool fuse_glu) {
......@@ -461,16 +539,11 @@ Tensor GEMM_W8A8::forward_quant(QuantizedActivation qact) {
return out;
}
DWCONV::DWCONV(int in_features, bool use_bias, Tensor::ScalarType dtype, Device device) :
in_features(in_features)
{
DWCONV::DWCONV(int in_features, bool use_bias, Tensor::ScalarType dtype, Device device) : in_features(in_features) {
this->weight = Tensor::allocate({in_features, 3, 3, 1}, dtype, device);
this->bias = use_bias ? Tensor::allocate({in_features}, dtype, device) : Tensor{};
registerParams
(this->weight, "weight")
(this->bias, "bias")
;
registerParams(this->weight, "weight")(this->bias, "bias");
}
Tensor DWCONV::forward(Tensor x) {
......
......@@ -37,6 +37,7 @@ public:
float lora_scale;
const Device device;
public:
Tensor qweight;
Tensor wscales;
......@@ -69,12 +70,18 @@ public:
Tensor forward(Tensor x);
Tensor forward_silu(Tensor x);
std::variant<Tensor, QuantizedActivation> forward(Tensor x, FuseOptions fuse, GEMM_W4A4 *nextGEMM = nullptr);
void forward(
Tensor x, Tensor out,
Tensor pool = {}, Tensor norm_q = {}, Tensor norm_k = {}, Tensor rotary_emb = {},
Tensor out_q = {}, Tensor out_k = {}, Tensor out_v = {}, int numTokens = 0
);
std::variant<Tensor, QuantizedActivation> forward_quant(QuantizedActivation qact, FuseOptions fuse, GEMM_W4A4 *nextGEMM = nullptr);
void forward(Tensor x,
Tensor out,
Tensor pool = {},
Tensor norm_q = {},
Tensor norm_k = {},
Tensor rotary_emb = {},
Tensor out_q = {},
Tensor out_k = {},
Tensor out_v = {},
int numTokens = 0);
std::variant<Tensor, QuantizedActivation>
forward_quant(QuantizedActivation qact, FuseOptions fuse, GEMM_W4A4 *nextGEMM = nullptr);
Tensor forward_quant(QuantizedActivation qact);
public:
......@@ -118,13 +125,16 @@ public:
Tensor act;
Tensor ascales;
};
public:
GEMM_W8A8(int in_features, int out_features, bool bias, Tensor::ScalarType dtype, Device device);
public:
QuantizedActivation quantize(Tensor x, bool fuse_glu);
Tensor forward_quant(QuantizedActivation qact);
Tensor forward(Tensor x) { return forward_quant(quantize(x, false)); }
Tensor forward(Tensor x) {
return forward_quant(quantize(x, false));
}
public:
const int in_features;
......
......@@ -108,7 +108,8 @@ public:
dst = Tensor::allocate(lazy.shape, lazy.type, lazy.device);
if (!src.valid() && !checkFlag(param.flags, ParamFlags::Optional)) {
throw std::runtime_error(spdlog::fmt_lib::format("Lazy load: Tensor {} has no src", m->getPrefix() + key));
throw std::runtime_error(
spdlog::fmt_lib::format("Lazy load: Tensor {} has no src", m->getPrefix() + key));
}
m->loadParam(key, dst, src);
}
......@@ -127,14 +128,10 @@ public:
});
}
void setLazyLoad(bool val) {
traverse([val](Module *m) {
m->enabledLazyLoad = val;
});
traverse([val](Module *m) { m->enabledLazyLoad = val; });
}
void setAutoCastFP16(bool val) {
traverse([val](Module *m) {
m->enabledAutoCastFP16 = val;
});
traverse([val](Module *m) { m->enabledAutoCastFP16 = val; });
}
protected:
......@@ -143,7 +140,8 @@ protected:
Tensor::FP16,
Tensor::BF16,
};
if (enabledAutoCastFP16 && dst.scalar_type() != src.scalar_type() && whitelist.contains(dst.scalar_type()) && whitelist.contains(src.scalar_type())) {
if (enabledAutoCastFP16 && dst.scalar_type() != src.scalar_type() && whitelist.contains(dst.scalar_type()) &&
whitelist.contains(src.scalar_type())) {
copyWithCast(dst, src);
} else {
dst.copy_(src);
......@@ -227,8 +225,7 @@ struct LayerOffloadHelper {
std::unique_ptr<CUDAEventWrapper> eventLoadDone;
LayerOffloadHelper(bool offload, int numLayers, func_t funcCompute, func_t funcLoad, func_t funcUnload)
: offload(offload), numLayers(numLayers), funcCompute(funcCompute), funcLoad(funcLoad), funcUnload(funcUnload)
{
: offload(offload), numLayers(numLayers), funcCompute(funcCompute), funcLoad(funcLoad), funcUnload(funcUnload) {
if (offload) {
streamCompute = std::make_unique<CUDAStreamWrapper>();
streamLoad = std::make_unique<CUDAStreamWrapper>();
......@@ -305,11 +302,11 @@ private:
}
}
#ifdef _WIN32
#ifdef _WIN32
return true;
#else
#else
return false;
#endif
#endif
}
void workaroundFlush() {
if (!needWorkaround) {
......
......@@ -10,18 +10,11 @@
using spdlog::fmt_lib::format;
using namespace nunchaku;
SanaLinearAttention::SanaLinearAttention(int dim, bool bias, bool pag, bool use_fp4, Tensor::ScalarType dtype, Device device) :
dim(dim),
dim_pad(ceilDiv(dim, 128) * 128),
qkv_proj(dim, dim_pad * 3, bias, use_fp4, dtype, device),
out_proj(dim_pad, dim, bias, use_fp4, dtype, device),
pag_to_v(std::nullopt)
{
registerChildren
(qkv_proj, "qkv_proj")
(out_proj, "out_proj")
;
SanaLinearAttention::SanaLinearAttention(
int dim, bool bias, bool pag, bool use_fp4, Tensor::ScalarType dtype, Device device)
: dim(dim), dim_pad(ceilDiv(dim, 128) * 128), qkv_proj(dim, dim_pad * 3, bias, use_fp4, dtype, device),
out_proj(dim_pad, dim, bias, use_fp4, dtype, device), pag_to_v(std::nullopt) {
registerChildren(qkv_proj, "qkv_proj")(out_proj, "out_proj");
if (pag) {
pag_to_v.emplace(dim, dim_pad, bias, use_fp4, dtype, device);
......@@ -57,21 +50,35 @@ Tensor SanaLinearAttention::forward(Tensor x, Tensor out) {
Tensor q = Tensor::allocate({batch_size, num_tokens_pad, dim_pad}, x.dtype(), x.device());
Tensor vk = Tensor::allocate({batch_size, num_heads, HEAD_DIM + 1, HEAD_DIM}, Tensor::FP32, x.device());
kernels::gemm_w4a4(
qact.act,
kernels::gemm_w4a4(qact.act,
qkv_proj.qweight,
{},
{},
qact.ascales,
qkv_proj.wscales,
{}, {}, qact.lora_act, qkv_proj.lora_up, {}, {}, {}, {}, {}, qkv_proj.bias, {},
vk, q,
qact.is_unsigned, qkv_proj.lora_scales, false,
{},
{},
qact.lora_act,
qkv_proj.lora_up,
{},
{},
{},
{},
{},
qkv_proj.bias,
{},
vk,
q,
qact.is_unsigned,
qkv_proj.lora_scales,
false,
qkv_proj.use_fp4,
*qkv_proj.wtscale.data_ptr<float>(),
qkv_proj.wcscales.numel() > 0 ? qkv_proj.wcscales : Tensor{},
{}, {}, {}, 0
);
{},
{},
{},
0);
debug("vk", vk);
debug("q", q);
......@@ -88,7 +95,6 @@ Tensor SanaLinearAttention::forward(Tensor x, Tensor out) {
q = q_unpad;
}
// kernels::gemm_w8a8_fuse_litela(qact.act, qkv.qweight, q, vk, qact.ascales, qkv.wscales);
// return out_proj.forward(q);
......@@ -129,17 +135,13 @@ Tensor SanaLinearAttention::forward_pag(Tensor x, bool cfg) {
return out;
}
MultiHeadCrossAttention::MultiHeadCrossAttention(int num_heads, int head_dim, bool use_fp4, Tensor::ScalarType dtype, Device device) :
num_heads(num_heads), head_dim(head_dim),
MultiHeadCrossAttention::MultiHeadCrossAttention(
int num_heads, int head_dim, bool use_fp4, Tensor::ScalarType dtype, Device device)
: num_heads(num_heads), head_dim(head_dim),
q_linear(num_heads * head_dim, num_heads * head_dim, true, use_fp4, dtype, device),
kv_linear(num_heads * head_dim, num_heads * head_dim * 2, true, dtype, device),
out_proj(num_heads * head_dim, num_heads * head_dim, true, use_fp4, dtype, device)
{
registerChildren
(q_linear, "q_linear")
(kv_linear, "kv_linear")
(out_proj, "out_proj")
;
out_proj(num_heads * head_dim, num_heads * head_dim, true, use_fp4, dtype, device) {
registerChildren(q_linear, "q_linear")(kv_linear, "kv_linear")(out_proj, "out_proj");
}
Tensor MultiHeadCrossAttention::forward(Tensor x, Tensor cond, Tensor cu_seqlens_img, Tensor cu_seqlens_txt) {
......@@ -161,16 +163,22 @@ Tensor MultiHeadCrossAttention::forward(Tensor x, Tensor cond, Tensor cu_seqlens
Tensor k = kv.slice(1, 0, num_heads);
Tensor v = kv.slice(1, num_heads, num_heads * 2);
Tensor attn_output = mha_varlen_fwd(
q, k, v,
cu_seqlens_img, cu_seqlens_txt,
num_tokens_img, num_tokens_txt,
Tensor attn_output = mha_varlen_fwd(q,
k,
v,
cu_seqlens_img,
cu_seqlens_txt,
num_tokens_img,
num_tokens_txt,
0.0f,
pow(q.shape[-1], (-0.5)),
false, false,
-1, -1,
false
).front().view({batch_size, num_tokens_img, num_heads * head_dim});
false,
false,
-1,
-1,
false)
.front()
.view({batch_size, num_tokens_img, num_heads * head_dim});
// Tensor attn_output = mha_fwd(q, k, v,
// 0.0f,
......@@ -181,17 +189,13 @@ Tensor MultiHeadCrossAttention::forward(Tensor x, Tensor cond, Tensor cu_seqlens
return out_proj.forward(attn_output);
}
SanaGLUMBConv::SanaGLUMBConv(int in_features, int hidden_features, bool use_fp4, Tensor::ScalarType dtype, Device device) :
in_features(in_features), hidden_features(hidden_features),
SanaGLUMBConv::SanaGLUMBConv(
int in_features, int hidden_features, bool use_fp4, Tensor::ScalarType dtype, Device device)
: in_features(in_features), hidden_features(hidden_features),
inverted_conv(in_features, hidden_features * 2, true, use_fp4, dtype, device),
depth_conv(hidden_features * 2, true, dtype, device),
point_conv(hidden_features, in_features, false, use_fp4, dtype, device)
{
registerChildren
(inverted_conv, "inverted_conv")
(depth_conv, "depth_conv")
(point_conv, "point_conv")
;
point_conv(hidden_features, in_features, false, use_fp4, dtype, device) {
registerChildren(inverted_conv, "inverted_conv")(depth_conv, "depth_conv")(point_conv, "point_conv");
}
Tensor SanaGLUMBConv::forward(Tensor x, int H, int W) {
......@@ -208,28 +212,34 @@ Tensor SanaGLUMBConv::forward(Tensor x, int H, int W) {
return point_conv.forward_quant(qact);
}
SanaLinearTransformerBlock::SanaLinearTransformerBlock(int hidden_size, int intermediate_size, int num_cross_attention_heads, bool pag, bool use_fp4, Tensor::ScalarType dtype, Device device) :
hidden_size(hidden_size), num_cross_attention_heads(num_cross_attention_heads),
SanaLinearTransformerBlock::SanaLinearTransformerBlock(int hidden_size,
int intermediate_size,
int num_cross_attention_heads,
bool pag,
bool use_fp4,
Tensor::ScalarType dtype,
Device device)
: hidden_size(hidden_size), num_cross_attention_heads(num_cross_attention_heads),
attn(hidden_size, false, pag, use_fp4, dtype, device),
cross_attn(num_cross_attention_heads, hidden_size / num_cross_attention_heads, use_fp4, dtype, device),
ff(hidden_size, intermediate_size, use_fp4, dtype, device),
norm1(hidden_size, 1e-6, false, dtype, device),
norm2(hidden_size, 1e-6, false, dtype, device)
{
ff(hidden_size, intermediate_size, use_fp4, dtype, device), norm1(hidden_size, 1e-6, false, dtype, device),
norm2(hidden_size, 1e-6, false, dtype, device) {
this->scale_shift_table = Tensor::allocate({6, hidden_size}, dtype, device);
registerChildren
(attn, "attn")
(cross_attn, "cross_attn")
(ff, "ff")
;
registerChildren(attn, "attn")(cross_attn, "cross_attn")(ff, "ff");
registerParams
(this->scale_shift_table, "scale_shift_table")
;
registerParams(this->scale_shift_table, "scale_shift_table");
}
Tensor SanaLinearTransformerBlock::forward(Tensor hidden_states, Tensor encoder_hidden_states, Tensor timestep, Tensor cu_seqlens_img, Tensor cu_seqlens_txt, int H, int W, bool pag, bool cfg) {
Tensor SanaLinearTransformerBlock::forward(Tensor hidden_states,
Tensor encoder_hidden_states,
Tensor timestep,
Tensor cu_seqlens_img,
Tensor cu_seqlens_txt,
int H,
int W,
bool pag,
bool cfg) {
nvtxRangePushA("SanaLinearTransformerBlock");
......@@ -311,9 +321,7 @@ Tensor SanaLinearTransformerBlock::forward(Tensor hidden_states, Tensor encoder_
return hidden_states;
}
SanaModel::SanaModel(SanaConfig config, Tensor::ScalarType dtype, Device device) :
config(config)
{
SanaModel::SanaModel(SanaConfig config, Tensor::ScalarType dtype, Device device) : config(config) {
const int inner_dim = config.num_attention_heads * config.attention_head_dim;
for (int i = 0; i < config.num_layers; i++) {
transformer_blocks.push_back(std::make_unique<SanaLinearTransformerBlock>(
......@@ -322,20 +330,34 @@ SanaModel::SanaModel(SanaConfig config, Tensor::ScalarType dtype, Device device)
config.num_cross_attention_heads,
std::find(config.pag_layers.begin(), config.pag_layers.end(), i) != config.pag_layers.end(),
config.use_fp4,
dtype, device
));
dtype,
device));
registerChildren(*transformer_blocks.back(), format("transformer_blocks.{}", i));
}
}
Tensor SanaModel::forward(Tensor hidden_states, Tensor encoder_hidden_states, Tensor timestep, Tensor cu_seqlens_img, Tensor cu_seqlens_txt, int H, int W, bool pag, bool cfg, bool skip_first_layer) {
Tensor SanaModel::forward(Tensor hidden_states,
Tensor encoder_hidden_states,
Tensor timestep,
Tensor cu_seqlens_img,
Tensor cu_seqlens_txt,
int H,
int W,
bool pag,
bool cfg,
bool skip_first_layer) {
for (int i = (skip_first_layer ? 1 : 0); i < config.num_layers; i++) {
auto &&block = transformer_blocks[i];
hidden_states = block->forward(
hidden_states, encoder_hidden_states, timestep, cu_seqlens_img, cu_seqlens_txt, H, W,
pag && std::find(config.pag_layers.begin(), config.pag_layers.end(), i) != config.pag_layers.end(),
cfg
);
hidden_states = block->forward(hidden_states,
encoder_hidden_states,
timestep,
cu_seqlens_img,
cu_seqlens_txt,
H,
W,
pag && std::find(config.pag_layers.begin(), config.pag_layers.end(), i) !=
config.pag_layers.end(),
cfg);
}
return hidden_states;
}
......@@ -57,9 +57,23 @@ private:
class SanaLinearTransformerBlock : public Module {
public:
SanaLinearTransformerBlock(int hidden_size, int intermediate_size, int num_cross_attention_heads, bool pag, bool use_fp4, Tensor::ScalarType dtype, Device device);
Tensor forward(Tensor hidden_states, Tensor encoder_hidden_states, Tensor timestep, Tensor cu_seqlens_img, Tensor cu_seqlens_txt, int H, int W, bool pag, bool cfg);
SanaLinearTransformerBlock(int hidden_size,
int intermediate_size,
int num_cross_attention_heads,
bool pag,
bool use_fp4,
Tensor::ScalarType dtype,
Device device);
Tensor forward(Tensor hidden_states,
Tensor encoder_hidden_states,
Tensor timestep,
Tensor cu_seqlens_img,
Tensor cu_seqlens_txt,
int H,
int W,
bool pag,
bool cfg);
public:
const int hidden_size;
......@@ -89,7 +103,16 @@ struct SanaConfig {
class SanaModel : public Module {
public:
SanaModel(SanaConfig config, Tensor::ScalarType dtype, Device device);
Tensor forward(Tensor hidden_states, Tensor encoder_hidden_states, Tensor timestep, Tensor cu_seqlens_img, Tensor cu_seqlens_txt, int H, int W, bool pag, bool cfg, bool skip_first_layer);
Tensor forward(Tensor hidden_states,
Tensor encoder_hidden_states,
Tensor timestep,
Tensor cu_seqlens_img,
Tensor cu_seqlens_txt,
int H,
int W,
bool pag,
bool cfg,
bool skip_first_layer);
public:
const SanaConfig config;
......