import torch
import triton
from utils import QUANTILES
from utils import SingleBenchmarkRunInput
from utils import SingleBenchmarkRunOutput
from utils import _test_memory
from utils import parse_benchmark_script_args
from utils import run_benchmarks
from liger_kernel.transformers.sparsemax import LigerSparsemax
from liger_kernel.utils import infer_device
device = infer_device()
def torch_sparsemax(input_tensor: torch.Tensor, dim: int = -1) -> torch.Tensor:
input_dims = input_tensor.dim()
if dim < 0:
dim = input_dims + dim
input_sorted, _ = torch.sort(input_tensor, dim=dim, descending=True)
cumsum_input = torch.cumsum(input_sorted, dim=dim)
input_size = input_tensor.size(dim)
range_tensor = torch.arange(1, input_size + 1, device=input_tensor.device, dtype=input_tensor.dtype)
shape = [1] * input_dims
shape[dim] = input_size
range_tensor = range_tensor.view(shape)
k_bound = 1 + range_tensor * input_sorted
support = k_bound > cumsum_input
k = support.sum(dim=dim, keepdim=True).clamp(min=1)
support_sum = (input_sorted * support).sum(dim=dim, keepdim=True)
tau = (support_sum - 1) / k
return torch.clamp(input_tensor - tau, min=0)
class TorchSparsemax(torch.nn.Module):
def __init__(self, dim: int = -1):
super().__init__()
self.dim = dim
def forward(self, x: torch.Tensor) -> torch.Tensor:
return torch_sparsemax(x, dim=self.dim)
def bench_speed_sparsemax(input: SingleBenchmarkRunInput) -> SingleBenchmarkRunOutput:
V = input.x
provider = input.kernel_provider
mode = input.kernel_operation_mode
extra_benchmark_config = input.extra_benchmark_config
B = extra_benchmark_config["B"]
T = extra_benchmark_config["T"]
dim = extra_benchmark_config["dim"]
dtype = extra_benchmark_config["dtype"]
x_shape = (B * T, V)
torch_sparsemax_module = TorchSparsemax(dim=dim).to(device)
liger_sparsemax_module = LigerSparsemax(dim=dim).to(device)
x = torch.randn(x_shape, dtype=dtype, device=device)
dy = torch.randn_like(x)
x.requires_grad_(True)
# utility functions
def y_fwd():
if provider == "liger":
return liger_sparsemax_module(x)
elif provider == "torch":
return torch_sparsemax_module(x)
if mode == "forward":
ms_50, ms_20, ms_80 = triton.testing.do_bench(
y_fwd,
grad_to_none=[x],
rep=500,
quantiles=QUANTILES,
)
elif mode == "backward":
y = y_fwd()
ms_50, ms_20, ms_80 = triton.testing.do_bench(
lambda: y.backward(dy, retain_graph=True),
grad_to_none=[x],
rep=500,
quantiles=QUANTILES,
)
elif mode == "full":
def full():
y = y_fwd()
y.backward(dy, retain_graph=True)
ms_50, ms_20, ms_80 = triton.testing.do_bench(
full,
grad_to_none=[x],
rep=500,
quantiles=QUANTILES,
)
return SingleBenchmarkRunOutput(
y_20=ms_20,
y_50=ms_50,
y_80=ms_80,
)
def bench_memory_sparsemax(input: SingleBenchmarkRunInput) -> SingleBenchmarkRunOutput:
V = input.x
provider = input.kernel_provider
extra_benchmark_config = input.extra_benchmark_config
B = extra_benchmark_config["B"]
T = extra_benchmark_config["T"]
dim = extra_benchmark_config["dim"]
dtype = extra_benchmark_config["dtype"]
x_shape = (B * T, V)
torch_sparsemax_module = TorchSparsemax(dim=dim).to(device)
liger_sparsemax_module = LigerSparsemax(dim=dim).to(device)
x = torch.randn(x_shape, dtype=dtype, device=device)
dy = torch.randn_like(x)
x.requires_grad_(True)
# utility functions
def y_fwd():
if provider == "liger":
return liger_sparsemax_module(x)
elif provider == "torch":
return torch_sparsemax_module(x)
def full():
y = y_fwd()
y.backward(dy, retain_graph=True)
mem_50, mem_20, mem_80 = _test_memory(full, quantiles=QUANTILES)
return SingleBenchmarkRunOutput(
y_20=mem_20,
y_50=mem_50,
y_80=mem_80,
)
if __name__ == "__main__":
args = parse_benchmark_script_args()
common_configs = {
"kernel_name": "sparsemax",
"x_name": "V",
"x_label": "feature size",
"x_values": [2**i for i in range(10, 16)],
"kernel_providers": ["liger", "torch"],
"extra_benchmark_configs": [{"B": 4, "T": 512, "dim": -1, "dtype": torch.float32}],
"overwrite": args.overwrite,
}
run_benchmarks(
bench_test_fn=bench_speed_sparsemax,
kernel_operation_modes=["forward", "full", "backward"],
metric_name="speed",
metric_unit="ms",
**common_configs,
)
run_benchmarks(
bench_test_fn=bench_memory_sparsemax,
kernel_operation_modes=["full"],
metric_name="memory",
metric_unit="MB",
**common_configs,
)
import math
import torch
from benchmark_model_configs import compute_seq_len_sweep_config
from benchmark_model_configs import estimate_kernel_peak_memory
from benchmark_model_configs import get_benchmark_model_config
from transformers.models.llama.configuration_llama import LlamaConfig
from transformers.models.llama.modeling_llama import LlamaMLP
from utils import SingleBenchmarkRunInput
from utils import SingleBenchmarkRunOutput
from utils import parse_benchmark_script_args
from utils import run_benchmarks
from utils import run_memory_benchmark
from utils import run_speed_benchmark
from liger_kernel.transformers.swiglu import LigerSwiGLUMLP
from liger_kernel.utils import infer_device
device = infer_device()
def _setup_swiglu(input: SingleBenchmarkRunInput):
"""Create input tensor and SwiGLU layer from benchmark config."""
cfg = input.extra_benchmark_config
llama_config = LlamaConfig(
hidden_size=cfg["hidden_size"],
intermediate_size=cfg["intermediate_size"],
hidden_act=cfg["hidden_act"],
)
x = torch.randn(
cfg["bsz"],
input.x,
cfg["hidden_size"],
device=device,
dtype=cfg["dtype"],
requires_grad=True,
)
if input.kernel_provider == "liger":
layer = LigerSwiGLUMLP(config=llama_config).to(device).to(cfg["dtype"])
elif input.kernel_provider == "huggingface":
layer = LlamaMLP(config=llama_config).to(device).to(cfg["dtype"])
else:
raise ValueError(f"Invalid provider: {input.kernel_provider} for SwiGLU")
return x, layer
def bench_speed_swiglu(input: SingleBenchmarkRunInput) -> SingleBenchmarkRunOutput:
x, layer = _setup_swiglu(input)
return run_speed_benchmark(lambda: layer(x), input.kernel_operation_mode, [x])
def bench_memory_swiglu(input: SingleBenchmarkRunInput) -> SingleBenchmarkRunOutput:
x, layer = _setup_swiglu(input)
return run_memory_benchmark(lambda: layer(x), input.kernel_operation_mode)
if __name__ == "__main__":
args = parse_benchmark_script_args()
model = get_benchmark_model_config(args.model)
probe_seq_len = 1024
def _probe():
probe_input = SingleBenchmarkRunInput(
x=probe_seq_len,
kernel_provider="huggingface",
extra_benchmark_config={
"bsz": 1,
"hidden_size": model.hidden_size,
"intermediate_size": model.intermediate_size,
"hidden_act": "silu",
"dtype": model.dtype,
},
)
x, layer = _setup_swiglu(probe_input)
return layer(x)
peak_bytes = estimate_kernel_peak_memory(probe_fn=_probe)
kernel_bpt = peak_bytes // probe_seq_len
config = compute_seq_len_sweep_config(model, kernel_bytes_per_token=kernel_bpt)
common_configs = {
"kernel_name": "swiglu",
"x_name": "T",
"x_label": "sequence length",
"x_values": [2**i for i in range(10, int(math.log2(config.seq_len)) + 1)],
"kernel_providers": ["liger", "huggingface"],
"extra_benchmark_configs": [
{
"bsz": config.batch_size,
"hidden_size": model.hidden_size,
"intermediate_size": model.intermediate_size,
"hidden_act": "silu",
"dtype": model.dtype,
}
],
"overwrite": args.overwrite,
}
run_benchmarks(
bench_test_fn=bench_speed_swiglu,
kernel_operation_modes=["full", "forward", "backward"],
metric_name="speed",
metric_unit="ms",
**common_configs,
)
run_benchmarks(
bench_test_fn=bench_memory_swiglu,
kernel_operation_modes=["full", "forward", "backward"],
metric_name="memory",
metric_unit="MB",
**common_configs,
)
import math
import torch
import torch.nn as nn
import triton
from transformers.models.llama.configuration_llama import LlamaConfig
from transformers.models.llama.modeling_llama import LlamaMLP
from utils import QUANTILES
from utils import SingleBenchmarkRunInput
from utils import SingleBenchmarkRunOutput
from utils import _test_memory
from utils import parse_benchmark_script_args
from utils import run_benchmarks
from liger_kernel.transformers.geglu import LigerGEGLUMLP
from liger_kernel.transformers.swiglu import LigerSwiGLUMLP
from liger_kernel.transformers.tiled_mlp import LigerTiledGEGLUMLP
from liger_kernel.transformers.tiled_mlp import LigerTiledSwiGLUMLP
from liger_kernel.utils import infer_device
device = infer_device()
# DeepSpeed TiledMLP implementation
# Based on: https://github.com/deepspeedai/DeepSpeed/blob/v0.18.2/deepspeed/runtime/sequence_parallel/ulysses_sp.py#L838
class DeepSpeedTiledMLP(torch.autograd.Function):
"""
DeepSpeed's TiledMLP implementation for fair comparison.
This is the actual DeepSpeed algorithm that performs tiled MLP computation
to massively reduce memory usage with very long sequence lengths.
This module re-computes forward in the backward, so forward occurs twice per iteration.
"""
@staticmethod
def forward(ctx, fn, self, x, shards, compute_params) -> torch.Tensor:
ctx.fn = fn
ctx.self = self
ctx.shards = shards
ctx.compute_params = [p for p in compute_params if p.requires_grad] if compute_params else []
ctx.save_for_backward(x)
# x.shape could be [bs, seqlen, hidden_size] or [seqlen, hidden_size] (moe experts)
x_shards = list(torch.chunk(x, chunks=shards, dim=-2))
with torch.no_grad():
output_shards = [fn(self, x_shard) for x_shard in x_shards]
output_unsharded = torch.cat(output_shards, dim=-2)
return output_unsharded
@staticmethod
def backward(ctx, *grads):
fn = ctx.fn
(x,) = ctx.saved_tensors
self = ctx.self
shards = ctx.shards
compute_params = ctx.compute_params
x_requires_grad = x.requires_grad
x = x.detach()
# detach() unsets x.requires_grad, so restore it
x.requires_grad_(x_requires_grad)
# x.shape could be [bs, seqlen, hidden_size] or [seqlen, hidden_size] (moe experts)
hidden_size = x.shape[-1]
x_shape_orig = x.shape
# flatten bs+seqlen to avoid having stride issues when narrowing into seqlen w/ bs>1
x = x.view(-1, hidden_size)
incoming_grad = grads[0].view(-1, hidden_size)
x_grad = torch.zeros_like(x)
x_shards = list(torch.chunk(x, chunks=shards, dim=0))
for i, x_shard in enumerate(x_shards):
# Tell deepspeed not to add a new grad to its ipg bucket until the last shard is run
# XXX: DDP, FSDP will need something similar to make it work
if compute_params:
if i + 1 < shards:
for param in compute_params:
if hasattr(param, "ds_grad_is_ready"):
param.ds_grad_is_ready = False
else:
# last shard, can add the grad
for param in compute_params:
if hasattr(param, "ds_grad_is_ready"):
param.ds_grad_is_ready = True
x_shard.requires_grad_(x_requires_grad)
# if seqlen is not exactly divisible by shards the last step will be shorter than shard_step
shard_step = x_shards[i].shape[0]
shard_offset = i * x_shards[0].shape[0]
x_shard.grad = x_grad.narrow(0, shard_offset, shard_step).view_as(x_shard)
incoming_grad_shard = incoming_grad.narrow(0, shard_offset, shard_step).view_as(x_shard)
with torch.enable_grad():
output = fn(self, x_shard)
torch.autograd.backward(output, incoming_grad_shard)
# unflatten
x_grad = x_grad.view(x_shape_orig)
return (None, None, x_grad, None, None)
# DeepSpeed TiledMLP wrapper to match our interface
class DeepSpeedTiledMLPWrapper(nn.Module):
"""
Wrapper for DeepSpeed's TiledMLP to match the interface used in benchmarks.
Uses the DeepSpeed TiledMLP algorithm for memory-efficient MLP computation.
"""
def __init__(self, config, num_shards=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.num_shards = num_shards
self.mlp = LlamaMLP(config=config)
def forward(self, x):
# Calculate num_shards if not provided
num_shards = self.num_shards
if num_shards is None:
hidden_size = x.shape[-1]
seqlen = x.shape[-2]
num_shards = math.ceil(seqlen / hidden_size)
num_shards = max(1, num_shards)
# Collect compute parameters for DeepSpeed ZeRO compatibility
compute_params = [
self.mlp.down_proj.weight,
self.mlp.gate_proj.weight,
self.mlp.up_proj.weight,
]
# Define the MLP forward function for DeepSpeed TiledMLP
def mlp_forward(mlp_module, x_input):
return mlp_module.down_proj(mlp_module.act_fn(mlp_module.gate_proj(x_input)) * mlp_module.up_proj(x_input))
# Use DeepSpeed's TiledMLP implementation
return DeepSpeedTiledMLP.apply(
mlp_forward,
self.mlp,
x,
num_shards,
compute_params,
)
def bench_speed_tiled_mlp(input: SingleBenchmarkRunInput) -> SingleBenchmarkRunOutput:
seq_len = input.x
bsz = input.extra_benchmark_config["bsz"]
hidden_size = input.extra_benchmark_config["hidden_size"]
intermediate_size = input.extra_benchmark_config["intermediate_size"]
hidden_act = input.extra_benchmark_config["hidden_act"]
dtype = input.extra_benchmark_config["dtype"]
num_shards = input.extra_benchmark_config.get("num_shards", None)
activation_type = input.extra_benchmark_config["activation_type"]
provider = input.kernel_provider
mode = input.kernel_operation_mode
llama_config = LlamaConfig(
hidden_size=hidden_size,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
)
x_shape = (bsz, seq_len, hidden_size)
# initialize input
x = torch.randn(*x_shape, device=device, dtype=dtype, requires_grad=True)
if activation_type == "geglu":
if provider == "huggingface":
layer = LlamaMLP(config=llama_config).to(device).to(dtype)
elif provider == "liger":
layer = LigerGEGLUMLP(config=llama_config).to(device).to(dtype)
elif provider == "liger_tiled":
layer = LigerTiledGEGLUMLP(config=llama_config, num_shards=num_shards).to(device).to(dtype)
elif provider == "deepspeed_tiled":
layer = DeepSpeedTiledMLPWrapper(config=llama_config, num_shards=num_shards).to(device).to(dtype)
else:
raise ValueError(f"Invalid provider: {provider} for GEGLU")
elif activation_type == "swiglu":
if provider == "huggingface":
layer = LlamaMLP(config=llama_config).to(device).to(dtype)
elif provider == "liger":
layer = LigerSwiGLUMLP(config=llama_config).to(device).to(dtype)
elif provider == "liger_tiled":
layer = LigerTiledSwiGLUMLP(config=llama_config, num_shards=num_shards).to(device).to(dtype)
elif provider == "deepspeed_tiled":
layer = DeepSpeedTiledMLPWrapper(config=llama_config, num_shards=num_shards).to(device).to(dtype)
else:
raise ValueError(f"Invalid provider: {provider} for SwiGLU")
else:
raise ValueError(f"Invalid activation_type: {activation_type}")
def fwd():
return layer(x)
if mode == "forward":
ms_50, ms_20, ms_80 = triton.testing.do_bench(
fwd,
grad_to_none=[x],
rep=10,
quantiles=QUANTILES,
)
elif mode == "backward":
do = torch.randn_like(x)
y = fwd()
ms_50, ms_20, ms_80 = triton.testing.do_bench(
lambda: y.backward(do, retain_graph=True),
grad_to_none=[x],
rep=10,
quantiles=QUANTILES,
)
else:
def full():
y = fwd()
y.backward(torch.randn_like(y), retain_graph=True)
ms_50, ms_20, ms_80 = triton.testing.do_bench(
full,
grad_to_none=[x],
rep=10,
quantiles=QUANTILES,
)
return SingleBenchmarkRunOutput(
y_20=ms_20,
y_50=ms_50,
y_80=ms_80,
)
def bench_memory_tiled_mlp(input: SingleBenchmarkRunInput) -> SingleBenchmarkRunOutput:
seq_len = input.x
bsz = input.extra_benchmark_config["bsz"]
hidden_size = input.extra_benchmark_config["hidden_size"]
intermediate_size = input.extra_benchmark_config["intermediate_size"]
hidden_act = input.extra_benchmark_config["hidden_act"]
dtype = input.extra_benchmark_config["dtype"]
num_shards = input.extra_benchmark_config.get("num_shards", None)
activation_type = input.extra_benchmark_config["activation_type"]
provider = input.kernel_provider
mode = input.kernel_operation_mode
llama_config = LlamaConfig(
hidden_size=hidden_size,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
)
x_shape = (bsz, seq_len, hidden_size)
# initialize input
x = torch.randn(*x_shape, device=device, dtype=dtype, requires_grad=True)
if activation_type == "geglu":
if provider == "huggingface":
layer = LlamaMLP(config=llama_config).to(device).to(dtype)
elif provider == "liger":
layer = LigerGEGLUMLP(config=llama_config).to(device).to(dtype)
elif provider == "liger_tiled":
layer = LigerTiledGEGLUMLP(config=llama_config, num_shards=num_shards).to(device).to(dtype)
elif provider == "deepspeed_tiled":
layer = DeepSpeedTiledMLPWrapper(config=llama_config, num_shards=num_shards).to(device).to(dtype)
else:
raise ValueError(f"Invalid provider: {provider} for GEGLU")
elif activation_type == "swiglu":
if provider == "huggingface":
layer = LlamaMLP(config=llama_config).to(device).to(dtype)
elif provider == "liger":
layer = LigerSwiGLUMLP(config=llama_config).to(device).to(dtype)
elif provider == "liger_tiled":
layer = LigerTiledSwiGLUMLP(config=llama_config, num_shards=num_shards).to(device).to(dtype)
elif provider == "deepspeed_tiled":
layer = DeepSpeedTiledMLPWrapper(config=llama_config, num_shards=num_shards).to(device).to(dtype)
else:
raise ValueError(f"Invalid provider: {provider} for SwiGLU")
else:
raise ValueError(f"Invalid activation_type: {activation_type}")
def fwd():
return layer(x)
def full():
y = fwd()
y.backward(torch.randn_like(y), retain_graph=True)
if mode == "forward":
mem_50, mem_20, mem_80 = _test_memory(
fwd,
quantiles=QUANTILES,
)
elif mode == "backward":
do = torch.randn_like(x)
y = fwd()
mem_50, mem_20, mem_80 = _test_memory(
lambda: y.backward(do, retain_graph=True),
quantiles=QUANTILES,
)
else:
mem_50, mem_20, mem_80 = _test_memory(
full,
quantiles=QUANTILES,
)
return SingleBenchmarkRunOutput(
y_20=mem_20,
y_50=mem_50,
y_80=mem_80,
)
if __name__ == "__main__":
args = parse_benchmark_script_args()
# Benchmark GEGLU variants
kernel_providers_geglu = ["huggingface", "liger", "liger_tiled", "deepspeed_tiled"]
common_configs_geglu = {
"kernel_name": "tiled_geglu",
"x_name": "T",
"x_label": "sequence length",
"x_values": [2**i for i in range(10, 15)], # 1024 to 16384
"kernel_providers": kernel_providers_geglu,
"extra_benchmark_configs": [
{
"bsz": 2,
"hidden_size": 2048,
"intermediate_size": 4096,
"hidden_act": "gelu_pytorch_tanh",
"activation_type": "geglu",
"num_shards": 4,
"dtype": torch.bfloat16,
}
],
"overwrite": args.overwrite,
}
run_benchmarks(
bench_test_fn=bench_speed_tiled_mlp,
kernel_operation_modes=["full", "forward", "backward"],
metric_name="speed",
metric_unit="ms",
**common_configs_geglu,
)
run_benchmarks(
bench_test_fn=bench_memory_tiled_mlp,
kernel_operation_modes=["full", "forward", "backward"],
metric_name="memory",
metric_unit="MB",
**common_configs_geglu,
)
# Benchmark SwiGLU variants
kernel_providers_swiglu = ["huggingface", "liger", "liger_tiled", "deepspeed_tiled"]
common_configs_swiglu = {
"kernel_name": "tiled_swiglu",
"x_name": "T",
"x_label": "sequence length",
"x_values": [2**i for i in range(10, 15)], # 1024 to 16384
"kernel_providers": kernel_providers_swiglu,
"extra_benchmark_configs": [
{
"bsz": 2,
"hidden_size": 2048,
"intermediate_size": 4096,
"hidden_act": "silu",
"activation_type": "swiglu",
"num_shards": 4,
"dtype": torch.bfloat16,
}
],
"overwrite": args.overwrite,
}
run_benchmarks(
bench_test_fn=bench_speed_tiled_mlp,
kernel_operation_modes=["full", "forward", "backward"],
metric_name="speed",
metric_unit="ms",
**common_configs_swiglu,
)
run_benchmarks(
bench_test_fn=bench_memory_tiled_mlp,
kernel_operation_modes=["full", "forward", "backward"],
metric_name="memory",
metric_unit="MB",
**common_configs_swiglu,
)
import torch
import triton
from utils import QUANTILES
from utils import SingleBenchmarkRunInput
from utils import SingleBenchmarkRunOutput
from utils import _test_memory
from utils import parse_benchmark_script_args
from utils import run_benchmarks
from liger_kernel.transformers.tvd import LigerTVDLoss
from liger_kernel.utils import get_total_gpu_memory
from liger_kernel.utils import infer_device
device = infer_device()
class TorchTVDLoss(torch.nn.Module):
def __init__(self, reduction="batchmean"):
super(TorchTVDLoss, self).__init__()
self.reduction = reduction
def forward(self, p, q):
tvd = torch.abs(p - q) / 2.0
if self.reduction == "mean":
return torch.sum(tvd) / (p.size(0) * p.size(1))
elif self.reduction == "sum":
return torch.sum(tvd)
elif self.reduction == "none":
return tvd
elif self.reduction == "batchmean":
return torch.sum(tvd) / p.size(0)
else:
raise ValueError("Invalid reduction type.")
S, E = 12, 18
def bench_speed_tvd(input: SingleBenchmarkRunInput) -> SingleBenchmarkRunOutput:
reduction = "batchmean"
V = input.x
B, T = input.extra_benchmark_config["B"], input.extra_benchmark_config["T"]
torch_tvd = TorchTVDLoss(reduction=reduction)
liger_tvd = LigerTVDLoss(reduction=reduction)
_input = torch.randn(B * T, V, requires_grad=True, device=device).softmax(dim=-1)
target = torch.randn(B * T, V, device=device).softmax(dim=-1)
def fwd():
if input.kernel_provider == "liger":
return liger_tvd(_input, target)
else:
return torch_tvd(_input, target)
if input.kernel_operation_mode == "forward":
ms_50, ms_20, ms_80 = triton.testing.do_bench(fwd, quantiles=QUANTILES, rep=100)
elif input.kernel_operation_mode == "backward":
y = fwd()
ms_50, ms_20, ms_80 = triton.testing.do_bench(
lambda: y.backward(retain_graph=True),
quantiles=QUANTILES,
grad_to_none=[_input],
rep=100,
)
elif input.kernel_operation_mode == "full":
def full():
y = fwd()
y.backward(retain_graph=True)
ms_50, ms_20, ms_80 = triton.testing.do_bench(full, quantiles=QUANTILES, rep=100)
return SingleBenchmarkRunOutput(
y_20=ms_20,
y_50=ms_50,
y_80=ms_80,
)
def bench_memory_tvd(input: SingleBenchmarkRunInput) -> SingleBenchmarkRunOutput:
reduction = "batchmean"
torch_tvd = TorchTVDLoss(reduction=reduction)
liger_tvd = LigerTVDLoss(reduction=reduction)
V = input.x
B, T = input.extra_benchmark_config["B"], input.extra_benchmark_config["T"]
_input = torch.randn(B * T, V, requires_grad=True, device=device).softmax(dim=-1)
target = torch.randn(B * T, V, device=device).softmax(dim=-1)
def fwd():
if input.kernel_provider == "liger":
return liger_tvd(_input, target)
else:
return torch_tvd(_input, target)
def full():
y = fwd()
y.backward(retain_graph=True)
mem_50, mem_20, mem_80 = _test_memory(full, quantiles=QUANTILES)
return SingleBenchmarkRunOutput(
y_20=mem_20,
y_50=mem_50,
y_80=mem_80,
)
if __name__ == "__main__":
args = parse_benchmark_script_args()
gpu_memory_gbs = get_total_gpu_memory()
# We know that the full test will require 66GBs for vocab size 2^17
if gpu_memory_gbs >= 66:
x_max = 17
elif gpu_memory_gbs >= 32:
x_max = 16
else:
x_max = 15
common_args = {
"kernel_name": "tvd",
"x_name": "V",
"x_label": "vocab size",
"x_values": [2**i for i in range(12, x_max + 1)],
"kernel_providers": ["liger", "torch"],
"extra_benchmark_configs": [{"B": 8, "T": 2048}],
"overwrite": args.overwrite,
}
run_benchmarks(
bench_test_fn=bench_memory_tvd,
kernel_operation_modes=["full"],
metric_name="memory",
metric_unit="MB",
**common_args,
)
run_benchmarks(
bench_test_fn=bench_speed_tvd,
kernel_operation_modes=["forward", "full", "backward"],
metric_name="speed",
metric_unit="ms",
**common_args,
)
import argparse
import csv
import json
import os
import time
from collections import OrderedDict
from dataclasses import asdict
from dataclasses import dataclass
from importlib.metadata import version
from itertools import zip_longest
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import torch
from liger_kernel.utils import infer_device
device = infer_device()
LIGER_KERNEL_VERSION = version("liger-kernel")
QUANTILES = [0.5, 0.2, 0.8]
@dataclass
class SingleBenchmarkRunInput:
x: Union[int, float]
kernel_provider: str
kernel_operation_mode: Optional[str] = ""
extra_benchmark_config: Optional[Dict[str, Any]] = None
@dataclass
class SingleBenchmarkRunOutput:
# 20th percentile
y_20: float
# 50th percentile (median)
y_50: float
# 80th percentile
y_80: float
@dataclass
class BenchmarkData:
"""
BenchmarkData is a dataclass to store the benchmark data for a completed benchmark
run on all x-values for a given kernel/kernel operation mode/metric/extra_benchmark_config
"""
kernel_name: str
kernel_provider: str
metric_name: str
metric_unit: str
gpu_name: str
x_name: str
x_label: str
x_values: List[float]
y_values_50: List[float]
y_values_20: List[float]
y_values_80: List[float]
timestamp: str
kernel_operation_mode: Optional[str] = None
extra_benchmark_config_str: Optional[str] = None
liger_version: str = LIGER_KERNEL_VERSION
@dataclass
class BenchmarkDataCSVRow:
# The ordering of field names here will be the order of columns in the CSV
kernel_name: str
kernel_provider: str
kernel_operation_mode: Union[str, None]
metric_name: str
metric_unit: str
x_name: str
x_label: str
x_value: float
y_value_50: float
y_value_20: float
y_value_80: float
extra_benchmark_config_str: Union[str, None]
gpu_name: str
timestamp: str
liger_version: str
def _test_memory(
func: Callable,
_iter: int = 10,
quantiles: Optional[List[float]] = None,
return_mode="mean",
) -> float:
assert return_mode in ["min", "max", "mean", "median"]
total_mem = []
for _ in range(_iter):
getattr(torch, device).memory.reset_peak_memory_stats()
func()
# Convert to MB
mem = getattr(torch, device).max_memory_allocated() / 2**20
total_mem.append(mem)
total_mem = torch.tensor(total_mem, dtype=torch.float)
if quantiles is not None:
quantiles_data = torch.quantile(total_mem, torch.tensor(quantiles, dtype=torch.float)).tolist()
if len(quantiles_data) == 1:
quantiles_data = quantiles_data[0]
return quantiles_data
return getattr(torch, return_mode)(total_mem).item()
def run_speed_benchmark(
fwd_fn: Callable,
mode: str,
input_tensors: List[torch.Tensor],
rep: int = 10,
) -> "SingleBenchmarkRunOutput":
"""Measure execution speed for forward, backward, or full (fwd+bwd).
Covers the common case where the forward function returns a single tensor
and backward uses a random gradient of the same shape. For kernels with
scalar output (losses) or multiple outputs (e.g. RoPE), write custom
measurement logic instead.
"""
import triton
if mode == "forward":
ms_50, ms_20, ms_80 = triton.testing.do_bench(
fwd_fn,
grad_to_none=input_tensors,
rep=rep,
quantiles=QUANTILES,
)
elif mode == "backward":
y = fwd_fn()
do = torch.randn_like(y)
ms_50, ms_20, ms_80 = triton.testing.do_bench(
lambda: y.backward(do, retain_graph=True),
grad_to_none=input_tensors,
rep=rep,
quantiles=QUANTILES,
)
elif mode == "full":
def full():
y = fwd_fn()
y.backward(torch.randn_like(y), retain_graph=True)
ms_50, ms_20, ms_80 = triton.testing.do_bench(
full,
grad_to_none=input_tensors,
rep=rep,
quantiles=QUANTILES,
)
else:
raise ValueError(f"Unsupported mode: {mode}. Use 'forward', 'backward', or 'full'.")
return SingleBenchmarkRunOutput(y_20=ms_20, y_50=ms_50, y_80=ms_80)
def run_memory_benchmark(
fwd_fn: Callable,
mode: str,
) -> "SingleBenchmarkRunOutput":
"""Measure peak memory for forward, backward, or full (fwd+bwd).
Same caveats as :func:`run_speed_benchmark` regarding output shape.
"""
if mode == "forward":
mem_50, mem_20, mem_80 = _test_memory(fwd_fn, quantiles=QUANTILES)
elif mode == "backward":
y = fwd_fn()
do = torch.randn_like(y)
mem_50, mem_20, mem_80 = _test_memory(
lambda: y.backward(do, retain_graph=True),
quantiles=QUANTILES,
)
elif mode == "full":
def full():
y = fwd_fn()
y.backward(torch.randn_like(y), retain_graph=True)
mem_50, mem_20, mem_80 = _test_memory(full, quantiles=QUANTILES)
else:
raise ValueError(f"Unsupported mode: {mode}. Use 'forward', 'backward', or 'full'.")
return SingleBenchmarkRunOutput(y_20=mem_20, y_50=mem_50, y_80=mem_80)
def get_current_file_directory() -> str:
"""
Returns the directory path of the current Python file.
"""
# Get the absolute path of the current file
current_file_path = os.path.abspath(__file__)
# Get the directory path of the current file
return os.path.dirname(current_file_path)
def sleep(seconds):
def decorator(function):
def wrapper(*args, **kwargs):
time.sleep(seconds)
return function(*args, **kwargs)
return wrapper
return decorator
def _print_benchmarking_banner(metric_name: str, kernel_name: str):
print("**************************************")
print(f" BENCHMARKING {metric_name.upper()} for {kernel_name.upper()}")
print("**************************************")
def get_formatted_time():
return time.strftime("%Y-%m-%d %H:%M:%S")
def get_gpu_name():
"""
Returns the current GPU name, formatted to serve as a directory name
"""
torch_device = getattr(torch, device)
if torch_device.is_available():
gpu_name = torch_device.get_device_name(torch_device.current_device())
return gpu_name
else:
raise Exception("Benchmarks can only be run on GPU.")
def update_benchmark_data_csv(
benchmark_data_list: List[BenchmarkData],
filename: str = "all_benchmark_data.csv",
overwrite: bool = True,
):
"""
Update the CSV file with the new benchmark data. If the file does not exist, create it.
If an entry already exists for the benchmark, then overwrite it if `overwrite` is True.
"""
def create_unique_key(row):
# This unique key is used to determine if a benchmark run already exists in the CSV
# If the key is the same, then the benchmark run already exists and will optionally
# be overwritten. Otherwise, it is considered a new benchmark run and appended.
return (
row["kernel_name"],
row["kernel_provider"],
row["kernel_operation_mode"] if row["kernel_operation_mode"] else "",
row["metric_name"],
row["x_name"],
str(row["x_value"]),
(row["extra_benchmark_config_str"] if row["extra_benchmark_config_str"] else ""),
row["gpu_name"],
)
fieldnames = BenchmarkDataCSVRow.__annotations__.keys()
# Make filename path relative to current file
filename_abs_path = os.path.join(get_current_file_directory(), "../data", filename)
file_exists = os.path.isfile(filename_abs_path)
# Read existing data into a list of dicts
existing_data = []
if file_exists:
with open(filename_abs_path, mode="r") as file:
reader = csv.DictReader(file)
for row in reader:
existing_data.append(row)
existing_data_dict = OrderedDict((create_unique_key(row), row) for row in existing_data)
for benchmark_data in benchmark_data_list:
benchmark_data_dict = asdict(benchmark_data)
x_values = benchmark_data_dict.pop("x_values")
y_values_50 = benchmark_data_dict.pop("y_values_50")
y_values_20 = benchmark_data_dict.pop("y_values_20")
y_values_80 = benchmark_data_dict.pop("y_values_80")
# Need to convert benchmark_data into multiple rows based on x_values and y_values
for x_value, y_value_50, y_value_20, y_value_80 in zip_longest(x_values, y_values_50, y_values_20, y_values_80):
if y_value_50 is None:
y_value_50 = float("nan")
if y_value_20 is None:
y_value_20 = float("nan")
if y_value_80 is None:
y_value_80 = float("nan")
row = BenchmarkDataCSVRow(
x_value=x_value,
y_value_50=y_value_50,
y_value_20=y_value_20,
y_value_80=y_value_80,
**benchmark_data_dict,
)
row_dict = asdict(row)
row_key = create_unique_key(row_dict)
if row_key in existing_data_dict:
if overwrite:
# If overwriting, update the row
existing_data_dict[row_key] = row_dict
else:
# If not overwriting, skip this row
pass
else:
existing_data_dict[row_key] = row_dict
os.makedirs(os.path.dirname(filename_abs_path), exist_ok=True)
with open(filename_abs_path, mode="w", newline="") as file:
writer = csv.DictWriter(file, fieldnames=fieldnames)
writer.writeheader()
for row in existing_data_dict.values():
writer.writerow(row)
class CustomEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, torch.dtype):
return str(obj)
return super().default(obj)
def print_benchmark_data(benchmark_data_list: List[BenchmarkData]) -> None:
print("********** Benchmark Data **********")
formatted_list = [obj.__dict__ for obj in benchmark_data_list]
print(json.dumps(formatted_list, indent=2))
def run_benchmarks(
bench_test_fn: Callable,
kernel_name: str,
metric_name: str,
metric_unit: str,
x_name: str,
x_label: str,
x_values: List[Union[float, int]],
kernel_providers: List[str],
kernel_operation_modes: Optional[List[str]] = [None],
extra_benchmark_configs: Optional[List[Dict[str, Any]]] = None,
overwrite: bool = False,
):
"""
Run benchmarks given a bench_test_fn that takes in a SingleBenchmarkRunInput as input and
saves data to the CSV file.
Args:
- bench_test_fn: The benchmark test function to run. This function should take in a
SingleBenchmarkRunInput as input and return a SingleBenchmarkRunOutput.
- kernel_name: The name of the kernel being benchmarked (e.g. "swiglu")
- metric_name: The name of the metric being benchmarked (e.g. "speed" or "memory")
- metric_unit: The unit of the metric being benchmarked (e.g. "ms" or "MB")
- x_name: The name of the x-axis (e.g. "T" for sequence length)
- x_label: The label of the x-axis (e.g. "sequence length")
- x_values: The list of x-values to run the benchmark on (e.g. [2**i for i in range(10, 14)])
- kernel_providers: The list of kernel providers to run the benchmark on (e.g. ["liger", "huggingface"])
- kernel_operation_modes: The list of kernel operation modes to run the benchmark on (e.g. ["full", "backward"])
- extra_benchmark_configs: The list of extra benchmark configurations to run the benchmark on.
- overwrite: Whether to overwrite the existing benchmark data entry if it already exists.
"""
assert len(kernel_operation_modes) >= 1
assert len(kernel_providers) >= 1
_print_benchmarking_banner(metric_name=metric_name, kernel_name=kernel_name)
gpu_name = get_gpu_name()
benchmark_data_list = []
for extra_benchmark_config in extra_benchmark_configs:
for kernel_operation_mode in kernel_operation_modes:
for kernel_provider in kernel_providers:
y_values_50 = []
y_values_20 = []
y_values_80 = []
for x in x_values:
single_benchmark_run_input = SingleBenchmarkRunInput(
x=x,
kernel_provider=kernel_provider,
kernel_operation_mode=kernel_operation_mode,
extra_benchmark_config=extra_benchmark_config,
)
benchmark_result: SingleBenchmarkRunOutput = bench_test_fn(single_benchmark_run_input)
y_values_50.append(benchmark_result.y_50)
y_values_20.append(benchmark_result.y_20)
y_values_80.append(benchmark_result.y_80)
benchmark_run_data = BenchmarkData(
kernel_name=kernel_name,
kernel_operation_mode=kernel_operation_mode,
kernel_provider=kernel_provider,
metric_name=metric_name,
metric_unit=metric_unit,
gpu_name=gpu_name,
x_name=x_name,
x_label=x_label,
x_values=x_values,
y_values_50=y_values_50,
y_values_20=y_values_20,
y_values_80=y_values_80,
extra_benchmark_config_str=json.dumps(extra_benchmark_config, cls=CustomEncoder),
timestamp=get_formatted_time(),
liger_version=LIGER_KERNEL_VERSION,
)
benchmark_data_list.append(benchmark_run_data)
print_benchmark_data(benchmark_data_list)
update_benchmark_data_csv(benchmark_data_list=benchmark_data_list, overwrite=overwrite)
def parse_benchmark_script_args():
parser = argparse.ArgumentParser(description="Benchmarking script for Liger-Kernel")
parser.add_argument(
"--overwrite",
action="store_true",
help="Flag to overwrite existing benchmark data with current run.",
)
parser.add_argument(
"--model",
type=str,
default=None,
help=(
"Model config name from MODEL_REGISTRY "
"(e.g. llama_2_7b, llama_3_8b). "
"Defaults to llama_3_8b when not specified."
),
)
args = parser.parse_args()
return args
from pathlib import Path
import modal
ROOT_PATH = Path(__file__).parent.parent.parent
REMOTE_ROOT_PATH = "/root/liger-kernel"
PYTHON_VERSION = "3.12"
image = modal.Image.debian_slim(python_version=PYTHON_VERSION).pip_install("uv")
app = modal.App("liger_benchmarks", image=image)
# mount: add local files to the remote container
repo = image.add_local_dir(ROOT_PATH, remote_path=REMOTE_ROOT_PATH)
@app.function(gpu="H100!", image=repo, timeout=60 * 90)
def liger_benchmarks():
import os
import subprocess
subprocess.run(
["uv pip install -e '.[dev]' --system"],
check=True,
shell=True,
cwd=REMOTE_ROOT_PATH,
)
subprocess.run(["make run-benchmarks"], check=True, shell=True, cwd=REMOTE_ROOT_PATH)
file_path = Path(REMOTE_ROOT_PATH) / "benchmark" / "data" / "all_benchmark_data.csv"
print(f"Checking if file exists at: {file_path}")
print(f"File exists: {os.path.exists(file_path)}")
if not os.path.exists(file_path):
print("Listing directory contents:")
data_dir = file_path.parent
if os.path.exists(data_dir):
print(f"Contents of {data_dir}:")
print(os.listdir(data_dir))
else:
print(f"Data directory {data_dir} does not exist")
raise FileNotFoundError(f"Benchmark data file not found at {file_path}")
with open(file_path, "rb") as f:
data = f.read()
print(f"Successfully read {len(data)} bytes of data")
return data
@app.local_entrypoint()
def main():
try:
# Run the benchmarks and get the data
print("Starting benchmark run...")
benchmark_data = liger_benchmarks.remote()
if not benchmark_data:
raise ValueError("No data received from remote function")
# Save the data locally
local_data_path = ROOT_PATH / "benchmark" / "data" / "all_benchmark_data.csv"
print(f"Attempting to save data to: {local_data_path}")
local_data_path.parent.mkdir(parents=True, exist_ok=True)
with open(local_data_path, "wb") as f:
f.write(benchmark_data)
print(f"Successfully saved {len(benchmark_data)} bytes to: {local_data_path}")
except Exception as e:
print(f"Error occurred: {str(e)}")
raise
from pathlib import Path
import modal
ROOT_PATH = Path(__file__).parent.parent.parent
REMOTE_ROOT_PATH = "/root/liger-kernel"
PYTHON_VERSION = "3.12"
OLDEST_SUPPORTED_TRANSFORMERS_V4_VERSION = "4.52.0"
image = modal.Image.debian_slim(python_version=PYTHON_VERSION).pip_install("uv")
app = modal.App("liger_tests", image=image)
# mount: add local files to the remote container
repo = image.add_local_dir(ROOT_PATH, remote_path=REMOTE_ROOT_PATH)
@app.function(gpu="H100!", image=repo, timeout=90 * 60)
def liger_correctness_tests():
import subprocess
subprocess.run(
["uv pip install -e '.[dev]' --system"],
check=True,
shell=True,
cwd=REMOTE_ROOT_PATH,
)
subprocess.run(["make test"], check=True, shell=True, cwd=REMOTE_ROOT_PATH)
@app.function(gpu="H100!", image=repo, timeout=90 * 60)
def liger_convergence_tests():
import subprocess
subprocess.run(
["uv pip install -e '.[dev]' --system"],
check=True,
shell=True,
cwd=REMOTE_ROOT_PATH,
)
subprocess.run(["make test-convergence"], check=True, shell=True, cwd=REMOTE_ROOT_PATH)
oldest_v4_app = modal.App("liger_oldest_v4_tests", image=image) # 4.52.0
@oldest_v4_app.function(gpu="H100!", image=repo, timeout=90 * 60)
def liger_oldest_v4_correctness_tests():
import subprocess
subprocess.run(
["uv pip install -e '.[dev]' --system"],
check=True,
shell=True,
cwd=REMOTE_ROOT_PATH,
)
subprocess.run(
[f"uv pip install 'transformers=={OLDEST_SUPPORTED_TRANSFORMERS_V4_VERSION}' --system"],
check=True,
shell=True,
cwd=REMOTE_ROOT_PATH,
)
subprocess.run(["make test"], check=True, shell=True, cwd=REMOTE_ROOT_PATH)
@oldest_v4_app.function(gpu="H100!", image=repo, timeout=90 * 60)
def liger_oldest_v4_convergence_tests():
import subprocess
subprocess.run(
["uv pip install -e '.[dev]' --system"],
check=True,
shell=True,
cwd=REMOTE_ROOT_PATH,
)
subprocess.run(
[f"uv pip install 'transformers=={OLDEST_SUPPORTED_TRANSFORMERS_V4_VERSION}' --system"],
check=True,
shell=True,
cwd=REMOTE_ROOT_PATH,
)
subprocess.run(["make test-convergence"], check=True, shell=True, cwd=REMOTE_ROOT_PATH)
latest_v4_app = modal.App("liger_latest_v4_tests", image=image) # 4.57.6
!!! Example "HANDS-ON USECASE EXAMPLES"
| **Use Case** | **Description** |
|------------------------------------------------|---------------------------------------------------------------------------------------------------|
| [**Hugging Face Trainer**](https://github.com/linkedin/Liger-Kernel/tree/main/examples/huggingface) | Train LLaMA 3-8B ~20% faster with over 40% memory reduction on Alpaca dataset using 4 A100s with FSDP |
| [**Lightning Trainer**](https://github.com/linkedin/Liger-Kernel/tree/main/examples/lightning) | Increase throughput by 15% and reduce memory usage by 40% with LLaMA 3-8B on the MMLU dataset using 8 A100s with DeepSpeed ZeRO3 |
| [**Medusa Multi-head LLM (Retraining Phase)**](https://github.com/linkedin/Liger-Kernel/tree/main/examples/medusa) | Reduce memory usage by 80% with 5 LM heads and improve throughput by 40% using 8 A100s with FSDP |
| [**Vision-Language Model SFT**](https://github.com/linkedin/Liger-Kernel/tree/main/examples/huggingface/run_qwen2_vl.sh) | Finetune Qwen2-VL on image-text data using 4 A100s with FSDP |
| [**Liger ORPO Trainer**](https://github.com/linkedin/Liger-Kernel/blob/main/examples/alignment/run_orpo.py) | Align Llama 3.2 using the Liger ORPO Trainer with FSDP, achieving a 50% memory reduction |
## HuggingFace Trainer
### How to Run
#### Locally on a GPU machine
You can run the example locally on a GPU machine. The default hyperparameters and configurations work on a single node with 4xA100 80GB GPUs and FSDP.
!!! Example
```bash
pip install -r requirements.txt
sh run_{MODEL}.sh
```
#### Remotely on Modal
If you do not have access to a GPU machine, you can run the example on Modal. Modal is a serverless platform that allows you to run your code on a remote GPU machine. You can sign up for a free account at [Modal](https://www.modal.com/).
!!! Example
```bash
pip install modal
modal setup # authenticate with Modal
modal run launch_on_modal.py --script "run_qwen2_vl.sh"
```
!!! Notes
1. This example uses an optional `use_liger` flag. If true, it applies the Liger kernels with a one-line monkey patch (see the sketch after this list).
2. The example uses the Llama 3 model, which requires a community license agreement and a Hugging Face Hub login. If you want to use Llama 3 in this example, please make sure you have done the following:
* Agree to the [community license agreement](https://huggingface.co/meta-llama/Meta-Llama-3-8B).
* Run `huggingface-cli login` and enter your HuggingFace token.
3. The default hyperparameters and configurations work on a single node with 4xA100 80GB GPUs. For running on devices with less GPU RAM, please consider reducing the per-GPU batch size and/or enabling `CPUOffload` in FSDP.
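As a rough illustration of what the `use_liger` flag toggles (a minimal sketch: the flag handling and model name here are illustrative, while `apply_liger_kernel_to_llama` is the patching API documented later in these docs):
```python
from transformers import AutoModelForCausalLM

from liger_kernel.transformers import apply_liger_kernel_to_llama

use_liger = True  # hypothetical stand-in for the example script's --use_liger flag

if use_liger:
    # One-line monkey patch: swaps the Llama modeling code for the Liger kernels
    apply_liger_kernel_to_llama()

model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
```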
### Benchmark Result
### Llama
!!! Info
>Benchmark conditions:
>Model= LLaMA 3-8B, Dataset= Alpaca, Max seq len = 512, Data Type = bf16, Optimizer = AdamW, Gradient Checkpointing = True, Distributed Strategy = FSDP1 on 4 A100s.
Throughput improves by around 20%, while GPU memory usage drops by 40%. This allows you to train the model on smaller GPUs, use larger batch sizes, or handle longer sequence lengths without incurring additional costs.
![Throughput](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/huggingface/img/llama_tps.png)
![GPU Memory Allocated](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/huggingface/img/llama_mem_alloc.png)
### Qwen
!!! Info
>Benchmark conditions:
>Model= Qwen2-7B, Dataset= Alpaca, Max seq len = 512, Data Type = bf16, Optimizer = AdamW, Gradient Checkpointing = True, Distributed Strategy = FSDP1 on 4 A100s.
Throughput improves by around 10%, while GPU memory usage drops by 50%.
![Throughput](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/huggingface/img/qwen_tps.png)
![GPU Memory Allocated](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/huggingface/img/qwen_mem_alloc.png)
### Gemma 7B
!!! Info
>Benchmark conditions:
> Model= Gemma-7B, Dataset= Alpaca, Max seq len = 512, Data Type = bf16, Optimizer = AdamW, Gradient Checkpointing = True, Distributed Strategy = FSDP1 on 4 A100s.
Throughput improves by around 24%, while GPU memory usage drops by 33%.
![Throughput](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/huggingface/img/gemma_7b_tp.png)
![GPU Memory Allocated](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/huggingface/img/gemma_7b_mem.png)
## Lightning Trainer
### How to Run
#### Locally on a GPU machine
You can run the example locally on a GPU machine.
!!! Example
```bash
pip install -r requirements.txt
# For single L40 48GB GPU
python training.py --model Qwen/Qwen2-0.5B-Instruct --num_gpu 1 --max_length 1024
# For 8XA100 40GB
python training.py --model meta-llama/Meta-Llama-3-8B --strategy deepspeed
```
!!! Notes
1. The example uses the Llama 3 model, which requires a community license agreement and a Hugging Face Hub login. If you want to use Llama 3 in this example, please make sure you have done the following:
* Agree to the [community license agreement](https://huggingface.co/meta-llama/Meta-Llama-3-8B)
* Run `huggingface-cli login` and enter your HuggingFace token.
2. The default hyperparameters and configurations for Gemma work on a single L40 48GB GPU, and the config for Llama works on a single node with 8xA100 40GB GPUs. For running on devices with less GPU RAM, please consider reducing the per-GPU batch size and/or enabling `CPUOffload` in FSDP.
## Medusa
Medusa is a simple framework that democratizes the acceleration techniques for LLM generation with multiple decoding heads. To learn more, you can check out the [repo](https://github.com/FasterDecoding/Medusa) and the [paper](https://arxiv.org/abs/2401.10774).
The Liger fused CE kernel is highly effective in this scenario: it eliminates the need to materialize the logits for each head, which would otherwise consume a large amount of memory due to the extensive vocabulary size (e.g., for LLaMA-3, the vocabulary size is 128k).
Adding multiple heads can easily lead to OOM (Out of Memory) issues. However, thanks to the efficient Liger fused CE, which computes the gradient in place and doesn't materialize the logits, we have observed very strong results. This efficiency opens up more opportunities for multi-token prediction research and development.
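As a hedged sketch of why this matters with multiple heads (the head setup below is purely illustrative, not the Medusa example's actual code; `LigerFusedLinearCrossEntropyLoss` takes the projection weight, the hidden states, and the targets, as in the composition example later in these docs):
```python
import torch
import torch.nn as nn

from liger_kernel.transformers import LigerFusedLinearCrossEntropyLoss

vocab_size = 32000  # illustrative; LLaMA-3's real vocabulary is ~128k
hidden = torch.randn(16, 4096, device="cuda", requires_grad=True)  # (tokens, hidden)
heads = [nn.Linear(4096, vocab_size, bias=False, device="cuda") for _ in range(3)]
targets = [torch.randint(vocab_size, (16,), device="cuda") for _ in range(3)]

loss_fn = LigerFusedLinearCrossEntropyLoss()
# The (tokens, vocab) logits for each head are never materialized: the fused kernel
# consumes each head's weight matrix directly and computes gradients chunk by chunk.
loss = sum(loss_fn(head.weight, hidden, tgt) for head, tgt in zip(heads, targets))
loss.backward()
```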
### How to Run
!!! Example
```bash
git clone git@github.com:linkedin/Liger-Kernel.git
cd {PATH_TO_Liger-Kernel}/Liger-Kernel/
pip install -e .
cd {PATH_TO_Liger-Kernel}/Liger-Kernel/examples/medusa
pip install -r requirements.txt
sh scripts/llama3_8b_medusa.sh
```
!!! Notes
1. This example uses an optional `use_liger` flag. If true, it monkey-patches the model to apply the Liger kernels together with the Medusa heads.
2. The example uses the Llama 3 model, which requires a community license agreement and a Hugging Face Hub login. If you want to use Llama 3 in this example, please make sure you have done the following:
* Agree to the community license agreement: https://huggingface.co/meta-llama/Meta-Llama-3-8B
* Run `huggingface-cli login` and enter your HuggingFace token
3. The default hyperparameters and configurations work on a single node with 8xA100 GPUs. For running on devices with less GPU RAM, please consider reducing the per-GPU batch size and/or enabling `CPUOffload` in FSDP.
4. We are using a smaller sample of ShareGPT data primarily to benchmark performance. The example requires hyperparameter tuning and dataset selection to work effectively, and the dataset should match the distribution of the LLaMA pretraining data. Contributions to improve the example code are welcome.
### Benchmark Result
!!! Info
> 1. Benchmark conditions: LLaMA 3-8B, Batch Size = 6, Data Type = bf16, Optimizer = AdamW, Gradient Checkpointing = True, Distributed Strategy = FSDP1 on 8 A100s.
#### Stage 1
Stage 1 refers to Medusa-1, where the backbone model is frozen and only the weights of the Medusa heads are updated.
!!! Warning
```bash
# Set this flag to True in llama3_8b_medusa.sh to enable Stage 1
--medusa_only_heads True
```
#### num_head = 3
![Memory](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/medusa/docs/images/Memory_Stage1_num_head_3.png)
![Throughput](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/medusa/docs/images/Throughput_Stage1_num_head_3.png)
#### num_head = 5
![Memory](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/medusa/docs/images/Memory_Stage1_num_head_5.png)
![Throughput](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/medusa/docs/images/Throughput_Stage1_num_head_5.png)
#### Stage 2
!!! Warning
```bash
# Set this flag to False in llama3_8b_medusa.sh to enable Stage 2
--medusa_only_heads False
```
Stage 2 refers to Medusa-2, where all model weights are updated, including the backbone model and the LM heads.
#### num_head = 3
![Memory](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/medusa/docs/images/Memory_Stage2_num_head_3.png)
![Throughput](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/medusa/docs/images/Throughput_Stage2_num_head_3.png)
#### num_head = 5
![Memory](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/medusa/docs/images/Memory_Stage2_num_head_5.png)
![Throughput](https://raw.githubusercontent.com/linkedin/Liger-Kernel/main/examples/medusa/docs/images/Throughput_Stage2_num_head_5.png)
## Vision-Language Model SFT
### How to Run
#### Locally on a GPU Machine
You can run the example locally on a GPU machine. The default hyperparameters and configurations work on a single node with 4xA100 80GB GPUs.
!!! Example
```bash
#!/bin/bash
torchrun --nnodes=1 --nproc-per-node=4 training_multimodal.py \
--model_name "Qwen/Qwen2-VL-7B-Instruct" \
--bf16 \
--num_train_epochs 1 \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 8 \
--eval_strategy "no" \
--save_strategy "no" \
--learning_rate 6e-6 \
--weight_decay 0.05 \
--warmup_ratio 0.1 \
--lr_scheduler_type "cosine" \
--logging_steps 1 \
--include_num_input_tokens_seen \
--report_to none \
--fsdp "full_shard auto_wrap" \
--fsdp_config config/fsdp_config.json \
--seed 42 \
--use_liger True \
--output_dir multimodal_finetuning
```
## ORPO Trainer
### How to Run
#### Locally on a GPU Machine
You can run the example locally on a GPU machine with FSDP.
!!! Example
```py
import torch
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import ORPOConfig # noqa: F401
from liger_kernel.transformers.trainer import LigerORPOTrainer # noqa: F401
model = AutoModelForCausalLM.from_pretrained(
"meta-llama/Llama-3.2-1B-Instruct",
dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained(
"meta-llama/Llama-3.2-1B-Instruct",
max_length=512,
padding="max_length",
)
tokenizer.pad_token = tokenizer.eos_token
train_dataset = load_dataset("trl-lib/tldr-preference", split="train")
training_args = ORPOConfig(
output_dir="Llama3.2_1B_Instruct",
beta=0.1,
max_length=128,
per_device_train_batch_size=32,
max_steps=100,
save_strategy="no",
)
trainer = LigerORPOTrainer(
model=model, args=training_args, tokenizer=tokenizer, train_dataset=train_dataset
)
trainer.train()
```
There are a couple of ways to apply Liger kernels, depending on the level of customization required.
### 1. Use AutoLigerKernelForCausalLM
Using the `AutoLigerKernelForCausalLM` is the simplest approach, as you don't have to import a model-specific patching API. If the model type is supported, the modeling code will be automatically patched using the default settings.
!!! Example
```python
from liger_kernel.transformers import AutoLigerKernelForCausalLM
# This AutoModel wrapper class automatically monkey-patches the
# model with the optimized Liger kernels if the model is supported.
model = AutoLigerKernelForCausalLM.from_pretrained("path/to/some/model")
```
### 2. Apply Model-Specific Patching APIs
Using the [patching APIs](https://github.com/linkedin/Liger-Kernel?tab=readme-ov-file#patching), you can swap Hugging Face models with optimized Liger Kernels.
!!! Example
```python
import transformers
from liger_kernel.transformers import apply_liger_kernel_to_llama
# 1a. Adding this line automatically monkey-patches the model with the optimized Liger kernels
apply_liger_kernel_to_llama()
# 1b. You could alternatively specify exactly which kernels are applied
apply_liger_kernel_to_llama(
rope=True,
swiglu=True,
cross_entropy=True,
fused_linear_cross_entropy=False,
rms_norm=False
)
# 2. Instantiate patched model
model = transformers.AutoModelForCausalLM.from_pretrained("path/to/llama/model")
```
### 3. Compose Your Own Model
You can take individual [kernels](https://github.com/linkedin/Liger-Kernel?tab=readme-ov-file#model-kernels) to compose your models.
!!! Example
```python
from liger_kernel.transformers import LigerFusedLinearCrossEntropyLoss
import torch.nn as nn
import torch
model = nn.Linear(128, 256).cuda()
# fuses linear + cross entropy layers together and performs chunk-by-chunk computation to reduce memory
loss_fn = LigerFusedLinearCrossEntropyLoss()
input = torch.randn(4, 128, requires_grad=True, device="cuda")
target = torch.randint(256, (4, ), device="cuda")
loss = loss_fn(model.weight, input, target)
loss.backward()
```
# High-Level APIs
## AutoModel
| **AutoModel Variant** | **API** |
|------------------------|---------|
| AutoModelForCausalLM | `liger_kernel.transformers.AutoLigerKernelForCausalLM` |
This API extends the implementation of the `AutoModelForCausalLM` within the `transformers` library from Hugging Face.
::: liger_kernel.transformers.AutoLigerKernelForCausalLM
options:
extra:
show_docstring: true
show_signature: true
show_source: true
!!! Example "Try it Out"
You can experiment as shown in this example [here](https://github.com/linkedin/Liger-Kernel?tab=readme-ov-file#1-use-autoligerkernelforcausallm).
---
## Patching
You can also use the Patching APIs to use the kernels for a specific model architecture.
| **Model** | **API** | **Supported Operations** |
|-------------|--------------------------------------------------------------|-------------------------------------------------------------------------|
| LLaMA 2 & 3 | `liger_kernel.transformers.apply_liger_kernel_to_llama` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
| LLaMA 3.2-Vision | `liger_kernel.transformers.apply_liger_kernel_to_mllama` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
| Mistral | `liger_kernel.transformers.apply_liger_kernel_to_mistral` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
| Mixtral | `liger_kernel.transformers.apply_liger_kernel_to_mixtral` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
| Gemma1 | `liger_kernel.transformers.apply_liger_kernel_to_gemma` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
| Gemma2 | `liger_kernel.transformers.apply_liger_kernel_to_gemma2` | RoPE, RMSNorm, GeGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
| Qwen2, Qwen2.5, & QwQ | `liger_kernel.transformers.apply_liger_kernel_to_qwen2` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
| Qwen2-VL | `liger_kernel.transformers.apply_liger_kernel_to_qwen2_vl` | RMSNorm, LayerNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
| Phi3 & Phi3.5 | `liger_kernel.transformers.apply_liger_kernel_to_phi3` | RoPE, RMSNorm, SwiGLU, CrossEntropyLoss, FusedLinearCrossEntropy |
### Function Signatures
::: liger_kernel.transformers.apply_liger_kernel_to_llama
options:
extra:
show_docstring: true
show_signature: true
::: liger_kernel.transformers.apply_liger_kernel_to_mllama
options:
extra:
show_docstring: true
show_signature: true
::: liger_kernel.transformers.apply_liger_kernel_to_mistral
options:
extra:
show_docstring: true
show_signature: true
::: liger_kernel.transformers.apply_liger_kernel_to_mixtral
options:
extra:
show_docstring: true
show_signature: true
::: liger_kernel.transformers.apply_liger_kernel_to_gemma
options:
extra:
show_docstring: true
show_signature: true
::: liger_kernel.transformers.apply_liger_kernel_to_gemma2
options:
extra:
show_docstring: true
show_signature: true
::: liger_kernel.transformers.apply_liger_kernel_to_qwen2
options:
extra:
show_docstring: true
show_signature: true
::: liger_kernel.transformers.apply_liger_kernel_to_qwen2_vl
options:
extra:
show_docstring: true
show_signature: true
::: liger_kernel.transformers.apply_liger_kernel_to_phi3
options:
extra:
show_docstring: true
show_signature: true
## Model Kernels
| **Kernel** | **API** |
|---------------------------------|-------------------------------------------------------------|
| RMSNorm | `liger_kernel.transformers.LigerRMSNorm` |
| LayerNorm | `liger_kernel.transformers.LigerLayerNorm` |
| RoPE | `liger_kernel.transformers.liger_rotary_pos_emb` |
| SwiGLU | `liger_kernel.transformers.LigerSwiGLUMLP` |
| GeGLU | `liger_kernel.transformers.LigerGEGLUMLP` |
| CrossEntropy | `liger_kernel.transformers.LigerCrossEntropyLoss` |
| Fused Linear CrossEntropy | `liger_kernel.transformers.LigerFusedLinearCrossEntropyLoss`|
| Multi Token Attention | `liger_kernel.transformers.LigerMultiTokenAttention` |
| Softmax | `liger_kernel.transformers.LigerSoftmax` |
| Sparsemax | `liger_kernel.transformers.LigerSparsemax` |
| mHC (Hyper-Connections) | `liger_kernel.transformers.LigerMHC` |
### RMS Norm
RMS Norm simplifies the LayerNorm operation by eliminating mean subtraction, which reduces computational complexity while retaining effectiveness.
This kernel performs normalization by scaling input vectors to have a unit root mean square (RMS) value. This method allows for a ~7x speed improvement and a ~3x reduction in memory footprint compared to
implementations in PyTorch.
!!! Example "Try it out"
You can experiment as shown in this example [here](https://colab.research.google.com/drive/1CQYhul7MVG5F0gmqTBbx1O1HgolPgF0M?usp=sharing).
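A minimal usage sketch (assuming `LigerRMSNorm` takes the hidden size as its first argument with an `eps` keyword, like typical RMSNorm modules; the shapes below are illustrative):
```python
import torch

from liger_kernel.transformers import LigerRMSNorm

norm = LigerRMSNorm(4096, eps=1e-6).cuda()  # hidden size and eps are illustrative

x = torch.randn(2, 128, 4096, device="cuda", requires_grad=True)
y = norm(x)  # each vector is scaled to unit RMS, then multiplied by the learned weight
y.sum().backward()
```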
### RoPE
RoPE (Rotary Position Embedding) enhances the positional encoding used in transformer models.
The implementation allows for effective handling of positional information without incurring significant computational overhead.
!!! Example "Try it out"
You can experiment as shown in this example [here](https://colab.research.google.com/drive/1llnAdo0hc9FpxYRRnjih0l066NCp7Ylu?usp=sharing).
### SwiGLU
### GeGLU
### CrossEntropy
This kernel is optimized for calculating the loss function used in classification tasks.
The kernel achieves a ~3x execution speed increase and a ~5x reduction in memory usage for substantial vocabulary sizes compared to implementations in PyTorch.
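The following is a minimal sketch of the module used as a drop-in for `torch.nn.CrossEntropyLoss` on flattened logits; the batch and vocabulary sizes are arbitrary.
```python
import torch
from liger_kernel.transformers import LigerCrossEntropyLoss

batch_size, vocab_size = 8, 32000  # arbitrary example sizes
logits = torch.randn(batch_size, vocab_size, device="cuda", requires_grad=True)
targets = torch.randint(0, vocab_size, (batch_size,), device="cuda")

loss_fn = LigerCrossEntropyLoss()  # assumed drop-in for torch.nn.CrossEntropyLoss
loss = loss_fn(logits, targets)
loss.backward()
```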
!!! Example "Try it out"
You can experiment as shown in this example [here](https://colab.research.google.com/drive/1WgaU_cmaxVzx8PcdKB5P9yHB6_WyGd4T?usp=sharing).
### Fused Linear CrossEntropy
This kernel fuses the final linear projection with the cross-entropy loss calculation into a single operation, so the full logits tensor never has to be materialized, substantially reducing peak memory for large vocabularies.
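Below is a hypothetical sketch of calling the fused loss directly on hidden states rather than pre-computed logits; the argument order (projection weight, hidden states, targets) is an assumption here, so consult the API reference for the exact signature.
```python
import torch
from liger_kernel.transformers import LigerFusedLinearCrossEntropyLoss

hidden_size, vocab_size, num_tokens = 4096, 32000, 512  # arbitrary example sizes
hidden_states = torch.randn(num_tokens, hidden_size, device="cuda", requires_grad=True)
lm_head_weight = torch.randn(vocab_size, hidden_size, device="cuda", requires_grad=True)
targets = torch.randint(0, vocab_size, (num_tokens,), device="cuda")

loss_fn = LigerFusedLinearCrossEntropyLoss()
# Assumed argument order: (linear weight, hidden states, targets); the
# (num_tokens, vocab_size) logits are computed in chunks inside the kernel.
loss = loss_fn(lm_head_weight, hidden_states, targets)
loss.backward()
```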
!!! Example "Try it out"
You can experiment as shown in this example [here](https://colab.research.google.com/drive/1Z2QtvaIiLm5MWOs7X6ZPS1MN3hcIJFbj?usp=sharing)
### Multi Token Attention
The Multi Token Attention kernel provides an optimized, fused implementation of multi-token attention compared to the PyTorch model baseline. Multi-token attention, introduced by Meta Research, is a new attention mechanism that can operate on multiple Q and K inputs.
Paper: https://arxiv.org/abs/2504.00927
### Softmax
The Softmax kernel provides an optimized implementation of the softmax operation, a fundamental component in neural networks for converting raw scores into probability distributions.
The implementation shows notable speedups compared to the PyTorch softmax implementation.
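A minimal sketch of using the module in place of `torch.nn.Softmax` follows; the no-argument constructor and normalization over the last dimension are assumptions here.
```python
import torch
from liger_kernel.transformers import LigerSoftmax

softmax = LigerSoftmax().to("cuda")  # assumed to normalize over the last dimension

scores = torch.randn(4, 16, 128, 128, device="cuda", requires_grad=True)
probs = softmax(scores)      # each row along the last dimension sums to 1
(probs ** 2).sum().backward()  # dummy scalar loss to exercise the backward pass
```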
### Sparsemax
Sparsemax is a sparse alternative to softmax that produces sparse probability distributions. This kernel implements an efficient version of the sparsemax operation that can be used as a drop-in replacement for softmax in attention mechanisms or classification tasks.
The implementation achieves significant speed improvements and memory savings compared to standard PyTorch implementations, particularly for large input tensors.
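A minimal sketch of the module as a drop-in replacement for softmax along a chosen dimension; the shapes are arbitrary.
```python
import torch
from liger_kernel.transformers import LigerSparsemax

sparsemax = LigerSparsemax(dim=-1).to("cuda")

x = torch.randn(8, 4096, device="cuda", requires_grad=True)
p = sparsemax(x)             # each row sums to 1, with many entries exactly 0
(p ** 2).sum().backward()    # dummy scalar loss to exercise the backward pass
```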
### mHC (Manifold-Constrained Hyper-Connections)
mHC implements fused Triton kernels for Manifold-Constrained Hyper-Connections ([arXiv:2512.24880](https://arxiv.org/abs/2512.24880)). It wraps an arbitrary layer `F: [..., C] -> [..., C]` with multiple residual streams, constraining the residual routing matrix `H_res` onto the Birkhoff polytope (doubly-stochastic matrices) via Sinkhorn-Knopp iterations to stabilize training.
The `LigerMHC` module takes input of shape `[..., HC, C]` where `HC` is the number of residual streams, and performs:
1. **Coefficients** -- Compute data-dependent routing coefficients (`h_pre`, `h_post`, `h_res`) via fused matmul + RMS normalization + Sinkhorn-Knopp iterations.
2. **Pre-aggregate** -- `x_in = sum_i h_pre[i] * x[i]` (shape: `[..., C]`)
3. **Layer** -- `f_out = layer(x_in)` (shape: `[..., C]`)
4. **Post + residual** -- `x_out[o] = sum_i h_res[o,i] * x[i] + h_post[o] * f_out` (shape: `[..., HC, C]`)
Usage:
```python
import torch
import torch.nn as nn
from liger_kernel.transformers import LigerMHC
# Wrap a linear layer with 4 residual streams of dimension 256
layer = nn.Linear(256, 256, bias=False, device="cuda", dtype=torch.bfloat16)
mhc = LigerMHC(layer, hc=4, c=256, phi_dtype=torch.bfloat16).cuda()
# Input: [batch, seq_len, num_streams, channels] in BF16/FP16
x = torch.randn(2, 128, 4, 256, device="cuda", dtype=torch.bfloat16)
out = mhc(x) # shape: [2, 128, 4, 256]
```
Functional APIs are also available:
- `liger_kernel.transformers.functional.liger_mhc_coeffs` -- Compute routing coefficients
- `liger_kernel.transformers.functional.liger_mhc_pre` -- Pre-aggregation
- `liger_kernel.transformers.functional.liger_mhc_post_res` -- Post-aggregation + residual
- `liger_kernel.transformers.functional.liger_mhc_apply` -- Combined pre + post_res
- `liger_kernel.transformers.functional.liger_mhc_forward` -- Full forward pass (coeffs + pre + layer + post_res)
## Alignment Kernels
| **Kernel** | **API** |
|---------------------------------|-------------------------------------------------------------|
| Fused Linear CPO Loss | `liger_kernel.chunked_loss.LigerFusedLinearCPOLoss` |
| Fused Linear DPO Loss | `liger_kernel.chunked_loss.LigerFusedLinearDPOLoss` |
| Fused Linear ORPO Loss | `liger_kernel.chunked_loss.LigerFusedLinearORPOLoss` |
| Fused Linear SimPO Loss | `liger_kernel.chunked_loss.LigerFusedLinearSimPOLoss` |
## Distillation Kernels
| **Kernel** | **API** |
|---------------------------------|-------------------------------------------------------------|
| KLDivergence | `liger_kernel.transformers.LigerKLDIVLoss` |
| JSD | `liger_kernel.transformers.LigerJSD` |
| Fused Linear JSD | `liger_kernel.transformers.LigerFusedLinearJSD` |
## Experimental Kernels
| **Kernel** | **API** |
|---------------------------------|-------------------------------------------------------------|
| Embedding | `liger_kernel.transformers.experimental.LigerEmbedding` |
| Matmul int2xint8 | `liger_kernel.transformers.experimental.matmul` |
### Design
- [@claire_yishan](https://twitter.com/claire_yishan) for the LOGO design
- [Wave Snippets](https://www.wavesnippets.com/) for generating the animated code snippets
### Code
We referenced or used the following projects:
| # | Project | Description | Location | License |
|---|----------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------|
| 1 | [Unsloth](https://github.com/unslothai/unsloth/blob/fd753fed99ed5f10ef8a9b7139588d9de9ddecfb/unsloth/kernels/utils.py#L43) | `calculate_settings` to determine block size and warp; We reuse it for Norm and MLP | [Liger Kernel Utils](https://github.com/linkedin/Liger-Kernel/blob/e249eee723978bf8610ff1ea2297d048a2417e20/src/liger_kernel/ops/utils.py#L23) | [Apache](https://github.com/unslothai/unsloth/blob/fd753fed99ed5f10ef8a9b7139588d9de9ddecfb/LICENSE) |
| 2 | [Unsloth](https://github.com/unslothai/unsloth/blob/976d11a10d54383aeb7a692c69e01151a20bfd72/unsloth/kernels/rms_layernorm.py#L48) | We modified and added dW calculation on top of Unsloth implementation | [Liger Kernel RMS Norm](https://github.com/linkedin/Liger-Kernel/blob/e249eee723978bf8610ff1ea2297d048a2417e20/src/liger_kernel/ops/rms_norm.py#L50) | [Apache](https://github.com/unslothai/unsloth/blob/fd753fed99ed5f10ef8a9b7139588d9de9ddecfb/LICENSE) |
| 3 | [Triton tutorial](https://triton-lang.org/main/index.html) | We modified on top of triton tutorials | [Liger Kernel RMS Norm](https://github.com/linkedin/Liger-Kernel/blob/e249eee723978bf8610ff1ea2297d048a2417e20/src/liger_kernel/ops/rms_norm.py#L50) | [MIT](https://github.com/triton-lang/triton/blob/main/LICENSE) |
| 4 | [tiny shakespeare dataset](https://huggingface.co/datasets/karpathy/tiny_shakespeare) | We use tiny shakespeare dataset to conduct convergence test on mini model | [Liger Kernel Convergence](https://github.com/linkedin/Liger-Kernel/tree/main/test/convergence) | N/A |
| 5 | [Efficient Cross Entropy](https://github.com/mgmalek/efficient_cross_entropy) | We use the idea of gradient-in-forward and chunking | [Liger Kernel Linear Cross Entropy](https://github.com/linkedin/Liger-Kernel/blob/main/src/liger_kernel/ops/fused_linear_cross_entropy.py) | [MIT](https://github.com/mgmalek/efficient_cross_entropy/blob/main/LICENSE) |
| 6 | [Flash attn](https://github.com/Dao-AILab/flash-attention) | We take many optimization ideas from the work, such as tiling and recomputation | | [BSD](https://github.com/Dao-AILab/flash-attention/blob/main/LICENSE) |
| 7 | [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) | We reference the design of automodel | [Liger Kernel Auto Model](https://github.com/linkedin/Liger-Kernel/blob/main/src/liger_kernel/transformers/auto_model.py) | [MIT](https://github.com/casper-hansen/AutoAWQ/blob/main/LICENSE) |
| 8 | [llm.c](https://github.com/karpathy/llm.c) | We reference the design of end-to-end testing | [Liger Kernel Convergence Tests](https://github.com/linkedin/Liger-Kernel/tree/main/test/convergence) | [MIT](https://github.com/karpathy/llm.c/blob/master/LICENSE) |
Many thanks to the contributors to these projects for their invaluable work that helped make Liger possible.
Thank you for your interest in contributing to Liger-Kernel! This guide will help you set up your development environment, add a new kernel, run tests, and submit a pull request (PR).
### Maintainers
@ByronHsu(admin) @qingquansong @yundai424 @kvignesh1420 @lancerts @JasonZhu1313 @shimizust @vaibhavjindal @tcc0403 @momochen
## Interested in the ticket?
Leave `#take` in the comment and tag the maintainer.
## Setting Up Your Development Environment
1. **Clone the Repository**
```sh
git clone https://github.com/linkedin/Liger-Kernel.git
cd Liger-Kernel
```
2. **Install Dependencies and Editable Package**
```sh
pip install -e .[dev]
```
If you encounter the error `no matches found: .[dev]` (common with zsh), quote the extras:
```sh
pip install -e .'[dev]'
```
3. **Install pre-commit hooks using [`prek`](https://prek.j178.dev/), a `pre-commit` alternative written in Rust**
```
prek install
```
Run the pre-commit checks without committing (`-a` is equivalent to `--all-files`):
```
prek run -a
```
## Structure
### Source Code
- `ops/`: Core Triton operations.
- `transformers/`: PyTorch `nn.Module` implementations built on Triton operations, compliant with the `transformers` API.
### Tests
- `transformers/`: Correctness tests for the Triton-based layers.
- `convergence/`: Patches Hugging Face models with all kernels, runs multiple iterations, and compares weights, logits, and loss layer-by-layer.
### Benchmark
- `benchmark/`: Execution time and memory benchmarks compared to Hugging Face layers.
## Adding support for a new model
To get familiar with the folder structure, please refer to the [Structure](https://github.com/linkedin/Liger-Kernel?tab=readme-ov-file#structure) section of the README.
1. **Figure out the kernels that can be monkey-patched**
- Check the `src/liger_kernel/ops` directory to find the kernels that can be monkey-patched.
- Kernels like Fused Linear Cross Entropy require a custom `lce_forward` function to allow monkey-patching. When adding kernels that require a similar approach, create the corresponding forward function in the `src/liger_kernel/transformers/model` directory.
2. **Monkey-patch the HuggingFace model**
- Add the monkey-patching code in the `src/liger_kernel/transformers/monkey_patch.py` file (a schematic sketch of the pattern is shown after this list).
- Ensure that the monkey-patching function is added to the `__init__.py` file in the `src/liger_kernel/transformers/` directory.
3. **Add Unit Tests**
- Create unit tests and convergence tests for the monkey-patched model in the tests directory. Ensure that your tests cover all functionalities of the monkey-patched model.
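To make the pattern concrete, here is a schematic sketch, not the actual `monkey_patch.py` code, of what a patching function might look like; the `modeling_my_model` module and `MyModelRMSNorm` class are hypothetical placeholders for the architecture being patched.
```python
from liger_kernel.transformers import LigerRMSNorm


def apply_liger_kernel_to_my_model(rms_norm: bool = True) -> None:
    """Schematic example of patching a hypothetical `my_model` architecture."""
    # Hypothetical placeholder -- in practice, import the Hugging Face
    # modeling module of the architecture you are patching.
    from transformers.models.my_model import modeling_my_model

    if rms_norm:
        # Replace the class attribute so that every layer constructed
        # afterwards instantiates the Liger kernel instead of the original.
        modeling_my_model.MyModelRMSNorm = LigerRMSNorm
```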
## Adding a New Kernel
To get familiar with the folder structure, please refer to the [Structure](https://github.com/linkedin/Liger-Kernel?tab=readme-ov-file#structure) section of the README.
1. **Create Your Kernel**
Add your kernel implementation in `src/liger_kernel/`.
2. **Add Unit Tests**
Create unit tests and convergence tests for your kernel in the tests directory. Ensure that your tests cover all kernel functionalities.
3. **Add Benchmark Script**
Add a benchmarking script under `benchmark/scripts` using the naming convention `benchmark_{kernel_name}.py` showing the performance difference between the Liger kernel and HuggingFace.
## Run tests
### Use Makefile to run full tests
1. Run `make test` to ensure correctness.
2. Run `make checkstyle` to ensure code style.
3. Run `make test-convergence` to ensure convergence.
### Run pytest on single file
`python -m pytest test_sample.py::test_function_name`
## Run kernel benchmarks
The `/benchmark` directory contains benchmarking scripts for the individual kernels, demonstrating the speed and memory differences between the Liger and HuggingFace module implementations.
1. Run `make run-benchmarks` to run all benchmarking scripts and append data to `benchmark/data/all_benchmark_data.csv`.
- Existing entries that are the same (based on `kernel_name`, `kernel_provider`, `kernel_operation_mode`, `metric_name`, `x_name`, `x_value`, `extra_benchmark_config_str`, and `gpu_name`) will not be overwritten.
2. Run `make run-benchmarks OVERWRITE=1` to overwrite any existing entries that have the same configuration.
3. Run `python benchmark/scripts/benchmark_{kernel_name}.py` to run an individual benchmark.
4. You can use the `benchmark/benchmarks_visualizer.py` script to generate visualizations from the CSV; these are saved to the `benchmark/visualizations` directory (note: this directory is not tracked by git).
## Submit PR
Fork the repo, copy and paste the successful test logs into the PR description, and submit the PR following the PR template (**[example PR](https://github.com/linkedin/Liger-Kernel/pull/21)**).
> As a contributor, you represent that the code you submit is your original work or that of your employer (in which case you represent you have the right to bind your employer). By submitting code, you (and, if applicable, your employer) are licensing the submitted code to LinkedIn and the open source community subject to the BSD 2-Clause license.
#### Release (Maintainer only)
1. Bump the version in pyproject.toml to the desired version (for example, `0.2.0`)
2. Submit a PR and merge it
3. Create a new release from the current HEAD, naming the tag `v<version number>`, for example `v0.2.0`. Alternatively, to create a release from a different commit, run `git tag v0.2.0 <commit hash> && git push origin v0.2.0` and create the release from that tag
4. Add release notes: at a minimum, click the `Generate Release Notes` button, which automatically lists 1) the changes included and 2) new contributors. It is good practice to add sections on top to highlight the important changes.
5. A new PyPI upload is triggered upon release. NOTE: both pre-releases and official releases trigger the workflow that builds the wheel and publishes to PyPI, so please make sure steps 1-3 are followed correctly!
### Notes on version
Here we follow [semantic versioning](https://semver.org/). Denoting the version as `major.minor.patch`, we increment:
- Major version when there is a backward-incompatible change.
- Minor version when there is new backward-compatible functionality.
- Patch version for bug fixes.