| repository (string, 166 distinct values) | file_path (string, 6–125 chars) | url (string, 89–210 chars) | code (string, 413–290k chars) | chunk (string, 56–175k chars) |
|---|---|---|---|---|
lucidrains/lion-pytorch
|
lion_pytorch/triton.py
|
https://github.com/lucidrains/lion-pytorch/blob/6a74fdc0ba572ab5683dc0270c66c20ecbc02d09/lion_pytorch/triton.py
|
import torch
try:
import triton
import triton.language as tl
except ImportError as e:
print('triton is not installed, please install by running `pip install triton>=2.2.0`')
exit()
# triton cuda kernel
@triton.autotune(configs = [
triton.Config({'BLOCK_SIZE': 128}, num_warps = 4),
triton.Config({'BLOCK_SIZE': 1024}, num_warps = 8),
], key = ['n_elements'], restore_value=['p_ptr', 'exp_avg_ptr'])
@triton.jit
def update_fn_kernel(
p_ptr,
grad_ptr,
exp_avg_ptr,
lr,
wd,
beta1,
beta2,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis = 0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# offsetted pointers
offset_p_ptr = p_ptr + offsets
offset_grad_ptr = grad_ptr + offsets
offset_exp_avg_ptr = exp_avg_ptr + offsets
# load
p = tl.load(offset_p_ptr, mask = mask)
grad = tl.load(offset_grad_ptr, mask = mask)
exp_avg = tl.load(offset_exp_avg_ptr, mask = mask)
# stepweight decay
p = p * (1 - lr * wd)
# diff between momentum running average and grad
diff = exp_avg - grad
# weight update
update = diff * beta1 + grad
# torch.sign
can_update = update != 0
update_sign = tl.where(update > 0, -lr, lr)
p = p + update_sign * can_update
# decay the momentum running average coefficient
exp_avg = diff * beta2 + grad
# store new params and momentum running average coefficient
tl.store(offset_p_ptr, p, mask = mask)
tl.store(offset_exp_avg_ptr, exp_avg, mask = mask)
def update_fn(
p: torch.Tensor,
grad: torch.Tensor,
exp_avg: torch.Tensor,
lr: float,
wd: float,
beta1: float,
beta2: float
):
assert all([t.is_cuda for t in (p, grad, exp_avg)])
n_elements = p.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
update_fn_kernel[grid](
p,
grad,
exp_avg,
lr,
wd,
beta1,
beta2,
n_elements
)
|
@triton.jit
def update_fn_kernel(
p_ptr,
grad_ptr,
exp_avg_ptr,
lr,
wd,
beta1,
beta2,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis = 0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# offsetted pointers
offset_p_ptr = p_ptr + offsets
offset_grad_ptr = grad_ptr + offsets
offset_exp_avg_ptr = exp_avg_ptr + offsets
# load
p = tl.load(offset_p_ptr, mask = mask)
grad = tl.load(offset_grad_ptr, mask = mask)
exp_avg = tl.load(offset_exp_avg_ptr, mask = mask)
# stepweight decay
p = p * (1 - lr * wd)
# diff between momentum running average and grad
diff = exp_avg - grad
# weight update
update = diff * beta1 + grad
# torch.sign
can_update = update != 0
update_sign = tl.where(update > 0, -lr, lr)
p = p + update_sign * can_update
# decay the momentum running average coefficient
exp_avg = diff * beta2 + grad
# store new params and momentum running average coefficient
tl.store(offset_p_ptr, p, mask = mask)
tl.store(offset_exp_avg_ptr, exp_avg, mask = mask)
def update_fn(
p: torch.Tensor,
grad: torch.Tensor,
exp_avg: torch.Tensor,
lr: float,
wd: float,
beta1: float,
beta2: float
):
assert all([t.is_cuda for t in (p, grad, exp_avg)])
n_elements = p.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
update_fn_kernel[grid](
p,
grad,
exp_avg,
lr,
wd,
beta1,
beta2,
n_elements
)
|
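A minimal usage sketch for the `update_fn` above (not part of the repository): it assumes Triton is installed, a CUDA device is available, and that `p`, `grad`, and `exp_avg` are same-shaped CUDA tensors, mirroring how the `Lion` optimizer in this repository is expected to call it.

```python
import torch

# Illustrative tensors and hyperparameters; only update_fn comes from the file above.
p = torch.randn(1024, 1024, device='cuda')   # parameter, updated in place
grad = torch.randn_like(p)                   # its gradient
exp_avg = torch.zeros_like(p)                # Lion momentum buffer

# One Lion step: decoupled weight decay, sign-based update scaled by lr,
# then the momentum buffer is refreshed with beta2.
update_fn(p, grad, exp_avg, lr=1e-4, wd=1e-2, beta1=0.9, beta2=0.99)
```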
jax-ml/jax-triton
|
examples/add.py
|
https://github.com/jax-ml/jax-triton/blob/9aff06677a24d07e510f3632532a88b6804324dc/examples/add.py
|
# Copyright 2024 The jax_triton Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Addition example."""
import jax
import jax.numpy as jnp
import jax_triton as jt
import triton
import triton.language as tl
@triton.jit
def add_kernel(
x_ptr,
y_ptr,
output_ptr,
block_size: tl.constexpr,
):
"""Adds two vectors."""
pid = tl.program_id(axis=0)
block_start = pid * block_size
offsets = block_start + tl.arange(0, block_size)
mask = offsets < 8
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
tl.store(output_ptr + offsets, output, mask=mask)
def add(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:
out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
block_size = 8
grid = (triton.cdiv(x.size, block_size),)
return jt.triton_call(
x,
y,
kernel=add_kernel,
out_shape=out_shape,
grid=grid,
block_size=block_size)
def main(unused_argv):
x_val = jnp.arange(8)
y_val = jnp.arange(8, 16)
print(add(x_val, y_val))
print(jax.jit(add)(x_val, y_val))
if __name__ == "__main__":
from absl import app
app.run(main)
|
@triton.jit
def add_kernel(
x_ptr,
y_ptr,
output_ptr,
block_size: tl.constexpr,
):
"""Adds two vectors."""
pid = tl.program_id(axis=0)
block_start = pid * block_size
offsets = block_start + tl.arange(0, block_size)
mask = offsets < 8
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
tl.store(output_ptr + offsets, output, mask=mask)
def add(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:
out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
block_size = 8
grid = (triton.cdiv(x.size, block_size),)
return jt.triton_call(
x,
y,
kernel=add_kernel,
out_shape=out_shape,
grid=grid,
block_size=block_size)
def main(unused_argv):
x_val = jnp.arange(8)
y_val = jnp.arange(8, 16)
print(add(x_val, y_val))
print(jax.jit(add)(x_val, y_val))
if __name__ == "__main__":
from absl import app
app.run(main)
|
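The `add_kernel` above hard-codes its mask to the example length (`offsets < 8`), so it is only correct for 8-element inputs. Below is a sketch of a generalized variant (the names `add_kernel_n` / `add_n` are illustrative, not part of jax-triton), assuming the true length can be forwarded as another compile-time meta-parameter in the same way `block_size` is:

```python
@triton.jit
def add_kernel_n(
    x_ptr,
    y_ptr,
    output_ptr,
    n_elements: tl.constexpr,  # true vector length instead of the hard-coded 8
    block_size: tl.constexpr,
):
  pid = tl.program_id(axis=0)
  offsets = pid * block_size + tl.arange(0, block_size)
  mask = offsets < n_elements
  x = tl.load(x_ptr + offsets, mask=mask)
  y = tl.load(y_ptr + offsets, mask=mask)
  tl.store(output_ptr + offsets, x + y, mask=mask)

def add_n(x: jnp.ndarray, y: jnp.ndarray, block_size: int = 8) -> jnp.ndarray:
  out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
  return jt.triton_call(
      x,
      y,
      kernel=add_kernel_n,
      out_shape=out_shape,
      grid=(triton.cdiv(x.size, block_size),),
      # forwarded as constexpr meta-parameters, so each distinct length
      # triggers a fresh kernel specialization
      n_elements=x.size,
      block_size=block_size)
```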
josStorer/RWKV-Runner
|
finetune/lora/v6/fla/ops/hgrn/chunk.py
|
https://github.com/josStorer/RWKV-Runner/blob/ad6170816a776bfc312837aafc9a3ff889a3cdd3/finetune/lora/v6/fla/ops/hgrn/chunk.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2024, Yu Zhang, Songlin Yang
# this file implements the chunkwise form of HGRN, inspired by
# [Volodymyr Kyrylov's blog post](https://proger.github.io/posts/scan/chunk.html);
# see also the `accelerated-scan` lib: https://github.com/proger/accelerated-scan
# in tests on an H800 with B, H, D = 16, 4, 128, the chunkwise kernels can be substantially faster than the recurrent ones:
#
# Performance:
# seq_len chunk recurrent chunk_bwd recurrent_bwd
# 0 128.0 0.039360 0.061056 0.312160 0.205008
# 1 256.0 0.045824 0.123712 0.308784 0.297696
# 2 512.0 0.058688 0.241952 0.310720 0.626528
# 3 1024.0 0.088288 0.476992 0.313184 1.333152
# 4 2048.0 0.169472 0.943264 0.452464 2.724864
# 5 4096.0 0.329920 1.886144 0.881600 5.551520
# 6 8192.0 0.647872 3.755040 1.740496 11.117184
# 7 16384.0 1.272064 7.520576 3.446608 22.362528
from typing import Tuple
import torch
import triton
import triton.language as tl
from fla.utils import contiguous
@triton.autotune(
configs=[
triton.Config({'BD': 32}, num_warps=1),
triton.Config({'BD': 32}, num_warps=2),
triton.Config({'BD': 32}, num_warps=4),
triton.Config({'BD': 32}, num_warps=8),
triton.Config({'BD': 64}, num_warps=1),
triton.Config({'BD': 64}, num_warps=2),
triton.Config({'BD': 64}, num_warps=4),
triton.Config({'BD': 64}, num_warps=8),
triton.Config({'BD': 128}, num_warps=1),
triton.Config({'BD': 128}, num_warps=2),
triton.Config({'BD': 128}, num_warps=4),
triton.Config({'BD': 128}, num_warps=8),
],
key=['D']
)
@triton.jit
def chunk_hgrn_fwd_kernel_h(
x,
g,
gc,
o,
h0,
T: tl.constexpr,
D: tl.constexpr,
BT: tl.constexpr,
BD: tl.constexpr,
USE_INITIAL_STATE: tl.constexpr
):
i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
p_x = x + i_bh * T * D + i_t * BT * D + o_d
p_g = g + i_bh * T * D + i_t * BT * D + o_d
p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
p_o = o + i_bh * T * D + i_t * BT * D + o_d
b_h = tl.zeros([BD], dtype=tl.float32)
b_gc = tl.zeros([BD], dtype=tl.float32)
if USE_INITIAL_STATE:
if i_t == 0:
b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
for i in range(0, BT):
mask_t = mask & ((i_t * BT + i) < T)
b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
b_h = tl.exp(b_g) * b_h + b_x
b_gc = b_gc + b_g
tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)
p_x += D
p_g += D
p_gc += D
p_o += D
@triton.jit
def chunk_hgrn_fwd_kernel_o(
gc,
o,
s_h,
s_t,
s_d,
T: tl.constexpr,
D: tl.constexpr,
BT: tl.constexpr,
BD: tl.constexpr
):
i_d, i_bh = tl.program_id(0), tl.program_id(1)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
for i_t in range(1, tl.cdiv(T, BT)):
p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
# [BD,]
b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
# [BT, BD]
b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
@triton.autotune(
configs=[
triton.Config({'BD': 32}, num_warps=1),
triton.Config({'BD': 32}, num_warps=2),
triton.Config({'BD': 32}, num_warps=4),
triton.Config({'BD': 32}, num_warps=8),
triton.Config({'BD': 64}, num_warps=1),
triton.Config({'BD': 64}, num_warps=2),
triton.Config({'BD': 64}, num_warps=4),
triton.Config({'BD': 64}, num_warps=8),
triton.Config({'BD': 128}, num_warps=1),
triton.Config({'BD': 128}, num_warps=2),
triton.Config({'BD': 128}, num_warps=4),
triton.Config({'BD': 128}, num_warps=8),
],
key=['D']
)
@triton.jit
def chunk_hgrn_bwd_kernel_h(
g,
gc,
dx,
do,
T: tl.constexpr,
D: tl.constexpr,
BT: tl.constexpr,
BD: tl.constexpr
):
i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
BC = min(BT, T - i_t * BT)
NT = tl.num_programs(1)
p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d
if i_t == NT - 1:
b_gc = tl.zeros([BD], dtype=tl.float32)
else:
b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
b_dh = tl.zeros([BD], dtype=tl.float32)
for _ in range(BC - 1, -1, -1):
tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)
b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)
b_gc = b_gc + b_g
b_dh = b_dh + b_do
b_dx = b_dh
b_dh = b_dh * tl.exp(b_g)
tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)
p_g -= D
p_gc -= D
p_dx -= D
p_do -= D
@triton.jit
def chunk_hgrn_bwd_kernel_o(
g,
gc,
o,
dx,
dg,
s_h,
s_t,
s_d,
T: tl.constexpr,
D: tl.constexpr,
BT: tl.constexpr,
BD: tl.constexpr
):
i_d, i_bh = tl.program_id(0), tl.program_id(1)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
for i_t in range(tl.cdiv(T, BT) - 1, -1, -1):
p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0))
p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
# [BD,]
mask_t = mask & ((i_t + 1) * BT < T)
b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32)
# [BT, BD]
b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32)
b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32)
b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32)
b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :]
b_dg = b_o * b_dx * tl.exp(b_g)
tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
class ChunkHGRNFunction(torch.autograd.Function):
@staticmethod
@contiguous
def forward(ctx, x, g, initial_state=None, output_final_state=False):
B, H, T, D = x.shape
BT, BD = 128, min(64, triton.next_power_of_2(D))
num_warps = 8 if BD == 64 else 4
gc = torch.empty_like(g, dtype=torch.float)
o = torch.empty_like(x, dtype=torch.float)
def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
chunk_hgrn_fwd_kernel_h[grid](
x, g, gc, o, initial_state,
T, D,
BT=BT,
USE_INITIAL_STATE=initial_state is not None
)
def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
chunk_hgrn_fwd_kernel_o[grid](
gc, o,
o.stride(1), o.stride(2), o.stride(3),
T, D,
BT=BT, BD=BD,
num_warps=num_warps
)
final_state = None
if output_final_state:
final_state = o[:, :, -1].clone()
o = o.to(x.dtype)
ctx.save_for_backward(g, o, initial_state)
return o, final_state
@staticmethod
@contiguous
def backward(ctx, do, dht=None):
g, o, initial_state = ctx.saved_tensors
B, H, T, D = do.shape
BT, BD = 128, min(64, triton.next_power_of_2(D))
num_warps = 8 if BD == 64 else 4
gc = torch.empty_like(g, dtype=torch.float)
dx = torch.empty_like(o)
dg = torch.empty_like(g)
def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
chunk_hgrn_bwd_kernel_h[grid](
g, gc, dx, do,
T, D,
BT=BT
)
def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
chunk_hgrn_bwd_kernel_o[grid](
g, gc, o, dx, dg,
o.stride(1), o.stride(2), o.stride(3),
T, D,
BT=BT, BD=BD,
num_warps=num_warps
)
if initial_state is not None:
dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp()
return dx, dg, None, None
def chunk_hgrn(
x: torch.Tensor,
g: torch.Tensor,
initial_state: torch.Tensor = None,
output_final_state: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
if initial_state is not None:
initial_state = initial_state.detach()
o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state)
return o, final_state
if __name__ == '__main__':
import torch.nn.functional as F
from fla.ops.hgrn.naive import naive_recurrent_hgrn
from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn
B, H, T, D = 8, 4, 512, 128
dtype = torch.bfloat16
torch.manual_seed(42)
# [batch_size, n_heads, seq_len, d_head]
x = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
g = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g)
print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}')
print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}')
x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
print(f"DTYPE:\t{x.dtype}")
do = torch.randn_like(x)
h0 = torch.randn_like(x[:, :, 0])
ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True)
ref.backward(do)
ref_dx, x.grad = x.grad.clone(), None
ref_dg, g.grad = g.grad.clone(), None
tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True)
tri.backward(do)
tri_dx, x.grad = x.grad.clone(), None
tri_dg, g.grad = g.grad.clone(), None
print(" \t DIFF\t MAX")
print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}")
print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}")
print('Done!')
@triton.testing.perf_report(
triton.testing.Benchmark(
# argument names to use as an x-axis for the plot
x_names=['seq_len'],
# different possible values for `x_name`
x_vals=[128 * 2 ** i for i in range(0, 8)],
# argument name whose value corresponds to a different line in the plot
line_arg='provider',
# possible values for `line_arg``
line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
# label name for the lines
line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
# line styles
styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')],
ylabel="Execution Time (ms)", # label name for the y-axis
# name for the plot. Used also as a file name for saving the plot.
plot_name="Performance",
args={},
)
)
def benchmark(seq_len, provider):
dtype = torch.bfloat16
B, H, D = 16, 4, 128
x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda')
g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid()
x = (1 - g) * x
x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
do = torch.randn_like(x, dtype=dtype)
quantiles = [0.5, 0.2, 0.8]
results = 0, 0, 0
if provider == 'chunk':
results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles)
if provider == 'recurrent':
results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles)
if provider == 'chunk_bwd':
results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles)
if provider == 'recurrent_bwd':
results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles)
return results
benchmark.run(print_data=True)
|
@triton.jit
def chunk_hgrn_fwd_kernel_h(
x,
g,
gc,
o,
h0,
T: tl.constexpr,
D: tl.constexpr,
BT: tl.constexpr,
BD: tl.constexpr,
USE_INITIAL_STATE: tl.constexpr
):
i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
p_x = x + i_bh * T * D + i_t * BT * D + o_d
p_g = g + i_bh * T * D + i_t * BT * D + o_d
p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
p_o = o + i_bh * T * D + i_t * BT * D + o_d
b_h = tl.zeros([BD], dtype=tl.float32)
b_gc = tl.zeros([BD], dtype=tl.float32)
if USE_INITIAL_STATE:
if i_t == 0:
b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
for i in range(0, BT):
mask_t = mask & ((i_t * BT + i) < T)
b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
b_h = tl.exp(b_g) * b_h + b_x
b_gc = b_gc + b_g
tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)
p_x += D
p_g += D
p_gc += D
p_o += D
|
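For reference, the recurrence that `chunk_hgrn_fwd_kernel_h` evaluates inside each chunk is `h_t = exp(g_t) * h_{t-1} + x_t`, with `g` holding log-gates (e.g. `F.logsigmoid` outputs, as in the `__main__` test above). A plain-PyTorch sketch of that recurrence follows, written only as an illustration; the repository's own reference implementation is `fla.ops.hgrn.naive.naive_recurrent_hgrn`.

```python
import torch

def hgrn_recurrence_reference(x: torch.Tensor, g: torch.Tensor,
                              h0: torch.Tensor = None) -> torch.Tensor:
    # x, g: [batch, heads, seq_len, d_head]; returns o with o[:, :, t] = h_t
    B, H, T, D = x.shape
    h = torch.zeros(B, H, D, dtype=torch.float, device=x.device) if h0 is None else h0.float()
    o = torch.empty_like(x, dtype=torch.float)
    for t in range(T):
        h = g[:, :, t].float().exp() * h + x[:, :, t].float()  # h_t = exp(g_t) * h_{t-1} + x_t
        o[:, :, t] = h
    return o.to(x.dtype)
```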
josStorer/RWKV-Runner
|
finetune/lora/v6/fla/ops/hgrn/chunk.py
|
https://github.com/josStorer/RWKV-Runner/blob/ad6170816a776bfc312837aafc9a3ff889a3cdd3/finetune/lora/v6/fla/ops/hgrn/chunk.py
|
(same file as in the first josStorer/RWKV-Runner row above; the full code is shown there)
|
@triton.jit
def chunk_hgrn_fwd_kernel_o(
gc,
o,
s_h,
s_t,
s_d,
T: tl.constexpr,
D: tl.constexpr,
BT: tl.constexpr,
BD: tl.constexpr
):
i_d, i_bh = tl.program_id(0), tl.program_id(1)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
for i_t in range(1, tl.cdiv(T, BT)):
p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
# [BD,]
b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
# [BT, BD]
b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
@triton.autotune(
configs=[
triton.Config({'BD': 32}, num_warps=1),
triton.Config({'BD': 32}, num_warps=2),
triton.Config({'BD': 32}, num_warps=4),
triton.Config({'BD': 32}, num_warps=8),
triton.Config({'BD': 64}, num_warps=1),
triton.Config({'BD': 64}, num_warps=2),
triton.Config({'BD': 64}, num_warps=4),
triton.Config({'BD': 64}, num_warps=8),
triton.Config({'BD': 128}, num_warps=1),
triton.Config({'BD': 128}, num_warps=2),
triton.Config({'BD': 128}, num_warps=4),
triton.Config({'BD': 128}, num_warps=8),
],
key=['D']
)
|
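The chunk shown above (`chunk_hgrn_fwd_kernel_o`) is the second pass of the forward: the first kernel scans each chunk from a zero state and records the running log-gate sum `gc`, and this kernel then folds in the carry from the previous chunk via `o += exp(gc) * h_prev`. Below is a plain-PyTorch sketch of the two passes combined, assuming contiguous `[B, H, T, D]` inputs and omitting the optional initial state (function and variable names are illustrative):

```python
import torch

def chunk_hgrn_forward_reference(x: torch.Tensor, g: torch.Tensor,
                                 chunk_size: int = 128) -> torch.Tensor:
    # Recurrence h_t = exp(g_t) * h_{t-1} + x_t, evaluated chunk by chunk.
    B, H, T, D = x.shape
    o = torch.empty((B, H, T, D), dtype=torch.float, device=x.device)
    carry = torch.zeros(B, H, D, dtype=torch.float, device=x.device)  # state at the end of the previous chunk
    for start in range(0, T, chunk_size):
        xc = x[:, :, start:start + chunk_size].float()
        gk = g[:, :, start:start + chunk_size].float()
        g_cum = gk.cumsum(dim=2)  # 'gc' in the kernels: running log-gate sum within the chunk
        h = torch.zeros(B, H, D, dtype=torch.float, device=x.device)
        for t in range(xc.shape[2]):
            h = gk[:, :, t].exp() * h + xc[:, :, t]  # pass 1: intra-chunk scan from a zero state
            o[:, :, start + t] = h
        # pass 2: fold in the carry from the previous chunk
        o[:, :, start:start + xc.shape[2]] += g_cum.exp() * carry.unsqueeze(2)
        carry = o[:, :, start + xc.shape[2] - 1].clone()
    return o.to(x.dtype)
```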
josStorer/RWKV-Runner
|
finetune/lora/v6/fla/ops/hgrn/chunk.py
|
https://github.com/josStorer/RWKV-Runner/blob/ad6170816a776bfc312837aafc9a3ff889a3cdd3/finetune/lora/v6/fla/ops/hgrn/chunk.py
|
(same file as in the first josStorer/RWKV-Runner row above; the full code is shown there)
|
@triton.jit
def chunk_hgrn_bwd_kernel_h(
g,
gc,
dx,
do,
T: tl.constexpr,
D: tl.constexpr,
BT: tl.constexpr,
BD: tl.constexpr
):
i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
BC = min(BT, T - i_t * BT)
NT = tl.num_programs(1)
p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d
if i_t == NT - 1:
b_gc = tl.zeros([BD], dtype=tl.float32)
else:
b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
b_dh = tl.zeros([BD], dtype=tl.float32)
for _ in range(BC - 1, -1, -1):
tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)
b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)
b_gc = b_gc + b_g
b_dh = b_dh + b_do
b_dx = b_dh
b_dh = b_dh * tl.exp(b_g)
tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)
p_g -= D
p_gc -= D
p_dx -= D
p_do -= D
|
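The backward chunk above (`chunk_hgrn_bwd_kernel_h`) accumulates, within each chunk, `dh_t = do_t + exp(g_{t+1}) * dh_{t+1}` and stores `dx_t = dh_t`; the second backward kernel then adds the inter-chunk term and forms `dg_t = h_{t-1} * dx_t * exp(g_t)`. A naive single-loop sketch of those gradients, again purely illustrative and written against the same recurrence `h_t = exp(g_t) * h_{t-1} + x_t`:

```python
import torch

def hgrn_backward_reference(g: torch.Tensor, o: torch.Tensor, do: torch.Tensor,
                            h0: torch.Tensor = None):
    # g: log-gates, o: forward outputs (o[:, :, t] = h_t), do: upstream gradient of o.
    B, H, T, D = g.shape
    dh = torch.zeros(B, H, D, dtype=torch.float, device=g.device)
    dx = torch.empty((B, H, T, D), dtype=torch.float, device=g.device)
    dg = torch.empty((B, H, T, D), dtype=torch.float, device=g.device)
    for t in range(T - 1, -1, -1):
        dh = dh + do[:, :, t].float()   # gradient reaching h_t
        dx[:, :, t] = dh                # dL/dx_t = dL/dh_t
        if t > 0:
            h_prev = o[:, :, t - 1].float()
        else:
            h_prev = h0.float() if h0 is not None else torch.zeros(B, H, D, device=g.device)
        dg[:, :, t] = h_prev * dh * g[:, :, t].float().exp()  # dL/dg_t
        dh = dh * g[:, :, t].float().exp()  # propagate to h_{t-1}
    return dx.to(o.dtype), dg.to(g.dtype)
```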
josStorer/RWKV-Runner
|
finetune/lora/v6/fla/ops/hgrn/chunk.py
|
https://github.com/josStorer/RWKV-Runner/blob/ad6170816a776bfc312837aafc9a3ff889a3cdd3/finetune/lora/v6/fla/ops/hgrn/chunk.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2024, Yu Zhang, Songlin Yang
# this function implements the chunkwise form of HGRN, inspired by
# [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html)
# also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan
# from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent:
#
# Performance:
# seq_len chunk recurrent chunk_bwd recurrent_bwd
# 0 128.0 0.039360 0.061056 0.312160 0.205008
# 1 256.0 0.045824 0.123712 0.308784 0.297696
# 2 512.0 0.058688 0.241952 0.310720 0.626528
# 3 1024.0 0.088288 0.476992 0.313184 1.333152
# 4 2048.0 0.169472 0.943264 0.452464 2.724864
# 5 4096.0 0.329920 1.886144 0.881600 5.551520
# 6 8192.0 0.647872 3.755040 1.740496 11.117184
# 7 16384.0 1.272064 7.520576 3.446608 22.362528
from typing import Tuple
import torch
import triton
import triton.language as tl
from fla.utils import contiguous
@triton.autotune(
configs=[
triton.Config({'BD': 32}, num_warps=1),
triton.Config({'BD': 32}, num_warps=2),
triton.Config({'BD': 32}, num_warps=4),
triton.Config({'BD': 32}, num_warps=8),
triton.Config({'BD': 64}, num_warps=1),
triton.Config({'BD': 64}, num_warps=2),
triton.Config({'BD': 64}, num_warps=4),
triton.Config({'BD': 64}, num_warps=8),
triton.Config({'BD': 128}, num_warps=1),
triton.Config({'BD': 128}, num_warps=2),
triton.Config({'BD': 128}, num_warps=4),
triton.Config({'BD': 128}, num_warps=8),
],
key=['D']
)
@triton.jit
def chunk_hgrn_fwd_kernel_h(
x,
g,
gc,
o,
h0,
T: tl.constexpr,
D: tl.constexpr,
BT: tl.constexpr,
BD: tl.constexpr,
USE_INITIAL_STATE: tl.constexpr
):
i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
p_x = x + i_bh * T * D + i_t * BT * D + o_d
p_g = g + i_bh * T * D + i_t * BT * D + o_d
p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
p_o = o + i_bh * T * D + i_t * BT * D + o_d
b_h = tl.zeros([BD], dtype=tl.float32)
b_gc = tl.zeros([BD], dtype=tl.float32)
if USE_INITIAL_STATE:
if i_t == 0:
b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
for i in range(0, BT):
mask_t = mask & ((i_t * BT + i) < T)
b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
b_h = tl.exp(b_g) * b_h + b_x
b_gc = b_gc + b_g
tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)
p_x += D
p_g += D
p_gc += D
p_o += D
@triton.jit
def chunk_hgrn_fwd_kernel_o(
gc,
o,
s_h,
s_t,
s_d,
T: tl.constexpr,
D: tl.constexpr,
BT: tl.constexpr,
BD: tl.constexpr
):
i_d, i_bh = tl.program_id(0), tl.program_id(1)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
for i_t in range(1, tl.cdiv(T, BT)):
p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
# [BD,]
b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
# [BT, BD]
b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
@triton.autotune(
configs=[
triton.Config({'BD': 32}, num_warps=1),
triton.Config({'BD': 32}, num_warps=2),
triton.Config({'BD': 32}, num_warps=4),
triton.Config({'BD': 32}, num_warps=8),
triton.Config({'BD': 64}, num_warps=1),
triton.Config({'BD': 64}, num_warps=2),
triton.Config({'BD': 64}, num_warps=4),
triton.Config({'BD': 64}, num_warps=8),
triton.Config({'BD': 128}, num_warps=1),
triton.Config({'BD': 128}, num_warps=2),
triton.Config({'BD': 128}, num_warps=4),
triton.Config({'BD': 128}, num_warps=8),
],
key=['D']
)
@triton.jit
def chunk_hgrn_bwd_kernel_h(
g,
gc,
dx,
do,
T: tl.constexpr,
D: tl.constexpr,
BT: tl.constexpr,
BD: tl.constexpr
):
i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
BC = min(BT, T - i_t * BT)
NT = tl.num_programs(1)
p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d
if i_t == NT - 1:
b_gc = tl.zeros([BD], dtype=tl.float32)
else:
b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
b_dh = tl.zeros([BD], dtype=tl.float32)
for _ in range(BC - 1, -1, -1):
tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)
b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)
b_gc = b_gc + b_g
b_dh = b_dh + b_do
b_dx = b_dh
b_dh = b_dh * tl.exp(b_g)
tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)
p_g -= D
p_gc -= D
p_dx -= D
p_do -= D
@triton.jit
def chunk_hgrn_bwd_kernel_o(
g,
gc,
o,
dx,
dg,
s_h,
s_t,
s_d,
T: tl.constexpr,
D: tl.constexpr,
BT: tl.constexpr,
BD: tl.constexpr
):
i_d, i_bh = tl.program_id(0), tl.program_id(1)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
for i_t in range(tl.cdiv(T, BT) - 1, -1, -1):
p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0))
p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
# [BD,]
mask_t = mask & ((i_t + 1) * BT < T)
b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32)
# [BT, BD]
b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32)
b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32)
b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32)
b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :]
b_dg = b_o * b_dx * tl.exp(b_g)
tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
class ChunkHGRNFunction(torch.autograd.Function):
@staticmethod
@contiguous
def forward(ctx, x, g, initial_state=None, output_final_state=False):
B, H, T, D = x.shape
BT, BD = 128, min(64, triton.next_power_of_2(D))
num_warps = 8 if BD == 64 else 4
gc = torch.empty_like(g, dtype=torch.float)
o = torch.empty_like(x, dtype=torch.float)
def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
chunk_hgrn_fwd_kernel_h[grid](
x, g, gc, o, initial_state,
T, D,
BT=BT,
USE_INITIAL_STATE=initial_state is not None
)
def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
chunk_hgrn_fwd_kernel_o[grid](
gc, o,
o.stride(1), o.stride(2), o.stride(3),
T, D,
BT=BT, BD=BD,
num_warps=num_warps
)
final_state = None
if output_final_state:
final_state = o[:, :, -1].clone()
o = o.to(x.dtype)
ctx.save_for_backward(g, o, initial_state)
return o, final_state
@staticmethod
@contiguous
def backward(ctx, do, dht=None):
g, o, initial_state = ctx.saved_tensors
B, H, T, D = do.shape
BT, BD = 128, min(64, triton.next_power_of_2(D))
num_warps = 8 if BD == 64 else 4
gc = torch.empty_like(g, dtype=torch.float)
dx = torch.empty_like(o)
dg = torch.empty_like(g)
def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
chunk_hgrn_bwd_kernel_h[grid](
g, gc, dx, do,
T, D,
BT=BT
)
def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
chunk_hgrn_bwd_kernel_o[grid](
g, gc, o, dx, dg,
o.stride(1), o.stride(2), o.stride(3),
T, D,
BT=BT, BD=BD,
num_warps=num_warps
)
if initial_state is not None:
dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp()
return dx, dg, None, None
def chunk_hgrn(
x: torch.Tensor,
g: torch.Tensor,
initial_state: torch.Tensor = None,
output_final_state: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
if initial_state is not None:
initial_state = initial_state.detach()
o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state)
return o, final_state
if __name__ == '__main__':
import torch.nn.functional as F
from fla.ops.hgrn.naive import naive_recurrent_hgrn
from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn
B, H, T, D = 8, 4, 512, 128
dtype = torch.bfloat16
torch.manual_seed(42)
# [batch_size, n_heads, seq_len, d_head]
x = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
g = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g)
print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}')
print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}')
x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
print(f"DTYPE:\t{x.dtype}")
do = torch.randn_like(x)
h0 = torch.randn_like(x[:, :, 0])
ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True)
ref.backward(do)
ref_dx, x.grad = x.grad.clone(), None
ref_dg, g.grad = g.grad.clone(), None
tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True)
tri.backward(do)
tri_dx, x.grad = x.grad.clone(), None
tri_dg, g.grad = g.grad.clone(), None
print(" \t DIFF\t MAX")
print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}")
print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}")
print('Done!')
@triton.testing.perf_report(
triton.testing.Benchmark(
# argument names to use as an x-axis for the plot
x_names=['seq_len'],
# different possible values for `x_name`
x_vals=[128 * 2 ** i for i in range(0, 8)],
# argument name whose value corresponds to a different line in the plot
line_arg='provider',
# possible values for `line_arg``
line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
# label name for the lines
line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
# line styles
styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')],
ylabel="Execution Time (ms)", # label name for the y-axis
# name for the plot. Used also as a file name for saving the plot.
plot_name="Performance",
args={},
)
)
def benchmark(seq_len, provider):
dtype = torch.bfloat16
B, H, D = 16, 4, 128
x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda')
g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid()
x = (1 - g) * x
x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
do = torch.randn_like(x, dtype=dtype)
quantiles = [0.5, 0.2, 0.8]
results = 0, 0, 0
if provider == 'chunk':
results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles)
if provider == 'recurrent':
results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles)
if provider == 'chunk_bwd':
results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles)
if provider == 'recurrent_bwd':
results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles)
return results
benchmark.run(print_data=True)
|
@triton.jit
def chunk_hgrn_bwd_kernel_o(
g,
gc,
o,
dx,
dg,
s_h,
s_t,
s_d,
T: tl.constexpr,
D: tl.constexpr,
BT: tl.constexpr,
BD: tl.constexpr
):
i_d, i_bh = tl.program_id(0), tl.program_id(1)
o_d = i_d * BD + tl.arange(0, BD)
mask = o_d < D
for i_t in range(tl.cdiv(T, BT) - 1, -1, -1):
p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0))
p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
# [BD,]
mask_t = mask & ((i_t + 1) * BT < T)
b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32)
# [BT, BD]
b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32)
b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32)
b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32)
b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :]
b_dg = b_o * b_dx * tl.exp(b_g)
tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1))
tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
class ChunkHGRNFunction(torch.autograd.Function):
@staticmethod
@contiguous
def forward(ctx, x, g, initial_state=None, output_final_state=False):
B, H, T, D = x.shape
BT, BD = 128, min(64, triton.next_power_of_2(D))
num_warps = 8 if BD == 64 else 4
gc = torch.empty_like(g, dtype=torch.float)
o = torch.empty_like(x, dtype=torch.float)
def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
chunk_hgrn_fwd_kernel_h[grid](
x, g, gc, o, initial_state,
T, D,
BT=BT,
USE_INITIAL_STATE=initial_state is not None
)
def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
chunk_hgrn_fwd_kernel_o[grid](
gc, o,
o.stride(1), o.stride(2), o.stride(3),
T, D,
BT=BT, BD=BD,
num_warps=num_warps
)
final_state = None
if output_final_state:
final_state = o[:, :, -1].clone()
o = o.to(x.dtype)
ctx.save_for_backward(g, o, initial_state)
return o, final_state
@staticmethod
@contiguous
def backward(ctx, do, dht=None):
g, o, initial_state = ctx.saved_tensors
B, H, T, D = do.shape
BT, BD = 128, min(64, triton.next_power_of_2(D))
num_warps = 8 if BD == 64 else 4
gc = torch.empty_like(g, dtype=torch.float)
dx = torch.empty_like(o)
dg = torch.empty_like(g)
def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
chunk_hgrn_bwd_kernel_h[grid](
g, gc, dx, do,
T, D,
BT=BT
)
def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
chunk_hgrn_bwd_kernel_o[grid](
g, gc, o, dx, dg,
o.stride(1), o.stride(2), o.stride(3),
T, D,
BT=BT, BD=BD,
num_warps=num_warps
)
if initial_state is not None:
dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp()
return dx, dg, None, None
def chunk_hgrn(
x: torch.Tensor,
g: torch.Tensor,
initial_state: torch.Tensor = None,
output_final_state: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
if initial_state is not None:
initial_state = initial_state.detach()
o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state)
return o, final_state
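# --- Hedged reference sketch (not part of the original file) ---
# naive_recurrent_hgrn used in the test below comes from fla and is not shown here. As a
# reading aid, this is a minimal sketch of the recurrence the chunked kernels appear to
# implement: h_t = exp(g_t) * h_{t-1} + x_t with g kept in log space. That form is consistent
# with the backward rule b_dg = b_o * b_dx * tl.exp(b_g) in chunk_hgrn_bwd_kernel_o above, but
# treat it as an assumption for illustration, not as the library's reference implementation.
def naive_hgrn_reference_sketch(x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None):
    # x, g: [B, H, T, D]; returns o: [B, H, T, D] and the final hidden state: [B, H, D]
    B, H, T, D = x.shape
    h = x.new_zeros(B, H, D, dtype=torch.float) if initial_state is None else initial_state.float()
    o = torch.empty_like(x, dtype=torch.float)
    for t in range(T):
        # log-space gate: scale the carried state by exp(g_t), then add the (pre-scaled) input
        h = g[:, :, t].float().exp() * h + x[:, :, t].float()
        o[:, :, t] = h
    return o.to(x.dtype), h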
if __name__ == '__main__':
import torch.nn.functional as F
from fla.ops.hgrn.naive import naive_recurrent_hgrn
from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn
B, H, T, D = 8, 4, 512, 128
dtype = torch.bfloat16
torch.manual_seed(42)
# [batch_size, n_heads, seq_len, d_head]
x = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
g = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g)
print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}')
print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}')
x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
print(f"DTYPE:\t{x.dtype}")
do = torch.randn_like(x)
h0 = torch.randn_like(x[:, :, 0])
ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True)
ref.backward(do)
ref_dx, x.grad = x.grad.clone(), None
ref_dg, g.grad = g.grad.clone(), None
tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True)
tri.backward(do)
tri_dx, x.grad = x.grad.clone(), None
tri_dg, g.grad = g.grad.clone(), None
print(" \t DIFF\t MAX")
print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}")
print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}")
print('Done!')
@triton.testing.perf_report(
triton.testing.Benchmark(
# argument names to use as an x-axis for the plot
x_names=['seq_len'],
# different possible values for `x_name`
x_vals=[128 * 2 ** i for i in range(0, 8)],
# argument name whose value corresponds to a different line in the plot
line_arg='provider',
# possible values for `line_arg`
line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
# label name for the lines
line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
# line styles
styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')],
ylabel="Execution Time (ms)", # label name for the y-axis
# name for the plot. Used also as a file name for saving the plot.
plot_name="Performance",
args={},
)
)
def benchmark(seq_len, provider):
dtype = torch.bfloat16
B, H, D = 16, 4, 128
x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda')
g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid()
x = (1 - g) * x
x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
do = torch.randn_like(x, dtype=dtype)
quantiles = [0.5, 0.2, 0.8]
results = 0, 0, 0
if provider == 'chunk':
results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles)
if provider == 'recurrent':
results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles)
if provider == 'chunk_bwd':
results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles)
if provider == 'recurrent_bwd':
results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles)
return results
benchmark.run(print_data=True)
|
INT-FlashAttention2024/INT-FlashAttention
|
flash_atten_full_int8.py
|
https://github.com/INT-FlashAttention2024/INT-FlashAttention/blob/7f7bfb00bcd26b2cef49e7783f51ef610e05abf7/flash_atten_full_int8.py
|
import pytest
import torch
import triton
import triton.language as tl
from configs import *
@triton.jit
def _attn_fwd_inner_full_int8(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
q_scale, K_block_scale_ptr, v_scale,#
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
K_block_scale_ptr = tl.advance(K_block_scale_ptr, (lo,))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
# for start_n in range(0, 32, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
k_scale = tl.load(K_block_scale_ptr)
qk = tl.dot(q, k).to(tl.float32)
qk = qk * q_scale[:, None]
qk = qk * k_scale
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
p = p.to(tl.float16)
p = p * 127
p = (p+0.5).to(tl.int8)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
tmp = tl.dot(p, v)
tmp = tmp.to(tl.float32)
tmp = tmp * v_scale / 127
acc = acc + tmp
# tmp = tl.dot(p, v)
# tl.device_print("tmp", tmp)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
K_block_scale_ptr = tl.advance(K_block_scale_ptr, (BLOCK_N,))
return acc, l_i
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd_full_int8(Q, K, V, Q_scale, K_scale, V_scale, sm_scale, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
stride_s1, stride_s2, stride_s3, #
stride_v1, stride_v2, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
scl_offset = off_z.to(tl.int64) * stride_s1 + off_h.to(tl.int64) * stride_s2
vscl_offset = off_z.to(tl.int64) * stride_v1 + off_h.to(tl.int64) * stride_v2
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=(1, 0),
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# scale vector pointers
Q_block_scale_ptr = tl.make_block_ptr(
base=Q_scale + scl_offset,
shape=(N_CTX,),
strides=(stride_s3,),
offsets=(start_m * BLOCK_M,),
block_shape=(BLOCK_M,),
order=(0,),
)
K_block_scale_ptr = tl.make_block_ptr(
base=K_scale + scl_offset,
shape=(N_CTX,),
strides=(stride_s3,),
offsets=(0,),
block_shape=(BLOCK_N,),
order=(0,),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
qk_scale *= 1.44269504 # 1/log(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
q_scale = tl.load(Q_block_scale_ptr)
v_scale = tl.load(V_scale + vscl_offset)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i = _attn_fwd_inner_full_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, v_scale, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
# barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i = _attn_fwd_inner_full_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, v_scale, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
acc = acc / l_i[:, None]
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
class _attention_full_int8(torch.autograd.Function):
@staticmethod # q, k, v: int8, q_scale, k_scale: float16
def forward(ctx, q, k, v, q_scale, k_scale, v_scale, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
o = o.to(torch.float16)
stage = 3 if causal else 1
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
_attn_fwd_full_int8[grid](
q, k, v, q_scale, k_scale, v_scale, sm_scale, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q_scale.stride(0), q_scale.stride(1), q_scale.stride(2), #
v_scale.stride(0), v_scale.stride(1), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
attention_full_int8 = _attention_full_int8.apply
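# --- Hedged usage sketch (not part of the original file) ---
# attention_full_int8 expects already-quantized int8 q/k/v plus their dequantization scales.
# The layout assumed below (per-row fp16 scales of shape [Z, H, N_CTX] for q and k, a per-head
# scale of shape [Z, H] for v, symmetric absmax/127 quantization) is inferred from the stride
# arguments passed to _attn_fwd_full_int8 above; treat it as an assumption, not a documented API.
def _int8_attention_usage_sketch(Z=1, H=4, N_CTX=1024, HEAD_DIM=64):
    def quantize_rows(t):
        # t: [Z, H, N_CTX, HEAD_DIM] fp16 -> int8 tensor + per-row fp16 scales [Z, H, N_CTX]
        scale = t.abs().amax(dim=-1).clamp(min=1e-6) / 127.0
        q = torch.clamp(torch.round(t / scale[..., None]), -127, 127).to(torch.int8)
        return q, scale.to(torch.float16)

    def quantize_head(t):
        # t: [Z, H, N_CTX, HEAD_DIM] fp16 -> int8 tensor + per-head fp16 scales [Z, H]
        scale = t.abs().amax(dim=(-1, -2)).clamp(min=1e-6) / 127.0
        q = torch.clamp(torch.round(t / scale[..., None, None]), -127, 127).to(torch.int8)
        return q, scale.to(torch.float16)

    qf = torch.randn(Z, H, N_CTX, HEAD_DIM, device='cuda', dtype=torch.float16)
    kf = torch.randn_like(qf)
    vf = torch.randn_like(qf)
    q, q_scale = quantize_rows(qf)
    k, k_scale = quantize_rows(kf)
    v, v_scale = quantize_head(vf)
    sm_scale = HEAD_DIM ** -0.5
    # causal=False here; the kernel's STAGE logic above also covers the causal variant
    return attention_full_int8(q, k, v, q_scale, k_scale, v_scale, False, sm_scale)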
|
@triton.jit
def _attn_fwd_inner_full_int8(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
q_scale, K_block_scale_ptr, v_scale,#
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
K_block_scale_ptr = tl.advance(K_block_scale_ptr, (lo,))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
# for start_n in range(0, 32, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
k_scale = tl.load(K_block_scale_ptr)
qk = tl.dot(q, k).to(tl.float32)
qk = qk * q_scale[:, None]
qk = qk * k_scale
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
p = p.to(tl.float16)
p = p * 127
p = (p+0.5).to(tl.int8)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
tmp = tl.dot(p, v)
tmp = tmp.to(tl.float32)
tmp = tmp * v_scale / 127
acc = acc + tmp
# tmp = tl.dot(p, v)
# tl.device_print("tmp", tmp)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
K_block_scale_ptr = tl.advance(K_block_scale_ptr, (BLOCK_N,))
return acc, l_i
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
|
INT-FlashAttention2024/INT-FlashAttention
|
flash_atten_full_int8.py
|
https://github.com/INT-FlashAttention2024/INT-FlashAttention/blob/7f7bfb00bcd26b2cef49e7783f51ef610e05abf7/flash_atten_full_int8.py
|
import pytest
import torch
import triton
import triton.language as tl
from configs import *
@triton.jit
def _attn_fwd_inner_full_int8(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
q_scale, K_block_scale_ptr, v_scale,#
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
K_block_scale_ptr = tl.advance(K_block_scale_ptr, (lo,))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
# for start_n in range(0, 32, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
k_scale = tl.load(K_block_scale_ptr)
qk = tl.dot(q, k).to(tl.float32)
qk = qk * q_scale[:, None]
qk = qk * k_scale
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
p = p.to(tl.float16)
p = p * 127
p = (p+0.5).to(tl.int8)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
tmp = tl.dot(p, v)
tmp = tmp.to(tl.float32)
tmp = tmp * v_scale / 127
acc = acc + tmp
# tmp = tl.dot(p, v)
# tl.device_print("tmp", tmp)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
K_block_scale_ptr = tl.advance(K_block_scale_ptr, (BLOCK_N,))
return acc, l_i
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd_full_int8(Q, K, V, Q_scale, K_scale, V_scale, sm_scale, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
stride_s1, stride_s2, stride_s3, #
stride_v1, stride_v2, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
scl_offset = off_z.to(tl.int64) * stride_s1 + off_h.to(tl.int64) * stride_s2
vscl_offset = off_z.to(tl.int64) * stride_v1 + off_h.to(tl.int64) * stride_v2
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=(1, 0),
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# scale vector pointers
Q_block_scale_ptr = tl.make_block_ptr(
base=Q_scale + scl_offset,
shape=(N_CTX,),
strides=(stride_s3,),
offsets=(start_m * BLOCK_M,),
block_shape=(BLOCK_M,),
order=(0,),
)
K_block_scale_ptr = tl.make_block_ptr(
base=K_scale + scl_offset,
shape=(N_CTX,),
strides=(stride_s3,),
offsets=(0,),
block_shape=(BLOCK_N,),
order=(0,),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
qk_scale *= 1.44269504 # 1/log(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
q_scale = tl.load(Q_block_scale_ptr)
v_scale = tl.load(V_scale + vscl_offset)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i = _attn_fwd_inner_full_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, v_scale, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
# barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i = _attn_fwd_inner_full_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, v_scale, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
acc = acc / l_i[:, None]
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
class _attention_full_int8(torch.autograd.Function):
@staticmethod # q, k, v: int8, q_scale, k_scale: float16
def forward(ctx, q, k, v, q_scale, k_scale, v_scale, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
o = o.to(torch.float16)
stage = 3 if causal else 1
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
_attn_fwd_full_int8[grid](
q, k, v, q_scale, k_scale, v_scale, sm_scale, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q_scale.stride(0), q_scale.stride(1), q_scale.stride(2), #
v_scale.stride(0), v_scale.stride(1), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
attention_full_int8 = _attention_full_int8.apply
|
@triton.jit
def _attn_fwd_full_int8(Q, K, V, Q_scale, K_scale, V_scale, sm_scale, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
stride_s1, stride_s2, stride_s3, #
stride_v1, stride_v2, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
scl_offset = off_z.to(tl.int64) * stride_s1 + off_h.to(tl.int64) * stride_s2
vscl_offset = off_z.to(tl.int64) * stride_v1 + off_h.to(tl.int64) * stride_v2
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=(1, 0),
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# scale vector pointers
Q_block_scale_ptr = tl.make_block_ptr(
base=Q_scale + scl_offset,
shape=(N_CTX,),
strides=(stride_s3,),
offsets=(start_m * BLOCK_M,),
block_shape=(BLOCK_M,),
order=(0,),
)
K_block_scale_ptr = tl.make_block_ptr(
base=K_scale + scl_offset,
shape=(N_CTX,),
strides=(stride_s3,),
offsets=(0,),
block_shape=(BLOCK_N,),
order=(0,),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
qk_scale *= 1.44269504 # 1/log(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
q_scale = tl.load(Q_block_scale_ptr)
v_scale = tl.load(V_scale + vscl_offset)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i = _attn_fwd_inner_full_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, v_scale, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
# barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i = _attn_fwd_inner_full_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, v_scale, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
acc = acc / l_i[:, None]
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
class _attention_full_int8(torch.autograd.Function):
@staticmethod # q, k, v: int8, q_scale, k_scale: float16
def forward(ctx, q, k, v, q_scale, k_scale, v_scale, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
o = o.to(torch.float16)
stage = 3 if causal else 1
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
_attn_fwd_full_int8[grid](
q, k, v, q_scale, k_scale, v_scale, sm_scale, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q_scale.stride(0), q_scale.stride(1), q_scale.stride(2), #
v_scale.stride(0), v_scale.stride(1), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
attention_full_int8 = _attention_full_int8.apply
|
TD87/triton-kernels
|
gemm_matmul.py
|
https://github.com/TD87/triton-kernels/blob/17a97ede7b6d0ca7356db68b56d0e5b6a9080ad4/gemm_matmul.py
|
import math
import torch # type: ignore
import triton # type: ignore
import triton.language as tl # type: ignore
@triton.jit()
def matmul_kernel(x_ptr, y_ptr, out_ptr, M, N, K, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr):
pid_r = tl.program_id(0)
pid_c = tl.program_id(1)
row_start = pid_r * BLOCK_M
row_offsets = row_start + tl.arange(0, BLOCK_M)
col_start = pid_c * BLOCK_N
col_offsets = col_start + tl.arange(0, BLOCK_N)
out = tl.zeros((BLOCK_M, BLOCK_N), dtype = tl.float32)
for k in tl.range(0, K, BLOCK_K):
k_offsets = k + tl.arange(0, BLOCK_K)
row = row_offsets[:, None] * K + k_offsets[None, :]
mask = (row_offsets[:, None] < M) & (k_offsets[None, :] < K)
x = tl.load(x_ptr + row, mask = mask)
col = col_offsets[None, :] + k_offsets[:, None] * N
mask = (col_offsets[None, :] < N) & (k_offsets[:, None] < K)
y = tl.load(y_ptr + col, mask = mask)
out = tl.dot(x, y, out)
out_offsets = row_offsets[:, None] * N + col_offsets[None, :]
mask = (row_offsets[:, None] < M) & (col_offsets[None, :] < N)
tl.store(out_ptr + out_offsets, out, mask = mask)
def matmul(x, y, BLOCK_M = 128, BLOCK_N = 64, BLOCK_K = 64):
M, K = x.size()
N = y.size(1)
assert K == y.size(0)
out = torch.empty(M, N, device = 'cuda', dtype = torch.float32)
grid = (math.ceil(M / BLOCK_M), math.ceil(N / BLOCK_N))
matmul_kernel[grid](x, y, out, M, N, K, BLOCK_M, BLOCK_N, BLOCK_K)
return out
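# --- Hedged correctness check (not part of the original file) ---
# A quick sanity check of matmul() against torch.matmul before benchmarking; the tolerances
# are an assumption (tl.dot may use TF32 for fp32 inputs), not values taken from this repo.
def check_matmul(M=512, N=256, K=384):
    x = torch.randn(M, K, device='cuda', dtype=torch.float32)
    y = torch.randn(K, N, device='cuda', dtype=torch.float32)
    out = matmul(x, y)
    ref = torch.matmul(x, y)
    max_err = (out - ref).abs().max().item()
    assert torch.allclose(out, ref, atol=1e-1, rtol=1e-2), f"max abs err {max_err}"
    return max_err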
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names = ["M", "N", "K"],
x_vals = [128 * i for i in range(2, 33)],
line_arg = "provider",
line_vals = ["triton", "torch"],
line_names = ["Triton", "Torch"],
styles = [("green", "-"), ("blue", "-")],
ylabel = "TFLOPS",
plot_name = "matmul-performance",
args = {},
))
def benchmark(M, N, K, provider):
x = torch.randn(M, K, device = 'cuda', dtype = torch.float32)
y = torch.randn(K, N, device = 'cuda', dtype = torch.float32)
if provider == "torch":
ms = triton.testing.do_bench(lambda: torch.matmul(x, y))
else:
ms = triton.testing.do_bench(lambda: matmul(x, y))
tflops = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)
return tflops(ms)
benchmark.run(print_data = True, save_path = "plots")
|
@triton.jit()
def matmul_kernel(x_ptr, y_ptr, out_ptr, M, N, K, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr,
BLOCK_K: tl.constexpr):
pid_r = tl.program_id(0)
pid_c = tl.program_id(1)
row_start = pid_r * BLOCK_M
row_offsets = row_start + tl.arange(0, BLOCK_M)
col_start = pid_c * BLOCK_N
col_offsets = col_start + tl.arange(0, BLOCK_N)
out = tl.zeros((BLOCK_M, BLOCK_N), dtype = tl.float32)
for k in tl.range(0, K, BLOCK_K):
k_offsets = k + tl.arange(0, BLOCK_K)
row = row_offsets[:, None] * K + k_offsets[None, :]
mask = (row_offsets[:, None] < M) & (k_offsets[None, :] < K)
x = tl.load(x_ptr + row, mask = mask)
col = col_offsets[None, :] + k_offsets[:, None] * N
mask = (col_offsets[None, :] < N) & (k_offsets[:, None] < K)
y = tl.load(y_ptr + col, mask = mask)
out = tl.dot(x, y, out)
out_offsets = row_offsets[:, None] * N + col_offsets[None, :]
mask = (row_offsets[:, None] < M) & (col_offsets[None, :] < N)
tl.store(out_ptr + out_offsets, out, mask = mask)
def matmul(x, y, BLOCK_M = 128, BLOCK_N = 64, BLOCK_K = 64):
M, K = x.size()
N = y.size(1)
assert K == y.size(0)
out = torch.empty(M, N, device = 'cuda', dtype = torch.float32)
grid = (math.ceil(M / BLOCK_M), math.ceil(N / BLOCK_N))
matmul_kernel[grid](x, y, out, M, N, K, BLOCK_M, BLOCK_N, BLOCK_K)
return out
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names = ["M", "N", "K"],
x_vals = [128 * i for i in range(2, 33)],
line_arg = "provider",
line_vals = ["triton", "torch"],
line_names = ["Triton", "Torch"],
styles = [("green", "-"), ("blue", "-")],
ylabel = "TFLOPS",
plot_name = "matmul-performance",
args = {},
))
def benchmark(M, N, K, provider):
x = torch.randn(M, K, device = 'cuda', dtype = torch.float32)
y = torch.randn(K, N, device = 'cuda', dtype = torch.float32)
if provider == "torch":
ms = triton.testing.do_bench(lambda: torch.matmul(x, y))
else:
ms = triton.testing.do_bench(lambda: matmul(x, y))
tflops = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)
return tflops(ms)
benchmark.run(print_data = True, save_path = "plots")
|
xiaonans/triton-gemm-benchmark
|
kernels/basic_matmul.py
|
https://github.com/xiaonans/triton-gemm-benchmark/blob/436ee5a77e01ede7e4a1fe015f533dfdc53b31d3/kernels/basic_matmul.py
|
import triton
import triton.language as tl
import torch
from .autotune_config import get_autotune_config
# `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes:
# - A list of `triton.Config` objects that define different configurations of
# meta-parameters (e.g., `BLOCK_SIZE_M`) and compilation options (e.g., `num_warps`) to try
# - An auto-tuning *key* whose change in values will trigger evaluation of all the
# provided configs
@triton.autotune(
configs=get_autotune_config(),
key=['M', 'N', 'K'],
)
@triton.jit
def matmul_kernel(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak, #
stride_bk, stride_bn, #
stride_cm, stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, #
GROUP_SIZE_M: tl.constexpr, #
ACTIVATION: tl.constexpr #
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetic` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
accumulator = tl.dot(a, b, accumulator)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
# You can fuse arbitrary activation functions here
# while the accumulator is still in FP32!
if ACTIVATION == "leaky_relu":
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul_kernel`.
@triton.jit
def leaky_relu(x):
return tl.where(x >= 0, x, 0.01 * x)
def matmul(a, b, activation=""):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
assert a.is_contiguous(), "Matrix A must be contiguous"
M, K = a.shape
K, N = b.shape
# Allocates output.
c = torch.empty((M, N), device=a.device, dtype=torch.float16)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), )
matmul_kernel[grid](
a, b, c, #
M, N, K, #
a.stride(0), a.stride(1), #
b.stride(0), b.stride(1), #
c.stride(0), c.stride(1), #
ACTIVATION=activation #
)
return c
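# --- Hedged usage sketch (not part of the original file) ---
# Minimal call of the autotuned kernel above with the fused leaky_relu epilogue; the reference
# below mirrors the kernel's own leaky_relu (slope 0.01) rather than relying on torch.nn.
def _matmul_usage_sketch(M=512, N=512, K=512):
    a = torch.randn(M, K, device='cuda', dtype=torch.float16)
    b = torch.randn(K, N, device='cuda', dtype=torch.float16)
    c = matmul(a, b, activation="leaky_relu")
    ref = torch.matmul(a, b)
    ref = torch.where(ref >= 0, ref, 0.01 * ref)
    # fp16 rounding differences are expected; report the max deviation instead of asserting
    return (c.float() - ref.float()).abs().max().item()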
|
@triton.jit
def matmul_kernel(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak, #
stride_bk, stride_bn, #
stride_cm, stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, #
GROUP_SIZE_M: tl.constexpr, #
ACTIVATION: tl.constexpr #
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetic` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
accumulator = tl.dot(a, b, accumulator)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
# You can fuse arbitrary activation functions here
# while the accumulator is still in FP32!
if ACTIVATION == "leaky_relu":
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul_kernel`.
|
xiaonans/triton-gemm-benchmark
|
kernels/basic_matmul.py
|
https://github.com/xiaonans/triton-gemm-benchmark/blob/436ee5a77e01ede7e4a1fe015f533dfdc53b31d3/kernels/basic_matmul.py
|
import triton
import triton.language as tl
import torch
from .autotune_config import get_autotune_config
# `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes:
# - A list of `triton.Config` objects that define different configurations of
# meta-parameters (e.g., `BLOCK_SIZE_M`) and compilation options (e.g., `num_warps`) to try
# - An auto-tuning *key* whose change in values will trigger evaluation of all the
# provided configs
@triton.autotune(
configs=get_autotune_config(),
key=['M', 'N', 'K'],
)
@triton.jit
def matmul_kernel(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak, #
stride_bk, stride_bn, #
stride_cm, stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, #
GROUP_SIZE_M: tl.constexpr, #
ACTIVATION: tl.constexpr #
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetic` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
accumulator = tl.dot(a, b, accumulator)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
# You can fuse arbitrary activation functions here
# while the accumulator is still in FP32!
if ACTIVATION == "leaky_relu":
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul_kernel`.
@triton.jit
def leaky_relu(x):
return tl.where(x >= 0, x, 0.01 * x)
def matmul(a, b, activation=""):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
assert a.is_contiguous(), "Matrix A must be contiguous"
M, K = a.shape
K, N = b.shape
# Allocates output.
c = torch.empty((M, N), device=a.device, dtype=torch.float16)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), )
matmul_kernel[grid](
a, b, c, #
M, N, K, #
a.stride(0), a.stride(1), #
b.stride(0), b.stride(1), #
c.stride(0), c.stride(1), #
ACTIVATION=activation #
)
return c
|
@triton.jit
def leaky_relu(x):
return tl.where(x >= 0, x, 0.01 * x)
def matmul(a, b, activation=""):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
assert a.is_contiguous(), "Matrix A must be contiguous"
M, K = a.shape
K, N = b.shape
# Allocates output.
c = torch.empty((M, N), device=a.device, dtype=torch.float16)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), )
matmul_kernel[grid](
a, b, c, #
M, N, K, #
a.stride(0), a.stride(1), #
b.stride(0), b.stride(1), #
c.stride(0), c.stride(1), #
ACTIVATION=activation #
)
return c
|
xiaohuguo2023/scripts
|
others/tune_gemm1.py
|
https://github.com/xiaohuguo2023/scripts/blob/b6de80a590c78e78a4f8d64346c34ef445e2aa17/others/tune_gemm1.py
|
import argparse
import sys
import yaml
import os
import glob
import subprocess
import torch
import triton
import triton.language as tl
from matmul_kernel import matmul_kernel
from datetime import datetime
import pandas as pd
import torch.distributed as dist
from torch.multiprocessing import spawn
def get_full_tuning_space():
configs = []
block_mn_range = [16, 32, 64, 128, 256]
block_k_range = [16, 32, 64, 128, 256]
split_k_range = [1, 2, 4, 5, 6, 8, 10, 12, 16, 18, 24]
num_warps_range = [1, 2, 4, 8]
group_m_range = [1, 4, 8, 16, 32]
num_stage_range = [0]
waves_per_eu_range = [0]
matrix_instr_nonkdim_range = [16, 32]
kpack_range = [1, 2]
for block_m in block_mn_range:
for block_n in block_mn_range:
for block_k in block_k_range:
for num_warps in num_warps_range:
for group_m in group_m_range:
for split_k in split_k_range:
for num_stages in num_stage_range:
for waves_per_eu in waves_per_eu_range:
for matrix_instr_nonkdim in matrix_instr_nonkdim_range:
for kpack in kpack_range:
configs.append({
'BLOCK_SIZE_M': block_m, 'BLOCK_SIZE_N': block_n, 'BLOCK_SIZE_K': block_k,
'GROUP_SIZE_M': group_m, 'SPLIT_K': split_k, 'num_warps': num_warps,
'num_stages': num_stages, 'waves_per_eu': waves_per_eu,
'matrix_instr_nonkdim': matrix_instr_nonkdim, 'kpack': kpack
})
return configs
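# --- Hedged equivalent sketch (not part of the original file) ---
# The ten nested loops above enumerate a full Cartesian product of the tuning parameters; the
# same space can be written more compactly with itertools.product. Not used by the code below,
# shown only to make the structure of the search space explicit.
def get_full_tuning_space_compact():
    import itertools
    block_mn = [16, 32, 64, 128, 256]
    block_k = [16, 32, 64, 128, 256]
    split_k = [1, 2, 4, 5, 6, 8, 10, 12, 16, 18, 24]
    num_warps = [1, 2, 4, 8]
    group_m = [1, 4, 8, 16, 32]
    keys = ('BLOCK_SIZE_M', 'BLOCK_SIZE_N', 'BLOCK_SIZE_K', 'num_warps', 'GROUP_SIZE_M',
            'SPLIT_K', 'num_stages', 'waves_per_eu', 'matrix_instr_nonkdim', 'kpack')
    return [dict(zip(keys, vals)) for vals in itertools.product(
        block_mn, block_mn, block_k, num_warps, group_m, split_k, [0], [0], [16, 32], [1, 2])]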
def prune_configs(M, N, K, configs, elemBytes_a, elemBytes_b):
pruned_configs = []
if M < 32 or N < 32:
mfma = 16
else:
mfma = 32
large_gemm = False
if M >= 2048 and N >=2048:
large_gemm = True
for config in configs:
BLOCK_SIZE_M = config.get("BLOCK_SIZE_M")
BLOCK_SIZE_N = config.get("BLOCK_SIZE_N")
BLOCK_SIZE_K = config.get("BLOCK_SIZE_K")
num_warps = config.get("num_warps")
matrix_instr_nonkdim = config.get("matrix_instr_nonkdim")
kpack = config.get("kpack")
if matrix_instr_nonkdim > mfma:
continue
if mfma == 4 and BLOCK_SIZE_K < 64:
continue
if BLOCK_SIZE_M * BLOCK_SIZE_N < 64:
continue
SPLIT_K = config.get("SPLIT_K")
GROUP_M = config.get("GROUP_SIZE_M")
if BLOCK_SIZE_M < matrix_instr_nonkdim or BLOCK_SIZE_N < matrix_instr_nonkdim:
continue
if M <= matrix_instr_nonkdim and BLOCK_SIZE_M != matrix_instr_nonkdim:
continue
if N <= matrix_instr_nonkdim and BLOCK_SIZE_N != matrix_instr_nonkdim:
continue
if BLOCK_SIZE_M > M * 2 and BLOCK_SIZE_M != 16:
continue
if BLOCK_SIZE_N > N * 2 and BLOCK_SIZE_N != 16:
continue
if SPLIT_K != 1 and not need_split_k(M, N, K):
continue
leap = SPLIT_K * BLOCK_SIZE_K
modv = K % leap
if modv != 0:
continue
if GROUP_M * BLOCK_SIZE_M > M and GROUP_M != 1:
continue
LDS = BLOCK_SIZE_K * BLOCK_SIZE_M * elemBytes_a + BLOCK_SIZE_K * BLOCK_SIZE_N * elemBytes_b
if LDS > 65536:
continue
if large_gemm:
if BLOCK_SIZE_M < 64 or BLOCK_SIZE_N < 64:
continue
if BLOCK_SIZE_K < 64:
continue
if num_warps < 4:
continue
pruned_configs.append(config)
return pruned_configs
def need_split_k(SIZE_M, SIZE_N, SIZE_K):
return (SIZE_M < 64 or SIZE_N < 64) and SIZE_K > 1024
def run_bash_command_wrapper(commandstring, capture=True):
try:
run_bash_command(commandstring, capture)
except subprocess.CalledProcessError as e:
if not capture:
print(f"running {commandstring} one more time")
run_bash_command(commandstring, capture)
def run_bash_command(commandstring, capture=True):
if capture:
proc = subprocess.run(commandstring, shell=True, check=True, executable='/bin/bash', stdout=subprocess.PIPE)
return proc.stdout.splitlines()
proc = subprocess.run(commandstring, shell=True, check=True, executable='/bin/bash')
return None
def read_config(config):
block_m = config.get('BLOCK_SIZE_M')
block_n = config.get('BLOCK_SIZE_N')
block_k = config.get('BLOCK_SIZE_K')
group_m = config.get('GROUP_SIZE_M')
split_k = config.get('SPLIT_K')
num_warps = config.get('num_warps')
num_stages = config.get('num_stages')
waves_per_eu = config.get('waves_per_eu')
mfma_instr_size = config.get('matrix_instr_nonkdim')
kpack = config.get('kpack')
return block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfma_instr_size, kpack
def gen_kernel_and_configStr_from_config(M, N, K, config, dtype_a, dtype_b, dtype_c):
block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack = read_config(config)
torch_dtype_a = 'fp16'
torch_dtype_b = 'fp16'
torch_dtype_c = 'fp16'
if dtype_a:
torch_dtype_a = tl_to_torch_types[name_to_tl_types[dtype_a]]
if dtype_b:
torch_dtype_b = tl_to_torch_types[name_to_tl_types[dtype_b]]
if dtype_c:
torch_dtype_c = tl_to_torch_types[name_to_tl_types[dtype_c]]
configStr = f"M{M}_N{N}_K{K}_BM{block_m}_BN{block_n}_BK{block_k}_GM{group_m}_SK{split_k}_nW{num_warps}_nS{num_stages}_EU{waves_per_eu}_kP{kpack}_mfma{mfmaInstrSize}"
matmul_def_str = f"""
def matmul_{configStr}(a, b, c, M, N, K, am, ak, bk, bn, cm, cn, warmup=False):
grid = triton.cdiv(M, {block_m}) * triton.cdiv(N, {block_n}), {split_k}
if warmup:
matmul_kernel_{configStr}.warmup(
{torch_dtype_a}, {torch_dtype_b}, {torch_dtype_c},
M, N, K,
am, ak, bk, bn, cm, cn,
BLOCK_SIZE_M = {block_m},
BLOCK_SIZE_N = {block_n},
BLOCK_SIZE_K = {block_k},
GROUP_SIZE_M = {group_m},
SPLIT_K = {split_k},
num_warps = {num_warps},
num_stages = {num_stages},
waves_per_eu = {waves_per_eu},
matrix_instr_nonkdim = {mfmaInstrSize},
kpack = {kpack},
grid=(1,)
)
return None
else:
matmul_kernel_{configStr}[grid](
a, b, c,
M, N, K,
am, ak, bk, bn, cm, cn,
BLOCK_SIZE_M = {block_m},
BLOCK_SIZE_N = {block_n},
BLOCK_SIZE_K = {block_k},
GROUP_SIZE_M = {group_m},
SPLIT_K = {split_k},
num_warps = {num_warps},
num_stages = {num_stages},
waves_per_eu = {waves_per_eu},
matrix_instr_nonkdim = {mfmaInstrSize},
kpack = {kpack}
)
return c
def try_config_{configStr}(M, N, K, am, ak, bk, bn, cm, cn):
try:
matmul_{configStr}(None, None, None, M, N, K, am, ak, bk, bn, cm, cn, True)
return True
except Exception as e:
print(f'invalid config(compilation): {configStr}: ', e, flush=True)
return False
"""
return configStr, matmul_def_str
def generated_kernel_name(M, N, K, gpu_id):
path = os.path.dirname(os.path.abspath(__file__))
return f"{path}/generated_kernel{M}-{N}-{K}-{gpu_id}.py"
def generate_kernel(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, configs, jobs, iters, run_bench):
filenames = [generated_kernel_name(M, N, K, i) for i in range(jobs)]
f_kernel = [open(path, 'w') for path in filenames]
import_str = """import torch
import triton
import triton.language as tl
import argparse
import sys
import torch.multiprocessing as mp
from tune_gemm import gen_input
"""
for fi in range(jobs):
f_kernel[fi].write(import_str + "\n")
with open(os.path.dirname(os.path.abspath(__file__))+"/matmul_kernel.py") as file:
matmul_kernel_code = file.read()
idx = 0
for config in configs:
file_idx = idx % jobs
configStr, matmul_def_str = gen_kernel_and_configStr_from_config(M, N, K, config, dtype_a, dtype_b, dtype_c)
matmul_kernel_config = matmul_kernel_code.replace("matmul_kernel", f"matmul_kernel_{configStr}")
matmul_kernel_config = matmul_kernel_config.replace("import triton.language as tl", "")
matmul_kernel_config = matmul_kernel_config.replace("import triton", "")
f_kernel[file_idx].write(matmul_kernel_config + "\n\n")
f_kernel[file_idx].write(matmul_def_str + "\n")
idx += 1
test_gemm_pre_str = f"""def test_gemm(M, N, K, num_threads):
results = []
config_names = []
a, a_fp16 = gen_input(M, K, '{dtype_a}', {col_a}, 1, '{init_type}', device='cuda')
b, b_fp16 = gen_input(K, N, '{dtype_b}', {col_b}, 2, '{init_type}', device='cuda')
c = torch.zeros((M, N), device=a.device, dtype={tl_to_torch_types[name_to_tl_types[dtype_c]]})
task_args = (M, N, K,
a.stride(0), a.stride(1),
b.stride(0), b.stride(1),
c.stride(0), c.stride(1))
"""
for fi in range(jobs):
f_kernel[fi].write(test_gemm_pre_str + "\n")
idx = 0
for config in configs:
configStr, _ = gen_kernel_and_configStr_from_config(M, N, K, config, None, None, None)
task_str = f" results.append(try_config_{configStr}(*task_args))\n"
task_str += f" config_names.append('{configStr}')\n"
f_kernel[idx % jobs].write(task_str)
idx += 1
for fi in range(jobs):
threadpool_str = """
failed_configs = []
for i in range(len(results)):
if not results[i]:
failed_configs.append(config_names[i])
with open("{filename}.failed_configs", "w") as f:
for cfg in failed_configs:
f.write(cfg + "\\n")
else:
try:
with open("{filename}.failed_configs", "r") as f:
failed_configs = [cfg.strip() for cfg in f.readlines()]
except Exception:
failed_configs = []
""".format(filename=filenames[fi])
f_kernel[fi].write(threadpool_str)
idx = 0
runs = iters if run_bench else 200
for config in configs:
configStr, _ = gen_kernel_and_configStr_from_config(M, N, K, config, None, None, None)
matmul_call_str = f"""
if '{configStr}' not in failed_configs:
for i in range({runs}):
d = matmul_{configStr}(a, b, c, M, N, K, a.stride(0), a.stride(1), b.stride(0), b.stride(1), c.stride(0), c.stride(1))"""
f_kernel[idx % jobs].write(matmul_call_str + "\n")
idx += 1
for fi in range(jobs):
f_kernel[fi].write(" return d\n")
def_main_str = """
def main():
parser = argparse.ArgumentParser(
prog="tune a specific gemm size",
allow_abbrev=False,)
parser.add_argument("-n", type=int, default=1, help='number of threads')
args = parser.parse_args()
numThreads = args.n
"""
test_gemm_call_str = f'test_gemm({M}, {N}, {K}, numThreads)'
for fi in range(jobs):
f_kernel[fi].write(def_main_str)
f_kernel[fi].write(test_gemm_call_str + "\n\n")
f_kernel[fi].write("""if __name__ == '__main__':
sys.exit(main())""")
f_kernel[fi].close()
def extract_kernel_time(M, N, K, config, df):
configStr, _ = gen_kernel_and_configStr_from_config(M, N, K, config, None, None, None)
df = df[df['KernelName'].str.contains(configStr)]
meanTime = df['DurationNs'].tail(100).mean()
return config, meanTime
def profile_batch_kernels(rank, world_size, M, N, K, jobs, verbose):
ngpus = world_size
gpu_id = rank
os.environ['ROCR_VISIBLE_DEVICES'] = str(gpu_id)
jobId = gpu_id
while jobId < jobs:
if verbose:
print(f"profiling {generated_kernel_name(M, N, K, jobId)} on GPU {gpu_id}")
run_bash_command_wrapper(f"rocprof --stats -o results-{jobId}.csv python {generated_kernel_name(M, N, K, jobId)}", capture=(verbose < 2))
jobId += ngpus
def tune_gemm_config(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, configs, run_bench, jobs, iters, skipWarmup, verbose=0, num_threads=16):
setup(rank, world_size)
if rank == 0:
generate_kernel(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, configs, jobs, iters, run_bench)
run_bash_command("rm -rf ~/.triton/cache")
start_time = datetime.now()
if not skipWarmup:
for i in range(jobs):
run_bash_command(f"python {generated_kernel_name(M, N, K, i)} -n {num_threads}", capture=(verbose < 2))
compile_end = datetime.now()
compile_time = compile_end - start_time
if verbose:
print(f"compile time: {compile_time}", flush=True)
dist.barrier()
profile_batch_kernels(rank, world_size, M, N, K, jobs, verbose)
dist.barrier()
if rank == 0:
profile_end = datetime.now()
profile_time = profile_end - compile_end
if verbose:
print(f"profile time: {profile_time}", flush=True)
minTime = 1024 * 1024 * 1024
tasks = []
idx = 0
df_prof = [pd.read_csv(f"results-{i}.csv") for i in range(jobs)]
for config in configs:
file_idx = idx % jobs
tasks.append((M, N, K, config, df_prof[file_idx]))
idx += 1
for task in tasks:
config, myTime = extract_kernel_time(*task)
if myTime:
min_us = myTime / 1000
if min_us < minTime:
minTime = min_us
bestConfig = config
else:
min_us = -1
print(f"invalid config(post processing): SIZE {M} {N} {K}: {config}", flush=True)
post_end = datetime.now()
post_time = post_end - profile_end
if verbose:
print(f"post processing time: {post_time}", flush=True)
return minTime, bestConfig, compile_time, profile_time, post_time
cleanup()
def gen_input(M, N, ty_name, needTrans, seed, init_type, device='cuda'):
d_type = name_to_tl_types[ty_name]
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
@triton.jit
def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
input = tl.load(input_ptr + offsets, mask=mask)
output = input
tl.store(output_ptr + offsets, output, mask=mask)
def init_by_size_and_type(size, dtype, init_type):
if init_type == 'hpl':
return torch.empty(size, device='cuda', dtype=dtype).uniform_(-0.5, 0.5)
# This init type has element[i] in row[j] equal to sin(i+j*N)
elif init_type == 'trig_float':
M, N = size
return torch.reshape(torch.arange(0, M*N), (M, N)).sin().to(dtype=dtype, device='cuda')
elif init_type == 'zeros':
return torch.zeros(size, dtype=dtype, device='cuda')
elif init_type == "randn":
temp = torch.randn(size, dtype=dtype, device='cuda')
return temp
else:
raise ValueError("Bad matrix initialization type.")
raw_data = init_by_size_and_type((N,M) if needTrans else (M,N), torch.float32, init_type)
if needTrans:
raw_data = raw_data.T
if (d_type == tl.float8e4b8 and TORCH_HAS_FP8E4B8) or \
(d_type == tl.float8e5b16 and TORCH_HAS_FP8E5B16) or not d_type.is_fp8():
input = raw_data.to(tl_to_torch_types[d_type])
input_f16 = input.to(torch.float16)
else:
f8_tensor = raw_data.to(torch.int8)
# keep only two bits of exponent to avoid overflow
f8_tensor = f8_tensor & 0b00111111
input = triton.reinterpret(f8_tensor, d_type)
input_f16 = torch.empty_like(f8_tensor, dtype=torch.float16)
n_elements = raw_data.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
copy_kernel[grid](input, input_f16, n_elements, BLOCK_SIZE=1024)
return input, input_f16
def matmul(a, b, c, block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
#assert a.is_contiguous(), "Matrix A must be contiguous"
#assert b.is_contiguous(), "Matrix B must be contiguous"
M, K = a.shape
K, N = b.shape
# 1D launch kernel where each block gets its own program.
grid = triton.cdiv(M, block_m) * triton.cdiv(N, block_n), split_k
matmul_kernel[grid](
a, b, c,
M, N, K,
a.stride(0), a.stride(1),
b.stride(0), b.stride(1),
c.stride(0), c.stride(1),
BLOCK_SIZE_M=block_m,
BLOCK_SIZE_N=block_n,
BLOCK_SIZE_K=block_k,
GROUP_SIZE_M=group_m,
SPLIT_K=split_k,
num_warps=num_warps,
num_stages=num_stages,
waves_per_eu=waves_per_eu,
matrix_instr_nonkdim = mfmaInstrSize,
kpack = kpack
)
return c
def test_correctness(M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, config, verbose):
block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack = read_config(config)
torch.manual_seed(0)
#a = torch.randn((M, K), device='cuda', dtype=datatype)
#b = torch.randn((K, N), device='cuda', dtype=datatype)
a, a_fp16 = gen_input(M, K, dtype_a, col_a, 1, init_type, device='cuda')
b, b_fp16 = gen_input(K, N, dtype_b, col_b, 2, init_type, device='cuda')
# Allocates output.
c = torch.zeros((M, N), device=a.device, dtype=tl_to_torch_types[name_to_tl_types[dtype_c]])
triton_output = matmul(a, b, c, block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack)
torch_output = torch.matmul(a_fp16, b_fp16)
# print(f"triton_output={triton_output}")
# print(f"torch_output={torch_output}")
rtol = 0 if torch.version.hip is None else 1e-2
atol = 1e-3 if split_k == 1 else 4e-2
row_a_str = 'N' if col_a else 'T'
row_b_str = 'N' if col_b else 'T'
size_str = ''
if verbose:
size_str = f'SIZE M: {M}, N: {N}, K: {K}, trans: {row_a_str}{row_b_str}'
if torch.allclose(triton_output.to(torch.float16), torch_output, atol=atol, rtol=rtol):
print(f'{size_str} Correct✅')
else:
print(f'{size_str} Incorrect❌')
def get_default_tuning_result_filename():
git_branch_name = run_bash_command("git rev-parse --abbrev-ref HEAD")
git_branch_name = git_branch_name[0].decode()
git_commit_hash = run_bash_command("git rev-parse --short HEAD")
git_commit_hash = git_commit_hash[0].decode()
dt_string = datetime.now().strftime("%m-%d-%Y-%H:%M:%S")
defaultName = f"tuning_results_{git_branch_name}@{git_commit_hash}_{dt_string}.yaml"
return defaultName
def parse_args():
parser = argparse.ArgumentParser(
prog="tune a specific gemm size",
allow_abbrev=False,
)
parser.add_argument("-m", type=int, default=0)
parser.add_argument("-n", type=int, default=0)
parser.add_argument("-k", type=int, default=0)
parser.add_argument("-col_a", action='store_true', default=False, help='whether matrix a is column major')
parser.add_argument("-col_b", action='store_true', default=False, help='whether matrix b is column major')
parser.add_argument("-dtype_a", type=str, default='fp16', help="matrix a element data type")
parser.add_argument("-dtype_b", type=str, default='fp16', help="matrix b element data type")
parser.add_argument("-dtype_c", type=str, default='fp16', help="output element data type")
parser.add_argument("--ngpus", type=int, default=0, help='number of GPUs used in the profiling step')
parser.add_argument("--gpu_ids", type=lambda s: [int(id) for id in s.split(',')], default=[], help='list of gpu ids to use for tuning')
parser.add_argument("--gemm_size_file", type=str, default="", help='yaml file to indicate matrix size')
parser.add_argument("--o", type=str, default='', help='yaml file to store tuning results')
parser.add_argument("--keep", action='store_true', default=False, help='keep generated files')
parser.add_argument("--compare", action='store_true', default=False, help="Whether check result correctness")
parser.add_argument("--compare_wo_tuning", action='store_true', default=False, help="Whether check result correctness")
parser.add_argument("--benchmark", action='store_true', default=False, help="Benchmark the given config")
parser.add_argument("--time_breakdown", action='store_true', default=False, help="Show detailed time breakdown of each step during the tuning")
parser.add_argument("--verbose", action='store_true', default=False, help="enables time_breakdown and additional logging messages")
parser.add_argument("--num_threads", type=int, default=16, help="number of threads to use for kernel compilation and post processing")
parser.add_argument("--jobs", type=int, default=1, help="number of generated files")
parser.add_argument("--iters", type=int, default=1000, help="number of generated files")
parser.add_argument("--init_type", type=str, default='randn', help="Initialization type for input matrices (default uniform rand [0, 1.0)])")
parser.add_argument("--no_warmup", action='store_true', default=False, help="Do not call the warmup kernel")
args = parser.parse_args()
if not args.o:
if args.benchmark:
args.o = "benchmarking_results.csv"
else:
args.o = get_default_tuning_result_filename()
return args
TORCH_HAS_FP8E5B16 = hasattr(torch, 'float8_e5m2fnuz')
TORCH_HAS_FP8E4B8 = hasattr(torch, 'float8_e4m3fnuz')
tl_to_torch_types = {
tl.float16: torch.float16,
tl.bfloat16: torch.bfloat16,
tl.float32: torch.float32,
tl.int8: torch.int8,
tl.int32: torch.int32,
}
if TORCH_HAS_FP8E5B16:
tl_to_torch_types[tl.float8e5b16] = torch.float8_e5m2fnuz
if TORCH_HAS_FP8E4B8:
tl_to_torch_types[tl.float8e4b8] = torch.float8_e4m3fnuz
name_to_tl_types = {
'int8': tl.int8,
'int32': tl.int32,
'fp16': tl.float16,
'fp32': tl.float32,
'bf16': tl.bfloat16,
'fp8': tl.float8e4b8,
'bf8': tl.float8e5b16,
}
def process_item(item):
M = item['M']
N = item['N']
K = item['K']
col_a = item['rowMajorA'] != 'T'
col_b = item['rowMajorB'] != 'T'
del item['M']
del item['N']
del item['K']
del item['rowMajorA']
del item['rowMajorB']
return M, N, K, col_a, col_b, item
def type_name_to_bytes(ty_name):
if '32' in ty_name:
return 4
if '16' in ty_name:
return 2
if '8' in ty_name:
return 1
else:
print(f"Unrecognized input type name {ty_name}")
sys.exit(1)
def format_output(unformatted):
if unformatted < 0.0001:
formatted = "{:.3e}".format(unformatted)
elif unformatted > 1000:
formatted = "{:.1f}".format(unformatted)
else:
formatted = "{:.2f}".format(unformatted)
return formatted
def setup(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
dist.init_process_group("nccl", rank=rank, world_size=world_size)
print(f"Rank {rank}/{world_size} process initialized.")
def cleanup():
dist.destroy_process_group()
print("Process group destroyed.")
def main():
args = parse_args()
world_size = len(args.gpu_ids) if args.gpu_ids else torch.cuda.device_count()
print(f"Number of GPUs available: {world_size}")
matrix_size_file = args.gemm_size_file
output_file = args.o
keepTmp = args.keep
run_bench = args.benchmark
jobs = args.jobs
iters = args.iters
skipWarmup = args.no_warmup
# Get GPU ids
ngpus = args.ngpus
gpu_ids = args.gpu_ids
if ngpus != 0 and gpu_ids:
print("--ngpus and --gpu_ids are mutually exclusive options")
return os.EX_USAGE
if ngpus == 0 and not gpu_ids:
ngpus = 1
if ngpus != 0:
gpus = list(range(ngpus))
if gpu_ids:
gpus = gpu_ids
if run_bench:
gpus = [gpus[0]]
jobs = 1
# Get element type
dtype_a = args.dtype_a
dtype_b = args.dtype_b
dtype_c = args.dtype_c
if dtype_a not in name_to_tl_types or dtype_b not in name_to_tl_types or dtype_c not in name_to_tl_types:
print(f"Unsupported dtype_a {args.dtype_a} or dtype_b {args.dtype_b} or dtype_c {args.dtype_c}")
print("Supported types: ", list(name_to_tl_types.keys()))
sys.exit(1)
mnks = []
# TODO: make it more robust to get user input
init_type = args.init_type
if matrix_size_file == "" or not os.path.isfile(matrix_size_file):
M = args.m
N = args.n
K = args.k
col_a = args.col_a
col_b = args.col_b
mnks = [(M, N, K, col_a, col_b, None)]
else:
with open(matrix_size_file) as file:
matrix_sizes = yaml.safe_load(file)
for item in matrix_sizes:
M, N, K, col_a, col_b, item = process_item(item)
mnks.append((M, N, K, col_a, col_b, item))
# Check correctness from given configs
if args.compare_wo_tuning:
for (M, N, K, col_a, col_b, myConfig) in mnks:
test_correctness(M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, myConfig, True)
return
configs_full = get_full_tuning_space()
start_time = datetime.now()
# Append to the output file so that we can save all results into one file
f_results = open(output_file, 'a')
if run_bench:
print(f"Benchmarking gemm with {dtype_a} inputs")
print("trans M N K TFLOPS us")
f_results.write("trans,M,N,K,TFLOPS,us\n")
else:
print(f"Tuning {len(mnks)} gemm sizes starts at: {start_time}", flush=True)
for (M, N, K, col_a, col_b, myConfig) in mnks:
start_local_time = datetime.now()
# Obtain a pruned tuning space according to gemm size
# If running benchmark, use the provided config
pruned_configs = [myConfig] if run_bench else prune_configs(M, N, K, configs_full, type_name_to_bytes(dtype_a), type_name_to_bytes(dtype_b))
row_a_str = 'N' if col_a else 'T'
row_b_str = 'N' if col_b else 'T'
size_str = f'SIZE: {M} {N} {K} {row_a_str}{row_b_str}'
if not run_bench:
print(f"{size_str} nConfigs: {len(pruned_configs)}", end=" ", flush=True)
else:
print(f"{row_a_str}{row_b_str} {M:5d} {N:5d} {K:5d} ", end="")
f_results.write(f"{row_a_str}{row_b_str},{M},{N},{K},")
# The main tuning function for one gemm size
verbose_level = 0
if args.time_breakdown:
verbose_level = 1
if args.verbose:
verbose_level = 2
def tune_gemm(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, pruned_configs, run_bench, jobs, iters, skipWarmup, verbose, num_threads):
return tune_gemm_config(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, pruned_configs, run_bench, jobs, iters, skipWarmup, verbose, num_threads)
minTime, bestConfig, compile_time, profile_time, post_time = spawn(
tune_gemm,
args=(world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, pruned_configs, run_bench, jobs, iters, skipWarmup, verbose_level, num_threads),
nprocs=world_size,
join=True
)
# post processing the numbers
perf_tflops = lambda us: 2 * M * N * K * 1e-12 / (us * 1e-6)
tri_tflops = perf_tflops(minTime)
formatted_tflops = format_output(tri_tflops)
minTime = format_output(minTime)
if not run_bench:
print(f'TFLOPS: {formatted_tflops} time(us): {minTime}', end=" ", flush=True)
bestConfig_compact_str, _ = gen_kernel_and_configStr_from_config(M, N, K, bestConfig, None, None, None)
if not run_bench:
print(f'best_config: {bestConfig_compact_str}', end=" ", flush=True)
# write best config to tuning_results.yaml
if run_bench:
print(f"{formatted_tflops} {minTime}")
f_results.write(f"{formatted_tflops},{minTime}\n")
sizeDict = {'M': M, 'N': N, 'K': K, 'rowMajorA': row_a_str, 'rowMajorB': row_b_str}
sizeDict.update(bestConfig)
if not run_bench:
f_results.write("- " + str(sizeDict) + " ")
f_results.write(f'# TFLOPS: {formatted_tflops} time(us): {minTime}\n')
# remove generated files if asked to
if not keepTmp:
for i in range(jobs):
generated_script = generated_kernel_name(M, N, K, i)
os.remove(generated_script)
if not skipWarmup:
os.remove(generated_script + ".failed_configs")
for f in glob.glob(f"results-{i}.*"):
os.remove(f)
# Check correctness if asked to
if args.compare:
print("correctness: ", end=" ", flush=True)
test_correctness(M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, bestConfig, False)
elif not run_bench:
print("", flush=True)
end_local_time = datetime.now()
if not run_bench:
print(f">>> Elapsed time: {end_local_time - start_local_time} = {compile_time} (compile) + {profile_time} (profile) + {post_time} (post processing)", flush=True)
f_results.close()
end_time = datetime.now()
tuning_time = end_time - start_time
if not run_bench:
print(f"Tuning ends at: {end_time}")
print(f"Total tuning time (h:m:s): {tuning_time}")
if __name__ == '__main__':
main()
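The tuner reports throughput via the perf_tflops lambda in main(): a GEMM does 2*M*N*K floating-point operations, and the measured microseconds are converted to seconds. A minimal sketch of that arithmetic, with an assumed problem size and timing, is:

def gemm_tflops(M, N, K, time_us):
    # 2*M*N*K flops (one multiply and one add per K step of every output element),
    # scaled to tera-flops and divided by the runtime in seconds.
    return 2 * M * N * K * 1e-12 / (time_us * 1e-6)

# Assumed example: a 4096x4096x4096 GEMM measured at 450 us -> ~305.4 TFLOPS.
print(round(gemm_tflops(4096, 4096, 4096, 450.0), 1))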
|
@triton.jit
def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
input = tl.load(input_ptr + offsets, mask=mask)
output = input
tl.store(output_ptr + offsets, output, mask=mask)
def init_by_size_and_type(size, dtype, init_type):
if init_type == 'hpl':
return torch.empty(size, device='cuda', dtype=dtype).uniform_(-0.5, 0.5)
# This init type has element[i] in row[j] equal to sin(i+j*N)
elif init_type == 'trig_float':
M, N = size
return torch.reshape(torch.arange(0, M*N), (M, N)).sin().to(dtype=dtype, device='cuda')
elif init_type == 'zeros':
return torch.zeros(size, dtype=dtype, device='cuda')
elif init_type == "randn":
temp = torch.randn(size, dtype=dtype, device='cuda')
return temp
else:
raise ValueError("Bad matrix initialization type.")
raw_data = init_by_size_and_type((N,M) if needTrans else (M,N), torch.float32, init_type)
if needTrans:
raw_data = raw_data.T
if (d_type == tl.float8e4b8 and TORCH_HAS_FP8E4B8) or \
(d_type == tl.float8e5b16 and TORCH_HAS_FP8E5B16) or not d_type.is_fp8():
input = raw_data.to(tl_to_torch_types[d_type])
input_f16 = input.to(torch.float16)
else:
f8_tensor = raw_data.to(torch.int8)
# keep only two bits of exponent to avoid overflow
f8_tensor = f8_tensor & 0b00111111
input = triton.reinterpret(f8_tensor, d_type)
input_f16 = torch.empty_like(f8_tensor, dtype=torch.float16)
n_elements = raw_data.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
copy_kernel[grid](input, input_f16, n_elements, BLOCK_SIZE=1024)
return input, input_f16
def matmul(a, b, c, block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
#assert a.is_contiguous(), "Matrix A must be contiguous"
#assert b.is_contiguous(), "Matrix B must be contiguous"
M, K = a.shape
K, N = b.shape
# 1D launch kernel where each block gets its own program.
grid = triton.cdiv(M, block_m) * triton.cdiv(N, block_n), split_k
matmul_kernel[grid](
a, b, c,
M, N, K,
a.stride(0), a.stride(1),
b.stride(0), b.stride(1),
c.stride(0), c.stride(1),
BLOCK_SIZE_M=block_m,
BLOCK_SIZE_N=block_n,
BLOCK_SIZE_K=block_k,
GROUP_SIZE_M=group_m,
SPLIT_K=split_k,
num_warps=num_warps,
num_stages=num_stages,
waves_per_eu=waves_per_eu,
matrix_instr_nonkdim = mfmaInstrSize,
kpack = kpack
)
return c
def test_correctness(M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, config, verbose):
block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack = read_config(config)
torch.manual_seed(0)
#a = torch.randn((M, K), device='cuda', dtype=datatype)
#b = torch.randn((K, N), device='cuda', dtype=datatype)
a, a_fp16 = gen_input(M, K, dtype_a, col_a, 1, init_type, device='cuda')
b, b_fp16 = gen_input(K, N, dtype_b, col_b, 2, init_type, device='cuda')
# Allocates output.
c = torch.zeros((M, N), device=a.device, dtype=tl_to_torch_types[name_to_tl_types[dtype_c]])
triton_output = matmul(a, b, c, block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack)
torch_output = torch.matmul(a_fp16, b_fp16)
# print(f"triton_output={triton_output}")
# print(f"torch_output={torch_output}")
rtol = 0 if torch.version.hip is None else 1e-2
atol = 1e-3 if split_k == 1 else 4e-2
row_a_str = 'N' if col_a else 'T'
row_b_str = 'N' if col_b else 'T'
size_str = ''
if verbose:
size_str = f'SIZE M: {M}, N: {N}, K: {K}, trans: {row_a_str}{row_b_str}'
if torch.allclose(triton_output.to(torch.float16), torch_output, atol=atol, rtol=rtol):
print(f'{size_str} Correct✅')
else:
print(f'{size_str} Incorrect❌')
def get_default_tuning_result_filename():
git_branch_name = run_bash_command("git rev-parse --abbrev-ref HEAD")
git_branch_name = git_branch_name[0].decode()
git_commit_hash = run_bash_command("git rev-parse --short HEAD")
git_commit_hash = git_commit_hash[0].decode()
dt_string = datetime.now().strftime("%m-%d-%Y-%H:%M:%S")
defaultName = f"tuning_results_{git_branch_name}@{git_commit_hash}_{dt_string}.yaml"
return defaultName
def parse_args():
parser = argparse.ArgumentParser(
prog="tune a specific gemm size",
allow_abbrev=False,
)
parser.add_argument("-m", type=int, default=0)
parser.add_argument("-n", type=int, default=0)
parser.add_argument("-k", type=int, default=0)
parser.add_argument("-col_a", action='store_true', default=False, help='whether matrix a is column major')
parser.add_argument("-col_b", action='store_true', default=False, help='whether matrix b is column major')
parser.add_argument("-dtype_a", type=str, default='fp16', help="matrix a element data type")
parser.add_argument("-dtype_b", type=str, default='fp16', help="matrix b element data type")
parser.add_argument("-dtype_c", type=str, default='fp16', help="output element data type")
parser.add_argument("--ngpus", type=int, default=0, help='number of GPUs used in the profiling step')
parser.add_argument("--gpu_ids", type=lambda s: [int(id) for id in s.split(',')], default=[], help='list of gpu ids to use for tuning')
parser.add_argument("--gemm_size_file", type=str, default="", help='yaml file to indicate matrix size')
parser.add_argument("--o", type=str, default='', help='yaml file to store tuning results')
parser.add_argument("--keep", action='store_true', default=False, help='keep generated files')
parser.add_argument("--compare", action='store_true', default=False, help="Whether check result correctness")
parser.add_argument("--compare_wo_tuning", action='store_true', default=False, help="Whether check result correctness")
parser.add_argument("--benchmark", action='store_true', default=False, help="Benchmark the given config")
parser.add_argument("--time_breakdown", action='store_true', default=False, help="Show detailed time breakdown of each step during the tuning")
parser.add_argument("--verbose", action='store_true', default=False, help="enables time_breakdown and additional logging messages")
parser.add_argument("--num_threads", type=int, default=16, help="number of threads to use for kernel compilation and post processing")
parser.add_argument("--jobs", type=int, default=1, help="number of generated files")
parser.add_argument("--iters", type=int, default=1000, help="number of generated files")
parser.add_argument("--init_type", type=str, default='randn', help="Initialization type for input matrices (default uniform rand [0, 1.0)])")
parser.add_argument("--no_warmup", action='store_true', default=False, help="Do not call the warmup kernel")
args = parser.parse_args()
if not args.o:
if args.benchmark:
args.o = "benchmarking_results.csv"
else:
args.o = get_default_tuning_result_filename()
return args
TORCH_HAS_FP8E5B16 = hasattr(torch, 'float8_e5m2fnuz')
TORCH_HAS_FP8E4B8 = hasattr(torch, 'float8_e4m3fnuz')
tl_to_torch_types = {
tl.float16: torch.float16,
tl.bfloat16: torch.bfloat16,
tl.float32: torch.float32,
tl.int8: torch.int8,
tl.int32: torch.int32,
}
if TORCH_HAS_FP8E5B16:
tl_to_torch_types[tl.float8e5b16] = torch.float8_e5m2fnuz
if TORCH_HAS_FP8E4B8:
tl_to_torch_types[tl.float8e4b8] = torch.float8_e4m3fnuz
name_to_tl_types = {
'int8': tl.int8,
'int32': tl.int32,
'fp16': tl.float16,
'fp32': tl.float32,
'bf16': tl.bfloat16,
'fp8': tl.float8e4b8,
'bf8': tl.float8e5b16,
}
def process_item(item):
M = item['M']
N = item['N']
K = item['K']
col_a = item['rowMajorA'] != 'T'
col_b = item['rowMajorB'] != 'T'
del item['M']
del item['N']
del item['K']
del item['rowMajorA']
del item['rowMajorB']
return M, N, K, col_a, col_b, item
def type_name_to_bytes(ty_name):
if '32' in ty_name:
return 4
if '16' in ty_name:
return 2
if '8' in ty_name:
return 1
else:
print(f"Unrecognized input type name {ty_name}")
sys.exit(1)
def format_output(unformatted):
if unformatted < 0.0001:
formatted = "{:.3e}".format(unformatted)
elif unformatted > 1000:
formatted = "{:.1f}".format(unformatted)
else:
formatted = "{:.2f}".format(unformatted)
return formatted
def setup(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
dist.init_process_group("nccl", rank=rank, world_size=world_size)
print(f"Rank {rank}/{world_size} process initialized.")
def cleanup():
dist.destroy_process_group()
print("Process group destroyed.")
def main():
args = parse_args()
world_size = len(args.gpu_ids) if args.gpu_ids else torch.cuda.device_count()
print(f"Number of GPUs available: {world_size}")
matrix_size_file = args.gemm_size_file
output_file = args.o
keepTmp = args.keep
run_bench = args.benchmark
jobs = args.jobs
iters = args.iters
skipWarmup = args.no_warmup
# Get GPU ids
ngpus = args.ngpus
gpu_ids = args.gpu_ids
if ngpus != 0 and gpu_ids:
print("--ngpus and --gpu_ids are mutually exclusive options")
return os.EX_USAGE
if ngpus == 0 and not gpu_ids:
ngpus = 1
if ngpus != 0:
gpus = list(range(ngpus))
if gpu_ids:
gpus = gpu_ids
if run_bench:
gpus = [gpus[0]]
jobs = 1
# Get element type
dtype_a = args.dtype_a
dtype_b = args.dtype_b
dtype_c = args.dtype_c
if dtype_a not in name_to_tl_types or dtype_b not in name_to_tl_types or dtype_c not in name_to_tl_types:
print(f"Unsupported dtype_a {args.dtype_a} or dtype_b {args.dtype_b} or dtype_c {args.dtype_c}")
print("Supported types: ", list(name_to_tl_types.keys()))
sys.exit(1)
mnks = []
# TODO: make it more robust to get user input
init_type = args.init_type
if matrix_size_file == "" or not os.path.isfile(matrix_size_file):
M = args.m
N = args.n
K = args.k
col_a = args.col_a
col_b = args.col_b
mnks = [(M, N, K, col_a, col_b, None)]
else:
with open(matrix_size_file) as file:
matrix_sizes = yaml.safe_load(file)
for item in matrix_sizes:
M, N, K, col_a, col_b, item = process_item(item)
mnks.append((M, N, K, col_a, col_b, item))
# Check correctness from given configs
if args.compare_wo_tuning:
for (M, N, K, col_a, col_b, myConfig) in mnks:
test_correctness(M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, myConfig, True)
return
configs_full = get_full_tuning_space()
start_time = datetime.now()
# Append to the output file so that we can save all results into one file
f_results = open(output_file, 'a')
if run_bench:
print(f"Benchmarking gemm with {dtype_a} inputs")
print("trans M N K TFLOPS us")
f_results.write("trans,M,N,K,TFLOPS,us\n")
else:
print(f"Tuning {len(mnks)} gemm sizes starts at: {start_time}", flush=True)
for (M, N, K, col_a, col_b, myConfig) in mnks:
start_local_time = datetime.now()
# Obtain a pruned tuning space according to gemm size
# If running benchmark, use the provided config
pruned_configs = [myConfig] if run_bench else prune_configs(M, N, K, configs_full, type_name_to_bytes(dtype_a), type_name_to_bytes(dtype_b))
row_a_str = 'N' if col_a else 'T'
row_b_str = 'N' if col_b else 'T'
size_str = f'SIZE: {M} {N} {K} {row_a_str}{row_b_str}'
if not run_bench:
print(f"{size_str} nConfigs: {len(pruned_configs)}", end=" ", flush=True)
else:
print(f"{row_a_str}{row_b_str} {M:5d} {N:5d} {K:5d} ", end="")
f_results.write(f"{row_a_str}{row_b_str},{M},{N},{K},")
# The main tuning function for one gemm size
verbose_level = 0
if args.time_breakdown:
verbose_level = 1
if args.verbose:
verbose_level = 2
def tune_gemm(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, pruned_configs, run_bench, jobs, iters, skipWarmup, verbose, num_threads):
return tune_gemm_config(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, pruned_configs, run_bench, jobs, iters, skipWarmup, verbose, num_threads)
minTime, bestConfig, compile_time, profile_time, post_time = spawn(
tune_gemm,
args=(world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, pruned_configs, run_bench, jobs, iters, skipWarmup, verbose_level, num_threads),
nprocs=world_size,
join=True
)
# post processing the numbers
perf_tflops = lambda us: 2 * M * N * K * 1e-12 / (us * 1e-6)
tri_tflops = perf_tflops(minTime)
formatted_tflops = format_output(tri_tflops)
minTime = format_output(minTime)
if not run_bench:
print(f'TFLOPS: {formatted_tflops} time(us): {minTime}', end=" ", flush=True)
bestConfig_compact_str, _ = gen_kernel_and_configStr_from_config(M, N, K, bestConfig, None, None, None)
if not run_bench:
print(f'best_config: {bestConfig_compact_str}', end=" ", flush=True)
# write best config to tuning_results.yaml
if run_bench:
print(f"{formatted_tflops} {minTime}")
f_results.write(f"{formatted_tflops},{minTime}\n")
sizeDict = {'M': M, 'N': N, 'K': K, 'rowMajorA': row_a_str, 'rowMajorB': row_b_str}
sizeDict.update(bestConfig)
if not run_bench:
f_results.write("- " + str(sizeDict) + " ")
f_results.write(f'# TFLOPS: {formatted_tflops} time(us): {minTime}\n')
# remove generated files if asked to
if not keepTmp:
for i in range(jobs):
generated_script = generated_kernel_name(M, N, K, i)
os.remove(generated_script)
if not skipWarmup:
os.remove(generated_script + ".failed_configs")
for f in glob.glob(f"results-{i}.*"):
os.remove(f)
# Check correctness if asked to
if args.compare:
print("correctness: ", end=" ", flush=True)
test_correctness(M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, bestConfig, False)
elif not run_bench:
print("", flush=True)
end_local_time = datetime.now()
if not run_bench:
print(f">>> Elapsed time: {end_local_time - start_local_time} = {compile_time} (compile) + {profile_time} (profile) + {post_time} (post processing)", flush=True)
f_results.close()
end_time = datetime.now()
tuning_time = end_time - start_time
if not run_bench:
print(f"Tuning ends at: {end_time}")
print(f"Total tuning time (h:m:s): {tuning_time}")
if __name__ == '__main__':
main()
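The matmul wrapper above launches a 1D grid of cdiv(M, BLOCK_SIZE_M) * cdiv(N, BLOCK_SIZE_N) programs, with SPLIT_K as a second grid axis. A small sketch of that launch-grid arithmetic, using assumed tile sizes:

def cdiv(a, b):
    return (a + b - 1) // b

# Assumed example tile sizes; the tuner sweeps these as part of its config space.
M, N = 4096, 4096
block_m, block_n, split_k = 256, 128, 2
grid = (cdiv(M, block_m) * cdiv(N, block_n), split_k)
print(grid)  # (512, 2) -> 1024 program instances in total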
|
phlippe/liger_kernels
|
liger_kernels/utils.py
|
https://github.com/phlippe/liger_kernels/blob/0abb152b752e66e1c3e0c78a7eb56daea9a07f42/liger_kernels/utils.py
|
import jax
import numpy as np
import triton
import triton.language as tl
@triton.jit
def element_mul_kernel(
_, # alias for X_ptr
grad_output_ptr,
X_ptr,
X_stride,
n_cols,
BLOCK_SIZE: tl.constexpr,
):
"""
This function multiplies each element of the tensor pointed to by X_ptr with the value pointed to by grad_output_ptr.
The multiplication is performed in-place on the tensor pointed to by X_ptr.
Parameters:
X_ptr: Pointer to the input tensor.
X_stride (int): The stride of the input tensor.
grad_output_ptr: Pointer to the gradient output value.
n_cols (int): The number of columns in the input tensor.
BLOCK_SIZE (int): The block size for Triton operations.
"""
# Get the program ID and convert it to int64 to avoid overflow
program_id = tl.program_id(0).to(tl.int64)
# Locate the start index
X_ptr += program_id * X_stride
# Load the gradient output value
grad_output = tl.load(grad_output_ptr)
# Perform the element-wise multiplication
for i in range(0, n_cols, BLOCK_SIZE):
X_offsets = i + tl.arange(0, BLOCK_SIZE)
X_block = tl.load(X_ptr + X_offsets, mask=X_offsets < n_cols)
tl.store(X_ptr + X_offsets, X_block * grad_output, mask=X_offsets < n_cols)
def get_stride(array: jax.Array | jax.ShapeDtypeStruct, axis: int) -> int:
"""
Returns the stride of a JAX array at a given axis.
To calculate all strides, use get_strides.
Args:
array: JAX array or shape-dtype struct.
axis: The axis at which to calculate the stride.
Returns:
The stride of the array at the given axis.
"""
if axis < 0:
axis += len(array.shape)
shape = array.shape
size = array.size
stride = size // np.prod(shape[: axis + 1])
return int(stride)
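Each program of element_mul_kernel scales one row of X in place by the scalar pointed to by grad_output_ptr, so across all rows the effect is a plain element-wise multiply. A minimal NumPy reference sketch (shapes and values are illustrative, not part of the library):

import numpy as np

def element_mul_reference(X, grad_output):
    # In-place scaling of the whole tensor by a scalar, mirroring what the
    # Triton kernel does row by row.
    X *= grad_output
    return X

X = np.arange(12, dtype=np.float32).reshape(3, 4)
print(element_mul_reference(X, 0.5))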
|
@triton.jit
def element_mul_kernel(
_, # alias for X_ptr
grad_output_ptr,
X_ptr,
X_stride,
n_cols,
BLOCK_SIZE: tl.constexpr,
):
"""
This function multiplies each element of the tensor pointed to by X_ptr with the value pointed to by grad_output_ptr.
The multiplication is performed in-place on the tensor pointed to by X_ptr.
Parameters:
X_ptr: Pointer to the input tensor.
X_stride (int): The stride of the input tensor.
grad_output_ptr: Pointer to the gradient output value.
n_cols (int): The number of columns in the input tensor.
BLOCK_SIZE (int): The block size for Triton operations.
"""
# Get the program ID and convert it to int64 to avoid overflow
program_id = tl.program_id(0).to(tl.int64)
# Locate the start index
X_ptr += program_id * X_stride
# Load the gradient output value
grad_output = tl.load(grad_output_ptr)
# Perform the element-wise multiplication
for i in range(0, n_cols, BLOCK_SIZE):
X_offsets = i + tl.arange(0, BLOCK_SIZE)
X_block = tl.load(X_ptr + X_offsets, mask=X_offsets < n_cols)
tl.store(X_ptr + X_offsets, X_block * grad_output, mask=X_offsets < n_cols)
def get_stride(array: jax.Array | jax.ShapeDtypeStruct, axis: int) -> int:
"""
Returns the stride of a JAX array at a given axis.
To calculate all strides, use get_strides.
Args:
array: JAX array or shape-dtype struct.
axis: The axis at which to calculate the stride.
Returns:
The stride of the array at the given axis.
"""
if axis < 0:
axis += len(array.shape)
shape = array.shape
size = array.size
stride = size // np.prod(shape[: axis + 1])
return int(stride)
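get_stride assumes a C-contiguous (row-major) layout: the element stride at an axis equals the product of all dimensions after it. A small sketch checking the same formula against NumPy's byte strides, with an assumed array shape:

import numpy as np

def stride_at(shape, axis):
    # Same arithmetic as get_stride: total size divided by the product of the
    # dimensions up to and including `axis` (row-major layout assumed).
    size = int(np.prod(shape))
    return size // int(np.prod(shape[: axis + 1]))

a = np.zeros((4, 8, 16), dtype=np.float32)
for axis in range(a.ndim):
    assert stride_at(a.shape, axis) == a.strides[axis] // a.itemsize
print([stride_at(a.shape, axis) for axis in range(a.ndim)])  # [128, 16, 1]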
|
yifuwang/symm-mem-recipes
|
triton_utils.py
|
https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py
|
import triton
import triton.language as tl
@triton.jit
def get_tid():
return tl.inline_asm_elementwise(
"""
mov.u32 $0, %tid.x;
mov.u32 $1, %tid.y;
mov.u32 $2, %tid.z;
""",
"=r,=r,=r",
[],
dtype=(tl.uint32, tl.uint32, tl.uint32),
is_pure=True,
pack=1,
)
@triton.jit
def get_ntid():
return tl.inline_asm_elementwise(
"""
mov.u32 $0, %ntid.x;
mov.u32 $1, %ntid.y;
mov.u32 $2, %ntid.z;
""",
"=r,=r,=r",
[],
dtype=(tl.uint32, tl.uint32, tl.uint32),
is_pure=True,
pack=1,
)
@triton.jit
def get_flat_tid():
tid_x, tid_y, tid_z = get_tid()
ntid_x, ntid_y, _ = get_ntid()
return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x
@triton.jit
def get_flat_bid():
return (
tl.program_id(2) * tl.num_programs(1) * tl.num_programs(0)
+ tl.program_id(1) * tl.num_programs(0)
+ tl.program_id(0)
)
@triton.jit
def sync_threads():
tl.inline_asm_elementwise(
"bar.sync 0;", "=r", [], dtype=tl.int32, is_pure=False, pack=1
)
|
@triton.jit
def get_tid():
return tl.inline_asm_elementwise(
"""
mov.u32 $0, %tid.x;
mov.u32 $1, %tid.y;
mov.u32 $2, %tid.z;
""",
"=r,=r,=r",
[],
dtype=(tl.uint32, tl.uint32, tl.uint32),
is_pure=True,
pack=1,
)
|
yifuwang/symm-mem-recipes
|
triton_utils.py
|
https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py
|
import triton
import triton.language as tl
@triton.jit
def get_tid():
return tl.inline_asm_elementwise(
"""
mov.u32 $0, %tid.x;
mov.u32 $1, %tid.y;
mov.u32 $2, %tid.z;
""",
"=r,=r,=r",
[],
dtype=(tl.uint32, tl.uint32, tl.uint32),
is_pure=True,
pack=1,
)
@triton.jit
def get_ntid():
return tl.inline_asm_elementwise(
"""
mov.u32 $0, %ntid.x;
mov.u32 $1, %ntid.y;
mov.u32 $2, %ntid.z;
""",
"=r,=r,=r",
[],
dtype=(tl.uint32, tl.uint32, tl.uint32),
is_pure=True,
pack=1,
)
@triton.jit
def get_flat_tid():
tid_x, tid_y, tid_z = get_tid()
ntid_x, ntid_y, _ = get_ntid()
return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x
@triton.jit
def get_flat_bid():
return (
tl.program_id(2) * tl.num_programs(1) * tl.num_programs(0)
+ tl.program_id(1) * tl.num_programs(0)
+ tl.program_id(0)
)
@triton.jit
def sync_threads():
tl.inline_asm_elementwise(
"bar.sync 0;", "=r", [], dtype=tl.int32, is_pure=False, pack=1
)
|
@triton.jit
def get_ntid():
return tl.inline_asm_elementwise(
"""
mov.u32 $0, %ntid.x;
mov.u32 $1, %ntid.y;
mov.u32 $2, %ntid.z;
""",
"=r,=r,=r",
[],
dtype=(tl.uint32, tl.uint32, tl.uint32),
is_pure=True,
pack=1,
)
|
yifuwang/symm-mem-recipes
|
triton_utils.py
|
https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py
|
import triton
import triton.language as tl
@triton.jit
def get_tid():
return tl.inline_asm_elementwise(
"""
mov.u32 $0, %tid.x;
mov.u32 $1, %tid.y;
mov.u32 $2, %tid.z;
""",
"=r,=r,=r",
[],
dtype=(tl.uint32, tl.uint32, tl.uint32),
is_pure=True,
pack=1,
)
@triton.jit
def get_ntid():
return tl.inline_asm_elementwise(
"""
mov.u32 $0, %ntid.x;
mov.u32 $1, %ntid.y;
mov.u32 $2, %ntid.z;
""",
"=r,=r,=r",
[],
dtype=(tl.uint32, tl.uint32, tl.uint32),
is_pure=True,
pack=1,
)
@triton.jit
def get_flat_tid():
tid_x, tid_y, tid_z = get_tid()
ntid_x, ntid_y, _ = get_ntid()
return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x
@triton.jit
def get_flat_bid():
return (
tl.program_id(2) * tl.num_programs(1) * tl.num_programs(0)
+ tl.program_id(1) * tl.num_programs(0)
+ tl.program_id(0)
)
@triton.jit
def sync_threads():
tl.inline_asm_elementwise(
"bar.sync 0;", "=r", [], dtype=tl.int32, is_pure=False, pack=1
)
|
@triton.jit
def get_flat_tid():
tid_x, tid_y, tid_z = get_tid()
ntid_x, ntid_y, _ = get_ntid()
return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x
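get_flat_tid linearises the 3D thread index in row-major order (z slowest, x fastest). A quick plain-Python sketch of the same arithmetic with assumed block dimensions:

def flat_tid(tid, ntid):
    tid_x, tid_y, tid_z = tid
    ntid_x, ntid_y, _ = ntid
    return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x

# Assumed (32, 4, 2) thread block: thread (5, 3, 1) -> 1*4*32 + 3*32 + 5 = 229.
print(flat_tid((5, 3, 1), (32, 4, 2)))  # 229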
|
yifuwang/symm-mem-recipes
|
triton_utils.py
|
https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py
|
import triton
import triton.language as tl
@triton.jit
def get_tid():
return tl.inline_asm_elementwise(
"""
mov.u32 $0, %tid.x;
mov.u32 $1, %tid.y;
mov.u32 $2, %tid.z;
""",
"=r,=r,=r",
[],
dtype=(tl.uint32, tl.uint32, tl.uint32),
is_pure=True,
pack=1,
)
@triton.jit
def get_ntid():
return tl.inline_asm_elementwise(
"""
mov.u32 $0, %ntid.x;
mov.u32 $1, %ntid.y;
mov.u32 $2, %ntid.z;
""",
"=r,=r,=r",
[],
dtype=(tl.uint32, tl.uint32, tl.uint32),
is_pure=True,
pack=1,
)
@triton.jit
def get_flat_tid():
tid_x, tid_y, tid_z = get_tid()
ntid_x, ntid_y, _ = get_ntid()
return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x
@triton.jit
def get_flat_bid():
return (
tl.program_id(2) * tl.num_programs(1) * tl.num_programs(0)
+ tl.program_id(1) * tl.num_programs(0)
+ tl.program_id(0)
)
@triton.jit
def sync_threads():
tl.inline_asm_elementwise(
"bar.sync 0;", "=r", [], dtype=tl.int32, is_pure=False, pack=1
)
|
@triton.jit
def get_flat_bid():
return (
tl.program_id(2) * tl.num_programs(1) * tl.num_programs(0)
+ tl.program_id(1) * tl.num_programs(0)
+ tl.program_id(0)
)
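get_flat_bid applies the same row-major flattening to the program grid, with tl.num_programs supplying the grid extents. With an assumed launch grid this works out to:

def flat_bid(pid, num_programs):
    pid0, pid1, pid2 = pid
    n0, n1, _ = num_programs
    return pid2 * n1 * n0 + pid1 * n0 + pid0

# Assumed grid of (10, 4, 2) programs: program (7, 2, 1) -> 1*4*10 + 2*10 + 7 = 67.
print(flat_bid((7, 2, 1), (10, 4, 2)))  # 67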
|
yifuwang/symm-mem-recipes
|
triton_utils.py
|
https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py
|
import triton
import triton.language as tl
@triton.jit
def get_tid():
return tl.inline_asm_elementwise(
"""
mov.u32 $0, %tid.x;
mov.u32 $1, %tid.y;
mov.u32 $2, %tid.z;
""",
"=r,=r,=r",
[],
dtype=(tl.uint32, tl.uint32, tl.uint32),
is_pure=True,
pack=1,
)
@triton.jit
def get_ntid():
return tl.inline_asm_elementwise(
"""
mov.u32 $0, %ntid.x;
mov.u32 $1, %ntid.y;
mov.u32 $2, %ntid.z;
""",
"=r,=r,=r",
[],
dtype=(tl.uint32, tl.uint32, tl.uint32),
is_pure=True,
pack=1,
)
@triton.jit
def get_flat_tid():
tid_x, tid_y, tid_z = get_tid()
ntid_x, ntid_y, _ = get_ntid()
return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x
@triton.jit
def get_flat_bid():
return (
tl.program_id(2) * tl.num_programs(1) * tl.num_programs(0)
+ tl.program_id(1) * tl.num_programs(0)
+ tl.program_id(0)
)
@triton.jit
def sync_threads():
tl.inline_asm_elementwise(
"bar.sync 0;", "=r", [], dtype=tl.int32, is_pure=False, pack=1
)
|
@triton.jit
def sync_threads():
tl.inline_asm_elementwise(
"bar.sync 0;", "=r", [], dtype=tl.int32, is_pure=False, pack=1
)
|
Terapines/AI-Benchmark
|
src/triton/resize.py
|
https://github.com/Terapines/AI-Benchmark/blob/0ae8cd849a833d4c35a4b25b722ce98c5af2fe34/src/triton/resize.py
|
import torch
import triton
import triton.language as tl
import os
USE_GPU = False
triton.runtime.driver.set_active_to_cpu()
def get_resize_kernel_autotune_config():
configs = [
triton.Config({'BLOCK_SIZE_W': 1}),
triton.Config({'BLOCK_SIZE_W': 2}),
triton.Config({'BLOCK_SIZE_W': 4}),
triton.Config({'BLOCK_SIZE_W': 8}),
triton.Config({'BLOCK_SIZE_W': 16}),
triton.Config({'BLOCK_SIZE_W': 32}),
triton.Config({'BLOCK_SIZE_W': 64}),
triton.Config({'BLOCK_SIZE_W': 128}),
]
if(os.getenv("ENABLE_AUTOTUNING") == "resize_kernel"):
assert (len(configs) > 1), "Autotuning config size needs to be larger than 1"
return configs
return [triton.Config({'BLOCK_SIZE_W': 32})]
@triton.autotune(
configs=get_resize_kernel_autotune_config(),
key=[],
)
@triton.jit
def resize_kernel(
src_ptr,
out_ptr,
channel,
height,
width,
BLOCK_SIZE_W: tl.constexpr,
):
pid_h = tl.program_id(axis=0)
pid_c = tl.program_id(axis=1)
dst_height = 2 * height # 2x upsample
dst_width = 2 * width
hw_fl = 7
h_idx = pid_h
input_y = h_idx << (hw_fl - 1)
y0 = input_y >> hw_fl
h1_lambda = input_y - (y0 << hw_fl)
factor = 1 << hw_fl
h0_lambda = factor - h1_lambda
y1 = tl.minimum(y0 + 1, height - 1)
src_offset = pid_c * height * width
src_ptrs0 = src_ptr + src_offset + y0 * width
src_ptrs1 = src_ptr + src_offset + y1 * width
out_ptrs = out_ptr + (pid_c * dst_height * dst_width + h_idx * dst_width)
for off in range(0, width * 2, BLOCK_SIZE_W):
w_idx = off + tl.arange(0, BLOCK_SIZE_W)  # shape: [BLOCK_SIZE_W]
mask = (w_idx < dst_width)
input_x = w_idx << (hw_fl - 1)
x0 = input_x >> hw_fl
y0x0 = tl.load(src_ptrs0 + x0, mask=mask, other=0).to(tl.int16)
y1x0 = tl.load(src_ptrs1 + x0, mask=mask, other=0).to(tl.int16)
x1 = tl.minimum(x0 + 1, width - 1)
y0x1 = tl.load(src_ptrs0 + x1, mask=mask, other=0).to(tl.int16)
y1x1 = tl.load(src_ptrs1 + x1, mask=mask, other=0).to(tl.int16)
w1_lambda = input_x - (x0 << hw_fl)
w0_lambda = factor - w1_lambda
sum1 = (y0x0 * w0_lambda + y0x1 * w1_lambda) >> hw_fl
sum2 = (y1x0 * w0_lambda + y1x1 * w1_lambda) >> hw_fl
sum = (sum1 * h0_lambda + sum2 * h1_lambda) >> hw_fl
sum = sum.to(tl.int8)
tl.store(out_ptrs + w_idx, sum, mask=mask)
def resize(src_arr, out_arr):
src_arr = src_arr.contiguous()
out_arr = out_arr.contiguous()
# Get dimensions
channel, height, width = src_arr.shape
# BLOCK_H = 32
# BLOCK_W = 32
# Compute grid dimensions
grid = lambda meta: (height * 2, channel, 1)
# Launch the Triton kernel
resize_kernel[grid](
src_arr, out_arr, channel, height, width
)
C, H, W = 3, 512, 512
src = torch.ones((C, H, W), dtype=torch.int8, device='cpu')
out = torch.empty((C, 2 * H, 2 * W), dtype=torch.int8, device='cpu')
resize(src, out)
# print(src)
# print(out)
|
@triton.jit
def resize_kernel(
src_ptr,
out_ptr,
channel,
height,
width,
BLOCK_SIZE_W: tl.constexpr,
):
pid_h = tl.program_id(axis=0)
pid_c = tl.program_id(axis=1)
dst_height = 2 * height # 2x upsample
dst_width = 2 * width
hw_fl = 7
h_idx = pid_h
input_y = h_idx << (hw_fl - 1)
y0 = input_y >> hw_fl
h1_lambda = input_y - (y0 << hw_fl)
factor = 1 << hw_fl
h0_lambda = factor - h1_lambda
y1 = tl.minimum(y0 + 1, height - 1)
src_offset = pid_c * height * width
src_ptrs0 = src_ptr + src_offset + y0 * width
src_ptrs1 = src_ptr + src_offset + y1 * width
out_ptrs = out_ptr + (pid_c * dst_height * dst_width + h_idx * dst_width)
for off in range(0, width * 2, BLOCK_SIZE_W):
w_idx = off + tl.arange(0, BLOCK_SIZE_W)  # shape: [BLOCK_SIZE_W]
mask = (w_idx < dst_width)
input_x = w_idx << (hw_fl - 1)
x0 = input_x >> hw_fl
y0x0 = tl.load(src_ptrs0 + x0, mask=mask, other=0).to(tl.int16)
y1x0 = tl.load(src_ptrs1 + x0, mask=mask, other=0).to(tl.int16)
x1 = tl.minimum(x0 + 1, width - 1)
y0x1 = tl.load(src_ptrs0 + x1, mask=mask, other=0).to(tl.int16)
y1x1 = tl.load(src_ptrs1 + x1, mask=mask, other=0).to(tl.int16)
w1_lambda = input_x - (x0 << hw_fl)
w0_lambda = factor - w1_lambda
sum1 = (y0x0 * w0_lambda + y0x1 * w1_lambda) >> hw_fl
sum2 = (y1x0 * w0_lambda + y1x1 * w1_lambda) >> hw_fl
sum = (sum1 * h0_lambda + sum2 * h1_lambda) >> hw_fl
sum = sum.to(tl.int8)
tl.store(out_ptrs + w_idx, sum, mask=mask)
def resize(src_arr, out_arr):
src_arr = src_arr.contiguous()
out_arr = out_arr.contiguous()
# Get dimensions
channel, height, width = src_arr.shape
# BLOCK_H = 32
# BLOCK_W = 32
# Compute grid dimensions
grid = lambda meta: (height * 2, channel, 1)
# Launch the Triton kernel
resize_kernel[grid](
src_arr, out_arr, channel, height, width
)
C, H, W = 3, 512, 512
src = torch.ones((C, H, W), dtype=torch.int8, device='cpu')
out = torch.empty((C, 2 * H, 2 * W), dtype=torch.int8, device='cpu')
resize(src, out)
# print(src)
# print(out)
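The resize kernel performs 2x bilinear upsampling in Q7 fixed point (hw_fl = 7): shifting the destination index left by hw_fl - 1 bits maps it to half the source coordinate, and the low 7 bits become the interpolation weight out of 128. A scalar plain-Python sketch of the same arithmetic for a single output pixel, with assumed source values:

def resize_pixel(src, dst_y, dst_x, height, width, hw_fl=7):
    factor = 1 << hw_fl
    input_y = dst_y << (hw_fl - 1)
    y0 = input_y >> hw_fl
    y1 = min(y0 + 1, height - 1)
    h1_lambda = input_y - (y0 << hw_fl)          # vertical weight in [0, 128)
    input_x = dst_x << (hw_fl - 1)
    x0 = input_x >> hw_fl
    x1 = min(x0 + 1, width - 1)
    w1_lambda = input_x - (x0 << hw_fl)          # horizontal weight in [0, 128)
    top = (src[y0][x0] * (factor - w1_lambda) + src[y0][x1] * w1_lambda) >> hw_fl
    bot = (src[y1][x0] * (factor - w1_lambda) + src[y1][x1] * w1_lambda) >> hw_fl
    return (top * (factor - h1_lambda) + bot * h1_lambda) >> hw_fl

src = [[0, 10], [20, 30]]             # assumed 2x2 source image
print(resize_pixel(src, 1, 1, 2, 2))  # blends all four neighbours -> 15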
|
khulnasoft/divest
|
divest/kernels/swiglu.py
|
https://github.com/khulnasoft/divest/blob/53b878ed6cf9f8e172a496bf26a2b22ff3a30a51/divest/kernels/swiglu.py
|
import triton
import triton.language as tl
import torch
from .utils import calculate_settings
@triton.jit
def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,):
block_idx = tl.program_id(0)
offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32)
g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32)
# f = e * sigmoid(e)
f_row = e_row * tl.sigmoid(e_row) # e_row / (1 + tl.exp(-e_row))
f_row = f_row.to(g_row.dtype) # Exact copy from HF
# h = f * g
h_row = f_row * g_row
# Store h
tl.store(h + offsets, h_row, mask = mask)
pass
def swiglu_fg_kernel(e, g):
batch, seq_len, hd = e.shape
n_elements = e.numel()
h = torch.empty((batch, seq_len, hd), dtype = e.dtype, device = "cuda")
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
_fg_kernel[grid](e, g, h, n_elements, BLOCK_SIZE = 1024,)
return h
pass
@triton.jit
def _DWf_DW_dfg_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,):
"""
e = e.float()
se = 1.0 / (1.0 + torch.exp(-e))
f = (se * e).to(dtype)
h = f * g
df = DW * f
dg = DW * g
de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype)
"""
block_idx = tl.program_id(0)
offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
DW_row = tl.load(DW + offsets, mask = mask, other = 0)#.to(tl.float32)
e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32)
g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32)
# e = e.float()
# se = 1.0 / (1.0 + torch.exp(-e))
se_row = tl.sigmoid(e_row) # 1.0 / (1.0 + tl.exp(-e_row))
# f = (se * e).to(dtype)
f_row = se_row * e_row
f_row = f_row.to(DW_row.dtype)
# h = f * g
h_row = f_row * g_row
# df = DW * f
df_row = DW_row * f_row
# dg = DW * g
dg_row = DW_row * g_row
# de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype)
de_row = dg_row.to(tl.float32) * se_row * (1.0 + e_row * (1.0 - se_row))
de_row = de_row.to(DW_row.dtype)
# Store derivatives in buffers
tl.store(DW + offsets, h_row, mask = mask) # h = f * g
tl.store(e + offsets, df_row, mask = mask) # df = DW * f
tl.store(g + offsets, de_row, mask = mask) # de
pass
def swiglu_DWf_DW_dfg_kernel(DW, e, g):
batch_seq_len, hd = e.shape
n_elements = e.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
_DWf_DW_dfg_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,)
return DW, e, g
pass
|
@triton.jit
def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,):
block_idx = tl.program_id(0)
offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32)
g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32)
# f = e * sigmoid(e)
f_row = e_row * tl.sigmoid(e_row) # e_row / (1 + tl.exp(-e_row))
f_row = f_row.to(g_row.dtype) # Exact copy from HF
# h = f * g
h_row = f_row * g_row
# Store h
tl.store(h + offsets, h_row, mask = mask)
pass
def swiglu_fg_kernel(e, g):
batch, seq_len, hd = e.shape
n_elements = e.numel()
h = torch.empty((batch, seq_len, hd), dtype = e.dtype, device = "cuda")
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
_fg_kernel[grid](e, g, h, n_elements, BLOCK_SIZE = 1024,)
return h
pass
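The forward kernel computes SwiGLU: h = silu(e) * g, with the sigmoid evaluated in float32 and the result cast back to the dtype of g. A minimal eager PyTorch reference sketch for comparison (not part of the library):

import torch

def swiglu_reference(e, g):
    # silu(e) = e * sigmoid(e), computed in fp32 and cast back like the kernel.
    f = (e.float() * torch.sigmoid(e.float())).to(g.dtype)
    return f * g

e = torch.randn(2, 4, 8, dtype=torch.float16)
g = torch.randn(2, 4, 8, dtype=torch.float16)
out = swiglu_reference(e, g)  # compare against swiglu_fg_kernel(e, g) on a CUDA device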
|
khulnasoft/divest
|
divest/kernels/swiglu.py
|
https://github.com/khulnasoft/divest/blob/53b878ed6cf9f8e172a496bf26a2b22ff3a30a51/divest/kernels/swiglu.py
|
import triton
import triton.language as tl
import torch
from .utils import calculate_settings
@triton.jit
def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,):
block_idx = tl.program_id(0)
offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32)
g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32)
# f = e * sigmoid(e)
f_row = e_row * tl.sigmoid(e_row) # e_row / (1 + tl.exp(-e_row))
f_row = f_row.to(g_row.dtype) # Exact copy from HF
# h = f * g
h_row = f_row * g_row
# Store h
tl.store(h + offsets, h_row, mask = mask)
pass
def swiglu_fg_kernel(e, g):
batch, seq_len, hd = e.shape
n_elements = e.numel()
h = torch.empty((batch, seq_len, hd), dtype = e.dtype, device = "cuda")
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
_fg_kernel[grid](e, g, h, n_elements, BLOCK_SIZE = 1024,)
return h
pass
@triton.jit
def _DWf_DW_dfg_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,):
"""
e = e.float()
se = 1.0 / (1.0 + torch.exp(-e))
f = (se * e).to(dtype)
h = f * g
df = DW * f
dg = DW * g
de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype)
"""
block_idx = tl.program_id(0)
offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
DW_row = tl.load(DW + offsets, mask = mask, other = 0)#.to(tl.float32)
e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32)
g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32)
# e = e.float()
# se = 1.0 / (1.0 + torch.exp(-e))
se_row = tl.sigmoid(e_row) # 1.0 / (1.0 + tl.exp(-e_row))
# f = (se * e).to(dtype)
f_row = se_row * e_row
f_row = f_row.to(DW_row.dtype)
# h = f * g
h_row = f_row * g_row
# df = DW * f
df_row = DW_row * f_row
# dg = DW * g
dg_row = DW_row * g_row
# de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype)
de_row = dg_row.to(tl.float32) * se_row * (1.0 + e_row * (1.0 - se_row))
de_row = de_row.to(DW_row.dtype)
# Store derivatives in buffers
tl.store(DW + offsets, h_row, mask = mask) # h = f * g
tl.store(e + offsets, df_row, mask = mask) # df = DW * f
tl.store(g + offsets, de_row, mask = mask) # de
pass
def swiglu_DWf_DW_dfg_kernel(DW, e, g):
batch_seq_len, hd = e.shape
n_elements = e.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
_DWf_DW_dfg_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,)
return DW, e, g
pass
|
@triton.jit
def _DWf_DW_dfg_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,):
"""
e = e.float()
se = 1.0 / (1.0 + torch.exp(-e))
f = (se * e).to(dtype)
h = f * g
df = DW * f
dg = DW * g
de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype)
"""
block_idx = tl.program_id(0)
offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
DW_row = tl.load(DW + offsets, mask = mask, other = 0)#.to(tl.float32)
e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32)
g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32)
# e = e.float()
# se = 1.0 / (1.0 + torch.exp(-e))
se_row = tl.sigmoid(e_row) # 1.0 / (1.0 + tl.exp(-e_row))
# f = (se * e).to(dtype)
f_row = se_row * e_row
f_row = f_row.to(DW_row.dtype)
# h = f * g
h_row = f_row * g_row
# df = DW * f
df_row = DW_row * f_row
# dg = DW * g
dg_row = DW_row * g_row
# de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype)
de_row = dg_row.to(tl.float32) * se_row * (1.0 + e_row * (1.0 - se_row))
de_row = de_row.to(DW_row.dtype)
# Store derivatives in buffers
tl.store(DW + offsets, h_row, mask = mask) # h = f * g
tl.store(e + offsets, df_row, mask = mask) # df = DW * f
tl.store(g + offsets, de_row, mask = mask) # de
pass
def swiglu_DWf_DW_dfg_kernel(DW, e, g):
batch_seq_len, hd = e.shape
n_elements = e.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
_DWf_DW_dfg_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,)
return DW, e, g
pass
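The de_row line relies on the identity d/de [e * sigmoid(e)] = sigmoid(e) * (1 + e * (1 - sigmoid(e))). A small autograd sketch verifying that identity on an assumed float32 tensor:

import torch

e = torch.randn(16, dtype=torch.float32, requires_grad=True)
f = e * torch.sigmoid(e)
f.sum().backward()  # e.grad now holds d(silu(e))/de element-wise

se = torch.sigmoid(e.detach())
manual = se * (1.0 + e.detach() * (1.0 - se))
assert torch.allclose(e.grad, manual, atol=1e-6)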
|
gameofdimension/hfans
|
triton/ext_autograd.py
|
https://github.com/gameofdimension/hfans/blob/7e76ba5f3ae59d2ebaea0d8e67a40a6057f4af57/triton/ext_autograd.py
|
import torch
import triton
import triton.language as tl
try:
# This is https://github.com/NVIDIA/apex, NOT the apex on PyPi, so it
# should not be added to extras_require in setup.py.
import apex
HAS_APEX = True
except ModuleNotFoundError:
HAS_APEX = False
@triton.jit
def _layer_norm_fwd_fused(
X, # pointer to the input
Y, # pointer to the output
W, # pointer to the weights
B, # pointer to the biases
Mean, # pointer to the mean
Rstd, # pointer to the 1/std
stride, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
eps, # epsilon to avoid division by zero
BLOCK_SIZE: tl.constexpr,
):
# Map the program id to the row of X and Y it should compute.
row = tl.program_id(0)
Y += row * stride
X += row * stride
# Compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# Compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
x = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32)
x = tl.where(cols < N, x - mean, 0.)
_var += x * x
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# Write mean / rstd
tl.store(Mean + row, mean)
tl.store(Rstd + row, rstd)
# Normalize and apply linear transformation
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
w = tl.load(W + cols, mask=mask)
b = tl.load(B + cols, mask=mask)
x = tl.load(X + cols, mask=mask, other=0.).to(tl.float32)
x_hat = (x - mean) * rstd
y = x_hat * w + b
# Write output
tl.store(Y + cols, y, mask=mask)
@triton.jit
def _layer_norm_bwd_dx_fused(
DX, # pointer to the input gradient
DY, # pointer to the output gradient
DW, # pointer to the partial sum of weights gradient
DB, # pointer to the partial sum of biases gradient
X, # pointer to the input
W, # pointer to the weights
Mean, # pointer to the mean
Rstd, # pointer to the 1/std
Lock, # pointer to the lock
stride, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
# Map the program id to the elements of X, DX, and DY it should compute.
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
mask = cols < N
X += row * stride
DY += row * stride
DX += row * stride
# Offset locks and weights/biases gradient pointer for parallel reduction
lock_id = row % GROUP_SIZE_M
Lock += lock_id
Count = Lock + GROUP_SIZE_M
DW = DW + lock_id * N + cols
DB = DB + lock_id * N + cols
# Load data to SRAM
x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
w = tl.load(W + cols, mask=mask).to(tl.float32)
mean = tl.load(Mean + row)
rstd = tl.load(Rstd + row)
# Compute dx
xhat = (x - mean) * rstd
wdy = w * dy
xhat = tl.where(mask, xhat, 0.)
wdy = tl.where(mask, wdy, 0.)
c1 = tl.sum(xhat * wdy, axis=0) / N
c2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * c1 + c2)) * rstd
# Write dx
tl.store(DX + cols, dx, mask=mask)
# Accumulate partial sums for dw/db
partial_dw = (dy * xhat).to(w.dtype)
partial_db = (dy).to(w.dtype)
while tl.atomic_cas(Lock, 0, 1) == 1:
pass
count = tl.load(Count)
# First store doesn't accumulate
if count == 0:
tl.atomic_xchg(Count, 1)
else:
partial_dw += tl.load(DW, mask=mask)
partial_db += tl.load(DB, mask=mask)
tl.store(DW, partial_dw, mask=mask)
tl.store(DB, partial_db, mask=mask)
# Release the lock
tl.atomic_xchg(Lock, 0)
@triton.jit
def _layer_norm_bwd_dwdb(
DW, # pointer to the partial sum of weights gradient
DB, # pointer to the partial sum of biases gradient
FINAL_DW, # pointer to the weights gradient
FINAL_DB, # pointer to the biases gradient
M, # GROUP_SIZE_M
N, # number of columns
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
# Map the program id to the elements of DW and DB it should compute.
pid = tl.program_id(0)
cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
db = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
# Iterate through the rows of DW and DB to sum the partial sums.
for i in range(0, M, BLOCK_SIZE_M):
rows = i + tl.arange(0, BLOCK_SIZE_M)
mask = (rows[:, None] < M) & (cols[None, :] < N)
offs = rows[:, None] * N + cols[None, :]
dw += tl.load(DW + offs, mask=mask, other=0.)
db += tl.load(DB + offs, mask=mask, other=0.)
# Write the final sum to the output.
sum_dw = tl.sum(dw, axis=0)
sum_db = tl.sum(db, axis=0)
tl.store(FINAL_DW + cols, sum_dw, mask=cols < N)
tl.store(FINAL_DB + cols, sum_db, mask=cols < N)
class LayerNorm(torch.autograd.Function):
@staticmethod
def forward(ctx, x, normalized_shape, weight, bias, eps):
# allocate output
y = torch.empty_like(x)
# reshape input data into 2D tensor
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
mean = torch.empty((M, ), dtype=torch.float32, device=x.device)
rstd = torch.empty((M, ), dtype=torch.float32, device=x.device)
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_SIZE:
raise RuntimeError(
"This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps
num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
# enqueue kernel
_layer_norm_fwd_fused[(M, )]( #
x_arg, y, weight, bias, mean, rstd, #
x_arg.stride(0), N, eps, #
BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps, num_ctas=1) # type: ignore # noqa
ctx.save_for_backward(x, weight, bias, mean, rstd)
ctx.BLOCK_SIZE = BLOCK_SIZE
ctx.num_warps = num_warps
ctx.eps = eps
return y
@staticmethod
def backward(ctx, dy):
x, w, b, m, v = ctx.saved_tensors
# heuristics for amount of parallel reduction stream for DW/DB
N = w.shape[0]
GROUP_SIZE_M = 64
if N <= 8192:
GROUP_SIZE_M = 96
if N <= 4096:
GROUP_SIZE_M = 128
if N <= 1024:
GROUP_SIZE_M = 256
# allocate output
locks = torch.zeros(
2 * GROUP_SIZE_M, dtype=torch.int32, device=w.device)
_dw = torch.zeros((GROUP_SIZE_M, N), dtype=x.dtype, device=w.device)
_db = torch.zeros((GROUP_SIZE_M, N), dtype=x.dtype, device=w.device)
dw = torch.empty((N, ), dtype=w.dtype, device=w.device)
db = torch.empty((N, ), dtype=w.dtype, device=w.device)
dx = torch.empty_like(dy)
# enqueue kernel using forward pass heuristics
# also compute partial sums for DW and DB
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
_layer_norm_bwd_dx_fused[(M, )]( #
dx, dy, _dw, _db, x, w, m, v, locks, #
x_arg.stride(0), N, #
BLOCK_SIZE_N=ctx.BLOCK_SIZE, #
GROUP_SIZE_M=GROUP_SIZE_M, #
num_warps=ctx.num_warps) # type: ignore
def grid(meta): return [triton.cdiv(N, meta['BLOCK_SIZE_N'])]
# accumulate partial sums in separate kernel
_layer_norm_bwd_dwdb[grid](
_dw, _db, dw, db, min(GROUP_SIZE_M, M), N, #
BLOCK_SIZE_M=32, #
BLOCK_SIZE_N=128, num_ctas=1) # type: ignore
return dx, None, dw, db, None
layer_norm = LayerNorm.apply
def test_layer_norm(M, N, dtype, eps=1e-5, device='cuda'):
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype,
device=device, requires_grad=True)
bias = torch.rand(w_shape, dtype=dtype, device=device, requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device=device)
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
# forward pass
y_tri: torch.Tensor = layer_norm(x, w_shape, weight, bias, eps) # type: ignore # noqa
y_ref = torch.nn.functional.layer_norm(
x, w_shape, weight, bias, eps).to(dtype)
# backward pass (triton)
y_tri.backward(dy, retain_graph=True)
dx_tri, dw_tri, db_tri = [_.grad.clone() for _ in [x, weight, bias]] # type: ignore # noqa
x.grad, weight.grad, bias.grad = None, None, None
# backward pass (torch)
y_ref.backward(dy, retain_graph=True)
dx_ref, dw_ref, db_ref = [_.grad.clone() for _ in [x, weight, bias]] # type: ignore # noqa
# compare
assert torch.allclose(y_tri, y_ref, atol=1e-2, rtol=0)
assert torch.allclose(dx_tri, dx_ref, atol=1e-2, rtol=0)
assert torch.allclose(db_tri, db_ref, atol=1e-2, rtol=0)
assert torch.allclose(dw_tri, dw_ref, atol=1e-2, rtol=0)
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['N'],
x_vals=[512 * i for i in range(2, 32)],
line_arg='provider',
line_vals=['triton', 'torch'] + (['apex'] if HAS_APEX else []),
line_names=['Triton', 'Torch'] + (['Apex'] if HAS_APEX else []),
styles=[('blue', '-'), ('green', '-'), ('orange', '-')],
ylabel='GB/s',
plot_name='layer-norm-backward',
args={'M': 4096, 'dtype': torch.float16, 'mode': 'backward'},
))
def bench_layer_norm(
M, N, dtype, provider, mode='backward', eps=1e-5, device='cuda'):
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype,
device=device, requires_grad=True)
bias = torch.rand(w_shape, dtype=dtype, device=device, requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device=device)
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
quantiles = [0.5, 0.2, 0.8]
def y_fwd() -> torch.Tensor: # type: ignore
if provider == "triton":
return layer_norm(x, w_shape, weight, bias, eps) # type: ignore
if provider == "torch":
return torch.nn.functional.layer_norm(
x, w_shape, weight, bias, eps) # noqa: F811, E704
if provider == "apex":
apex_layer_norm = (apex.normalization.FusedLayerNorm(
w_shape).to(x.device).to(x.dtype))
return apex_layer_norm(x) # noqa: F811, E704
# forward pass
if mode == 'forward':
def gbps(ms): return 2 * x.numel() * x.element_size() / ms * 1e-6
ms, min_ms, max_ms = triton.testing.do_bench(
y_fwd, quantiles=quantiles, rep=500)
# backward pass
if mode == 'backward':
y = y_fwd()
def gbps(ms): # noqa: F811, E704
return 3 * x.numel() * x.element_size() / ms * 1e-6
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: y.backward(dy, retain_graph=True), quantiles=quantiles,
grad_to_none=[x], rep=500)
return gbps(ms), gbps(max_ms), gbps(min_ms)
if __name__ == "__main__":
test_layer_norm(1151, 8192, torch.float16)
bench_layer_norm.run(save_path='./benchmark_result', print_data=True)
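A minimal sketch of how the layer_norm autograd Function above could be wrapped for use inside a model; the TritonLayerNorm wrapper below is an illustrative assumption, not part of the original file.
import torch
import torch.nn as nn

class TritonLayerNorm(nn.Module):
    # Thin nn.Module wrapper around the Triton-backed layer_norm above
    # (hypothetical helper, shown for illustration only).
    def __init__(self, dim, eps=1e-5):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(dim))
        self.bias = nn.Parameter(torch.zeros(dim))
        self.eps = eps

    def forward(self, x):
        return layer_norm(x, (x.shape[-1],), self.weight, self.bias, self.eps)

# Example (requires a CUDA device, since the kernels run on GPU):
# module = TritonLayerNorm(1024).cuda().half()
# y = module(torch.randn(16, 1024, device='cuda', dtype=torch.float16))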
|
@triton.jit
def _layer_norm_fwd_fused(
X, # pointer to the input
Y, # pointer to the output
W, # pointer to the weights
B, # pointer to the biases
Mean, # pointer to the mean
Rstd, # pointer to the 1/std
stride, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
eps, # epsilon to avoid division by zero
BLOCK_SIZE: tl.constexpr,
):
# Map the program id to the row of X and Y it should compute.
row = tl.program_id(0)
Y += row * stride
X += row * stride
# Compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# Compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
x = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32)
x = tl.where(cols < N, x - mean, 0.)
_var += x * x
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# Write mean / rstd
tl.store(Mean + row, mean)
tl.store(Rstd + row, rstd)
# Normalize and apply linear transformation
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
w = tl.load(W + cols, mask=mask)
b = tl.load(B + cols, mask=mask)
x = tl.load(X + cols, mask=mask, other=0.).to(tl.float32)
x_hat = (x - mean) * rstd
y = x_hat * w + b
# Write output
tl.store(Y + cols, y, mask=mask)
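The chunk above computes the per-row mean, inverse standard deviation, and affine-transformed output in float32. A plain PyTorch reference of the same math, useful for sanity checks on small inputs (the function name below is illustrative, not from the original file):
import torch

def layer_norm_fwd_reference(x, weight, bias, eps=1e-5):
    # x: (M, N); statistics are computed per row in float32, as in the kernel.
    x_f32 = x.float()
    mean = x_f32.mean(dim=1, keepdim=True)                 # per-row mean
    var = x_f32.var(dim=1, unbiased=False, keepdim=True)   # biased variance
    rstd = torch.rsqrt(var + eps)                          # 1 / std
    y = (x_f32 - mean) * rstd * weight.float() + bias.float()
    return y.to(x.dtype), mean.squeeze(1), rstd.squeeze(1)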
|
gameofdimension/hfans
|
triton/ext_autograd.py
|
https://github.com/gameofdimension/hfans/blob/7e76ba5f3ae59d2ebaea0d8e67a40a6057f4af57/triton/ext_autograd.py
|
import torch
import triton
import triton.language as tl
try:
# This is https://github.com/NVIDIA/apex, NOT the apex on PyPi, so it
# should not be added to extras_require in setup.py.
import apex
HAS_APEX = True
except ModuleNotFoundError:
HAS_APEX = False
@triton.jit
def _layer_norm_fwd_fused(
X, # pointer to the input
Y, # pointer to the output
W, # pointer to the weights
B, # pointer to the biases
Mean, # pointer to the mean
Rstd, # pointer to the 1/std
stride, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
eps, # epsilon to avoid division by zero
BLOCK_SIZE: tl.constexpr,
):
# Map the program id to the row of X and Y it should compute.
row = tl.program_id(0)
Y += row * stride
X += row * stride
# Compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# Compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
x = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32)
x = tl.where(cols < N, x - mean, 0.)
_var += x * x
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# Write mean / rstd
tl.store(Mean + row, mean)
tl.store(Rstd + row, rstd)
# Normalize and apply linear transformation
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
w = tl.load(W + cols, mask=mask)
b = tl.load(B + cols, mask=mask)
x = tl.load(X + cols, mask=mask, other=0.).to(tl.float32)
x_hat = (x - mean) * rstd
y = x_hat * w + b
# Write output
tl.store(Y + cols, y, mask=mask)
@triton.jit
def _layer_norm_bwd_dx_fused(
DX, # pointer to the input gradient
DY, # pointer to the output gradient
DW, # pointer to the partial sum of weights gradient
DB, # pointer to the partial sum of biases gradient
X, # pointer to the input
W, # pointer to the weights
Mean, # pointer to the mean
Rstd, # pointer to the 1/std
Lock, # pointer to the lock
stride, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
# Map the program id to the elements of X, DX, and DY it should compute.
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
mask = cols < N
X += row * stride
DY += row * stride
DX += row * stride
# Offset locks and weights/biases gradient pointer for parallel reduction
lock_id = row % GROUP_SIZE_M
Lock += lock_id
Count = Lock + GROUP_SIZE_M
DW = DW + lock_id * N + cols
DB = DB + lock_id * N + cols
# Load data to SRAM
x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
w = tl.load(W + cols, mask=mask).to(tl.float32)
mean = tl.load(Mean + row)
rstd = tl.load(Rstd + row)
# Compute dx
xhat = (x - mean) * rstd
wdy = w * dy
xhat = tl.where(mask, xhat, 0.)
wdy = tl.where(mask, wdy, 0.)
c1 = tl.sum(xhat * wdy, axis=0) / N
c2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * c1 + c2)) * rstd
# Write dx
tl.store(DX + cols, dx, mask=mask)
# Accumulate partial sums for dw/db
partial_dw = (dy * xhat).to(w.dtype)
partial_db = (dy).to(w.dtype)
while tl.atomic_cas(Lock, 0, 1) == 1:
pass
count = tl.load(Count)
# First store doesn't accumulate
if count == 0:
tl.atomic_xchg(Count, 1)
else:
partial_dw += tl.load(DW, mask=mask)
partial_db += tl.load(DB, mask=mask)
tl.store(DW, partial_dw, mask=mask)
tl.store(DB, partial_db, mask=mask)
# Release the lock
tl.atomic_xchg(Lock, 0)
@triton.jit
def _layer_norm_bwd_dwdb(
DW, # pointer to the partial sum of weights gradient
DB, # pointer to the partial sum of biases gradient
FINAL_DW, # pointer to the weights gradient
FINAL_DB, # pointer to the biases gradient
M, # GROUP_SIZE_M
N, # number of columns
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
# Map the program id to the elements of DW and DB it should compute.
pid = tl.program_id(0)
cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
db = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
# Iterate through the rows of DW and DB to sum the partial sums.
for i in range(0, M, BLOCK_SIZE_M):
rows = i + tl.arange(0, BLOCK_SIZE_M)
mask = (rows[:, None] < M) & (cols[None, :] < N)
offs = rows[:, None] * N + cols[None, :]
dw += tl.load(DW + offs, mask=mask, other=0.)
db += tl.load(DB + offs, mask=mask, other=0.)
# Write the final sum to the output.
sum_dw = tl.sum(dw, axis=0)
sum_db = tl.sum(db, axis=0)
tl.store(FINAL_DW + cols, sum_dw, mask=cols < N)
tl.store(FINAL_DB + cols, sum_db, mask=cols < N)
class LayerNorm(torch.autograd.Function):
@staticmethod
def forward(ctx, x, normalized_shape, weight, bias, eps):
# allocate output
y = torch.empty_like(x)
# reshape input data into 2D tensor
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
mean = torch.empty((M, ), dtype=torch.float32, device=x.device)
rstd = torch.empty((M, ), dtype=torch.float32, device=x.device)
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_SIZE:
raise RuntimeError(
"This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps
num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
# enqueue kernel
_layer_norm_fwd_fused[(M, )]( #
x_arg, y, weight, bias, mean, rstd, #
x_arg.stride(0), N, eps, #
BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps, num_ctas=1) # type: ignore # noqa
ctx.save_for_backward(x, weight, bias, mean, rstd)
ctx.BLOCK_SIZE = BLOCK_SIZE
ctx.num_warps = num_warps
ctx.eps = eps
return y
@staticmethod
def backward(ctx, dy):
x, w, b, m, v = ctx.saved_tensors
# heuristics for amount of parallel reduction stream for DW/DB
N = w.shape[0]
GROUP_SIZE_M = 64
if N <= 8192:
GROUP_SIZE_M = 96
if N <= 4096:
GROUP_SIZE_M = 128
if N <= 1024:
GROUP_SIZE_M = 256
# allocate output
locks = torch.zeros(
2 * GROUP_SIZE_M, dtype=torch.int32, device=w.device)
_dw = torch.zeros((GROUP_SIZE_M, N), dtype=x.dtype, device=w.device)
_db = torch.zeros((GROUP_SIZE_M, N), dtype=x.dtype, device=w.device)
dw = torch.empty((N, ), dtype=w.dtype, device=w.device)
db = torch.empty((N, ), dtype=w.dtype, device=w.device)
dx = torch.empty_like(dy)
# enqueue kernel using forward pass heuristics
# also compute partial sums for DW and DB
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
_layer_norm_bwd_dx_fused[(M, )]( #
dx, dy, _dw, _db, x, w, m, v, locks, #
x_arg.stride(0), N, #
BLOCK_SIZE_N=ctx.BLOCK_SIZE, #
GROUP_SIZE_M=GROUP_SIZE_M, #
num_warps=ctx.num_warps) # type: ignore
def grid(meta): return [triton.cdiv(N, meta['BLOCK_SIZE_N'])]
# accumulate partial sums in separate kernel
_layer_norm_bwd_dwdb[grid](
_dw, _db, dw, db, min(GROUP_SIZE_M, M), N, #
BLOCK_SIZE_M=32, #
BLOCK_SIZE_N=128, num_ctas=1) # type: ignore
return dx, None, dw, db, None
layer_norm = LayerNorm.apply
def test_layer_norm(M, N, dtype, eps=1e-5, device='cuda'):
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype,
device=device, requires_grad=True)
bias = torch.rand(w_shape, dtype=dtype, device=device, requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device=device)
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
# forward pass
y_tri: torch.Tensor = layer_norm(x, w_shape, weight, bias, eps) # type: ignore # noqa
y_ref = torch.nn.functional.layer_norm(
x, w_shape, weight, bias, eps).to(dtype)
# backward pass (triton)
y_tri.backward(dy, retain_graph=True)
dx_tri, dw_tri, db_tri = [_.grad.clone() for _ in [x, weight, bias]] # type: ignore # noqa
x.grad, weight.grad, bias.grad = None, None, None
# backward pass (torch)
y_ref.backward(dy, retain_graph=True)
dx_ref, dw_ref, db_ref = [_.grad.clone() for _ in [x, weight, bias]] # type: ignore # noqa
# compare
assert torch.allclose(y_tri, y_ref, atol=1e-2, rtol=0)
assert torch.allclose(dx_tri, dx_ref, atol=1e-2, rtol=0)
assert torch.allclose(db_tri, db_ref, atol=1e-2, rtol=0)
assert torch.allclose(dw_tri, dw_ref, atol=1e-2, rtol=0)
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['N'],
x_vals=[512 * i for i in range(2, 32)],
line_arg='provider',
line_vals=['triton', 'torch'] + (['apex'] if HAS_APEX else []),
line_names=['Triton', 'Torch'] + (['Apex'] if HAS_APEX else []),
styles=[('blue', '-'), ('green', '-'), ('orange', '-')],
ylabel='GB/s',
plot_name='layer-norm-backward',
args={'M': 4096, 'dtype': torch.float16, 'mode': 'backward'},
))
def bench_layer_norm(
M, N, dtype, provider, mode='backward', eps=1e-5, device='cuda'):
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype,
device=device, requires_grad=True)
bias = torch.rand(w_shape, dtype=dtype, device=device, requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device=device)
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
quantiles = [0.5, 0.2, 0.8]
def y_fwd() -> torch.Tensor: # type: ignore
if provider == "triton":
return layer_norm(x, w_shape, weight, bias, eps) # type: ignore
if provider == "torch":
return torch.nn.functional.layer_norm(
x, w_shape, weight, bias, eps) # noqa: F811, E704
if provider == "apex":
apex_layer_norm = (apex.normalization.FusedLayerNorm(
w_shape).to(x.device).to(x.dtype))
return apex_layer_norm(x) # noqa: F811, E704
# forward pass
if mode == 'forward':
def gbps(ms): return 2 * x.numel() * x.element_size() / ms * 1e-6
ms, min_ms, max_ms = triton.testing.do_bench(
y_fwd, quantiles=quantiles, rep=500)
# backward pass
if mode == 'backward':
y = y_fwd()
def gbps(ms): # noqa: F811, E704
return 3 * x.numel() * x.element_size() / ms * 1e-6
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: y.backward(dy, retain_graph=True), quantiles=quantiles,
grad_to_none=[x], rep=500)
return gbps(ms), gbps(max_ms), gbps(min_ms)
if __name__ == "__main__":
test_layer_norm(1151, 8192, torch.float16)
bench_layer_norm.run(save_path='./benchmark_result', print_data=True)
|
@triton.jit
def _layer_norm_bwd_dx_fused(
DX, # pointer to the input gradient
DY, # pointer to the output gradient
DW, # pointer to the partial sum of weights gradient
DB, # pointer to the partial sum of biases gradient
X, # pointer to the input
W, # pointer to the weights
Mean, # pointer to the mean
Rstd, # pointer to the 1/std
Lock, # pointer to the lock
stride, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
# Map the program id to the elements of X, DX, and DY it should compute.
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
mask = cols < N
X += row * stride
DY += row * stride
DX += row * stride
# Offset locks and weights/biases gradient pointer for parallel reduction
lock_id = row % GROUP_SIZE_M
Lock += lock_id
Count = Lock + GROUP_SIZE_M
DW = DW + lock_id * N + cols
DB = DB + lock_id * N + cols
# Load data to SRAM
x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
w = tl.load(W + cols, mask=mask).to(tl.float32)
mean = tl.load(Mean + row)
rstd = tl.load(Rstd + row)
# Compute dx
xhat = (x - mean) * rstd
wdy = w * dy
xhat = tl.where(mask, xhat, 0.)
wdy = tl.where(mask, wdy, 0.)
c1 = tl.sum(xhat * wdy, axis=0) / N
c2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * c1 + c2)) * rstd
# Write dx
tl.store(DX + cols, dx, mask=mask)
# Accumulate partial sums for dw/db
partial_dw = (dy * xhat).to(w.dtype)
partial_db = (dy).to(w.dtype)
while tl.atomic_cas(Lock, 0, 1) == 1:
pass
count = tl.load(Count)
# First store doesn't accumulate
if count == 0:
tl.atomic_xchg(Count, 1)
else:
partial_dw += tl.load(DW, mask=mask)
partial_db += tl.load(DB, mask=mask)
tl.store(DW, partial_dw, mask=mask)
tl.store(DB, partial_db, mask=mask)
# Release the lock
tl.atomic_xchg(Lock, 0)
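The dx formula in the chunk above is the standard layer norm input gradient, dx = (w*dy - (xhat*c1 + c2)) * rstd, where c1 and c2 are the per-row means of xhat*w*dy and w*dy. A hedged PyTorch reference (the function name below is illustrative only):
import torch

def layer_norm_bwd_dx_reference(dy, x, weight, mean, rstd):
    # dy, x: (M, N); mean, rstd: (M,); weight: (N,)
    xhat = (x.float() - mean[:, None]) * rstd[:, None]
    wdy = weight.float() * dy.float()
    c1 = (xhat * wdy).mean(dim=1, keepdim=True)   # tl.sum(xhat * wdy, axis=0) / N
    c2 = wdy.mean(dim=1, keepdim=True)            # tl.sum(wdy, axis=0) / N
    return ((wdy - (xhat * c1 + c2)) * rstd[:, None]).to(x.dtype)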
|
gameofdimension/hfans
|
triton/ext_autograd.py
|
https://github.com/gameofdimension/hfans/blob/7e76ba5f3ae59d2ebaea0d8e67a40a6057f4af57/triton/ext_autograd.py
|
import torch
import triton
import triton.language as tl
try:
# This is https://github.com/NVIDIA/apex, NOT the apex on PyPi, so it
# should not be added to extras_require in setup.py.
import apex
HAS_APEX = True
except ModuleNotFoundError:
HAS_APEX = False
@triton.jit
def _layer_norm_fwd_fused(
X, # pointer to the input
Y, # pointer to the output
W, # pointer to the weights
B, # pointer to the biases
Mean, # pointer to the mean
Rstd, # pointer to the 1/std
stride, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
eps, # epsilon to avoid division by zero
BLOCK_SIZE: tl.constexpr,
):
# Map the program id to the row of X and Y it should compute.
row = tl.program_id(0)
Y += row * stride
X += row * stride
# Compute mean
mean = 0
_mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
a = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32)
_mean += a
mean = tl.sum(_mean, axis=0) / N
# Compute variance
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32)
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
x = tl.load(X + cols, mask=cols < N, other=0.).to(tl.float32)
x = tl.where(cols < N, x - mean, 0.)
_var += x * x
var = tl.sum(_var, axis=0) / N
rstd = 1 / tl.sqrt(var + eps)
# Write mean / rstd
tl.store(Mean + row, mean)
tl.store(Rstd + row, rstd)
# Normalize and apply linear transformation
for off in range(0, N, BLOCK_SIZE):
cols = off + tl.arange(0, BLOCK_SIZE)
mask = cols < N
w = tl.load(W + cols, mask=mask)
b = tl.load(B + cols, mask=mask)
x = tl.load(X + cols, mask=mask, other=0.).to(tl.float32)
x_hat = (x - mean) * rstd
y = x_hat * w + b
# Write output
tl.store(Y + cols, y, mask=mask)
@triton.jit
def _layer_norm_bwd_dx_fused(
DX, # pointer to the input gradient
DY, # pointer to the output gradient
DW, # pointer to the partial sum of weights gradient
DB, # pointer to the partial sum of biases gradient
X, # pointer to the input
W, # pointer to the weights
Mean, # pointer to the mean
Rstd, # pointer to the 1/std
Lock, # pointer to the lock
stride, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
# Map the program id to the elements of X, DX, and DY it should compute.
row = tl.program_id(0)
cols = tl.arange(0, BLOCK_SIZE_N)
mask = cols < N
X += row * stride
DY += row * stride
DX += row * stride
# Offset locks and weights/biases gradient pointer for parallel reduction
lock_id = row % GROUP_SIZE_M
Lock += lock_id
Count = Lock + GROUP_SIZE_M
DW = DW + lock_id * N + cols
DB = DB + lock_id * N + cols
# Load data to SRAM
x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
w = tl.load(W + cols, mask=mask).to(tl.float32)
mean = tl.load(Mean + row)
rstd = tl.load(Rstd + row)
# Compute dx
xhat = (x - mean) * rstd
wdy = w * dy
xhat = tl.where(mask, xhat, 0.)
wdy = tl.where(mask, wdy, 0.)
c1 = tl.sum(xhat * wdy, axis=0) / N
c2 = tl.sum(wdy, axis=0) / N
dx = (wdy - (xhat * c1 + c2)) * rstd
# Write dx
tl.store(DX + cols, dx, mask=mask)
# Accumulate partial sums for dw/db
partial_dw = (dy * xhat).to(w.dtype)
partial_db = (dy).to(w.dtype)
while tl.atomic_cas(Lock, 0, 1) == 1:
pass
count = tl.load(Count)
# First store doesn't accumulate
if count == 0:
tl.atomic_xchg(Count, 1)
else:
partial_dw += tl.load(DW, mask=mask)
partial_db += tl.load(DB, mask=mask)
tl.store(DW, partial_dw, mask=mask)
tl.store(DB, partial_db, mask=mask)
# Release the lock
tl.atomic_xchg(Lock, 0)
@triton.jit
def _layer_norm_bwd_dwdb(
DW, # pointer to the partial sum of weights gradient
DB, # pointer to the partial sum of biases gradient
FINAL_DW, # pointer to the weights gradient
FINAL_DB, # pointer to the biases gradient
M, # GROUP_SIZE_M
N, # number of columns
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
# Map the program id to the elements of DW and DB it should compute.
pid = tl.program_id(0)
cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
db = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
# Iterate through the rows of DW and DB to sum the partial sums.
for i in range(0, M, BLOCK_SIZE_M):
rows = i + tl.arange(0, BLOCK_SIZE_M)
mask = (rows[:, None] < M) & (cols[None, :] < N)
offs = rows[:, None] * N + cols[None, :]
dw += tl.load(DW + offs, mask=mask, other=0.)
db += tl.load(DB + offs, mask=mask, other=0.)
# Write the final sum to the output.
sum_dw = tl.sum(dw, axis=0)
sum_db = tl.sum(db, axis=0)
tl.store(FINAL_DW + cols, sum_dw, mask=cols < N)
tl.store(FINAL_DB + cols, sum_db, mask=cols < N)
class LayerNorm(torch.autograd.Function):
@staticmethod
def forward(ctx, x, normalized_shape, weight, bias, eps):
# allocate output
y = torch.empty_like(x)
# reshape input data into 2D tensor
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
mean = torch.empty((M, ), dtype=torch.float32, device=x.device)
rstd = torch.empty((M, ), dtype=torch.float32, device=x.device)
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_SIZE:
raise RuntimeError(
"This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps
num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
# enqueue kernel
_layer_norm_fwd_fused[(M, )]( #
x_arg, y, weight, bias, mean, rstd, #
x_arg.stride(0), N, eps, #
BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps, num_ctas=1) # type: ignore # noqa
ctx.save_for_backward(x, weight, bias, mean, rstd)
ctx.BLOCK_SIZE = BLOCK_SIZE
ctx.num_warps = num_warps
ctx.eps = eps
return y
@staticmethod
def backward(ctx, dy):
x, w, b, m, v = ctx.saved_tensors
# heuristics for amount of parallel reduction stream for DW/DB
N = w.shape[0]
GROUP_SIZE_M = 64
if N <= 8192:
GROUP_SIZE_M = 96
if N <= 4096:
GROUP_SIZE_M = 128
if N <= 1024:
GROUP_SIZE_M = 256
# allocate output
locks = torch.zeros(
2 * GROUP_SIZE_M, dtype=torch.int32, device=w.device)
_dw = torch.zeros((GROUP_SIZE_M, N), dtype=x.dtype, device=w.device)
_db = torch.zeros((GROUP_SIZE_M, N), dtype=x.dtype, device=w.device)
dw = torch.empty((N, ), dtype=w.dtype, device=w.device)
db = torch.empty((N, ), dtype=w.dtype, device=w.device)
dx = torch.empty_like(dy)
# enqueue kernel using forward pass heuristics
# also compute partial sums for DW and DB
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
_layer_norm_bwd_dx_fused[(M, )]( #
dx, dy, _dw, _db, x, w, m, v, locks, #
x_arg.stride(0), N, #
BLOCK_SIZE_N=ctx.BLOCK_SIZE, #
GROUP_SIZE_M=GROUP_SIZE_M, #
num_warps=ctx.num_warps) # type: ignore
def grid(meta): return [triton.cdiv(N, meta['BLOCK_SIZE_N'])]
# accumulate partial sums in separate kernel
_layer_norm_bwd_dwdb[grid](
_dw, _db, dw, db, min(GROUP_SIZE_M, M), N, #
BLOCK_SIZE_M=32, #
BLOCK_SIZE_N=128, num_ctas=1) # type: ignore
return dx, None, dw, db, None
layer_norm = LayerNorm.apply
def test_layer_norm(M, N, dtype, eps=1e-5, device='cuda'):
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype,
device=device, requires_grad=True)
bias = torch.rand(w_shape, dtype=dtype, device=device, requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device=device)
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
# forward pass
y_tri: torch.Tensor = layer_norm(x, w_shape, weight, bias, eps) # type: ignore # noqa
y_ref = torch.nn.functional.layer_norm(
x, w_shape, weight, bias, eps).to(dtype)
# backward pass (triton)
y_tri.backward(dy, retain_graph=True)
dx_tri, dw_tri, db_tri = [_.grad.clone() for _ in [x, weight, bias]] # type: ignore # noqa
x.grad, weight.grad, bias.grad = None, None, None
# backward pass (torch)
y_ref.backward(dy, retain_graph=True)
dx_ref, dw_ref, db_ref = [_.grad.clone() for _ in [x, weight, bias]] # type: ignore # noqa
# compare
assert torch.allclose(y_tri, y_ref, atol=1e-2, rtol=0)
assert torch.allclose(dx_tri, dx_ref, atol=1e-2, rtol=0)
assert torch.allclose(db_tri, db_ref, atol=1e-2, rtol=0)
assert torch.allclose(dw_tri, dw_ref, atol=1e-2, rtol=0)
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['N'],
x_vals=[512 * i for i in range(2, 32)],
line_arg='provider',
line_vals=['triton', 'torch'] + (['apex'] if HAS_APEX else []),
line_names=['Triton', 'Torch'] + (['Apex'] if HAS_APEX else []),
styles=[('blue', '-'), ('green', '-'), ('orange', '-')],
ylabel='GB/s',
plot_name='layer-norm-backward',
args={'M': 4096, 'dtype': torch.float16, 'mode': 'backward'},
))
def bench_layer_norm(
M, N, dtype, provider, mode='backward', eps=1e-5, device='cuda'):
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype,
device=device, requires_grad=True)
bias = torch.rand(w_shape, dtype=dtype, device=device, requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device=device)
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
quantiles = [0.5, 0.2, 0.8]
def y_fwd() -> torch.Tensor: # type: ignore
if provider == "triton":
return layer_norm(x, w_shape, weight, bias, eps) # type: ignore
if provider == "torch":
return torch.nn.functional.layer_norm(
x, w_shape, weight, bias, eps) # noqa: F811, E704
if provider == "apex":
apex_layer_norm = (apex.normalization.FusedLayerNorm(
w_shape).to(x.device).to(x.dtype))
return apex_layer_norm(x) # noqa: F811, E704
# forward pass
if mode == 'forward':
def gbps(ms): return 2 * x.numel() * x.element_size() / ms * 1e-6
ms, min_ms, max_ms = triton.testing.do_bench(
y_fwd, quantiles=quantiles, rep=500)
# backward pass
if mode == 'backward':
y = y_fwd()
def gbps(ms): # noqa: F811, E704
return 3 * x.numel() * x.element_size() / ms * 1e-6
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: y.backward(dy, retain_graph=True), quantiles=quantiles,
grad_to_none=[x], rep=500)
return gbps(ms), gbps(max_ms), gbps(min_ms)
if __name__ == "__main__":
test_layer_norm(1151, 8192, torch.float16)
bench_layer_norm.run(save_path='./benchmark_result', print_data=True)
|
@triton.jit
def _layer_norm_bwd_dwdb(
DW, # pointer to the partial sum of weights gradient
DB, # pointer to the partial sum of biases gradient
FINAL_DW, # pointer to the weights gradient
FINAL_DB, # pointer to the biases gradient
M, # GROUP_SIZE_M
N, # number of columns
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr):
# Map the program id to the elements of DW and DB it should compute.
pid = tl.program_id(0)
cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
db = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
# Iterate through the rows of DW and DB to sum the partial sums.
for i in range(0, M, BLOCK_SIZE_M):
rows = i + tl.arange(0, BLOCK_SIZE_M)
mask = (rows[:, None] < M) & (cols[None, :] < N)
offs = rows[:, None] * N + cols[None, :]
dw += tl.load(DW + offs, mask=mask, other=0.)
db += tl.load(DB + offs, mask=mask, other=0.)
# Write the final sum to the output.
sum_dw = tl.sum(dw, axis=0)
sum_db = tl.sum(db, axis=0)
tl.store(FINAL_DW + cols, sum_dw, mask=cols < N)
tl.store(FINAL_DB + cols, sum_db, mask=cols < N)
class LayerNorm(torch.autograd.Function):
@staticmethod
def forward(ctx, x, normalized_shape, weight, bias, eps):
# allocate output
y = torch.empty_like(x)
# reshape input data into 2D tensor
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
mean = torch.empty((M, ), dtype=torch.float32, device=x.device)
rstd = torch.empty((M, ), dtype=torch.float32, device=x.device)
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_SIZE:
raise RuntimeError(
"This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps
num_warps = min(max(BLOCK_SIZE // 256, 1), 8)
# enqueue kernel
_layer_norm_fwd_fused[(M, )]( #
x_arg, y, weight, bias, mean, rstd, #
x_arg.stride(0), N, eps, #
BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps, num_ctas=1) # type: ignore # noqa
ctx.save_for_backward(x, weight, bias, mean, rstd)
ctx.BLOCK_SIZE = BLOCK_SIZE
ctx.num_warps = num_warps
ctx.eps = eps
return y
@staticmethod
def backward(ctx, dy):
x, w, b, m, v = ctx.saved_tensors
# heuristics for amount of parallel reduction stream for DW/DB
N = w.shape[0]
GROUP_SIZE_M = 64
if N <= 8192:
GROUP_SIZE_M = 96
if N <= 4096:
GROUP_SIZE_M = 128
if N <= 1024:
GROUP_SIZE_M = 256
# allocate output
locks = torch.zeros(
2 * GROUP_SIZE_M, dtype=torch.int32, device=w.device)
_dw = torch.zeros((GROUP_SIZE_M, N), dtype=x.dtype, device=w.device)
_db = torch.zeros((GROUP_SIZE_M, N), dtype=x.dtype, device=w.device)
dw = torch.empty((N, ), dtype=w.dtype, device=w.device)
db = torch.empty((N, ), dtype=w.dtype, device=w.device)
dx = torch.empty_like(dy)
# enqueue kernel using forward pass heuristics
# also compute partial sums for DW and DB
x_arg = x.reshape(-1, x.shape[-1])
M, N = x_arg.shape
_layer_norm_bwd_dx_fused[(M, )]( #
dx, dy, _dw, _db, x, w, m, v, locks, #
x_arg.stride(0), N, #
BLOCK_SIZE_N=ctx.BLOCK_SIZE, #
GROUP_SIZE_M=GROUP_SIZE_M, #
num_warps=ctx.num_warps) # type: ignore
def grid(meta): return [triton.cdiv(N, meta['BLOCK_SIZE_N'])]
# accumulate partial sums in separate kernel
_layer_norm_bwd_dwdb[grid](
_dw, _db, dw, db, min(GROUP_SIZE_M, M), N, #
BLOCK_SIZE_M=32, #
BLOCK_SIZE_N=128, num_ctas=1) # type: ignore
return dx, None, dw, db, None
layer_norm = LayerNorm.apply
def test_layer_norm(M, N, dtype, eps=1e-5, device='cuda'):
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype,
device=device, requires_grad=True)
bias = torch.rand(w_shape, dtype=dtype, device=device, requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device=device)
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
# forward pass
y_tri: torch.Tensor = layer_norm(x, w_shape, weight, bias, eps) # type: ignore # noqa
y_ref = torch.nn.functional.layer_norm(
x, w_shape, weight, bias, eps).to(dtype)
# backward pass (triton)
y_tri.backward(dy, retain_graph=True)
dx_tri, dw_tri, db_tri = [_.grad.clone() for _ in [x, weight, bias]] # type: ignore # noqa
x.grad, weight.grad, bias.grad = None, None, None
# backward pass (torch)
y_ref.backward(dy, retain_graph=True)
dx_ref, dw_ref, db_ref = [_.grad.clone() for _ in [x, weight, bias]] # type: ignore # noqa
# compare
assert torch.allclose(y_tri, y_ref, atol=1e-2, rtol=0)
assert torch.allclose(dx_tri, dx_ref, atol=1e-2, rtol=0)
assert torch.allclose(db_tri, db_ref, atol=1e-2, rtol=0)
assert torch.allclose(dw_tri, dw_ref, atol=1e-2, rtol=0)
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['N'],
x_vals=[512 * i for i in range(2, 32)],
line_arg='provider',
line_vals=['triton', 'torch'] + (['apex'] if HAS_APEX else []),
line_names=['Triton', 'Torch'] + (['Apex'] if HAS_APEX else []),
styles=[('blue', '-'), ('green', '-'), ('orange', '-')],
ylabel='GB/s',
plot_name='layer-norm-backward',
args={'M': 4096, 'dtype': torch.float16, 'mode': 'backward'},
))
def bench_layer_norm(
M, N, dtype, provider, mode='backward', eps=1e-5, device='cuda'):
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype,
device=device, requires_grad=True)
bias = torch.rand(w_shape, dtype=dtype, device=device, requires_grad=True)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device=device)
dy = .1 * torch.randn_like(x)
x.requires_grad_(True)
quantiles = [0.5, 0.2, 0.8]
def y_fwd() -> torch.Tensor: # type: ignore
if provider == "triton":
return layer_norm(x, w_shape, weight, bias, eps) # type: ignore
if provider == "torch":
return torch.nn.functional.layer_norm(
x, w_shape, weight, bias, eps) # noqa: F811, E704
if provider == "apex":
apex_layer_norm = (apex.normalization.FusedLayerNorm(
w_shape).to(x.device).to(x.dtype))
return apex_layer_norm(x) # noqa: F811, E704
# forward pass
if mode == 'forward':
def gbps(ms): return 2 * x.numel() * x.element_size() / ms * 1e-6
ms, min_ms, max_ms = triton.testing.do_bench(
y_fwd, quantiles=quantiles, rep=500)
# backward pass
if mode == 'backward':
y = y_fwd()
def gbps(ms): # noqa: F811, E704
return 3 * x.numel() * x.element_size() / ms * 1e-6
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: y.backward(dy, retain_graph=True), quantiles=quantiles,
grad_to_none=[x], rep=500)
return gbps(ms), gbps(max_ms), gbps(min_ms)
if __name__ == "__main__":
test_layer_norm(1151, 8192, torch.float16)
bench_layer_norm.run(save_path='./benchmark_result', print_data=True)
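The weight/bias gradients are reduced in two stages: each row first accumulates into one of GROUP_SIZE_M partial buffers (chosen by row % GROUP_SIZE_M and serialized by a per-buffer spinlock), and _layer_norm_bwd_dwdb then sums the buffers column-wise. A plain PyTorch sketch of the same reduction, for intuition only (names below are not from the original file):
import torch

def dwdb_two_stage_reference(dy, x, mean, rstd, group_size_m=64):
    M, N = x.shape
    xhat = (x.float() - mean[:, None]) * rstd[:, None]
    partial_dw = torch.zeros(group_size_m, N)
    partial_db = torch.zeros(group_size_m, N)
    # Stage 1: row r accumulates into buffer r % group_size_m.
    for row in range(M):
        partial_dw[row % group_size_m] += dy[row].float() * xhat[row]
        partial_db[row % group_size_m] += dy[row].float()
    # Stage 2: column-wise sum over the partial buffers.
    return partial_dw.sum(dim=0), partial_db.sum(dim=0)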
|
BobMcDear/attorch
|
attorch/math.py
|
https://github.com/BobMcDear/attorch/blob/fdd7c33c9476f19488b9025404112f56212dcb05/attorch/math.py
|
"""
Pure math operations to be performed on loaded Triton tensors.
"""
import triton
import triton.language as tl
from .act_kernels import apply_act_func
@triton.jit
def accum_linear(accum, input1, input2,
fp16: tl.constexpr, tf32: tl.constexpr):
"""
Accumulates matrix multiplications of input tensors for linear functions.
Args:
accum: Accumulator holding aggregation of matrix multiplications.
The accumulator must be of shape [BLOCK_SIZE1, BLOCK_SIZE3].
input1: First operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
input2: Second operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE2, BLOCK_SIZE3].
fp16: Flag for converting operands to FP16.
tf32: Flag for performing matrix multiplication in TF32.
Returns:
Accumulator with the result of the new matrix multiplication added to it.
"""
if fp16:
input1 = input1.to(tl.float16)
input2 = input2.to(tl.float16)
return accum + tl.dot(input1, input2, allow_tf32=tf32)
@triton.jit
def glu(input1, input2, param, act_func: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1: First half of input to gate.
The first half must be of the same shape as the second half.
input2: Second half of input to gate.
The second half must be of the same shape as the first half.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish',
'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'.
    Returns:
Input transformed by the gated linear unit
with an arbitrary activation function.
"""
return input1 * apply_act_func(input2, None, None, None, param, act_func, False)
@triton.jit
def softmax(input,
log: tl.constexpr):
"""
Normalizes the input using softmax along the last dimension.
Args:
input: Input to normalize.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
log: Flag for indicating if the log of softmax should be taken.
Returns:
Input normalized by softmax.
"""
input = input.to(tl.float32)
input = input - tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
return output
@triton.jit
def calc_mean_and_inv_std(input, last_dim, eps,
last_dim_mask: tl.constexpr):
"""
Calculates the mean and inverse standard deviation of the input
along the last dimension.
Args:
input: Input whose mean and inverse standard deviation are calculated.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
last_dim: Size of the last dimension of input.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
last_dim_mask: Mask for the last dimension indicating
which elements should be included in the calculations.
The mask must be of shape [BLOCK_SIZE2].
Returns:
Mean and inverse standard deviation of the input.
"""
input = input.to(tl.float32)
mean = tl.sum(input, axis=1) / last_dim
diff = tl.where(last_dim_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / last_dim + eps)
return mean, inv_std
@triton.jit
def update_welford(input, prev_count, prev_mean, prev_var, curr_count,
mask: tl.constexpr):
"""
Updates count, mean, and variance (M2) statistics for Welford's algorithm.
Args:
input: Input used to update statistics.
The input must be of the same shape as the mask.
prev_count: Previous count statistic to update.
prev_mean: Previous mean statistic to update.
prev_var: Previous variance (M2) statistic to update.
curr_count: Count of elements in current input.
mask: Mask indicating which elements should be included in the calculations.
The mask must be of the same shape as the input.
Returns:
Updated count, mean, and variance (M2) statistics
"""
input = input.to(tl.float32)
count = prev_count + curr_count
mean = (tl.sum(input) - curr_count * prev_mean) / count
deltas = tl.where(mask, (input - mean) * (input - prev_mean), 0.)
var = prev_var + tl.sum(deltas)
return count, mean, var
@triton.jit
def update_ema(prev_ema, new_val, momentum):
"""
Updates exponential moving average.
Args:
prev_ema: Previous exponential moving average.
new_val: Value used to update the exponential moving average.
momentum: Momentum.
Returns:
Updated running statistic.
"""
return (1 - momentum) * prev_ema + momentum * new_val
@triton.jit
def standardize(input, mean, inv_std, weight, bias):
"""
Standardizes the input given its mean and inverse standard deviation,
multiplies the result by weights, and adds a bias vector.
Args:
input: Input to standardize.
mean: Mean of input.
inv_std: Inverse standard deviation of input.
weight: Weight multiplied by the standardized input.
bias: Bias added to the result of the weight multiplication.
Returns:
Standardized input.
"""
return weight * inv_std * (input - mean) + bias
@triton.jit
def calc_p_loss(input, target, size,
p_loss: tl.constexpr, reduction: tl.constexpr):
"""
Measures the L1 or squared L2 norm of the difference between the input
and target (i.e., mean absolute error or mean squared error).
Args:
input: Input.
The input must be of shape [BLOCK_SIZE].
target: Target.
The target must be of shape [BLOCK_SIZE].
size: Number of elements in the input and target.
This value is used only if reduction is 'mean'.
p_loss: p-norm used to compute the error.
Options are 1 for MAE and 2 for MSE.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the error
across all entries, and 'sum' for summing the error across all entries.
Returns:
Error.
"""
input = input.to(tl.float32)
target = target.to(tl.float32)
diff = input - target
if p_loss == 1:
error = tl.abs(diff)
elif p_loss == 2:
error = diff * diff
if reduction == 'none':
output = error
elif reduction == 'mean':
output = tl.sum(error) / size
elif reduction == 'sum':
output = tl.sum(error)
return output
@triton.jit
def nll_loss(input, size,
reduction: tl.constexpr):
"""
Measures the negative log likelihood loss given log-probabilities of target class.
Args:
input: Input containing predicted log-probabilities corresponding to target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
@triton.jit
def cross_entropy_loss(input, pred):
"""
Measures the per-row cross entropy loss given
input and predicted logits corresponding to target class.
Args:
input: Input.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
pred: Predicted logits corresponding to target class.
The predictions must be of shape [BLOCK_SIZE1].
Returns:
Loss.
"""
input = input.to(tl.float32)
pred = pred.to(tl.float32)
mx = tl.max(input, axis=1)
input -= mx[:, None]
loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx
return loss
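cross_entropy_loss above reduces to logsumexp(logits, dim=1) minus the target-class logit; a quick check against torch.nn.functional.cross_entropy (illustrative snippet, not part of the original file):
import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)
target = torch.randint(0, 10, (4,))
pred = logits[torch.arange(4), target]            # logits of the target class
loss = torch.logsumexp(logits, dim=1) - pred      # what the kernel computes
assert torch.allclose(
    loss, F.cross_entropy(logits, target, reduction='none'), atol=1e-6)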
|
@triton.jit
def accum_linear(accum, input1, input2,
fp16: tl.constexpr, tf32: tl.constexpr):
"""
Accumulates matrix multiplications of input tensors for linear functions.
Args:
accum: Accumulator holding aggregation of matrix multiplications.
The accumulator must be of shape [BLOCK_SIZE1, BLOCK_SIZE3].
input1: First operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
input2: Second operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE2, BLOCK_SIZE3].
fp16: Flag for converting operands to FP16.
tf32: Flag for performing matrix multiplication in TF32.
Returns:
Accumulator with the result of the new matrix multiplication added to it.
"""
if fp16:
input1 = input1.to(tl.float16)
input2 = input2.to(tl.float16)
return accum + tl.dot(input1, input2, allow_tf32=tf32)
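For intuition, the chunk above behaves like a tile-level multiply-accumulate into a float32 accumulator, with an optional FP16 cast of the operands. A rough PyTorch analogue (illustrative only; tl.dot additionally controls TF32 and accumulates in float32 on the tensor cores):
import torch

def accum_linear_reference(accum, input1, input2, fp16=False):
    if fp16:
        input1 = input1.half()
        input2 = input2.half()
    return accum + (input1 @ input2).float()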
|
BobMcDear/attorch
|
attorch/math.py
|
https://github.com/BobMcDear/attorch/blob/fdd7c33c9476f19488b9025404112f56212dcb05/attorch/math.py
|
"""
Pure math operations to be performed on loaded Triton tensors.
"""
import triton
import triton.language as tl
from .act_kernels import apply_act_func
@triton.jit
def accum_linear(accum, input1, input2,
fp16: tl.constexpr, tf32: tl.constexpr):
"""
Accumulates matrix multiplications of input tensors for linear functions.
Args:
accum: Accumulator holding aggregation of matrix multiplications.
The accumulator must be of shape [BLOCK_SIZE1, BLOCK_SIZE3].
input1: First operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
input2: Second operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE2, BLOCK_SIZE3].
fp16: Flag for converting operands to FP16.
tf32: Flag for performing matrix multiplication in TF32.
Returns:
Accumulator with the result of the new matrix multiplication added to it.
"""
if fp16:
input1 = input1.to(tl.float16)
input2 = input2.to(tl.float16)
return accum + tl.dot(input1, input2, allow_tf32=tf32)
@triton.jit
def glu(input1, input2, param, act_func: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1: First half of input to gate.
The first half must be of the same shape as the second half.
input2: Second half of input to gate.
The second half must be of the same shape as the first half.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish',
'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'.
    Returns:
Input transformed by the gated linear unit
with an arbitrary activation function.
"""
return input1 * apply_act_func(input2, None, None, None, param, act_func, False)
@triton.jit
def softmax(input,
log: tl.constexpr):
"""
Normalizes the input using softmax along the last dimension.
Args:
input: Input to normalize.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
log: Flag for indicating if the log of softmax should be taken.
Returns:
Input normalized by softmax.
"""
input = input.to(tl.float32)
input = input - tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
return output
@triton.jit
def calc_mean_and_inv_std(input, last_dim, eps,
last_dim_mask: tl.constexpr):
"""
Calculates the mean and inverse standard deviation of the input
along the last dimension.
Args:
input: Input whose mean and inverse standard deviation are calculated.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
last_dim: Size of the last dimension of input.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
last_dim_mask: Mask for the last dimension indicating
which elements should be included in the calculations.
The mask must be of shape [BLOCK_SIZE2].
Returns:
Mean and inverse standard deviation of the input.
"""
input = input.to(tl.float32)
mean = tl.sum(input, axis=1) / last_dim
diff = tl.where(last_dim_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / last_dim + eps)
return mean, inv_std
@triton.jit
def update_welford(input, prev_count, prev_mean, prev_var, curr_count,
mask: tl.constexpr):
"""
Updates count, mean, and variance (M2) statistics for Welford's algorithm.
Args:
input: Input used to update statistics.
The input must be of the same shape as the mask.
prev_count: Previous count statistic to update.
prev_mean: Previous mean statistic to update.
prev_var: Previous variance (M2) statistic to update.
curr_count: Count of elements in current input.
mask: Mask indicating which elements should be included in the calculations.
The mask must be of the same shape as the input.
Returns:
        Updated count, mean, and variance (M2) statistics.
"""
input = input.to(tl.float32)
count = prev_count + curr_count
mean = (tl.sum(input) - curr_count * prev_mean) / count
deltas = tl.where(mask, (input - mean) * (input - prev_mean), 0.)
var = prev_var + tl.sum(deltas)
return count, mean, var
@triton.jit
def update_ema(prev_ema, new_val, momentum):
"""
Updates exponential moving average.
Args:
prev_ema: Previous exponential moving average.
new_val: Value used to update the exponential moving average.
momentum: Momentum.
Returns:
Updated running statistic.
"""
return (1 - momentum) * prev_ema + momentum * new_val
@triton.jit
def standardize(input, mean, inv_std, weight, bias):
"""
Standardizes the input given its mean and inverse standard deviation,
multiplies the result by weights, and adds a bias vector.
Args:
input: Input to standardize.
mean: Mean of input.
inv_std: Inverse standard deviation of input.
weight: Weight multiplied by the standardized input.
bias: Bias added to the result of the weight multiplication.
Returns:
Standardized input.
"""
return weight * inv_std * (input - mean) + bias
@triton.jit
def calc_p_loss(input, target, size,
p_loss: tl.constexpr, reduction: tl.constexpr):
"""
Measures the L1 or squared L2 norm of the difference between the input
and target (i.e., mean absolute error or mean squared error).
Args:
input: Input.
The input must be of shape [BLOCK_SIZE].
target: Target.
The target must be of shape [BLOCK_SIZE].
size: Number of elements in the input and target.
This value is used only if reduction is 'mean'.
p_loss: p-norm used to compute the error.
Options are 1 for MAE and 2 for MSE.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the error
across all entries, and 'sum' for summing the error across all entries.
Returns:
Error.
"""
input = input.to(tl.float32)
target = target.to(tl.float32)
diff = input - target
if p_loss == 1:
error = tl.abs(diff)
elif p_loss == 2:
error = diff * diff
if reduction == 'none':
output = error
elif reduction == 'mean':
output = tl.sum(error) / size
elif reduction == 'sum':
output = tl.sum(error)
return output
@triton.jit
def nll_loss(input, size,
reduction: tl.constexpr):
"""
Measures the negative log likelihood loss given log-probabilities of target class.
Args:
input: Input containing predicted log-probabilities corresponding to target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
@triton.jit
def cross_entropy_loss(input, pred):
"""
Measures the per-row cross entropy loss given
input and predicted logits corresponding to target class.
Args:
input: Input.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
pred: Predicted logits corresponding to target class.
The predictions must be of shape [BLOCK_SIZE1].
Returns:
Loss.
"""
input = input.to(tl.float32)
pred = pred.to(tl.float32)
mx = tl.max(input, axis=1)
input -= mx[:, None]
loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx
return loss
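calc_mean_and_inv_std and standardize above compose into a layer norm over the last dimension; an equivalent PyTorch check (illustrative snippet, not part of the original file):
import torch
import torch.nn.functional as F

x = torch.randn(3, 16)
w, b, eps = torch.randn(16), torch.randn(16), 1e-5

mean = x.mean(dim=1, keepdim=True)                                       # sum / last_dim
inv_std = torch.rsqrt(x.var(dim=1, unbiased=False, keepdim=True) + eps)  # rsqrt(var + eps)
y = w * inv_std * (x - mean) + b                                         # standardize(...)
assert torch.allclose(y, F.layer_norm(x, (16,), w, b, eps), atol=1e-5)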
|
@triton.jit
def glu(input1, input2, param, act_func: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1: First half of input to gate.
The first half must be of the same shape as the second half.
input2: Second half of input to gate.
The second half must be of the same shape as the first half.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish',
'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'.
    Returns:
Input transformed by the gated linear unit
with an arbitrary activation function.
"""
return input1 * apply_act_func(input2, None, None, None, param, act_func, False)
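The chunk above gates one half of the input with an activated second half. Written with plain PyTorch ops for a few concrete activations (illustrative only; the kernel dispatches through apply_act_func):
import torch
import torch.nn.functional as F

def glu_reference(input1, input2, act_func='silu'):
    acts = {'sigmoid': torch.sigmoid, 'tanh': torch.tanh,
            'relu': F.relu, 'gelu': F.gelu, 'silu': F.silu}
    return input1 * acts[act_func](input2)

# Example: split a (B, 2*D) input into two halves and gate.
x = torch.randn(4, 8)
a, b = x.chunk(2, dim=-1)
out = glu_reference(a, b)   # shape (4, 4)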
|
BobMcDear/attorch
|
attorch/math.py
|
https://github.com/BobMcDear/attorch/blob/fdd7c33c9476f19488b9025404112f56212dcb05/attorch/math.py
|
"""
Pure math operations to be performed on loaded Triton tensors.
"""
import triton
import triton.language as tl
from .act_kernels import apply_act_func
@triton.jit
def accum_linear(accum, input1, input2,
fp16: tl.constexpr, tf32: tl.constexpr):
"""
Accumulates matrix multiplications of input tensors for linear functions.
Args:
accum: Accumulator holding aggregation of matrix multiplications.
The accumulator must be of shape [BLOCK_SIZE1, BLOCK_SIZE3].
input1: First operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
input2: Second operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE2, BLOCK_SIZE3].
fp16: Flag for converting operands to FP16.
tf32: Flag for performing matrix multiplication in TF32.
Returns:
Accumulator with the result of the new matrix multiplication added to it.
"""
if fp16:
input1 = input1.to(tl.float16)
input2 = input2.to(tl.float16)
return accum + tl.dot(input1, input2, allow_tf32=tf32)
@triton.jit
def glu(input1, input2, param, act_func: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1: First half of input to gate.
The first half must be of the same shape as the second half.
input2: Second half of input to gate.
The second half must be of the same shape as the first half.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish',
'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'.
    Returns:
Input transformed by the gated linear unit
with an arbitrary activation function.
"""
return input1 * apply_act_func(input2, None, None, None, param, act_func, False)
@triton.jit
def softmax(input,
log: tl.constexpr):
"""
Normalizes the input using softmax along the last dimension.
Args:
input: Input to normalize.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
log: Flag for indicating if the log of softmax should be taken.
Returns:
Input normalized by softmax.
"""
input = input.to(tl.float32)
input = input - tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
return output
@triton.jit
def calc_mean_and_inv_std(input, last_dim, eps,
last_dim_mask: tl.constexpr):
"""
Calculates the mean and inverse standard deviation of the input
along the last dimension.
Args:
input: Input whose mean and inverse standard deviation are calculated.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
last_dim: Size of the last dimension of input.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
last_dim_mask: Mask for the last dimension indicating
which elements should be included in the calculations.
The mask must be of shape [BLOCK_SIZE2].
Returns:
Mean and inverse standard deviation of the input.
"""
input = input.to(tl.float32)
mean = tl.sum(input, axis=1) / last_dim
diff = tl.where(last_dim_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / last_dim + eps)
return mean, inv_std
@triton.jit
def update_welford(input, prev_count, prev_mean, prev_var, curr_count,
mask: tl.constexpr):
"""
Updates count, mean, and variance (M2) statistics for Welford's algorithm.
Args:
input: Input used to update statistics.
The input must be of the same shape as the mask.
prev_count: Previous count statistic to update.
prev_mean: Previous mean statistic to update.
prev_var: Previous variance (M2) statistic to update.
curr_count: Count of elements in current input.
mask: Mask indicating which elements should be included in the calculations.
The mask must be of the same shape as the input.
Returns:
        Updated count, mean, and variance (M2) statistics.
"""
input = input.to(tl.float32)
count = prev_count + curr_count
mean = (tl.sum(input) - curr_count * prev_mean) / count
deltas = tl.where(mask, (input - mean) * (input - prev_mean), 0.)
var = prev_var + tl.sum(deltas)
return count, mean, var
@triton.jit
def update_ema(prev_ema, new_val, momentum):
"""
Updates exponential moving average.
Args:
prev_ema: Previous exponential moving average.
new_val: Value used to update the exponential moving average.
momentum: Momentum.
Returns:
Updated running statistic.
"""
return (1 - momentum) * prev_ema + momentum * new_val
@triton.jit
def standardize(input, mean, inv_std, weight, bias):
"""
Standardizes the input given its mean and inverse standard deviation,
multiplies the result by weights, and adds a bias vector.
Args:
input: Input to standardize.
mean: Mean of input.
inv_std: Inverse standard deviation of input.
weight: Weight multiplied by the standardized input.
bias: Bias added to the result of the weight multiplication.
Returns:
Standardized input.
"""
return weight * inv_std * (input - mean) + bias
@triton.jit
def calc_p_loss(input, target, size,
p_loss: tl.constexpr, reduction: tl.constexpr):
"""
Measures the L1 or squared L2 norm of the difference between the input
and target (i.e., mean absolute error or mean squared error).
Args:
input: Input.
The input must be of shape [BLOCK_SIZE].
target: Target.
The target must be of shape [BLOCK_SIZE].
size: Number of elements in the input and target.
This value is used only if reduction is 'mean'.
p_loss: p-norm used to compute the error.
Options are 1 for MAE and 2 for MSE.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the error
across all entries, and 'sum' for summing the error across all entries.
Returns:
Error.
"""
input = input.to(tl.float32)
target = target.to(tl.float32)
diff = input - target
if p_loss == 1:
error = tl.abs(diff)
elif p_loss == 2:
error = diff * diff
if reduction == 'none':
output = error
elif reduction == 'mean':
output = tl.sum(error) / size
elif reduction == 'sum':
output = tl.sum(error)
return output
@triton.jit
def nll_loss(input, size,
reduction: tl.constexpr):
"""
Measures the negative log likelihood loss given log-probabilities of target class.
Args:
input: Input containing predicted log-probabilities corresponding to target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
@triton.jit
def cross_entropy_loss(input, pred):
"""
Measures the per-row cross entropy loss given
input and predicted logits corresponding to target class.
Args:
input: Input.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
pred: Predicted logits corresponding to target class.
The predictions must be of shape [BLOCK_SIZE1].
Returns:
Loss.
"""
input = input.to(tl.float32)
pred = pred.to(tl.float32)
mx = tl.max(input, axis=1)
input -= mx[:, None]
loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx
return loss
|
@triton.jit
def softmax(input,
log: tl.constexpr):
"""
Normalizes the input using softmax along the last dimension.
Args:
input: Input to normalize.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
log: Flag for indicating if the log of softmax should be taken.
Returns:
Input normalized by softmax.
"""
input = input.to(tl.float32)
input = input - tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
return output
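# Illustrative sketch, not from attorch: the same max-subtracted softmax math on the host
# side in PyTorch, checked against torch.softmax / torch.log_softmax. Assumes each row of
# the input corresponds to one kernel block.
import torch

def softmax_reference(x: torch.Tensor, log: bool = False) -> torch.Tensor:
    x = x.float()
    x = x - x.max(dim=1, keepdim=True).values   # subtract the row max so exp cannot overflow
    numerator = x.exp()
    denominator = numerator.sum(dim=1, keepdim=True)
    return x - denominator.log() if log else numerator / denominator

x = torch.randn(4, 8)
assert torch.allclose(softmax_reference(x), torch.softmax(x, dim=1), atol=1e-6)
assert torch.allclose(softmax_reference(x, log=True), torch.log_softmax(x, dim=1), atol=1e-6)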
|
BobMcDear/attorch
|
attorch/math.py
|
https://github.com/BobMcDear/attorch/blob/fdd7c33c9476f19488b9025404112f56212dcb05/attorch/math.py
|
"""
Pure math operations to be performed on loaded Triton tensors.
"""
import triton
import triton.language as tl
from .act_kernels import apply_act_func
@triton.jit
def accum_linear(accum, input1, input2,
fp16: tl.constexpr, tf32: tl.constexpr):
"""
Accumulates matrix multiplications of input tensors for linear functions.
Args:
accum: Accumulator holding aggregation of matrix multiplications.
The accumulator must be of shape [BLOCK_SIZE1, BLOCK_SIZE3].
input1: First operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
input2: Second operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE2, BLOCK_SIZE3].
fp16: Flag for converting operands to FP16.
tf32: Flag for performing matrix multiplication in TF32.
Returns:
Accumulator with the result of the new matrix multiplication added to it.
"""
if fp16:
input1 = input1.to(tl.float16)
input2 = input2.to(tl.float16)
return accum + tl.dot(input1, input2, allow_tf32=tf32)
@triton.jit
def glu(input1, input2, param, act_func: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1: First half of input to gate.
The first half must be of the same shape as the second half.
input2: Second half of input to gate.
The second half must be of the same shape as the first half.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish',
'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'.
Returns:
Input transformed by the gated linear unit
with an arbitrary activation function.
"""
return input1 * apply_act_func(input2, None, None, None, param, act_func, False)
@triton.jit
def softmax(input,
log: tl.constexpr):
"""
Normalizes the input using softmax along the last dimension.
Args:
input: Input to normalize.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
log: Flag for indicating if the log of softmax should be taken.
Returns:
Input normalized by softmax.
"""
input = input.to(tl.float32)
input = input - tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
return output
@triton.jit
def calc_mean_and_inv_std(input, last_dim, eps,
last_dim_mask: tl.constexpr):
"""
Calculates the mean and inverse standard deviation of the input
along the last dimension.
Args:
input: Input whose mean and inverse standard deviation are calculated.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
last_dim: Size of the last dimension of input.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
last_dim_mask: Mask for the last dimension indicating
which elements should be included in the calculations.
The mask must be of shape [BLOCK_SIZE2].
Returns:
Mean and inverse standard deviation of the input.
"""
input = input.to(tl.float32)
mean = tl.sum(input, axis=1) / last_dim
diff = tl.where(last_dim_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / last_dim + eps)
return mean, inv_std
@triton.jit
def update_welford(input, prev_count, prev_mean, prev_var, curr_count,
mask: tl.constexpr):
"""
Updates count, mean, and variance (M2) statistics for Welford's algorithm.
Args:
input: Input used to update statistics.
The input must be of the same shape as the mask.
prev_count: Previous count statistic to update.
prev_mean: Previous mean statistic to update.
prev_var: Previous variance (M2) statistic to update.
curr_count: Count of elements in current input.
mask: Mask indicating which elements should be included in the calculations.
The mask must be of the same shape as the input.
Returns:
Updated count, mean, and variance (M2) statistics
"""
input = input.to(tl.float32)
count = prev_count + curr_count
mean = (tl.sum(input) - curr_count * prev_mean) / count
deltas = tl.where(mask, (input - mean) * (input - prev_mean), 0.)
var = prev_var + tl.sum(deltas)
return count, mean, var
@triton.jit
def update_ema(prev_ema, new_val, momentum):
"""
Updates exponential moving average.
Args:
prev_ema: Previous exponential moving average.
new_val: Value used to update the exponential moving average.
momentum: Momentum.
Returns:
Updated running statistic.
"""
return (1 - momentum) * prev_ema + momentum * new_val
@triton.jit
def standardize(input, mean, inv_std, weight, bias):
"""
Standardizes the input given its mean and inverse standard deviation,
multiplies the result by weights, and adds a bias vector.
Args:
input: Input to standardize.
mean: Mean of input.
inv_std: Inverse standard deviation of input.
weight: Weight multiplied by the standardized input.
bias: Bias added to the result of the weight multiplication.
Returns:
Standardized input.
"""
return weight * inv_std * (input - mean) + bias
@triton.jit
def calc_p_loss(input, target, size,
p_loss: tl.constexpr, reduction: tl.constexpr):
"""
Measures the L1 or squared L2 norm of the difference between the input
and target (i.e., mean absolute error or mean squared error).
Args:
input: Input.
The input must be of shape [BLOCK_SIZE].
target: Target.
The target must be of shape [BLOCK_SIZE].
size: Number of elements in the input and target.
This value is used only if reduction is 'mean'.
p_loss: p-norm used to compute the error.
Options are 1 for MAE and 2 for MSE.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the error
across all entries, and 'sum' for summing the error across all entries.
Returns:
Error.
"""
input = input.to(tl.float32)
target = target.to(tl.float32)
diff = input - target
if p_loss == 1:
error = tl.abs(diff)
elif p_loss == 2:
error = diff * diff
if reduction == 'none':
output = error
elif reduction == 'mean':
output = tl.sum(error) / size
elif reduction == 'sum':
output = tl.sum(error)
return output
@triton.jit
def nll_loss(input, size,
reduction: tl.constexpr):
"""
Measures the negative log likelihood loss given log-probabilities of target class.
Args:
input: Input containing predicted log-probabilities corresponding to target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
@triton.jit
def cross_entropy_loss(input, pred):
"""
Measures the per-row cross entropy loss given
input and predicted logits corresponding to target class.
Args:
input: Input.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
pred: Predicted logits corresponding to target class.
The predictions must be of shape [BLOCK_SIZE1].
Returns:
Loss.
"""
input = input.to(tl.float32)
pred = pred.to(tl.float32)
mx = tl.max(input, axis=1)
input -= mx[:, None]
loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx
return loss
|
@triton.jit
def calc_mean_and_inv_std(input, last_dim, eps,
last_dim_mask: tl.constexpr):
"""
Calculates the mean and inverse standard deviation of the input
along the last dimension.
Args:
input: Input whose mean and inverse standard deviation are calculated.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
last_dim: Size of the last dimension of input.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
last_dim_mask: Mask for the last dimension indicating
which elements should be included in the calculations.
The mask must be of shape [BLOCK_SIZE2].
Returns:
Mean and inverse standard deviation of the input.
"""
input = input.to(tl.float32)
mean = tl.sum(input, axis=1) / last_dim
diff = tl.where(last_dim_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / last_dim + eps)
return mean, inv_std
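# Illustrative sketch, not from attorch: the same statistics computed on the host, with no
# masking (the whole row is assumed valid). The variance is biased (divided by last_dim) and
# eps sits inside the square root, exactly as LayerNorm expects.
import torch

def mean_and_inv_std_reference(x: torch.Tensor, eps: float = 1e-5):
    x = x.float()
    mean = x.mean(dim=1)                                  # tl.sum(input, axis=1) / last_dim
    var = ((x - mean[:, None]) ** 2).mean(dim=1)          # biased variance over the last dim
    return mean, torch.rsqrt(var + eps)                   # tl.rsqrt(var + eps)

x = torch.randn(4, 16)
mean, inv_std = mean_and_inv_std_reference(x)
assert torch.allclose(inv_std, torch.rsqrt(x.var(dim=1, unbiased=False) + 1e-5), atol=1e-6)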
|
BobMcDear/attorch
|
attorch/math.py
|
https://github.com/BobMcDear/attorch/blob/fdd7c33c9476f19488b9025404112f56212dcb05/attorch/math.py
|
"""
Pure math operations to be performed on loaded Triton tensors.
"""
import triton
import triton.language as tl
from .act_kernels import apply_act_func
@triton.jit
def accum_linear(accum, input1, input2,
fp16: tl.constexpr, tf32: tl.constexpr):
"""
Accumulates matrix multiplications of input tensors for linear functions.
Args:
accum: Accumulator holding aggregation of matrix multiplications.
The accumulator must be of shape [BLOCK_SIZE1, BLOCK_SIZE3].
input1: First operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
input2: Second operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE2, BLOCK_SIZE3].
fp16: Flag for converting operands to FP16.
tf32: Flag for performing matrix multiplication in TF32.
Returns:
Accumulator with the result of the new matrix multiplication added to it.
"""
if fp16:
input1 = input1.to(tl.float16)
input2 = input2.to(tl.float16)
return accum + tl.dot(input1, input2, allow_tf32=tf32)
@triton.jit
def glu(input1, input2, param, act_func: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1: First half of input to gate.
The first half must be of the same shape as the second half.
input2: Second half of input to gate.
The second half must be of the same shape as the first half.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish',
'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'.
Returns:
Input transformed by the gated linear unit
with an arbitrary activation function.
"""
return input1 * apply_act_func(input2, None, None, None, param, act_func, False)
@triton.jit
def softmax(input,
log: tl.constexpr):
"""
Normalizes the input using softmax along the last dimension.
Args:
input: Input to normalize.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
log: Flag for indicating if the log of softmax should be taken.
Returns:
Input normalized by softmax.
"""
input = input.to(tl.float32)
input = input - tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
return output
@triton.jit
def calc_mean_and_inv_std(input, last_dim, eps,
last_dim_mask: tl.constexpr):
"""
Calculates the mean and inverse standard deviation of the input
along the last dimension.
Args:
input: Input whose mean and inverse standard deviation are calculated.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
last_dim: Size of the last dimension of input.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
last_dim_mask: Mask for the last dimension indicating
which elements should be included in the calculations.
The mask must be of shape [BLOCK_SIZE2].
Returns:
Mean and inverse standard deviation of the input.
"""
input = input.to(tl.float32)
mean = tl.sum(input, axis=1) / last_dim
diff = tl.where(last_dim_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / last_dim + eps)
return mean, inv_std
@triton.jit
def update_welford(input, prev_count, prev_mean, prev_var, curr_count,
mask: tl.constexpr):
"""
Updates count, mean, and variance (M2) statistics for Welford's algorithm.
Args:
input: Input used to update statistics.
The input must be of the same shape as the mask.
prev_count: Previous count statistic to update.
prev_mean: Previous mean statistic to update.
prev_var: Previous variance (M2) statistic to update.
curr_count: Count of elements in current input.
mask: Mask indicating which elements should be included in the calculations.
The mask must be of the same shape as the input.
Returns:
Updated count, mean, and variance (M2) statistics
"""
input = input.to(tl.float32)
count = prev_count + curr_count
mean = (tl.sum(input) - curr_count * prev_mean) / count
deltas = tl.where(mask, (input - mean) * (input - prev_mean), 0.)
var = prev_var + tl.sum(deltas)
return count, mean, var
@triton.jit
def update_ema(prev_ema, new_val, momentum):
"""
Updates exponential moving average.
Args:
prev_ema: Previous exponential moving average.
new_val: Value used to update the exponential moving average.
momentum: Momentum.
Returns:
Updated running statistic.
"""
return (1 - momentum) * prev_ema + momentum * new_val
@triton.jit
def standardize(input, mean, inv_std, weight, bias):
"""
Standardizes the input given its mean and inverse standard deviation,
multiplies the result by weights, and adds a bias vector.
Args:
input: Input to standardize.
mean: Mean of input.
inv_std: Inverse standard deviation of input.
weight: Weight multiplied by the standardized input.
bias: Bias added to the result of the weight multiplication.
Returns:
Standardized input.
"""
return weight * inv_std * (input - mean) + bias
@triton.jit
def calc_p_loss(input, target, size,
p_loss: tl.constexpr, reduction: tl.constexpr):
"""
Measures the L1 or squared L2 norm of the difference between the input
and target (i.e., mean absolute error or mean squared error).
Args:
input: Input.
The input must be of shape [BLOCK_SIZE].
target: Target.
The target must be of shape [BLOCK_SIZE].
size: Number of elements in the input and target.
This value is used only if reduction is 'mean'.
p_loss: p-norm used to compute the error.
Options are 1 for MAE and 2 for MSE.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the error
across all entries, and 'sum' for summing the error across all entries.
Returns:
Error.
"""
input = input.to(tl.float32)
target = target.to(tl.float32)
diff = input - target
if p_loss == 1:
error = tl.abs(diff)
elif p_loss == 2:
error = diff * diff
if reduction == 'none':
output = error
elif reduction == 'mean':
output = tl.sum(error) / size
elif reduction == 'sum':
output = tl.sum(error)
return output
@triton.jit
def nll_loss(input, size,
reduction: tl.constexpr):
"""
Measures the negative log likelihood loss given log-probabilities of target class.
Args:
input: Input containing predicted log-probabilities corresponding to target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
@triton.jit
def cross_entropy_loss(input, pred):
"""
Measures the per-row cross entropy loss given
input and predicted logits corresponding to target class.
Args:
input: Input.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
pred: Predicted logits corresponding to target class.
The predictions must be of shape [BLOCK_SIZE1].
Returns:
Loss.
"""
input = input.to(tl.float32)
pred = pred.to(tl.float32)
mx = tl.max(input, axis=1)
input -= mx[:, None]
loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx
return loss
|
@triton.jit
def update_welford(input, prev_count, prev_mean, prev_var, curr_count,
mask: tl.constexpr):
"""
Updates count, mean, and variance (M2) statistics for Welford's algorithm.
Args:
input: Input used to update statistics.
The input must be of the same shape as the mask.
prev_count: Previous count statistic to update.
prev_mean: Previous mean statistic to update.
prev_var: Previous variance (M2) statistic to update.
curr_count: Count of elements in current input.
mask: Mask indicating which elements should be included in the calculations.
The mask must be of the same shape as the input.
Returns:
Updated count, mean, and variance (M2) statistics
"""
input = input.to(tl.float32)
count = prev_count + curr_count
mean = (tl.sum(input) - curr_count * prev_mean) / count
deltas = tl.where(mask, (input - mean) * (input - prev_mean), 0.)
var = prev_var + tl.sum(deltas)
return count, mean, var
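# Illustrative sketch, not from attorch: the textbook chunked form of Welford's update for
# the (count, mean, M2) triple this helper maintains, in plain NumPy. The M2 accumulation uses
# the same (x - new_mean) * (x - old_mean) trick as the kernel; final variance is M2 / count.
import numpy as np

def welford_merge_chunk(count, mean, m2, chunk):
    chunk = np.asarray(chunk, dtype=np.float64)
    new_count = count + chunk.size
    new_mean = mean + (chunk.sum() - chunk.size * mean) / new_count
    new_m2 = m2 + np.sum((chunk - new_mean) * (chunk - mean))
    return new_count, new_mean, new_m2

data = np.random.randn(1000)
count, mean, m2 = 0, 0.0, 0.0
for chunk in np.array_split(data, 7):          # stream the data block by block
    count, mean, m2 = welford_merge_chunk(count, mean, m2, chunk)
assert np.isclose(mean, data.mean()) and np.isclose(m2 / count, data.var())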
|
BobMcDear/attorch
|
attorch/math.py
|
https://github.com/BobMcDear/attorch/blob/fdd7c33c9476f19488b9025404112f56212dcb05/attorch/math.py
|
"""
Pure math operations to be performed on loaded Triton tensors.
"""
import triton
import triton.language as tl
from .act_kernels import apply_act_func
@triton.jit
def accum_linear(accum, input1, input2,
fp16: tl.constexpr, tf32: tl.constexpr):
"""
Accumulates matrix multiplications of input tensors for linear functions.
Args:
accum: Accumulator holding aggregation of matrix multiplications.
The accumulator must be of shape [BLOCK_SIZE1, BLOCK_SIZE3].
input1: First operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
input2: Second operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE2, BLOCK_SIZE3].
fp16: Flag for converting operands to FP16.
tf32: Flag for performing matrix multiplication in TF32.
Returns:
Accumulator with the result of the new matrix multiplication added to it.
"""
if fp16:
input1 = input1.to(tl.float16)
input2 = input2.to(tl.float16)
return accum + tl.dot(input1, input2, allow_tf32=tf32)
@triton.jit
def glu(input1, input2, param, act_func: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1: First half of input to gate.
The first half must be of the same shape as the second half.
input2: Second half of input to gate.
The second half must be of the same shape as the first half.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish',
'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'.
Returns:
Input transformed by the gated linear unit
with an arbitrary activation function.
"""
return input1 * apply_act_func(input2, None, None, None, param, act_func, False)
@triton.jit
def softmax(input,
log: tl.constexpr):
"""
Normalizes the input using softmax along the last dimension.
Args:
input: Input to normalize.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
log: Flag for indicating if the log of softmax should be taken.
Returns:
Input normalized by softmax.
"""
input = input.to(tl.float32)
input = input - tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
return output
@triton.jit
def calc_mean_and_inv_std(input, last_dim, eps,
last_dim_mask: tl.constexpr):
"""
Calculates the mean and inverse standard deviation of the input
along the last dimension.
Args:
input: Input whose mean and inverse standard deviation are calculated.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
last_dim: Size of the last dimension of input.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
last_dim_mask: Mask for the last dimension indicating
which elements should be included in the calculations.
The mask must be of shape [BLOCK_SIZE2].
Returns:
Mean and inverse standard deviation of the input.
"""
input = input.to(tl.float32)
mean = tl.sum(input, axis=1) / last_dim
diff = tl.where(last_dim_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / last_dim + eps)
return mean, inv_std
@triton.jit
def update_welford(input, prev_count, prev_mean, prev_var, curr_count,
mask: tl.constexpr):
"""
Updates count, mean, and variance (M2) statistics for Welford's algorithm.
Args:
input: Input used to update statistics.
The input must be of the same shape as the mask.
prev_count: Previous count statistic to update.
prev_mean: Previous mean statistic to update.
prev_var: Previous variance (M2) statistic to update.
curr_count: Count of elements in current input.
mask: Mask indicating which elements should be included in the calculations.
The mask must be of the same shape as the input.
Returns:
Updated count, mean, and variance (M2) statistics
"""
input = input.to(tl.float32)
count = prev_count + curr_count
mean = (tl.sum(input) - curr_count * prev_mean) / count
deltas = tl.where(mask, (input - mean) * (input - prev_mean), 0.)
var = prev_var + tl.sum(deltas)
return count, mean, var
@triton.jit
def update_ema(prev_ema, new_val, momentum):
"""
Updates exponential moving average.
Args:
prev_ema: Previous exponential moving average.
new_val: Value used to update the exponential moving average.
momentum: Momentum.
Returns:
Updated running statistic.
"""
return (1 - momentum) * prev_ema + momentum * new_val
@triton.jit
def standardize(input, mean, inv_std, weight, bias):
"""
Standardizes the input given its mean and inverse standard deviation,
multiplies the result by weights, and adds a bias vector.
Args:
input: Input to standardize.
mean: Mean of input.
inv_std: Inverse standard deviation of input.
weight: Weight multiplied by the standardized input.
bias: Bias added to the result of the weight multiplication.
Returns:
Standardized input.
"""
return weight * inv_std * (input - mean) + bias
@triton.jit
def calc_p_loss(input, target, size,
p_loss: tl.constexpr, reduction: tl.constexpr):
"""
Measures the L1 or squared L2 norm of the difference between the input
and target (i.e., mean absolute error or mean squared error).
Args:
input: Input.
The input must be of shape [BLOCK_SIZE].
target: Target.
The target must be of shape [BLOCK_SIZE].
size: Number of elements in the input and target.
This value is used only if reduction is 'mean'.
p_loss: p-norm used to compute the error.
Options are 1 for MAE and 2 for MSE.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the error
across all entries, and 'sum' for summing the error across all entries.
Returns:
Error.
"""
input = input.to(tl.float32)
target = target.to(tl.float32)
diff = input - target
if p_loss == 1:
error = tl.abs(diff)
elif p_loss == 2:
error = diff * diff
if reduction == 'none':
output = error
elif reduction == 'mean':
output = tl.sum(error) / size
elif reduction == 'sum':
output = tl.sum(error)
return output
@triton.jit
def nll_loss(input, size,
reduction: tl.constexpr):
"""
Measures the negative log likelihood loss given log-probabilities of target class.
Args:
input: Input containing predicted log-probabilities corresponding to target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
@triton.jit
def cross_entropy_loss(input, pred):
"""
Measures the per-row cross entropy loss given
input and predicted logits corresponding to target class.
Args:
input: Input.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
pred: Predicted logits corresponding to target class.
The predictions must be of shape [BLOCK_SIZE1].
Returns:
Loss.
"""
input = input.to(tl.float32)
pred = pred.to(tl.float32)
mx = tl.max(input, axis=1)
input -= mx[:, None]
loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx
return loss
|
@triton.jit
def update_ema(prev_ema, new_val, momentum):
"""
Updates exponential moving average.
Args:
prev_ema: Previous exponential moving average.
new_val: Value used to update the exponential moving average.
momentum: Momentum.
Returns:
Updated running statistic.
"""
return (1 - momentum) * prev_ema + momentum * new_val
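# Illustrative sketch, not from attorch: the same update in plain Python, as used for
# batch-norm-style running statistics; with momentum = 0.1 each new value contributes 10%.
def update_ema_reference(prev_ema: float, new_val: float, momentum: float) -> float:
    return (1 - momentum) * prev_ema + momentum * new_val

running = 0.0
for batch_stat in [2.0, 4.0, 3.0]:
    running = update_ema_reference(running, batch_stat, momentum=0.1)
print(running)   # 0.9 * (0.9 * 0.2 + 0.4) + 0.3 = 0.822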
|
BobMcDear/attorch
|
attorch/math.py
|
https://github.com/BobMcDear/attorch/blob/fdd7c33c9476f19488b9025404112f56212dcb05/attorch/math.py
|
"""
Pure math operations to be performed on loaded Triton tensors.
"""
import triton
import triton.language as tl
from .act_kernels import apply_act_func
@triton.jit
def accum_linear(accum, input1, input2,
fp16: tl.constexpr, tf32: tl.constexpr):
"""
Accumulates matrix multiplications of input tensors for linear functions.
Args:
accum: Accumulator holding aggregation of matrix multiplications.
The accumulator must be of shape [BLOCK_SIZE1, BLOCK_SIZE3].
input1: First operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
input2: Second operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE2, BLOCK_SIZE3].
fp16: Flag for converting operands to FP16.
tf32: Flag for performing matrix multiplication in TF32.
Returns:
Accumulator with the result of the new matrix multiplication added to it.
"""
if fp16:
input1 = input1.to(tl.float16)
input2 = input2.to(tl.float16)
return accum + tl.dot(input1, input2, allow_tf32=tf32)
@triton.jit
def glu(input1, input2, param, act_func: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1: First half of input to gate.
The first half must be of the same shape as the second half.
input2: Second half of input to gate.
The second half must be of the same shape as the first half.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish',
'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'.
Returns:
Input transformed by the gated linear unit
with an arbitrary activation function.
"""
return input1 * apply_act_func(input2, None, None, None, param, act_func, False)
@triton.jit
def softmax(input,
log: tl.constexpr):
"""
Normalizes the input using softmax along the last dimension.
Args:
input: Input to normalize.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
log: Flag for indicating if the log of softmax should be taken.
Returns:
Input normalized by softmax.
"""
input = input.to(tl.float32)
input = input - tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
return output
@triton.jit
def calc_mean_and_inv_std(input, last_dim, eps,
last_dim_mask: tl.constexpr):
"""
Calculates the mean and inverse standard deviation of the input
along the last dimension.
Args:
input: Input whose mean and inverse standard deviation are calculated.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
last_dim: Size of the last dimension of input.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
last_dim_mask: Mask for the last dimension indicating
which elements should be included in the calculations.
The mask must be of shape [BLOCK_SIZE2].
Returns:
Mean and inverse standard deviation of the input.
"""
input = input.to(tl.float32)
mean = tl.sum(input, axis=1) / last_dim
diff = tl.where(last_dim_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / last_dim + eps)
return mean, inv_std
@triton.jit
def update_welford(input, prev_count, prev_mean, prev_var, curr_count,
mask: tl.constexpr):
"""
Updates count, mean, and variance (M2) statistics for Welford's algorithm.
Args:
input: Input used to update statistics.
The input must be of the same shape as the mask.
prev_count: Previous count statistic to update.
prev_mean: Previous mean statistic to update.
prev_var: Previous variance (M2) statistic to update.
curr_count: Count of elements in current input.
mask: Mask indicating which elements should be included in the calculations.
The mask must be of the same shape as the input.
Returns:
Updated count, mean, and variance (M2) statistics
"""
input = input.to(tl.float32)
count = prev_count + curr_count
mean = (tl.sum(input) - curr_count * prev_mean) / count
deltas = tl.where(mask, (input - mean) * (input - prev_mean), 0.)
var = prev_var + tl.sum(deltas)
return count, mean, var
@triton.jit
def update_ema(prev_ema, new_val, momentum):
"""
Updates exponential moving average.
Args:
prev_ema: Previous exponential moving average.
new_val: Value used to update the exponential moving average.
momentum: Momentum.
Returns:
Updated running statistic.
"""
return (1 - momentum) * prev_ema + momentum * new_val
@triton.jit
def standardize(input, mean, inv_std, weight, bias):
"""
Standardizes the input given its mean and inverse standard deviation,
multiplies the result by weights, and adds a bias vector.
Args:
input: Input to standardize.
mean: Mean of input.
inv_std: Inverse standard deviation of input.
weight: Weight multiplied by the standardized input.
bias: Bias added to the result of the weight multiplication.
Returns:
Standardized input.
"""
return weight * inv_std * (input - mean) + bias
@triton.jit
def calc_p_loss(input, target, size,
p_loss: tl.constexpr, reduction: tl.constexpr):
"""
Measures the L1 or squared L2 norm of the difference between the input
and target (i.e., mean absolute error or mean squared error).
Args:
input: Input.
The input must be of shape [BLOCK_SIZE].
target: Target.
The target must be of shape [BLOCK_SIZE].
size: Number of elements in the input and target.
This value is used only if reduction is 'mean'.
p_loss: p-norm used to compute the error.
Options are 1 for MAE and 2 for MSE.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the error
across all entries, and 'sum' for summing the error across all entries.
Returns:
Error.
"""
input = input.to(tl.float32)
target = target.to(tl.float32)
diff = input - target
if p_loss == 1:
error = tl.abs(diff)
elif p_loss == 2:
error = diff * diff
if reduction == 'none':
output = error
elif reduction == 'mean':
output = tl.sum(error) / size
elif reduction == 'sum':
output = tl.sum(error)
return output
@triton.jit
def nll_loss(input, size,
reduction: tl.constexpr):
"""
Measures the negative log likelihood loss given log-probabilities of target class.
Args:
input: Input containing predicted log-probabilities corresponding to target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
@triton.jit
def cross_entropy_loss(input, pred):
"""
Measures the per-row cross entropy loss given
input and predicted logits corresponding to target class.
Args:
input: Input.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
pred: Predicted logits corresponding to target class.
The predictions must be of shape [BLOCK_SIZE1].
Returns:
Loss.
"""
input = input.to(tl.float32)
pred = pred.to(tl.float32)
mx = tl.max(input, axis=1)
input -= mx[:, None]
loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx
return loss
|
@triton.jit
def standardize(input, mean, inv_std, weight, bias):
"""
Standardizes the input given its mean and inverse standard deviation,
multiplies the result by weights, and adds a bias vector.
Args:
input: Input to standardize.
mean: Mean of input.
inv_std: Inverse standard deviation of input.
weight: Weight multiplied by the standardized input.
bias: Bias added to the result of the weight multiplication.
Returns:
Standardized input.
"""
return weight * inv_std * (input - mean) + bias
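# Illustrative sketch, not from attorch: the same affine standardization on the host side;
# together with per-row mean / inverse std it reproduces F.layer_norm (eps assumed 1e-5).
import torch
import torch.nn.functional as F

def standardize_reference(x, mean, inv_std, weight, bias):
    return weight * inv_std * (x - mean) + bias

x = torch.randn(4, 16)
weight, bias = torch.randn(16), torch.randn(16)
mean = x.mean(dim=1, keepdim=True)
inv_std = torch.rsqrt(x.var(dim=1, unbiased=False, keepdim=True) + 1e-5)
out = standardize_reference(x, mean, inv_std, weight, bias)
assert torch.allclose(out, F.layer_norm(x, (16,), weight, bias, eps=1e-5), atol=1e-5)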
|
BobMcDear/attorch
|
attorch/math.py
|
https://github.com/BobMcDear/attorch/blob/fdd7c33c9476f19488b9025404112f56212dcb05/attorch/math.py
|
"""
Pure math operations to be performed on loaded Triton tensors.
"""
import triton
import triton.language as tl
from .act_kernels import apply_act_func
@triton.jit
def accum_linear(accum, input1, input2,
fp16: tl.constexpr, tf32: tl.constexpr):
"""
Accumulates matrix multiplications of input tensors for linear functions.
Args:
accum: Accumulator holding aggregation of matrix multiplications.
The accumulator must be of shape [BLOCK_SIZE1, BLOCK_SIZE3].
input1: First operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
input2: Second operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE2, BLOCK_SIZE3].
fp16: Flag for converting operands to FP16.
tf32: Flag for performing matrix multiplication in TF32.
Returns:
Accumulator with the result of the new matrix multiplication added to it.
"""
if fp16:
input1 = input1.to(tl.float16)
input2 = input2.to(tl.float16)
return accum + tl.dot(input1, input2, allow_tf32=tf32)
@triton.jit
def glu(input1, input2, param, act_func: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1: First half of input to gate.
The first half must be of the same shape as the second half.
input2: Second half of input to gate.
The second half must be of the same shape as the first half.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish',
'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'.
Returns:
Input transformed by the gated linear unit
with an arbitrary activation function.
"""
return input1 * apply_act_func(input2, None, None, None, param, act_func, False)
@triton.jit
def softmax(input,
log: tl.constexpr):
"""
Normalizes the input using softmax along the last dimension.
Args:
input: Input to normalize.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
log: Flag for indicating if the log of softmax should be taken.
Returns:
Input normalized by softmax.
"""
input = input.to(tl.float32)
input = input - tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
return output
@triton.jit
def calc_mean_and_inv_std(input, last_dim, eps,
last_dim_mask: tl.constexpr):
"""
Calculates the mean and inverse standard deviation of the input
along the last dimension.
Args:
input: Input whose mean and inverse standard deviation are calculated.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
last_dim: Size of the last dimension of input.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
last_dim_mask: Mask for the last dimension indicating
which elements should be included in the calculations.
The mask must be of shape [BLOCK_SIZE2].
Returns:
Mean and inverse standard deviation of the input.
"""
input = input.to(tl.float32)
mean = tl.sum(input, axis=1) / last_dim
diff = tl.where(last_dim_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / last_dim + eps)
return mean, inv_std
@triton.jit
def update_welford(input, prev_count, prev_mean, prev_var, curr_count,
mask: tl.constexpr):
"""
Updates count, mean, and variance (M2) statistics for Welford's algorithm.
Args:
input: Input used to update statistics.
The input must be of the same shape as the mask.
prev_count: Previous count statistic to update.
prev_mean: Previous mean statistic to update.
prev_var: Previous variance (M2) statistic to update.
curr_count: Count of elements in current input.
mask: Mask indicating which elements should be included in the calculations.
The mask must be of the same shape as the input.
Returns:
Updated count, mean, and variance (M2) statistics
"""
input = input.to(tl.float32)
count = prev_count + curr_count
mean = (tl.sum(input) - curr_count * prev_mean) / count
deltas = tl.where(mask, (input - mean) * (input - prev_mean), 0.)
var = prev_var + tl.sum(deltas)
return count, mean, var
@triton.jit
def update_ema(prev_ema, new_val, momentum):
"""
Updates exponential moving average.
Args:
prev_ema: Previous exponential moving average.
new_val: Value used to update the exponential moving average.
momentum: Momentum.
Returns:
Updated running statistic.
"""
return (1 - momentum) * prev_ema + momentum * new_val
@triton.jit
def standardize(input, mean, inv_std, weight, bias):
"""
Standardizes the input given its mean and inverse standard deviation,
multiplies the result by weights, and adds a bias vector.
Args:
input: Input to standardize.
mean: Mean of input.
inv_std: Inverse standard deviation of input.
weight: Weight multiplied by the standardized input.
bias: Bias added to the result of the weight multiplication.
Returns:
Standardized input.
"""
return weight * inv_std * (input - mean) + bias
@triton.jit
def calc_p_loss(input, target, size,
p_loss: tl.constexpr, reduction: tl.constexpr):
"""
Measures the L1 or squared L2 norm of the difference between the input
and target (i.e., mean absolute error or mean squared error).
Args:
input: Input.
The input must be of shape [BLOCK_SIZE].
target: Target.
The target must be of shape [BLOCK_SIZE].
size: Number of elements in the input and target.
This value is used only if reduction is 'mean'.
p_loss: p-norm used to compute the error.
Options are 1 for MAE and 2 for MSE.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the error
across all entries, and 'sum' for summing the error across all entries.
Returns:
Error.
"""
input = input.to(tl.float32)
target = target.to(tl.float32)
diff = input - target
if p_loss == 1:
error = tl.abs(diff)
elif p_loss == 2:
error = diff * diff
if reduction == 'none':
output = error
elif reduction == 'mean':
output = tl.sum(error) / size
elif reduction == 'sum':
output = tl.sum(error)
return output
@triton.jit
def nll_loss(input, size,
reduction: tl.constexpr):
"""
Measures the negative log likelihood loss given log-probabilities of target class.
Args:
input: Input containing predicted log-probabilities corresponding to target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
@triton.jit
def cross_entropy_loss(input, pred):
"""
Measures the per-row cross entropy loss given
input and predicted logits corresponding to target class.
Args:
input: Input.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
pred: Predicted logits corresponding to target class.
The predictions must be of shape [BLOCK_SIZE1].
Returns:
Loss.
"""
input = input.to(tl.float32)
pred = pred.to(tl.float32)
mx = tl.max(input, axis=1)
input -= mx[:, None]
loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx
return loss
|
@triton.jit
def calc_p_loss(input, target, size,
p_loss: tl.constexpr, reduction: tl.constexpr):
"""
Measures the L1 or squared L2 norm of the difference between the input
and target (i.e., mean absolute error or mean squared error).
Args:
input: Input.
The input must be of shape [BLOCK_SIZE].
target: Target.
The target must be of shape [BLOCK_SIZE].
size: Number of elements in the input and target.
This value is used only if reduction is 'mean'.
p_loss: p-norm used to compute the error.
Options are 1 for MAE and 2 for MSE.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the error
across all entries, and 'sum' for summing the error across all entries.
Returns:
Error.
"""
input = input.to(tl.float32)
target = target.to(tl.float32)
diff = input - target
if p_loss == 1:
error = tl.abs(diff)
elif p_loss == 2:
error = diff * diff
if reduction == 'none':
output = error
elif reduction == 'mean':
output = tl.sum(error) / size
elif reduction == 'sum':
output = tl.sum(error)
return output
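# Illustrative sketch, not from attorch: host-side PyTorch equivalents of the constexpr
# branches, to make the p_loss / reduction combinations concrete.
import torch
import torch.nn.functional as F

inp, tgt = torch.randn(8), torch.randn(8)
size = inp.numel()

mae_mean = (inp - tgt).abs().sum() / size      # p_loss=1, reduction='mean'
mse_sum = ((inp - tgt) ** 2).sum()             # p_loss=2, reduction='sum'

assert torch.allclose(mae_mean, F.l1_loss(inp, tgt, reduction='mean'))
assert torch.allclose(mse_sum, F.mse_loss(inp, tgt, reduction='sum'))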
|
BobMcDear/attorch
|
attorch/math.py
|
https://github.com/BobMcDear/attorch/blob/fdd7c33c9476f19488b9025404112f56212dcb05/attorch/math.py
|
"""
Pure math operations to be performed on loaded Triton tensors.
"""
import triton
import triton.language as tl
from .act_kernels import apply_act_func
@triton.jit
def accum_linear(accum, input1, input2,
fp16: tl.constexpr, tf32: tl.constexpr):
"""
Accumulates matrix multiplications of input tensors for linear functions.
Args:
accum: Accumulator holding aggregation of matrix multiplications.
The accumulator must be of shape [BLOCK_SIZE1, BLOCK_SIZE3].
input1: First operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
input2: Second operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE2, BLOCK_SIZE3].
fp16: Flag for converting operands to FP16.
tf32: Flag for performing matrix multiplication in TF32.
Returns:
Accumulator with the result of the new matrix multiplication added to it.
"""
if fp16:
input1 = input1.to(tl.float16)
input2 = input2.to(tl.float16)
return accum + tl.dot(input1, input2, allow_tf32=tf32)
@triton.jit
def glu(input1, input2, param, act_func: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1: First half of input to gate.
The first half must be of the same shape as the second half.
input2: Second half of input to gate.
The second half must be of the same shape as the first half.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish',
'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'.
Returns:
Input transformed by the gated linear unit
with an arbitrary activation function.
"""
return input1 * apply_act_func(input2, None, None, None, param, act_func, False)
@triton.jit
def softmax(input,
log: tl.constexpr):
"""
Normalizes the input using softmax along the last dimension.
Args:
input: Input to normalize.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
log: Flag for indicating if the log of softmax should be taken.
Returns:
Input normalized by softmax.
"""
input = input.to(tl.float32)
input = input - tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
return output
@triton.jit
def calc_mean_and_inv_std(input, last_dim, eps,
last_dim_mask: tl.constexpr):
"""
Calculates the mean and inverse standard deviation of the input
along the last dimension.
Args:
input: Input whose mean and inverse standard deviation are calculated.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
last_dim: Size of the last dimension of input.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
last_dim_mask: Mask for the last dimension indicating
which elements should be included in the calculations.
The mask must be of shape [BLOCK_SIZE2].
Returns:
Mean and inverse standard deviation of the input.
"""
input = input.to(tl.float32)
mean = tl.sum(input, axis=1) / last_dim
diff = tl.where(last_dim_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / last_dim + eps)
return mean, inv_std
@triton.jit
def update_welford(input, prev_count, prev_mean, prev_var, curr_count,
mask: tl.constexpr):
"""
Updates count, mean, and variance (M2) statistics for Welford's algorithm.
Args:
input: Input used to update statistics.
The input must be of the same shape as the mask.
prev_count: Previous count statistic to update.
prev_mean: Previous mean statistic to update.
prev_var: Previous variance (M2) statistic to update.
curr_count: Count of elements in current input.
mask: Mask indicating which elements should be included in the calculations.
The mask must be of the same shape as the input.
Returns:
Updated count, mean, and variance (M2) statistics
"""
input = input.to(tl.float32)
count = prev_count + curr_count
mean = (tl.sum(input) - curr_count * prev_mean) / count
deltas = tl.where(mask, (input - mean) * (input - prev_mean), 0.)
var = prev_var + tl.sum(deltas)
return count, mean, var
@triton.jit
def update_ema(prev_ema, new_val, momentum):
"""
Updates exponential moving average.
Args:
prev_ema: Previous exponential moving average.
new_val: Value used to update the exponential moving average.
momentum: Momentum.
Returns:
Updated running statistic.
"""
return (1 - momentum) * prev_ema + momentum * new_val
@triton.jit
def standardize(input, mean, inv_std, weight, bias):
"""
Standardizes the input given its mean and inverse standard deviation,
multiplies the result by weights, and adds a bias vector.
Args:
input: Input to standardize.
mean: Mean of input.
inv_std: Inverse standard deviation of input.
weight: Weight multiplied by the standardized input.
bias: Bias added to the result of the weight multiplication.
Returns:
Standardized input.
"""
return weight * inv_std * (input - mean) + bias
@triton.jit
def calc_p_loss(input, target, size,
p_loss: tl.constexpr, reduction: tl.constexpr):
"""
Measures the L1 or squared L2 norm of the difference between the input
and target (i.e., mean absolute error or mean squared error).
Args:
input: Input.
The input must be of shape [BLOCK_SIZE].
target: Target.
The target must be of shape [BLOCK_SIZE].
size: Number of elements in the input and target.
This value is used only if reduction is 'mean'.
p_loss: p-norm used to compute the error.
Options are 1 for MAE and 2 for MSE.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the error
across all entries, and 'sum' for summing the error across all entries.
Returns:
Error.
"""
input = input.to(tl.float32)
target = target.to(tl.float32)
diff = input - target
if p_loss == 1:
error = tl.abs(diff)
elif p_loss == 2:
error = diff * diff
if reduction == 'none':
output = error
elif reduction == 'mean':
output = tl.sum(error) / size
elif reduction == 'sum':
output = tl.sum(error)
return output
@triton.jit
def nll_loss(input, size,
reduction: tl.constexpr):
"""
Measures the negative log likelihood loss given log-probabilities of target class.
Args:
input: Input containing predicted log-probabilities corresponding to target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
@triton.jit
def cross_entropy_loss(input, pred):
"""
Measures the per-row cross entropy loss given
input and predicted logits corresponding to target class.
Args:
input: Input.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
pred: Predicted logits corresponding to target class.
The predictions must be of shape [BLOCK_SIZE1].
Returns:
Loss.
"""
input = input.to(tl.float32)
pred = pred.to(tl.float32)
mx = tl.max(input, axis=1)
input -= mx[:, None]
loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx
return loss
|
@triton.jit
def nll_loss(input, size,
reduction: tl.constexpr):
"""
Measures the negative log likelihood loss given log-probabilities of target class.
Args:
input: Input containing predicted log-probabilities corresponding to target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
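# Illustrative sketch, not from attorch: the kernel receives log-probabilities already
# gathered at the target class, so the host-side equivalent is a gather followed by negation,
# matching F.nll_loss.
import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)
target = torch.randint(0, 10, (4,))
log_probs = torch.log_softmax(logits, dim=1)

gathered = log_probs.gather(1, target[:, None]).squeeze(1)   # the kernel's `input`
loss_mean = -gathered.sum() / gathered.numel()               # reduction='mean'
assert torch.allclose(loss_mean, F.nll_loss(log_probs, target, reduction='mean'))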
|
BobMcDear/attorch
|
attorch/math.py
|
https://github.com/BobMcDear/attorch/blob/fdd7c33c9476f19488b9025404112f56212dcb05/attorch/math.py
|
"""
Pure math operations to be performed on loaded Triton tensors.
"""
import triton
import triton.language as tl
from .act_kernels import apply_act_func
@triton.jit
def accum_linear(accum, input1, input2,
fp16: tl.constexpr, tf32: tl.constexpr):
"""
Accumulates matrix multiplications of input tensors for linear functions.
Args:
accum: Accumulator holding aggregation of matrix multiplications.
The accumulator must be of shape [BLOCK_SIZE1, BLOCK_SIZE3].
input1: First operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
input2: Second operand of matrix multiplication.
The operand must be of shape [BLOCK_SIZE2, BLOCK_SIZE3].
fp16: Flag for converting operands to FP16.
tf32: Flag for performing matrix multiplication in TF32.
Returns:
Accumulator with the result of the new matrix multiplication added to it.
"""
if fp16:
input1 = input1.to(tl.float16)
input2 = input2.to(tl.float16)
return accum + tl.dot(input1, input2, allow_tf32=tf32)
@triton.jit
def glu(input1, input2, param, act_func: tl.constexpr):
"""
Applies the gated linear unit with an arbitrary activation function
to the input.
Args:
input1: First half of input to gate.
The first half must be of the same shape as the second half.
input2: Second half of input to gate.
The second half must be of the same shape as the first half.
param: Parameter in the case of parameterized activation functions.
act_func: Name of activation function to apply.
Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu',
'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish',
'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'.
Returns:
Input transformed by the gated linear unit
with an arbitrary activation function.
"""
return input1 * apply_act_func(input2, None, None, None, param, act_func, False)
@triton.jit
def softmax(input,
log: tl.constexpr):
"""
Normalizes the input using softmax along the last dimension.
Args:
input: Input to normalize.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
log: Flag for indicating if the log of softmax should be taken.
Returns:
Input normalized by softmax.
"""
input = input.to(tl.float32)
input = input - tl.max(input, axis=1)[:, None]
numerator = tl.exp(input)
denominator = tl.sum(numerator, axis=1)[:, None]
if log:
output = input - tl.log(denominator)
else:
output = numerator / denominator
return output
@triton.jit
def calc_mean_and_inv_std(input, last_dim, eps,
last_dim_mask: tl.constexpr):
"""
Calculates the mean and inverse standard deviation of the input
along the last dimension.
Args:
input: Input whose mean and inverse standard deviation are calculated.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
last_dim: Size of the last dimension of input.
eps: Epsilon added in the square root in the denominator
to avoid division by zero.
last_dim_mask: Mask for the last dimension indicating
which elements should be included in the calculations.
The mask must be of shape [BLOCK_SIZE2].
Returns:
Mean and inverse standard deviation of the input.
"""
input = input.to(tl.float32)
mean = tl.sum(input, axis=1) / last_dim
diff = tl.where(last_dim_mask[None, :], input - mean[:, None], 0)
inv_std = tl.rsqrt(tl.sum(diff * diff, axis=1) / last_dim + eps)
return mean, inv_std
@triton.jit
def update_welford(input, prev_count, prev_mean, prev_var, curr_count,
mask: tl.constexpr):
"""
Updates count, mean, and variance (M2) statistics for Welford's algorithm.
Args:
input: Input used to update statistics.
The input must be of the same shape as the mask.
prev_count: Previous count statistic to update.
prev_mean: Previous mean statistic to update.
prev_var: Previous variance (M2) statistic to update.
curr_count: Count of elements in current input.
mask: Mask indicating which elements should be included in the calculations.
The mask must be of the same shape as the input.
Returns:
Updated count, mean, and variance (M2) statistics
"""
input = input.to(tl.float32)
count = prev_count + curr_count
mean = (tl.sum(input) - curr_count * prev_mean) / count
deltas = tl.where(mask, (input - mean) * (input - prev_mean), 0.)
var = prev_var + tl.sum(deltas)
return count, mean, var
@triton.jit
def update_ema(prev_ema, new_val, momentum):
"""
Updates exponential moving average.
Args:
prev_ema: Previous exponential moving average.
new_val: Value used to update the exponential moving average.
momentum: Momentum.
Returns:
Updated running statistic.
"""
return (1 - momentum) * prev_ema + momentum * new_val
@triton.jit
def standardize(input, mean, inv_std, weight, bias):
"""
Standardizes the input given its mean and inverse standard deviation,
multiplies the result by weights, and adds a bias vector.
Args:
input: Input to standardize.
mean: Mean of input.
inv_std: Inverse standard deviation of input.
weight: Weight multiplied by the standardized input.
bias: Bias added to the result of the weight multiplication.
Returns:
Standardized input.
"""
return weight * inv_std * (input - mean) + bias
@triton.jit
def calc_p_loss(input, target, size,
p_loss: tl.constexpr, reduction: tl.constexpr):
"""
Measures the L1 or squared L2 norm of the difference between the input
and target (i.e., mean absolute error or mean squared error).
Args:
input: Input.
The input must be of shape [BLOCK_SIZE].
target: Target.
The target must be of shape [BLOCK_SIZE].
size: Number of elements in the input and target.
This value is used only if reduction is 'mean'.
p_loss: p-norm used to compute the error.
Options are 1 for MAE and 2 for MSE.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the error
across all entries, and 'sum' for summing the error across all entries.
Returns:
Error.
"""
input = input.to(tl.float32)
target = target.to(tl.float32)
diff = input - target
if p_loss == 1:
error = tl.abs(diff)
elif p_loss == 2:
error = diff * diff
if reduction == 'none':
output = error
elif reduction == 'mean':
output = tl.sum(error) / size
elif reduction == 'sum':
output = tl.sum(error)
return output
@triton.jit
def nll_loss(input, size,
reduction: tl.constexpr):
"""
Measures the negative log likelihood loss given log-probabilities of target class.
Args:
input: Input containing predicted log-probabilities corresponding to target class.
The input can have arbitrary shape.
size: Number of elements in the input.
This value is used only if reduction is 'mean'.
reduction: Reduction strategy for the output.
Options are 'none' for no reduction, 'mean' for averaging the loss
across all entries, and 'sum' for summing the loss across all entries.
Returns:
Loss.
"""
input = input.to(tl.float32)
if reduction == 'none':
output = -input
elif reduction == 'mean':
output = -tl.sum(input) / size
elif reduction == 'sum':
output = -tl.sum(input)
return output
@triton.jit
def cross_entropy_loss(input, pred):
"""
Measures the per-row cross entropy loss given
input and predicted logits corresponding to target class.
Args:
input: Input.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
pred: Predicted logits corresponding to target class.
The predictions must be of shape [BLOCK_SIZE1].
Returns:
Loss.
"""
input = input.to(tl.float32)
pred = pred.to(tl.float32)
mx = tl.max(input, axis=1)
input -= mx[:, None]
loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx
return loss
|
@triton.jit
def cross_entropy_loss(input, pred):
"""
Measures the per-row cross entropy loss given
input and predicted logits corresponding to target class.
Args:
input: Input.
The input must be of shape [BLOCK_SIZE1, BLOCK_SIZE2].
pred: Predicted logits corresponding to target class.
The predictions must be of shape [BLOCK_SIZE1].
Returns:
Loss.
"""
input = input.to(tl.float32)
pred = pred.to(tl.float32)
mx = tl.max(input, axis=1)
input -= mx[:, None]
loss = tl.log(tl.sum(tl.exp(input), axis=1)) - pred + mx
return loss
|
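The cross_entropy_loss helper above computes, per row, logsumexp(input) minus the target-class logit, using the row-max subtraction trick for numerical stability. The short PyTorch check below is an illustrative sketch (not part of the original repository); the tensor names are made up for the example.
import torch

def reference_cross_entropy(input, pred):
    # loss = log(sum(exp(input), dim=1)) - pred, computed stably via the row max
    mx = input.max(dim=1).values
    return torch.log(torch.exp(input - mx[:, None]).sum(dim=1)) - pred + mx

x = torch.randn(4, 8)
p = x[torch.arange(4), torch.randint(0, 8, (4,))]  # logits of the "target" class per row
assert torch.allclose(reference_cross_entropy(x, p), torch.logsumexp(x, dim=1) - p, atol=1e-6)
|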
bryanzhang/triton_fusedattention
|
fused-attention.py
|
https://github.com/bryanzhang/triton_fusedattention/blob/722fb46a6d16f573621cfc9bf791977e0f05f541/fused-attention.py
|
#! /usr/bin/python3
import pytest
import torch
import torch.nn.functional as F
import triton
import triton.language as tl
@triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
qk = tl.dot(q, k)
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
return acc, l_i, m_i
# We don't run auto-tuning every time to keep the tutorial fast. Keeping
# the code below and commenting out the equivalent parameters is convenient for
# re-tuning.
configs = [
triton.Config({'BLOCK_M': BM, 'BLOCK_N': BN}, num_stages=s, num_warps=w) \
for BM in [64, 128]\
for BN in [32, 64]\
for s in [3, 4, 7]\
for w in [4, 8]\
]
def keep(conf):
BLOCK_M = conf.kwargs["BLOCK_M"]
BLOCK_N = conf.kwargs["BLOCK_N"]
if BLOCK_M * BLOCK_N < 128 * 128 and conf.num_warps == 8:
return False
return True
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd(Q, K, V, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
    qk_scale *= 1.44269504  # log2(e) = 1/ln(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
# stage 1: off-band
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
1, offs_m, offs_n, N_CTX #
)
# stage 2: on-band
    # barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX #
)
# epilogue
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
extra_kern_args = {}
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd[grid](
q, k, v, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
**extra_kern_args)
return o
attention = _attention.apply
import flash_attn
from flash_attn import flash_attn_func
BATCH, N_HEADS, HEAD_DIM = 4, 32, 64
# vary seq length for fixed head and batch=4
configs = []
configs.append(
triton.testing.Benchmark(
x_names=["N_CTX"],
x_vals=[2**i for i in range(10, 15)],
line_arg="provider",
line_vals=["triton-fp16", "flash-v2", "pytorch"],
line_names=["Triton [FP16]", "Flash-v2 [FP16]", "pytorch [FP16]"],
styles=[("red", "-"), ("blue", "-"), ("green", "-")],
ylabel="ms",
plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}",
args={
"H": N_HEADS,
"BATCH": BATCH,
"HEAD_DIM": HEAD_DIM,
},
))
# Standard softmax computation: exponentiate with base e directly, which is more accurate.
def torch_attention_standard(Q, K, V, sm_scale):
qk = torch.matmul(Q, K.transpose(-2, -1)) # Compute the dot product
qk_scale = sm_scale
qk *= qk_scale
mask = torch.triu(torch.ones_like(qk) * float(-1.0e6), diagonal=1)
qk = qk + mask
qk = qk - torch.max(qk, dim=-1, keepdim=True)[0]
qk_score = torch.exp(qk) / torch.exp(qk).sum(dim=-1, keepdim=True)
O = torch.matmul(qk_score, V) # Multiply scores with V
return O
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, provider, device="cuda"):
warmup = 25
rep = 100
dtype = torch.float16
if "triton" in provider:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
sm_scale = 1.3
fn = lambda: attention(q, k, v, sm_scale)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
if provider == "flash-v2":
q = torch.randn((BATCH, N_CTX, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
k = torch.randn((BATCH, N_CTX, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
v = torch.randn((BATCH, N_CTX, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
fn = lambda: flash_attn_func(q=q, k=k, v=v, dropout_p=float(0.0), softmax_scale=1.3, causal=True, window_size=(-1,-1), alibi_slopes=None, deterministic=False)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
if provider == "pytorch":
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
fn = lambda: F.scaled_dot_product_attention(q, k, v, is_causal=True, scale=1.3)
#fn = lambda: torch_attention_standard_v2(q, k, v, 1.3)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
total_flops = 2 * flops_per_matmul
total_flops *= 0.5
return total_flops / ms * 1e-9
# Mimics FlashAttention's base-2 exponentiation, which requires pre-multiplying by log2(e); the result is closer to FlashAttention's output.
# The remaining error comes from the rescaling in FlashAttention's block-wise computation.
def torch_attention(Q, K, V, sm_scale):
qk = torch.matmul(Q, K.transpose(-2, -1)) # Compute the dot product
qk_scale = sm_scale
    qk_scale *= 1.44269504  # log2(e) = 1/ln(2)
qk *= qk_scale
mask = torch.triu(torch.ones_like(qk) * float(-1.0e6), diagonal=1)
qk = qk + mask
qk = qk - torch.max(qk, dim=-1, keepdim=True)[0]
qk_score = torch.pow(2, qk) / torch.pow(2, qk).sum(dim=-1, keepdim=True)
O = torch.matmul(qk_score, V) # Multiply scores with V
return O
if __name__ == "__main__":
    # Test inference correctness.
torch.manual_seed(0)
q = torch.randn((4, 32, 1024, 64), dtype=torch.float16, device="cuda")
k = torch.randn((4, 32, 1024, 64), dtype=torch.float16, device="cuda")
v = torch.randn((4, 32, 1024, 64), dtype=torch.float16, device="cuda")
sm_scale = 1.3
o_triton = attention(q, k, v, sm_scale)
#o_torch = torch_attention_standard_v2(q, k, v, sm_scale)
o_torch = F.scaled_dot_product_attention(q, k, v, is_causal=True, scale=sm_scale)
assert o_triton.shape == (4, 32, 1024, 64)
assert o_torch.shape == (4, 32, 1024, 64)
assert torch.allclose(o_triton[0][0], o_torch[0][0], rtol=0.25*1e-2, atol=0.3*1e-1), (o_triton[0][0], o_torch[0][0])
#assert torch.allclose(o_triton[0][0], o_torch[0][0]), (o_triton[0][0], o_torch[0][0])
    # Inference performance benchmark
bench_flash_attention.run(save_path=".", print_data=True)
|
@triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
qk = tl.dot(q, k)
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
return acc, l_i, m_i
# We don't run auto-tuning every time to keep the tutorial fast. Keeping
# the code below and commenting out the equivalent parameters is convenient for
# re-tuning.
configs = [
triton.Config({'BLOCK_M': BM, 'BLOCK_N': BN}, num_stages=s, num_warps=w) \
for BM in [64, 128]\
for BN in [32, 64]\
for s in [3, 4, 7]\
for w in [4, 8]\
]
def keep(conf):
BLOCK_M = conf.kwargs["BLOCK_M"]
BLOCK_N = conf.kwargs["BLOCK_N"]
if BLOCK_M * BLOCK_N < 128 * 128 and conf.num_warps == 8:
return False
return True
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
|
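The running (m_i, l_i, acc) updates in _attn_fwd_inner implement the online-softmax recurrence: rescale the previous partial results by exp(old_max - new_max), then accumulate the new block. The NumPy sketch below is an illustration only (base-e instead of the kernel's base-2 exponentials, no causal mask, made-up names), verifying the recurrence against a full softmax.
import numpy as np

def online_softmax_attention(q, k, v, block=16):
    # q: (d,), k and v: (n, d); process K/V in blocks while keeping a running max/sum.
    m, l, acc = -np.inf, 0.0, np.zeros(v.shape[1])
    for s in range(0, k.shape[0], block):
        scores = k[s:s + block] @ q
        m_new = max(m, scores.max())
        alpha = np.exp(m - m_new)              # rescale previously accumulated results
        p = np.exp(scores - m_new)
        l = l * alpha + p.sum()
        acc = acc * alpha + p @ v[s:s + block]
        m = m_new
    return acc / l

rng = np.random.default_rng(0)
q, k, v = rng.standard_normal(64), rng.standard_normal((128, 64)), rng.standard_normal((128, 64))
full = np.exp(k @ q - (k @ q).max())
assert np.allclose(online_softmax_attention(q, k, v), (full / full.sum()) @ v)
|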
bryanzhang/triton_fusedattention
|
fused-attention.py
|
https://github.com/bryanzhang/triton_fusedattention/blob/722fb46a6d16f573621cfc9bf791977e0f05f541/fused-attention.py
|
#! /usr/bin/python3
import pytest
import torch
import torch.nn.functional as F
import triton
import triton.language as tl
@triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
qk = tl.dot(q, k)
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
return acc, l_i, m_i
# We don't run auto-tuning every time to keep the tutorial fast. Keeping
# the code below and commenting out the equivalent parameters is convenient for
# re-tuning.
configs = [
triton.Config({'BLOCK_M': BM, 'BLOCK_N': BN}, num_stages=s, num_warps=w) \
for BM in [64, 128]\
for BN in [32, 64]\
for s in [3, 4, 7]\
for w in [4, 8]\
]
def keep(conf):
BLOCK_M = conf.kwargs["BLOCK_M"]
BLOCK_N = conf.kwargs["BLOCK_N"]
if BLOCK_M * BLOCK_N < 128 * 128 and conf.num_warps == 8:
return False
return True
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd(Q, K, V, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
    qk_scale *= 1.44269504  # log2(e) = 1/ln(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
# stage 1: off-band
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
1, offs_m, offs_n, N_CTX #
)
# stage 2: on-band
    # barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX #
)
# epilogue
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
extra_kern_args = {}
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd[grid](
q, k, v, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
**extra_kern_args)
return o
attention = _attention.apply
import flash_attn
from flash_attn import flash_attn_func
BATCH, N_HEADS, HEAD_DIM = 4, 32, 64
# vary seq length for fixed head and batch=4
configs = []
configs.append(
triton.testing.Benchmark(
x_names=["N_CTX"],
x_vals=[2**i for i in range(10, 15)],
line_arg="provider",
line_vals=["triton-fp16", "flash-v2", "pytorch"],
line_names=["Triton [FP16]", "Flash-v2 [FP16]", "pytorch [FP16]"],
styles=[("red", "-"), ("blue", "-"), ("green", "-")],
ylabel="ms",
plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}",
args={
"H": N_HEADS,
"BATCH": BATCH,
"HEAD_DIM": HEAD_DIM,
},
))
# Standard softmax computation: exponentiate with base e directly, which is more accurate.
def torch_attention_standard(Q, K, V, sm_scale):
qk = torch.matmul(Q, K.transpose(-2, -1)) # Compute the dot product
qk_scale = sm_scale
qk *= qk_scale
mask = torch.triu(torch.ones_like(qk) * float(-1.0e6), diagonal=1)
qk = qk + mask
qk = qk - torch.max(qk, dim=-1, keepdim=True)[0]
qk_score = torch.exp(qk) / torch.exp(qk).sum(dim=-1, keepdim=True)
O = torch.matmul(qk_score, V) # Multiply scores with V
return O
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, provider, device="cuda"):
warmup = 25
rep = 100
dtype = torch.float16
if "triton" in provider:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
sm_scale = 1.3
fn = lambda: attention(q, k, v, sm_scale)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
if provider == "flash-v2":
q = torch.randn((BATCH, N_CTX, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
k = torch.randn((BATCH, N_CTX, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
v = torch.randn((BATCH, N_CTX, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
fn = lambda: flash_attn_func(q=q, k=k, v=v, dropout_p=float(0.0), softmax_scale=1.3, causal=True, window_size=(-1,-1), alibi_slopes=None, deterministic=False)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
if provider == "pytorch":
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
fn = lambda: F.scaled_dot_product_attention(q, k, v, is_causal=True, scale=1.3)
#fn = lambda: torch_attention_standard_v2(q, k, v, 1.3)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
total_flops = 2 * flops_per_matmul
total_flops *= 0.5
return total_flops / ms * 1e-9
# Mimics FlashAttention's base-2 exponentiation, which requires pre-multiplying by log2(e); the result is closer to FlashAttention's output.
# The remaining error comes from the rescaling in FlashAttention's block-wise computation.
def torch_attention(Q, K, V, sm_scale):
qk = torch.matmul(Q, K.transpose(-2, -1)) # Compute the dot product
qk_scale = sm_scale
    qk_scale *= 1.44269504  # log2(e) = 1/ln(2)
qk *= qk_scale
mask = torch.triu(torch.ones_like(qk) * float(-1.0e6), diagonal=1)
qk = qk + mask
qk = qk - torch.max(qk, dim=-1, keepdim=True)[0]
qk_score = torch.pow(2, qk) / torch.pow(2, qk).sum(dim=-1, keepdim=True)
O = torch.matmul(qk_score, V) # Multiply scores with V
return O
if __name__ == "__main__":
    # Test inference correctness.
torch.manual_seed(0)
q = torch.randn((4, 32, 1024, 64), dtype=torch.float16, device="cuda")
k = torch.randn((4, 32, 1024, 64), dtype=torch.float16, device="cuda")
v = torch.randn((4, 32, 1024, 64), dtype=torch.float16, device="cuda")
sm_scale = 1.3
o_triton = attention(q, k, v, sm_scale)
#o_torch = torch_attention_standard_v2(q, k, v, sm_scale)
o_torch = F.scaled_dot_product_attention(q, k, v, is_causal=True, scale=sm_scale)
assert o_triton.shape == (4, 32, 1024, 64)
assert o_torch.shape == (4, 32, 1024, 64)
assert torch.allclose(o_triton[0][0], o_torch[0][0], rtol=0.25*1e-2, atol=0.3*1e-1), (o_triton[0][0], o_torch[0][0])
#assert torch.allclose(o_triton[0][0], o_torch[0][0]), (o_triton[0][0], o_torch[0][0])
    # Inference performance benchmark
bench_flash_attention.run(save_path=".", print_data=True)
|
@triton.jit
def _attn_fwd(Q, K, V, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
    qk_scale *= 1.44269504  # log2(e) = 1/ln(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
# stage 1: off-band
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
1, offs_m, offs_n, N_CTX #
)
# stage 2: on-band
    # barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX #
)
# epilogue
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
extra_kern_args = {}
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd[grid](
q, k, v, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
**extra_kern_args)
return o
attention = _attention.apply
import flash_attn
from flash_attn import flash_attn_func
BATCH, N_HEADS, HEAD_DIM = 4, 32, 64
# vary seq length for fixed head and batch=4
configs = []
configs.append(
triton.testing.Benchmark(
x_names=["N_CTX"],
x_vals=[2**i for i in range(10, 15)],
line_arg="provider",
line_vals=["triton-fp16", "flash-v2", "pytorch"],
line_names=["Triton [FP16]", "Flash-v2 [FP16]", "pytorch [FP16]"],
styles=[("red", "-"), ("blue", "-"), ("green", "-")],
ylabel="ms",
plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}",
args={
"H": N_HEADS,
"BATCH": BATCH,
"HEAD_DIM": HEAD_DIM,
},
))
# Standard softmax computation: exponentiate with base e directly, which is more accurate.
def torch_attention_standard(Q, K, V, sm_scale):
qk = torch.matmul(Q, K.transpose(-2, -1)) # Compute the dot product
qk_scale = sm_scale
qk *= qk_scale
mask = torch.triu(torch.ones_like(qk) * float(-1.0e6), diagonal=1)
qk = qk + mask
qk = qk - torch.max(qk, dim=-1, keepdim=True)[0]
qk_score = torch.exp(qk) / torch.exp(qk).sum(dim=-1, keepdim=True)
O = torch.matmul(qk_score, V) # Multiply scores with V
return O
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, provider, device="cuda"):
warmup = 25
rep = 100
dtype = torch.float16
if "triton" in provider:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
sm_scale = 1.3
fn = lambda: attention(q, k, v, sm_scale)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
if provider == "flash-v2":
q = torch.randn((BATCH, N_CTX, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
k = torch.randn((BATCH, N_CTX, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
v = torch.randn((BATCH, N_CTX, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
fn = lambda: flash_attn_func(q=q, k=k, v=v, dropout_p=float(0.0), softmax_scale=1.3, causal=True, window_size=(-1,-1), alibi_slopes=None, deterministic=False)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
if provider == "pytorch":
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=False)
fn = lambda: F.scaled_dot_product_attention(q, k, v, is_causal=True, scale=1.3)
#fn = lambda: torch_attention_standard_v2(q, k, v, 1.3)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
total_flops = 2 * flops_per_matmul
total_flops *= 0.5
return total_flops / ms * 1e-9
# Mimics FlashAttention's base-2 exponentiation, which requires pre-multiplying by log2(e); the result is closer to FlashAttention's output.
# The remaining error comes from the rescaling in FlashAttention's block-wise computation.
def torch_attention(Q, K, V, sm_scale):
qk = torch.matmul(Q, K.transpose(-2, -1)) # Compute the dot product
qk_scale = sm_scale
    qk_scale *= 1.44269504  # log2(e) = 1/ln(2)
qk *= qk_scale
mask = torch.triu(torch.ones_like(qk) * float(-1.0e6), diagonal=1)
qk = qk + mask
qk = qk - torch.max(qk, dim=-1, keepdim=True)[0]
qk_score = torch.pow(2, qk) / torch.pow(2, qk).sum(dim=-1, keepdim=True)
O = torch.matmul(qk_score, V) # Multiply scores with V
return O
if __name__ == "__main__":
    # Test inference correctness.
torch.manual_seed(0)
q = torch.randn((4, 32, 1024, 64), dtype=torch.float16, device="cuda")
k = torch.randn((4, 32, 1024, 64), dtype=torch.float16, device="cuda")
v = torch.randn((4, 32, 1024, 64), dtype=torch.float16, device="cuda")
sm_scale = 1.3
o_triton = attention(q, k, v, sm_scale)
#o_torch = torch_attention_standard_v2(q, k, v, sm_scale)
o_torch = F.scaled_dot_product_attention(q, k, v, is_causal=True, scale=sm_scale)
assert o_triton.shape == (4, 32, 1024, 64)
assert o_torch.shape == (4, 32, 1024, 64)
assert torch.allclose(o_triton[0][0], o_torch[0][0], rtol=0.25*1e-2, atol=0.3*1e-1), (o_triton[0][0], o_torch[0][0])
#assert torch.allclose(o_triton[0][0], o_torch[0][0]), (o_triton[0][0], o_torch[0][0])
    # Inference performance benchmark
bench_flash_attention.run(save_path=".", print_data=True)
|
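For reference, bench_flash_attention's return value is an effective TFLOP/s figure: two matmuls (QK^T and PV) of cost 2*BATCH*H*N_CTX*N_CTX*HEAD_DIM each, halved because the causal mask skips roughly half the work, divided by the measured milliseconds. The small worked example below (with a hypothetical timing value) just spells out that arithmetic; it is not part of the original file.
BATCH, H, N_CTX, HEAD_DIM = 4, 32, 1024, 64
ms = 0.5                                            # hypothetical measured runtime in milliseconds
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
total_flops = 2 * flops_per_matmul * 0.5            # two matmuls, halved for causal masking
tflops = total_flops / (ms * 1e-3) / 1e12           # same as total_flops / ms * 1e-9
print(f"{tflops:.1f} TFLOP/s")
|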
l1351868270/implicit_gemm.triton
|
triton_gemm.py
|
https://github.com/l1351868270/implicit_gemm.triton/blob/b19c43fb5219c4bac5b0f5f6b1d98e280e07dcc4/triton_gemm.py
|
# copy from https://triton-lang.org/main/_downloads/d5fee5b55a64e47f1b5724ec39adf171/03-matrix-multiplication.py
import torch
import triton
import triton.language as tl
def get_autotune_config():
return [
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=3,
num_warps=8),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5,
num_warps=2),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5,
num_warps=2),
# Good config for fp8 inputs.
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=3,
num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=3,
num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4)
]
@triton.autotune(
configs=get_autotune_config(),
key=['M', 'N', 'K'],
)
@triton.jit
def matmul_kernel(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak, #
stride_bk, stride_bn, #
stride_cm, stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, #
GROUP_SIZE_M: tl.constexpr, #
ACTIVATION: tl.constexpr #
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetic` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
accumulator = tl.dot(a, b, accumulator)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
# You can fuse arbitrary activation functions here
# while the accumulator is still in FP32!
if ACTIVATION == "leaky_relu":
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul_kernel`.
@triton.jit
def leaky_relu(x):
return tl.where(x >= 0, x, 0.01 * x)
def matmul(a, b, activation=""):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
assert a.is_contiguous(), "Matrix A must be contiguous"
M, K = a.shape
K, N = b.shape
# Allocates output.
c = torch.empty((M, N), device=a.device, dtype=torch.float16)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), )
matmul_kernel[grid](
a, b, c, #
M, N, K, #
a.stride(0), a.stride(1), #
b.stride(0), b.stride(1), #
c.stride(0), c.stride(1), #
ACTIVATION=activation #
)
return c
|
@triton.jit
def matmul_kernel(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak, #
stride_bk, stride_bn, #
stride_cm, stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, #
GROUP_SIZE_M: tl.constexpr, #
ACTIVATION: tl.constexpr #
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetic` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
accumulator = tl.dot(a, b, accumulator)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
# You can fuse arbitrary activation functions here
# while the accumulator is still in FP32!
if ACTIVATION == "leaky_relu":
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul_kernel`.
|
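The pid -> (pid_m, pid_n) mapping at the top of matmul_kernel orders output tiles in groups of GROUP_SIZE_M rows so that consecutive programs reuse the same columns of B from L2 cache. The plain-Python sketch below (illustrative only) reproduces that index arithmetic for a small grid so the traversal order can be inspected.
def grouped_order(num_pid_m, num_pid_n, GROUP_SIZE_M):
    order = []
    for pid in range(num_pid_m * num_pid_n):
        num_pid_in_group = GROUP_SIZE_M * num_pid_n
        group_id = pid // num_pid_in_group
        first_pid_m = group_id * GROUP_SIZE_M
        group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
        pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
        pid_n = (pid % num_pid_in_group) // group_size_m
        order.append((pid_m, pid_n))
    return order

# With a 4x4 tile grid and GROUP_SIZE_M=2, the first group walks rows 0-1 column by column.
print(grouped_order(4, 4, 2)[:8])  # [(0, 0), (1, 0), (0, 1), (1, 1), (0, 2), (1, 2), (0, 3), (1, 3)]
|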
l1351868270/implicit_gemm.triton
|
triton_gemm.py
|
https://github.com/l1351868270/implicit_gemm.triton/blob/b19c43fb5219c4bac5b0f5f6b1d98e280e07dcc4/triton_gemm.py
|
# copy from https://triton-lang.org/main/_downloads/d5fee5b55a64e47f1b5724ec39adf171/03-matrix-multiplication.py
import torch
import triton
import triton.language as tl
def get_autotune_config():
return [
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=3,
num_warps=8),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5,
num_warps=2),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5,
num_warps=2),
# Good config for fp8 inputs.
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=3,
num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=3,
num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=4,
num_warps=4)
]
@triton.autotune(
configs=get_autotune_config(),
key=['M', 'N', 'K'],
)
@triton.jit
def matmul_kernel(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak, #
stride_bk, stride_bn, #
stride_cm, stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, #
GROUP_SIZE_M: tl.constexpr, #
ACTIVATION: tl.constexpr #
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetic` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
accumulator = tl.dot(a, b, accumulator)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
# You can fuse arbitrary activation functions here
# while the accumulator is still in FP32!
if ACTIVATION == "leaky_relu":
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul_kernel`.
@triton.jit
def leaky_relu(x):
return tl.where(x >= 0, x, 0.01 * x)
def matmul(a, b, activation=""):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
assert a.is_contiguous(), "Matrix A must be contiguous"
M, K = a.shape
K, N = b.shape
# Allocates output.
c = torch.empty((M, N), device=a.device, dtype=torch.float16)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), )
matmul_kernel[grid](
a, b, c, #
M, N, K, #
a.stride(0), a.stride(1), #
b.stride(0), b.stride(1), #
c.stride(0), c.stride(1), #
ACTIVATION=activation #
)
return c
|
@triton.jit
def leaky_relu(x):
return tl.where(x >= 0, x, 0.01 * x)
def matmul(a, b, activation=""):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
assert a.is_contiguous(), "Matrix A must be contiguous"
M, K = a.shape
K, N = b.shape
# Allocates output.
c = torch.empty((M, N), device=a.device, dtype=torch.float16)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), )
matmul_kernel[grid](
a, b, c, #
M, N, K, #
a.stride(0), a.stride(1), #
b.stride(0), b.stride(1), #
c.stride(0), c.stride(1), #
ACTIVATION=activation #
)
return c
|
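A minimal usage sketch for the matmul wrapper above, assuming the definitions in this file are available and a CUDA GPU is present (not part of the original file); the shapes are arbitrary examples.
import torch

a = torch.randn(512, 256, device='cuda', dtype=torch.float16)
b = torch.randn(256, 128, device='cuda', dtype=torch.float16)
c_triton = matmul(a, b)                          # autotuned Triton kernel
c_torch = torch.matmul(a, b)                     # cuBLAS reference
print(torch.max(torch.abs(c_triton - c_torch)))  # expect only a small fp16 rounding difference
|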
zhanghuanrong/cudas
|
scratch_triton/vec_add.py
|
https://github.com/zhanghuanrong/cudas/blob/8f4d176fae9939b428fe634aae9a1416e6252cc1/scratch_triton/vec_add.py
|
import torch
import triton
import triton.language as tl
DEVICE = torch.device('cuda:0')
@triton.jit
def add_kernel(x, y, z, n_elements, BLOCK_SIZE: tl.constexpr) :
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x + offsets, mask = mask)
y = tl.load(y + offsets, mask = mask)
tl.store(z + offsets, x + y, mask=mask)
def add(x: torch.Tensor, y: torch.Tensor):
z = torch.empty_like(x)
n_elements = z.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
add_kernel[grid](x, y, z, n_elements, BLOCK_SIZE=1024)
return z
def main() :
size = 327680
torch.manual_seed(0)
x = torch.rand(size, device = DEVICE)
y = torch.rand(size, device = DEVICE)
z_torch = x + y
z_tr = add(x, y)
    print('Max diff of add result from torch vs triton is:')
print(f"{torch.max(torch.abs(z_torch - z_tr))}")
main()
|
@triton.jit
def add_kernel(x, y, z, n_elements, BLOCK_SIZE: tl.constexpr) :
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x + offsets, mask = mask)
y = tl.load(y + offsets, mask = mask)
tl.store(z + offsets, x + y, mask=mask)
def add(x: torch.Tensor, y: torch.Tensor):
z = torch.empty_like(x)
n_elements = z.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
add_kernel[grid](x, y, z, n_elements, BLOCK_SIZE=1024)
return z
def main() :
size = 327680
torch.manual_seed(0)
x = torch.rand(size, device = DEVICE)
y = torch.rand(size, device = DEVICE)
z_torch = x + y
z_tr = add(x, y)
    print('Max diff of add result from torch vs triton is:')
print(f"{torch.max(torch.abs(z_torch - z_tr))}")
main()
|
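Each add_kernel program handles one BLOCK_SIZE slice of the vectors, with the mask guarding the final, possibly partial block. The NumPy sketch below (illustrative only, names made up) mirrors that per-program slicing on the CPU.
import numpy as np

def add_blocked(x, y, block_size=1024):
    n = x.shape[0]
    z = np.empty_like(x)
    num_programs = -(-n // block_size)            # ceiling division, like triton.cdiv
    for pid in range(num_programs):
        offsets = pid * block_size + np.arange(block_size)
        mask = offsets < n                        # guards the last, possibly partial block
        z[offsets[mask]] = x[offsets[mask]] + y[offsets[mask]]
    return z

x, y = np.random.rand(327680), np.random.rand(327680)
assert np.allclose(add_blocked(x, y), x + y)
|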
motobiubiu/biubiuquik
|
src/triton/relu.py
|
https://github.com/motobiubiu/biubiuquik/blob/eedaa2e7d4e2132ecbc969709fd3cbf018cd21db/src/triton/relu.py
|
import torch
import triton
import triton.language as tl
import os
# os.environ["TRITON_INTERPRET"] = "1"
@triton.jit
def relu_kernel(x_ptr, # *Pointer* to first input vector.
output_ptr, # *Pointer* to output vector.
n_elements, # Size of the vector.
BLOCK_SIZE: tl.constexpr, # Number of elements each program should process.
# NOTE: `constexpr` so it can be used as a shape value.
):
pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
    output = tl.maximum(x, 0)
tl.store(output_ptr + offsets, output, mask=mask)
def relu(x: torch.Tensor):
# We need to preallocate the output.
output = torch.empty_like(x)
assert x.is_cuda and output.is_cuda
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
relu_kernel[grid](x, output, n_elements, BLOCK_SIZE=1024)
return output
torch.manual_seed(0)
size = 98432
x = torch.rand(size, device='cuda')
output_torch = torch.relu(x)
output_triton = relu(x)
print(output_torch)
print(output_triton)
print(f'The maximum difference between torch and triton is '
f'{torch.max(torch.abs(output_torch - output_triton))}')
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['size'], # Argument names to use as an x-axis for the plot.
x_vals=[2**i for i in range(12, 28, 1)], # Different possible values for `x_name`.
x_log=True, # x axis is logarithmic.
line_arg='provider', # Argument name whose value corresponds to a different line in the plot.
line_vals=['triton', 'torch'], # Possible values for `line_arg`.
line_names=['Triton', 'Torch'], # Label name for the lines.
styles=[('blue', '-'), ('green', '-')], # Line styles.
ylabel='GB/s', # Label name for the y-axis.
plot_name='vector-add-performance', # Name for the plot. Used also as a file name for saving the plot.
args={}, # Values for function arguments not in `x_names` and `y_name`.
))
def benchmark(size, provider):
x = torch.rand(size, device='cuda', dtype=torch.float32)
quantiles = [0.5, 0.2, 0.8]
if provider == 'torch':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.relu(x), quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: relu(x), quantiles=quantiles)
    gbps = lambda ms: 2 * x.numel() * x.element_size() / ms * 1e-6  # ReLU moves 2 tensors: one read, one write
return gbps(ms), gbps(max_ms), gbps(min_ms)
benchmark.run(print_data=True, show_plots=True)
|
@triton.jit
def relu_kernel(x_ptr, # *Pointer* to first input vector.
output_ptr, # *Pointer* to output vector.
n_elements, # Size of the vector.
BLOCK_SIZE: tl.constexpr, # Number of elements each program should process.
# NOTE: `constexpr` so it can be used as a shape value.
):
pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
x = tl.load(x_ptr + offsets, mask=mask)
    output = tl.maximum(x, 0)
tl.store(output_ptr + offsets, output, mask=mask)
def relu(x: torch.Tensor):
# We need to preallocate the output.
output = torch.empty_like(x)
assert x.is_cuda and output.is_cuda
n_elements = output.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
relu_kernel[grid](x, output, n_elements, BLOCK_SIZE=1024)
return output
torch.manual_seed(0)
size = 98432
x = torch.rand(size, device='cuda')
output_torch = torch.relu(x)
output_triton = relu(x)
print(output_torch)
print(output_triton)
print(f'The maximum difference between torch and triton is '
f'{torch.max(torch.abs(output_torch - output_triton))}')
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['size'], # Argument names to use as an x-axis for the plot.
x_vals=[2**i for i in range(12, 28, 1)], # Different possible values for `x_name`.
x_log=True, # x axis is logarithmic.
line_arg='provider', # Argument name whose value corresponds to a different line in the plot.
line_vals=['triton', 'torch'], # Possible values for `line_arg`.
line_names=['Triton', 'Torch'], # Label name for the lines.
styles=[('blue', '-'), ('green', '-')], # Line styles.
ylabel='GB/s', # Label name for the y-axis.
plot_name='vector-add-performance', # Name for the plot. Used also as a file name for saving the plot.
args={}, # Values for function arguments not in `x_names` and `y_name`.
))
def benchmark(size, provider):
x = torch.rand(size, device='cuda', dtype=torch.float32)
quantiles = [0.5, 0.2, 0.8]
if provider == 'torch':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.relu(x), quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: relu(x), quantiles=quantiles)
    gbps = lambda ms: 2 * x.numel() * x.element_size() / ms * 1e-6  # ReLU moves 2 tensors: one read, one write
return gbps(ms), gbps(max_ms), gbps(min_ms)
benchmark.run(print_data=True, show_plots=True)
|
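The benchmark reports effective memory bandwidth: an elementwise ReLU reads each element once and writes it once, so the traffic model is 2 * numel * element_size bytes over the measured time. The worked example below (hypothetical timing, illustrative only) spells out the conversion to GB/s used in the gbps lambda.
numel, element_size = 2**20, 4            # 1M float32 elements
ms = 0.05                                 # hypothetical measured runtime in milliseconds
bytes_moved = 2 * numel * element_size    # one read + one write per element
gbps = bytes_moved / (ms * 1e-3) / 1e9    # same as bytes_moved / ms * 1e-6
print(f"{gbps:.1f} GB/s")
|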
yynil/RWKVinLLAMA
|
rwkv/fla/modules/l2norm.py
|
https://github.com/yynil/RWKVinLLAMA/blob/6fa0a05a76b513dc6f0e11a32aaf1d89b8678376/rwkv/fla/modules/l2norm.py
|
# -*- coding: utf-8 -*-
import torch
import triton
import triton.language as tl
@triton.autotune(
configs=[
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
triton.Config({}, num_warps=32),
],
key=["N"],
)
# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None})
# @triton.heuristics({"HAS_RESIDUAL": lambda args: args["RESIDUAL"] is not None})
@triton.jit
def _l2_norm_fwd_1pass_kernel(
X, # pointer to the input
Y, # pointer to the output
stride_x_row, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
eps, # epsilon to avoid division by zero
BLOCK_N: tl.constexpr,
):
# Map the program id to the row of X and Y it should compute.
row = tl.program_id(0)
X += row * stride_x_row
Y += row * stride_x_row
# Compute mean and variance
cols = tl.arange(0, BLOCK_N)
x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
xbar = tl.where(cols < N, x, 0.0)
var = tl.sum(xbar * xbar, axis=0)
rstd = 1 / tl.sqrt(var + eps)
# tl.store(Rstd + row, rstd)
# Normalize and apply linear transformation
mask = cols < N
y = x * rstd
# Write output
tl.store(Y + cols, y, mask=mask)
@triton.autotune(
configs=[
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
triton.Config({}, num_warps=32),
],
key=["N"],
)
# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None})
# @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None})
# @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not None})
# @triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None})
@triton.jit
def _l2_norm_bwd_kernel(
X, # pointer to the input
# Y, # pointer to the output to be recomputed
DY, # pointer to the output gradient
DX, # pointer to the input gradient
stride_x_row, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
eps, # epsilon to avoid division by zero
BLOCK_N: tl.constexpr,
):
# Map the program id to the elements of X, DX, and DY it should compute.
# Map the program id to the row of X and Y it should compute.
row = tl.program_id(0)
X += row * stride_x_row
DX += row * stride_x_row
DY += row * stride_x_row
# Y += row * stride_y_row
cols = tl.arange(0, BLOCK_N)
x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
x = tl.where(cols < N, x, 0.0)
var = tl.sum(x * x)
rstd = 1 / tl.sqrt(var + eps)
# tl.store(Rstd + row, rstd)
# Normalize and apply linear transformation
mask = cols < N
# y = x * rstd
dy = tl.load(DY + cols, mask=cols < N, other=0.0).to(tl.float32)
dy = tl.where(cols < N, dy, 0.0)
# dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x
dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x
tl.store(DX + cols, dx, mask=mask)
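# Added note (not in the original file): the dx expression above follows from
# y_i = x_i * rstd with rstd = 1 / sqrt(sum_k x_k^2 + eps). Since
#   d(rstd)/d(x_j) = -x_j * rstd^3,
# the chain rule gives
#   dx_j = dy_j * rstd - (sum_i dy_i * x_i) * x_j * rstd^3,
# and rstd^3 = rstd / (var + eps), which is exactly
#   dx = dy * rstd - tl.sum(dy * x) * (1 / (var + eps)) * rstd * x.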
def _l2_norm_fwd(
x, eps=1e-6
):
x_shape_og = x.shape
x = x.reshape(-1, x.shape[-1])
if x.stride(-1) != 1:
x = x.contiguous()
M, N = x.shape
assert x.stride(-1) == 1
# allocate output
y = torch.empty_like(x)
assert y.stride(-1) == 1
N = x.shape[-1]
M = x.shape[0]
# rstd = torch.empty((M,), dtype=torch.float32, device="cuda")
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_N:
raise RuntimeError(
"This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps
with torch.cuda.device(x.device.index):
_l2_norm_fwd_1pass_kernel[(M,)](
x,
y,
x.stride(0),
N,
eps,
# is_rms_norm,
BLOCK_N,
# residual is not None,
# residual_out is not None,
# bias is not None,
)
return y.reshape(x_shape_og)
def _l2_norm_bwd(
x, dy, eps=1e-5,
):
x_shape_og = x.shape
x = x.reshape(-1, dy.shape[-1])
dy = dy.reshape(-1, dy.shape[-1])
if dy.stride(-1) != 1:
dy = dy.contiguous()
assert dy.shape == x.shape
# allocate output
dx = torch.empty_like(x)
N = x.shape[-1]
M = x.shape[0]
assert x.stride(-1) == 1
assert dy.stride(-1) == 1
# rstd = torch.empty((M,), dtype=torch.float32, device="cuda")
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_N:
raise RuntimeError(
"This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps
with torch.cuda.device(x.device.index):
_l2_norm_bwd_kernel[(M,)](
x,
dy,
dx,
x.stride(0),
N,
eps,
BLOCK_N,
)
return dx.reshape(x_shape_og)
class L2NormFN(torch.autograd.Function):
@staticmethod
def forward(
ctx,
x,
eps=1e-6,
):
# reshape input data into 2D tensor
y = _l2_norm_fwd(x, eps)
ctx.eps = eps
ctx.x_dtype = x.dtype
ctx.save_for_backward(x)
return y
@staticmethod
def backward(ctx, dy, *args):
x, = ctx.saved_tensors
dx = _l2_norm_bwd(
x,
dy,
ctx.eps,
)
return (
dx,
None
)
l2_norm_fn = L2NormFN.apply
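# Usage sketch (illustrative addition, not part of the original module): l2_norm_fn
# normalizes the last dimension of a CUDA tensor to unit L2 norm, so up to eps it
# should agree with torch.nn.functional.normalize(x, dim=-1):
#   x = torch.randn(4, 512, device='cuda', requires_grad=True)
#   y = l2_norm_fn(x, 1e-6)   # eps is passed positionally through autograd.Function.apply
#   y.sum().backward()        # gradients flow through the Triton backward kernel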
|
@triton.jit
def _l2_norm_fwd_1pass_kernel(
X, # pointer to the input
Y, # pointer to the output
stride_x_row, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
eps, # epsilon to avoid division by zero
BLOCK_N: tl.constexpr,
):
# Map the program id to the row of X and Y it should compute.
row = tl.program_id(0)
X += row * stride_x_row
Y += row * stride_x_row
# Compute mean and variance
cols = tl.arange(0, BLOCK_N)
x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
xbar = tl.where(cols < N, x, 0.0)
var = tl.sum(xbar * xbar, axis=0)
rstd = 1 / tl.sqrt(var + eps)
# tl.store(Rstd + row, rstd)
# Normalize and apply linear transformation
mask = cols < N
y = x * rstd
# Write output
tl.store(Y + cols, y, mask=mask)
@triton.autotune(
configs=[
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
triton.Config({}, num_warps=32),
],
key=["N"],
)
# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None})
# @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None})
# @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not None})
# @triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None})
|
yynil/RWKVinLLAMA
|
rwkv/fla/modules/l2norm.py
|
https://github.com/yynil/RWKVinLLAMA/blob/6fa0a05a76b513dc6f0e11a32aaf1d89b8678376/rwkv/fla/modules/l2norm.py
|
# -*- coding: utf-8 -*-
import torch
import triton
import triton.language as tl
@triton.autotune(
configs=[
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
triton.Config({}, num_warps=32),
],
key=["N"],
)
# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None})
# @triton.heuristics({"HAS_RESIDUAL": lambda args: args["RESIDUAL"] is not None})
@triton.jit
def _l2_norm_fwd_1pass_kernel(
X, # pointer to the input
Y, # pointer to the output
stride_x_row, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
eps, # epsilon to avoid division by zero
BLOCK_N: tl.constexpr,
):
# Map the program id to the row of X and Y it should compute.
row = tl.program_id(0)
X += row * stride_x_row
Y += row * stride_x_row
# Compute mean and variance
cols = tl.arange(0, BLOCK_N)
x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
xbar = tl.where(cols < N, x, 0.0)
var = tl.sum(xbar * xbar, axis=0)
rstd = 1 / tl.sqrt(var + eps)
# tl.store(Rstd + row, rstd)
# Normalize and apply linear transformation
mask = cols < N
y = x * rstd
# Write output
tl.store(Y + cols, y, mask=mask)
@triton.autotune(
configs=[
triton.Config({}, num_warps=1),
triton.Config({}, num_warps=2),
triton.Config({}, num_warps=4),
triton.Config({}, num_warps=8),
triton.Config({}, num_warps=16),
triton.Config({}, num_warps=32),
],
key=["N"],
)
# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None})
# @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None})
# @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not None})
# @triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None})
@triton.jit
def _l2_norm_bwd_kernel(
X, # pointer to the input
# Y, # pointer to the output to be recomputed
DY, # pointer to the output gradient
DX, # pointer to the input gradient
stride_x_row, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
eps, # epsilon to avoid division by zero
BLOCK_N: tl.constexpr,
):
# Map the program id to the elements of X, DX, and DY it should compute.
# Map the program id to the row of X and Y it should compute.
row = tl.program_id(0)
X += row * stride_x_row
DX += row * stride_x_row
DY += row * stride_x_row
# Y += row * stride_y_row
cols = tl.arange(0, BLOCK_N)
x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
x = tl.where(cols < N, x, 0.0)
var = tl.sum(x * x)
rstd = 1 / tl.sqrt(var + eps)
# tl.store(Rstd + row, rstd)
# Normalize and apply linear transformation
mask = cols < N
# y = x * rstd
dy = tl.load(DY + cols, mask=cols < N, other=0.0).to(tl.float32)
dy = tl.where(cols < N, dy, 0.0)
# dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x
dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x
tl.store(DX + cols, dx, mask=mask)
def _l2_norm_fwd(
x, eps=1e-6
):
x_shape_og = x.shape
x = x.reshape(-1, x.shape[-1])
if x.stride(-1) != 1:
x = x.contiguous()
M, N = x.shape
assert x.stride(-1) == 1
# allocate output
y = torch.empty_like(x)
assert y.stride(-1) == 1
N = x.shape[-1]
M = x.shape[0]
# rstd = torch.empty((M,), dtype=torch.float32, device="cuda")
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_N:
raise RuntimeError(
"This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps
with torch.cuda.device(x.device.index):
_l2_norm_fwd_1pass_kernel[(M,)](
x,
y,
x.stride(0),
N,
eps,
# is_rms_norm,
BLOCK_N,
# residual is not None,
# residual_out is not None,
# bias is not None,
)
return y.reshape(x_shape_og)
def _l2_norm_bwd(
x, dy, eps=1e-5,
):
x_shape_og = x.shape
x = x.reshape(-1, dy.shape[-1])
dy = dy.reshape(-1, dy.shape[-1])
if dy.stride(-1) != 1:
dy = dy.contiguous()
assert dy.shape == x.shape
# allocate output
dx = torch.empty_like(x)
N = x.shape[-1]
M = x.shape[0]
assert x.stride(-1) == 1
assert dy.stride(-1) == 1
# rstd = torch.empty((M,), dtype=torch.float32, device="cuda")
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_N:
raise RuntimeError(
"This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps
with torch.cuda.device(x.device.index):
_l2_norm_bwd_kernel[(M,)](
x,
dy,
dx,
x.stride(0),
N,
eps,
BLOCK_N,
)
return dx.reshape(x_shape_og)
class L2NormFN(torch.autograd.Function):
@staticmethod
def forward(
ctx,
x,
eps=1e-6,
):
# reshape input data into 2D tensor
y = _l2_norm_fwd(x, eps)
ctx.eps = eps
ctx.x_dtype = x.dtype
ctx.save_for_backward(x)
return y
@staticmethod
def backward(ctx, dy, *args):
x, = ctx.saved_tensors
dx = _l2_norm_bwd(
x,
dy,
ctx.eps,
)
return (
dx,
None
)
l2_norm_fn = L2NormFN.apply
|
@triton.jit
def _l2_norm_bwd_kernel(
X, # pointer to the input
# Y, # pointer to the output to be recomputed
DY, # pointer to the output gradient
DX, # pointer to the input gradient
stride_x_row, # how much to increase the pointer when moving by 1 row
N, # number of columns in X
eps, # epsilon to avoid division by zero
BLOCK_N: tl.constexpr,
):
# Map the program id to the elements of X, DX, and DY it should compute.
# Map the program id to the row of X and Y it should compute.
row = tl.program_id(0)
X += row * stride_x_row
DX += row * stride_x_row
DY += row * stride_x_row
# Y += row * stride_y_row
cols = tl.arange(0, BLOCK_N)
x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
x = tl.where(cols < N, x, 0.0)
var = tl.sum(x * x)
rstd = 1 / tl.sqrt(var + eps)
# tl.store(Rstd + row, rstd)
# Normalize and apply linear transformation
mask = cols < N
# y = x * rstd
dy = tl.load(DY + cols, mask=cols < N, other=0.0).to(tl.float32)
dy = tl.where(cols < N, dy, 0.0)
# dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x
dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x
tl.store(DX + cols, dx, mask=mask)
def _l2_norm_fwd(
x, eps=1e-6
):
x_shape_og = x.shape
x = x.reshape(-1, x.shape[-1])
if x.stride(-1) != 1:
x = x.contiguous()
M, N = x.shape
assert x.stride(-1) == 1
# allocate output
y = torch.empty_like(x)
assert y.stride(-1) == 1
N = x.shape[-1]
M = x.shape[0]
# rstd = torch.empty((M,), dtype=torch.float32, device="cuda")
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_N:
raise RuntimeError(
"This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps
with torch.cuda.device(x.device.index):
_l2_norm_fwd_1pass_kernel[(M,)](
x,
y,
x.stride(0),
N,
eps,
# is_rms_norm,
BLOCK_N,
# residual is not None,
# residual_out is not None,
# bias is not None,
)
return y.reshape(x_shape_og)
def _l2_norm_bwd(
x, dy, eps=1e-5,
):
x_shape_og = x.shape
x = x.reshape(-1, dy.shape[-1])
dy = dy.reshape(-1, dy.shape[-1])
if dy.stride(-1) != 1:
dy = dy.contiguous()
assert dy.shape == x.shape
# allocate output
dx = torch.empty_like(x)
N = x.shape[-1]
M = x.shape[0]
assert x.stride(-1) == 1
assert dy.stride(-1) == 1
# rstd = torch.empty((M,), dtype=torch.float32, device="cuda")
# Less than 64KB per feature: enqueue fused kernel
MAX_FUSED_SIZE = 65536 // x.element_size()
BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
if N > BLOCK_N:
raise RuntimeError(
"This layer norm doesn't support feature dim >= 64KB.")
# heuristics for number of warps
with torch.cuda.device(x.device.index):
_l2_norm_bwd_kernel[(M,)](
x,
dy,
dx,
x.stride(0),
N,
eps,
BLOCK_N,
)
return dx.reshape(x_shape_og)
class L2NormFN(torch.autograd.Function):
@staticmethod
def forward(
ctx,
x,
eps=1e-6,
):
# reshape input data into 2D tensor
y = _l2_norm_fwd(x, eps)
ctx.eps = eps
ctx.x_dtype = x.dtype
ctx.save_for_backward(x)
return y
@staticmethod
def backward(ctx, dy, *args):
x, = ctx.saved_tensors
dx = _l2_norm_bwd(
x,
dy,
ctx.eps,
)
return (
dx,
None
)
l2_norm_fn = L2NormFN.apply
|
RobertCsordas/moe_attention
|
layers/cvmm.py
|
https://github.com/RobertCsordas/moe_attention/blob/7169ad370e68185ecddb592c623b8d550de725df/layers/cvmm.py
|
from typing import Union, Optional
import torch
import math
from dataclasses import dataclass
from torch.cuda.amp import custom_fwd, custom_bwd
import triton
import triton.language as tl
@dataclass
class CVMMSel:
raw_sel: torch.Tensor
sel: torch.Tensor
sel_index: torch.Tensor
out_index: Optional[torch.Tensor] = None
reduction_weight: Optional[torch.Tensor] = None
def clone(self) -> 'CVMMSel':
return CVMMSel(self.raw_sel, self.sel, self.sel_index, self.out_index, self.reduction_weight)
def cvmm_prepare_sel(sel: torch.Tensor, n_experts: Optional[int] = None) -> CVMMSel:
fsel = sel.flatten()
ssel, sel_index = fsel.sort()
return CVMMSel(sel, ssel.view_as(sel), sel_index, None)
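# Worked example (illustrative addition): for sel = [2, 0, 1, 0], sorting yields
#   ssel      = [0, 0, 1, 2]   (expert ids in ascending order)
#   sel_index = [1, 3, 2, 0]   (original row owning each sorted slot; ties may be
#                               ordered either way, which the kernels do not rely on)
# The kernels below walk the rows grouped by expert via `sel`, and use `sel_index`
# to gather the matching input rows and scatter the outputs back to caller order.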
# `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes:
# - A list of `triton.Config` objects that define different configurations of
# meta-parameters (e.g., `BLOCK_SIZE_M`) and compilation options (e.g., `num_warps`) to try
# - An auto-tuning *key* whose change in values will trigger evaluation of all the
# provided configs
@triton.autotune(
configs=[
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
],
key=['M', 'N', 'K', 'float32', 'allow_tf32']
)
@triton.jit
def cvmm_kernel(
# Pointers to matrices
a_ptr, b_ptr, c_ptr, index_ptr, sel_ptr, out_index_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak,
stride_bo, stride_bk, stride_bn,
stride_cm, stride_cn,
stride_index, stride_sel, stride_out_index,
float32: tl.constexpr, allow_tf32: tl.constexpr,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_n = (pid % num_pid_in_group) // group_size_m
pid_m = first_pid_m + (pid % group_size_m)
# n_vects = tl.load(cnt_ptr + stride_cnt * matrix_id)
sel_first = tl.load(sel_ptr + pid_m * BLOCK_SIZE_M * stride_sel)
sel_last = tl.load(sel_ptr + (min((pid_m + 1) * BLOCK_SIZE_M, M) - 1) * stride_sel)
sel_all = tl.load(sel_ptr + stride_sel * ((pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M))
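    # Because `sel` is sorted, the rows of this M-block belong to experts in the
    # contiguous range [sel_first, sel_last]. The loop below runs the full matmul
    # block once per expert id in that range, and the store mask
    # (sel_all == matrix_id) keeps only the rows that actually use that expert.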
for matrix_id in range(sel_first, sel_last + 1):
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetics` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
remap_offs_am = tl.load(index_ptr + stride_index * offs_am)
# Create offset pointers
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (remap_offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + matrix_id * stride_bo + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
# if not float32:
# if not float32:
# b = b.to(tl.float16)
if not float32:
a = a.to(tl.float16)
b = b.to(tl.float16)
accumulator += tl.dot(a, b, allow_tf32=allow_tf32)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
if not float32:
c = accumulator.to(tl.float16)
else:
c = accumulator
# c = tl.full((BLOCK_SIZE_M, BLOCK_SIZE_N), index_offset_for_this_matrix+100, dtype=OUT_TYPE)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
# remap_offs_cm = index_ptr[offset_ptr[matrix_id] + offs_am]
# offs_cm = remap_offs_am
if out_index_ptr is not None:
remap_offs_cm = tl.load(out_index_ptr + stride_out_index * offs_am)
else:
remap_offs_cm = remap_offs_am
# remap_offs_cm = offs_cm
# remap_offs_cm = offs_am
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * remap_offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = ((offs_cm[:, None] < M) & (sel_all[:, None] == matrix_id)) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
@triton.autotune(
configs=[
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
# triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 128}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 4}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
# triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 128}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 16}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 16}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
],
key=['M', 'N', 'K', 'float32_out', 'allow_tf32', 'op_float16'], reset_to_zero = ['c_ptr']
)
@triton.jit
def cvmm_backward_kernel3(
# Pointers to matrices
a_ptr, b_ptr, c_ptr, index_ptr, sel_ptr, out_index_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak,
stride_bk, stride_bn,
stride_co, stride_cm, stride_cn,
stride_index, stride_sel, stride_out_index,
float32_out: tl.constexpr, allow_tf32: tl.constexpr, op_float16: tl.constexpr,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr, K_BLOCKS: tl.constexpr
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
k_block_id = tl.program_id(axis=1)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetics` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
a_ptrs_this = a_ptr + offs_am[:, None] * stride_am
b_ptrs_this = b_ptr + offs_bn[None, :] * stride_bn
# Kactual = end_i - start_i
# Nblocks = (Kactual + BLOCK_SIZE_K - 1) // BLOCK_SIZE_K
# WORK_PER_WORKER = (Nblocks + K_BLOCKS - 1) // K_BLOCKS
# WORK_PER_WORKER = WORK_PER_WORKER if WORK_PER_WORKER > MIN_WORK_SIZE else MIN_WORK_SIZE
# # Kloop_start = (Kactual + BLOCK_SIZE_K - 1) // BLOCK_SIZE_K
# first_block_k = k_block_id * WORK_PER_WORKER
# last_block_k = min((k_block_id+1) * WORK_PER_WORKER, Nblocks)
block_start_index = k_block_id * BLOCK_SIZE_K * K_BLOCKS
block_end_index = min(block_start_index + BLOCK_SIZE_K * K_BLOCKS, K) - 1
first_mat = tl.load(sel_ptr + stride_sel * block_start_index)
last_mat = tl.load(sel_ptr + stride_sel * block_end_index)
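    # `sel` is sorted, so rows belonging to a given expert form a contiguous run.
    # For each expert present in this program's K-slice, the two binary searches
    # below locate that run, the partial product is accumulated in fp32, and the
    # result is added into that expert's gradient with tl.atomic_add, since other
    # K-slices (program axis 1) may update the same expert concurrently (hence
    # reset_to_zero=['c_ptr'] in the autotuner above).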
for matrix_index in range(first_mat, last_mat + 1):
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
start_i = block_start_index
end_i = block_end_index + 1
while start_i < end_i:
middle = (start_i + end_i) // 2
middle_matrix = tl.load(sel_ptr + middle * stride_sel)
if middle_matrix < matrix_index:
start_i = middle + 1
else:
end_i = middle
# # Continue binary search: find the first matrix that is > matrix_index
start_i2 = start_i
end_i = block_end_index + 1
while start_i2 < end_i:
middle = (start_i2 + end_i) // 2
middle_matrix = tl.load(sel_ptr + middle * stride_sel)
if middle_matrix <= matrix_index:
start_i2 = middle + 1
else:
end_i = middle
end_i = start_i2
count = end_i - start_i
block_mem_indices_f_base = start_i + tl.arange(0, BLOCK_SIZE_K)
if count > 0:
for k in range((count + BLOCK_SIZE_K - 1) // BLOCK_SIZE_K):
# block_mem_indices = (k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)) % K
block_mem_indices_f = block_mem_indices_f_base + k * BLOCK_SIZE_K
block_mem_indices = block_mem_indices_f % K
a_index = tl.load(index_ptr + stride_index * block_mem_indices)
if out_index_ptr is not None:
b_index = tl.load(out_index_ptr + stride_out_index * block_mem_indices)
else:
b_index = a_index
sel_ok = block_mem_indices_f < end_i
a_ptrs = a_ptrs_this + a_index[None, :] * stride_ak
b_ptrs = b_ptrs_this + b_index[:, None] * stride_bk
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=sel_ok[None, :], other=0.0)
b = tl.load(b_ptrs, mask=sel_ok[:, None], other=0.0)
if op_float16:
a = a.to(tl.float16)
b = b.to(tl.float16)
# We accumulate along the K dimension.
accumulator += tl.dot(a, b, allow_tf32=allow_tf32)
if float32_out:
c = accumulator
else:
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_co * matrix_index + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
# tl.store(c_ptrs, c, mask=c_mask)
tl.atomic_add(c_ptrs, c, mask=c_mask)
def cvmm_triton(x: torch.Tensor, sel_index: torch.Tensor, sel: torch.Tensor, keys: torch.Tensor, out_dtype: torch.dtype, out_index: Optional[torch.Tensor] = None):
xorig = x
x = x.flatten(end_dim=-2)
assert x.shape[-1] == keys.shape[1]
sel_shape = sel.shape
sel = sel.flatten()
M = sel.shape[0]
O, K, N = keys.shape
# Allocates output.
out = torch.empty((M, N), device=x.device, dtype=out_dtype)
# out = torch.zeros((M, N), device=x.device, dtype=out_dtype)
# 1D launch kernel where each block gets its own program.
# expected_m_per_matrix = int(math.ceil(M / O * 1.5))
# expected_m_per_matrix = M
grid = lambda META: (
triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),
)
cvmm_kernel[grid](
x, keys, out, sel_index, sel, out_index,
M, N, K,
x.stride(0), x.stride(1),
keys.stride(0), keys.stride(1), keys.stride(2),
out.stride(0), out.stride(1),
sel_index.stride(0), sel.stride(0), out_index.stride(0) if out_index is not None else 0,
float32=out.dtype==torch.float32, allow_tf32=False, #torch.backends.cuda.matmul.allow_tf32
)
return out.view(*sel_shape, N)
def cvmm_triton_backward(x: torch.Tensor, sel_index: torch.Tensor, sel: torch.Tensor, grads: torch.Tensor, n_experts: int, key_dtype: torch.dtype,
op_float16: bool, out_index: Optional[torch.Tensor] = None):
x = x.flatten(end_dim=-2)
x = x.transpose(0, 1)
grads = grads.flatten(end_dim=-2)
sel = sel.flatten()
M, _ = x.shape
K, N = grads.shape
out = torch.zeros((n_experts, M, N), device=x.device, dtype=key_dtype)
grid = lambda META: (
triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), triton.cdiv(K, META['BLOCK_SIZE_K'] * META['K_BLOCKS'])
)
cvmm_backward_kernel3[grid](
x, grads, out, sel_index, sel, out_index,
M, N, K,
x.stride(0), x.stride(1),
grads.stride(0), grads.stride(1),
out.stride(0), out.stride(1), out.stride(2),
sel_index.stride(0), sel.stride(0), out_index.stride(0) if out_index is not None else 0,
float32_out=out.dtype == torch.float32,
op_float16=op_float16,
allow_tf32=False #torch.backends.cuda.matmul.allow_tf32
)
return out
class CVMM(torch.autograd.Function):
warned = False
@staticmethod
def forward(ctx, x: torch.Tensor, sel_index: torch.Tensor, sel: torch.Tensor, keys: torch.Tensor, out_index: Optional[torch.Tensor] = None, reduction_weight: Optional[torch.Tensor] = None):
ctx.save_for_backward(x, keys, sel, sel_index, out_index, reduction_weight)
out_type = torch.float16 if torch.is_autocast_enabled() else x.dtype
# if torch.is_autocast_enabled():
# x = x.half()
# keys = keys.half()
res = cvmm_triton(x, sel_index, sel, keys, out_type, out_index)
if reduction_weight is not None:
res = res.view(*reduction_weight.shape, res.shape[-1])
res = (reduction_weight.unsqueeze(-2).type_as(res) @ res).squeeze(-2)
ctx.op_type = out_type
ctx.keys_type = keys.dtype
ctx.is_autocast = torch.is_autocast_enabled()
return res
@staticmethod
def backward(ctx, grad_output):
x, keys, sel, sel_index, out_index, reduction_weight = ctx.saved_tensors
# if torch.is_autocast_enabled():
# x = x.half()
# keys = keys.half()
# grad_output = grad_output.half()
# x = x.type(ctx.op_type)
# keys_dt = keys.type_as(x)
keys_dt = keys
# Backward for weight
if reduction_weight is not None:
            # Project back the grads with the reduction weight, so the grad for the weight matrix is correct
grad_output_w = reduction_weight.unsqueeze(-1).type_as(grad_output) @ grad_output.unsqueeze(-2)
else:
grad_output_w = grad_output
grad_w = cvmm_triton_backward(x, sel_index, sel, grad_output_w, keys_dt.shape[0], ctx.keys_type, ctx.is_autocast, out_index=out_index)
# Backward for input and reduction weight
grad_w_off = None
bw_index = sel_index if out_index is None else out_index
bw_index_out = None
if reduction_weight is not None:
# Hack the output indices to emulate repeats
bw_index_out = bw_index
bw_index = bw_index // reduction_weight.shape[-1]
grad_x_full = cvmm_triton(grad_output, bw_index, sel, keys_dt.transpose(1,2), ctx.op_type, bw_index_out)
grad_x_full = grad_x_full.view(*x.shape[:-1], -1, x.shape[-1])
if reduction_weight is not None:
            # grad_x_full is the unscaled grad. For the input we have to scale it; for the reduction weight,
            # we have to compute dot products with the input.
grad_x = (reduction_weight.view(*grad_x_full.shape[:-1]).unsqueeze(-2).type_as(grad_x_full) @ grad_x_full).squeeze(-2)
grad_w_off = (grad_x_full.type_as(reduction_weight) @ x.unsqueeze(-1).type_as(reduction_weight)).squeeze(-1).view_as(reduction_weight)
elif grad_x_full.shape[-2] != 1:
grad_x = grad_x_full.sum(-2)
else:
grad_x = grad_x_full
grad_x = grad_x.view_as(x)
return grad_x, None, None, grad_w, None, grad_w_off
known_shapes = set()
def cvmm(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if not isinstance(sel, CVMMSel):
sel = cvmm_prepare_sel(sel, keys.shape[0])
sh = (x.shape, keys.shape)
if sh not in known_shapes:
print("New shape:", sh)
known_shapes.add(sh)
return CVMM.apply(x, sel.sel_index, sel.sel, keys, sel.out_index, sel.reduction_weight)
def cvmm_prepare_sel2(sel: torch.Tensor, w: Optional[torch.Tensor] = None) -> CVMMSel:
# Has multiple selections for each batch element
n_per_batch = sel.shape[-1]
# indices = torch.arange(sel.nelement() // n_per_batch, device=sel.device, dtype=torch.int32)
# indices = indices.repeat_interleave(n_per_batch).flatten()
fsel = sel.flatten()
ssel, sel_index = fsel.sort()
# in_index = indices[sel_index]
in_index = sel_index // n_per_batch
return CVMMSel(sel, ssel.view_as(sel), in_index, sel_index, w)
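# Usage sketch (illustrative addition; mirrors the tests below). With one expert per
# row, cvmm computes y[b] = x[b] @ keys[sel[b]]:
#   keys = torch.randn(n_experts, d_in, d_out, device='cuda')
#   sel  = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device='cuda')
#   y    = cvmm(x, sel, keys)
# With k selections per row, cvmm_prepare_sel2(sel_raw, w) additionally records a
# per-selection reduction weight, so the k expert outputs are summed weighted by w.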
if __name__ == "__main__":
def cvmm_hack(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if not isinstance(sel, CVMMSel):
sel = cvmm_prepare_sel(sel, keys.shape[0])
sh = (x.shape, keys.shape)
if sh not in known_shapes:
print("New shape:", sh)
known_shapes.add(sh)
res = CVMM.apply(x, sel.sel_index, sel.sel, keys, sel.out_index, None)
if sel.reduction_weight is not None:
res = res.view(*sel.reduction_weight.shape, res.shape[-1])
res = (sel.reduction_weight.unsqueeze(-2).type_as(res) @ res).squeeze(-2)
return res
def test_wsum():
n_experts = 2
n_channels = 3
expert_size = 3
bs = 2
# n_experts = 8
# n_channels = 64
# expert_size = 64
# bs = 32
# n_per_batch = 1
n_per_batch = 2
# reduction_factor = 2
reduction_factor = 1
device = torch.device("cuda")
dtype = torch.float32
atol_tresh = 1e-2
keys = torch.nn.Parameter(torch.randn(n_experts, n_channels, expert_size, dtype=dtype, device=device))
testvec = torch.randn(bs, n_channels, dtype=dtype, device=device)
sel_raw = torch.randint(0, n_experts, (bs,n_per_batch), dtype=torch.int32, device=device)
# w = torch.randn_like(sel, dtype=torch.float32)
w = torch.randn((bs // reduction_factor, n_per_batch * reduction_factor), dtype=torch.float32, device=device)
# sel = torch.tensor([[1,0]], dtype=torch.int32, device=device)
sel = cvmm_prepare_sel2(sel_raw, w)
out = cvmm(testvec, sel, keys)
def cwmm_ref2(x: torch.Tensor, isel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if isinstance(isel, CVMMSel):
sel = isel.raw_sel
getw = lambda b, c: (isel.reduction_weight[b, c] if isel.reduction_weight is not None else 1.0)
else:
sel = isel
getw = lambda b, c: 1.0
olist2 = []
for c in range(sel.shape[-1]):
olist = []
for b in range(x.shape[0]):
olist.append(x[b:b+1] @ keys[sel[b, c]] * getw(b, c))
olist2.append(torch.cat(olist, dim=0))
res = torch.stack(olist2, dim=-2)
if isinstance(isel, CVMMSel) and isel.reduction_weight is not None:
res = res.sum(-2)
return res
ref = cwmm_ref2(testvec, sel, keys)
if torch.allclose(out, ref, atol=1e-2, rtol=0):
print("✅ Multi-output: Triton and Torch match")
else:
print("❌ Multi-output: Triton and Torch differ")
def cvmm_ref_backward2(x: torch.Tensor, sel: CVMMSel, grads: torch.Tensor, n_experts: int):
sel = sel.raw_sel
x = x.flatten(end_dim=-2).transpose(0,1)
res = 0
for c in range(sel.shape[-1]):
gmats = []
for i in range(n_experts):
mask = sel[:, c] != i
sel_my = torch.masked_fill(x, mask[None], 0)
grads_my = torch.masked_fill(grads[..., c, :], mask[:, None], 0)
gmats.append(sel_my @ grads_my)
res += torch.stack(gmats)
return res
grad_out = torch.randn(*out.shape, dtype=dtype, device=device)
keys_ref = keys.detach().clone().requires_grad_(True)
testvec_ref = testvec.detach().clone().requires_grad_(True)
w_ref = w.detach().clone().requires_grad_(True)
sel = cvmm_prepare_sel2(sel_raw, w_ref)
print("CVMM hack")
out_ref = cvmm_hack(testvec_ref, sel, keys_ref)
out_ref.backward(grad_out)
keys_full = keys.detach().clone().requires_grad_(True)
testvec_full = testvec.detach().clone().requires_grad_(True)
w_full = w.detach().clone().requires_grad_(True)
sel = cvmm_prepare_sel2(sel_raw, w_full)
print("CVMM full")
out_full = cvmm(testvec_full, sel, keys_full)
out_full.backward(grad_out)
if torch.allclose(keys_ref.grad, keys_full.grad, atol=1e-2, rtol=0):
print("✅ Multi-output: Triton weight grad ok")
else:
print("❌ Multi-output: Triton weight grad not ok")
if torch.allclose(testvec_ref.grad, testvec_full.grad, atol=1e-2, rtol=0):
print("✅ Multi-output: Triton input grad ok")
else:
print("❌ Multi-output: Triton input grad not ok")
if torch.allclose(w_ref.grad, w_full.grad, atol=1e-2, rtol=0):
print("✅ Multi-output: Triton reduction weight grad ok")
else:
print("❌ Multi-output: Triton reduction weight grad not ok")
# g = cvmm_triton_backward(testvec, sel.sel_index, sel.sel, grad_out, keys.shape[0], keys.dtype, False, out_index=sel.out_index)
# gref = cvmm_ref_backward2(testvec, sel, grad_out, keys.shape[0])
# if torch.allclose(g, gref, atol=1e-2, rtol=0):
# print("✅ Multi-output: Triton grad ok")
# else:
# print("❌ Multi-output: Triton grad not ok")
from torch.autograd import gradcheck
assert gradcheck(cvmm, (testvec, sel, keys), eps=1e-2, atol=1e-4)
print("Gradcheck ok.")
def test_module():
from torch.autograd import gradcheck
n_experts = 4
n_channels = 64
expert_size = 64
bs = 32
device = torch.device("cuda")
dtype = torch.float32
atol_tresh = 1e-2
keys = torch.nn.Parameter(torch.randn(n_experts, n_channels, expert_size, dtype=dtype, device=device))
testvec = torch.randn(bs, n_channels, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device=device)
test_grad = torch.randn(bs, expert_size, dtype=dtype, device=device)
olist = []
for b in range(bs):
olist.append(testvec[b:b+1] @ keys[sel[b]])
ref = torch.cat(olist, dim=0)
out = cvmm(testvec, sel, keys)
assert torch.allclose(ref, out, atol=atol_tresh, rtol=0)
print("Forward ok.")
keys = keys.requires_grad_(True)
testvec = testvec.requires_grad_(True)
assert gradcheck(cvmm, (testvec, sel, keys), eps=1e-2, atol=atol_tresh, rtol=0)
print("Backward ok.")
test_wsum()
# test_module()
def cwmm_ref(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if isinstance(sel, CVMMSel):
sel = sel.raw_sel
olist = []
for b in range(x.shape[0]):
olist.append(x[b:b+1] @ keys[sel[b]])
return torch.cat(olist, dim=0)
def test_forward():
torch.manual_seed(0)
n_experts = 8
n_channels = 64
expert_size = 64
bs = 64
device = torch.device("cuda")
dtype = torch.float16
atol_tresh = 1e-2
keys = torch.nn.Parameter(torch.randn(n_experts, n_channels, expert_size, dtype=dtype, device=device))
keys = keys.transpose(1,2).contiguous().transpose(1,2)
testvec = torch.randn(bs, n_channels, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device=device)
exp_sel = torch.distributions.Geometric(0.02).sample((bs,)).to(device).clamp(max=n_experts-1).int()
exp_sel = torch.randperm(n_experts, device=device, dtype=torch.int32)[exp_sel]
sel = exp_sel
# sel = torch.tensor([0, 1], dtype=torch.int32, device=device)
sel = cvmm_prepare_sel(sel, keys.shape[0])
ref = cwmm_ref(testvec, sel, keys)
out = cvmm_triton(testvec, sel.sel_index, sel.sel, keys, dtype)
if torch.allclose(out, ref, atol=1e-2, rtol=0):
print("✅ Triton and Torch match")
else:
print("❌ Triton and Torch differ")
def do_benchmark(K, N):
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['bsz'], # Argument names to use as an x-axis for the plot
x_vals=[
2048 * (i+2) for i in range(0, 32, 8)
]+[131072], # Different possible values for `x_name`
line_arg='provider', # Argument name whose value corresponds to a different line in the plot
# Possible values for `line_arg`
line_vals=['cublas', 'triton'],
# Label name for the lines
line_names=["cuBLAS", "Triton"],
# Line styles
styles=[('green', '-'), ('blue', '-')],
ylabel="TFLOPS", # Label name for the y-axis
plot_name="matmul-performance", # Name for the plot, used also as a file name for saving the plot.
args={},
)
)
def benchmark(bsz, provider):
# a = torch.randn((M, K), device='cuda', dtype=torch.float16)
# b = torch.randn((K, N), device='cuda', dtype=torch.float16)
dtype = torch.float32 if provider == 'cuda' else torch.float16
keys = torch.nn.Parameter(torch.randn(n_experts, K, N, dtype=dtype, device=device))
testvec = torch.randn(bsz, K, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bsz,), dtype=torch.int32, device=device)
keys = keys.transpose(1,2).contiguous().transpose(1,2)
# exp_sel = torch.distributions.Geometric(0.02).sample((bsz,)).to(device).clamp(max=n_experts-1).int()
# exp_sel = torch.randperm(n_experts, device=device, dtype=torch.int32)[exp_sel]
# sel = exp_sel
sel = cvmm_prepare_sel(sel, keys.shape[0])
# ref = cwmm_ref(testvec, sel, keys)
# out = cvmm_triton(testvec, sel, keys)
# if torch.allclose(out, ref, atol=5e-2, rtol=0):
# print("✅ Triton and Torch match")
# else:
# print("❌ Triton and Torch differ")
quantiles = [0.5, 0.2, 0.8]
if provider == 'cublas':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(testvec, keys[0]), quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: cvmm_triton(testvec, sel.sel_index, sel.sel, keys, dtype), quantiles=quantiles)
# if provider == 'cuda':
# ms, min_ms, max_ms = triton.testing.do_bench(lambda: cvmm(testvec, sel, keys), quantiles=quantiles)
perf = lambda ms: 2 * bsz * N * K * 1e-12 / (ms * 1e-3)
# perf = lambda x: x
return perf(ms), perf(max_ms), perf(min_ms)
print(f"Benchmark: [bsz, {K}] @ [{n_experts}, {K}, {N}] -> [bsz, {N}]")
benchmark.run(show_plots=True, print_data=True)
do_benchmark(128, 512)
do_benchmark(256, 512)
do_benchmark(512, 128)
test_forward()
def test_backward():
def cvmm_ref_backward(x: torch.Tensor, sel: CVMMSel, grads: torch.Tensor, n_experts: int):
sel = sel.raw_sel
x = x.flatten(end_dim=-2).transpose(0,1)
gmats = []
for i in range(n_experts):
mask = sel != i
sel_my = torch.masked_fill(x, mask[None], 0)
grads_my = torch.masked_fill(grads, mask[:, None], 0)
gmats.append(sel_my @ grads_my)
return torch.stack(gmats)
torch.manual_seed(0)
n_experts = 8
n_channels = 64
expert_size = 64
bs = 64
# n_channels = 8
# expert_size = 8
# n_experts = 2
# bs=2
device = torch.device("cuda")
dtype = torch.float16
atol_tresh = 1e-2
testvec = torch.randn(bs, n_channels, dtype=dtype, device=device)
grads = torch.randn(bs, expert_size, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device=device)
# exp_sel = torch.distributions.Geometric(0.02).sample((bs,)).to(device).clamp(max=n_experts-1).int()
# exp_sel = torch.randperm(n_experts, device=device, dtype=torch.int32)[exp_sel]
# sel = exp_sel
# sel = torch.tensor([0, 1], dtype=torch.int32, device=device)
# sel = torch.tensor([1, 0], dtype=torch.int32, device=device)
cvmmsel = cvmm_prepare_sel(sel, n_experts)
ref = cvmm_ref_backward(testvec, cvmmsel, grads, n_experts)
out = cvmm_triton_backward(testvec, cvmmsel.sel_index, cvmmsel.sel, grads, n_experts, key_dtype=ref.dtype, op_float16=dtype==torch.float16)
if torch.allclose(out, ref, atol=1e-2, rtol=0):
print("✅ Triton and Torch match")
else:
print("❌ Triton and Torch differ")
def do_benchmark(K, N):
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['bsz'], # Argument names to use as an x-axis for the plot
x_vals=[
2048 * (i+2) for i in range(0, 32, 8)
]+[131072], # Different possible values for `x_name`
line_arg='provider', # Argument name whose value corresponds to a different line in the plot
# Possible values for `line_arg`
line_vals=['cublas', 'triton'],
# Label name for the lines
line_names=["cuBLAS", "Triton"],
# Line styles
styles=[('green', '-'), ('blue', '-')],
ylabel="TFLOPS", # Label name for the y-axis
plot_name="matmul-performance", # Name for the plot, used also as a file name for saving the plot.
args={},
)
)
def benchmark(bsz, provider):
# a = torch.randn((M, K), device='cuda', dtype=torch.float16)
# b = torch.randn((K, N), device='cuda', dtype=torch.float16)
dtype = torch.float32 if provider == 'cuda' else torch.float16
# dtype = torch.float32
sel = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device=device)
testvec = torch.randn(bsz, K, dtype=dtype, device=device)
grads = torch.randn(bsz, N, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bsz,), dtype=torch.int32, device=device)
exp_sel = torch.distributions.Geometric(0.02).sample((bsz,)).to(device).clamp(max=n_experts-1).int()
exp_sel = torch.randperm(n_experts, device=device, dtype=torch.int32)[exp_sel]
sel = exp_sel
sel = cvmm_prepare_sel(sel, n_experts)
# ref = cvmm_ref_backward(testvec, sel, grads, n_experts)
# out = cvmm_triton_backward(testvec, sel, grads, n_experts)
# if torch.allclose(out, ref, atol=5e-2, rtol=0):
# print("✅ Triton and Torch match")
# else:
# print("❌ Triton and Torch differ")
quantiles = [0.5, 0.2, 0.8]
if provider == 'cublas':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(testvec.transpose(0,1), grads), quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: cvmm_triton_backward(testvec, sel.sel_index, sel.sel, grads, n_experts, key_dtype=ref.dtype, op_float16=dtype==torch.float16), quantiles=quantiles)
# if provider == 'cuda':
# ms, min_ms, max_ms = triton.testing.do_bench(lambda: cvmm(testvec, sel, keys), quantiles=quantiles)
perf = lambda ms: 2 * bsz * N * K * 1e-12 / (ms * 1e-3)
# perf = lambda x: x
return perf(ms), perf(max_ms), perf(min_ms)
print(f"Benchmark: [bsz, {K}] @ [{n_experts}, {K}, {N}] -> [bsz, {N}]")
benchmark.run(show_plots=True, print_data=True)
do_benchmark(128, 512)
do_benchmark(256, 512)
do_benchmark(512, 128)
test_backward()
# do_benchmark(1024, 80)
# do_benchmark(80, 1024)
# do_benchmark(512, 128)
|
@triton.jit
def cvmm_kernel(
# Pointers to matrices
a_ptr, b_ptr, c_ptr, index_ptr, sel_ptr, out_index_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak,
stride_bo, stride_bk, stride_bn,
stride_cm, stride_cn,
stride_index, stride_sel, stride_out_index,
float32: tl.constexpr, allow_tf32: tl.constexpr,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_n = (pid % num_pid_in_group) // group_size_m
pid_m = first_pid_m + (pid % group_size_m)
# n_vects = tl.load(cnt_ptr + stride_cnt * matrix_id)
sel_first = tl.load(sel_ptr + pid_m * BLOCK_SIZE_M * stride_sel)
sel_last = tl.load(sel_ptr + (min((pid_m + 1) * BLOCK_SIZE_M, M) - 1) * stride_sel)
sel_all = tl.load(sel_ptr + stride_sel * ((pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M))
for matrix_id in range(sel_first, sel_last + 1):
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetics` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
remap_offs_am = tl.load(index_ptr + stride_index * offs_am)
# Create offset pointers
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (remap_offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + matrix_id * stride_bo + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
# if not float32:
# if not float32:
# b = b.to(tl.float16)
if not float32:
a = a.to(tl.float16)
b = b.to(tl.float16)
accumulator += tl.dot(a, b, allow_tf32=allow_tf32)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
if not float32:
c = accumulator.to(tl.float16)
else:
c = accumulator
# c = tl.full((BLOCK_SIZE_M, BLOCK_SIZE_N), index_offset_for_this_matrix+100, dtype=OUT_TYPE)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
# remap_offs_cm = index_ptr[offset_ptr[matrix_id] + offs_am]
# offs_cm = remap_offs_am
if out_index_ptr is not None:
remap_offs_cm = tl.load(out_index_ptr + stride_out_index * offs_am)
else:
remap_offs_cm = remap_offs_am
# remap_offs_cm = offs_cm
# remap_offs_cm = offs_am
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * remap_offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = ((offs_cm[:, None] < M) & (sel_all[:, None] == matrix_id)) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
@triton.autotune(
configs=[
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
# triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 128}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 4}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
# triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 128}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 16}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 16}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
],
key=['M', 'N', 'K', 'float32_out', 'allow_tf32', 'op_float16'], reset_to_zero = ['c_ptr']
)
|
RobertCsordas/moe_attention
|
layers/cvmm.py
|
https://github.com/RobertCsordas/moe_attention/blob/7169ad370e68185ecddb592c623b8d550de725df/layers/cvmm.py
|
from typing import Union, Optional
import torch
import math
from dataclasses import dataclass
from torch.cuda.amp import custom_fwd, custom_bwd
import triton
import triton.language as tl
@dataclass
class CVMMSel:
raw_sel: torch.Tensor
sel: torch.Tensor
sel_index: torch.Tensor
out_index: Optional[torch.Tensor] = None
reduction_weight: Optional[torch.Tensor] = None
def clone(self) -> 'CVMMSel':
return CVMMSel(self.raw_sel, self.sel, self.sel_index, self.out_index, self.reduction_weight)
def cvmm_prepare_sel(sel: torch.Tensor, n_experts: Optional[int] = None) -> CVMMSel:
fsel = sel.flatten()
ssel, sel_index = fsel.sort()
return CVMMSel(sel, ssel.view_as(sel), sel_index, None)
# `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes:
# - A list of `triton.Config` objects that define different configurations of
# meta-parameters (e.g., `BLOCK_SIZE_M`) and compilation options (e.g., `num_warps`) to try
# - An auto-tuning *key* whose change in values will trigger evaluation of all the
# provided configs
@triton.autotune(
configs=[
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4),
],
key=['M', 'N', 'K', 'float32', 'allow_tf32']
)
@triton.jit
def cvmm_kernel(
# Pointers to matrices
a_ptr, b_ptr, c_ptr, index_ptr, sel_ptr, out_index_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak,
stride_bo, stride_bk, stride_bn,
stride_cm, stride_cn,
stride_index, stride_sel, stride_out_index,
float32: tl.constexpr, allow_tf32: tl.constexpr,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_n = (pid % num_pid_in_group) // group_size_m
pid_m = first_pid_m + (pid % group_size_m)
# n_vects = tl.load(cnt_ptr + stride_cnt * matrix_id)
sel_first = tl.load(sel_ptr + pid_m * BLOCK_SIZE_M * stride_sel)
sel_last = tl.load(sel_ptr + (min((pid_m + 1) * BLOCK_SIZE_M, M) - 1) * stride_sel)
sel_all = tl.load(sel_ptr + stride_sel * ((pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M))
for matrix_id in range(sel_first, sel_last + 1):
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetics` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
remap_offs_am = tl.load(index_ptr + stride_index * offs_am)
# Create offset pointers
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (remap_offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + matrix_id * stride_bo + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
# if not float32:
# if not float32:
# b = b.to(tl.float16)
if not float32:
a = a.to(tl.float16)
b = b.to(tl.float16)
accumulator += tl.dot(a, b, allow_tf32=allow_tf32)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
if not float32:
c = accumulator.to(tl.float16)
else:
c = accumulator
# c = tl.full((BLOCK_SIZE_M, BLOCK_SIZE_N), index_offset_for_this_matrix+100, dtype=OUT_TYPE)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
# remap_offs_cm = index_ptr[offset_ptr[matrix_id] + offs_am]
# offs_cm = remap_offs_am
if out_index_ptr is not None:
remap_offs_cm = tl.load(out_index_ptr + stride_out_index * offs_am)
else:
remap_offs_cm = remap_offs_am
# remap_offs_cm = offs_cm
# remap_offs_cm = offs_am
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * remap_offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = ((offs_cm[:, None] < M) & (sel_all[:, None] == matrix_id)) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
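# The backward kernel accumulates into `c_ptr` with tl.atomic_add, so the autotuner is asked to
# reset it to zero between timing runs (`reset_to_zero=['c_ptr']`) to keep the benchmarks honest.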
@triton.autotune(
configs=[
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
# triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 128}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 4}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
# triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 128}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 8}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 16}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 16}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 64}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 8, 'K_BLOCKS': 32}, num_stages=4, num_warps=4),
],
key=['M', 'N', 'K', 'float32_out', 'allow_tf32', 'op_float16'], reset_to_zero = ['c_ptr']
)
@triton.jit
def cvmm_backward_kernel3(
# Pointers to matrices
a_ptr, b_ptr, c_ptr, index_ptr, sel_ptr, out_index_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak,
stride_bk, stride_bn,
stride_co, stride_cm, stride_cn,
stride_index, stride_sel, stride_out_index,
float32_out: tl.constexpr, allow_tf32: tl.constexpr, op_float16: tl.constexpr,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr, K_BLOCKS: tl.constexpr
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
k_block_id = tl.program_id(axis=1)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetics` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
a_ptrs_this = a_ptr + offs_am[:, None] * stride_am
b_ptrs_this = b_ptr + offs_bn[None, :] * stride_bn
# Kactual = end_i - start_i
# Nblocks = (Kactual + BLOCK_SIZE_K - 1) // BLOCK_SIZE_K
# WORK_PER_WORKER = (Nblocks + K_BLOCKS - 1) // K_BLOCKS
# WORK_PER_WORKER = WORK_PER_WORKER if WORK_PER_WORKER > MIN_WORK_SIZE else MIN_WORK_SIZE
# # Kloop_start = (Kactual + BLOCK_SIZE_K - 1) // BLOCK_SIZE_K
# first_block_k = k_block_id * WORK_PER_WORKER
# last_block_k = min((k_block_id+1) * WORK_PER_WORKER, Nblocks)
block_start_index = k_block_id * BLOCK_SIZE_K * K_BLOCKS
block_end_index = min(block_start_index + BLOCK_SIZE_K * K_BLOCKS, K) - 1
first_mat = tl.load(sel_ptr + stride_sel * block_start_index)
last_mat = tl.load(sel_ptr + stride_sel * block_end_index)
for matrix_index in range(first_mat, last_mat + 1):
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
start_i = block_start_index
end_i = block_end_index + 1
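# Lower-bound binary search: find the first position in this K chunk whose (sorted) expert id
# is >= matrix_index; the second search below finds the end of that run.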
while start_i < end_i:
middle = (start_i + end_i) // 2
middle_matrix = tl.load(sel_ptr + middle * stride_sel)
if middle_matrix < matrix_index:
start_i = middle + 1
else:
end_i = middle
# Continue binary search: find the first position whose expert id is > matrix_index (upper bound)
start_i2 = start_i
end_i = block_end_index + 1
while start_i2 < end_i:
middle = (start_i2 + end_i) // 2
middle_matrix = tl.load(sel_ptr + middle * stride_sel)
if middle_matrix <= matrix_index:
start_i2 = middle + 1
else:
end_i = middle
end_i = start_i2
count = end_i - start_i
block_mem_indices_f_base = start_i + tl.arange(0, BLOCK_SIZE_K)
if count > 0:
for k in range((count + BLOCK_SIZE_K - 1) // BLOCK_SIZE_K):
# block_mem_indices = (k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)) % K
block_mem_indices_f = block_mem_indices_f_base + k * BLOCK_SIZE_K
block_mem_indices = block_mem_indices_f % K
a_index = tl.load(index_ptr + stride_index * block_mem_indices)
if out_index_ptr is not None:
b_index = tl.load(out_index_ptr + stride_out_index * block_mem_indices)
else:
b_index = a_index
sel_ok = block_mem_indices_f < end_i
a_ptrs = a_ptrs_this + a_index[None, :] * stride_ak
b_ptrs = b_ptrs_this + b_index[:, None] * stride_bk
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=sel_ok[None, :], other=0.0)
b = tl.load(b_ptrs, mask=sel_ok[:, None], other=0.0)
if op_float16:
a = a.to(tl.float16)
b = b.to(tl.float16)
# We accumulate along the K dimension.
accumulator += tl.dot(a, b, allow_tf32=allow_tf32)
if float32_out:
c = accumulator
else:
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_co * matrix_index + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
# tl.store(c_ptrs, c, mask=c_mask)
tl.atomic_add(c_ptrs, c, mask=c_mask)
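# cvmm_triton: conditional vector-matrix multiplication. Roughly, for every slot m of the sorted
# selection: out[out_index[m]] = x[sel_index[m]] @ keys[sel[m]], with out_index defaulting to
# sel_index when no separate output mapping is given.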
def cvmm_triton(x: torch.Tensor, sel_index: torch.Tensor, sel: torch.Tensor, keys: torch.Tensor, out_dtype: torch.dtype, out_index: Optional[torch.Tensor] = None):
xorig = x
x = x.flatten(end_dim=-2)
assert x.shape[-1] == keys.shape[1]
sel_shape = sel.shape
sel = sel.flatten()
M = sel.shape[0]
O, K, N = keys.shape
# Allocates output.
out = torch.empty((M, N), device=x.device, dtype=out_dtype)
# out = torch.zeros((M, N), device=x.device, dtype=out_dtype)
# 1D launch kernel where each block gets its own program.
# expected_m_per_matrix = int(math.ceil(M / O * 1.5))
# expected_m_per_matrix = M
grid = lambda META: (
triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),
)
cvmm_kernel[grid](
x, keys, out, sel_index, sel, out_index,
M, N, K,
x.stride(0), x.stride(1),
keys.stride(0), keys.stride(1), keys.stride(2),
out.stride(0), out.stride(1),
sel_index.stride(0), sel.stride(0), out_index.stride(0) if out_index is not None else 0,
float32=out.dtype==torch.float32, allow_tf32=False, #torch.backends.cuda.matmul.allow_tf32
)
return out.view(*sel_shape, N)
def cvmm_triton_backward(x: torch.Tensor, sel_index: torch.Tensor, sel: torch.Tensor, grads: torch.Tensor, n_experts: int, key_dtype: torch.dtype,
op_float16: bool, out_index: Optional[torch.Tensor] = None):
x = x.flatten(end_dim=-2)
x = x.transpose(0, 1)
grads = grads.flatten(end_dim=-2)
sel = sel.flatten()
M, _ = x.shape
K, N = grads.shape
out = torch.zeros((n_experts, M, N), device=x.device, dtype=key_dtype)
grid = lambda META: (
triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), triton.cdiv(K, META['BLOCK_SIZE_K'] * META['K_BLOCKS'])
)
cvmm_backward_kernel3[grid](
x, grads, out, sel_index, sel, out_index,
M, N, K,
x.stride(0), x.stride(1),
grads.stride(0), grads.stride(1),
out.stride(0), out.stride(1), out.stride(2),
sel_index.stride(0), sel.stride(0), out_index.stride(0) if out_index is not None else 0,
float32_out=out.dtype == torch.float32,
op_float16=op_float16,
allow_tf32=False #torch.backends.cuda.matmul.allow_tf32
)
return out
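# cvmm_triton_backward: per-expert weight gradient. For each expert e it accumulates
# grad_keys[e] += x[:, rows(e)] @ grads[rows(e), :], split over K chunks and combined with
# atomic adds into a zero-initialised (n_experts, M, N) buffer.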
class CVMM(torch.autograd.Function):
warned = False
@staticmethod
def forward(ctx, x: torch.Tensor, sel_index: torch.Tensor, sel: torch.Tensor, keys: torch.Tensor, out_index: Optional[torch.Tensor] = None, reduction_weight: Optional[torch.Tensor] = None):
ctx.save_for_backward(x, keys, sel, sel_index, out_index, reduction_weight)
out_type = torch.float16 if torch.is_autocast_enabled() else x.dtype
# if torch.is_autocast_enabled():
# x = x.half()
# keys = keys.half()
res = cvmm_triton(x, sel_index, sel, keys, out_type, out_index)
if reduction_weight is not None:
res = res.view(*reduction_weight.shape, res.shape[-1])
res = (reduction_weight.unsqueeze(-2).type_as(res) @ res).squeeze(-2)
ctx.op_type = out_type
ctx.keys_type = keys.dtype
ctx.is_autocast = torch.is_autocast_enabled()
return res
@staticmethod
def backward(ctx, grad_output):
x, keys, sel, sel_index, out_index, reduction_weight = ctx.saved_tensors
# if torch.is_autocast_enabled():
# x = x.half()
# keys = keys.half()
# grad_output = grad_output.half()
# x = x.type(ctx.op_type)
# keys_dt = keys.type_as(x)
keys_dt = keys
# Backward for weight
if reduction_weight is not None:
# Project back the grads with the reduction weight, so the grad for the weight matrix is correct
grad_output_w = reduction_weight.unsqueeze(-1).type_as(grad_output) @ grad_output.unsqueeze(-2)
else:
grad_output_w = grad_output
grad_w = cvmm_triton_backward(x, sel_index, sel, grad_output_w, keys_dt.shape[0], ctx.keys_type, ctx.is_autocast, out_index=out_index)
# Backward for input and reduction weight
grad_w_off = None
bw_index = sel_index if out_index is None else out_index
bw_index_out = None
if reduction_weight is not None:
# Hack the output indices to emulate repeats
bw_index_out = bw_index
bw_index = bw_index // reduction_weight.shape[-1]
grad_x_full = cvmm_triton(grad_output, bw_index, sel, keys_dt.transpose(1,2), ctx.op_type, bw_index_out)
grad_x_full = grad_x_full.view(*x.shape[:-1], -1, x.shape[-1])
if reduction_weight is not None:
# grad_x_full is the unscaled grad. For the input, we have to scale it; for the reduction weight,
# we have to compute dot products with the input.
grad_x = (reduction_weight.view(*grad_x_full.shape[:-1]).unsqueeze(-2).type_as(grad_x_full) @ grad_x_full).squeeze(-2)
grad_w_off = (grad_x_full.type_as(reduction_weight) @ x.unsqueeze(-1).type_as(reduction_weight)).squeeze(-1).view_as(reduction_weight)
elif grad_x_full.shape[-2] != 1:
grad_x = grad_x_full.sum(-2)
else:
grad_x = grad_x_full
grad_x = grad_x.view_as(x)
return grad_x, None, None, grad_w, None, grad_w_off
known_shapes = set()
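# Public entry point. `sel` may be a plain integer tensor (one expert per row) or a CVMMSel from
# cvmm_prepare_sel2 (several experts per row, optionally mixed with reduction weights).
# A hypothetical call, mirroring the tests below:
#   out = cvmm(x, sel, keys)  # x: [bs, K], keys: [n_experts, K, N], sel: [bs] int32 -> out: [bs, N]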
def cvmm(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if not isinstance(sel, CVMMSel):
sel = cvmm_prepare_sel(sel, keys.shape[0])
sh = (x.shape, keys.shape)
if sh not in known_shapes:
print("New shape:", sh)
known_shapes.add(sh)
return CVMM.apply(x, sel.sel_index, sel.sel, keys, sel.out_index, sel.reduction_weight)
def cvmm_prepare_sel2(sel: torch.Tensor, w: Optional[torch.Tensor] = None) -> CVMMSel:
# Has multiple selections for each batch element
n_per_batch = sel.shape[-1]
# indices = torch.arange(sel.nelement() // n_per_batch, device=sel.device, dtype=torch.int32)
# indices = indices.repeat_interleave(n_per_batch).flatten()
fsel = sel.flatten()
ssel, sel_index = fsel.sort()
# in_index = indices[sel_index]
in_index = sel_index // n_per_batch
return CVMMSel(sel, ssel.view_as(sel), in_index, sel_index, w)
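# cvmm_prepare_sel2 sorts the flattened selections so rows routed to the same expert are
# contiguous; `in_index` maps each sorted slot back to its batch row, while the argsort itself is
# kept as the output index used to scatter results back to their original slots.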
if __name__ == "__main__":
def cvmm_hack(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if not isinstance(sel, CVMMSel):
sel = cvmm_prepare_sel(sel, keys.shape[0])
sh = (x.shape, keys.shape)
if sh not in known_shapes:
print("New shape:", sh)
known_shapes.add(sh)
res = CVMM.apply(x, sel.sel_index, sel.sel, keys, sel.out_index, None)
if sel.reduction_weight is not None:
res = res.view(*sel.reduction_weight.shape, res.shape[-1])
res = (sel.reduction_weight.unsqueeze(-2).type_as(res) @ res).squeeze(-2)
return res
def test_wsum():
n_experts = 2
n_channels = 3
expert_size = 3
bs = 2
# n_experts = 8
# n_channels = 64
# expert_size = 64
# bs = 32
# n_per_batch = 1
n_per_batch = 2
# reduction_factor = 2
reduction_factor = 1
device = torch.device("cuda")
dtype = torch.float32
atol_tresh = 1e-2
keys = torch.nn.Parameter(torch.randn(n_experts, n_channels, expert_size, dtype=dtype, device=device))
testvec = torch.randn(bs, n_channels, dtype=dtype, device=device)
sel_raw = torch.randint(0, n_experts, (bs,n_per_batch), dtype=torch.int32, device=device)
# w = torch.randn_like(sel, dtype=torch.float32)
w = torch.randn((bs // reduction_factor, n_per_batch * reduction_factor), dtype=torch.float32, device=device)
# sel = torch.tensor([[1,0]], dtype=torch.int32, device=device)
sel = cvmm_prepare_sel2(sel_raw, w)
out = cvmm(testvec, sel, keys)
def cwmm_ref2(x: torch.Tensor, isel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if isinstance(isel, CVMMSel):
sel = isel.raw_sel
getw = lambda b, c: (isel.reduction_weight[b, c] if isel.reduction_weight is not None else 1.0)
else:
sel = isel
getw = lambda b, c: 1.0
olist2 = []
for c in range(sel.shape[-1]):
olist = []
for b in range(x.shape[0]):
olist.append(x[b:b+1] @ keys[sel[b, c]] * getw(b, c))
olist2.append(torch.cat(olist, dim=0))
res = torch.stack(olist2, dim=-2)
if isinstance(isel, CVMMSel) and isel.reduction_weight is not None:
res = res.sum(-2)
return res
ref = cwmm_ref2(testvec, sel, keys)
if torch.allclose(out, ref, atol=1e-2, rtol=0):
print("✅ Multi-output: Triton and Torch match")
else:
print("❌ Multi-output: Triton and Torch differ")
def cvmm_ref_backward2(x: torch.Tensor, sel: CVMMSel, grads: torch.Tensor, n_experts: int):
sel = sel.raw_sel
x = x.flatten(end_dim=-2).transpose(0,1)
res = 0
for c in range(sel.shape[-1]):
gmats = []
for i in range(n_experts):
mask = sel[:, c] != i
sel_my = torch.masked_fill(x, mask[None], 0)
grads_my = torch.masked_fill(grads[..., c, :], mask[:, None], 0)
gmats.append(sel_my @ grads_my)
res += torch.stack(gmats)
return res
grad_out = torch.randn(*out.shape, dtype=dtype, device=device)
keys_ref = keys.detach().clone().requires_grad_(True)
testvec_ref = testvec.detach().clone().requires_grad_(True)
w_ref = w.detach().clone().requires_grad_(True)
sel = cvmm_prepare_sel2(sel_raw, w_ref)
print("CVMM hack")
out_ref = cvmm_hack(testvec_ref, sel, keys_ref)
out_ref.backward(grad_out)
keys_full = keys.detach().clone().requires_grad_(True)
testvec_full = testvec.detach().clone().requires_grad_(True)
w_full = w.detach().clone().requires_grad_(True)
sel = cvmm_prepare_sel2(sel_raw, w_full)
print("CVMM full")
out_full = cvmm(testvec_full, sel, keys_full)
out_full.backward(grad_out)
if torch.allclose(keys_ref.grad, keys_full.grad, atol=1e-2, rtol=0):
print("✅ Multi-output: Triton weight grad ok")
else:
print("❌ Multi-output: Triton weight grad not ok")
if torch.allclose(testvec_ref.grad, testvec_full.grad, atol=1e-2, rtol=0):
print("✅ Multi-output: Triton input grad ok")
else:
print("❌ Multi-output: Triton input grad not ok")
if torch.allclose(w_ref.grad, w_full.grad, atol=1e-2, rtol=0):
print("✅ Multi-output: Triton reduction weight grad ok")
else:
print("❌ Multi-output: Triton reduction weight grad not ok")
# g = cvmm_triton_backward(testvec, sel.sel_index, sel.sel, grad_out, keys.shape[0], keys.dtype, False, out_index=sel.out_index)
# gref = cvmm_ref_backward2(testvec, sel, grad_out, keys.shape[0])
# if torch.allclose(g, gref, atol=1e-2, rtol=0):
# print("✅ Multi-output: Triton grad ok")
# else:
# print("❌ Multi-output: Triton grad not ok")
from torch.autograd import gradcheck
assert gradcheck(cvmm, (testvec, sel, keys), eps=1e-2, atol=1e-4)
print("Gradcheck ok.")
def test_module():
from torch.autograd import gradcheck
n_experts = 4
n_channels = 64
expert_size = 64
bs = 32
device = torch.device("cuda")
dtype = torch.float32
atol_tresh = 1e-2
keys = torch.nn.Parameter(torch.randn(n_experts, n_channels, expert_size, dtype=dtype, device=device))
testvec = torch.randn(bs, n_channels, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device=device)
test_grad = torch.randn(bs, expert_size, dtype=dtype, device=device)
olist = []
for b in range(bs):
olist.append(testvec[b:b+1] @ keys[sel[b]])
ref = torch.cat(olist, dim=0)
out = cvmm(testvec, sel, keys)
assert torch.allclose(ref, out, atol=atol_tresh, rtol=0)
print("Forward ok.")
keys = keys.requires_grad_(True)
testvec = testvec.requires_grad_(True)
assert gradcheck(cvmm, (testvec, sel, keys), eps=1e-2, atol=atol_tresh, rtol=0)
print("Backward ok.")
test_wsum()
# test_module()
def cwmm_ref(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if isinstance(sel, CVMMSel):
sel = sel.raw_sel
olist = []
for b in range(x.shape[0]):
olist.append(x[b:b+1] @ keys[sel[b]])
return torch.cat(olist, dim=0)
def test_forward():
torch.manual_seed(0)
n_experts = 8
n_channels = 64
expert_size = 64
bs = 64
device = torch.device("cuda")
dtype = torch.float16
atol_tresh = 1e-2
keys = torch.nn.Parameter(torch.randn(n_experts, n_channels, expert_size, dtype=dtype, device=device))
keys = keys.transpose(1,2).contiguous().transpose(1,2)
testvec = torch.randn(bs, n_channels, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device=device)
exp_sel = torch.distributions.Geometric(0.02).sample((bs,)).to(device).clamp(max=n_experts-1).int()
exp_sel = torch.randperm(n_experts, device=device, dtype=torch.int32)[exp_sel]
sel = exp_sel
# sel = torch.tensor([0, 1], dtype=torch.int32, device=device)
sel = cvmm_prepare_sel(sel, keys.shape[0])
ref = cwmm_ref(testvec, sel, keys)
out = cvmm_triton(testvec, sel.sel_index, sel.sel, keys, dtype)
if torch.allclose(out, ref, atol=1e-2, rtol=0):
print("✅ Triton and Torch match")
else:
print("❌ Triton and Torch differ")
def do_benchmark(K, N):
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['bsz'], # Argument names to use as an x-axis for the plot
x_vals=[
2048 * (i+2) for i in range(0, 32, 8)
]+[131072], # Different possible values for `x_name`
line_arg='provider', # Argument name whose value corresponds to a different line in the plot
# Possible values for `line_arg`
line_vals=['cublas', 'triton'],
# Label name for the lines
line_names=["cuBLAS", "Triton"],
# Line styles
styles=[('green', '-'), ('blue', '-')],
ylabel="TFLOPS", # Label name for the y-axis
plot_name="matmul-performance", # Name for the plot, used also as a file name for saving the plot.
args={},
)
)
def benchmark(bsz, provider):
# a = torch.randn((M, K), device='cuda', dtype=torch.float16)
# b = torch.randn((K, N), device='cuda', dtype=torch.float16)
dtype = torch.float32 if provider == 'cuda' else torch.float16
keys = torch.nn.Parameter(torch.randn(n_experts, K, N, dtype=dtype, device=device))
testvec = torch.randn(bsz, K, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bsz,), dtype=torch.int32, device=device)
keys = keys.transpose(1,2).contiguous().transpose(1,2)
# exp_sel = torch.distributions.Geometric(0.02).sample((bsz,)).to(device).clamp(max=n_experts-1).int()
# exp_sel = torch.randperm(n_experts, device=device, dtype=torch.int32)[exp_sel]
# sel = exp_sel
sel = cvmm_prepare_sel(sel, keys.shape[0])
# ref = cwmm_ref(testvec, sel, keys)
# out = cvmm_triton(testvec, sel, keys)
# if torch.allclose(out, ref, atol=5e-2, rtol=0):
# print("✅ Triton and Torch match")
# else:
# print("❌ Triton and Torch differ")
quantiles = [0.5, 0.2, 0.8]
if provider == 'cublas':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(testvec, keys[0]), quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: cvmm_triton(testvec, sel.sel_index, sel.sel, keys, dtype), quantiles=quantiles)
# if provider == 'cuda':
# ms, min_ms, max_ms = triton.testing.do_bench(lambda: cvmm(testvec, sel, keys), quantiles=quantiles)
perf = lambda ms: 2 * bsz * N * K * 1e-12 / (ms * 1e-3)
# perf = lambda x: x
return perf(ms), perf(max_ms), perf(min_ms)
print(f"Benchmark: [bsz, {K}] @ [{n_experts}, {K}, {N}] -> [bsz, {N}]")
benchmark.run(show_plots=True, print_data=True)
do_benchmark(128, 512)
do_benchmark(256, 512)
do_benchmark(512, 128)
test_forward()
def test_backward():
def cvmm_ref_backward(x: torch.Tensor, sel: CVMMSel, grads: torch.Tensor, n_experts: int):
sel = sel.raw_sel
x = x.flatten(end_dim=-2).transpose(0,1)
gmats = []
for i in range(n_experts):
mask = sel != i
sel_my = torch.masked_fill(x, mask[None], 0)
grads_my = torch.masked_fill(grads, mask[:, None], 0)
gmats.append(sel_my @ grads_my)
return torch.stack(gmats)
torch.manual_seed(0)
n_experts = 8
n_channels = 64
expert_size = 64
bs = 64
# n_channels = 8
# expert_size = 8
# n_experts = 2
# bs=2
device = torch.device("cuda")
dtype = torch.float16
atol_tresh = 1e-2
testvec = torch.randn(bs, n_channels, dtype=dtype, device=device)
grads = torch.randn(bs, expert_size, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device=device)
# exp_sel = torch.distributions.Geometric(0.02).sample((bs,)).to(device).clamp(max=n_experts-1).int()
# exp_sel = torch.randperm(n_experts, device=device, dtype=torch.int32)[exp_sel]
# sel = exp_sel
# sel = torch.tensor([0, 1], dtype=torch.int32, device=device)
# sel = torch.tensor([1, 0], dtype=torch.int32, device=device)
cvmmsel = cvmm_prepare_sel(sel, n_experts)
ref = cvmm_ref_backward(testvec, cvmmsel, grads, n_experts)
out = cvmm_triton_backward(testvec, cvmmsel.sel_index, cvmmsel.sel, grads, n_experts, key_dtype=ref.dtype, op_float16=dtype==torch.float16)
if torch.allclose(out, ref, atol=1e-2, rtol=0):
print("✅ Triton and Torch match")
else:
print("❌ Triton and Torch differ")
def do_benchmark(K, N):
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['bsz'], # Argument names to use as an x-axis for the plot
x_vals=[
2048 * (i+2) for i in range(0, 32, 8)
]+[131072], # Different possible values for `x_name`
line_arg='provider', # Argument name whose value corresponds to a different line in the plot
# Possible values for `line_arg`
line_vals=['cublas', 'triton'],
# Label name for the lines
line_names=["cuBLAS", "Triton"],
# Line styles
styles=[('green', '-'), ('blue', '-')],
ylabel="TFLOPS", # Label name for the y-axis
plot_name="matmul-performance", # Name for the plot, used also as a file name for saving the plot.
args={},
)
)
def benchmark(bsz, provider):
# a = torch.randn((M, K), device='cuda', dtype=torch.float16)
# b = torch.randn((K, N), device='cuda', dtype=torch.float16)
dtype = torch.float32 if provider == 'cuda' else torch.float16
# dtype = torch.float32
sel = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device=device)
testvec = torch.randn(bsz, K, dtype=dtype, device=device)
grads = torch.randn(bsz, N, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bsz,), dtype=torch.int32, device=device)
exp_sel = torch.distributions.Geometric(0.02).sample((bsz,)).to(device).clamp(max=n_experts-1).int()
exp_sel = torch.randperm(n_experts, device=device, dtype=torch.int32)[exp_sel]
sel = exp_sel
sel = cvmm_prepare_sel(sel, n_experts)
# ref = cvmm_ref_backward(testvec, sel, grads, n_experts)
# out = cvmm_triton_backward(testvec, sel, grads, n_experts)
# if torch.allclose(out, ref, atol=5e-2, rtol=0):
# print("✅ Triton and Torch match")
# else:
# print("❌ Triton and Torch differ")
quantiles = [0.5, 0.2, 0.8]
if provider == 'cublas':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(testvec.transpose(0,1), grads), quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: cvmm_triton_backward(testvec, sel.sel_index, sel.sel, grads, n_experts, key_dtype=ref.dtype, op_float16=dtype==torch.float16), quantiles=quantiles)
# if provider == 'cuda':
# ms, min_ms, max_ms = triton.testing.do_bench(lambda: cvmm(testvec, sel, keys), quantiles=quantiles)
perf = lambda ms: 2 * bsz * N * K * 1e-12 / (ms * 1e-3)
# perf = lambda x: x
return perf(ms), perf(max_ms), perf(min_ms)
print(f"Benchmark: [bsz, {K}] @ [{n_experts}, {K}, {N}] -> [bsz, {N}]")
benchmark.run(show_plots=True, print_data=True)
do_benchmark(128, 512)
do_benchmark(256, 512)
do_benchmark(512, 128)
test_backward()
# do_benchmark(1024, 80)
# do_benchmark(80, 1024)
# do_benchmark(512, 128)
|
@triton.jit
def cvmm_backward_kernel3(
# Pointers to matrices
a_ptr, b_ptr, c_ptr, index_ptr, sel_ptr, out_index_ptr,
# Matrix dimensions
M, N, K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am, stride_ak,
stride_bk, stride_bn,
stride_co, stride_cm, stride_cn,
stride_index, stride_sel, stride_out_index,
float32_out: tl.constexpr, allow_tf32: tl.constexpr, op_float16: tl.constexpr,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr, K_BLOCKS: tl.constexpr
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
k_block_id = tl.program_id(axis=1)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetics` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
a_ptrs_this = a_ptr + offs_am[:, None] * stride_am
b_ptrs_this = b_ptr + offs_bn[None, :] * stride_bn
# Kactual = end_i - start_i
# Nblocks = (Kactual + BLOCK_SIZE_K - 1) // BLOCK_SIZE_K
# WORK_PER_WORKER = (Nblocks + K_BLOCKS - 1) // K_BLOCKS
# WORK_PER_WORKER = WORK_PER_WORKER if WORK_PER_WORKER > MIN_WORK_SIZE else MIN_WORK_SIZE
# # Kloop_start = (Kactual + BLOCK_SIZE_K - 1) // BLOCK_SIZE_K
# first_block_k = k_block_id * WORK_PER_WORKER
# last_block_k = min((k_block_id+1) * WORK_PER_WORKER, Nblocks)
block_start_index = k_block_id * BLOCK_SIZE_K * K_BLOCKS
block_end_index = min(block_start_index + BLOCK_SIZE_K * K_BLOCKS, K) - 1
first_mat = tl.load(sel_ptr + stride_sel * block_start_index)
last_mat = tl.load(sel_ptr + stride_sel * block_end_index)
for matrix_index in range(first_mat, last_mat + 1):
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
start_i = block_start_index
end_i = block_end_index + 1
while start_i < end_i:
middle = (start_i + end_i) // 2
middle_matrix = tl.load(sel_ptr + middle * stride_sel)
if middle_matrix < matrix_index:
start_i = middle + 1
else:
end_i = middle
# Continue binary search: find the first position whose expert id is > matrix_index (upper bound)
start_i2 = start_i
end_i = block_end_index + 1
while start_i2 < end_i:
middle = (start_i2 + end_i) // 2
middle_matrix = tl.load(sel_ptr + middle * stride_sel)
if middle_matrix <= matrix_index:
start_i2 = middle + 1
else:
end_i = middle
end_i = start_i2
count = end_i - start_i
block_mem_indices_f_base = start_i + tl.arange(0, BLOCK_SIZE_K)
if count > 0:
for k in range((count + BLOCK_SIZE_K - 1) // BLOCK_SIZE_K):
# block_mem_indices = (k * BLOCK_SIZE_K + tl.arange(0, BLOCK_SIZE_K)) % K
block_mem_indices_f = block_mem_indices_f_base + k * BLOCK_SIZE_K
block_mem_indices = block_mem_indices_f % K
a_index = tl.load(index_ptr + stride_index * block_mem_indices)
if out_index_ptr is not None:
b_index = tl.load(out_index_ptr + stride_out_index * block_mem_indices)
else:
b_index = a_index
sel_ok = block_mem_indices_f < end_i
a_ptrs = a_ptrs_this + a_index[None, :] * stride_ak
b_ptrs = b_ptrs_this + b_index[:, None] * stride_bk
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=sel_ok[None, :], other=0.0)
b = tl.load(b_ptrs, mask=sel_ok[:, None], other=0.0)
if op_float16:
a = a.to(tl.float16)
b = b.to(tl.float16)
# We accumulate along the K dimension.
accumulator += tl.dot(a, b, allow_tf32=allow_tf32)
if float32_out:
c = accumulator
else:
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_co * matrix_index + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
# tl.store(c_ptrs, c, mask=c_mask)
tl.atomic_add(c_ptrs, c, mask=c_mask)
def cvmm_triton(x: torch.Tensor, sel_index: torch.Tensor, sel: torch.Tensor, keys: torch.Tensor, out_dtype: torch.dtype, out_index: Optional[torch.Tensor] = None):
xorig = x
x = x.flatten(end_dim=-2)
assert x.shape[-1] == keys.shape[1]
sel_shape = sel.shape
sel = sel.flatten()
M = sel.shape[0]
O, K, N = keys.shape
# Allocates output.
out = torch.empty((M, N), device=x.device, dtype=out_dtype)
# out = torch.zeros((M, N), device=x.device, dtype=out_dtype)
# 1D launch kernel where each block gets its own program.
# expected_m_per_matrix = int(math.ceil(M / O * 1.5))
# expected_m_per_matrix = M
grid = lambda META: (
triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']),
)
cvmm_kernel[grid](
x, keys, out, sel_index, sel, out_index,
M, N, K,
x.stride(0), x.stride(1),
keys.stride(0), keys.stride(1), keys.stride(2),
out.stride(0), out.stride(1),
sel_index.stride(0), sel.stride(0), out_index.stride(0) if out_index is not None else 0,
float32=out.dtype==torch.float32, allow_tf32=False, #torch.backends.cuda.matmul.allow_tf32
)
return out.view(*sel_shape, N)
def cvmm_triton_backward(x: torch.Tensor, sel_index: torch.Tensor, sel: torch.Tensor, grads: torch.Tensor, n_experts: int, key_dtype: torch.dtype,
op_float16: bool, out_index: Optional[torch.Tensor] = None):
x = x.flatten(end_dim=-2)
x = x.transpose(0, 1)
grads = grads.flatten(end_dim=-2)
sel = sel.flatten()
M, _ = x.shape
K, N = grads.shape
out = torch.zeros((n_experts, M, N), device=x.device, dtype=key_dtype)
grid = lambda META: (
triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), triton.cdiv(K, META['BLOCK_SIZE_K'] * META['K_BLOCKS'])
)
cvmm_backward_kernel3[grid](
x, grads, out, sel_index, sel, out_index,
M, N, K,
x.stride(0), x.stride(1),
grads.stride(0), grads.stride(1),
out.stride(0), out.stride(1), out.stride(2),
sel_index.stride(0), sel.stride(0), out_index.stride(0) if out_index is not None else 0,
float32_out=out.dtype == torch.float32,
op_float16=op_float16,
allow_tf32=False #torch.backends.cuda.matmul.allow_tf32
)
return out
class CVMM(torch.autograd.Function):
warned = False
@staticmethod
def forward(ctx, x: torch.Tensor, sel_index: torch.Tensor, sel: torch.Tensor, keys: torch.Tensor, out_index: Optional[torch.Tensor] = None, reduction_weight: Optional[torch.Tensor] = None):
ctx.save_for_backward(x, keys, sel, sel_index, out_index, reduction_weight)
out_type = torch.float16 if torch.is_autocast_enabled() else x.dtype
# if torch.is_autocast_enabled():
# x = x.half()
# keys = keys.half()
res = cvmm_triton(x, sel_index, sel, keys, out_type, out_index)
if reduction_weight is not None:
res = res.view(*reduction_weight.shape, res.shape[-1])
res = (reduction_weight.unsqueeze(-2).type_as(res) @ res).squeeze(-2)
ctx.op_type = out_type
ctx.keys_type = keys.dtype
ctx.is_autocast = torch.is_autocast_enabled()
return res
@staticmethod
def backward(ctx, grad_output):
x, keys, sel, sel_index, out_index, reduction_weight = ctx.saved_tensors
# if torch.is_autocast_enabled():
# x = x.half()
# keys = keys.half()
# grad_output = grad_output.half()
# x = x.type(ctx.op_type)
# keys_dt = keys.type_as(x)
keys_dt = keys
# Backward for weight
if reduction_weight is not None:
# Project back the grads with the reduction weight, so the grad for the weight matrix is correct
grad_output_w = reduction_weight.unsqueeze(-1).type_as(grad_output) @ grad_output.unsqueeze(-2)
else:
grad_output_w = grad_output
grad_w = cvmm_triton_backward(x, sel_index, sel, grad_output_w, keys_dt.shape[0], ctx.keys_type, ctx.is_autocast, out_index=out_index)
# Backward for input and reduction weight
grad_w_off = None
bw_index = sel_index if out_index is None else out_index
bw_index_out = None
if reduction_weight is not None:
# Hack the output indices to emulate repeats
bw_index_out = bw_index
bw_index = bw_index // reduction_weight.shape[-1]
grad_x_full = cvmm_triton(grad_output, bw_index, sel, keys_dt.transpose(1,2), ctx.op_type, bw_index_out)
grad_x_full = grad_x_full.view(*x.shape[:-1], -1, x.shape[-1])
if reduction_weight is not None:
# grad_x_full is the unscaled grad. For the input, we have to scale it; for the reduction weight,
# we have to compute dot products with the input.
grad_x = (reduction_weight.view(*grad_x_full.shape[:-1]).unsqueeze(-2).type_as(grad_x_full) @ grad_x_full).squeeze(-2)
grad_w_off = (grad_x_full.type_as(reduction_weight) @ x.unsqueeze(-1).type_as(reduction_weight)).squeeze(-1).view_as(reduction_weight)
elif grad_x_full.shape[-2] != 1:
grad_x = grad_x_full.sum(-2)
else:
grad_x = grad_x_full
grad_x = grad_x.view_as(x)
return grad_x, None, None, grad_w, None, grad_w_off
known_shapes = set()
def cvmm(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if not isinstance(sel, CVMMSel):
sel = cvmm_prepare_sel(sel, keys.shape[0])
sh = (x.shape, keys.shape)
if sh not in known_shapes:
print("New shape:", sh)
known_shapes.add(sh)
return CVMM.apply(x, sel.sel_index, sel.sel, keys, sel.out_index, sel.reduction_weight)
def cvmm_prepare_sel2(sel: torch.Tensor, w: Optional[torch.Tensor] = None) -> CVMMSel:
# Has multiple selections for each batch element
n_per_batch = sel.shape[-1]
# indices = torch.arange(sel.nelement() // n_per_batch, device=sel.device, dtype=torch.int32)
# indices = indices.repeat_interleave(n_per_batch).flatten()
fsel = sel.flatten()
ssel, sel_index = fsel.sort()
# in_index = indices[sel_index]
in_index = sel_index // n_per_batch
return CVMMSel(sel, ssel.view_as(sel), in_index, sel_index, w)
if __name__ == "__main__":
def cvmm_hack(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if not isinstance(sel, CVMMSel):
sel = cvmm_prepare_sel(sel, keys.shape[0])
sh = (x.shape, keys.shape)
if sh not in known_shapes:
print("New shape:", sh)
known_shapes.add(sh)
res = CVMM.apply(x, sel.sel_index, sel.sel, keys, sel.out_index, None)
if sel.reduction_weight is not None:
res = res.view(*sel.reduction_weight.shape, res.shape[-1])
res = (sel.reduction_weight.unsqueeze(-2).type_as(res) @ res).squeeze(-2)
return res
def test_wsum():
n_experts = 2
n_channels = 3
expert_size = 3
bs = 2
# n_experts = 8
# n_channels = 64
# expert_size = 64
# bs = 32
# n_per_batch = 1
n_per_batch = 2
# reduction_factor = 2
reduction_factor = 1
device = torch.device("cuda")
dtype = torch.float32
atol_tresh = 1e-2
keys = torch.nn.Parameter(torch.randn(n_experts, n_channels, expert_size, dtype=dtype, device=device))
testvec = torch.randn(bs, n_channels, dtype=dtype, device=device)
sel_raw = torch.randint(0, n_experts, (bs,n_per_batch), dtype=torch.int32, device=device)
# w = torch.randn_like(sel, dtype=torch.float32)
w = torch.randn((bs // reduction_factor, n_per_batch * reduction_factor), dtype=torch.float32, device=device)
# sel = torch.tensor([[1,0]], dtype=torch.int32, device=device)
sel = cvmm_prepare_sel2(sel_raw, w)
out = cvmm(testvec, sel, keys)
def cwmm_ref2(x: torch.Tensor, isel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if isinstance(isel, CVMMSel):
sel = isel.raw_sel
getw = lambda b, c: (isel.reduction_weight[b, c] if isel.reduction_weight is not None else 1.0)
else:
sel = isel
getw = lambda b, c: 1.0
olist2 = []
for c in range(sel.shape[-1]):
olist = []
for b in range(x.shape[0]):
olist.append(x[b:b+1] @ keys[sel[b, c]] * getw(b, c))
olist2.append(torch.cat(olist, dim=0))
res = torch.stack(olist2, dim=-2)
if isinstance(isel, CVMMSel) and isel.reduction_weight is not None:
res = res.sum(-2)
return res
ref = cwmm_ref2(testvec, sel, keys)
if torch.allclose(out, ref, atol=1e-2, rtol=0):
print("✅ Multi-output: Triton and Torch match")
else:
print("❌ Multi-output: Triton and Torch differ")
def cvmm_ref_backward2(x: torch.Tensor, sel: CVMMSel, grads: torch.Tensor, n_experts: int):
sel = sel.raw_sel
x = x.flatten(end_dim=-2).transpose(0,1)
res = 0
for c in range(sel.shape[-1]):
gmats = []
for i in range(n_experts):
mask = sel[:, c] != i
sel_my = torch.masked_fill(x, mask[None], 0)
grads_my = torch.masked_fill(grads[..., c, :], mask[:, None], 0)
gmats.append(sel_my @ grads_my)
res += torch.stack(gmats)
return res
grad_out = torch.randn(*out.shape, dtype=dtype, device=device)
keys_ref = keys.detach().clone().requires_grad_(True)
testvec_ref = testvec.detach().clone().requires_grad_(True)
w_ref = w.detach().clone().requires_grad_(True)
sel = cvmm_prepare_sel2(sel_raw, w_ref)
print("CVMM hack")
out_ref = cvmm_hack(testvec_ref, sel, keys_ref)
out_ref.backward(grad_out)
keys_full = keys.detach().clone().requires_grad_(True)
testvec_full = testvec.detach().clone().requires_grad_(True)
w_full = w.detach().clone().requires_grad_(True)
sel = cvmm_prepare_sel2(sel_raw, w_full)
print("CVMM full")
out_full = cvmm(testvec_full, sel, keys_full)
out_full.backward(grad_out)
if torch.allclose(keys_ref.grad, keys_full.grad, atol=1e-2, rtol=0):
print("✅ Multi-output: Triton weight grad ok")
else:
print("❌ Multi-output: Triton weight grad not ok")
if torch.allclose(testvec_ref.grad, testvec_full.grad, atol=1e-2, rtol=0):
print("✅ Multi-output: Triton input grad ok")
else:
print("❌ Multi-output: Triton input grad not ok")
if torch.allclose(w_ref.grad, w_full.grad, atol=1e-2, rtol=0):
print("✅ Multi-output: Triton reduction weight grad ok")
else:
print("❌ Multi-output: Triton reduction weight grad not ok")
# g = cvmm_triton_backward(testvec, sel.sel_index, sel.sel, grad_out, keys.shape[0], keys.dtype, False, out_index=sel.out_index)
# gref = cvmm_ref_backward2(testvec, sel, grad_out, keys.shape[0])
# if torch.allclose(g, gref, atol=1e-2, rtol=0):
# print("✅ Multi-output: Triton grad ok")
# else:
# print("❌ Multi-output: Triton grad not ok")
from torch.autograd import gradcheck
assert gradcheck(cvmm, (testvec, sel, keys), eps=1e-2, atol=1e-4)
print("Gradcheck ok.")
def test_module():
from torch.autograd import gradcheck
n_experts = 4
n_channels = 64
expert_size = 64
bs = 32
device = torch.device("cuda")
dtype = torch.float32
atol_tresh = 1e-2
keys = torch.nn.Parameter(torch.randn(n_experts, n_channels, expert_size, dtype=dtype, device=device))
testvec = torch.randn(bs, n_channels, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device=device)
test_grad = torch.randn(bs, expert_size, dtype=dtype, device=device)
olist = []
for b in range(bs):
olist.append(testvec[b:b+1] @ keys[sel[b]])
ref = torch.cat(olist, dim=0)
out = cvmm(testvec, sel, keys)
assert torch.allclose(ref, out, atol=atol_tresh, rtol=0)
print("Forward ok.")
keys = keys.requires_grad_(True)
testvec = testvec.requires_grad_(True)
assert gradcheck(cvmm, (testvec, sel, keys), eps=1e-2, atol=atol_tresh, rtol=0)
print("Backward ok.")
test_wsum()
# test_module()
def cwmm_ref(x: torch.Tensor, sel: Union[torch.Tensor, CVMMSel], keys: torch.Tensor):
if isinstance(sel, CVMMSel):
sel = sel.raw_sel
olist = []
for b in range(x.shape[0]):
olist.append(x[b:b+1] @ keys[sel[b]])
return torch.cat(olist, dim=0)
def test_forward():
torch.manual_seed(0)
n_experts = 8
n_channels = 64
expert_size = 64
bs = 64
device = torch.device("cuda")
dtype = torch.float16
atol_tresh = 1e-2
keys = torch.nn.Parameter(torch.randn(n_experts, n_channels, expert_size, dtype=dtype, device=device))
keys = keys.transpose(1,2).contiguous().transpose(1,2)
testvec = torch.randn(bs, n_channels, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device=device)
exp_sel = torch.distributions.Geometric(0.02).sample((bs,)).to(device).clamp(max=n_experts-1).int()
exp_sel = torch.randperm(n_experts, device=device, dtype=torch.int32)[exp_sel]
sel = exp_sel
# sel = torch.tensor([0, 1], dtype=torch.int32, device=device)
sel = cvmm_prepare_sel(sel, keys.shape[0])
ref = cwmm_ref(testvec, sel, keys)
out = cvmm_triton(testvec, sel.sel_index, sel.sel, keys, dtype)
if torch.allclose(out, ref, atol=1e-2, rtol=0):
print("✅ Triton and Torch match")
else:
print("❌ Triton and Torch differ")
def do_benchmark(K, N):
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['bsz'], # Argument names to use as an x-axis for the plot
x_vals=[
2048 * (i+2) for i in range(0, 32, 8)
]+[131072], # Different possible values for `x_name`
line_arg='provider', # Argument name whose value corresponds to a different line in the plot
# Possible values for `line_arg`
line_vals=['cublas', 'triton'],
# Label name for the lines
line_names=["cuBLAS", "Triton"],
# Line styles
styles=[('green', '-'), ('blue', '-')],
ylabel="TFLOPS", # Label name for the y-axis
plot_name="matmul-performance", # Name for the plot, used also as a file name for saving the plot.
args={},
)
)
def benchmark(bsz, provider):
# a = torch.randn((M, K), device='cuda', dtype=torch.float16)
# b = torch.randn((K, N), device='cuda', dtype=torch.float16)
dtype = torch.float32 if provider == 'cuda' else torch.float16
keys = torch.nn.Parameter(torch.randn(n_experts, K, N, dtype=dtype, device=device))
testvec = torch.randn(bsz, K, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bsz,), dtype=torch.int32, device=device)
keys = keys.transpose(1,2).contiguous().transpose(1,2)
# exp_sel = torch.distributions.Geometric(0.02).sample((bsz,)).to(device).clamp(max=n_experts-1).int()
# exp_sel = torch.randperm(n_experts, device=device, dtype=torch.int32)[exp_sel]
# sel = exp_sel
sel = cvmm_prepare_sel(sel, keys.shape[0])
# ref = cwmm_ref(testvec, sel, keys)
# out = cvmm_triton(testvec, sel, keys)
# if torch.allclose(out, ref, atol=5e-2, rtol=0):
# print("✅ Triton and Torch match")
# else:
# print("❌ Triton and Torch differ")
quantiles = [0.5, 0.2, 0.8]
if provider == 'cublas':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(testvec, keys[0]), quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: cvmm_triton(testvec, sel.sel_index, sel.sel, keys, dtype), quantiles=quantiles)
# if provider == 'cuda':
# ms, min_ms, max_ms = triton.testing.do_bench(lambda: cvmm(testvec, sel, keys), quantiles=quantiles)
perf = lambda ms: 2 * bsz * N * K * 1e-12 / (ms * 1e-3)
# perf = lambda x: x
return perf(ms), perf(max_ms), perf(min_ms)
print(f"Benchmark: [bsz, {K}] @ [{n_experts}, {K}, {N}] -> [bsz, {N}]")
benchmark.run(show_plots=True, print_data=True)
do_benchmark(128, 512)
do_benchmark(256, 512)
do_benchmark(512, 128)
test_forward()
def test_backward():
def cvmm_ref_backward(x: torch.Tensor, sel: CVMMSel, grads: torch.Tensor, n_experts: int):
sel = sel.raw_sel
x = x.flatten(end_dim=-2).transpose(0,1)
gmats = []
for i in range(n_experts):
mask = sel != i
sel_my = torch.masked_fill(x, mask[None], 0)
grads_my = torch.masked_fill(grads, mask[:, None], 0)
gmats.append(sel_my @ grads_my)
return torch.stack(gmats)
torch.manual_seed(0)
n_experts = 8
n_channels = 64
expert_size = 64
bs = 64
# n_channels = 8
# expert_size = 8
# n_experts = 2
# bs=2
device = torch.device("cuda")
dtype = torch.float16
atol_tresh = 1e-2
testvec = torch.randn(bs, n_channels, dtype=dtype, device=device)
grads = torch.randn(bs, expert_size, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device=device)
# exp_sel = torch.distributions.Geometric(0.02).sample((bs,)).to(device).clamp(max=n_experts-1).int()
# exp_sel = torch.randperm(n_experts, device=device, dtype=torch.int32)[exp_sel]
# sel = exp_sel
# sel = torch.tensor([0, 1], dtype=torch.int32, device=device)
# sel = torch.tensor([1, 0], dtype=torch.int32, device=device)
cvmmsel = cvmm_prepare_sel(sel, n_experts)
ref = cvmm_ref_backward(testvec, cvmmsel, grads, n_experts)
out = cvmm_triton_backward(testvec, cvmmsel.sel_index, cvmmsel.sel, grads, n_experts, key_dtype=ref.dtype, op_float16=dtype==torch.float16)
if torch.allclose(out, ref, atol=1e-2, rtol=0):
print("✅ Triton and Torch match")
else:
print("❌ Triton and Torch differ")
def do_benchmark(K, N):
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['bsz'], # Argument names to use as an x-axis for the plot
x_vals=[
2048 * (i+2) for i in range(0, 32, 8)
]+[131072], # Different possible values for `x_name`
line_arg='provider', # Argument name whose value corresponds to a different line in the plot
# Possible values for `line_arg`
line_vals=['cublas', 'triton'],
# Label name for the lines
line_names=["cuBLAS", "Triton"],
# Line styles
styles=[('green', '-'), ('blue', '-')],
ylabel="TFLOPS", # Label name for the y-axis
plot_name="matmul-performance", # Name for the plot, used also as a file name for saving the plot.
args={},
)
)
def benchmark(bsz, provider):
# a = torch.randn((M, K), device='cuda', dtype=torch.float16)
# b = torch.randn((K, N), device='cuda', dtype=torch.float16)
dtype = torch.float32 if provider == 'cuda' else torch.float16
# dtype = torch.float32
sel = torch.randint(0, n_experts, (bs,), dtype=torch.int32, device=device)
testvec = torch.randn(bsz, K, dtype=dtype, device=device)
grads = torch.randn(bsz, N, dtype=dtype, device=device)
sel = torch.randint(0, n_experts, (bsz,), dtype=torch.int32, device=device)
exp_sel = torch.distributions.Geometric(0.02).sample((bsz,)).to(device).clamp(max=n_experts-1).int()
exp_sel = torch.randperm(n_experts, device=device, dtype=torch.int32)[exp_sel]
sel = exp_sel
sel = cvmm_prepare_sel(sel, n_experts)
# ref = cvmm_ref_backward(testvec, sel, grads, n_experts)
# out = cvmm_triton_backward(testvec, sel, grads, n_experts)
# if torch.allclose(out, ref, atol=5e-2, rtol=0):
# print("✅ Triton and Torch match")
# else:
# print("❌ Triton and Torch differ")
quantiles = [0.5, 0.2, 0.8]
if provider == 'cublas':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(testvec.transpose(0,1), grads), quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: cvmm_triton_backward(testvec, sel.sel_index, sel.sel, grads, n_experts, key_dtype=ref.dtype, op_float16=dtype==torch.float16), quantiles=quantiles)
# if provider == 'cuda':
# ms, min_ms, max_ms = triton.testing.do_bench(lambda: cvmm(testvec, sel, keys), quantiles=quantiles)
perf = lambda ms: 2 * bsz * N * K * 1e-12 / (ms * 1e-3)
# perf = lambda x: x
return perf(ms), perf(max_ms), perf(min_ms)
print(f"Benchmark: [bsz, {K}] @ [{n_experts}, {K}, {N}] -> [bsz, {N}]")
benchmark.run(show_plots=True, print_data=True)
do_benchmark(128, 512)
do_benchmark(256, 512)
do_benchmark(512, 128)
test_backward()
# do_benchmark(1024, 80)
# do_benchmark(80, 1024)
# do_benchmark(512, 128)
|
neuro-ml/kerops
|
kerops/kernels/dw_conv.py
|
https://github.com/neuro-ml/kerops/blob/6e2128bd77d894a5faf33cfd4ed3e2e7a09d99b0/kerops/kernels/dw_conv.py
|
import triton
import triton.language as tl
@triton.jit
def _DWConv_cl3d_impl(
input_ptr,
weight_ptr,
output_ptr,
H,
W,
D,
H_stride,
W_stride,
ACCTYPE: tl.constexpr,
channels: tl.constexpr,
D_block: tl.constexpr,
):
H_cell = tl.program_id(0)
W_cell = tl.program_id(1)
D_cell = tl.program_id(2)
output_ptr += D_cell * D_block * channels
input_ptr += D_cell * D_block * channels
channels_offset = tl.arange(0, channels)
channels_offset = tl.max_contiguous(tl.multiple_of(channels_offset, channels), channels)
d_offset = tl.arange(0, D_block)
near_offset = tl.arange(0, 4) - 1
offset = d_offset[:, None, None] * channels + channels_offset[None, None, :] + near_offset[None, :, None] * channels
mask = d_offset[:, None, None] + near_offset[None, :, None] < D - D_block * D_cell
mask = mask and (d_offset[:, None, None] + near_offset[None, :, None] >= 0 - D_block * D_cell)
mask = mask and (near_offset[None, :, None] != 2)
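# near_offset covers the depth neighbours {-1, 0, +1}, padded to 4 entries so the block keeps a
# power-of-two size; the extra +2 offset is masked out here, and the 4th weight tap below.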
weight_offset = channels_offset[None, None, :] + tl.arange(0, 4)[None, :, None] * channels
weight_mask = tl.arange(0, 4)[None, :, None] != 3
weight_h0_w0 = tl.load(weight_ptr + weight_offset, mask=weight_mask, other=0.0)
weight_h0_w1 = tl.load((weight_ptr + 3 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h0_w2 = tl.load((weight_ptr + 6 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h1_w0 = tl.load((weight_ptr + 9 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h1_w1 = tl.load((weight_ptr + 12 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h1_w2 = tl.load((weight_ptr + 15 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h2_w0 = tl.load((weight_ptr + 18 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h2_w1 = tl.load((weight_ptr + 21 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h2_w2 = tl.load((weight_ptr + 24 * channels) + weight_offset, mask=weight_mask, other=0.0)
h0_w0 = tl.zeros([D_block, channels], dtype=ACCTYPE)
h0_w1 = tl.zeros([D_block, channels], dtype=ACCTYPE)
h1_w0 = tl.zeros([D_block, channels], dtype=ACCTYPE)
h1_w1 = tl.zeros([D_block, channels], dtype=ACCTYPE)
out_mask = d_offset[:, None] < D - D_block * D_cell
out_offset = d_offset[:, None] * channels + channels_offset[None, :]
H1_store = 2 * H_cell + 1 < H
W1_store = 2 * W_cell + 1 < W
load_all = (H_cell > 0 and H_cell < tl.cdiv(H, 2) - 1) and (W_cell > 0 and W_cell < tl.cdiv(W, 2) - 1)
i = -1
j = -1
load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 * W_cell + j < W and 2 * W_cell + j >= 0)
tmp_input_ptr = input_ptr + (2 * H_cell + i) * H_stride + (2 * W_cell + j) * W_stride
x = tl.load(tmp_input_ptr + offset, mask=(load_all or load_next) and mask)
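# Software-pipelined loop over the 4x4 grid of (H, W) neighbour offsets (i, j in {-1, 0, 1, 2}):
# each step accumulates the currently loaded tile into the 2x2 output pixels owned by this
# program, then prefetches the tile for step k+1 so loads overlap with compute.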
for k in tl.static_range(0, 16):
if k == 0:
h0_w0 += tl.sum(x * weight_h0_w0, axis=1)
elif k == 1:
h0_w0 += tl.sum(x * weight_h1_w0, axis=1)
h1_w0 += tl.sum(x * weight_h0_w0, axis=1)
elif k == 2:
h0_w0 += tl.sum(x * weight_h2_w0, axis=1)
h1_w0 += tl.sum(x * weight_h1_w0, axis=1)
elif k == 3:
h1_w0 += tl.sum(x * weight_h2_w0, axis=1)
elif k == 4:
h0_w0 += tl.sum(x * weight_h0_w1, axis=1)
h0_w1 += tl.sum(x * weight_h0_w0, axis=1)
elif k == 5:
h0_w0 += tl.sum(x * weight_h1_w1, axis=1)
h0_w1 += tl.sum(x * weight_h1_w0, axis=1)
h1_w0 += tl.sum(x * weight_h0_w1, axis=1)
h1_w1 += tl.sum(x * weight_h0_w0, axis=1)
elif k == 6:
h0_w0 += tl.sum(x * weight_h2_w1, axis=1)
h0_w1 += tl.sum(x * weight_h2_w0, axis=1)
h1_w0 += tl.sum(x * weight_h1_w1, axis=1)
h1_w1 += tl.sum(x * weight_h1_w0, axis=1)
elif k == 7:
h1_w0 += tl.sum(x * weight_h2_w1, axis=1)
h1_w1 += tl.sum(x * weight_h2_w0, axis=1)
elif k == 8:
h0_w0 += tl.sum(x * weight_h0_w2, axis=1)
h0_w1 += tl.sum(x * weight_h0_w1, axis=1)
elif k == 9:
h0_w0 += tl.sum(x * weight_h1_w2, axis=1)
h0_w1 += tl.sum(x * weight_h1_w1, axis=1)
h1_w0 += tl.sum(x * weight_h0_w2, axis=1)
h1_w1 += tl.sum(x * weight_h0_w1, axis=1)
elif k == 10:
h0_w0 += tl.sum(x * weight_h2_w2, axis=1)
h0_w1 += tl.sum(x * weight_h2_w1, axis=1)
h1_w0 += tl.sum(x * weight_h1_w2, axis=1)
h1_w1 += tl.sum(x * weight_h1_w1, axis=1)
elif k == 11:
h1_w0 += tl.sum(x * weight_h2_w2, axis=1)
h1_w1 += tl.sum(x * weight_h2_w1, axis=1)
elif k == 12:
h0_w1 += tl.sum(x * weight_h0_w2, axis=1)
elif k == 13:
h0_w1 += tl.sum(x * weight_h1_w2, axis=1)
h1_w1 += tl.sum(x * weight_h0_w2, axis=1)
elif k == 14:
h0_w1 += tl.sum(x * weight_h2_w2, axis=1)
h1_w1 += tl.sum(x * weight_h1_w2, axis=1)
else:
h1_w1 += tl.sum(x * weight_h2_w2, axis=1)
k_ = k + 1
i = (k_ % 4) - 1
j = (k_ // 4) - 1
load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 * W_cell + j < W and 2 * W_cell + j >= 0)
tmp_input_ptr = input_ptr + (2 * H_cell + i) * H_stride + (2 * W_cell + j) * W_stride
x = tl.load(tmp_input_ptr + offset, mask=(load_all or load_next) and mask)
tmp_output_ptr = output_ptr + (2 * H_cell) * H_stride + (2 * W_cell) * W_stride
tl.store(tmp_output_ptr + out_offset, h0_w0, mask=out_mask)
tmp_output_ptr = output_ptr + (2 * H_cell) * H_stride + (2 * W_cell + 1) * W_stride
tl.store(tmp_output_ptr + out_offset, h0_w1, mask=out_mask and W1_store)
tmp_output_ptr = output_ptr + (2 * H_cell + 1) * H_stride + (2 * W_cell) * W_stride
tl.store(tmp_output_ptr + out_offset, h1_w0, mask=out_mask and H1_store)
tmp_output_ptr = output_ptr + (2 * H_cell + 1) * H_stride + (2 * W_cell + 1) * W_stride
tl.store(tmp_output_ptr + out_offset, h1_w1, mask=out_mask and (H1_store and W1_store))
# TODO: single kernel for both grad_X and grad_W
@triton.jit
def _DWConv_wgrad_cl3d_impl(
grad_ptr,
input_ptr,
weight_grad_ptr,
H,
W,
D,
H_stride,
W_stride,
ACCTYPE: tl.constexpr,
channels: tl.constexpr,
D_block: tl.constexpr,
WD_grid,
D_grid,
delta_H_grid,
ILP: tl.constexpr,
):
H_cell = tl.program_id(0)
W_cell = tl.program_id(1)
D_cell = tl.program_id(2)
input_ptr += D_cell * D_block * channels
grad_ptr += D_cell * D_block * channels
weight_grad_ptr += (H_cell * WD_grid + W_cell * D_grid + D_cell) * 27 * channels
channels_offset = tl.arange(0, channels)
channels_offset = tl.max_contiguous(tl.multiple_of(channels_offset, channels), channels)
d_offset = tl.arange(0, D_block)
near_offset = tl.arange(0, 4) - 1
offset = d_offset[None, None, :] * channels + channels_offset[None, :, None] + near_offset[:, None, None] * channels
mask = d_offset[None, None, :] + near_offset[:, None, None] < D - D_block * D_cell
mask = mask and (d_offset[None, None, :] + near_offset[:, None, None] >= 0 - D_block * D_cell)
mask = mask and (near_offset[:, None, None] != 2)
grad_offset = d_offset[None, :] * channels + channels_offset[:, None]
grad_mask = d_offset[None, :] < D - D_block * D_cell
h0_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h0_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h0_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
gradw_offset = tl.arange(0, 4)[:, None] * channels + channels_offset[None, :]
gradw_mask = near_offset[:, None] != 2
for ilp in tl.static_range(0, ILP):
H0_load = 2 * H_cell < H
H1_load = 2 * H_cell + 1 < H
W1_load = 2 * W_cell + 1 < W
tmp_input_ptr = input_ptr + 2 * H_cell * H_stride + 2 * W_cell * W_stride
x_h0_w0 = tl.load(tmp_input_ptr + offset, mask=mask and H0_load)
tmp_input_ptr = input_ptr + (2 * H_cell + 1) * H_stride + 2 * W_cell * W_stride
x_h1_w0 = tl.load(tmp_input_ptr + offset, mask=mask and H1_load)
tmp_input_ptr = input_ptr + 2 * H_cell * H_stride + (2 * W_cell + 1) * W_stride
x_h0_w1 = tl.load(tmp_input_ptr + offset, mask=mask and (W1_load and H0_load))
tmp_input_ptr = input_ptr + (2 * H_cell + 1) * H_stride + (2 * W_cell + 1) * W_stride
x_h1_w1 = tl.load(tmp_input_ptr + offset, mask=mask and (W1_load and H1_load))
for k in tl.static_range(0, 16):
i = (k % 4) - 1
j = (k // 4) - 1
load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 * W_cell + j < W and 2 * W_cell + j >= 0)
tmp_grad_ptr = grad_ptr + (2 * H_cell + i) * H_stride + (2 * W_cell + j) * W_stride
if load_next:
grad = tl.load(tmp_grad_ptr + grad_offset, mask=grad_mask, other=0.0)[None]
if i == -1 and j == -1:
h2_w2 += tl.sum(grad * x_h0_w0, axis=2)
elif i == -1 and j == 0:
h2_w1 += tl.sum(grad * x_h0_w0, axis=2)
h2_w2 += tl.sum(grad * x_h0_w1, axis=2)
elif i == -1 and j == 1:
h2_w0 += tl.sum(grad * x_h0_w0, axis=2)
h2_w1 += tl.sum(grad * x_h0_w1, axis=2)
elif i == -1 and j == 2:
h2_w0 += tl.sum(grad * x_h0_w1, axis=2)
elif i == 0 and j == -1:
h1_w2 += tl.sum(grad * x_h0_w0, axis=2)
h2_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 0 and j == 0:
h1_w1 += tl.sum(grad * x_h0_w0, axis=2)
h2_w1 += tl.sum(grad * x_h1_w0, axis=2)
h1_w2 += tl.sum(grad * x_h0_w1, axis=2)
h2_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 0 and j == 1:
h1_w0 += tl.sum(grad * x_h0_w0, axis=2)
h2_w0 += tl.sum(grad * x_h1_w0, axis=2)
h1_w1 += tl.sum(grad * x_h0_w1, axis=2)
h2_w1 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 0 and j == 2:
h1_w0 += tl.sum(grad * x_h0_w1, axis=2)
h2_w0 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == -1:
h0_w2 += tl.sum(grad * x_h0_w0, axis=2)
h1_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 1 and j == 0:
h0_w1 += tl.sum(grad * x_h0_w0, axis=2)
h1_w1 += tl.sum(grad * x_h1_w0, axis=2)
h0_w2 += tl.sum(grad * x_h0_w1, axis=2)
h1_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == 1:
h0_w0 += tl.sum(grad * x_h0_w0, axis=2)
h1_w0 += tl.sum(grad * x_h1_w0, axis=2)
h0_w1 += tl.sum(grad * x_h0_w1, axis=2)
h1_w1 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == 2:
h0_w0 += tl.sum(grad * x_h0_w1, axis=2)
h1_w0 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 2 and j == -1:
h0_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 2 and j == 0:
h0_w1 += tl.sum(grad * x_h1_w0, axis=2)
h0_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 2 and j == 1:
h0_w0 += tl.sum(grad * x_h1_w0, axis=2)
h0_w1 += tl.sum(grad * x_h1_w1, axis=2)
else:
h0_w0 += tl.sum(grad * x_h1_w1, axis=2)
H_cell += delta_H_grid
tl.store(weight_grad_ptr + gradw_offset, h0_w0, mask=gradw_mask)
tl.store((weight_grad_ptr + 3 * channels) + gradw_offset, h0_w1, mask=gradw_mask)
tl.store((weight_grad_ptr + 6 * channels) + gradw_offset, h0_w2, mask=gradw_mask)
tl.store((weight_grad_ptr + 9 * channels) + gradw_offset, h1_w0, mask=gradw_mask)
tl.store((weight_grad_ptr + 12 * channels) + gradw_offset, h1_w1, mask=gradw_mask)
tl.store((weight_grad_ptr + 15 * channels) + gradw_offset, h1_w2, mask=gradw_mask)
tl.store((weight_grad_ptr + 18 * channels) + gradw_offset, h2_w0, mask=gradw_mask)
tl.store((weight_grad_ptr + 21 * channels) + gradw_offset, h2_w1, mask=gradw_mask)
tl.store((weight_grad_ptr + 24 * channels) + gradw_offset, h2_w2, mask=gradw_mask)
|
@triton.jit
def _DWConv_cl3d_impl(
input_ptr,
weight_ptr,
output_ptr,
H,
W,
D,
H_stride,
W_stride,
ACCTYPE: tl.constexpr,
channels: tl.constexpr,
D_block: tl.constexpr,
):
H_cell = tl.program_id(0)
W_cell = tl.program_id(1)
D_cell = tl.program_id(2)
output_ptr += D_cell * D_block * channels
input_ptr += D_cell * D_block * channels
channels_offset = tl.arange(0, channels)
channels_offset = tl.max_contiguous(tl.multiple_of(channels_offset, channels), channels)
d_offset = tl.arange(0, D_block)
near_offset = tl.arange(0, 4) - 1
offset = d_offset[:, None, None] * channels + channels_offset[None, None, :] + near_offset[None, :, None] * channels
mask = d_offset[:, None, None] + near_offset[None, :, None] < D - D_block * D_cell
mask = mask and (d_offset[:, None, None] + near_offset[None, :, None] >= 0 - D_block * D_cell)
mask = mask and (near_offset[None, :, None] != 2)
weight_offset = channels_offset[None, None, :] + tl.arange(0, 4)[None, :, None] * channels
weight_mask = tl.arange(0, 4)[None, :, None] != 3
weight_h0_w0 = tl.load(weight_ptr + weight_offset, mask=weight_mask, other=0.0)
weight_h0_w1 = tl.load((weight_ptr + 3 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h0_w2 = tl.load((weight_ptr + 6 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h1_w0 = tl.load((weight_ptr + 9 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h1_w1 = tl.load((weight_ptr + 12 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h1_w2 = tl.load((weight_ptr + 15 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h2_w0 = tl.load((weight_ptr + 18 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h2_w1 = tl.load((weight_ptr + 21 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h2_w2 = tl.load((weight_ptr + 24 * channels) + weight_offset, mask=weight_mask, other=0.0)
h0_w0 = tl.zeros([D_block, channels], dtype=ACCTYPE)
h0_w1 = tl.zeros([D_block, channels], dtype=ACCTYPE)
h1_w0 = tl.zeros([D_block, channels], dtype=ACCTYPE)
h1_w1 = tl.zeros([D_block, channels], dtype=ACCTYPE)
out_mask = d_offset[:, None] < D - D_block * D_cell
out_offset = d_offset[:, None] * channels + channels_offset[None, :]
H1_store = 2 * H_cell + 1 < H
W1_store = 2 * W_cell + 1 < W
load_all = (H_cell > 0 and H_cell < tl.cdiv(H, 2) - 1) and (W_cell > 0 and W_cell < tl.cdiv(W, 2) - 1)
i = -1
j = -1
load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 * W_cell + j < W and 2 * W_cell + j >= 0)
tmp_input_ptr = input_ptr + (2 * H_cell + i) * H_stride + (2 * W_cell + j) * W_stride
x = tl.load(tmp_input_ptr + offset, mask=(load_all or load_next) and mask)
for k in tl.static_range(0, 16):
if k == 0:
h0_w0 += tl.sum(x * weight_h0_w0, axis=1)
elif k == 1:
h0_w0 += tl.sum(x * weight_h1_w0, axis=1)
h1_w0 += tl.sum(x * weight_h0_w0, axis=1)
elif k == 2:
h0_w0 += tl.sum(x * weight_h2_w0, axis=1)
h1_w0 += tl.sum(x * weight_h1_w0, axis=1)
elif k == 3:
h1_w0 += tl.sum(x * weight_h2_w0, axis=1)
elif k == 4:
h0_w0 += tl.sum(x * weight_h0_w1, axis=1)
h0_w1 += tl.sum(x * weight_h0_w0, axis=1)
elif k == 5:
h0_w0 += tl.sum(x * weight_h1_w1, axis=1)
h0_w1 += tl.sum(x * weight_h1_w0, axis=1)
h1_w0 += tl.sum(x * weight_h0_w1, axis=1)
h1_w1 += tl.sum(x * weight_h0_w0, axis=1)
elif k == 6:
h0_w0 += tl.sum(x * weight_h2_w1, axis=1)
h0_w1 += tl.sum(x * weight_h2_w0, axis=1)
h1_w0 += tl.sum(x * weight_h1_w1, axis=1)
h1_w1 += tl.sum(x * weight_h1_w0, axis=1)
elif k == 7:
h1_w0 += tl.sum(x * weight_h2_w1, axis=1)
h1_w1 += tl.sum(x * weight_h2_w0, axis=1)
elif k == 8:
h0_w0 += tl.sum(x * weight_h0_w2, axis=1)
h0_w1 += tl.sum(x * weight_h0_w1, axis=1)
elif k == 9:
h0_w0 += tl.sum(x * weight_h1_w2, axis=1)
h0_w1 += tl.sum(x * weight_h1_w1, axis=1)
h1_w0 += tl.sum(x * weight_h0_w2, axis=1)
h1_w1 += tl.sum(x * weight_h0_w1, axis=1)
elif k == 10:
h0_w0 += tl.sum(x * weight_h2_w2, axis=1)
h0_w1 += tl.sum(x * weight_h2_w1, axis=1)
h1_w0 += tl.sum(x * weight_h1_w2, axis=1)
h1_w1 += tl.sum(x * weight_h1_w1, axis=1)
elif k == 11:
h1_w0 += tl.sum(x * weight_h2_w2, axis=1)
h1_w1 += tl.sum(x * weight_h2_w1, axis=1)
elif k == 12:
h0_w1 += tl.sum(x * weight_h0_w2, axis=1)
elif k == 13:
h0_w1 += tl.sum(x * weight_h1_w2, axis=1)
h1_w1 += tl.sum(x * weight_h0_w2, axis=1)
elif k == 14:
h0_w1 += tl.sum(x * weight_h2_w2, axis=1)
h1_w1 += tl.sum(x * weight_h1_w2, axis=1)
else:
h1_w1 += tl.sum(x * weight_h2_w2, axis=1)
k_ = k + 1
i = (k_ % 4) - 1
j = (k_ // 4) - 1
load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 * W_cell + j < W and 2 * W_cell + j >= 0)
tmp_input_ptr = input_ptr + (2 * H_cell + i) * H_stride + (2 * W_cell + j) * W_stride
x = tl.load(tmp_input_ptr + offset, mask=(load_all or load_next) and mask)
tmp_output_ptr = output_ptr + (2 * H_cell) * H_stride + (2 * W_cell) * W_stride
tl.store(tmp_output_ptr + out_offset, h0_w0, mask=out_mask)
tmp_output_ptr = output_ptr + (2 * H_cell) * H_stride + (2 * W_cell + 1) * W_stride
tl.store(tmp_output_ptr + out_offset, h0_w1, mask=out_mask and W1_store)
tmp_output_ptr = output_ptr + (2 * H_cell + 1) * H_stride + (2 * W_cell) * W_stride
tl.store(tmp_output_ptr + out_offset, h1_w0, mask=out_mask and H1_store)
tmp_output_ptr = output_ptr + (2 * H_cell + 1) * H_stride + (2 * W_cell + 1) * W_stride
tl.store(tmp_output_ptr + out_offset, h1_w1, mask=out_mask and (H1_store and W1_store))
# TODO: single kernel for both grad_X and grad_W
|
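A minimal sketch of how the forward kernel above could be launched, assuming a single channels-last volume of shape (H, W, D, C) with C and D_block powers of two (tl.arange requires this) and a flat (27, C) weight holding the 3x3x3 taps; the wrapper name and shapes are illustrative assumptions, not taken from the repository:
import torch
import triton
import triton.language as tl

def dwconv_cl3d_forward(x: torch.Tensor, weight: torch.Tensor, D_block: int = 16) -> torch.Tensor:
    # x: (H, W, D, C) contiguous channels-last volume; weight: (27, C) row-major 3x3x3 taps
    H, W, D, C = x.shape
    out = torch.empty_like(x)
    # one program per 2x2 spatial tile and per D_block-deep slab, matching the kernel's program ids
    grid = (triton.cdiv(H, 2), triton.cdiv(W, 2), triton.cdiv(D, D_block))
    _DWConv_cl3d_impl[grid](
        x, weight, out,
        H, W, D,
        x.stride(0), x.stride(1),  # H_stride, W_stride in elements
        ACCTYPE=tl.float32, channels=C, D_block=D_block,
    )
    return out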
neuro-ml/kerops
|
kerops/kernels/dw_conv.py
|
https://github.com/neuro-ml/kerops/blob/6e2128bd77d894a5faf33cfd4ed3e2e7a09d99b0/kerops/kernels/dw_conv.py
|
import triton
import triton.language as tl
@triton.jit
def _DWConv_cl3d_impl(
input_ptr,
weight_ptr,
output_ptr,
H,
W,
D,
H_stride,
W_stride,
ACCTYPE: tl.constexpr,
channels: tl.constexpr,
D_block: tl.constexpr,
):
H_cell = tl.program_id(0)
W_cell = tl.program_id(1)
D_cell = tl.program_id(2)
output_ptr += D_cell * D_block * channels
input_ptr += D_cell * D_block * channels
channels_offset = tl.arange(0, channels)
channels_offset = tl.max_contiguous(tl.multiple_of(channels_offset, channels), channels)
d_offset = tl.arange(0, D_block)
near_offset = tl.arange(0, 4) - 1
offset = d_offset[:, None, None] * channels + channels_offset[None, None, :] + near_offset[None, :, None] * channels
mask = d_offset[:, None, None] + near_offset[None, :, None] < D - D_block * D_cell
mask = mask and (d_offset[:, None, None] + near_offset[None, :, None] >= 0 - D_block * D_cell)
mask = mask and (near_offset[None, :, None] != 2)
weight_offset = channels_offset[None, None, :] + tl.arange(0, 4)[None, :, None] * channels
weight_mask = tl.arange(0, 4)[None, :, None] != 3
weight_h0_w0 = tl.load(weight_ptr + weight_offset, mask=weight_mask, other=0.0)
weight_h0_w1 = tl.load((weight_ptr + 3 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h0_w2 = tl.load((weight_ptr + 6 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h1_w0 = tl.load((weight_ptr + 9 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h1_w1 = tl.load((weight_ptr + 12 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h1_w2 = tl.load((weight_ptr + 15 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h2_w0 = tl.load((weight_ptr + 18 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h2_w1 = tl.load((weight_ptr + 21 * channels) + weight_offset, mask=weight_mask, other=0.0)
weight_h2_w2 = tl.load((weight_ptr + 24 * channels) + weight_offset, mask=weight_mask, other=0.0)
h0_w0 = tl.zeros([D_block, channels], dtype=ACCTYPE)
h0_w1 = tl.zeros([D_block, channels], dtype=ACCTYPE)
h1_w0 = tl.zeros([D_block, channels], dtype=ACCTYPE)
h1_w1 = tl.zeros([D_block, channels], dtype=ACCTYPE)
out_mask = d_offset[:, None] < D - D_block * D_cell
out_offset = d_offset[:, None] * channels + channels_offset[None, :]
H1_store = 2 * H_cell + 1 < H
W1_store = 2 * W_cell + 1 < W
load_all = (H_cell > 0 and H_cell < tl.cdiv(H, 2) - 1) and (W_cell > 0 and W_cell < tl.cdiv(W, 2) - 1)
i = -1
j = -1
load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 * W_cell + j < W and 2 * W_cell + j >= 0)
tmp_input_ptr = input_ptr + (2 * H_cell + i) * H_stride + (2 * W_cell + j) * W_stride
x = tl.load(tmp_input_ptr + offset, mask=(load_all or load_next) and mask)
for k in tl.static_range(0, 16):
if k == 0:
h0_w0 += tl.sum(x * weight_h0_w0, axis=1)
elif k == 1:
h0_w0 += tl.sum(x * weight_h1_w0, axis=1)
h1_w0 += tl.sum(x * weight_h0_w0, axis=1)
elif k == 2:
h0_w0 += tl.sum(x * weight_h2_w0, axis=1)
h1_w0 += tl.sum(x * weight_h1_w0, axis=1)
elif k == 3:
h1_w0 += tl.sum(x * weight_h2_w0, axis=1)
elif k == 4:
h0_w0 += tl.sum(x * weight_h0_w1, axis=1)
h0_w1 += tl.sum(x * weight_h0_w0, axis=1)
elif k == 5:
h0_w0 += tl.sum(x * weight_h1_w1, axis=1)
h0_w1 += tl.sum(x * weight_h1_w0, axis=1)
h1_w0 += tl.sum(x * weight_h0_w1, axis=1)
h1_w1 += tl.sum(x * weight_h0_w0, axis=1)
elif k == 6:
h0_w0 += tl.sum(x * weight_h2_w1, axis=1)
h0_w1 += tl.sum(x * weight_h2_w0, axis=1)
h1_w0 += tl.sum(x * weight_h1_w1, axis=1)
h1_w1 += tl.sum(x * weight_h1_w0, axis=1)
elif k == 7:
h1_w0 += tl.sum(x * weight_h2_w1, axis=1)
h1_w1 += tl.sum(x * weight_h2_w0, axis=1)
elif k == 8:
h0_w0 += tl.sum(x * weight_h0_w2, axis=1)
h0_w1 += tl.sum(x * weight_h0_w1, axis=1)
elif k == 9:
h0_w0 += tl.sum(x * weight_h1_w2, axis=1)
h0_w1 += tl.sum(x * weight_h1_w1, axis=1)
h1_w0 += tl.sum(x * weight_h0_w2, axis=1)
h1_w1 += tl.sum(x * weight_h0_w1, axis=1)
elif k == 10:
h0_w0 += tl.sum(x * weight_h2_w2, axis=1)
h0_w1 += tl.sum(x * weight_h2_w1, axis=1)
h1_w0 += tl.sum(x * weight_h1_w2, axis=1)
h1_w1 += tl.sum(x * weight_h1_w1, axis=1)
elif k == 11:
h1_w0 += tl.sum(x * weight_h2_w2, axis=1)
h1_w1 += tl.sum(x * weight_h2_w1, axis=1)
elif k == 12:
h0_w1 += tl.sum(x * weight_h0_w2, axis=1)
elif k == 13:
h0_w1 += tl.sum(x * weight_h1_w2, axis=1)
h1_w1 += tl.sum(x * weight_h0_w2, axis=1)
elif k == 14:
h0_w1 += tl.sum(x * weight_h2_w2, axis=1)
h1_w1 += tl.sum(x * weight_h1_w2, axis=1)
else:
h1_w1 += tl.sum(x * weight_h2_w2, axis=1)
k_ = k + 1
i = (k_ % 4) - 1
j = (k_ // 4) - 1
load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 * W_cell + j < W and 2 * W_cell + j >= 0)
tmp_input_ptr = input_ptr + (2 * H_cell + i) * H_stride + (2 * W_cell + j) * W_stride
x = tl.load(tmp_input_ptr + offset, mask=(load_all or load_next) and mask)
tmp_output_ptr = output_ptr + (2 * H_cell) * H_stride + (2 * W_cell) * W_stride
tl.store(tmp_output_ptr + out_offset, h0_w0, mask=out_mask)
tmp_output_ptr = output_ptr + (2 * H_cell) * H_stride + (2 * W_cell + 1) * W_stride
tl.store(tmp_output_ptr + out_offset, h0_w1, mask=out_mask and W1_store)
tmp_output_ptr = output_ptr + (2 * H_cell + 1) * H_stride + (2 * W_cell) * W_stride
tl.store(tmp_output_ptr + out_offset, h1_w0, mask=out_mask and H1_store)
tmp_output_ptr = output_ptr + (2 * H_cell + 1) * H_stride + (2 * W_cell + 1) * W_stride
tl.store(tmp_output_ptr + out_offset, h1_w1, mask=out_mask and (H1_store and W1_store))
# TODO: single kernel for both grad_X and grad_W
@triton.jit
def _DWConv_wgrad_cl3d_impl(
grad_ptr,
input_ptr,
weight_grad_ptr,
H,
W,
D,
H_stride,
W_stride,
ACCTYPE: tl.constexpr,
channels: tl.constexpr,
D_block: tl.constexpr,
WD_grid,
D_grid,
delta_H_grid,
ILP: tl.constexpr,
):
H_cell = tl.program_id(0)
W_cell = tl.program_id(1)
D_cell = tl.program_id(2)
input_ptr += D_cell * D_block * channels
grad_ptr += D_cell * D_block * channels
weight_grad_ptr += (H_cell * WD_grid + W_cell * D_grid + D_cell) * 27 * channels
channels_offset = tl.arange(0, channels)
channels_offset = tl.max_contiguous(tl.multiple_of(channels_offset, channels), channels)
d_offset = tl.arange(0, D_block)
near_offset = tl.arange(0, 4) - 1
offset = d_offset[None, None, :] * channels + channels_offset[None, :, None] + near_offset[:, None, None] * channels
mask = d_offset[None, None, :] + near_offset[:, None, None] < D - D_block * D_cell
mask = mask and (d_offset[None, None, :] + near_offset[:, None, None] >= 0 - D_block * D_cell)
mask = mask and (near_offset[:, None, None] != 2)
grad_offset = d_offset[None, :] * channels + channels_offset[:, None]
grad_mask = d_offset[None, :] < D - D_block * D_cell
h0_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h0_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h0_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
gradw_offset = tl.arange(0, 4)[:, None] * channels + channels_offset[None, :]
gradw_mask = near_offset[:, None] != 2
for ilp in tl.static_range(0, ILP):
H0_load = 2 * H_cell < H
H1_load = 2 * H_cell + 1 < H
W1_load = 2 * W_cell + 1 < W
tmp_input_ptr = input_ptr + 2 * H_cell * H_stride + 2 * W_cell * W_stride
x_h0_w0 = tl.load(tmp_input_ptr + offset, mask=mask and H0_load)
tmp_input_ptr = input_ptr + (2 * H_cell + 1) * H_stride + 2 * W_cell * W_stride
x_h1_w0 = tl.load(tmp_input_ptr + offset, mask=mask and H1_load)
tmp_input_ptr = input_ptr + 2 * H_cell * H_stride + (2 * W_cell + 1) * W_stride
x_h0_w1 = tl.load(tmp_input_ptr + offset, mask=mask and (W1_load and H0_load))
tmp_input_ptr = input_ptr + (2 * H_cell + 1) * H_stride + (2 * W_cell + 1) * W_stride
x_h1_w1 = tl.load(tmp_input_ptr + offset, mask=mask and (W1_load and H1_load))
for k in tl.static_range(0, 16):
i = (k % 4) - 1
j = (k // 4) - 1
load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 * W_cell + j < W and 2 * W_cell + j >= 0)
tmp_grad_ptr = grad_ptr + (2 * H_cell + i) * H_stride + (2 * W_cell + j) * W_stride
if load_next:
grad = tl.load(tmp_grad_ptr + grad_offset, mask=grad_mask, other=0.0)[None]
if i == -1 and j == -1:
h2_w2 += tl.sum(grad * x_h0_w0, axis=2)
elif i == -1 and j == 0:
h2_w1 += tl.sum(grad * x_h0_w0, axis=2)
h2_w2 += tl.sum(grad * x_h0_w1, axis=2)
elif i == -1 and j == 1:
h2_w0 += tl.sum(grad * x_h0_w0, axis=2)
h2_w1 += tl.sum(grad * x_h0_w1, axis=2)
elif i == -1 and j == 2:
h2_w0 += tl.sum(grad * x_h0_w1, axis=2)
elif i == 0 and j == -1:
h1_w2 += tl.sum(grad * x_h0_w0, axis=2)
h2_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 0 and j == 0:
h1_w1 += tl.sum(grad * x_h0_w0, axis=2)
h2_w1 += tl.sum(grad * x_h1_w0, axis=2)
h1_w2 += tl.sum(grad * x_h0_w1, axis=2)
h2_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 0 and j == 1:
h1_w0 += tl.sum(grad * x_h0_w0, axis=2)
h2_w0 += tl.sum(grad * x_h1_w0, axis=2)
h1_w1 += tl.sum(grad * x_h0_w1, axis=2)
h2_w1 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 0 and j == 2:
h1_w0 += tl.sum(grad * x_h0_w1, axis=2)
h2_w0 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == -1:
h0_w2 += tl.sum(grad * x_h0_w0, axis=2)
h1_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 1 and j == 0:
h0_w1 += tl.sum(grad * x_h0_w0, axis=2)
h1_w1 += tl.sum(grad * x_h1_w0, axis=2)
h0_w2 += tl.sum(grad * x_h0_w1, axis=2)
h1_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == 1:
h0_w0 += tl.sum(grad * x_h0_w0, axis=2)
h1_w0 += tl.sum(grad * x_h1_w0, axis=2)
h0_w1 += tl.sum(grad * x_h0_w1, axis=2)
h1_w1 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == 2:
h0_w0 += tl.sum(grad * x_h0_w1, axis=2)
h1_w0 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 2 and j == -1:
h0_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 2 and j == 0:
h0_w1 += tl.sum(grad * x_h1_w0, axis=2)
h0_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 2 and j == 1:
h0_w0 += tl.sum(grad * x_h1_w0, axis=2)
h0_w1 += tl.sum(grad * x_h1_w1, axis=2)
else:
h0_w0 += tl.sum(grad * x_h1_w1, axis=2)
H_cell += delta_H_grid
tl.store(weight_grad_ptr + gradw_offset, h0_w0, mask=gradw_mask)
tl.store((weight_grad_ptr + 3 * channels) + gradw_offset, h0_w1, mask=gradw_mask)
tl.store((weight_grad_ptr + 6 * channels) + gradw_offset, h0_w2, mask=gradw_mask)
tl.store((weight_grad_ptr + 9 * channels) + gradw_offset, h1_w0, mask=gradw_mask)
tl.store((weight_grad_ptr + 12 * channels) + gradw_offset, h1_w1, mask=gradw_mask)
tl.store((weight_grad_ptr + 15 * channels) + gradw_offset, h1_w2, mask=gradw_mask)
tl.store((weight_grad_ptr + 18 * channels) + gradw_offset, h2_w0, mask=gradw_mask)
tl.store((weight_grad_ptr + 21 * channels) + gradw_offset, h2_w1, mask=gradw_mask)
tl.store((weight_grad_ptr + 24 * channels) + gradw_offset, h2_w2, mask=gradw_mask)
|
@triton.jit
def _DWConv_wgrad_cl3d_impl(
grad_ptr,
input_ptr,
weight_grad_ptr,
H,
W,
D,
H_stride,
W_stride,
ACCTYPE: tl.constexpr,
channels: tl.constexpr,
D_block: tl.constexpr,
WD_grid,
D_grid,
delta_H_grid,
ILP: tl.constexpr,
):
H_cell = tl.program_id(0)
W_cell = tl.program_id(1)
D_cell = tl.program_id(2)
input_ptr += D_cell * D_block * channels
grad_ptr += D_cell * D_block * channels
weight_grad_ptr += (H_cell * WD_grid + W_cell * D_grid + D_cell) * 27 * channels
channels_offset = tl.arange(0, channels)
channels_offset = tl.max_contiguous(tl.multiple_of(channels_offset, channels), channels)
d_offset = tl.arange(0, D_block)
near_offset = tl.arange(0, 4) - 1
offset = d_offset[None, None, :] * channels + channels_offset[None, :, None] + near_offset[:, None, None] * channels
mask = d_offset[None, None, :] + near_offset[:, None, None] < D - D_block * D_cell
mask = mask and (d_offset[None, None, :] + near_offset[:, None, None] >= 0 - D_block * D_cell)
mask = mask and (near_offset[:, None, None] != 2)
grad_offset = d_offset[None, :] * channels + channels_offset[:, None]
grad_mask = d_offset[None, :] < D - D_block * D_cell
h0_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h0_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h0_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h1_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w0 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w1 = tl.zeros([4, channels], dtype=ACCTYPE)
h2_w2 = tl.zeros([4, channels], dtype=ACCTYPE)
gradw_offset = tl.arange(0, 4)[:, None] * channels + channels_offset[None, :]
gradw_mask = near_offset[:, None] != 2
for ilp in tl.static_range(0, ILP):
H0_load = 2 * H_cell < H
H1_load = 2 * H_cell + 1 < H
W1_load = 2 * W_cell + 1 < W
tmp_input_ptr = input_ptr + 2 * H_cell * H_stride + 2 * W_cell * W_stride
x_h0_w0 = tl.load(tmp_input_ptr + offset, mask=mask and H0_load)
tmp_input_ptr = input_ptr + (2 * H_cell + 1) * H_stride + 2 * W_cell * W_stride
x_h1_w0 = tl.load(tmp_input_ptr + offset, mask=mask and H1_load)
tmp_input_ptr = input_ptr + 2 * H_cell * H_stride + (2 * W_cell + 1) * W_stride
x_h0_w1 = tl.load(tmp_input_ptr + offset, mask=mask and (W1_load and H0_load))
tmp_input_ptr = input_ptr + (2 * H_cell + 1) * H_stride + (2 * W_cell + 1) * W_stride
x_h1_w1 = tl.load(tmp_input_ptr + offset, mask=mask and (W1_load and H1_load))
for k in tl.static_range(0, 16):
i = (k % 4) - 1
j = (k // 4) - 1
load_next = (2 * H_cell + i < H and 2 * H_cell + i >= 0) and (2 * W_cell + j < W and 2 * W_cell + j >= 0)
tmp_grad_ptr = grad_ptr + (2 * H_cell + i) * H_stride + (2 * W_cell + j) * W_stride
if load_next:
grad = tl.load(tmp_grad_ptr + grad_offset, mask=grad_mask, other=0.0)[None]
if i == -1 and j == -1:
h2_w2 += tl.sum(grad * x_h0_w0, axis=2)
elif i == -1 and j == 0:
h2_w1 += tl.sum(grad * x_h0_w0, axis=2)
h2_w2 += tl.sum(grad * x_h0_w1, axis=2)
elif i == -1 and j == 1:
h2_w0 += tl.sum(grad * x_h0_w0, axis=2)
h2_w1 += tl.sum(grad * x_h0_w1, axis=2)
elif i == -1 and j == 2:
h2_w0 += tl.sum(grad * x_h0_w1, axis=2)
elif i == 0 and j == -1:
h1_w2 += tl.sum(grad * x_h0_w0, axis=2)
h2_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 0 and j == 0:
h1_w1 += tl.sum(grad * x_h0_w0, axis=2)
h2_w1 += tl.sum(grad * x_h1_w0, axis=2)
h1_w2 += tl.sum(grad * x_h0_w1, axis=2)
h2_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 0 and j == 1:
h1_w0 += tl.sum(grad * x_h0_w0, axis=2)
h2_w0 += tl.sum(grad * x_h1_w0, axis=2)
h1_w1 += tl.sum(grad * x_h0_w1, axis=2)
h2_w1 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 0 and j == 2:
h1_w0 += tl.sum(grad * x_h0_w1, axis=2)
h2_w0 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == -1:
h0_w2 += tl.sum(grad * x_h0_w0, axis=2)
h1_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 1 and j == 0:
h0_w1 += tl.sum(grad * x_h0_w0, axis=2)
h1_w1 += tl.sum(grad * x_h1_w0, axis=2)
h0_w2 += tl.sum(grad * x_h0_w1, axis=2)
h1_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == 1:
h0_w0 += tl.sum(grad * x_h0_w0, axis=2)
h1_w0 += tl.sum(grad * x_h1_w0, axis=2)
h0_w1 += tl.sum(grad * x_h0_w1, axis=2)
h1_w1 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 1 and j == 2:
h0_w0 += tl.sum(grad * x_h0_w1, axis=2)
h1_w0 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 2 and j == -1:
h0_w2 += tl.sum(grad * x_h1_w0, axis=2)
elif i == 2 and j == 0:
h0_w1 += tl.sum(grad * x_h1_w0, axis=2)
h0_w2 += tl.sum(grad * x_h1_w1, axis=2)
elif i == 2 and j == 1:
h0_w0 += tl.sum(grad * x_h1_w0, axis=2)
h0_w1 += tl.sum(grad * x_h1_w1, axis=2)
else:
h0_w0 += tl.sum(grad * x_h1_w1, axis=2)
H_cell += delta_H_grid
tl.store(weight_grad_ptr + gradw_offset, h0_w0, mask=gradw_mask)
tl.store((weight_grad_ptr + 3 * channels) + gradw_offset, h0_w1, mask=gradw_mask)
tl.store((weight_grad_ptr + 6 * channels) + gradw_offset, h0_w2, mask=gradw_mask)
tl.store((weight_grad_ptr + 9 * channels) + gradw_offset, h1_w0, mask=gradw_mask)
tl.store((weight_grad_ptr + 12 * channels) + gradw_offset, h1_w1, mask=gradw_mask)
tl.store((weight_grad_ptr + 15 * channels) + gradw_offset, h1_w2, mask=gradw_mask)
tl.store((weight_grad_ptr + 18 * channels) + gradw_offset, h2_w0, mask=gradw_mask)
tl.store((weight_grad_ptr + 21 * channels) + gradw_offset, h2_w1, mask=gradw_mask)
tl.store((weight_grad_ptr + 24 * channels) + gradw_offset, h2_w2, mask=gradw_mask)
|
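The backward weight kernel above writes one 27*channels partial gradient per (H_cell, W_cell, D_cell) program rather than reducing across programs. A host-side reduction such as the following sketch (an assumption about the partial buffer's layout, matching the `(H_cell * WD_grid + W_cell * D_grid + D_cell) * 27 * channels` offset used above, not the repository's actual wrapper) would produce the final depthwise weight gradient:
import torch

def reduce_dw_weight_grad(weight_grad_partial: torch.Tensor, channels: int) -> torch.Tensor:
    # weight_grad_partial: flat buffer of num_programs * 27 * channels partial sums,
    # one (27, channels) slice per program; summing over programs yields the
    # final gradient, viewed here as (3, 3, 3, channels).
    return weight_grad_partial.view(-1, 27, channels).sum(dim=0).view(3, 3, 3, channels)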
2395959141/100-days-of-cuda
|
day17/fp8_gemm.py
|
https://github.com/2395959141/100-days-of-cuda/blob/1aeb4aa316f7e69cf3cf2d08fd3573c372cdfe6c/day17/fp8_gemm.py
|
import torch
import triton
import triton.language as tl
from triton import Config
#! block-wise quantization
@triton.jit
def act_quant_kernal(
x_ptr,
y_ptr,
s_ptr,
BLOCK_SIZE: tl.constexpr
):
"""
    Block-wise quantization kernel (corrected version)
    :param x_ptr: pointer to the input matrix
    :param y_ptr: pointer to the output matrix
    :param s_ptr: pointer to the per-block scaling factors
    :param BLOCK_SIZE: block size (compile-time constant)
"""
pid = tl.program_id(axis=0)
    # offset of the current block
    offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    x = tl.load(x_ptr + offset).to(tl.float16)
    #* maximum absolute value within the block
    max_val = tl.max(tl.abs(x), axis=0)
    s = max_val / 448.0  #! the scaling factor is kept in float32
    # quantize
    y = x / s  #! s is laid out as [..., hidden_dim / block_size]
    # store the results
tl.store(y_ptr + offset , y)
tl.store(s_ptr + pid, s)
def act_quant(x: torch.Tensor, block_size: int = 128) -> tuple[torch.Tensor, torch.Tensor]:
"""
    Quantization function (corrected version); handles the dimension bookkeeping
"""
assert x.is_contiguous()
assert x.size(-1) % block_size == 0
    # allocate the output tensor (same shape as the input)
    y = torch.empty_like(x, dtype=torch.float8_e4m3fn)
    # per-block scaling factors with shape [..., K / block_size]
    s_shape = (*x.size()[:-1], x.size(-1) // block_size)
    s = x.new_empty(s_shape, dtype=torch.float16)
    #! flatten so that multi-dimensional matmul operands are handled too
    grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']),)
    # launch the kernel with an explicit block size
act_quant_kernal[grid](x, y, s, BLOCK_SIZE=block_size)
return y, s
# updated autotuning configurations
fp8_gemm_configs = [
Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64}, num_stages=4, num_warps=8),
Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128}, num_stages=5, num_warps=8)
]
@triton.autotune(configs=fp8_gemm_configs, key=['M', 'N', 'K'])
@triton.jit
def fp8_gemm_kernel(
a_ptr,
b_ptr,
c_ptr,
a_s_ptr, b_s_ptr,
    M, N, K,  # reordered parameters
    stride_am, stride_ak,  # added stride parameters
stride_bk, stride_bn,
stride_cm, stride_cn,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
Performs a matrix multiplication operation on FP8 matrices with scaling factors.
Args:
a_ptr (tl.tensor): Pointer to the first input matrix A.
b_ptr (tl.tensor): Pointer to the second input matrix B.
c_ptr (tl.tensor): Pointer to the output matrix C.
a_s_ptr (tl.tensor): Pointer to the scaling factors for matrix A.
b_s_ptr (tl.tensor): Pointer to the scaling factors for matrix B.
M (int): Number of rows in matrix A and C.
N (tl.constexpr): Number of columns in matrix B and C.
K (tl.constexpr): Number of columns in matrix A and rows in matrix B.
BLOCK_SIZE_M (tl.constexpr): Block size for the M dimension.
BLOCK_SIZE_N (tl.constexpr): Block size for the N dimension.
BLOCK_SIZE_K (tl.constexpr): Block size for the K dimension.
stride_am (tl.constexpr): Stride for the M dimension in matrix A.
stride_ak (tl.constexpr): Stride for the K dimension in matrix A.
stride_bk (tl.constexpr): Stride for the K dimension in matrix B.
stride_bn (tl.constexpr): Stride for the N dimension in matrix B.
stride_cm (tl.constexpr): Stride for the M dimension in matrix C.
stride_cn (tl.constexpr): Stride for the N dimension in matrix C.
Returns:
None
"""
pid_m = tl.program_id(axis=0)
pid_n = tl.program_id(axis=1)
k = tl.cdiv(K, BLOCK_SIZE_K)
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) % M
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
    # corrected pointer arithmetic
    a_ptr = a_ptr + (offs_m[:, None] * K + offs_k[None, :])
    b_ptr = b_ptr + (offs_n[None, :] * K + offs_k[:, None])  #! B is addressed here in transposed (N, K) order
    #! wrong: b_ptr = b_ptr + (offs_k[:, None] * N + offs_n[None, :])
    # corrected scaling-factor indexing
a_s_ptr = a_s_ptr + (pid_m * k)
b_s_ptr = b_s_ptr + (pid_n * k)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for i in range(k):
        # corrected mask conditions
mask_a = (offs_m[:, None] < M) & (offs_k[None, :] < K - i * BLOCK_SIZE_K)
mask_b = (offs_k[:, None] < K - i * BLOCK_SIZE_K) & (offs_n[None, :] < N)
a = tl.load(a_ptr, mask=mask_a, other=0.0)
b = tl.load(b_ptr, mask=mask_b, other=0.0)
a_s = tl.load(a_s_ptr + i)
b_s = tl.load(b_s_ptr + i)
accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :]
        # corrected pointer increments
        a_ptr += BLOCK_SIZE_K
        b_ptr += BLOCK_SIZE_K
        #! corrected scale-pointer increments
a_s_ptr += 1
b_s_ptr += 1
output = accumulator.to(tl.float16)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptr = c_ptr + (offs_cm[:, None] * N + offs_cn[None, :])
mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptr, output, mask=mask)
def fp8_gemm(a: torch.Tensor, b: torch.Tensor):
"""
    FP8 matrix-multiplication entry point
    Args:
        a: (M, K) input matrix, FP8 format
        b: (K, N) input matrix, FP8 format
        a_scale: (M,) scaling factors for matrix A
        b_scale: (N,) scaling factors for matrix B
    Returns:
        c: (M, N) output matrix, float16 (accumulated in FP32)
    """
    # argument validation
    #assert b.dtype == torch.float32, "inputs must be FP32 so they can be quantized dynamically"
    assert a.is_contiguous() and b.is_contiguous(), "input matrices must use a contiguous memory layout"
a_fp8, a_scale = act_quant(a)
b_fp8, b_scale = act_quant(b)
M, K = a.shape
_, N = b.shape
c = torch.empty((M, N), device=a.device, dtype=torch.float16)
grid = lambda meta: (
triton.cdiv(M, meta['BLOCK_SIZE_M']),
triton.cdiv(N, meta['BLOCK_SIZE_N'])
)
    # updated kernel launch
fp8_gemm_kernel[grid](
a_fp8, b_fp8, c,
a_scale, b_scale,
M, N, K,
        a.stride(0), a.stride(1),  # added stride arguments
b.stride(0), b.stride(1),
c.stride(0), c.stride(1),
        # GROUP_SIZE_M parameter removed
)
return c
|
@triton.jit
def act_quant_kernal(
x_ptr,
y_ptr,
s_ptr,
BLOCK_SIZE: tl.constexpr
):
"""
    Block-wise quantization kernel (corrected version)
    :param x_ptr: pointer to the input matrix
    :param y_ptr: pointer to the output matrix
    :param s_ptr: pointer to the per-block scaling factors
    :param BLOCK_SIZE: block size (compile-time constant)
"""
pid = tl.program_id(axis=0)
    # offset of the current block
    offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    x = tl.load(x_ptr + offset).to(tl.float16)
    #* maximum absolute value within the block
    max_val = tl.max(tl.abs(x), axis=0)
    s = max_val / 448.0  #! the scaling factor is kept in float32
    # quantize
    y = x / s  #! s is laid out as [..., hidden_dim / block_size]
    # store the results
tl.store(y_ptr + offset , y)
tl.store(s_ptr + pid, s)
def act_quant(x: torch.Tensor, block_size: int = 128) -> tuple[torch.Tensor, torch.Tensor]:
"""
    Quantization function (corrected version); handles the dimension bookkeeping
"""
assert x.is_contiguous()
assert x.size(-1) % block_size == 0
    # allocate the output tensor (same shape as the input)
    y = torch.empty_like(x, dtype=torch.float8_e4m3fn)
    # per-block scaling factors with shape [..., K / block_size]
    s_shape = (*x.size()[:-1], x.size(-1) // block_size)
    s = x.new_empty(s_shape, dtype=torch.float16)
    #! flatten so that multi-dimensional matmul operands are handled too
    grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']),)
    # launch the kernel with an explicit block size
act_quant_kernal[grid](x, y, s, BLOCK_SIZE=block_size)
return y, s
# updated autotuning configurations
fp8_gemm_configs = [
Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64}, num_stages=4, num_warps=8),
Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128}, num_stages=5, num_warps=8)
]
@triton.autotune(configs=fp8_gemm_configs, key=['M', 'N', 'K'])
|
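A small round-trip check makes the block scheme above concrete: every 128-element block along the last dimension is scaled so that its maximum magnitude maps to 448 (the float8_e4m3fn maximum), and de-quantizing with the stored per-block scales recovers the input up to FP8 precision. The tensor sizes below are illustrative:
import torch

x = torch.randn(4, 1024, device='cuda', dtype=torch.float16)
y, s = act_quant(x, block_size=128)                      # y: float8_e4m3fn, s: (4, 8) per-block scales
x_hat = (y.to(torch.float16).view(4, -1, 128) * s.unsqueeze(-1)).view_as(x)
print((x_hat - x).abs().max())                           # small, FP8-level reconstruction error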
2395959141/100-days-of-cuda
|
day17/fp8_gemm.py
|
https://github.com/2395959141/100-days-of-cuda/blob/1aeb4aa316f7e69cf3cf2d08fd3573c372cdfe6c/day17/fp8_gemm.py
|
import torch
import triton
import triton.language as tl
from triton import Config
#! block-wise quantization
@triton.jit
def act_quant_kernal(
x_ptr,
y_ptr,
s_ptr,
BLOCK_SIZE: tl.constexpr
):
"""
    Block-wise quantization kernel (corrected version)
    :param x_ptr: pointer to the input matrix
    :param y_ptr: pointer to the output matrix
    :param s_ptr: pointer to the per-block scaling factors
    :param BLOCK_SIZE: block size (compile-time constant)
"""
pid = tl.program_id(axis=0)
    # offset of the current block
    offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    x = tl.load(x_ptr + offset).to(tl.float16)
    #* maximum absolute value within the block
    max_val = tl.max(tl.abs(x), axis=0)
    s = max_val / 448.0  #! the scaling factor is kept in float32
    # quantize
    y = x / s  #! s is laid out as [..., hidden_dim / block_size]
    # store the results
tl.store(y_ptr + offset , y)
tl.store(s_ptr + pid, s)
def act_quant(x: torch.Tensor, block_size: int = 128) -> tuple[torch.Tensor, torch.Tensor]:
"""
    Quantization function (corrected version); handles the dimension bookkeeping
"""
assert x.is_contiguous()
assert x.size(-1) % block_size == 0
    # allocate the output tensor (same shape as the input)
    y = torch.empty_like(x, dtype=torch.float8_e4m3fn)
    # per-block scaling factors with shape [..., K / block_size]
    s_shape = (*x.size()[:-1], x.size(-1) // block_size)
    s = x.new_empty(s_shape, dtype=torch.float16)
    #! flatten so that multi-dimensional matmul operands are handled too
    grid = lambda meta: (triton.cdiv(x.numel(), meta['BLOCK_SIZE']),)
    # launch the kernel with an explicit block size
act_quant_kernal[grid](x, y, s, BLOCK_SIZE=block_size)
return y, s
# updated autotuning configurations
fp8_gemm_configs = [
Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64}, num_stages=4, num_warps=8),
Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128}, num_stages=5, num_warps=8)
]
@triton.autotune(configs=fp8_gemm_configs, key=['M', 'N', 'K'])
@triton.jit
def fp8_gemm_kernel(
a_ptr,
b_ptr,
c_ptr,
a_s_ptr, b_s_ptr,
    M, N, K,  # reordered parameters
    stride_am, stride_ak,  # added stride parameters
stride_bk, stride_bn,
stride_cm, stride_cn,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
Performs a matrix multiplication operation on FP8 matrices with scaling factors.
Args:
a_ptr (tl.tensor): Pointer to the first input matrix A.
b_ptr (tl.tensor): Pointer to the second input matrix B.
c_ptr (tl.tensor): Pointer to the output matrix C.
a_s_ptr (tl.tensor): Pointer to the scaling factors for matrix A.
b_s_ptr (tl.tensor): Pointer to the scaling factors for matrix B.
M (int): Number of rows in matrix A and C.
N (tl.constexpr): Number of columns in matrix B and C.
K (tl.constexpr): Number of columns in matrix A and rows in matrix B.
BLOCK_SIZE_M (tl.constexpr): Block size for the M dimension.
BLOCK_SIZE_N (tl.constexpr): Block size for the N dimension.
BLOCK_SIZE_K (tl.constexpr): Block size for the K dimension.
stride_am (tl.constexpr): Stride for the M dimension in matrix A.
stride_ak (tl.constexpr): Stride for the K dimension in matrix A.
stride_bk (tl.constexpr): Stride for the K dimension in matrix B.
stride_bn (tl.constexpr): Stride for the N dimension in matrix B.
stride_cm (tl.constexpr): Stride for the M dimension in matrix C.
stride_cn (tl.constexpr): Stride for the N dimension in matrix C.
Returns:
None
"""
pid_m = tl.program_id(axis=0)
pid_n = tl.program_id(axis=1)
k = tl.cdiv(K, BLOCK_SIZE_K)
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) % M
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
    # corrected pointer arithmetic
    a_ptr = a_ptr + (offs_m[:, None] * K + offs_k[None, :])
    b_ptr = b_ptr + (offs_n[None, :] * K + offs_k[:, None])  #! B is addressed here in transposed (N, K) order
    #! wrong: b_ptr = b_ptr + (offs_k[:, None] * N + offs_n[None, :])
    # corrected scaling-factor indexing
a_s_ptr = a_s_ptr + (pid_m * k)
b_s_ptr = b_s_ptr + (pid_n * k)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for i in range(k):
        # corrected mask conditions
mask_a = (offs_m[:, None] < M) & (offs_k[None, :] < K - i * BLOCK_SIZE_K)
mask_b = (offs_k[:, None] < K - i * BLOCK_SIZE_K) & (offs_n[None, :] < N)
a = tl.load(a_ptr, mask=mask_a, other=0.0)
b = tl.load(b_ptr, mask=mask_b, other=0.0)
a_s = tl.load(a_s_ptr + i)
b_s = tl.load(b_s_ptr + i)
accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :]
        # corrected pointer increments
        a_ptr += BLOCK_SIZE_K
        b_ptr += BLOCK_SIZE_K
        #! corrected scale-pointer increments
a_s_ptr += 1
b_s_ptr += 1
output = accumulator.to(tl.float16)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptr = c_ptr + (offs_cm[:, None] * N + offs_cn[None, :])
mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptr, output, mask=mask)
def fp8_gemm(a: torch.Tensor, b: torch.Tensor):
"""
    FP8 matrix-multiplication entry point
    Args:
        a: (M, K) input matrix, FP8 format
        b: (K, N) input matrix, FP8 format
        a_scale: (M,) scaling factors for matrix A
        b_scale: (N,) scaling factors for matrix B
    Returns:
        c: (M, N) output matrix, float16 (accumulated in FP32)
    """
    # argument validation
    #assert b.dtype == torch.float32, "inputs must be FP32 so they can be quantized dynamically"
    assert a.is_contiguous() and b.is_contiguous(), "input matrices must use a contiguous memory layout"
a_fp8, a_scale = act_quant(a)
b_fp8, b_scale = act_quant(b)
M, K = a.shape
_, N = b.shape
c = torch.empty((M, N), device=a.device, dtype=torch.float16)
grid = lambda meta: (
triton.cdiv(M, meta['BLOCK_SIZE_M']),
triton.cdiv(N, meta['BLOCK_SIZE_N'])
)
    # updated kernel launch
fp8_gemm_kernel[grid](
a_fp8, b_fp8, c,
a_scale, b_scale,
M, N, K,
        a.stride(0), a.stride(1),  # added stride arguments
b.stride(0), b.stride(1),
c.stride(0), c.stride(1),
        # GROUP_SIZE_M parameter removed
)
return c
|
@triton.jit
def fp8_gemm_kernel(
a_ptr,
b_ptr,
c_ptr,
a_s_ptr, b_s_ptr,
    M, N, K,  # reordered parameters
    stride_am, stride_ak,  # added stride parameters
stride_bk, stride_bn,
stride_cm, stride_cn,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
Performs a matrix multiplication operation on FP8 matrices with scaling factors.
Args:
a_ptr (tl.tensor): Pointer to the first input matrix A.
b_ptr (tl.tensor): Pointer to the second input matrix B.
c_ptr (tl.tensor): Pointer to the output matrix C.
a_s_ptr (tl.tensor): Pointer to the scaling factors for matrix A.
b_s_ptr (tl.tensor): Pointer to the scaling factors for matrix B.
M (int): Number of rows in matrix A and C.
N (tl.constexpr): Number of columns in matrix B and C.
K (tl.constexpr): Number of columns in matrix A and rows in matrix B.
BLOCK_SIZE_M (tl.constexpr): Block size for the M dimension.
BLOCK_SIZE_N (tl.constexpr): Block size for the N dimension.
BLOCK_SIZE_K (tl.constexpr): Block size for the K dimension.
stride_am (tl.constexpr): Stride for the M dimension in matrix A.
stride_ak (tl.constexpr): Stride for the K dimension in matrix A.
stride_bk (tl.constexpr): Stride for the K dimension in matrix B.
stride_bn (tl.constexpr): Stride for the N dimension in matrix B.
stride_cm (tl.constexpr): Stride for the M dimension in matrix C.
stride_cn (tl.constexpr): Stride for the N dimension in matrix C.
Returns:
None
"""
pid_m = tl.program_id(axis=0)
pid_n = tl.program_id(axis=1)
k = tl.cdiv(K, BLOCK_SIZE_K)
offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) % M
offs_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
    # corrected pointer arithmetic
    a_ptr = a_ptr + (offs_m[:, None] * K + offs_k[None, :])
    b_ptr = b_ptr + (offs_n[None, :] * K + offs_k[:, None])  #! B is addressed here in transposed (N, K) order
    #! wrong: b_ptr = b_ptr + (offs_k[:, None] * N + offs_n[None, :])
    # corrected scaling-factor indexing
a_s_ptr = a_s_ptr + (pid_m * k)
b_s_ptr = b_s_ptr + (pid_n * k)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for i in range(k):
        # corrected mask conditions
mask_a = (offs_m[:, None] < M) & (offs_k[None, :] < K - i * BLOCK_SIZE_K)
mask_b = (offs_k[:, None] < K - i * BLOCK_SIZE_K) & (offs_n[None, :] < N)
a = tl.load(a_ptr, mask=mask_a, other=0.0)
b = tl.load(b_ptr, mask=mask_b, other=0.0)
a_s = tl.load(a_s_ptr + i)
b_s = tl.load(b_s_ptr + i)
accumulator += tl.dot(a, b) * a_s[:, None] * b_s[None, :]
        # corrected pointer increments
        a_ptr += BLOCK_SIZE_K
        b_ptr += BLOCK_SIZE_K
        #! corrected scale-pointer increments
a_s_ptr += 1
b_s_ptr += 1
output = accumulator.to(tl.float16)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptr = c_ptr + (offs_cm[:, None] * N + offs_cn[None, :])
mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptr, output, mask=mask)
def fp8_gemm(a: torch.Tensor, b: torch.Tensor):
"""
    FP8 matrix-multiplication entry point
    Args:
        a: (M, K) input matrix, FP8 format
        b: (K, N) input matrix, FP8 format
        a_scale: (M,) scaling factors for matrix A
        b_scale: (N,) scaling factors for matrix B
    Returns:
        c: (M, N) output matrix, float16 (accumulated in FP32)
    """
    # argument validation
    #assert b.dtype == torch.float32, "inputs must be FP32 so they can be quantized dynamically"
    assert a.is_contiguous() and b.is_contiguous(), "input matrices must use a contiguous memory layout"
a_fp8, a_scale = act_quant(a)
b_fp8, b_scale = act_quant(b)
M, K = a.shape
_, N = b.shape
c = torch.empty((M, N), device=a.device, dtype=torch.float16)
grid = lambda meta: (
triton.cdiv(M, meta['BLOCK_SIZE_M']),
triton.cdiv(N, meta['BLOCK_SIZE_N'])
)
    # updated kernel launch
fp8_gemm_kernel[grid](
a_fp8, b_fp8, c,
a_scale, b_scale,
M, N, K,
        a.stride(0), a.stride(1),  # added stride arguments
b.stride(0), b.stride(1),
c.stride(0), c.stride(1),
        # GROUP_SIZE_M parameter removed
)
return c
|
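For reference, the result the blockwise-scaled FP8 GEMM above is meant to approximate can be written directly in PyTorch by de-quantizing each operand with its per-block scales and performing an ordinary matmul. The helper below is a sketch under the assumption that the scales were produced by act_quant along the last dimension; the names a_fp8/a_scale in the commented usage are hypothetical:
import torch

def dequant_reference(x_fp8: torch.Tensor, s: torch.Tensor, block_size: int = 128) -> torch.Tensor:
    # undo act_quant: every block of `block_size` elements along the last dim shares one scale
    x = x_fp8.to(torch.float32).unflatten(-1, (-1, block_size))
    return (x * s.to(torch.float32).unsqueeze(-1)).flatten(-2)

# c_ref = dequant_reference(a_fp8, a_scale) @ dequant_reference(b_fp8, b_scale)  # ≈ a @ b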
MKrbm/worms
|
python/rmsKit/rms_torch/optimizer/trion.py
|
https://github.com/MKrbm/worms/blob/bebb1b6f4611ef135d8d0e0cc1f6f255ff306679/python/rmsKit/rms_torch/optimizer/trion.py
|
import torch
try:
import triton
import triton.language as tl
except ImportError as e:
print('triton is not installed, please install by running `pip install triton -U --pre`')
exit()
@triton.autotune(configs = [
triton.Config({'BLOCK_SIZE': 128}, num_warps = 4),
triton.Config({'BLOCK_SIZE': 1024}, num_warps = 8),
], key = ['n_elements'])
@triton.jit
def update_fn_kernel(
p_ptr,
grad_ptr,
exp_avg_ptr,
lr,
wd,
beta1,
beta2,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis = 0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# offsetted pointers
offset_p_ptr = p_ptr + offsets
offset_grad_ptr = grad_ptr + offsets
offset_exp_avg_ptr = exp_avg_ptr + offsets
# load
p = tl.load(offset_p_ptr, mask = mask)
grad = tl.load(offset_grad_ptr, mask = mask)
exp_avg = tl.load(offset_exp_avg_ptr, mask = mask)
# stepweight decay
p = p * (1 - lr * wd)
# diff between momentum running average and grad
diff = exp_avg - grad
# weight update
update = diff * beta1 + grad
# torch.sign
can_update = update != 0
update_sign = tl.where(update > 0, -lr, lr)
p = p + update_sign * can_update
# decay the momentum running average coefficient
exp_avg = diff * beta2 + grad
# store new params and momentum running average coefficient
tl.store(offset_p_ptr, p, mask = mask)
tl.store(offset_exp_avg_ptr, exp_avg, mask = mask)
def update_fn(
p: torch.Tensor,
grad: torch.Tensor,
exp_avg: torch.Tensor,
lr: float,
wd: float,
beta1: float,
beta2: float
):
assert all([t.is_cuda for t in (p, grad, exp_avg)])
n_elements = p.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
update_fn_kernel[grid](
p,
grad,
exp_avg,
lr,
wd,
beta1,
beta2,
n_elements
)
|
@triton.jit
def update_fn_kernel(
p_ptr,
grad_ptr,
exp_avg_ptr,
lr,
wd,
beta1,
beta2,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis = 0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# offsetted pointers
offset_p_ptr = p_ptr + offsets
offset_grad_ptr = grad_ptr + offsets
offset_exp_avg_ptr = exp_avg_ptr + offsets
# load
p = tl.load(offset_p_ptr, mask = mask)
grad = tl.load(offset_grad_ptr, mask = mask)
exp_avg = tl.load(offset_exp_avg_ptr, mask = mask)
# stepweight decay
p = p * (1 - lr * wd)
# diff between momentum running average and grad
diff = exp_avg - grad
# weight update
update = diff * beta1 + grad
# torch.sign
can_update = update != 0
update_sign = tl.where(update > 0, -lr, lr)
p = p + update_sign * can_update
# decay the momentum running average coefficient
exp_avg = diff * beta2 + grad
# store new params and momentum running average coefficient
tl.store(offset_p_ptr, p, mask = mask)
tl.store(offset_exp_avg_ptr, exp_avg, mask = mask)
def update_fn(
p: torch.Tensor,
grad: torch.Tensor,
exp_avg: torch.Tensor,
lr: float,
wd: float,
beta1: float,
beta2: float
):
assert all([t.is_cuda for t in (p, grad, exp_avg)])
n_elements = p.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
update_fn_kernel[grid](
p,
grad,
exp_avg,
lr,
wd,
beta1,
beta2,
n_elements
)
|
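The fused kernel above performs one Lion step per element: decoupled weight decay, a parameter update by the sign of the beta1-interpolated direction, and a beta2 update of the momentum buffer. A hedged sketch of an optimizer loop that drives update_fn (illustrative, not taken from the repository):
import torch

@torch.no_grad()
def lion_step(params, exp_avgs, lr=1e-4, wd=1e-2, beta1=0.9, beta2=0.99):
    # params: CUDA parameters with populated .grad; exp_avgs: matching momentum buffers
    for p, exp_avg in zip(params, exp_avgs):
        if p.grad is None:
            continue
        update_fn(p, p.grad, exp_avg, lr, wd, beta1, beta2)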
daemyung/practice-triton
|
heuristics.py
|
https://github.com/daemyung/practice-triton/blob/27f727726f1507c8380a1c11751d851c7c4a07ce/heuristics.py
|
# MIT License
#
# Copyright (c) 2024 Daemyung Jang
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import triton
import triton.language as tl
@triton.heuristics({'boundary_check': lambda args: args["x_size"] % args["block_size"] })
@triton.jit
def add_kernel(x_ptr, y_ptr, z_ptr, size, block_size: tl.constexpr, boundary_check: tl.constexpr):
offset = tl.program_id(0) * block_size
x_block_ptr = tl.make_block_ptr(
x_ptr, shape=(size,), strides=(1,), offsets=(offset,), block_shape=(block_size,), order=(0,)
)
y_block_ptr = tl.make_block_ptr(
y_ptr, shape=(size,), strides=(1,), offsets=(offset,), block_shape=(block_size,), order=(0,)
)
if boundary_check:
x = tl.load(x_block_ptr, boundary_check=(0,))
y = tl.load(y_block_ptr, boundary_check=(0,))
else:
x = tl.load(x_block_ptr)
y = tl.load(y_block_ptr)
z = x + y
z_block_ptr = tl.make_block_ptr(
z_ptr, shape=(size,), strides=(1,), offsets=(offset,), block_shape=(block_size,), order=(0,)
)
if boundary_check:
tl.store(z_block_ptr, z, boundary_check=(0,))
else:
tl.store(z_block_ptr, z)
def add(x, y):
z = torch.empty_like(x, device="cuda")
size = z.numel()
def grid(meta):
return (triton.cdiv(size, meta["block_size"]),)
add_kernel[grid](x, y, z, size, 1024)
return z
def main():
size = 2**16
x = torch.rand(size, device="cuda")
y = torch.rand(size, device="cuda")
a = add(x, y)
b = x + y
assert torch.allclose(a, b)
if __name__ == "__main__":
main()
|
@triton.jit
def add_kernel(x_ptr, y_ptr, z_ptr, size, block_size: tl.constexpr, boundary_check: tl.constexpr):
offset = tl.program_id(0) * block_size
x_block_ptr = tl.make_block_ptr(
x_ptr, shape=(size,), strides=(1,), offsets=(offset,), block_shape=(block_size,), order=(0,)
)
y_block_ptr = tl.make_block_ptr(
y_ptr, shape=(size,), strides=(1,), offsets=(offset,), block_shape=(block_size,), order=(0,)
)
if boundary_check:
x = tl.load(x_block_ptr, boundary_check=(0,))
y = tl.load(y_block_ptr, boundary_check=(0,))
else:
x = tl.load(x_block_ptr)
y = tl.load(y_block_ptr)
z = x + y
z_block_ptr = tl.make_block_ptr(
z_ptr, shape=(size,), strides=(1,), offsets=(offset,), block_shape=(block_size,), order=(0,)
)
if boundary_check:
tl.store(z_block_ptr, z, boundary_check=(0,))
else:
tl.store(z_block_ptr, z)
def add(x, y):
z = torch.empty_like(x, device="cuda")
size = z.numel()
def grid(meta):
return (triton.cdiv(size, meta["block_size"]),)
add_kernel[grid](x, y, z, size, 1024)
return z
def main():
size = 2**16
x = torch.rand(size, device="cuda")
y = torch.rand(size, device="cuda")
a = add(x, y)
b = x + y
assert torch.allclose(a, b)
if __name__ == "__main__":
main()
|
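Taking the kernel exactly as it appears in the chunk above (i.e. without the @triton.heuristics decorator applied in the full file), boundary_check has to be supplied at launch. A sketch that exercises the guarded load/store path on a size that is not a multiple of the block (sizes are illustrative):
import torch
import triton

size = 2**16 + 5                                   # not a multiple of the 1024-element block
x = torch.rand(size, device='cuda')
y = torch.rand(size, device='cuda')
z = torch.empty_like(x)
grid = (triton.cdiv(size, 1024),)
add_kernel[grid](x, y, z, size, block_size=1024, boundary_check=True)
assert torch.allclose(z, x + y)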
ghidav/group-sae
|
group_sae/kernels.py
|
https://github.com/ghidav/group-sae/blob/e556eeac2af1e956a228a96e243230c510d4e62b/group_sae/kernels.py
|
"""
Copied from https://github.com/openai/sparse_autoencoder/blob/main/sparse_autoencoder/kernels.py
"""
import torch
import triton
import triton.language as tl
def triton_sparse_transpose_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
"""
    calculates sparse.T @ dense (i.e. reducing along the collated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (A, B)
output is shape (N, B)
"""
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
K = sparse_indices.shape[1]
A = dense.shape[0]
assert sparse_indices.shape[0] == A
# COO-format and sorted
sorted_indices = sparse_indices.view(-1).sort()
coo_indices = torch.stack(
[
torch.arange(A, device=sparse_indices.device).repeat_interleave(K)[
sorted_indices.indices
],
sorted_indices.values,
]
) # shape (2, A * K)
coo_values = sparse_values.view(-1)[sorted_indices.indices] # shape (A * K,)
return triton_coo_sparse_dense_matmul(coo_indices, coo_values, dense, N, BLOCK_SIZE_AK)
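# Added reference sketch (not part of the repository file): a dense PyTorch
# equivalent of triton_sparse_transpose_dense_matmul for small inputs, useful as a
# correctness check. out[n] accumulates sparse_values[a, k] * dense[a] for every
# (a, k) with sparse_indices[a, k] == n.
def sparse_transpose_dense_matmul_reference(sparse_indices, sparse_values, dense, N):
    A, K = sparse_indices.shape
    out = torch.zeros(N, dense.shape[1], device=dense.device, dtype=sparse_values.dtype)
    source = sparse_values.reshape(-1, 1) * dense.repeat_interleave(K, dim=0)
    out.index_add_(0, sparse_indices.reshape(-1), source.to(out.dtype))
    return out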
def triton_coo_sparse_dense_matmul(
coo_indices: torch.Tensor,
coo_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
AK = coo_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(N, B, device=dense.device, dtype=coo_values.dtype)
def grid(META):
return triton.cdiv(AK, META["BLOCK_SIZE_AK"]), 1
triton_sparse_transpose_dense_matmul_kernel[grid](
coo_indices,
coo_values,
dense,
out,
stride_da=dense.stride(0),
stride_db=dense.stride(1),
B=B,
N=N,
AK=AK,
BLOCK_SIZE_AK=BLOCK_SIZE_AK,
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_transpose_dense_matmul_kernel(
coo_indices_ptr,
coo_values_ptr,
dense_ptr,
out_ptr,
stride_da,
stride_db,
B,
N,
AK,
BLOCK_SIZE_AK: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
coo_indices is shape (2, AK)
coo_values is shape (AK,)
dense is shape (A, B), contiguous along B
out is shape (N, B)
"""
pid_ak = tl.program_id(0)
pid_b = tl.program_id(1)
coo_offsets = tl.arange(0, BLOCK_SIZE_AK)
b_offsets = tl.arange(0, BLOCK_SIZE_B)
A_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
K_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets + AK,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
values = tl.load(
coo_values_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
last_k = tl.min(K_coords)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for ind in range(BLOCK_SIZE_AK):
if ind + pid_ak * BLOCK_SIZE_AK < AK:
# workaround to do A_coords[ind]
a = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
A_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
k = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
K_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
values,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.float32),
)
)
tl.device_assert(k < N)
if k != last_k:
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
accum *= 0
last_k = k
if v != 0:
accum += v * tl.load(dense_ptr + a * stride_da + b_offsets, mask=b_offsets < B)
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
def triton_sparse_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
"""
    calculates sparse @ dense (i.e. reducing along the uncollated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (N, B)
output is shape (A, B)
"""
N = dense.shape[0]
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
A = sparse_indices.shape[0]
K = sparse_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(A, B, device=dense.device, dtype=sparse_values.dtype)
triton_sparse_dense_matmul_kernel[(A,)](
sparse_indices,
sparse_values,
dense,
out,
stride_dn=dense.stride(0),
stride_db=dense.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_K=triton.next_power_of_2(K),
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
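# Added reference sketch (not part of the repository file): a dense PyTorch
# equivalent of triton_sparse_dense_matmul, handy for testing on small shapes.
# For each row a it computes sum_k sparse_values[a, k] * dense[sparse_indices[a, k], :].
def sparse_dense_matmul_reference(sparse_indices, sparse_values, dense):
    gathered = dense[sparse_indices]                                  # (A, K, B)
    return torch.einsum("ak,akb->ab", sparse_values.to(dense.dtype), gathered)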
@triton.jit
def triton_sparse_dense_matmul_kernel(
sparse_indices_ptr,
sparse_values_ptr,
dense_ptr,
out_ptr,
stride_dn,
stride_db,
A,
B,
N,
K,
BLOCK_SIZE_K: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
sparse_indices is shape (A, K)
sparse_values is shape (A, K)
dense is shape (N, B), contiguous along B
out is shape (A, B)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
sparse_indices = tl.load(
sparse_indices_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
sparse_values = tl.load(
sparse_values_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
for k in range(K):
# workaround to do sparse_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
# workaround to do sparse_values[k]
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_values,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
)
)
tl.device_assert(i < N)
if v != 0:
accum += v * tl.load(
dense_ptr + i * stride_dn + offsets_b * stride_db, mask=offsets_b < B
)
tl.store(out_ptr + pid * B + offsets_b, accum.to(sparse_values.dtype), mask=offsets_b < B)
def triton_dense_dense_sparseout_matmul(
dense1: torch.Tensor,
dense2: torch.Tensor,
at_indices: torch.Tensor,
) -> torch.Tensor:
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
calculates dense1 @ dense2 only for the indices in at_indices
equivalent to (dense1 @ dense2).gather(1, at_indices)
"""
A, B = dense1.shape
N = dense2.shape[1]
assert dense2.shape[0] == B
assert at_indices.shape[0] == A
K = at_indices.shape[1]
assert at_indices.is_contiguous()
assert dense1.stride(1) == 1, "dense1 must be contiguous along B"
assert dense2.stride(0) == 1, "dense2 must be contiguous along B"
if K > 512:
# print("WARN - using naive matmul for large K")
# naive is more efficient for large K
return (dense1 @ dense2).gather(1, at_indices)
out = torch.zeros(A, K, device=dense1.device, dtype=dense1.dtype)
# grid = lambda META: (triton.cdiv(A, META['BLOCK_SIZE_A']),)
triton_dense_dense_sparseout_matmul_kernel[(A,)](
dense1,
dense2,
at_indices,
out,
stride_d1a=dense1.stride(0),
stride_d1b=dense1.stride(1),
stride_d2b=dense2.stride(0),
stride_d2n=dense2.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_B=triton.next_power_of_2(B),
BLOCK_SIZE_N=triton.next_power_of_2(N),
BLOCK_SIZE_K=triton.next_power_of_2(K),
)
return out
@triton.jit
def triton_dense_dense_sparseout_matmul_kernel(
dense1_ptr,
dense2_ptr,
at_indices_ptr,
out_ptr,
stride_d1a,
stride_d1b,
stride_d2b,
stride_d2n,
A,
B,
N,
K,
BLOCK_SIZE_B: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
at_indices = tl.load(at_indices_ptr + pid * K + offsets_k, mask=offsets_k < K) # shape (K,)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
dense1 = tl.load(
dense1_ptr + pid * stride_d1a + offsets_b * stride_d1b, mask=offsets_b < B
) # shape (B,)
accum = tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32)
for k in range(K):
# workaround to do at_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
at_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
tl.device_assert(i < N)
dense2col = tl.load(
dense2_ptr + offsets_b * stride_d2b + i * stride_d2n, mask=offsets_b < B
) # shape (B,)
accum += tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
tl.sum(dense1 * dense2col),
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
tl.store(out_ptr + pid * K + offsets_k, accum, mask=offsets_k < K)
class TritonDecoder(torch.autograd.Function):
@staticmethod
def forward(ctx, sparse_indices, sparse_values, decoder_weight):
ctx.save_for_backward(sparse_indices, sparse_values, decoder_weight)
return triton_sparse_dense_matmul(sparse_indices, sparse_values, decoder_weight.T)
@staticmethod
def backward(ctx, grad_output):
sparse_indices, sparse_values, decoder_weight = ctx.saved_tensors
assert grad_output.is_contiguous(), "grad_output must be contiguous"
decoder_grad = triton_sparse_transpose_dense_matmul(
sparse_indices, sparse_values, grad_output, N=decoder_weight.shape[1]
).T
return (
None,
triton_dense_dense_sparseout_matmul(grad_output, decoder_weight, sparse_indices),
# decoder is contiguous when transposed so this is a matching layout
decoder_grad,
None,
)
|
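# Usage sketch (not from the original file): drive TritonDecoder with top-k sparse
# activations and check the forward pass against a dense PyTorch reference. The import
# path mirrors the file location above; sizes are arbitrary and a CUDA device is assumed.
import torch
from group_sae.kernels import TritonDecoder

torch.manual_seed(0)
A, N, B, K = 64, 4096, 512, 32                     # batch, dict size, model dim, active latents
values, indices = torch.randn(A, N, device="cuda").topk(K, dim=-1)
values = values.detach().requires_grad_()
W_dec = torch.randn(N, B, device="cuda", requires_grad=True)  # contiguous (N, B) decoder

# forward: pass the (B, N) transposed view; the wrapper re-transposes it back to the
# contiguous (N, B) layout that triton_sparse_dense_matmul asserts on
out = TritonDecoder.apply(indices, values, W_dec.T)           # shape (A, B)

# dense reference: gather the K selected decoder rows per sample and sum them
ref = torch.einsum("ak,akb->ab", values, W_dec[indices])
print(torch.allclose(out, ref, atol=1e-3, rtol=1e-3))         # loose tol for fp32 accumulation order

# backward works too; pass an explicitly contiguous grad_output because the
# custom Function asserts contiguity
out.backward(torch.ones_like(out))
print(values.grad.shape, W_dec.grad.shape)                    # (A, K), (N, B)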
@triton.jit
def triton_sparse_transpose_dense_matmul_kernel(
coo_indices_ptr,
coo_values_ptr,
dense_ptr,
out_ptr,
stride_da,
stride_db,
B,
N,
AK,
BLOCK_SIZE_AK: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
coo_indices is shape (2, AK)
coo_values is shape (AK,)
dense is shape (A, B), contiguous along B
out is shape (N, B)
"""
pid_ak = tl.program_id(0)
pid_b = tl.program_id(1)
coo_offsets = tl.arange(0, BLOCK_SIZE_AK)
b_offsets = tl.arange(0, BLOCK_SIZE_B)
A_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
K_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets + AK,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
values = tl.load(
coo_values_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
last_k = tl.min(K_coords)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for ind in range(BLOCK_SIZE_AK):
if ind + pid_ak * BLOCK_SIZE_AK < AK:
# workaround to do A_coords[ind]
a = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
A_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
k = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
K_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
values,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.float32),
)
)
tl.device_assert(k < N)
if k != last_k:
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
accum *= 0
last_k = k
if v != 0:
accum += v * tl.load(dense_ptr + a * stride_da + b_offsets, mask=b_offsets < B)
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
def triton_sparse_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
"""
calculates sparse @ dense (i.e reducing along the uncollated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (N, B)
output is shape (A, B)
"""
N = dense.shape[0]
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
A = sparse_indices.shape[0]
K = sparse_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(A, B, device=dense.device, dtype=sparse_values.dtype)
triton_sparse_dense_matmul_kernel[(A,)](
sparse_indices,
sparse_values,
dense,
out,
stride_dn=dense.stride(0),
stride_db=dense.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_K=triton.next_power_of_2(K),
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
|
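# Usage sketch (not from the original file): call triton_sparse_dense_matmul directly.
# Row a of the output is sum_k sparse_values[a, k] * dense[sparse_indices[a, k], :].
# The import path mirrors the file location above; a CUDA device is assumed.
import torch
from group_sae.kernels import triton_sparse_dense_matmul

A, N, B, K = 32, 2048, 256, 16
sparse_values, sparse_indices = torch.randn(A, N, device="cuda").topk(K, dim=-1)
dense = torch.randn(N, B, device="cuda")                      # contiguous (N, B)

out = triton_sparse_dense_matmul(sparse_indices, sparse_values, dense)  # (A, B)

# dense reference via advanced indexing
ref = torch.einsum("ak,akb->ab", sparse_values, dense[sparse_indices])
print(torch.allclose(out, ref, atol=1e-3, rtol=1e-3))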
ghidav/group-sae
|
group_sae/kernels.py
|
https://github.com/ghidav/group-sae/blob/e556eeac2af1e956a228a96e243230c510d4e62b/group_sae/kernels.py
|
"""
Copied from https://github.com/openai/sparse_autoencoder/blob/main/sparse_autoencoder/kernels.py
"""
import torch
import triton
import triton.language as tl
def triton_sparse_transpose_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
"""
calculates sparse.T @ dense (i.e reducing along the collated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (A, B)
output is shape (N, B)
"""
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
K = sparse_indices.shape[1]
A = dense.shape[0]
assert sparse_indices.shape[0] == A
# COO-format and sorted
sorted_indices = sparse_indices.view(-1).sort()
coo_indices = torch.stack(
[
torch.arange(A, device=sparse_indices.device).repeat_interleave(K)[
sorted_indices.indices
],
sorted_indices.values,
]
) # shape (2, A * K)
coo_values = sparse_values.view(-1)[sorted_indices.indices] # shape (A * K,)
return triton_coo_sparse_dense_matmul(coo_indices, coo_values, dense, N, BLOCK_SIZE_AK)
def triton_coo_sparse_dense_matmul(
coo_indices: torch.Tensor,
coo_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
AK = coo_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(N, B, device=dense.device, dtype=coo_values.dtype)
def grid(META):
return triton.cdiv(AK, META["BLOCK_SIZE_AK"]), 1
triton_sparse_transpose_dense_matmul_kernel[grid](
coo_indices,
coo_values,
dense,
out,
stride_da=dense.stride(0),
stride_db=dense.stride(1),
B=B,
N=N,
AK=AK,
BLOCK_SIZE_AK=BLOCK_SIZE_AK,
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_transpose_dense_matmul_kernel(
coo_indices_ptr,
coo_values_ptr,
dense_ptr,
out_ptr,
stride_da,
stride_db,
B,
N,
AK,
BLOCK_SIZE_AK: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
coo_indices is shape (2, AK)
coo_values is shape (AK,)
dense is shape (A, B), contiguous along B
out is shape (N, B)
"""
pid_ak = tl.program_id(0)
pid_b = tl.program_id(1)
coo_offsets = tl.arange(0, BLOCK_SIZE_AK)
b_offsets = tl.arange(0, BLOCK_SIZE_B)
A_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
K_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets + AK,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
values = tl.load(
coo_values_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
last_k = tl.min(K_coords)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for ind in range(BLOCK_SIZE_AK):
if ind + pid_ak * BLOCK_SIZE_AK < AK:
# workaround to do A_coords[ind]
a = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
A_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
k = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
K_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
values,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.float32),
)
)
tl.device_assert(k < N)
if k != last_k:
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
accum *= 0
last_k = k
if v != 0:
accum += v * tl.load(dense_ptr + a * stride_da + b_offsets, mask=b_offsets < B)
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
def triton_sparse_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
"""
calculates sparse @ dense (i.e reducing along the uncollated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (N, B)
output is shape (A, B)
"""
N = dense.shape[0]
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
A = sparse_indices.shape[0]
K = sparse_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(A, B, device=dense.device, dtype=sparse_values.dtype)
triton_sparse_dense_matmul_kernel[(A,)](
sparse_indices,
sparse_values,
dense,
out,
stride_dn=dense.stride(0),
stride_db=dense.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_K=triton.next_power_of_2(K),
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_dense_matmul_kernel(
sparse_indices_ptr,
sparse_values_ptr,
dense_ptr,
out_ptr,
stride_dn,
stride_db,
A,
B,
N,
K,
BLOCK_SIZE_K: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
sparse_indices is shape (A, K)
sparse_values is shape (A, K)
dense is shape (N, B), contiguous along B
out is shape (A, B)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
sparse_indices = tl.load(
sparse_indices_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
sparse_values = tl.load(
sparse_values_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
for k in range(K):
# workaround to do sparse_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
# workaround to do sparse_values[k]
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_values,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
)
)
tl.device_assert(i < N)
if v != 0:
accum += v * tl.load(
dense_ptr + i * stride_dn + offsets_b * stride_db, mask=offsets_b < B
)
tl.store(out_ptr + pid * B + offsets_b, accum.to(sparse_values.dtype), mask=offsets_b < B)
def triton_dense_dense_sparseout_matmul(
dense1: torch.Tensor,
dense2: torch.Tensor,
at_indices: torch.Tensor,
) -> torch.Tensor:
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
calculates dense1 @ dense2 only for the indices in at_indices
equivalent to (dense1 @ dense2).gather(1, at_indices)
"""
A, B = dense1.shape
N = dense2.shape[1]
assert dense2.shape[0] == B
assert at_indices.shape[0] == A
K = at_indices.shape[1]
assert at_indices.is_contiguous()
assert dense1.stride(1) == 1, "dense1 must be contiguous along B"
assert dense2.stride(0) == 1, "dense2 must be contiguous along B"
if K > 512:
# print("WARN - using naive matmul for large K")
# naive is more efficient for large K
return (dense1 @ dense2).gather(1, at_indices)
out = torch.zeros(A, K, device=dense1.device, dtype=dense1.dtype)
# grid = lambda META: (triton.cdiv(A, META['BLOCK_SIZE_A']),)
triton_dense_dense_sparseout_matmul_kernel[(A,)](
dense1,
dense2,
at_indices,
out,
stride_d1a=dense1.stride(0),
stride_d1b=dense1.stride(1),
stride_d2b=dense2.stride(0),
stride_d2n=dense2.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_B=triton.next_power_of_2(B),
BLOCK_SIZE_N=triton.next_power_of_2(N),
BLOCK_SIZE_K=triton.next_power_of_2(K),
)
return out
@triton.jit
def triton_dense_dense_sparseout_matmul_kernel(
dense1_ptr,
dense2_ptr,
at_indices_ptr,
out_ptr,
stride_d1a,
stride_d1b,
stride_d2b,
stride_d2n,
A,
B,
N,
K,
BLOCK_SIZE_B: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
at_indices = tl.load(at_indices_ptr + pid * K + offsets_k, mask=offsets_k < K) # shape (K,)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
dense1 = tl.load(
dense1_ptr + pid * stride_d1a + offsets_b * stride_d1b, mask=offsets_b < B
) # shape (B,)
accum = tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32)
for k in range(K):
# workaround to do at_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
at_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
tl.device_assert(i < N)
dense2col = tl.load(
dense2_ptr + offsets_b * stride_d2b + i * stride_d2n, mask=offsets_b < B
) # shape (B,)
accum += tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
tl.sum(dense1 * dense2col),
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
tl.store(out_ptr + pid * K + offsets_k, accum, mask=offsets_k < K)
class TritonDecoder(torch.autograd.Function):
@staticmethod
def forward(ctx, sparse_indices, sparse_values, decoder_weight):
ctx.save_for_backward(sparse_indices, sparse_values, decoder_weight)
return triton_sparse_dense_matmul(sparse_indices, sparse_values, decoder_weight.T)
@staticmethod
def backward(ctx, grad_output):
sparse_indices, sparse_values, decoder_weight = ctx.saved_tensors
assert grad_output.is_contiguous(), "grad_output must be contiguous"
decoder_grad = triton_sparse_transpose_dense_matmul(
sparse_indices, sparse_values, grad_output, N=decoder_weight.shape[1]
).T
return (
None,
triton_dense_dense_sparseout_matmul(grad_output, decoder_weight, sparse_indices),
# decoder is contiguous when transposed so this is a matching layout
decoder_grad,
None,
)
|
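# Usage sketch (not from the original file): triton_sparse_transpose_dense_matmul
# computes sparse.T @ dense, i.e. each sample's dense row is scatter-added into the N
# output rows named by that sample's indices (this is what produces the decoder-weight
# gradient in the autograd Function above). Import path and sizes are illustrative;
# a CUDA device is assumed.
import torch
from group_sae.kernels import triton_sparse_transpose_dense_matmul

A, N, B, K = 32, 2048, 256, 16
sparse_values, sparse_indices = torch.randn(A, N, device="cuda").topk(K, dim=-1)
dense = torch.randn(A, B, device="cuda")

out = triton_sparse_transpose_dense_matmul(sparse_indices, sparse_values, dense, N=N)  # (N, B)

# dense reference: materialise the (A, N) activation matrix; top-k indices are unique
# within each row, so scatter_ is safe here
acts = torch.zeros(A, N, device="cuda").scatter_(1, sparse_indices, sparse_values)
ref = acts.T @ dense
print(torch.allclose(out, ref, atol=1e-3, rtol=1e-3))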
@triton.jit
def triton_sparse_dense_matmul_kernel(
sparse_indices_ptr,
sparse_values_ptr,
dense_ptr,
out_ptr,
stride_dn,
stride_db,
A,
B,
N,
K,
BLOCK_SIZE_K: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
sparse_indices is shape (A, K)
sparse_values is shape (A, K)
dense is shape (N, B), contiguous along B
out is shape (A, B)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
sparse_indices = tl.load(
sparse_indices_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
sparse_values = tl.load(
sparse_values_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
for k in range(K):
# workaround to do sparse_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
# workaround to do sparse_values[k]
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_values,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
)
)
tl.device_assert(i < N)
if v != 0:
accum += v * tl.load(
dense_ptr + i * stride_dn + offsets_b * stride_db, mask=offsets_b < B
)
tl.store(out_ptr + pid * B + offsets_b, accum.to(sparse_values.dtype), mask=offsets_b < B)
def triton_dense_dense_sparseout_matmul(
dense1: torch.Tensor,
dense2: torch.Tensor,
at_indices: torch.Tensor,
) -> torch.Tensor:
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
calculates dense1 @ dense2 only for the indices in at_indices
equivalent to (dense1 @ dense2).gather(1, at_indices)
"""
A, B = dense1.shape
N = dense2.shape[1]
assert dense2.shape[0] == B
assert at_indices.shape[0] == A
K = at_indices.shape[1]
assert at_indices.is_contiguous()
assert dense1.stride(1) == 1, "dense1 must be contiguous along B"
assert dense2.stride(0) == 1, "dense2 must be contiguous along B"
if K > 512:
# print("WARN - using naive matmul for large K")
# naive is more efficient for large K
return (dense1 @ dense2).gather(1, at_indices)
out = torch.zeros(A, K, device=dense1.device, dtype=dense1.dtype)
# grid = lambda META: (triton.cdiv(A, META['BLOCK_SIZE_A']),)
triton_dense_dense_sparseout_matmul_kernel[(A,)](
dense1,
dense2,
at_indices,
out,
stride_d1a=dense1.stride(0),
stride_d1b=dense1.stride(1),
stride_d2b=dense2.stride(0),
stride_d2n=dense2.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_B=triton.next_power_of_2(B),
BLOCK_SIZE_N=triton.next_power_of_2(N),
BLOCK_SIZE_K=triton.next_power_of_2(K),
)
return out
|
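# Usage sketch (not from the original file): triton_dense_dense_sparseout_matmul
# evaluates (dense1 @ dense2) only at the K requested columns per row; the backward
# pass above uses it for the gradient w.r.t. the sparse values. dense2 must have
# stride(0) == 1, so it is built here as the transpose of a contiguous (N, B) matrix.
# Sizes and the import path are illustrative; a CUDA device is assumed.
import torch
from group_sae.kernels import triton_dense_dense_sparseout_matmul

A, B, N, K = 32, 256, 2048, 64                      # K <= 512, so the Triton path is taken
dense1 = torch.randn(A, B, device="cuda")
dense2 = torch.randn(N, B, device="cuda").T         # (B, N) view with stride(0) == 1
at_indices = torch.randint(N, (A, K), device="cuda")

out = triton_dense_dense_sparseout_matmul(dense1, dense2, at_indices)  # (A, K)
ref = (dense1 @ dense2).gather(1, at_indices)
print(torch.allclose(out, ref, atol=1e-3, rtol=1e-3))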
ghidav/group-sae
|
group_sae/kernels.py
|
https://github.com/ghidav/group-sae/blob/e556eeac2af1e956a228a96e243230c510d4e62b/group_sae/kernels.py
|
"""
Copied from https://github.com/openai/sparse_autoencoder/blob/main/sparse_autoencoder/kernels.py
"""
import torch
import triton
import triton.language as tl
def triton_sparse_transpose_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
"""
calculates sparse.T @ dense (i.e reducing along the collated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (A, B)
output is shape (N, B)
"""
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
K = sparse_indices.shape[1]
A = dense.shape[0]
assert sparse_indices.shape[0] == A
# COO-format and sorted
sorted_indices = sparse_indices.view(-1).sort()
coo_indices = torch.stack(
[
torch.arange(A, device=sparse_indices.device).repeat_interleave(K)[
sorted_indices.indices
],
sorted_indices.values,
]
) # shape (2, A * K)
coo_values = sparse_values.view(-1)[sorted_indices.indices] # shape (A * K,)
return triton_coo_sparse_dense_matmul(coo_indices, coo_values, dense, N, BLOCK_SIZE_AK)
def triton_coo_sparse_dense_matmul(
coo_indices: torch.Tensor,
coo_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
AK = coo_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(N, B, device=dense.device, dtype=coo_values.dtype)
def grid(META):
return triton.cdiv(AK, META["BLOCK_SIZE_AK"]), 1
triton_sparse_transpose_dense_matmul_kernel[grid](
coo_indices,
coo_values,
dense,
out,
stride_da=dense.stride(0),
stride_db=dense.stride(1),
B=B,
N=N,
AK=AK,
BLOCK_SIZE_AK=BLOCK_SIZE_AK,
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_transpose_dense_matmul_kernel(
coo_indices_ptr,
coo_values_ptr,
dense_ptr,
out_ptr,
stride_da,
stride_db,
B,
N,
AK,
BLOCK_SIZE_AK: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
coo_indices is shape (2, AK)
coo_values is shape (AK,)
dense is shape (A, B), contiguous along B
out is shape (N, B)
"""
pid_ak = tl.program_id(0)
pid_b = tl.program_id(1)
coo_offsets = tl.arange(0, BLOCK_SIZE_AK)
b_offsets = tl.arange(0, BLOCK_SIZE_B)
A_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
K_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets + AK,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
values = tl.load(
coo_values_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
last_k = tl.min(K_coords)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for ind in range(BLOCK_SIZE_AK):
if ind + pid_ak * BLOCK_SIZE_AK < AK:
# workaround to do A_coords[ind]
a = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
A_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
k = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
K_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
values,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.float32),
)
)
tl.device_assert(k < N)
if k != last_k:
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
accum *= 0
last_k = k
if v != 0:
accum += v * tl.load(dense_ptr + a * stride_da + b_offsets, mask=b_offsets < B)
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
def triton_sparse_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
"""
calculates sparse @ dense (i.e reducing along the uncollated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (N, B)
output is shape (A, B)
"""
N = dense.shape[0]
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
A = sparse_indices.shape[0]
K = sparse_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(A, B, device=dense.device, dtype=sparse_values.dtype)
triton_sparse_dense_matmul_kernel[(A,)](
sparse_indices,
sparse_values,
dense,
out,
stride_dn=dense.stride(0),
stride_db=dense.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_K=triton.next_power_of_2(K),
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_dense_matmul_kernel(
sparse_indices_ptr,
sparse_values_ptr,
dense_ptr,
out_ptr,
stride_dn,
stride_db,
A,
B,
N,
K,
BLOCK_SIZE_K: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
sparse_indices is shape (A, K)
sparse_values is shape (A, K)
dense is shape (N, B), contiguous along B
out is shape (A, B)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
sparse_indices = tl.load(
sparse_indices_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
sparse_values = tl.load(
sparse_values_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
for k in range(K):
# workaround to do sparse_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
# workaround to do sparse_values[k]
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_values,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
)
)
tl.device_assert(i < N)
if v != 0:
accum += v * tl.load(
dense_ptr + i * stride_dn + offsets_b * stride_db, mask=offsets_b < B
)
tl.store(out_ptr + pid * B + offsets_b, accum.to(sparse_values.dtype), mask=offsets_b < B)
def triton_dense_dense_sparseout_matmul(
dense1: torch.Tensor,
dense2: torch.Tensor,
at_indices: torch.Tensor,
) -> torch.Tensor:
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
calculates dense1 @ dense2 only for the indices in at_indices
equivalent to (dense1 @ dense2).gather(1, at_indices)
"""
A, B = dense1.shape
N = dense2.shape[1]
assert dense2.shape[0] == B
assert at_indices.shape[0] == A
K = at_indices.shape[1]
assert at_indices.is_contiguous()
assert dense1.stride(1) == 1, "dense1 must be contiguous along B"
assert dense2.stride(0) == 1, "dense2 must be contiguous along B"
if K > 512:
# print("WARN - using naive matmul for large K")
# naive is more efficient for large K
return (dense1 @ dense2).gather(1, at_indices)
out = torch.zeros(A, K, device=dense1.device, dtype=dense1.dtype)
# grid = lambda META: (triton.cdiv(A, META['BLOCK_SIZE_A']),)
triton_dense_dense_sparseout_matmul_kernel[(A,)](
dense1,
dense2,
at_indices,
out,
stride_d1a=dense1.stride(0),
stride_d1b=dense1.stride(1),
stride_d2b=dense2.stride(0),
stride_d2n=dense2.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_B=triton.next_power_of_2(B),
BLOCK_SIZE_N=triton.next_power_of_2(N),
BLOCK_SIZE_K=triton.next_power_of_2(K),
)
return out
@triton.jit
def triton_dense_dense_sparseout_matmul_kernel(
dense1_ptr,
dense2_ptr,
at_indices_ptr,
out_ptr,
stride_d1a,
stride_d1b,
stride_d2b,
stride_d2n,
A,
B,
N,
K,
BLOCK_SIZE_B: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
at_indices = tl.load(at_indices_ptr + pid * K + offsets_k, mask=offsets_k < K) # shape (K,)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
dense1 = tl.load(
dense1_ptr + pid * stride_d1a + offsets_b * stride_d1b, mask=offsets_b < B
) # shape (B,)
accum = tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32)
for k in range(K):
# workaround to do at_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
at_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
tl.device_assert(i < N)
dense2col = tl.load(
dense2_ptr + offsets_b * stride_d2b + i * stride_d2n, mask=offsets_b < B
) # shape (B,)
accum += tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
tl.sum(dense1 * dense2col),
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
tl.store(out_ptr + pid * K + offsets_k, accum, mask=offsets_k < K)
class TritonDecoder(torch.autograd.Function):
@staticmethod
def forward(ctx, sparse_indices, sparse_values, decoder_weight):
ctx.save_for_backward(sparse_indices, sparse_values, decoder_weight)
return triton_sparse_dense_matmul(sparse_indices, sparse_values, decoder_weight.T)
@staticmethod
def backward(ctx, grad_output):
sparse_indices, sparse_values, decoder_weight = ctx.saved_tensors
assert grad_output.is_contiguous(), "grad_output must be contiguous"
decoder_grad = triton_sparse_transpose_dense_matmul(
sparse_indices, sparse_values, grad_output, N=decoder_weight.shape[1]
).T
return (
None,
triton_dense_dense_sparseout_matmul(grad_output, decoder_weight, sparse_indices),
# decoder is contiguous when transposed so this is a matching layout
decoder_grad,
None,
)
|
@triton.jit
def triton_dense_dense_sparseout_matmul_kernel(
dense1_ptr,
dense2_ptr,
at_indices_ptr,
out_ptr,
stride_d1a,
stride_d1b,
stride_d2b,
stride_d2n,
A,
B,
N,
K,
BLOCK_SIZE_B: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
at_indices = tl.load(at_indices_ptr + pid * K + offsets_k, mask=offsets_k < K) # shape (K,)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
dense1 = tl.load(
dense1_ptr + pid * stride_d1a + offsets_b * stride_d1b, mask=offsets_b < B
) # shape (B,)
accum = tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32)
for k in range(K):
# workaround to do at_indices[b]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
at_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
tl.device_assert(i < N)
dense2col = tl.load(
dense2_ptr + offsets_b * stride_d2b + i * stride_d2n, mask=offsets_b < B
) # shape (B,)
accum += tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
tl.sum(dense1 * dense2col),
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
tl.store(out_ptr + pid * K + offsets_k, accum, mask=offsets_k < K)
class TritonDecoder(torch.autograd.Function):
@staticmethod
def forward(ctx, sparse_indices, sparse_values, decoder_weight):
ctx.save_for_backward(sparse_indices, sparse_values, decoder_weight)
return triton_sparse_dense_matmul(sparse_indices, sparse_values, decoder_weight.T)
@staticmethod
def backward(ctx, grad_output):
sparse_indices, sparse_values, decoder_weight = ctx.saved_tensors
assert grad_output.is_contiguous(), "grad_output must be contiguous"
decoder_grad = triton_sparse_transpose_dense_matmul(
sparse_indices, sparse_values, grad_output, N=decoder_weight.shape[1]
).T
return (
None,
triton_dense_dense_sparseout_matmul(grad_output, decoder_weight, sparse_indices),
# decoder is contiguous when transposed so this is a matching layout
decoder_grad,
None,
)
|
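# Consistency check (not from the original file): the gradients produced by
# TritonDecoder should agree with ordinary autograd on the equivalent dense
# formulation. Import path, sizes and tolerances are illustrative; CUDA is assumed.
import torch
from group_sae.kernels import TritonDecoder

torch.manual_seed(0)
A, N, B, K = 16, 1024, 128, 32
values, indices = torch.randn(A, N, device="cuda").topk(K, dim=-1)
values = values.detach().requires_grad_()
W_dec = torch.randn(N, B, device="cuda", requires_grad=True)

out = TritonDecoder.apply(indices, values, W_dec.T)
out.backward(torch.ones_like(out))
g_vals, g_dec = values.grad.clone(), W_dec.grad.clone()

# dense reference path through ordinary autograd
values.grad = None
W_dec.grad = None
acts = torch.zeros(A, N, device="cuda").scatter(1, indices, values)
(acts @ W_dec).backward(torch.ones_like(out))
print(torch.allclose(g_vals, values.grad, atol=1e-3, rtol=1e-3))
print(torch.allclose(g_dec, W_dec.grad, atol=1e-3, rtol=1e-3))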
congtuong/whisper-4-2gpu
|
triton_ops.py
|
https://github.com/congtuong/whisper-4-2gpu/blob/2ee389299968590d49dc30571ed2c5f844c0a7de/triton_ops.py
|
from functools import lru_cache
import numpy as np
import torch
try:
import triton
import triton.language as tl
except ImportError:
raise RuntimeError("triton import failed; try `pip install --pre triton`")
@triton.jit
def dtw_kernel(
cost, trace, x, x_stride, cost_stride, trace_stride, N, M, BLOCK_SIZE: tl.constexpr
):
offsets = tl.arange(0, BLOCK_SIZE)
mask = offsets < M
for k in range(1, N + M + 1): # k = i + j
print(k)
tl.debug_barrier()
p0 = cost + (k - 1) * cost_stride
p1 = cost + k * cost_stride
p2 = cost + k * cost_stride + 1
c0 = tl.load(p0 + offsets, mask=mask)
c1 = tl.load(p1 + offsets, mask=mask)
c2 = tl.load(p2 + offsets, mask=mask)
x_row = tl.load(x + (k - 1) * x_stride + offsets, mask=mask, other=0)
cost_row = x_row + tl.minimum(tl.minimum(c0, c1), c2)
cost_ptr = cost + (k + 1) * cost_stride + 1
tl.store(cost_ptr + offsets, cost_row, mask=mask)
trace_ptr = trace + (k + 1) * trace_stride + 1
tl.store(trace_ptr + offsets, 2, mask=mask & (c2 <= c0) & (c2 <= c1))
tl.store(trace_ptr + offsets, 1, mask=mask & (c1 <= c0) & (c1 <= c2))
tl.store(trace_ptr + offsets, 0, mask=mask & (c0 <= c1) & (c0 <= c2))
print(k)
@lru_cache(maxsize=None)
def median_kernel(filter_width: int):
@triton.jit
def kernel(
y, x, x_stride, y_stride, BLOCK_SIZE: tl.constexpr
): # x.shape[-1] == filter_width
row_idx = tl.program_id(0)
offsets = tl.arange(0, BLOCK_SIZE)
mask = offsets < y_stride
x_ptr = x + row_idx * x_stride # noqa: F841
y_ptr = y + row_idx * y_stride
LOAD_ALL_ROWS_HERE # noqa: F821
BUBBLESORT_HERE # noqa: F821
tl.store(y_ptr + offsets, MIDDLE_ROW_HERE, mask=mask) # noqa: F821
kernel = triton.JITFunction(kernel.fn)
kernel.src = kernel.src.replace(
" LOAD_ALL_ROWS_HERE",
"\n".join(
[
f" row{i} = tl.load(x_ptr + offsets + {i}, mask=mask)"
for i in range(filter_width)
]
),
)
kernel.src = kernel.src.replace(
" BUBBLESORT_HERE",
"\n\n".join(
[
"\n\n".join(
[
"\n".join(
[
f" smaller = tl.where(row{j} < row{j + 1}, row{j}, row{j + 1})",
f" larger = tl.where(row{j} > row{j + 1}, row{j}, row{j + 1})",
f" row{j} = smaller",
f" row{j + 1} = larger",
]
)
for j in range(filter_width - i - 1)
]
)
for i in range(filter_width // 2 + 1)
]
),
)
kernel.src = kernel.src.replace("MIDDLE_ROW_HERE", f"row{filter_width // 2}")
return kernel
def median_filter_cuda(x: torch.Tensor, filter_width: int):
"""Apply a median filter of given width along the last dimension of x"""
slices = x.contiguous().unfold(-1, filter_width, 1)
grid = np.prod(slices.shape[:-2])
kernel = median_kernel(filter_width)
y = torch.empty_like(slices[..., 0])
BLOCK_SIZE = 1 << (y.stride(-2) - 1).bit_length()
kernel[(grid,)](y, x, x.stride(-2), y.stride(-2), BLOCK_SIZE=BLOCK_SIZE)
return y
|
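# Usage sketch (not from the original file): apply the Triton median filter to a random
# 2-D tensor and check it against a pure-PyTorch reference. The partial bubble sort in
# the generated kernel yields the exact median for odd filter widths. Assumes the file
# above is importable as `triton_ops` and a CUDA device is available.
import torch
from triton_ops import median_filter_cuda

x = torch.randn(8, 1500, device="cuda")
w = 7                                   # odd filter width
y = median_filter_cuda(x, w)            # shape (8, 1500 - w + 1)

# reference: sliding windows via unfold; torch.median picks the same middle element
# for odd window sizes
ref = x.unfold(-1, w, 1).median(dim=-1).values
print(y.shape, torch.allclose(y, ref))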
@triton.jit
def dtw_kernel(
cost, trace, x, x_stride, cost_stride, trace_stride, N, M, BLOCK_SIZE: tl.constexpr
):
offsets = tl.arange(0, BLOCK_SIZE)
mask = offsets < M
for k in range(1, N + M + 1): # k = i + j
print(k)
tl.debug_barrier()
p0 = cost + (k - 1) * cost_stride
p1 = cost + k * cost_stride
p2 = cost + k * cost_stride + 1
c0 = tl.load(p0 + offsets, mask=mask)
c1 = tl.load(p1 + offsets, mask=mask)
c2 = tl.load(p2 + offsets, mask=mask)
x_row = tl.load(x + (k - 1) * x_stride + offsets, mask=mask, other=0)
cost_row = x_row + tl.minimum(tl.minimum(c0, c1), c2)
cost_ptr = cost + (k + 1) * cost_stride + 1
tl.store(cost_ptr + offsets, cost_row, mask=mask)
trace_ptr = trace + (k + 1) * trace_stride + 1
tl.store(trace_ptr + offsets, 2, mask=mask & (c2 <= c0) & (c2 <= c1))
tl.store(trace_ptr + offsets, 1, mask=mask & (c1 <= c0) & (c1 <= c2))
tl.store(trace_ptr + offsets, 0, mask=mask & (c0 <= c1) & (c0 <= c2))
print(k)
@lru_cache(maxsize=None)
def median_kernel(filter_width: int):
|
congtuong/whisper-4-2gpu
|
triton_ops.py
|
https://github.com/congtuong/whisper-4-2gpu/blob/2ee389299968590d49dc30571ed2c5f844c0a7de/triton_ops.py
|
from functools import lru_cache
import numpy as np
import torch
try:
import triton
import triton.language as tl
except ImportError:
raise RuntimeError("triton import failed; try `pip install --pre triton`")
@triton.jit
def dtw_kernel(
cost, trace, x, x_stride, cost_stride, trace_stride, N, M, BLOCK_SIZE: tl.constexpr
):
offsets = tl.arange(0, BLOCK_SIZE)
mask = offsets < M
for k in range(1, N + M + 1): # k = i + j
print(k)
tl.debug_barrier()
p0 = cost + (k - 1) * cost_stride
p1 = cost + k * cost_stride
p2 = cost + k * cost_stride + 1
c0 = tl.load(p0 + offsets, mask=mask)
c1 = tl.load(p1 + offsets, mask=mask)
c2 = tl.load(p2 + offsets, mask=mask)
x_row = tl.load(x + (k - 1) * x_stride + offsets, mask=mask, other=0)
cost_row = x_row + tl.minimum(tl.minimum(c0, c1), c2)
cost_ptr = cost + (k + 1) * cost_stride + 1
tl.store(cost_ptr + offsets, cost_row, mask=mask)
trace_ptr = trace + (k + 1) * trace_stride + 1
tl.store(trace_ptr + offsets, 2, mask=mask & (c2 <= c0) & (c2 <= c1))
tl.store(trace_ptr + offsets, 1, mask=mask & (c1 <= c0) & (c1 <= c2))
tl.store(trace_ptr + offsets, 0, mask=mask & (c0 <= c1) & (c0 <= c2))
print(k)
@lru_cache(maxsize=None)
def median_kernel(filter_width: int):
@triton.jit
def kernel(
y, x, x_stride, y_stride, BLOCK_SIZE: tl.constexpr
): # x.shape[-1] == filter_width
row_idx = tl.program_id(0)
offsets = tl.arange(0, BLOCK_SIZE)
mask = offsets < y_stride
x_ptr = x + row_idx * x_stride # noqa: F841
y_ptr = y + row_idx * y_stride
LOAD_ALL_ROWS_HERE # noqa: F821
BUBBLESORT_HERE # noqa: F821
tl.store(y_ptr + offsets, MIDDLE_ROW_HERE, mask=mask) # noqa: F821
kernel = triton.JITFunction(kernel.fn)
kernel.src = kernel.src.replace(
" LOAD_ALL_ROWS_HERE",
"\n".join(
[
f" row{i} = tl.load(x_ptr + offsets + {i}, mask=mask)"
for i in range(filter_width)
]
),
)
kernel.src = kernel.src.replace(
" BUBBLESORT_HERE",
"\n\n".join(
[
"\n\n".join(
[
"\n".join(
[
f" smaller = tl.where(row{j} < row{j + 1}, row{j}, row{j + 1})",
f" larger = tl.where(row{j} > row{j + 1}, row{j}, row{j + 1})",
f" row{j} = smaller",
f" row{j + 1} = larger",
]
)
for j in range(filter_width - i - 1)
]
)
for i in range(filter_width // 2 + 1)
]
),
)
kernel.src = kernel.src.replace("MIDDLE_ROW_HERE", f"row{filter_width // 2}")
return kernel
def median_filter_cuda(x: torch.Tensor, filter_width: int):
"""Apply a median filter of given width along the last dimension of x"""
slices = x.contiguous().unfold(-1, filter_width, 1)
grid = np.prod(slices.shape[:-2])
kernel = median_kernel(filter_width)
y = torch.empty_like(slices[..., 0])
BLOCK_SIZE = 1 << (y.stride(-2) - 1).bit_length()
kernel[(grid,)](y, x, x.stride(-2), y.stride(-2), BLOCK_SIZE=BLOCK_SIZE)
return y
|
@triton.jit
def kernel(
y, x, x_stride, y_stride, BLOCK_SIZE: tl.constexpr
): # x.shape[-1] == filter_width
row_idx = tl.program_id(0)
offsets = tl.arange(0, BLOCK_SIZE)
mask = offsets < y_stride
x_ptr = x + row_idx * x_stride # noqa: F841
y_ptr = y + row_idx * y_stride
LOAD_ALL_ROWS_HERE # noqa: F821
BUBBLESORT_HERE # noqa: F821
tl.store(y_ptr + offsets, MIDDLE_ROW_HERE, mask=mask) # noqa: F821
kernel = triton.JITFunction(kernel.fn)
kernel.src = kernel.src.replace(
" LOAD_ALL_ROWS_HERE",
"\n".join(
[
f" row{i} = tl.load(x_ptr + offsets + {i}, mask=mask)"
for i in range(filter_width)
]
),
)
kernel.src = kernel.src.replace(
" BUBBLESORT_HERE",
"\n\n".join(
[
"\n\n".join(
[
"\n".join(
[
f" smaller = tl.where(row{j} < row{j + 1}, row{j}, row{j + 1})",
f" larger = tl.where(row{j} > row{j + 1}, row{j}, row{j + 1})",
f" row{j} = smaller",
f" row{j + 1} = larger",
]
)
for j in range(filter_width - i - 1)
]
)
for i in range(filter_width // 2 + 1)
]
),
)
kernel.src = kernel.src.replace("MIDDLE_ROW_HERE", f"row{filter_width // 2}")
return kernel
def median_filter_cuda(x: torch.Tensor, filter_width: int):
"""Apply a median filter of given width along the last dimension of x"""
slices = x.contiguous().unfold(-1, filter_width, 1)
grid = np.prod(slices.shape[:-2])
kernel = median_kernel(filter_width)
y = torch.empty_like(slices[..., 0])
BLOCK_SIZE = 1 << (y.stride(-2) - 1).bit_length()
kernel[(grid,)](y, x, x.stride(-2), y.stride(-2), BLOCK_SIZE=BLOCK_SIZE)
return y
|
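# Illustration (not from the original file): the kernel body above is assembled by
# plain string substitution on the JIT function's source, so the easiest way to see
# what actually gets compiled is to print the patched source for a small filter width:
# three tl.load lines, two bubble-sort passes, and `row1` as the median. Assumes the
# file above is importable as `triton_ops`.
from triton_ops import median_kernel

print(median_kernel(3).src)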
SwekeR-463/100kernels
|
day46/tlswiglu.py
|
https://github.com/SwekeR-463/100kernels/blob/80b7cea5a2b66f428380b2cb723147379f849f88/day46/tlswiglu.py
|
import triton
import triton.language as tl
import torch
@triton.jit
def swiglu_forward_kernel(
x_ptr,
y_ptr,
n_elements: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
feature_dim: tl.constexpr # feature dimension (size of x_W and x_V)
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# load input tensor (split into x_W and x_V)
x_W = tl.load(x_ptr + offsets, mask=mask, other=0.0)
x_V = tl.load(x_ptr + offsets + feature_dim, mask=mask, other=0.0)
# SwiGLU activation: SiLU(x_W) * x_V
swiglu = (x_W * tl.sigmoid(x_W)) * x_V
tl.store(y_ptr + offsets, swiglu, mask=mask)
@triton.jit
def swiglu_backward_kernel(
dy_ptr, # pointer to gradient of loss w.r.t. output
x_ptr, # pointer to input tensor
dx_ptr, # pointer to gradient of loss w.r.t. input
n_elements: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
feature_dim: tl.constexpr # feature dimension (size of x_W and x_V)
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# load inputs (split into x_W and x_V)
x_W = tl.load(x_ptr + offsets, mask=mask, other=0.0)
x_V = tl.load(x_ptr + offsets + feature_dim, mask=mask, other=0.0)
dy = tl.load(dy_ptr + offsets, mask=mask, other=0.0)
# compute SiLU and its derivative for x_W
sig = tl.sigmoid(x_W)
sig_derivative = sig * (1 - sig)
silu_grad = sig + x_W * sig_derivative
# compute gradients for x_W and x_V
dx_W = dy * x_V * silu_grad
dx_V = dy * (x_W * sig)
tl.store(dx_ptr + offsets, dx_W, mask=mask)
tl.store(dx_ptr + offsets + feature_dim, dx_V, mask=mask)
def swiglu_forward(x):
n_elements = x.numel() // 2 # output has half the feature size
feature_dim = x.shape[-1] // 2 # split input into two halves
y = torch.empty_like(x[..., :feature_dim])
grid = (triton.cdiv(n_elements, 1024),)
swiglu_forward_kernel[grid](x, y, n_elements, BLOCK_SIZE=1024, feature_dim=feature_dim, num_warps=4)
return y
def swiglu_backward(dy, x):
n_elements = x.numel() // 2 # output has half the feature size
feature_dim = x.shape[-1] // 2 # split input into two halves
dx = torch.empty_like(x)
grid = (triton.cdiv(n_elements, 1024),)
swiglu_backward_kernel[grid](dy, x, dx, n_elements, BLOCK_SIZE=1024, feature_dim=feature_dim, num_warps=4)
return dx
class SwiGLUTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = swiglu_forward(x)
ctx.save_for_backward(x) # save input for backward pass
return y
@staticmethod
def backward(ctx, dy):
x, = ctx.saved_tensors
dx = swiglu_backward(dy, x)
return dx
class TritonSwiGLULayer(torch.nn.Module):
def forward(self, x):
return SwiGLUTriton.apply(x)
# test the implementation
x = torch.randn(4096, device='cuda', requires_grad=True, dtype=torch.float64)
y = SwiGLUTriton.apply(x)
# backward test
dy = torch.ones_like(y) # assume dL/dy = 1
dx = torch.autograd.grad(y, x, grad_outputs=dy)[0]
print(y) # forward pass output
print(dx) # backward pass gradients
# gradient check
test = torch.autograd.gradcheck(SwiGLUTriton.apply, (x,), eps=1e-6, atol=1e-5, nondet_tol=1e-5)
print("Gradient check passed:", test) # True
|
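# Numerical check (not from the original file): compare the Triton SwiGLU with a plain
# PyTorch reference on a 1-D input, which is the layout the kernels above assume (the
# first half gates the second half). SwiGLUTriton is taken from the definitions above
# and assumed to be in scope; CUDA and float32 are assumed.
import torch
import torch.nn.functional as F

d = 2048
x = torch.randn(2 * d, device="cuda", requires_grad=True)
y = SwiGLUTriton.apply(x)

x_ref = x.detach().clone().requires_grad_()
y_ref = F.silu(x_ref[:d]) * x_ref[d:]
print(torch.allclose(y, y_ref, atol=1e-5))

# the analytic backward kernel should match autograd on the reference path
gy = torch.ones_like(y)
gx = torch.autograd.grad(y, x, gy)[0]
gx_ref = torch.autograd.grad(y_ref, x_ref, gy)[0]
print(torch.allclose(gx, gx_ref, atol=1e-5))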
@triton.jit
def swiglu_forward_kernel(
x_ptr,
y_ptr,
n_elements: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
feature_dim: tl.constexpr # feature dimension (size of x_W and x_V)
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# load input tensor (split into x_W and x_V)
x_W = tl.load(x_ptr + offsets, mask=mask, other=0.0)
x_V = tl.load(x_ptr + offsets + feature_dim, mask=mask, other=0.0)
# SwiGLU activation: SiLU(x_W) * x_V
swiglu = (x_W * tl.sigmoid(x_W)) * x_V
tl.store(y_ptr + offsets, swiglu, mask=mask)
|
SwekeR-463/100kernels
|
day46/tlswiglu.py
|
https://github.com/SwekeR-463/100kernels/blob/80b7cea5a2b66f428380b2cb723147379f849f88/day46/tlswiglu.py
|
import triton
import triton.language as tl
import torch
@triton.jit
def swiglu_forward_kernel(
x_ptr,
y_ptr,
n_elements: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
feature_dim: tl.constexpr # feature dimension (size of x_W and x_V)
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# load input tensor (split into x_W and x_V)
x_W = tl.load(x_ptr + offsets, mask=mask, other=0.0)
x_V = tl.load(x_ptr + offsets + feature_dim, mask=mask, other=0.0)
# SwiGLU activation: SiLU(x_W) * x_V
swiglu = (x_W * tl.sigmoid(x_W)) * x_V
tl.store(y_ptr + offsets, swiglu, mask=mask)
@triton.jit
def swiglu_backward_kernel(
dy_ptr, # pointer to gradient of loss w.r.t. output
x_ptr, # pointer to input tensor
dx_ptr, # pointer to gradient of loss w.r.t. input
n_elements: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
feature_dim: tl.constexpr # feature dimension (size of x_W and x_V)
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# load inputs (split into x_W and x_V)
x_W = tl.load(x_ptr + offsets, mask=mask, other=0.0)
x_V = tl.load(x_ptr + offsets + feature_dim, mask=mask, other=0.0)
dy = tl.load(dy_ptr + offsets, mask=mask, other=0.0)
# compute SiLU and its derivative for x_W
sig = tl.sigmoid(x_W)
sig_derivative = sig * (1 - sig)
silu_grad = sig + x_W * sig_derivative
# compute gradients for x_W and x_V
dx_W = dy * x_V * silu_grad
dx_V = dy * (x_W * sig)
tl.store(dx_ptr + offsets, dx_W, mask=mask)
tl.store(dx_ptr + offsets + feature_dim, dx_V, mask=mask)
def swiglu_forward(x):
n_elements = x.numel() // 2 # output has half the feature size
feature_dim = x.shape[-1] // 2 # split input into two halves
y = torch.empty_like(x[..., :feature_dim])
grid = (triton.cdiv(n_elements, 1024),)
swiglu_forward_kernel[grid](x, y, n_elements, BLOCK_SIZE=1024, feature_dim=feature_dim, num_warps=4)
return y
def swiglu_backward(dy, x):
n_elements = x.numel() // 2 # output has half the feature size
feature_dim = x.shape[-1] // 2 # split input into two halves
dx = torch.empty_like(x)
grid = (triton.cdiv(n_elements, 1024),)
swiglu_backward_kernel[grid](dy, x, dx, n_elements, BLOCK_SIZE=1024, feature_dim=feature_dim, num_warps=4)
return dx
class SwiGLUTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = swiglu_forward(x)
ctx.save_for_backward(x) # save input for backward pass
return y
@staticmethod
def backward(ctx, dy):
x, = ctx.saved_tensors
dx = swiglu_backward(dy, x)
return dx
class TritonSwiGLULayer(torch.nn.Module):
def forward(self, x):
return SwiGLUTriton.apply(x)
# test the implementation
x = torch.randn(4096, device='cuda', requires_grad=True, dtype=torch.float64)
y = SwiGLUTriton.apply(x)
# backward test
dy = torch.ones_like(y) # assume dL/dy = 1
dx = torch.autograd.grad(y, x, grad_outputs=dy)[0]
print(y) # forward pass output
print(dx) # backward pass gradients
# gradient check
test = torch.autograd.gradcheck(SwiGLUTriton.apply, (x,), eps=1e-6, atol=1e-5, nondet_tol=1e-5)
print("Gradient check passed:", test) # True
|
@triton.jit
def swiglu_backward_kernel(
dy_ptr, # pointer to gradient of loss w.r.t. output
x_ptr, # pointer to input tensor
dx_ptr, # pointer to gradient of loss w.r.t. input
n_elements: tl.constexpr,
BLOCK_SIZE: tl.constexpr,
feature_dim: tl.constexpr # feature dimension (size of x_W and x_V)
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# load inputs (split into x_W and x_V)
x_W = tl.load(x_ptr + offsets, mask=mask, other=0.0)
x_V = tl.load(x_ptr + offsets + feature_dim, mask=mask, other=0.0)
dy = tl.load(dy_ptr + offsets, mask=mask, other=0.0)
# compute SiLU and its derivative for x_W
sig = tl.sigmoid(x_W)
sig_derivative = sig * (1 - sig)
silu_grad = sig + x_W * sig_derivative
# compute gradients for x_W and x_V
dx_W = dy * x_V * silu_grad
dx_V = dy * (x_W * sig)
tl.store(dx_ptr + offsets, dx_W, mask=mask)
tl.store(dx_ptr + offsets + feature_dim, dx_V, mask=mask)
def swiglu_forward(x):
n_elements = x.numel() // 2 # output has half the feature size
feature_dim = x.shape[-1] // 2 # split input into two halves
y = torch.empty_like(x[..., :feature_dim])
grid = (triton.cdiv(n_elements, 1024),)
swiglu_forward_kernel[grid](x, y, n_elements, BLOCK_SIZE=1024, feature_dim=feature_dim, num_warps=4)
return y
def swiglu_backward(dy, x):
n_elements = x.numel() // 2 # output has half the feature size
feature_dim = x.shape[-1] // 2 # split input into two halves
dx = torch.empty_like(x)
grid = (triton.cdiv(n_elements, 1024),)
swiglu_backward_kernel[grid](dy, x, dx, n_elements, BLOCK_SIZE=1024, feature_dim=feature_dim, num_warps=4)
return dx
class SwiGLUTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = swiglu_forward(x)
ctx.save_for_backward(x) # save input for backward pass
return y
@staticmethod
def backward(ctx, dy):
x, = ctx.saved_tensors
dx = swiglu_backward(dy, x)
return dx
class TritonSwiGLULayer(torch.nn.Module):
def forward(self, x):
return SwiGLUTriton.apply(x)
# test the implementation
x = torch.randn(4096, device='cuda', requires_grad=True, dtype=torch.float64)
y = SwiGLUTriton.apply(x)
# backward test
dy = torch.ones_like(y) # assume dL/dy = 1
dx = torch.autograd.grad(y, x, grad_outputs=dy)[0]
print(y) # forward pass output
print(dx) # backward pass gradients
# gradient check
test = torch.autograd.gradcheck(SwiGLUTriton.apply, (x,), eps=1e-6, atol=1e-5, nondet_tol=1e-5)
print("Gradient check passed:", test) # True
|
lizelive/nix-ai
|
try/app.py
|
https://github.com/lizelive/nix-ai/blob/ef595ddb5694ccebae79d528fd83528135d3cb46/try/app.py
|
import triton
import triton.language as tl
BLOCK_SIZE = 1024
@triton.jit
def softmax(Y, stride_ym, stride_yn, X, stride_xm, stride_xn, M, N):
# row index
m = tl.program_id(0)
# col indices
# this specific kernel only works for matrices that
# have less than BLOCK_SIZE columns
n = tl.arange(0, BLOCK_SIZE)
# the memory address of all the elements
# that we want to load can be computed as follows
X = X + m * stride_xm + n * stride_xn
# load input data; pad out-of-bounds elements with -inf so they do not affect the max or the sum
x = tl.load(X, mask=n < N, other=-float("inf"))
# compute numerically-stable softmax
z = x - tl.max(x, axis=0)
num = tl.exp(z)
denom = tl.sum(num, axis=0)
y = num / denom
# write back to Y
Y = Y + m * stride_ym + n * stride_yn
tl.store(Y, y, mask=n < N)
import torch
# Allocate input/output tensors
X = torch.normal(0, 1, size=(583, 931), device="cuda")
Y = torch.empty_like(X)
# SPMD launch grid
grid = (X.shape[0],)
# enqueue GPU kernel
softmax[grid](
Y, Y.stride(0), Y.stride(1), X, X.stride(0), X.stride(1), X.shape[0], X.shape[1]
)
|
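# Sanity check (not from the original file): with `softmax` and BLOCK_SIZE from the
# script above in scope, the kernel should match torch.softmax row-wise for any row
# length below BLOCK_SIZE.
import torch

X2 = torch.normal(0, 1, size=(128, 1000), device="cuda")
Y2 = torch.empty_like(X2)
softmax[(X2.shape[0],)](
    Y2, Y2.stride(0), Y2.stride(1), X2, X2.stride(0), X2.stride(1), X2.shape[0], X2.shape[1]
)
print(torch.allclose(Y2, torch.softmax(X2, dim=-1), atol=1e-6))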
@triton.jit
def softmax(Y, stride_ym, stride_yn, X, stride_xm, stride_xn, M, N):
# row index
m = tl.program_id(0)
# col indices
# this specific kernel only works for matrices that
# have less than BLOCK_SIZE columns
n = tl.arange(0, BLOCK_SIZE)
# the memory address of all the elements
# that we want to load can be computed as follows
X = X + m * stride_xm + n * stride_xn
# load input data; pad out-of-bounds elements with -inf so they do not affect the max or the sum
x = tl.load(X, mask=n < N, other=-float("inf"))
# compute numerically-stable softmax
z = x - tl.max(x, axis=0)
num = tl.exp(z)
denom = tl.sum(num, axis=0)
y = num / denom
# write back to Y
Y = Y + m * stride_ym + n * stride_yn
tl.store(Y, y, mask=n < N)
import torch
# Allocate input/output tensors
X = torch.normal(0, 1, size=(583, 931), device="cuda")
Y = torch.empty_like(X)
# SPMD launch grid
grid = (X.shape[0],)
# enqueue GPU kernel
softmax[grid](
Y, Y.stride(0), Y.stride(1), X, X.stride(0), X.stride(1), X.shape[0], X.shape[1]
)
|
wantbook-book/EXAKAI
|
exact/exact/ops.py
|
https://github.com/wantbook-book/EXAKAI/blob/11d0c6cdd42b697c4163af44fce4d038a5593d11/exact/exact/ops.py
|
import pdb
import numpy as np
import triton
import triton.language as tl
import time
import torch
import torch.nn.functional as F
from torch.autograd.function import Function
from torch.cuda.amp import custom_fwd, custom_bwd
from torch_sparse import matmul
from .conf import config
import exact.cpp_extension.spmm as spmm
import exact.cpp_extension.backward_func as ext_backward_func
import exact.cpp_extension.quantization as ext_quantization
from exact.utils import empty_cache, get_memory_usage, compute_tensor_bytes, swap_to_cpu, cast_low_bit_int
# mn: minimum; mx: maximum
def quantize_and_pack(data, bits, mn, mx):
# config.simulate: fake quantization in floating point (no bit packing)
if config.simulate:
N = data.shape[0]
# output, data: (N, -1)
output = data
B = 2 ** bits - 1
# widen [mn, mx] slightly so the scale below never divides by zero when mn == mx
mn = mn - 1e-6
mx = mx + 1e-6
scale = B / (mx - mn)
# broadcast
output = (output - mn.view(-1, 1)) * scale.view(-1, 1)
# add random noise
if config.stochastic:
noise = output.new(output.shape).uniform_(-0.5, 0.5)
output.add_(noise)
output = F.relu(output)
output = output.round_().int()
else:
# Pack to bitstream
assert type(bits) == int
pack_func = ext_quantization.pack_single_precision
scale = (2 ** bits - 1) / (mx - mn)
# output is just a packed data
output = pack_func(data, mn, mx, scale.to(data.dtype), bits, config.stochastic)
if config.swap:
output = swap_to_cpu(output)
return output, scale
def dequantize_and_unpack(data, shape, bits, scale, mn):
if config.simulate:
data = data / scale.view(-1, 1) + mn.view(-1, 1)
else:
# swap strategy can save gpu memory as well
# when the data needs to be used, copy it from cpu back to gpu
if config.swap:
data = data.cuda()
N = shape[0]
num_features = int(np.prod(shape[1:]))
# Unpack bitstream
assert type(bits) == int
unpack_func = ext_quantization.unpack_single_precision
# num_features is group_size parameter in cu function
# in pack_func, group_size is calculated from data.shape[1]
data = unpack_func(data, bits, scale, mn, N, num_features)
return data
def no_scheme_compute_quantization_bits(input):
N = input.shape[0]
input_flatten = input.view(N, -1)
# torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]),indices=tensor([3, 0, 0, 1]))
# [0] is the max or min result, Nx1
mn, mx = torch.min(input_flatten, 1)[0], torch.max(input_flatten, 1)[0]
b = config.activation_compression_bits[0]
return input_flatten, b, mn, mx
def quantize_activation(input, scheme):
if not config.compress_activation:
if config.swap:
input = swap_to_cpu(input)
return input, None, None, None
if scheme:
# input_groups: (N, -1)
input_groups, q_bits, q_min, mx = scheme.compute_quantization_bits(input)
else:
input_groups, q_bits, q_min, mx = no_scheme_compute_quantization_bits(input)
q_input, q_scale = quantize_and_pack(input_groups, q_bits, q_min, mx)
if input.dtype == torch.float32:
# bfloat16 (brain float16) has the same range as float32 but lower precision
# 2 bytes is half-precision float, 4 bytes is single-precision float, 8 bytes is double-precision float
return q_input, q_bits, q_scale.to(torch.bfloat16), q_min.to(torch.bfloat16)
else:
return q_input, q_bits, q_scale, q_min
def dequantize_activation(quantized, q_input_shape):
if not config.compress_activation:
ret = quantized[0]
if config.swap:
ret = ret.cuda(non_blocking=True)
return ret
q_input, q_bits, q_scale, q_min = quantized
if q_scale.dtype == torch.bfloat16:
q_scale = q_scale.to(torch.float32)
q_min = q_min.to(torch.float32)
input = dequantize_and_unpack(q_input, q_input_shape, q_bits, q_scale, q_min)
return input.contiguous()
linear_layer_ct = 0
qmatmul_layer_ct = 0
bn_layer_ct = 0
total_act_mem = 0
GPU = 0
@triton.jit
def gen_rad_mat_kernel(rm_ptr,
sqrt_feat_size,
seed,
N:tl.constexpr,
BLOCK_SIZE: tl.constexpr):
pid = tl.program_id(0)
ptr_offs = pid*BLOCK_SIZE+tl.arange(0, BLOCK_SIZE)
rm_ptrs = rm_ptr + ptr_offs
mask = ptr_offs<N
# print('seed', seed)
seeds = seed+ptr_offs
rand_number = tl.randint(seeds, ptr_offs)
rand_number = rand_number.to(tl.uint32)
two = tl.full((BLOCK_SIZE,), 2, dtype=tl.uint32)
rand_number = rand_number%two
rand_number = rand_number.to(tl.float32)
rand_number = (rand_number*2-1)/sqrt_feat_size
tl.store(rm_ptrs, rand_number, mask=mask)
@torch.no_grad()
def input2rp(input, kept_acts):
assert len(input.size()) == 2
rand_mat_size = (input.shape[1], kept_acts)
rand_mat = torch.empty(rand_mat_size, dtype=torch.float32).cuda()
assert rand_mat.is_cuda and input.is_cuda
assert rand_mat.is_contiguous and input.is_contiguous
n_elements = rand_mat.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
seed = int(time.time()*1000)
gen_rad_mat_kernel[grid](
rand_mat, kept_acts**0.5,int(time.time()*1000), n_elements,BLOCK_SIZE=256
)
output = torch.matmul(input, rand_mat)
return output, rand_mat
@torch.no_grad()
def rp2input(input, input_shape, rand_mat):
assert len(input.size()) == 2
assert input.is_cuda and rand_mat.is_cuda
output = torch.matmul(input, rand_mat.t())
return output.view(input_shape)
@torch.no_grad()
def triton_seed_input2rp(input, kept_acts):
assert len(input.size()) == 2
rand_mat_size = (input.shape[1], kept_acts)
rand_mat = torch.empty(rand_mat_size, dtype=torch.float32).cuda()
assert rand_mat.is_cuda and input.is_cuda
assert rand_mat.is_contiguous and input.is_contiguous
n_elements = rand_mat.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
seed = int(time.time()*1000)
gen_rad_mat_kernel[grid](
rand_mat, kept_acts**0.5,seed, n_elements,BLOCK_SIZE=256
)
# print('='*20, 'regenerate rm', '='*20)
# print(rand_mat)
output = torch.matmul(input, rand_mat)
return output,seed, rand_mat_size
@torch.no_grad()
def triton_seed_rp2input(input, seed, rand_mat_size):
assert len(input.size()) == 2
rand_mat = torch.empty(rand_mat_size, dtype=torch.float32).cuda()
assert rand_mat.is_cuda and input.is_cuda
assert rand_mat.is_contiguous and input.is_contiguous
n_elements = rand_mat.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
gen_rad_mat_kernel[grid](
rand_mat, rand_mat_size[1]**0.5,seed, n_elements,BLOCK_SIZE=256
)
# print('='*20, 'regenerate rm', '='*20)
# print(rand_mat)
output = torch.matmul(input, rand_mat.t())
return output
# @torch.no_grad()
# def input2rp(input, kept_acts):
# # kept_acts is a number, R
# assert len(input.size()) == 2
# rand_mat_size = (input.shape[1], kept_acts)
# # Create random matrix
# def gen_rad_mat(rm_size, feat_size, device, dtype):
# # 2 is the high value, 0 is the low value.
# # 0-2 random int number to be generated
# bern = torch.randint(2, size=rm_size, device=device, requires_grad=False, dtype=dtype)
# # rad_mat @ rad_mat^T = I
# return (2.0 * bern - 1) / feat_size **0.5
# rand_matrix = gen_rad_mat(rand_mat_size, kept_acts, input.device, input.dtype)
# dim_reduced_input = torch.matmul(input, rand_matrix)
# return dim_reduced_input, rand_matrix
# @torch.no_grad()
# def rp2input(dim_reduced_input, input_shape, rand_matrix):
# assert len(dim_reduced_input.size()) == 2
# input = torch.matmul(dim_reduced_input, rand_matrix.t())
# return input.view(input_shape)
class qlinear(Function):
@staticmethod
# define the forward data type is float16
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, input, weight, bias=None, quantized=None, randmat=None, scheme=None, rp=True):
if quantized is not None:
# have been quantized?
if randmat is None:
assert rp is False or config.kept_frac == 1.0
# quantized somewhere before
ori_input_shape, proj_input_shape = input.shape, input.shape
else:
assert (rp is True and config.kept_frac < 1.0) and (input.shape[1] == randmat.shape[0])
ori_input_shape, proj_input_shape = input.shape, torch.Size([input.shape[0], randmat.shape[1]])
# this is quantized random projected data
else:
# kept_frac is used to calculate the size of random matrix
# compression ratio R/N
if config.kept_frac < 1.0 and rp:
kept_acts = int(config.kept_frac * input.shape[1] + 0.999)
dim_reduced_input, randmat = input2rp(input, kept_acts)
ori_input_shape, proj_input_shape = input.shape, dim_reduced_input.shape
else:
dim_reduced_input, randmat = input, None
ori_input_shape, proj_input_shape = input.shape, input.shape
quantized = quantize_activation(dim_reduced_input, scheme)
# empty cache if used mem / allocated mem < threshold ratio
empty_cache(config.empty_cache_threshold)
ctx.scheme = scheme
ctx.saved = quantized, weight, bias, randmat
ctx.other_args = ori_input_shape, proj_input_shape, rp
res = F.linear(input, weight, bias)
return res
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
quantized, weight, bias, randmat = ctx.saved
ori_input_shape, q_input_shape, rp = ctx.other_args
input = dequantize_activation(quantized, q_input_shape)
if config.kept_frac < 1.0 and rp:
input = rp2input(input, ori_input_shape, randmat)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
# use cuda linear_backward to speed up
grad_input, grad_weight, grad_bias = ext_backward_func.linear_backward(grad_output, input, weight, bias)
# grad_input = grad_output.mm(weight)
# grad_weight = grad_output.t().mm(input)
# if bias is not None:
# grad_bias = grad_output.sum(0)
# else:
# grad_bias = None
del input, grad_output
empty_cache(config.empty_cache_threshold)
return grad_input, grad_weight, grad_bias, None, None, None, None
# only quantized, no rp
class qelu(Function):
@staticmethod
def forward(ctx, input, alpha, scheme=None):
quantized = quantize_activation(input, scheme)
empty_cache(config.empty_cache_threshold)
ctx.scheme = scheme
ctx.saved = quantized
ctx.other_args = input.shape, alpha
res = F.elu(input, alpha)
return res
@staticmethod
def backward(ctx, grad_output):
# if ctx.scheme:
# ctx.scheme.set_scale(grad_output)
quantized = ctx.saved
q_input_shape, alpha = ctx.other_args
input = dequantize_activation(quantized, q_input_shape)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
grad_input = ext_backward_func._elu_backward_cuda(grad_output, input, alpha)
return grad_input, None, None
class qbatch_norm(Function):
@staticmethod
def forward(ctx, input, running_mean, running_var, weight, bias,
training, exponential_average_factor, eps, scheme):
quantized = quantize_activation(input, scheme)
if training:
output, save_mean, save_var, reserve, _ = ext_backward_func._batch_norm_impl_index(input, weight, bias, running_mean, running_var, training, exponential_average_factor, eps, True)
else:
output, save_mean, save_var = ext_backward_func.native_batch_norm(
input, weight, bias, running_mean, running_var, training, exponential_average_factor, eps)
reserve = None
# output, save_mean, save_var = ext_backward_func.native_batch_norm(
# input, weight, bias, running_mean, running_var, training, exponential_average_factor, eps)
# reserve = None
ctx.scheme = scheme
ctx.other_args = input.shape
ctx.saved = (quantized, weight, running_mean, running_var, save_mean, save_var, training, eps, reserve)
return output
@staticmethod
def backward(ctx, grad_output):
quantized, weight, running_mean, running_var, save_mean, save_var, training, eps, reserve = ctx.saved
q_input_shape = ctx.other_args
input = dequantize_activation(quantized, q_input_shape)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
# if training:
# input = input.contiguous()
# grad_input, grad_weight, grad_bias = ext_backward_func.cudnn_batch_norm_backward(
# input, grad_output, weight, running_mean, running_var, save_mean, save_var, eps, reserve)
# else:
# grad_input, grad_weight, grad_bias = ext_backward_func.native_batch_norm_backward(
# grad_output, input, weight, running_mean, running_var, save_mean, save_var, training, eps,
# [ctx.needs_input_grad[0], ctx.needs_input_grad[3], ctx.needs_input_grad[4]]
# )
grad_input, grad_weight, grad_bias = ext_backward_func.native_batch_norm_backward(
grad_output, input, weight, running_mean, running_var, save_mean, save_var, training, eps,
[ctx.needs_input_grad[0], ctx.needs_input_grad[3], ctx.needs_input_grad[4]]
)
return grad_input, None, None, grad_weight, grad_bias, None, None, None, None
class qspmm_sum(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, row, rowptr, col, value, colptr, csr2csc, has_value, other, quantized=None, randmat=None, scheme=None):
result = spmm.spmm_sum_fw(row, rowptr, col, value, colptr, csr2csc, other)
if quantized is not None:
if randmat is None:
assert config.kept_frac == 1.0
# quantized somewhere before
ori_input_shape, proj_input_shape = other.shape, other.shape
else:
assert config.kept_frac < 1.0 and (other.shape[1] == randmat.shape[0])
ori_input_shape, proj_input_shape = other.shape, torch.Size([other.shape[0], randmat.shape[1]])
# this is quantized random projected data
else:
if config.kept_frac < 1.0:
kept_acts = int(config.kept_frac * other.shape[1] + 0.999)
dim_reduced_input, randmat = input2rp(other, kept_acts)
ori_input_shape, proj_input_shape = other.shape, dim_reduced_input.shape
else:
dim_reduced_input, randmat = other, None
ori_input_shape, proj_input_shape = other.shape, other.shape
quantized = quantize_activation(dim_reduced_input, scheme)
empty_cache(config.empty_cache_threshold)
ctx.saved = row, rowptr, col, value, colptr, csr2csc, quantized, randmat
ctx.other_args = has_value, ori_input_shape, proj_input_shape, value.requires_grad if has_value else False, other.requires_grad
ctx.scheme = scheme
return result
@staticmethod
@custom_bwd
def backward(ctx, grad_outputs):
row, rowptr, col, value, colptr, csr2csc, quantized, randmat = ctx.saved
row = col if row is None else row
value = col if value is None else value
colptr = col if colptr is None else colptr
csr2csc = col if csr2csc is None else csr2csc
has_value, ori_input_shape, q_input_shape, value_requires_grad, mat_requires_grad = ctx.other_args
other = dequantize_activation(quantized, q_input_shape)
if config.kept_frac < 1.0:
other = rp2input(other, ori_input_shape, randmat)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
grad_value, grad_mat = spmm.spmm_sum_bw(row, rowptr, col, value, colptr, csr2csc, other, grad_outputs,
has_value, value_requires_grad, mat_requires_grad)
del other
empty_cache(config.empty_cache_threshold)
return None, None, None, grad_value, None, None, None, grad_mat, None, None, None
class qspmm_mean(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, row, rowptr, col, value, rowcount, colptr, csr2csc, has_value, other,
quantized=None, randmat=None, scheme=None):
result = spmm.spmm_mean_fw(row, rowptr, col, value, rowcount, colptr, csr2csc, other)
if quantized is not None:
if randmat is None:
assert config.kept_frac == 1.0
# quantized somewhere before without random projection
ori_input_shape, proj_input_shape = other.shape, other.shape
else:
assert config.kept_frac < 1.0 and (other.shape[1] == randmat.shape[0])
ori_input_shape, proj_input_shape = other.shape, torch.Size([other.shape[0], randmat.shape[1]])
# this is quantized random projected data
else:
if config.kept_frac < 1.0:
kept_acts = int(config.kept_frac * other.shape[1] + 0.999)
dim_reduced_input, randmat = input2rp(other, kept_acts)
ori_input_shape, proj_input_shape = other.shape, dim_reduced_input.shape
else:
dim_reduced_input, randmat = other, None
ori_input_shape, proj_input_shape = other.shape, other.shape
quantized = quantize_activation(dim_reduced_input, scheme)
empty_cache(config.empty_cache_threshold)
ctx.saved = row, rowptr, col, value, rowcount, colptr, csr2csc, quantized, randmat
ctx.other_args = has_value, ori_input_shape, proj_input_shape, value.requires_grad if has_value else False, other.requires_grad
ctx.scheme = scheme
return result
@staticmethod
@custom_bwd
def backward(ctx, grad_outputs):
row, rowptr, col, value, rowcount, colptr, csr2csc, quantized, randmat = ctx.saved
row = col if row is None else row
value = col if value is None else value
rowcount = col if rowcount is None else rowcount
colptr = col if colptr is None else colptr
csr2csc = col if csr2csc is None else csr2csc
has_value, ori_input_shape, q_input_shape, value_requires_grad, mat_requires_grad = ctx.other_args
# here is one ugly trick: if we know value does not need gradient,
# we actually do not need the ``other'' matrix to calculate the gradient.
# So here we just pass a dummy matrix to the CUDA kernel.
# TODO: engineering optimization.
if value_requires_grad:
other = dequantize_activation(quantized, q_input_shape)
if config.kept_frac < 1.0:
other = rp2input(other, ori_input_shape, randmat)
else:
if quantized[2].dtype == torch.bfloat16:
dtype = torch.float
else:
dtype = quantized[2].dtype
other = torch.tensor([1.], dtype=dtype, device=quantized[2].device)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
grad_value, grad_mat = spmm.spmm_mean_bw(row, rowptr, col, value, rowcount, colptr, csr2csc, other, grad_outputs,
has_value, value_requires_grad, mat_requires_grad)
del other
empty_cache(config.empty_cache_threshold)
return None, None, None, grad_value, None, None, None, None, grad_mat, None, None, None
class qspmm_max(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, rowptr, col, value, has_value, other,
quantized=None, randmat=None, scheme=None):
output, arg_out = spmm.spmm_max_fw(rowptr, col, value, other)
if quantized is None:
quantized = quantize_activation(other, scheme)
else:
assert isinstance(quantized, tuple)
empty_cache(config.empty_cache_threshold)
ctx.saved = col, value, quantized, arg_out
ctx.other_args = has_value, other.shape, value.requires_grad if has_value else False, other.requires_grad
ctx.mark_non_differentiable(arg_out)
ctx.scheme = scheme
return output
@staticmethod
@custom_bwd
def backward(ctx, grad_outputs):
col, value, quantized, arg_out = ctx.saved
value = col if value is None else value
has_value, q_input_shape, value_requires_grad, mat_requires_grad = ctx.other_args
other = dequantize_activation(quantized, q_input_shape)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
grad_value, grad_mat = spmm.spmm_max_bw(col, value, other, arg_out, grad_outputs,
has_value, value_requires_grad, mat_requires_grad)
return None, None, grad_value, None, grad_mat, None, None, None
class qspmm_min(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, rowptr, col, value, has_value, other,
quantized=None, randmat=None, scheme=None):
output, arg_out = spmm.spmm_min_fw(rowptr, col, value, other)
if quantized is None:
quantized = quantize_activation(other, scheme)
else:
assert isinstance(quantized, tuple)
empty_cache(config.empty_cache_threshold)
ctx.saved = col, value, quantized, arg_out
ctx.other_args = has_value, other.shape, value.requires_grad if has_value else False, other.requires_grad
ctx.mark_non_differentiable(arg_out)
ctx.scheme = scheme
return output
@staticmethod
@custom_bwd
def backward(ctx, grad_outputs):
col, value, quantized, arg_out = ctx.saved
value = col if value is None else value
has_value, q_input_shape, value_requires_grad, mat_requires_grad = ctx.other_args
other = dequantize_activation(quantized, q_input_shape)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
# if ctx.scheme:
# ctx.scheme.set_scale(grad_outputs)
grad_value, grad_mat = spmm.spmm_min_bw(col, value, other, arg_out, grad_outputs,
has_value, value_requires_grad, mat_requires_grad)
return None, None, grad_value, None, grad_mat, None, None, None
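# A minimal usage sketch (not part of the original file): the autograd Functions above
# are meant to be wrapped in thin nn.Module layers. The class name QLinearExample and
# the use of scheme=None below are illustrative assumptions; `scheme` is whatever
# quantization-scheme object the surrounding project supplies.
class QLinearExample(torch.nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.empty(out_features, in_features))
        self.bias = torch.nn.Parameter(torch.zeros(out_features))
        torch.nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5)

    def forward(self, x, scheme=None):
        # quantized=None and randmat=None let qlinear.forward quantize (and, when
        # config.kept_frac < 1.0, randomly project) the activation itself.
        return qlinear.apply(x, self.weight, self.bias, None, None, scheme, True)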
|
@triton.jit
def gen_rad_mat_kernel(rm_ptr,
                       sqrt_feat_size,
                       seed,
                       N: tl.constexpr,
                       BLOCK_SIZE: tl.constexpr):
    # Fill rm_ptr[0:N] with Rademacher entries +/-(1 / sqrt_feat_size), derived
    # deterministically from (seed, offset) so that the same matrix can be
    # regenerated later from the seed alone.
    pid = tl.program_id(0)
    ptr_offs = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    rm_ptrs = rm_ptr + ptr_offs
    mask = ptr_offs < N
    seeds = seed + ptr_offs
    rand_number = tl.randint(seeds, ptr_offs)
    rand_number = rand_number.to(tl.uint32)
    two = tl.full((BLOCK_SIZE,), 2, dtype=tl.uint32)
    rand_number = rand_number % two                        # 0 or 1
    rand_number = rand_number.to(tl.float32)
    rand_number = (rand_number * 2 - 1) / sqrt_feat_size   # +/- 1/sqrt(R)
    tl.store(rm_ptrs, rand_number, mask=mask)
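# Hedged note (not in the original file): with entries +/- 1/sqrt(R), where R is the
# reduced dimension, E[rand_mat @ rand_mat.T] = I, so the projection approximately
# preserves inner products (a Johnson-Lindenstrauss style argument). A quick CPU-side
# sanity check of that scaling, assuming only torch:
def _check_rand_mat_scale(feat=64, kept=4096):
    bern = torch.randint(0, 2, (feat, kept), dtype=torch.float32)
    rm = (2.0 * bern - 1) / kept ** 0.5      # same +/- 1/sqrt(R) scaling as the kernel
    approx_eye = rm @ rm.t()                 # diagonal is exactly 1, off-diagonal ~ N(0, 1/R)
    assert torch.allclose(approx_eye, torch.eye(feat), atol=0.2)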
@torch.no_grad()
def input2rp(input, kept_acts):
assert len(input.size()) == 2
rand_mat_size = (input.shape[1], kept_acts)
rand_mat = torch.empty(rand_mat_size, dtype=torch.float32).cuda()
assert rand_mat.is_cuda and input.is_cuda
    assert rand_mat.is_contiguous() and input.is_contiguous()
n_elements = rand_mat.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
seed = int(time.time()*1000)
gen_rad_mat_kernel[grid](
        rand_mat, kept_acts**0.5, seed, n_elements, BLOCK_SIZE=256
)
output = torch.matmul(input, rand_mat)
return output, rand_mat
@torch.no_grad()
def rp2input(input, input_shape, rand_mat):
assert len(input.size()) == 2
assert input.is_cuda and rand_mat.is_cuda
output = torch.matmul(input, rand_mat.t())
return output.view(input_shape)
@torch.no_grad()
def triton_seed_input2rp(input, kept_acts):
assert len(input.size()) == 2
rand_mat_size = (input.shape[1], kept_acts)
rand_mat = torch.empty(rand_mat_size, dtype=torch.float32).cuda()
assert rand_mat.is_cuda and input.is_cuda
    assert rand_mat.is_contiguous() and input.is_contiguous()
n_elements = rand_mat.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
seed = int(time.time()*1000)
gen_rad_mat_kernel[grid](
        rand_mat, kept_acts**0.5, seed, n_elements, BLOCK_SIZE=256
)
# print('='*20, 'regenerate rm', '='*20)
# print(rand_mat)
output = torch.matmul(input, rand_mat)
return output,seed, rand_mat_size
@torch.no_grad()
def triton_seed_rp2input(input, seed, rand_mat_size):
assert len(input.size()) == 2
rand_mat = torch.empty(rand_mat_size, dtype=torch.float32).cuda()
assert rand_mat.is_cuda and input.is_cuda
    assert rand_mat.is_contiguous() and input.is_contiguous()
n_elements = rand_mat.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
gen_rad_mat_kernel[grid](
        rand_mat, rand_mat_size[1]**0.5, seed, n_elements, BLOCK_SIZE=256
)
# print('='*20, 'regenerate rm', '='*20)
# print(rand_mat)
output = torch.matmul(input, rand_mat.t())
return output
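# Hedged usage sketch (not part of the original file): the seed-based pair above lets
# a backward pass regenerate the same random matrix from `seed` instead of keeping it
# in memory. Assuming a CUDA device:
def _example_seed_roundtrip():
    x = torch.randn(8, 128, device='cuda')
    proj, seed, rm_size = triton_seed_input2rp(x, kept_acts=32)   # rand_mat is not kept
    x_approx = triton_seed_rp2input(proj, seed, rm_size)          # same matrix regenerated
    assert x_approx.shape == x.shape                              # reconstruction is approximate, not exact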
# @torch.no_grad()
# def input2rp(input, kept_acts):
# # kept_acts is a number, R
# assert len(input.size()) == 2
# rand_mat_size = (input.shape[1], kept_acts)
# # Create random matrix
# def gen_rad_mat(rm_size, feat_size, device, dtype):
# # 2 is the high value, 0 is the low value.
# # 0-2 random int number to be generated
# bern = torch.randint(2, size=rm_size, device=device, requires_grad=False, dtype=dtype)
# # rad_mat @ rad_mat^T = I
# return (2.0 * bern - 1) / feat_size **0.5
# rand_matrix = gen_rad_mat(rand_mat_size, kept_acts, input.device, input.dtype)
# dim_reduced_input = torch.matmul(input, rand_matrix)
# return dim_reduced_input, rand_matrix
# @torch.no_grad()
# def rp2input(dim_reduced_input, input_shape, rand_matrix):
# assert len(dim_reduced_input.size()) == 2
# input = torch.matmul(dim_reduced_input, rand_matrix.t())
# return input.view(input_shape)
class qlinear(Function):
@staticmethod
    # cast the forward inputs to float16 (mixed-precision forward)
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, input, weight, bias=None, quantized=None, randmat=None, scheme=None, rp=True):
if quantized is not None:
            # the input has already been quantized upstream
if randmat is None:
assert rp is False or config.kept_frac == 1.0
# quantized somewhere before
ori_input_shape, proj_input_shape = input.shape, input.shape
else:
assert (rp is True and config.kept_frac < 1.0) and (input.shape[1] == randmat.shape[0])
ori_input_shape, proj_input_shape = input.shape, torch.Size([input.shape[0], randmat.shape[1]])
# this is quantized random projected data
else:
# kept_frac is used to calculate the size of random matrix
# compression ratio R/N
if config.kept_frac < 1.0 and rp:
kept_acts = int(config.kept_frac * input.shape[1] + 0.999)
dim_reduced_input, randmat = input2rp(input, kept_acts)
ori_input_shape, proj_input_shape = input.shape, dim_reduced_input.shape
else:
dim_reduced_input, randmat = input, None
ori_input_shape, proj_input_shape = input.shape, input.shape
quantized = quantize_activation(dim_reduced_input, scheme)
# empty cache if used mem / allocated mem < threshold ratio
empty_cache(config.empty_cache_threshold)
ctx.scheme = scheme
ctx.saved = quantized, weight, bias, randmat
ctx.other_args = ori_input_shape, proj_input_shape, rp
res = F.linear(input, weight, bias)
return res
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
quantized, weight, bias, randmat = ctx.saved
ori_input_shape, q_input_shape, rp = ctx.other_args
input = dequantize_activation(quantized, q_input_shape)
if config.kept_frac < 1.0 and rp:
input = rp2input(input, ori_input_shape, randmat)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
# use cuda linear_backward to speed up
grad_input, grad_weight, grad_bias = ext_backward_func.linear_backward(grad_output, input, weight, bias)
# grad_input = grad_output.mm(weight)
# grad_weight = grad_output.t().mm(input)
# if bias is not None:
# grad_bias = grad_output.sum(0)
# else:
# grad_bias = None
del input, grad_output
empty_cache(config.empty_cache_threshold)
return grad_input, grad_weight, grad_bias, None, None, None, None
# only quantized, no rp
class qelu(Function):
@staticmethod
def forward(ctx, input, alpha, scheme=None):
quantized = quantize_activation(input, scheme)
empty_cache(config.empty_cache_threshold)
ctx.scheme = scheme
ctx.saved = quantized
ctx.other_args = input.shape, alpha
res = F.elu(input, alpha)
return res
@staticmethod
def backward(ctx, grad_output):
# if ctx.scheme:
# ctx.scheme.set_scale(grad_output)
quantized = ctx.saved
q_input_shape, alpha = ctx.other_args
input = dequantize_activation(quantized, q_input_shape)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
grad_input = ext_backward_func._elu_backward_cuda(grad_output, input, alpha)
return grad_input, None, None
class qbatch_norm(Function):
@staticmethod
def forward(ctx, input, running_mean, running_var, weight, bias,
training, exponential_average_factor, eps, scheme):
quantized = quantize_activation(input, scheme)
if training:
output, save_mean, save_var, reserve, _ = ext_backward_func._batch_norm_impl_index(input, weight, bias, running_mean, running_var, training, exponential_average_factor, eps, True)
else:
output, save_mean, save_var = ext_backward_func.native_batch_norm(
input, weight, bias, running_mean, running_var, training, exponential_average_factor, eps)
reserve = None
# output, save_mean, save_var = ext_backward_func.native_batch_norm(
# input, weight, bias, running_mean, running_var, training, exponential_average_factor, eps)
# reserve = None
ctx.scheme = scheme
ctx.other_args = input.shape
ctx.saved = (quantized, weight, running_mean, running_var, save_mean, save_var, training, eps, reserve)
return output
@staticmethod
def backward(ctx, grad_output):
quantized, weight, running_mean, running_var, save_mean, save_var, training, eps, reserve = ctx.saved
q_input_shape = ctx.other_args
input = dequantize_activation(quantized, q_input_shape)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
# if training:
# input = input.contiguous()
# grad_input, grad_weight, grad_bias = ext_backward_func.cudnn_batch_norm_backward(
# input, grad_output, weight, running_mean, running_var, save_mean, save_var, eps, reserve)
# else:
# grad_input, grad_weight, grad_bias = ext_backward_func.native_batch_norm_backward(
# grad_output, input, weight, running_mean, running_var, save_mean, save_var, training, eps,
# [ctx.needs_input_grad[0], ctx.needs_input_grad[3], ctx.needs_input_grad[4]]
# )
grad_input, grad_weight, grad_bias = ext_backward_func.native_batch_norm_backward(
grad_output, input, weight, running_mean, running_var, save_mean, save_var, training, eps,
[ctx.needs_input_grad[0], ctx.needs_input_grad[3], ctx.needs_input_grad[4]]
)
return grad_input, None, None, grad_weight, grad_bias, None, None, None, None
class qspmm_sum(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, row, rowptr, col, value, colptr, csr2csc, has_value, other, quantized=None, randmat=None, scheme=None):
result = spmm.spmm_sum_fw(row, rowptr, col, value, colptr, csr2csc, other)
if quantized is not None:
if randmat is None:
assert config.kept_frac == 1.0
# quantized somewhere before
ori_input_shape, proj_input_shape = other.shape, other.shape
else:
assert config.kept_frac < 1.0 and (other.shape[1] == randmat.shape[0])
ori_input_shape, proj_input_shape = other.shape, torch.Size([other.shape[0], randmat.shape[1]])
# this is quantized random projected data
else:
if config.kept_frac < 1.0:
kept_acts = int(config.kept_frac * other.shape[1] + 0.999)
dim_reduced_input, randmat = input2rp(other, kept_acts)
ori_input_shape, proj_input_shape = other.shape, dim_reduced_input.shape
else:
dim_reduced_input, randmat = other, None
ori_input_shape, proj_input_shape = other.shape, other.shape
quantized = quantize_activation(dim_reduced_input, scheme)
empty_cache(config.empty_cache_threshold)
ctx.saved = row, rowptr, col, value, colptr, csr2csc, quantized, randmat
ctx.other_args = has_value, ori_input_shape, proj_input_shape, value.requires_grad if has_value else False, other.requires_grad
ctx.scheme = scheme
return result
@staticmethod
@custom_bwd
def backward(ctx, grad_outputs):
row, rowptr, col, value, colptr, csr2csc, quantized, randmat = ctx.saved
row = col if row is None else row
value = col if value is None else value
colptr = col if colptr is None else colptr
csr2csc = col if csr2csc is None else csr2csc
has_value, ori_input_shape, q_input_shape, value_requires_grad, mat_requires_grad = ctx.other_args
other = dequantize_activation(quantized, q_input_shape)
if config.kept_frac < 1.0:
other = rp2input(other, ori_input_shape, randmat)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
grad_value, grad_mat = spmm.spmm_sum_bw(row, rowptr, col, value, colptr, csr2csc, other, grad_outputs,
has_value, value_requires_grad, mat_requires_grad)
del other
empty_cache(config.empty_cache_threshold)
return None, None, None, grad_value, None, None, None, grad_mat, None, None, None
class qspmm_mean(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, row, rowptr, col, value, rowcount, colptr, csr2csc, has_value, other,
quantized=None, randmat=None, scheme=None):
result = spmm.spmm_mean_fw(row, rowptr, col, value, rowcount, colptr, csr2csc, other)
if quantized is not None:
if randmat is None:
assert config.kept_frac == 1.0
# quantized somewhere before without random projection
ori_input_shape, proj_input_shape = other.shape, other.shape
else:
assert config.kept_frac < 1.0 and (other.shape[1] == randmat.shape[0])
ori_input_shape, proj_input_shape = other.shape, torch.Size([other.shape[0], randmat.shape[1]])
# this is quantized random projected data
else:
if config.kept_frac < 1.0:
kept_acts = int(config.kept_frac * other.shape[1] + 0.999)
dim_reduced_input, randmat = input2rp(other, kept_acts)
ori_input_shape, proj_input_shape = other.shape, dim_reduced_input.shape
else:
dim_reduced_input, randmat = other, None
ori_input_shape, proj_input_shape = other.shape, other.shape
quantized = quantize_activation(dim_reduced_input, scheme)
empty_cache(config.empty_cache_threshold)
ctx.saved = row, rowptr, col, value, rowcount, colptr, csr2csc, quantized, randmat
ctx.other_args = has_value, ori_input_shape, proj_input_shape, value.requires_grad if has_value else False, other.requires_grad
ctx.scheme = scheme
return result
@staticmethod
@custom_bwd
def backward(ctx, grad_outputs):
row, rowptr, col, value, rowcount, colptr, csr2csc, quantized, randmat = ctx.saved
row = col if row is None else row
value = col if value is None else value
rowcount = col if rowcount is None else rowcount
colptr = col if colptr is None else colptr
csr2csc = col if csr2csc is None else csr2csc
has_value, ori_input_shape, q_input_shape, value_requires_grad, mat_requires_grad = ctx.other_args
# here is one ugly trick: if we know value does not need gradient,
# we actually do not need the ``other'' matrix to calculate the gradient.
# So here we just pass a dummy matrix to the CUDA kernel.
# TODO: engineering optimization.
if value_requires_grad:
other = dequantize_activation(quantized, q_input_shape)
if config.kept_frac < 1.0:
other = rp2input(other, ori_input_shape, randmat)
else:
if quantized[2].dtype == torch.bfloat16:
dtype = torch.float
else:
dtype = quantized[2].dtype
other = torch.tensor([1.], dtype=dtype, device=quantized[2].device)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
grad_value, grad_mat = spmm.spmm_mean_bw(row, rowptr, col, value, rowcount, colptr, csr2csc, other, grad_outputs,
has_value, value_requires_grad, mat_requires_grad)
del other
empty_cache(config.empty_cache_threshold)
return None, None, None, grad_value, None, None, None, None, grad_mat, None, None, None
class qspmm_max(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, rowptr, col, value, has_value, other,
quantized=None, randmat=None, scheme=None):
output, arg_out = spmm.spmm_max_fw(rowptr, col, value, other)
if quantized is None:
quantized = quantize_activation(other, scheme)
else:
assert isinstance(quantized, tuple)
empty_cache(config.empty_cache_threshold)
ctx.saved = col, value, quantized, arg_out
ctx.other_args = has_value, other.shape, value.requires_grad if has_value else False, other.requires_grad
ctx.mark_non_differentiable(arg_out)
ctx.scheme = scheme
return output
@staticmethod
@custom_bwd
def backward(ctx, grad_outputs):
col, value, quantized, arg_out = ctx.saved
value = col if value is None else value
has_value, q_input_shape, value_requires_grad, mat_requires_grad = ctx.other_args
other = dequantize_activation(quantized, q_input_shape)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
grad_value, grad_mat = spmm.spmm_max_bw(col, value, other, arg_out, grad_outputs,
has_value, value_requires_grad, mat_requires_grad)
return None, None, grad_value, None, grad_mat, None, None, None
class qspmm_min(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float16)
def forward(ctx, rowptr, col, value, has_value, other,
quantized=None, randmat=None, scheme=None):
output, arg_out = spmm.spmm_min_fw(rowptr, col, value, other)
if quantized is None:
quantized = quantize_activation(other, scheme)
else:
assert isinstance(quantized, tuple)
empty_cache(config.empty_cache_threshold)
ctx.saved = col, value, quantized, arg_out
ctx.other_args = has_value, other.shape, value.requires_grad if has_value else False, other.requires_grad
ctx.mark_non_differentiable(arg_out)
ctx.scheme = scheme
return output
@staticmethod
@custom_bwd
def backward(ctx, grad_outputs):
col, value, quantized, arg_out = ctx.saved
value = col if value is None else value
has_value, q_input_shape, value_requires_grad, mat_requires_grad = ctx.other_args
other = dequantize_activation(quantized, q_input_shape)
del quantized, ctx.saved
empty_cache(config.empty_cache_threshold)
# if ctx.scheme:
# ctx.scheme.set_scale(grad_outputs)
grad_value, grad_mat = spmm.spmm_min_bw(col, value, other, arg_out, grad_outputs,
has_value, value_requires_grad, mat_requires_grad)
return None, None, grad_value, None, grad_mat, None, None, None
|
lingeringlight/START
|
models/csm_triton.py
|
https://github.com/lingeringlight/START/blob/898ac4c3a64ac440f5550b3796cc1e876c24bbc2/models/csm_triton.py
|
# triton cross scan, about 2x faster than the pytorch implementation =========================
import torch
import triton
import triton.language as tl
@triton.jit
def triton_cross_scan(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_x = x + i_b * _tmp1 + _tmp2
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
for idxc in range(_for_C):
_idx = idxc * DH * DW
_x = tl.load(p_x + _idx, mask=_mask_hw)
tl.store(p_y1 + _idx, _x, mask=_mask_hw)
tl.store(p_y2 + _idx, _x, mask=_mask_hw)
tl.store(p_y3 + _idx, _x, mask=_mask_hw)
tl.store(p_y4 + _idx, _x, mask=_mask_hw)
tl.debug_barrier()
@triton.jit
def triton_cross_merge(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_x = x + i_b * _tmp1 + _tmp2
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
for idxc in range(_for_C):
_idx = idxc * DH * DW
_y1 = tl.load(p_y1 + _idx, mask=_mask_hw)
_y2 = tl.load(p_y2 + _idx, mask=_mask_hw)
_y3 = tl.load(p_y3 + _idx, mask=_mask_hw)
_y4 = tl.load(p_y4 + _idx, mask=_mask_hw)
tl.store(p_x + _idx, _y1 + _y2 + _y3 + _y4, mask=_mask_hw)
tl.debug_barrier()
@triton.jit
def triton_cross_scan_1b1(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
p_x1 = x + i_b * 4 * _tmp1 + _tmp2
p_x2 = p_x1 + _tmp1
p_x3 = p_x2 + _tmp1
p_x4 = p_x3 + _tmp1
for idxc in range(_for_C):
_idx = idxc * DH * DW
tl.store(p_y1 + _idx, tl.load(p_x1 + _idx), mask=_mask_hw)
tl.store(p_y2 + _idx, tl.load(p_x2 + _idx), mask=_mask_hw)
tl.store(p_y3 + _idx, tl.load(p_x3 + _idx), mask=_mask_hw)
tl.store(p_y4 + _idx, tl.load(p_x4 + _idx), mask=_mask_hw)
tl.debug_barrier()
@triton.jit
def triton_cross_merge_1b1(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
p_x1 = x + i_b * 4 * _tmp1 + _tmp2
p_x2 = p_x1 + _tmp1
p_x3 = p_x2 + _tmp1
p_x4 = p_x3 + _tmp1
for idxc in range(_for_C):
_idx = idxc * DH * DW
tl.store(p_x1 + _idx, tl.load(p_y1 + _idx), mask=_mask_hw)
tl.store(p_x2 + _idx, tl.load(p_y2 + _idx), mask=_mask_hw)
tl.store(p_x3 + _idx, tl.load(p_y3 + _idx), mask=_mask_hw)
tl.store(p_x4 + _idx, tl.load(p_y4 + _idx), mask=_mask_hw)
tl.debug_barrier()
class CrossScanTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
B, C, H, W = x.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y.view(B, 4, C, -1)
@staticmethod
def backward(ctx, y: torch.Tensor):
# out: (b, k, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, C, H, W))
triton_cross_merge[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x
class CrossMergeTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, y: torch.Tensor):
B, K, C, H, W = y.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, C, H, W))
triton_cross_merge[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x.view(B, C, -1)
@staticmethod
def backward(ctx, x: torch.Tensor):
# out: (b, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y
class CrossScanTriton1b1(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
B, K, C, H, W = x.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan_1b1[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y.view(B, 4, C, -1)
@staticmethod
def backward(ctx, y: torch.Tensor):
# out: (b, k, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, 4, C, H, W))
triton_cross_merge_1b1[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x
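# Hedged usage sketch (not part of the original file): CrossScanTriton expands a
# (B, C, H, W) feature map into four scan orders of shape (B, 4, C, H*W), and
# CrossMergeTriton sums the four orders back to (B, C, H*W). Assuming a CUDA device:
def _example_cross_scan_merge():
    x = torch.randn(2, 16, 32, 32, device='cuda')
    y = CrossScanTriton.apply(x)                                   # (2, 4, 16, 1024)
    merged = CrossMergeTriton.apply(y.view(2, 4, 16, 32, 32))      # (2, 16, 1024)
    assert y.shape == (2, 4, 16, 1024) and merged.shape == (2, 16, 1024)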
|
@triton.jit
def triton_cross_scan(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_x = x + i_b * _tmp1 + _tmp2
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
for idxc in range(_for_C):
_idx = idxc * DH * DW
_x = tl.load(p_x + _idx, mask=_mask_hw)
tl.store(p_y1 + _idx, _x, mask=_mask_hw)
tl.store(p_y2 + _idx, _x, mask=_mask_hw)
tl.store(p_y3 + _idx, _x, mask=_mask_hw)
tl.store(p_y4 + _idx, _x, mask=_mask_hw)
tl.debug_barrier()
|
lingeringlight/START
|
models/csm_triton.py
|
https://github.com/lingeringlight/START/blob/898ac4c3a64ac440f5550b3796cc1e876c24bbc2/models/csm_triton.py
|
# triton cross scan, about 2x faster than the pytorch implementation =========================
import torch
import triton
import triton.language as tl
@triton.jit
def triton_cross_scan(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_x = x + i_b * _tmp1 + _tmp2
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
for idxc in range(_for_C):
_idx = idxc * DH * DW
_x = tl.load(p_x + _idx, mask=_mask_hw)
tl.store(p_y1 + _idx, _x, mask=_mask_hw)
tl.store(p_y2 + _idx, _x, mask=_mask_hw)
tl.store(p_y3 + _idx, _x, mask=_mask_hw)
tl.store(p_y4 + _idx, _x, mask=_mask_hw)
tl.debug_barrier()
@triton.jit
def triton_cross_merge(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_x = x + i_b * _tmp1 + _tmp2
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
for idxc in range(_for_C):
_idx = idxc * DH * DW
_y1 = tl.load(p_y1 + _idx, mask=_mask_hw)
_y2 = tl.load(p_y2 + _idx, mask=_mask_hw)
_y3 = tl.load(p_y3 + _idx, mask=_mask_hw)
_y4 = tl.load(p_y4 + _idx, mask=_mask_hw)
tl.store(p_x + _idx, _y1 + _y2 + _y3 + _y4, mask=_mask_hw)
tl.debug_barrier()
@triton.jit
def triton_cross_scan_1b1(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
p_x1 = x + i_b * 4 * _tmp1 + _tmp2
p_x2 = p_x1 + _tmp1
p_x3 = p_x2 + _tmp1
p_x4 = p_x3 + _tmp1
for idxc in range(_for_C):
_idx = idxc * DH * DW
tl.store(p_y1 + _idx, tl.load(p_x1 + _idx), mask=_mask_hw)
tl.store(p_y2 + _idx, tl.load(p_x2 + _idx), mask=_mask_hw)
tl.store(p_y3 + _idx, tl.load(p_x3 + _idx), mask=_mask_hw)
tl.store(p_y4 + _idx, tl.load(p_x4 + _idx), mask=_mask_hw)
tl.debug_barrier()
@triton.jit
def triton_cross_merge_1b1(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
p_x1 = x + i_b * 4 * _tmp1 + _tmp2
p_x2 = p_x1 + _tmp1
p_x3 = p_x2 + _tmp1
p_x4 = p_x3 + _tmp1
for idxc in range(_for_C):
_idx = idxc * DH * DW
tl.store(p_x1 + _idx, tl.load(p_y1 + _idx), mask=_mask_hw)
tl.store(p_x2 + _idx, tl.load(p_y2 + _idx), mask=_mask_hw)
tl.store(p_x3 + _idx, tl.load(p_y3 + _idx), mask=_mask_hw)
tl.store(p_x4 + _idx, tl.load(p_y4 + _idx), mask=_mask_hw)
tl.debug_barrier()
class CrossScanTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
B, C, H, W = x.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y.view(B, 4, C, -1)
@staticmethod
def backward(ctx, y: torch.Tensor):
# out: (b, k, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, C, H, W))
triton_cross_merge[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x
class CrossMergeTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, y: torch.Tensor):
B, K, C, H, W = y.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, C, H, W))
triton_cross_merge[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x.view(B, C, -1)
@staticmethod
def backward(ctx, x: torch.Tensor):
# out: (b, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y
class CrossScanTriton1b1(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
B, K, C, H, W = x.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan_1b1[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y.view(B, 4, C, -1)
@staticmethod
def backward(ctx, y: torch.Tensor):
# out: (b, k, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, 4, C, H, W))
triton_cross_merge_1b1[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x
|
@triton.jit
def triton_cross_merge(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_x = x + i_b * _tmp1 + _tmp2
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
for idxc in range(_for_C):
_idx = idxc * DH * DW
_y1 = tl.load(p_y1 + _idx, mask=_mask_hw)
_y2 = tl.load(p_y2 + _idx, mask=_mask_hw)
_y3 = tl.load(p_y3 + _idx, mask=_mask_hw)
_y4 = tl.load(p_y4 + _idx, mask=_mask_hw)
tl.store(p_x + _idx, _y1 + _y2 + _y3 + _y4, mask=_mask_hw)
tl.debug_barrier()
|
lingeringlight/START
|
models/csm_triton.py
|
https://github.com/lingeringlight/START/blob/898ac4c3a64ac440f5550b3796cc1e876c24bbc2/models/csm_triton.py
|
# triton cross scan, about 2x faster than the pytorch implementation =========================
import torch
import triton
import triton.language as tl
@triton.jit
def triton_cross_scan(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_x = x + i_b * _tmp1 + _tmp2
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
for idxc in range(_for_C):
_idx = idxc * DH * DW
_x = tl.load(p_x + _idx, mask=_mask_hw)
tl.store(p_y1 + _idx, _x, mask=_mask_hw)
tl.store(p_y2 + _idx, _x, mask=_mask_hw)
tl.store(p_y3 + _idx, _x, mask=_mask_hw)
tl.store(p_y4 + _idx, _x, mask=_mask_hw)
tl.debug_barrier()
@triton.jit
def triton_cross_merge(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_x = x + i_b * _tmp1 + _tmp2
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
for idxc in range(_for_C):
_idx = idxc * DH * DW
_y1 = tl.load(p_y1 + _idx, mask=_mask_hw)
_y2 = tl.load(p_y2 + _idx, mask=_mask_hw)
_y3 = tl.load(p_y3 + _idx, mask=_mask_hw)
_y4 = tl.load(p_y4 + _idx, mask=_mask_hw)
tl.store(p_x + _idx, _y1 + _y2 + _y3 + _y4, mask=_mask_hw)
tl.debug_barrier()
@triton.jit
def triton_cross_scan_1b1(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
p_x1 = x + i_b * 4 * _tmp1 + _tmp2
p_x2 = p_x1 + _tmp1
p_x3 = p_x2 + _tmp1
p_x4 = p_x3 + _tmp1
for idxc in range(_for_C):
_idx = idxc * DH * DW
tl.store(p_y1 + _idx, tl.load(p_x1 + _idx), mask=_mask_hw)
tl.store(p_y2 + _idx, tl.load(p_x2 + _idx), mask=_mask_hw)
tl.store(p_y3 + _idx, tl.load(p_x3 + _idx), mask=_mask_hw)
tl.store(p_y4 + _idx, tl.load(p_x4 + _idx), mask=_mask_hw)
tl.debug_barrier()
@triton.jit
def triton_cross_merge_1b1(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
p_x1 = x + i_b * 4 * _tmp1 + _tmp2
p_x2 = p_x1 + _tmp1
p_x3 = p_x2 + _tmp1
p_x4 = p_x3 + _tmp1
for idxc in range(_for_C):
_idx = idxc * DH * DW
tl.store(p_x1 + _idx, tl.load(p_y1 + _idx), mask=_mask_hw)
tl.store(p_x2 + _idx, tl.load(p_y2 + _idx), mask=_mask_hw)
tl.store(p_x3 + _idx, tl.load(p_y3 + _idx), mask=_mask_hw)
tl.store(p_x4 + _idx, tl.load(p_y4 + _idx), mask=_mask_hw)
tl.debug_barrier()
class CrossScanTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
B, C, H, W = x.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y.view(B, 4, C, -1)
@staticmethod
def backward(ctx, y: torch.Tensor):
# out: (b, k, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, C, H, W))
triton_cross_merge[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x
class CrossMergeTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, y: torch.Tensor):
B, K, C, H, W = y.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, C, H, W))
triton_cross_merge[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x.view(B, C, -1)
@staticmethod
def backward(ctx, x: torch.Tensor):
# out: (b, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y
class CrossScanTriton1b1(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
B, K, C, H, W = x.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan_1b1[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y.view(B, 4, C, -1)
@staticmethod
def backward(ctx, y: torch.Tensor):
# out: (b, k, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, 4, C, H, W))
triton_cross_merge_1b1[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x
|
@triton.jit
def triton_cross_scan_1b1(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
p_x1 = x + i_b * 4 * _tmp1 + _tmp2
p_x2 = p_x1 + _tmp1
p_x3 = p_x2 + _tmp1
p_x4 = p_x3 + _tmp1
for idxc in range(_for_C):
_idx = idxc * DH * DW
tl.store(p_y1 + _idx, tl.load(p_x1 + _idx), mask=_mask_hw)
tl.store(p_y2 + _idx, tl.load(p_x2 + _idx), mask=_mask_hw)
tl.store(p_y3 + _idx, tl.load(p_x3 + _idx), mask=_mask_hw)
tl.store(p_y4 + _idx, tl.load(p_x4 + _idx), mask=_mask_hw)
tl.debug_barrier()
|
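The CrossScanTriton / CrossMergeTriton wrappers above launch triton_cross_scan and triton_cross_merge as a forward/adjoint pair. As a quick sanity check, here is a minimal pure-PyTorch sketch of the same four-direction scan; the function name cross_scan_reference and the inline direction comments are ours, assuming the (identity, transposed, flipped, transposed-and-flipped) ordering written by the kernel.
import torch

def cross_scan_reference(x: torch.Tensor) -> torch.Tensor:
    # x: (B, C, H, W) -> y: (B, 4, C, H * W), one flattened sequence per direction
    B, C, H, W = x.shape
    y = x.new_empty((B, 4, C, H * W))
    y[:, 0] = x.flatten(2)                      # row-major scan
    y[:, 1] = x.transpose(2, 3).flatten(2)      # column-major scan
    y[:, 2] = torch.flip(y[:, 0], dims=[-1])    # reversed row-major
    y[:, 3] = torch.flip(y[:, 1], dims=[-1])    # reversed column-major
    return y

# Possible cross-check on a CUDA device (H and W need not be powers of two):
#   x = torch.randn(2, 8, 17, 23, device='cuda')
#   assert torch.allclose(CrossScanTriton.apply(x), cross_scan_reference(x))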
lingeringlight/START
|
models/csm_triton.py
|
https://github.com/lingeringlight/START/blob/898ac4c3a64ac440f5550b3796cc1e876c24bbc2/models/csm_triton.py
|
# triton cross scan, 2x faster than the pytorch implementation =========================
import torch
import triton
import triton.language as tl
@triton.jit
def triton_cross_scan(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_x = x + i_b * _tmp1 + _tmp2
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
for idxc in range(_for_C):
_idx = idxc * DH * DW
_x = tl.load(p_x + _idx, mask=_mask_hw)
tl.store(p_y1 + _idx, _x, mask=_mask_hw)
tl.store(p_y2 + _idx, _x, mask=_mask_hw)
tl.store(p_y3 + _idx, _x, mask=_mask_hw)
tl.store(p_y4 + _idx, _x, mask=_mask_hw)
tl.debug_barrier()
@triton.jit
def triton_cross_merge(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_x = x + i_b * _tmp1 + _tmp2
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
for idxc in range(_for_C):
_idx = idxc * DH * DW
_y1 = tl.load(p_y1 + _idx, mask=_mask_hw)
_y2 = tl.load(p_y2 + _idx, mask=_mask_hw)
_y3 = tl.load(p_y3 + _idx, mask=_mask_hw)
_y4 = tl.load(p_y4 + _idx, mask=_mask_hw)
tl.store(p_x + _idx, _y1 + _y2 + _y3 + _y4, mask=_mask_hw)
tl.debug_barrier()
@triton.jit
def triton_cross_scan_1b1(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
p_x1 = x + i_b * 4 * _tmp1 + _tmp2
p_x2 = p_x1 + _tmp1
p_x3 = p_x2 + _tmp1
p_x4 = p_x3 + _tmp1
for idxc in range(_for_C):
_idx = idxc * DH * DW
tl.store(p_y1 + _idx, tl.load(p_x1 + _idx), mask=_mask_hw)
tl.store(p_y2 + _idx, tl.load(p_x2 + _idx), mask=_mask_hw)
tl.store(p_y3 + _idx, tl.load(p_x3 + _idx), mask=_mask_hw)
tl.store(p_y4 + _idx, tl.load(p_x4 + _idx), mask=_mask_hw)
tl.debug_barrier()
@triton.jit
def triton_cross_merge_1b1(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
p_x1 = x + i_b * 4 * _tmp1 + _tmp2
p_x2 = p_x1 + _tmp1
p_x3 = p_x2 + _tmp1
p_x4 = p_x3 + _tmp1
for idxc in range(_for_C):
_idx = idxc * DH * DW
tl.store(p_x1 + _idx, tl.load(p_y1 + _idx), mask=_mask_hw)
tl.store(p_x2 + _idx, tl.load(p_y2 + _idx), mask=_mask_hw)
tl.store(p_x3 + _idx, tl.load(p_y3 + _idx), mask=_mask_hw)
tl.store(p_x4 + _idx, tl.load(p_y4 + _idx), mask=_mask_hw)
tl.debug_barrier()
class CrossScanTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
B, C, H, W = x.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y.view(B, 4, C, -1)
@staticmethod
def backward(ctx, y: torch.Tensor):
# out: (b, k, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, C, H, W))
triton_cross_merge[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x
class CrossMergeTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, y: torch.Tensor):
B, K, C, H, W = y.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, C, H, W))
triton_cross_merge[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x.view(B, C, -1)
@staticmethod
def backward(ctx, x: torch.Tensor):
# out: (b, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y
class CrossScanTriton1b1(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
B, K, C, H, W = x.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan_1b1[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y.view(B, 4, C, -1)
@staticmethod
def backward(ctx, y: torch.Tensor):
# out: (b, k, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, 4, C, H, W))
triton_cross_merge_1b1[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x
|
@triton.jit
def triton_cross_merge_1b1(
x, # (B, C, H, W)
y, # (B, 4, C, H, W)
BC: tl.constexpr,
BH: tl.constexpr,
BW: tl.constexpr,
DC: tl.constexpr,
DH: tl.constexpr,
DW: tl.constexpr,
NH: tl.constexpr,
NW: tl.constexpr,
):
i_hw, i_c, i_b = tl.program_id(0), tl.program_id(1), tl.program_id(2)
i_h, i_w = (i_hw // NW), (i_hw % NW)
_mask_h = (i_h * BH + tl.arange(0, BH)) < DH
_mask_w = (i_w * BW + tl.arange(0, BW)) < DW
_mask_hw = _mask_h[:, None] & _mask_w[None, :]
_for_C = min(DC - i_c * BC, BC)
_tmp0 = i_c * BC * DH * DW
_tmp1 = DC * DH * DW
_tmp2 = _tmp0 + i_h * BH * DW + tl.arange(0, BH)[:, None] * DW + i_w * BW + tl.arange(0, BW)[None, :]
p_y1 = y + i_b * 4 * _tmp1 + _tmp2 # same
p_y2 = y + i_b * 4 * _tmp1 + _tmp1 + _tmp0 + i_w * BW * DH + tl.arange(0, BW)[None, :] * DH + i_h * BH + tl.arange(
0, BH)[:, None] # trans
p_y3 = y + i_b * 4 * _tmp1 + 2 * _tmp1 + _tmp0 + (NH - i_h - 1) * BH * DW + (
BH - 1 - tl.arange(0, BH)[:, None]) * DW + (NW - i_w - 1) * BW + (
BW - 1 - tl.arange(0, BW)[None, :]) + (DH - NH * BH) * DW + (DW - NW * BW) # flip
p_y4 = y + i_b * 4 * _tmp1 + 3 * _tmp1 + _tmp0 + (NW - i_w - 1) * BW * DH + (
BW - 1 - tl.arange(0, BW)[None, :]) * DH + (NH - i_h - 1) * BH + (
BH - 1 - tl.arange(0, BH)[:, None]) + (DH - NH * BH) + (DW - NW * BW) * DH # trans + flip
p_x1 = x + i_b * 4 * _tmp1 + _tmp2
p_x2 = p_x1 + _tmp1
p_x3 = p_x2 + _tmp1
p_x4 = p_x3 + _tmp1
for idxc in range(_for_C):
_idx = idxc * DH * DW
tl.store(p_x1 + _idx, tl.load(p_y1 + _idx), mask=_mask_hw)
tl.store(p_x2 + _idx, tl.load(p_y2 + _idx), mask=_mask_hw)
tl.store(p_x3 + _idx, tl.load(p_y3 + _idx), mask=_mask_hw)
tl.store(p_x4 + _idx, tl.load(p_y4 + _idx), mask=_mask_hw)
tl.debug_barrier()
class CrossScanTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
B, C, H, W = x.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y.view(B, 4, C, -1)
@staticmethod
def backward(ctx, y: torch.Tensor):
# out: (b, k, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, C, H, W))
triton_cross_merge[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x
class CrossMergeTriton(torch.autograd.Function):
@staticmethod
def forward(ctx, y: torch.Tensor):
B, K, C, H, W = y.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, C, H, W))
triton_cross_merge[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x.view(B, C, -1)
@staticmethod
def backward(ctx, x: torch.Tensor):
# out: (b, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y
class CrossScanTriton1b1(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.Tensor):
B, K, C, H, W = x.shape
B, C, H, W = int(B), int(C), int(H), int(W)
BC, BH, BW = min(triton.next_power_of_2(C), 1), min(triton.next_power_of_2(H), 64), min(
triton.next_power_of_2(W), 64)
NH, NW, NC = triton.cdiv(H, BH), triton.cdiv(W, BW), triton.cdiv(C, BC)
ctx.shape = (B, C, H, W)
ctx.triton_shape = (BC, BH, BW, NC, NH, NW)
x = x.contiguous()
y = x.new_empty((B, 4, C, H, W))
triton_cross_scan_1b1[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return y.view(B, 4, C, -1)
@staticmethod
def backward(ctx, y: torch.Tensor):
# out: (b, k, d, l)
B, C, H, W = ctx.shape
BC, BH, BW, NC, NH, NW = ctx.triton_shape
y = y.contiguous().view(B, 4, C, H, W)
x = y.new_empty((B, 4, C, H, W))
triton_cross_merge_1b1[(NH * NW, NC, B)](x, y, BC, BH, BW, C, H, W, NH, NW)
return x
|
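For the merge direction used in the backward passes above, a matching pure-PyTorch sketch (again with names and comments of our own, under the same direction-ordering assumption) undoes each scan and sums the four contributions:
import torch

def cross_merge_reference(y: torch.Tensor, H: int, W: int) -> torch.Tensor:
    # y: (B, 4, C, H*W) -> (B, C, H*W): undo each scan direction, then sum
    B, K, C, L = y.shape
    y0 = y[:, 0]                                                        # row-major: already aligned
    y1 = y[:, 1].reshape(B, C, W, H).transpose(2, 3).reshape(B, C, L)   # undo the transpose
    y2 = torch.flip(y[:, 2], dims=[-1])                                 # undo the flip
    y3 = torch.flip(y[:, 3], dims=[-1]).reshape(B, C, W, H).transpose(2, 3).reshape(B, C, L)
    return y0 + y1 + y2 + y3

# On a CUDA device, CrossMergeTriton.apply(y.view(B, 4, C, H, W)) should match
# cross_merge_reference(y, H, W) for contiguous inputs.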
eiz/adrastea
|
py/triton/matmul.py
|
https://github.com/eiz/adrastea/blob/e6c80cce32b91aef2a623ab91f725bf2b01c7897/py/triton/matmul.py
|
# This file was part of Triton.
#
# Copyright 2018-2020 Philippe Tillet
# Copyright 2020-2022 OpenAI
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
import triton
import triton.language as tl
from triton.compiler import compile_artifacts
#
# `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes:
# - A list of `triton.Config` objects that define different configurations of
# meta-parameters (e.g., `BLOCK_SIZE_M`) and compilation options (e.g., `num_warps`) to try
# - An auto-tuning *key* whose change in values will trigger evaluation of all the
# provided configs
@triton.autotune(
configs=[
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 64,
"GROUP_SIZE_M": 8,
},
num_stages=3,
num_warps=8,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=5,
num_warps=2,
),
triton.Config(
{
"BLOCK_SIZE_M": 32,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=5,
num_warps=2,
),
],
key=["M", "N", "K"],
)
@triton.jit
def matmul_kernel(
# Pointers to matrices
a_ptr,
b_ptr,
c_ptr,
# Matrix dimensions
M,
N,
K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am,
stride_ak,
stride_bk,
stride_bn,
stride_cm,
stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
ACTIVATION: tl.constexpr,
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetics` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
accumulator += tl.dot(a, b)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
# You can fuse arbitrary activation functions here
# while the accumulator is still in FP32!
if ACTIVATION == "leaky_relu":
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul`.
@triton.jit
def leaky_relu(x):
x = x + 1
return tl.where(x >= 0, x, 0.01 * x)
# %%
# We can now create a convenience wrapper function that only takes two input tensors,
# and (1) checks any shape constraint; (2) allocates the output; (3) launches the above kernel.
def matmul(a, b, activation=""):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
assert a.is_contiguous(), "Matrix A must be contiguous"
assert b.is_contiguous(), "Matrix B must be contiguous"
M, K = a.shape
K, N = b.shape
# Allocates output.
c = torch.empty((M, N), device=a.device, dtype=a.dtype)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (
triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
)
matmul_kernel[grid](
a,
b,
c,
M,
N,
K,
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
c.stride(0),
c.stride(1),
ACTIVATION=activation,
)
return c
|
@triton.jit
def matmul_kernel(
# Pointers to matrices
a_ptr,
b_ptr,
c_ptr,
# Matrix dimensions
M,
N,
K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am,
stride_ak,
stride_bk,
stride_bn,
stride_cm,
stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
ACTIVATION: tl.constexpr,
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetics` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
accumulator += tl.dot(a, b)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
# You can fuse arbitrary activation functions here
# while the accumulator is still in FP32!
if ACTIVATION == "leaky_relu":
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul`.
|
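The grouped pid-to-tile mapping at the top of matmul_kernel is what drives the L2-reuse behaviour mentioned in its comments. A small host-side sketch of the same arithmetic (a helper of our own that reuses the kernel's formulas verbatim) makes the traversal order easy to print and inspect:
def grouped_pid_order(M, N, BLOCK_SIZE_M, BLOCK_SIZE_N, GROUP_SIZE_M):
    # Reproduces the pid -> (pid_m, pid_n) mapping from matmul_kernel on the host.
    cdiv = lambda a, b: (a + b - 1) // b
    num_pid_m, num_pid_n = cdiv(M, BLOCK_SIZE_M), cdiv(N, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    order = []
    for pid in range(num_pid_m * num_pid_n):
        group_id = pid // num_pid_in_group
        first_pid_m = group_id * GROUP_SIZE_M
        group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
        pid_m = first_pid_m + (pid % group_size_m)
        pid_n = (pid % num_pid_in_group) // group_size_m
        order.append((pid_m, pid_n))
    return order

# grouped_pid_order(256, 256, 64, 64, GROUP_SIZE_M=2) starts
# (0, 0), (1, 0), (0, 1), (1, 1), ...: two tile-rows of C are swept together,
# so each loaded block of B is reused by GROUP_SIZE_M programs.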
eiz/adrastea
|
py/triton/matmul.py
|
https://github.com/eiz/adrastea/blob/e6c80cce32b91aef2a623ab91f725bf2b01c7897/py/triton/matmul.py
|
# This file was part of Triton.
#
# Copyright 2018-2020 Philippe Tillet
# Copyright 2020-2022 OpenAI
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
import triton
import triton.language as tl
from triton.compiler import compile_artifacts
#
# `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes:
# - A list of `triton.Config` objects that define different configurations of
# meta-parameters (e.g., `BLOCK_SIZE_M`) and compilation options (e.g., `num_warps`) to try
# - An auto-tuning *key* whose change in values will trigger evaluation of all the
# provided configs
@triton.autotune(
configs=[
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 64,
"GROUP_SIZE_M": 8,
},
num_stages=3,
num_warps=8,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=5,
num_warps=2,
),
triton.Config(
{
"BLOCK_SIZE_M": 32,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=5,
num_warps=2,
),
],
key=["M", "N", "K"],
)
@triton.jit
def matmul_kernel(
# Pointers to matrices
a_ptr,
b_ptr,
c_ptr,
# Matrix dimensions
M,
N,
K,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
# by to get the element one row down (A has M rows).
stride_am,
stride_ak,
stride_bk,
stride_bn,
stride_cm,
stride_cn,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
ACTIVATION: tl.constexpr,
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
# -----------------------------------------------------------
# Map program ids `pid` to the block of C it should compute.
# This is done in a grouped ordering to promote L2 data reuse.
# See above `L2 Cache Optimizations` section for details.
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# ----------------------------------------------------------
# Create pointers for the first blocks of A and B.
# We will advance this pointer as we move in the K direction
# and accumulate
# `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
# `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
# See above `Pointer Arithmetics` section for details
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
# -----------------------------------------------------------
# Iterate to compute a block of the C matrix.
# We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
# of fp32 values for higher accuracy.
# `accumulator` will be converted back to fp16 after the loop.
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
# Load the next block of A and B, generate a mask by checking the K dimension.
# If it is out of bounds, set it to 0.
a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
# We accumulate along the K dimension.
accumulator += tl.dot(a, b)
# Advance the ptrs to the next K block.
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs += BLOCK_SIZE_K * stride_bk
# You can fuse arbitrary activation functions here
# while the accumulator is still in FP32!
if ACTIVATION == "leaky_relu":
accumulator = leaky_relu(accumulator)
c = accumulator.to(tl.float16)
# -----------------------------------------------------------
# Write back the block of the output matrix C with masks.
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul`.
@triton.jit
def leaky_relu(x):
x = x + 1
return tl.where(x >= 0, x, 0.01 * x)
# %%
# We can now create a convenience wrapper function that only takes two input tensors,
# and (1) checks any shape constraint; (2) allocates the output; (3) launches the above kernel.
def matmul(a, b, activation=""):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
assert a.is_contiguous(), "Matrix A must be contiguous"
assert b.is_contiguous(), "Matrix B must be contiguous"
M, K = a.shape
K, N = b.shape
# Allocates output.
c = torch.empty((M, N), device=a.device, dtype=a.dtype)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (
triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
)
matmul_kernel[grid](
a,
b,
c,
M,
N,
K,
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
c.stride(0),
c.stride(1),
ACTIVATION=activation,
)
return c
|
@triton.jit
def leaky_relu(x):
x = x + 1
return tl.where(x >= 0, x, 0.01 * x)
# %%
# We can now create a convenience wrapper function that only takes two input tensors,
# and (1) checks any shape constraint; (2) allocates the output; (3) launches the above kernel.
def matmul(a, b, activation=""):
# Check constraints.
assert a.shape[1] == b.shape[0], "Incompatible dimensions"
assert a.is_contiguous(), "Matrix A must be contiguous"
assert b.is_contiguous(), "Matrix B must be contiguous"
M, K = a.shape
K, N = b.shape
# Allocates output.
c = torch.empty((M, N), device=a.device, dtype=a.dtype)
# 1D launch kernel where each block gets its own program.
grid = lambda META: (
triton.cdiv(M, META["BLOCK_SIZE_M"]) * triton.cdiv(N, META["BLOCK_SIZE_N"]),
)
matmul_kernel[grid](
a,
b,
c,
M,
N,
K,
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
c.stride(0),
c.stride(1),
ACTIVATION=activation,
)
return c
|
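A short usage sketch for the matmul() wrapper above (our own snippet; it assumes an fp16-capable CUDA device, and the max-difference print is only illustrative):
import torch

if torch.cuda.is_available():
    torch.manual_seed(0)
    a = torch.randn((512, 512), device="cuda", dtype=torch.float16)
    b = torch.randn((512, 512), device="cuda", dtype=torch.float16)
    c_triton = matmul(a, b)              # first call also triggers the autotuner
    c_torch = torch.matmul(a, b)
    print("max abs diff:", (c_triton - c_torch).abs().max().item())
    # Passing activation="leaky_relu" fuses the epilogue into the same kernel:
    c_fused = matmul(a, b, activation="leaky_relu")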
lweitkamp/triton_exercises
|
code/sum_row.py
|
https://github.com/lweitkamp/triton_exercises/blob/da201eab871cc03453f6d4b70956ace092c3f8b5/code/sum_row.py
|
import pytest
import torch
import triton
import triton.language as tl
@triton.jit
def sum_row_kernel(
A_ptr: tl.tensor, outputs_ptr: tl.tensor,
M: tl.constexpr, N: tl.constexpr,
input_stride_x, input_stride_y,
):
"""Calculate the sum of a row of the input tensor, storing the result in
the output. We assume the input row fits into SRAM.
Args:
A_ptr: Pointer to the input tensor.
outputs_ptr: Pointer to the output tensor.
M: Number of rows in the input tensor.
N: Number of columns in the input tensor.
input_stride_x: Stride of the input tensor along the row dim.
input_stride_y: Stride of the input tensor along the column dim.
"""
program_id = tl.program_id(axis=0)
input_block_ptr = tl.make_block_ptr(
base=A_ptr,
shape=(M, N),
strides=(input_stride_x, input_stride_y),
offsets=(program_id, 0),
block_shape=(1, N),
order=(1, 0),
)
output_block_ptr = tl.make_block_ptr(
base=outputs_ptr,
shape=(M, ),
strides=(1, ),
offsets=(program_id, ),
block_shape=(1, ),
order=(0, ),
)
input_block = tl.load(input_block_ptr)
tl.store(output_block_ptr, tl.sum(input_block))
def sum_row(inputs: torch.Tensor) -> torch.Tensor:
"""Calculate the sum of a tensor along the final dim.
Args:
inputs: Tensor of shape (M, N) containing the input values.
Returns:
Tensor of shape (M, ) containing the summed values.
"""
M, N = inputs.shape
outputs = torch.empty((M,), dtype=inputs.dtype, device=inputs.device)
sum_row_kernel[(M, )](
A_ptr=inputs, outputs_ptr=outputs,
M=M, N=N,
input_stride_x=inputs.stride(0), input_stride_y=inputs.stride(1),
)
return outputs
@pytest.mark.parametrize("M, N", [(16, 16), (32, 16)])
def test_sum_row(M: int, N: int):
inputs = torch.randn((M, N), device='cuda')
outputs = sum_row(inputs)
torch.testing.assert_close(inputs.sum(dim=1), outputs)
|
@triton.jit
def sum_row_kernel(
A_ptr: tl.tensor, outputs_ptr: tl.tensor,
M: tl.constexpr, N: tl.constexpr,
input_stride_x, input_stride_y,
):
"""Calculate the sum of a row of the input tensor, storing the result in
the output. We assume the input row fits into SRAM.
Args:
A_ptr: Pointer to the input tensor.
outputs_ptr: Pointer to the output tensor.
M: Number of rows in the input tensor.
N: Number of columns in the input tensor.
input_stride_x: Stride of the input tensor along the row dim.
input_stride_y: Stride of the input tensor along the column dim.
"""
program_id = tl.program_id(axis=0)
input_block_ptr = tl.make_block_ptr(
base=A_ptr,
shape=(M, N),
strides=(input_stride_x, input_stride_y),
offsets=(program_id, 0),
block_shape=(1, N),
order=(1, 0),
)
output_block_ptr = tl.make_block_ptr(
base=outputs_ptr,
shape=(M, ),
strides=(1, ),
offsets=(program_id, ),
block_shape=(1, ),
order=(0, ),
)
input_block = tl.load(input_block_ptr)
tl.store(output_block_ptr, tl.sum(input_block))
def sum_row(inputs: torch.Tensor) -> torch.Tensor:
"""Calculate the sum of a tensor along the final dim.
Args:
inputs: Tensor of shape (M, N) containing the input values.
Returns:
Tensor of shape (M, ) containing the summed values.
"""
M, N = inputs.shape
outputs = torch.empty((M,), dtype=inputs.dtype, device=inputs.device)
sum_row_kernel[(M, )](
A_ptr=inputs, outputs_ptr=outputs,
M=M, N=N,
input_stride_x=inputs.stride(0), input_stride_y=inputs.stride(1),
)
return outputs
@pytest.mark.parametrize("M, N", [(16, 16), (32, 16)])
def test_sum_row(M: int, N: int):
inputs = torch.randn((M, N), device='cuda')
outputs = sum_row(inputs)
torch.testing.assert_close(inputs.sum(dim=1), outputs)
|
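The block-pointer version above loads the whole row as a single (1, N) block, which works when N is a power of two and the row fits in SRAM. A masked-pointer variant (our own sketch: the kernel name, BLOCK_N value and wrapper are not from the repository, and it assumes the last dimension is contiguous) shows the usual way to handle arbitrary N:
import torch
import triton
import triton.language as tl

@triton.jit
def sum_row_masked_kernel(A_ptr, out_ptr, N, stride_row, BLOCK_N: tl.constexpr):
    row = tl.program_id(axis=0)
    cols = tl.arange(0, BLOCK_N)
    acc = tl.zeros((BLOCK_N,), dtype=tl.float32)
    # Walk the row in BLOCK_N-sized steps, masking the tail block.
    for start in range(0, N, BLOCK_N):
        vals = tl.load(A_ptr + row * stride_row + start + cols,
                       mask=start + cols < N, other=0.0)
        acc += vals.to(tl.float32)
    tl.store(out_ptr + row, tl.sum(acc))

def sum_row_masked(inputs: torch.Tensor) -> torch.Tensor:
    M, N = inputs.shape
    out = torch.empty((M,), dtype=torch.float32, device=inputs.device)
    sum_row_masked_kernel[(M,)](inputs, out, N, inputs.stride(0), BLOCK_N=128)
    return out.to(inputs.dtype)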
spikerheado1234/triton-fun
|
SPLAT/r_sddmm.py
|
https://github.com/spikerheado1234/triton-fun/blob/889b72fd892adbc12cf6bee891e17d3a8b6253eb/SPLAT/r_sddmm.py
|
"""
This is a fun implementation of the RSDDMM kernel described in my paper, SPLAT.
"""
import triton
import triton.language as tl
from acsr_helpers import create_blocked_mask, create_acsr
from functools import reduce
import torch
import pdb
import time
from typing import Any
## This is a matrix multiplication of: m*k by k*n -> m*n matrix. NOTE, this is a general mat-mul kernel.
@triton.jit
def rsddmm_kernel(x_ptr, y_ptr,
out_ptr, dTos_linear_trf, dTos_translations,
sTod_linear_trf, sTod_translations, nnzs,
m, n, k, trailing_dim, tb_mapping_x, tb_mapping_y,
BLOCK_SIZE_Y : tl.constexpr, BLOCK_SIZE_X : tl.constexpr):
bx = tl.program_id(axis=0)
by = tl.program_id(axis=1)
batch_head_offset_x_input = by * m * k
batch_head_offset_y_input = by * n * k
batch_head_offset_output = by * m * trailing_dim
## We first unpack the tb_maps to uncover the top left x and y coordinate.
bx_start = tl.load(tb_mapping_x+bx, mask=True)
by_start = tl.load(tb_mapping_y+bx, mask=True)
bx_start = bx_start.to(tl.int32)
by_start = by_start.to(tl.int32)
inner_tile_dim : tl.constexpr = 128
x_ptrs = batch_head_offset_x_input + by_start*k + tl.arange(0, BLOCK_SIZE_Y)[:,None]*k + tl.arange(0, inner_tile_dim)[None,:]
y_ptrs = batch_head_offset_y_input + bx_start + tl.arange(0, inner_tile_dim)[:,None]*n + tl.arange(0, BLOCK_SIZE_X)[None,:]
accumulator = tl.zeros((BLOCK_SIZE_Y, BLOCK_SIZE_X), dtype=tl.float32)
for i in range(tl.cdiv(k, inner_tile_dim)):
## Let's do this naively at first.
mask_x_ptrs = i*inner_tile_dim + tl.arange(0, inner_tile_dim)[None,:] < k ## The first constraint
mask_x_ptrs = mask_x_ptrs & (tl.arange(0, BLOCK_SIZE_Y)[:,None] + by_start < m)
mask_y_ptrs = i*inner_tile_dim + tl.arange(0, inner_tile_dim)[:, None] < k
mask_y_ptrs = mask_y_ptrs & (tl.arange(0, BLOCK_SIZE_X)[None, :] + bx_start < n)
x_tile = tl.load(x_ptr + x_ptrs, mask=mask_x_ptrs, other=0.0)
y_tile = tl.load(y_ptr + y_ptrs, mask=mask_y_ptrs, other=0.0)
accumulator += tl.dot(x_tile, y_tile, allow_tf32=True)
## Increment x and y pointers here now.
x_ptrs += inner_tile_dim
y_ptrs += inner_tile_dim*n
accumulator = accumulator.to(out_ptr.dtype.element_ty)
## This uses the sTOd affine-indices for scaling the indices of where to store.
linear_transforms = tl.load(sTod_linear_trf+by_start+tl.arange(0,BLOCK_SIZE_Y),
mask=by_start+tl.arange(0,BLOCK_SIZE_Y)<m, other=1.0)
translations = tl.load(sTod_translations+by_start+tl.arange(0, BLOCK_SIZE_Y),
mask=by_start+tl.arange(0,BLOCK_SIZE_Y)<m,other=0.0)
nnz = tl.load(nnzs+by_start+tl.arange(0,BLOCK_SIZE_Y),
mask=by_start+tl.arange(0,BLOCK_SIZE_Y)<m, other=0.0)
## Now, we have to use these to recover the true-indices.
## We do this in 5 steps.
## First: we compute the col_indices pertinent to this TB.
## Second: we scale the col_indices using the linear_transforms and translations array.
## Third: We convert the col_indices into ptrs.
## Fourth: We generate the mask.
## Fifth: We store into the ACSR array.
## Step 1
## Interestingly, this line throws a ValueError thinking it's not wrapped
## within a triton jitted function. We use tl.zeros instead.
#col_idx = tl.full((BLOCK_SIZE_Y,), 0, tl.int32)
col_idx = tl.zeros((BLOCK_SIZE_Y,), dtype=tl.int32)
col_idx = col_idx[:,None] + tl.arange(0, BLOCK_SIZE_X)[None,:] + bx_start
## Step 2
col_idx /= linear_transforms[:,None]
## Interesting bug. Setting interpreter=True,
## tl.int64 throws an error whilst torch.int64 does not. Turning off interpreter mode, the reverse is true.
#col_idx -= translations[:,None].to(torch.int64)
col_idx -= translations[:,None].to(tl.int64)
## Step 3
output_ptrs = col_idx + tl.arange(0, BLOCK_SIZE_Y)[:,None]*trailing_dim + by_start*trailing_dim
## Type casting required for tl.store compatibility.
## Interesting bug. Setting interpreter=True,
## tl.int64 throws an error whilst torch.int64 does not. Turning off interpreter mode, the reverse is true.
#output_ptrs = output_ptrs.to(torch.int64)
output_ptrs = output_ptrs.to(tl.int64) + batch_head_offset_output
## Step 4.
## First, we check for OOB conditions due to translations.
output_mask = col_idx >= 0
## Next, we check if a column index maps to a valid contraction (modulo check).
## Unfortunately, broadcast semantics don't apply to the "==" operator.
## So we have to design a new boolean operator: ~op1 && ~op2
'''For some reason, this is no longer working. We replace it with the equivalent col_idx % linear_transforms[:, None] == 0 check.
op_one = (col_idx % linear_transforms[:, None]).to(tl.int64) > 0
op_two = (col_idx % linear_transforms[:,None]).to(tl.int64) < 0
output_mask = output_mask & ((not op_one) & (not op_two))
'''
output_mask = output_mask & (col_idx % linear_transforms[:, None].to(tl.int64) == 0)
## Lastly, we check for OOB due to exceeding nnz count.
output_mask = output_mask & (col_idx < nnz[:,None])
tl.store(out_ptr + output_ptrs, accumulator, mask=output_mask)
def naive_block_mappings(mask : list[list[int]], BLOCK_HEIGHT : int, BLOCK_WIDTH : int, GPU_ID : int) -> tuple[torch.Tensor, torch.Tensor]:
x_coords = []
y_coords = []
## We populate the anchor points.
## We place x coord in x_coords
## We place y coord in y_coords
for block_number in range(max((len(mask)//BLOCK_HEIGHT), 1)):
## Now we have to find the min and max col_idxs for the block_number. ##
min_col_idx = len(mask[0])
max_col_idx = 0
for row in range(block_number*BLOCK_HEIGHT, (block_number+1)*BLOCK_HEIGHT):
for col in range(len(mask[0])):
if row < len(mask): ## Check required due to irregular boundary conditions.
if mask[row][col]:
min_col_idx = min(min_col_idx, col)
max_col_idx = max(max_col_idx, col)
## Now, after we have found min_col_idx & max_col_idx,
## we compute the thread-block anchor-points.
curr_idx = min_col_idx
while curr_idx < max_col_idx:
x_coords.append(curr_idx)
y_coords.append(block_number*BLOCK_HEIGHT)
curr_idx += BLOCK_WIDTH
assert len(x_coords) == len(y_coords) and len(x_coords) > 0, "Issues with generating arrangement!"
return (torch.tensor(x_coords, dtype=torch.int32).to(GPU_ID), torch.tensor(y_coords, dtype=torch.int32).to(GPU_ID))
## for now, we just do a simple naive tiling, TODO, change to SPLAT's special tiling later.
def gen_block_mappings(mask : list[list[int]], BLOCK_HEIGHT : int,
BLOCK_WIDTH : int, GPU_ID : int, is_naive : bool = True) -> tuple[torch.Tensor, torch.Tensor]:
return naive_block_mappings(mask, BLOCK_HEIGHT, BLOCK_WIDTH, GPU_ID)
def rsddmm_preamble(mask : list[list[int]], output_shape: tuple[int], BLOCK_SIZE_X : int,
BLOCK_SIZE_Y : int, GPU_ID : int, out_dtype : torch.dtype):
output : torch.Tensor = torch.empty((output_shape), dtype=out_dtype).to(GPU_ID)
## Next, we compute the tiling blocks.
tb_map_x, tb_map_y = gen_block_mappings(mask, BLOCK_SIZE_Y, BLOCK_SIZE_X, GPU_ID)
assert tb_map_x.shape == tb_map_y.shape, "Incorrect tiling arrangement!"
## Finally, we can launch the kernel
grid_dim = (tb_map_x.shape[0],output_shape[0]*output_shape[1])
return (
output, grid_dim, tb_map_x, tb_map_y
)
def rsddmm_launcher(x : torch.Tensor, y : torch.Tensor, output : torch.Tensor,
dTos_linear_transformations : torch.Tensor, dTos_translations : torch.Tensor,
sTod_linear_transformations : torch.Tensor, sTod_translations : torch.Tensor,
trailing_dim : int, nnzs : torch.Tensor, grid_dim : tuple[int],
tb_map_x : torch.Tensor, tb_map_y : torch.Tensor,
BLOCK_SIZE_Y : int, BLOCK_SIZE_X : int) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
rsddmm_kernel[grid_dim](x,y,output,
dTos_linear_transformations,dTos_translations,
sTod_linear_transformations,sTod_translations,nnzs,
x.shape[2],y.shape[3],x.shape[3], trailing_dim, tb_map_x, tb_map_y,
BLOCK_SIZE_Y=BLOCK_SIZE_Y, BLOCK_SIZE_X=BLOCK_SIZE_X, num_warps=2)
## We return the sTod arrays for correctness checking only.
return (output, sTod_linear_transformations, sTod_translations, nnzs)
def truth(x : torch.Tensor, y: torch.Tensor, GPU_ID : int) -> torch.Tensor:
return torch.einsum('bnqd, bndk -> bnqk', x, y)
#return torch.matmul(x,y).to(GPU_ID)
## Define checker later, figure out good practice. TODO.
def is_correct(out_torch : torch.Tensor, out_rsddmm : torch.Tensor,
sTod_linear_transofrmations : torch.Tensor,
sTod_translations : torch.Tensor, nnzs: torch.Tensor,
batch_size : int, num_heads : int,
mask : list[list[int]]) -> bool:
out_torch_list = out_torch.tolist() ## Question: What are the sizes of these tensors?!
out_rsddmm_list = out_rsddmm.tolist()
sTod_linear_transformations_list = sTod_linear_transofrmations.tolist()
sTod_translations_list = sTod_translations.tolist()
nnzs_list = nnzs.tolist()
num_deviations : int = 0
mse_error : float = 0
for b in range(batch_size):
for h in range(num_heads):
for row in range(len(mask)):
for nnz_col_id in range(len(out_rsddmm_list[0][0][0])):
## We convert to the dense index.
dense_col_id : int = round(nnz_col_id * sTod_linear_transformations_list[row] + sTod_translations_list[row])
if nnz_col_id < nnzs_list[row] and abs(out_torch_list[b][h][row][dense_col_id] - out_rsddmm_list[b][h][row][nnz_col_id]) > 1e-3:
#print(f'failed at: {row} {dense_col_id}')
mse_error += abs(out_torch_list[b][h][row][dense_col_id] - out_rsddmm_list[b][h][row][nnz_col_id])
num_deviations += 1
if num_deviations > 0:
print(f'test case failed average mse: {mse_error}')
return False
else:
print(f'test case passed!')
return True
## Multiply a: m*k and k*n matrix.
def test(m: int, k : int, n : int, num_heads : int, batch_size : int,
mask : list[list[int]], GPU_ID : int, BLOCK_SIZE_Y : int, BLOCK_SIZE_X : int, out_dtype : torch.dtype):
## Some simple test-cases for me to try out.
assert m==n, "We only need to consider the case when m=n."
#left : torch.Tensor = torch.randn((m,k),dtype=torch.float32).to(GPU_ID)
#right : torch.Tensor = torch.randn((k,n),dtype=torch.float32).to(GPU_ID)
left : torch.Tensor = torch.randint(0, 100, (batch_size,num_heads,m,k),dtype=out_dtype).to(GPU_ID)
right : torch.Tensor = torch.randint(0, 100, (batch_size,num_heads,k,n),dtype=out_dtype).to(GPU_ID)
dTos_linear_transformations, dTos_translations, \
sTod_linear_transformations, sTod_translations, nnzs, \
acsr_trailing_dimension, _, _ = create_acsr(
mask, BLOCK_SIZE_X, GPU_ID
)
output_tensor, grid_dim, \
tb_map_x, tb_map_y = rsddmm_preamble(mask, (batch_size, num_heads, m, acsr_trailing_dimension),
BLOCK_SIZE_X, BLOCK_SIZE_Y, GPU_ID, out_dtype)
## Call the rsddmm launcher.
rsddmm_output, sTod_linear_transformations, \
sTod_translations, nnzs = rsddmm_launcher(left, right, output_tensor,
dTos_linear_transformations, dTos_translations,
sTod_linear_transformations, sTod_translations,
acsr_trailing_dimension, nnzs, grid_dim,
tb_map_x, tb_map_y,
BLOCK_SIZE_Y, BLOCK_SIZE_X)
## Verify correctness.
torch_output = truth(left, right, GPU_ID)
is_correct(torch_output, rsddmm_output,
sTod_linear_transformations, sTod_translations,
nnzs, batch_size, num_heads, mask)
if __name__ == "__main__":
## Just a sample unit test over here.
## Small unit-tests
def test_one():
## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 10
m: int = 10
k: int = 10
p: int = 2 ## Sparsity parameter.
GPU_ID : Any = 0
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, torch.bfloat16)
def test_two():
## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 10
m: int = 10
k: int = 10
p: int = 5 ## Sparsity parameter.
GPU_ID : Any = 'cpu'
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
def test_three():
## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 10
m: int = 10
k: int = 10
p: int = 7 ## Sparsity parameter.
GPU_ID : Any = 'cpu'
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
def test_four():
## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 16
m: int = 16
k: int = 16
p: int = 5 ## Sparsity parameter.
GPU_ID : Any = 'cpu'
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
def test_five():
## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 16
m: int = 16
k: int = 16
p: int = 16 ## Sparsity parameter.
GPU_ID : int = 0
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
def test_six():
## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 32
m: int = 32
k: int = 32
p: int = 10 ## Sparsity parameter.
GPU_ID : int = 0
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
def test_seven():
## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 32
m: int = 32
k: int = 32
p: int = 20 ## Sparsity parameter.
GPU_ID : int = 0
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
def test_eight():
## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 32
m: int = 32
k: int = 32
p: int = 32 ## Sparsity parameter.
GPU_ID : int = 0
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
def test_nine():
## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 128
m: int = 128
k: int = 128
p: int = 57 ## Sparsity parameter.
GPU_ID : int = 0
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
## These are pretty small tests.
test_one()
test_two()
test_three()
test_four()
test_five()
test_six()
test_seven()
test_eight()
test_nine()
## Larger tests.
def test_ten():
## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 1024
m: int = 1024
k: int = 1024
p: int = 256 ## Sparsity parameter.
GPU_ID : int = 0
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
def test_eleven():
## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 1024
m: int = 1024
k: int = 1024
p: int = 328 ## Sparsity parameter.
GPU_ID : int = 0
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
def test_twelve():
## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 1024
m: int = 1024
k: int = 1024
p: int = 512 ## Sparsity parameter.
GPU_ID : int = 0
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
test_ten()
test_eleven()
test_twelve()
|
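The per-row ACSR metadata used above (sTod_linear_transformations, sTod_translations, nnzs) encodes an affine map from sparse to dense column indices. Here is a tiny host-side sketch of that map, following the convention used in is_correct() (dense_col = round(sparse_col * linear + translation)); the helper name and example numbers are ours:
def sparse_to_dense_cols(linear: float, translation: float, nnz: int, trailing_dim: int):
    # Dense column for each ACSR (sparse) column of one row; None marks padding slots.
    return [round(c * linear + translation) if c < nnz else None
            for c in range(trailing_dim)]

# e.g. sparse_to_dense_cols(linear=2.0, translation=1.0, nnz=3, trailing_dim=5)
#  -> [1, 3, 5, None, None]: the row's three stored values sit at dense
#     columns 1, 3 and 5, and the trailing two slots are padding.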
@triton.jit
def rsddmm_kernel(x_ptr, y_ptr,
out_ptr, dTos_linear_trf, dTos_translations,
sTod_linear_trf, sTod_translations, nnzs,
m, n, k, trailing_dim, tb_mapping_x, tb_mapping_y,
BLOCK_SIZE_Y : tl.constexpr, BLOCK_SIZE_X : tl.constexpr):
bx = tl.program_id(axis=0)
by = tl.program_id(axis=1)
batch_head_offset_x_input = by * m * k
batch_head_offset_y_input = by * n * k
batch_head_offset_output = by * m * trailing_dim
## We first unpack the tb_maps to uncover the top left x and y coordinate.
bx_start = tl.load(tb_mapping_x+bx, mask=True)
by_start = tl.load(tb_mapping_y+bx, mask=True)
bx_start = bx_start.to(tl.int32)
by_start = by_start.to(tl.int32)
inner_tile_dim : tl.constexpr = 128
x_ptrs = batch_head_offset_x_input + by_start*k + tl.arange(0, BLOCK_SIZE_Y)[:,None]*k + tl.arange(0, inner_tile_dim)[None,:]
y_ptrs = batch_head_offset_y_input + bx_start + tl.arange(0, inner_tile_dim)[:,None]*n + tl.arange(0, BLOCK_SIZE_X)[None,:]
accumulator = tl.zeros((BLOCK_SIZE_Y, BLOCK_SIZE_X), dtype=tl.float32)
for i in range(tl.cdiv(k, inner_tile_dim)):
## Let's do this naively at first.
mask_x_ptrs = i*inner_tile_dim + tl.arange(0, inner_tile_dim)[None,:] < k ## The first constraint
mask_x_ptrs = mask_x_ptrs & (tl.arange(0, BLOCK_SIZE_Y)[:,None] + by_start < m)
mask_y_ptrs = i*inner_tile_dim + tl.arange(0, inner_tile_dim)[:, None] < k
mask_y_ptrs = mask_y_ptrs & (tl.arange(0, BLOCK_SIZE_X)[None, :] + bx_start < n)
x_tile = tl.load(x_ptr + x_ptrs, mask=mask_x_ptrs, other=0.0)
y_tile = tl.load(y_ptr + y_ptrs, mask=mask_y_ptrs, other=0.0)
accumulator += tl.dot(x_tile, y_tile, allow_tf32=True)
## Increment x and y pointers here now.
x_ptrs += inner_tile_dim
y_ptrs += inner_tile_dim*n
accumulator = accumulator.to(out_ptr.dtype.element_ty)
## This uses the sTOd affine-indices for scaling the indices of where to store.
linear_transforms = tl.load(sTod_linear_trf+by_start+tl.arange(0,BLOCK_SIZE_Y),
mask=by_start+tl.arange(0,BLOCK_SIZE_Y)<m, other=1.0)
translations = tl.load(sTod_translations+by_start+tl.arange(0, BLOCK_SIZE_Y),
mask=by_start+tl.arange(0,BLOCK_SIZE_Y)<m,other=0.0)
nnz = tl.load(nnzs+by_start+tl.arange(0,BLOCK_SIZE_Y),
mask=by_start+tl.arange(0,BLOCK_SIZE_Y)<m, other=0.0)
## Now, we have to use these to recover the true-indices.
## We do this in 5 steps.
## First: we compute the col_indices pertinent to this TB.
## Second: we scale the col_indices using the linear_transforms and translations array.
## Third: We convert the col_indices into ptrs.
## Fourth: We generate the mask.
## Fifth: We store into the ACSR array.
## Step 1
    ## Interestingly, this line throws a ValueError claiming it is not wrapped
    ## within a Triton-jitted function, so we use tl.zeros instead.
#col_idx = tl.full((BLOCK_SIZE_Y,), 0, tl.int32)
col_idx = tl.zeros((BLOCK_SIZE_Y,), dtype=tl.int32)
col_idx = col_idx[:,None] + tl.arange(0, BLOCK_SIZE_X)[None,:] + bx_start
## Step 2
col_idx /= linear_transforms[:,None]
    ## Interesting bug. Setting interpreter=True,
## tl.int64 throws an error whilst torch.int64 does not. Turning off interpreter mode, the reverse is true.
#col_idx -= translations[:,None].to(torch.int64)
col_idx -= translations[:,None].to(tl.int64)
## Step 3
output_ptrs = col_idx + tl.arange(0, BLOCK_SIZE_Y)[:,None]*trailing_dim + by_start*trailing_dim
    ## Type casting required for tl.store compatibility.
    ## Interesting bug. Setting interpreter=True,
## tl.int64 throws an error whilst torch.int64 does not. Turning off interpreter mode, the reverse is true.
#output_ptrs = output_ptrs.to(torch.int64)
output_ptrs = output_ptrs.to(tl.int64) + batch_head_offset_output
## Step 4.
## First, we check for OOB conditions due to translations.
output_mask = col_idx >= 0
## Next, we check if a column index maps to a valid contraction (modulo check).
## Unfortunately, broadcast semantics don't apply to the "==" operator.
    ## So we have to design a new boolean operator: ~op1 && ~op2
'''For some reason, this is no longer working. We replace it with the equivalent col_idx % linear_transforms[:, None] == 0 check.
op_one = (col_idx % linear_transforms[:, None]).to(tl.int64) > 0
op_two = (col_idx % linear_transforms[:,None]).to(tl.int64) < 0
output_mask = output_mask & ((not op_one) & (not op_two))
'''
output_mask = output_mask & (col_idx % linear_transforms[:, None].to(tl.int64) == 0)
## Lastly, we check for OOB due to exceeding nnz count.
output_mask = output_mask & (col_idx < nnz[:,None])
tl.store(out_ptr + output_ptrs, accumulator, mask=output_mask)
def naive_block_mappings(mask : list[list[int]], BLOCK_HEIGHT : int, BLOCK_WIDTH : int, GPU_ID : int) -> tuple[torch.Tensor, torch.Tensor]:
x_coords = []
y_coords = []
## We populate the anchor points.
## We place x coord in x_coords
## We place y coord in y_coords
for block_number in range(max((len(mask)//BLOCK_HEIGHT), 1)):
## Now we have to find the min and max col_idxs for the block_number. ##
min_col_idx = len(mask[0])
max_col_idx = 0
for row in range(block_number*BLOCK_HEIGHT, (block_number+1)*BLOCK_HEIGHT):
for col in range(len(mask[0])):
if row < len(mask): ## Check required due to irregular boundary conditions.
if mask[row][col]:
min_col_idx = min(min_col_idx, col)
max_col_idx = max(max_col_idx, col)
## Now, after we have found min_col_idx & max_col_idx,
## we compute the thread-block anchor-points.
curr_idx = min_col_idx
while curr_idx < max_col_idx:
x_coords.append(curr_idx)
y_coords.append(block_number*BLOCK_HEIGHT)
curr_idx += BLOCK_WIDTH
assert len(x_coords) == len(y_coords) and len(x_coords) > 0, "Issues with generating arrangement!"
return (torch.tensor(x_coords, dtype=torch.int32).to(GPU_ID), torch.tensor(y_coords, dtype=torch.int32).to(GPU_ID))
## For now, we just do a simple naive tiling; TODO: change to SPLAT's special tiling later.
def gen_block_mappings(mask : list[list[int]], BLOCK_HEIGHT : int,
BLOCK_WIDTH : int, GPU_ID : int, is_naive : bool = True) -> tuple[torch.Tensor, torch.Tensor]:
return naive_block_mappings(mask, BLOCK_HEIGHT, BLOCK_WIDTH, GPU_ID)
def rsddmm_preamble(mask : list[list[int]], output_shape: tuple[int], BLOCK_SIZE_X : int,
BLOCK_SIZE_Y : int, GPU_ID : int, out_dtype : torch.dtype):
output : torch.Tensor = torch.empty((output_shape), dtype=out_dtype).to(GPU_ID)
## Next, we compute the tiling blocks.
tb_map_x, tb_map_y = gen_block_mappings(mask, BLOCK_SIZE_Y, BLOCK_SIZE_X, GPU_ID)
assert tb_map_x.shape == tb_map_y.shape, "Incorrect tiling arrangement!"
## Finally, we can launch the kernel
grid_dim = (tb_map_x.shape[0],output_shape[0]*output_shape[1])
return (
output, grid_dim, tb_map_x, tb_map_y
)
def rsddmm_launcher(x : torch.Tensor, y : torch.Tensor, output : torch.Tensor,
dTos_linear_transformations : torch.Tensor, dTos_translations : torch.Tensor,
sTod_linear_transformations : torch.Tensor, sTod_translations : torch.Tensor,
trailing_dim : int, nnzs : torch.Tensor, grid_dim : tuple[int],
tb_map_x : torch.Tensor, tb_map_y : torch.Tensor,
BLOCK_SIZE_Y : int, BLOCK_SIZE_X : int) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
rsddmm_kernel[grid_dim](x,y,output,
dTos_linear_transformations,dTos_translations,
sTod_linear_transformations,sTod_translations,nnzs,
x.shape[2],y.shape[3],x.shape[3], trailing_dim, tb_map_x, tb_map_y,
BLOCK_SIZE_Y=BLOCK_SIZE_Y, BLOCK_SIZE_X=BLOCK_SIZE_X, num_warps=2)
## We return the sTod arrays for correctness checking only.
return (output, sTod_linear_transformations, sTod_translations, nnzs)
def truth(x : torch.Tensor, y: torch.Tensor, GPU_ID : int) -> torch.Tensor:
return torch.einsum('bnqd, bndk -> bnqk', x, y)
#return torch.matmul(x,y).to(GPU_ID)
## Define checker later, figure out good practice. TODO.
def is_correct(out_torch : torch.Tensor, out_rsddmm : torch.Tensor,
sTod_linear_transofrmations : torch.Tensor,
sTod_translations : torch.Tensor, nnzs: torch.Tensor,
batch_size : int, num_heads : int,
mask : list[list[int]]) -> bool:
out_torch_list = out_torch.tolist() ## Question: What are the sizes of these tensors?!
out_rsddmm_list = out_rsddmm.tolist()
sTod_linear_transformations_list = sTod_linear_transofrmations.tolist()
sTod_translations_list = sTod_translations.tolist()
nnzs_list = nnzs.tolist()
num_deviations : int = 0
mse_error : float = 0
for b in range(batch_size):
for h in range(num_heads):
for row in range(len(mask)):
for nnz_col_id in range(len(out_rsddmm_list[0][0][0])):
## We convert to the dense index.
dense_col_id : int = round(nnz_col_id * sTod_linear_transformations_list[row] + sTod_translations_list[row])
if nnz_col_id < nnzs_list[row] and abs(out_torch_list[b][h][row][dense_col_id] - out_rsddmm_list[b][h][row][nnz_col_id]) > 1e-3:
#print(f'failed at: {row} {dense_col_id}')
mse_error += abs(out_torch_list[b][h][row][dense_col_id] - out_rsddmm_list[b][h][row][nnz_col_id])
num_deviations += 1
if num_deviations > 0:
        print(f'test case failed; total abs deviation: {mse_error} across {num_deviations} mismatched entries')
return False
else:
print(f'test case passed!')
return True
## Multiply a: m*k and k*n matrix.
def test(m: int, k : int, n : int, num_heads : int, batch_size : int,
mask : list[list[int]], GPU_ID : int, BLOCK_SIZE_Y : int, BLOCK_SIZE_X : int, out_dtype : torch.dtype):
## Some simple test-cases for me to try out.
assert m==n, "We only need to consider the case when m=n."
#left : torch.Tensor = torch.randn((m,k),dtype=torch.float32).to(GPU_ID)
#right : torch.Tensor = torch.randn((k,n),dtype=torch.float32).to(GPU_ID)
left : torch.Tensor = torch.randint(0, 100, (batch_size,num_heads,m,k),dtype=out_dtype).to(GPU_ID)
right : torch.Tensor = torch.randint(0, 100, (batch_size,num_heads,k,n),dtype=out_dtype).to(GPU_ID)
dTos_linear_transformations, dTos_translations, \
sTod_linear_transformations, sTod_translations, nnzs, \
acsr_trailing_dimension, _, _ = create_acsr(
mask, BLOCK_SIZE_X, GPU_ID
)
output_tensor, grid_dim, \
tb_map_x, tb_map_y = rsddmm_preamble(mask, (batch_size, num_heads, m, acsr_trailing_dimension),
BLOCK_SIZE_X, BLOCK_SIZE_Y, GPU_ID, out_dtype)
## Call the rsddmm launcher.
rsddmm_output, sTod_linear_transformations, \
sTod_translations, nnzs = rsddmm_launcher(left, right, output_tensor,
dTos_linear_transformations, dTos_translations,
sTod_linear_transformations, sTod_translations,
acsr_trailing_dimension, nnzs, grid_dim,
tb_map_x, tb_map_y,
BLOCK_SIZE_Y, BLOCK_SIZE_X)
## Verify correctness.
torch_output = truth(left, right, GPU_ID)
is_correct(torch_output, rsddmm_output,
sTod_linear_transformations, sTod_translations,
nnzs, batch_size, num_heads, mask)
if __name__ == "__main__":
## Just a sample unit test over here.
## Small unit-tests
def test_one():
        ## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 10
m: int = 10
k: int = 10
p: int = 2 ## Sparsity parameter.
GPU_ID : Any = 0
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, torch.bfloat16)
def test_two():
        ## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 10
m: int = 10
k: int = 10
p: int = 5 ## Sparsity parameter.
GPU_ID : Any = 'cpu'
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
def test_three():
        ## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 10
m: int = 10
k: int = 10
p: int = 7 ## Sparsity parameter.
GPU_ID : Any = 'cpu'
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
def test_four():
        ## Basic parameters to multiply: m*k by k*n -> m*n matrix.
n: int = 16
m: int = 16
k: int = 16
p: int = 5 ## Sparsity parameter.
GPU_ID : Any = 'cpu'
num_heads : int = 2
batch_size : int = 2
BLOCK_SIZE_Y : int = 16
BLOCK_SIZE_X : int = 16
out_dtype : torch.dtype = torch.bfloat16
## Instantiate a mask.
mask = create_blocked_mask(n, p)
test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
    def test_five():
        ## Basic parameters to multiply: m*k by k*n -> m*n matrix.
        n: int = 16
        m: int = 16
        k: int = 16
        p: int = 16 ## Sparsity parameter.
        GPU_ID : int = 0
        num_heads : int = 2
        batch_size : int = 2
        BLOCK_SIZE_Y : int = 16
        BLOCK_SIZE_X : int = 16
        out_dtype : torch.dtype = torch.bfloat16
        ## Instantiate a mask.
        mask = create_blocked_mask(n, p)
        test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
    def test_six():
        ## Basic parameters to multiply: m*k by k*n -> m*n matrix.
        n: int = 32
        m: int = 32
        k: int = 32
        p: int = 10 ## Sparsity parameter.
        GPU_ID : int = 0
        num_heads : int = 2
        batch_size : int = 2
        BLOCK_SIZE_Y : int = 16
        BLOCK_SIZE_X : int = 16
        out_dtype : torch.dtype = torch.bfloat16
        ## Instantiate a mask.
        mask = create_blocked_mask(n, p)
        test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
    def test_seven():
        ## Basic parameters to multiply: m*k by k*n -> m*n matrix.
        n: int = 32
        m: int = 32
        k: int = 32
        p: int = 20 ## Sparsity parameter.
        GPU_ID : int = 0
        num_heads : int = 2
        batch_size : int = 2
        BLOCK_SIZE_Y : int = 16
        BLOCK_SIZE_X : int = 16
        out_dtype : torch.dtype = torch.bfloat16
        ## Instantiate a mask.
        mask = create_blocked_mask(n, p)
        test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
    def test_eight():
        ## Basic parameters to multiply: m*k by k*n -> m*n matrix.
        n: int = 32
        m: int = 32
        k: int = 32
        p: int = 32 ## Sparsity parameter.
        GPU_ID : int = 0
        num_heads : int = 2
        batch_size : int = 2
        BLOCK_SIZE_Y : int = 16
        BLOCK_SIZE_X : int = 16
        out_dtype : torch.dtype = torch.bfloat16
        ## Instantiate a mask.
        mask = create_blocked_mask(n, p)
        test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
    def test_nine():
        ## Basic parameters to multiply: m*k by k*n -> m*n matrix.
        n: int = 128
        m: int = 128
        k: int = 128
        p: int = 57 ## Sparsity parameter.
        GPU_ID : int = 0
        num_heads : int = 2
        batch_size : int = 2
        BLOCK_SIZE_Y : int = 16
        BLOCK_SIZE_X : int = 16
        out_dtype : torch.dtype = torch.bfloat16
        ## Instantiate a mask.
        mask = create_blocked_mask(n, p)
        test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
## These are pretty small tests.
test_one()
test_two()
test_three()
test_four()
test_five()
test_six()
test_seven()
test_eight()
test_nine()
## Larger tests.
    def test_ten():
        ## Basic parameters to multiply: m*k by k*n -> m*n matrix.
        n: int = 1024
        m: int = 1024
        k: int = 1024
        p: int = 256 ## Sparsity parameter.
        GPU_ID : int = 0
        num_heads : int = 2
        batch_size : int = 2
        BLOCK_SIZE_Y : int = 16
        BLOCK_SIZE_X : int = 16
        out_dtype : torch.dtype = torch.bfloat16
        ## Instantiate a mask.
        mask = create_blocked_mask(n, p)
        test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
    def test_eleven():
        ## Basic parameters to multiply: m*k by k*n -> m*n matrix.
        n: int = 1024
        m: int = 1024
        k: int = 1024
        p: int = 328 ## Sparsity parameter.
        GPU_ID : int = 0
        num_heads : int = 2
        batch_size : int = 2
        BLOCK_SIZE_Y : int = 16
        BLOCK_SIZE_X : int = 16
        out_dtype : torch.dtype = torch.bfloat16
        ## Instantiate a mask.
        mask = create_blocked_mask(n, p)
        test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
    def test_twelve():
        ## Basic parameters to multiply: m*k by k*n -> m*n matrix.
        n: int = 1024
        m: int = 1024
        k: int = 1024
        p: int = 512 ## Sparsity parameter.
        GPU_ID : int = 0
        num_heads : int = 2
        batch_size : int = 2
        BLOCK_SIZE_Y : int = 16
        BLOCK_SIZE_X : int = 16
        out_dtype : torch.dtype = torch.bfloat16
        ## Instantiate a mask.
        mask = create_blocked_mask(n, p)
        test(m, k, n, num_heads, batch_size, mask, GPU_ID, BLOCK_SIZE_Y, BLOCK_SIZE_X, out_dtype)
test_ten()
test_eleven()
test_twelve()
|
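For orientation, the correctness check in the entry above compares each ACSR-packed output entry against the dense product at the column recovered through the sTod affine map. The sketch below is a minimal dense reference for that masked sampled dense-dense product, written in plain PyTorch; the helper name and the 0/1 mask layout (a dense (m, n) tensor rather than the list-of-lists used above) are illustrative assumptions, not part of the file.

import torch

def masked_sddmm_reference(x: torch.Tensor, y: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    ## x: (batch, heads, m, k), y: (batch, heads, k, n), mask: (m, n) of 0/1 entries.
    ## Dense product first (same contraction as truth() above), then zero out the
    ## positions the sparse kernel never materialises.
    dense = torch.einsum('bnqd, bndk -> bnqk', x, y)
    return dense * mask.to(dense.dtype)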
InfiniTensor/ninetoothed-examples
|
matmul.py
|
https://github.com/InfiniTensor/ninetoothed-examples/blob/ca5d81e4a57fdb973155894fa4ad84058833ee45/matmul.py
|
import ninetoothed
import ninetoothed.language as ntl
import torch
import triton
import triton.language as tl
from ninetoothed import Symbol, Tensor
def arrangement(lhs, rhs, output):
BLOCK_SIZE_M = Symbol("BLOCK_SIZE_M", meta=True)
BLOCK_SIZE_N = Symbol("BLOCK_SIZE_N", meta=True)
BLOCK_SIZE_K = Symbol("BLOCK_SIZE_K", meta=True)
output_tiled = output.tile((BLOCK_SIZE_M, BLOCK_SIZE_N))
lhs_tiled = (
lhs.tile((BLOCK_SIZE_M, BLOCK_SIZE_K))
.tile((1, -1))
.expand((-1, output_tiled.shape[1]))
)
lhs_tiled.dtype = lhs_tiled.dtype.squeeze(0)
rhs_tiled = (
rhs.tile((BLOCK_SIZE_K, BLOCK_SIZE_N))
.tile((-1, 1))
.expand((output_tiled.shape[0], -1))
)
rhs_tiled.dtype = rhs_tiled.dtype.squeeze(1)
return lhs_tiled, rhs_tiled, output_tiled
def application(lhs, rhs, output):
accumulator = ntl.zeros(output.shape, dtype=ntl.float32)
for k in range(lhs.shape[0]):
accumulator += ntl.dot(lhs[k], rhs[k])
output = accumulator.to(ntl.float16)
matmul_kernel = ninetoothed.make(
arrangement, application, (Tensor(2), Tensor(2), Tensor(2))
)
def matmul(lhs, rhs):
output = torch.empty(
(lhs.shape[0], rhs.shape[1]), device=lhs.device, dtype=torch.float16
)
matmul_kernel(lhs, rhs, output)
return output
@triton.autotune(
configs=[
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 64,
"GROUP_SIZE_M": 8,
},
num_stages=3,
num_warps=8,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 256,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 128,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 128,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=4,
num_warps=4,
),
triton.Config(
{
"BLOCK_SIZE_M": 64,
"BLOCK_SIZE_N": 32,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=5,
num_warps=2,
),
triton.Config(
{
"BLOCK_SIZE_M": 32,
"BLOCK_SIZE_N": 64,
"BLOCK_SIZE_K": 32,
"GROUP_SIZE_M": 8,
},
num_stages=5,
num_warps=2,
),
],
key=["m", "n", "k"],
)
@triton.jit
def triton_matmul_kernel(
lhs_ptr,
rhs_ptr,
output_ptr,
m,
n,
k,
lhs_stride_m,
lhs_stride_k,
rhs_stride_k,
rhs_stride_n,
output_stride_m,
output_stride_n,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
):
pid = tl.program_id(0)
num_pid_m = tl.cdiv(m, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(n, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % m
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % n
offs_k = tl.arange(0, BLOCK_SIZE_K)
lhs_ptrs = lhs_ptr + (
offs_am[:, None] * lhs_stride_m + offs_k[None, :] * lhs_stride_k
)
rhs_ptrs = rhs_ptr + (
offs_k[:, None] * rhs_stride_k + offs_bn[None, :] * rhs_stride_n
)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for i in range(0, tl.cdiv(k, BLOCK_SIZE_K)):
lhs = tl.load(lhs_ptrs, mask=offs_k[None, :] < k - i * BLOCK_SIZE_K, other=0.0)
rhs = tl.load(rhs_ptrs, mask=offs_k[:, None] < k - i * BLOCK_SIZE_K, other=0.0)
accumulator = tl.dot(lhs, rhs, accumulator)
lhs_ptrs += BLOCK_SIZE_K * lhs_stride_k
rhs_ptrs += BLOCK_SIZE_K * rhs_stride_k
output = accumulator.to(tl.float16)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
output_ptrs = (
output_ptr
+ output_stride_m * offs_cm[:, None]
+ output_stride_n * offs_cn[None, :]
)
output_mask = (offs_cm[:, None] < m) & (offs_cn[None, :] < n)
tl.store(output_ptrs, output, mask=output_mask)
def triton_matmul(lhs, rhs):
output = torch.empty(
(lhs.shape[0], rhs.shape[1]), device=lhs.device, dtype=torch.float16
)
def grid(meta):
return (
triton.cdiv(lhs.shape[0], meta["BLOCK_SIZE_M"])
* triton.cdiv(rhs.shape[1], meta["BLOCK_SIZE_N"]),
)
triton_matmul_kernel[grid](
lhs,
rhs,
output,
lhs.shape[0],
rhs.shape[1],
lhs.shape[1],
lhs.stride(0),
lhs.stride(1),
rhs.stride(0),
rhs.stride(1),
output.stride(0),
output.stride(1),
)
return output
if __name__ == "__main__":
torch.manual_seed(0)
shape = (512, 512)
lhs = torch.randn(shape, device="cuda", dtype=torch.float16)
rhs = torch.randn(shape, device="cuda", dtype=torch.float16)
ninetoothed_output = matmul(lhs, rhs)
torch_output = torch.matmul(lhs, rhs)
triton_output = triton_matmul(lhs, rhs)
print(ninetoothed_output)
print(torch_output)
print(triton_output)
if torch.allclose(ninetoothed_output, torch_output):
print("✅ NineToothed and PyTorch match.")
else:
print("❌ NineToothed and PyTorch differ.")
if torch.allclose(ninetoothed_output, triton_output):
print("✅ NineToothed and Triton match.")
else:
print("❌ NineToothed and Triton differ.")
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=["m", "n", "k"],
x_vals=[128 * i for i in range(2, 33)],
line_arg="provider",
line_vals=["ninetoothed", "torch", "triton"],
line_names=["NineToothed", "PyTorch", "Triton"],
styles=[("blue", "-"), ("green", "-"), ("orange", "-")],
ylabel="TFLOPS",
plot_name="matrix-multiplication-performance",
args={},
)
)
def benchmark(m, n, k, provider):
lhs = torch.randn((m, k), device="cuda", dtype=torch.float16)
rhs = torch.randn((k, n), device="cuda", dtype=torch.float16)
quantiles = [0.5, 0.2, 0.8]
if provider == "ninetoothed":
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: matmul(lhs, rhs), quantiles=quantiles
)
elif provider == "torch":
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: torch.matmul(lhs, rhs), quantiles=quantiles
)
elif provider == "triton":
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: triton_matmul(lhs, rhs), quantiles=quantiles
)
def perf(ms):
return 2 * m * n * k * 1e-12 / (ms * 1e-3)
return perf(ms), perf(max_ms), perf(min_ms)
benchmark.run(show_plots=True, print_data=True, save_path=".")
|
@triton.jit
def triton_matmul_kernel(
lhs_ptr,
rhs_ptr,
output_ptr,
m,
n,
k,
lhs_stride_m,
lhs_stride_k,
rhs_stride_k,
rhs_stride_n,
output_stride_m,
output_stride_n,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
):
pid = tl.program_id(0)
num_pid_m = tl.cdiv(m, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(n, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE_M * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE_M
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % m
offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % n
offs_k = tl.arange(0, BLOCK_SIZE_K)
lhs_ptrs = lhs_ptr + (
offs_am[:, None] * lhs_stride_m + offs_k[None, :] * lhs_stride_k
)
rhs_ptrs = rhs_ptr + (
offs_k[:, None] * rhs_stride_k + offs_bn[None, :] * rhs_stride_n
)
accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for i in range(0, tl.cdiv(k, BLOCK_SIZE_K)):
lhs = tl.load(lhs_ptrs, mask=offs_k[None, :] < k - i * BLOCK_SIZE_K, other=0.0)
rhs = tl.load(rhs_ptrs, mask=offs_k[:, None] < k - i * BLOCK_SIZE_K, other=0.0)
accumulator = tl.dot(lhs, rhs, accumulator)
lhs_ptrs += BLOCK_SIZE_K * lhs_stride_k
rhs_ptrs += BLOCK_SIZE_K * rhs_stride_k
output = accumulator.to(tl.float16)
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
output_ptrs = (
output_ptr
+ output_stride_m * offs_cm[:, None]
+ output_stride_n * offs_cn[None, :]
)
output_mask = (offs_cm[:, None] < m) & (offs_cn[None, :] < n)
tl.store(output_ptrs, output, mask=output_mask)
def triton_matmul(lhs, rhs):
output = torch.empty(
(lhs.shape[0], rhs.shape[1]), device=lhs.device, dtype=torch.float16
)
def grid(meta):
return (
triton.cdiv(lhs.shape[0], meta["BLOCK_SIZE_M"])
* triton.cdiv(rhs.shape[1], meta["BLOCK_SIZE_N"]),
)
triton_matmul_kernel[grid](
lhs,
rhs,
output,
lhs.shape[0],
rhs.shape[1],
lhs.shape[1],
lhs.stride(0),
lhs.stride(1),
rhs.stride(0),
rhs.stride(1),
output.stride(0),
output.stride(1),
)
return output
if __name__ == "__main__":
torch.manual_seed(0)
shape = (512, 512)
lhs = torch.randn(shape, device="cuda", dtype=torch.float16)
rhs = torch.randn(shape, device="cuda", dtype=torch.float16)
ninetoothed_output = matmul(lhs, rhs)
torch_output = torch.matmul(lhs, rhs)
triton_output = triton_matmul(lhs, rhs)
print(ninetoothed_output)
print(torch_output)
print(triton_output)
if torch.allclose(ninetoothed_output, torch_output):
print("✅ NineToothed and PyTorch match.")
else:
print("❌ NineToothed and PyTorch differ.")
if torch.allclose(ninetoothed_output, triton_output):
print("✅ NineToothed and Triton match.")
else:
print("❌ NineToothed and Triton differ.")
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=["m", "n", "k"],
x_vals=[128 * i for i in range(2, 33)],
line_arg="provider",
line_vals=["ninetoothed", "torch", "triton"],
line_names=["NineToothed", "PyTorch", "Triton"],
styles=[("blue", "-"), ("green", "-"), ("orange", "-")],
ylabel="TFLOPS",
plot_name="matrix-multiplication-performance",
args={},
)
)
def benchmark(m, n, k, provider):
lhs = torch.randn((m, k), device="cuda", dtype=torch.float16)
rhs = torch.randn((k, n), device="cuda", dtype=torch.float16)
quantiles = [0.5, 0.2, 0.8]
if provider == "ninetoothed":
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: matmul(lhs, rhs), quantiles=quantiles
)
elif provider == "torch":
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: torch.matmul(lhs, rhs), quantiles=quantiles
)
elif provider == "triton":
ms, min_ms, max_ms = triton.testing.do_bench(
lambda: triton_matmul(lhs, rhs), quantiles=quantiles
)
def perf(ms):
return 2 * m * n * k * 1e-12 / (ms * 1e-3)
return perf(ms), perf(max_ms), perf(min_ms)
benchmark.run(show_plots=True, print_data=True, save_path=".")
|
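The hand-written Triton baseline above flattens the launch grid and re-derives (pid_m, pid_n) in group-major order so that consecutive programs reuse the same lhs row tiles. A small plain-Python sketch of that index arithmetic, with illustrative tile counts, makes the traversal order concrete:

def grouped_pid_to_tile(pid: int, num_pid_m: int, num_pid_n: int, group_size_m: int) -> tuple[int, int]:
    ## Mirrors the pid -> (pid_m, pid_n) mapping in triton_matmul_kernel above.
    num_pid_in_group = group_size_m * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * group_size_m
    group_size = min(num_pid_m - first_pid_m, group_size_m)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size)
    pid_n = (pid % num_pid_in_group) // group_size
    return pid_m, pid_n

## With 4x4 output tiles and GROUP_SIZE_M=2, programs 0..7 sweep tile rows 0-1
## column by column before moving on to rows 2-3, improving L2 reuse of lhs.
print([grouped_pid_to_tile(pid, 4, 4, 2) for pid in range(8)])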
LiuTaowen-Tony/flash-qlora
|
triton_dense_v1.py
|
https://github.com/LiuTaowen-Tony/flash-qlora/blob/57ce887d668430693e3b2d76cc707223ee0c4b82/triton_dense_v1.py
|
import torch
import triton
import triton.language as tl
import common
def get_configs_io_bound():
configs = []
for block_n in [256, 128, 64, 32, 16]:
for block_m in [256, 128, 64, 32]:
for block_k in [256, 128, 64]:
for num_stages in [5, 4, 3]:
for num_warps in [4, 8]:
for num_ctas in [1]:
if block_m * block_n * block_k >= 16 * 64 * 64 and block_m * block_n * block_k <= 128 * 128 * 256:
configs.append(
triton.Config({'block_M': block_m, 'block_N': block_n, 'block_K': block_k, 'R': 16, 'GROUP_SIZE_M': 8},
num_stages=num_stages, num_warps=num_warps, num_ctas=num_ctas))
# for split_k in [2, 4, 8, 16]:
# configs.append(triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k},
# num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C')))
return configs
@triton.autotune(
configs=common.get_autotune_config(),
# configs=get_configs_io_bound(),
key=["M", "N", "K"],
)
@triton.jit
def triton_dense_forward_kernel(
x_ptr,
w_ptr,
c_ptr,
stride_xm,
stride_xk,
stride_wk,
stride_wn,
M: int,
N: int,
K: int,
R: tl.constexpr,
block_M: tl.constexpr,
block_N: tl.constexpr,
block_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
pid_m, pid_n = common.reorder_pid(pid_m, pid_n, M, N, block_M, block_N, GROUP_SIZE_M)
# Block starting positions
offs_m = pid_m * block_M
offs_n = pid_n * block_N
# Initialize fp and int accumulators
fp_acc = tl.zeros((block_M, block_N), dtype=tl.float32)
# R: 16 block_N: 256 block_K: 32 block_M: 64
for i in range(0, K, block_K):
        # Load blocks of X and W for this K tile.
w_blk = tl.load(
w_ptr
+ (i + tl.arange(0, block_K))[:, None] * stride_wk
            + (offs_n + tl.arange(0, block_N)) * stride_wn
)
x_blk = tl.load(
x_ptr
+ (offs_m + tl.arange(0, block_M))[:, None] * stride_xm
+ (i + tl.arange(0, block_K))
)
fp_acc = tl.dot(x_blk, w_blk, fp_acc)
tl.store(
        c_ptr + (offs_m + tl.arange(0, block_M))[:, None] * N + offs_n + tl.arange(0, block_N),
fp_acc,
)
def triton_dense_forward(
x: torch.Tensor, w: torch.Tensor,
) -> torch.Tensor:
# Allocate result tensor on the GPU
m, k = x.shape
_, n = w.shape
assert w.shape[0] == k
c = torch.empty((m, n), dtype=x.dtype, device="cuda")
grid = lambda opt: (triton.cdiv(m, opt["block_M"]), triton.cdiv(n, opt["block_N"]))
# Launch the Triton kernel with auto-tuned configurations
triton_dense_forward_kernel[grid](
x,
w,
c,
x.stride(0),
x.stride(1),
w.stride(0),
w.stride(1),
M=m,
N=n,
K=k,
)
return c
|
@triton.jit
def triton_dense_forward_kernel(
x_ptr,
w_ptr,
c_ptr,
stride_xm,
stride_xk,
stride_wk,
stride_wn,
M: int,
N: int,
K: int,
R: tl.constexpr,
block_M: tl.constexpr,
block_N: tl.constexpr,
block_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
pid_m, pid_n = common.reorder_pid(pid_m, pid_n, M, N, block_M, block_N, GROUP_SIZE_M)
# Block starting positions
offs_m = pid_m * block_M
offs_n = pid_n * block_N
# Initialize fp and int accumulators
fp_acc = tl.zeros((block_M, block_N), dtype=tl.float32)
# R: 16 block_N: 256 block_K: 32 block_M: 64
for i in range(0, K, block_K):
        # Load blocks of X and W for this K tile.
w_blk = tl.load(
w_ptr
+ (i + tl.arange(0, block_K))[:, None] * stride_wk
            + (offs_n + tl.arange(0, block_N)) * stride_wn
)
x_blk = tl.load(
x_ptr
+ (offs_m + tl.arange(0, block_M))[:, None] * stride_xm
+ (i + tl.arange(0, block_K))
)
fp_acc = tl.dot(x_blk, w_blk, fp_acc)
tl.store(
        c_ptr + (offs_m + tl.arange(0, block_M))[:, None] * N + offs_n + tl.arange(0, block_N),
fp_acc,
)
def triton_dense_forward(
x: torch.Tensor, w: torch.Tensor,
) -> torch.Tensor:
# Allocate result tensor on the GPU
m, k = x.shape
_, n = w.shape
assert w.shape[0] == k
c = torch.empty((m, n), dtype=x.dtype, device="cuda")
grid = lambda opt: (triton.cdiv(m, opt["block_M"]), triton.cdiv(n, opt["block_N"]))
# Launch the Triton kernel with auto-tuned configurations
triton_dense_forward_kernel[grid](
x,
w,
c,
x.stride(0),
x.stride(1),
w.stride(0),
w.stride(1),
M=m,
N=n,
K=k,
)
return c
|
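Note that the dense forward kernel above loads and stores without boundary masks, so a quick sanity check should use shapes that are multiples of the autotuned block sizes. The snippet below is a sketch only; the import path is a guess based on the file name, and the tolerances reflect float16 storage of a float32-accumulated product.

import torch
from triton_dense_v1 import triton_dense_forward  ## hypothetical import path

x = torch.randn(512, 1024, device="cuda", dtype=torch.float16)
w = torch.randn(1024, 512, device="cuda", dtype=torch.float16)
out = triton_dense_forward(x, w)
ref = x @ w
print(torch.allclose(out.float(), ref.float(), atol=1e-1, rtol=1e-2))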
jsalt2024-evaluating-llms-for-astronomy/retrieval
|
saerch/kernels.py
|
https://github.com/jsalt2024-evaluating-llms-for-astronomy/retrieval/blob/f8ec8914e49dbf4680b56efd7771264549b9a2da/saerch/kernels.py
|
### kernels.py ###
import torch
import triton
import triton.language as tl
## kernels
def triton_sparse_transpose_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
"""
calculates sparse.T @ dense (i.e reducing along the collated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (A, B)
output is shape (N, B)
"""
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
K = sparse_indices.shape[1]
A = dense.shape[0]
B = dense.shape[1]
assert sparse_indices.shape[0] == A
# COO-format and sorted
sorted_indices = sparse_indices.view(-1).sort()
coo_indices = torch.stack(
[
torch.arange(A, device=sparse_indices.device).repeat_interleave(K)[
sorted_indices.indices
],
sorted_indices.values,
]
) # shape (2, A * K)
coo_values = sparse_values.view(-1)[sorted_indices.indices] # shape (A * K,)
return triton_coo_sparse_dense_matmul(coo_indices, coo_values, dense, N, BLOCK_SIZE_AK)
def triton_coo_sparse_dense_matmul(
coo_indices: torch.Tensor,
coo_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
AK = coo_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(N, B, device=dense.device, dtype=coo_values.dtype)
grid = lambda META: (
triton.cdiv(AK, META["BLOCK_SIZE_AK"]),
1,
)
triton_sparse_transpose_dense_matmul_kernel[grid](
coo_indices,
coo_values,
dense,
out,
stride_da=dense.stride(0),
stride_db=dense.stride(1),
B=B,
N=N,
AK=AK,
BLOCK_SIZE_AK=BLOCK_SIZE_AK,
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_transpose_dense_matmul_kernel(
coo_indices_ptr,
coo_values_ptr,
dense_ptr,
out_ptr,
stride_da,
stride_db,
B,
N,
AK,
BLOCK_SIZE_AK: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
coo_indices is shape (2, AK)
coo_values is shape (AK,)
dense is shape (A, B), contiguous along B
out is shape (N, B)
"""
pid_ak = tl.program_id(0)
pid_b = tl.program_id(1)
coo_offsets = tl.arange(0, BLOCK_SIZE_AK)
b_offsets = tl.arange(0, BLOCK_SIZE_B)
A_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
K_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets + AK,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
values = tl.load(
coo_values_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
last_k = tl.min(K_coords)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for ind in range(BLOCK_SIZE_AK):
if ind + pid_ak * BLOCK_SIZE_AK < AK:
# workaround to do A_coords[ind]
a = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
A_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
k = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
K_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
values,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.float32),
)
)
tl.device_assert(k < N)
if k != last_k:
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
accum *= 0
last_k = k
if v != 0:
accum += v * tl.load(dense_ptr + a * stride_da + b_offsets, mask=b_offsets < B)
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
def triton_sparse_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
"""
calculates sparse @ dense (i.e reducing along the uncollated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (N, B)
output is shape (A, B)
"""
N = dense.shape[0]
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
A = sparse_indices.shape[0]
K = sparse_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(A, B, device=dense.device, dtype=sparse_values.dtype)
triton_sparse_dense_matmul_kernel[(A,)](
sparse_indices,
sparse_values,
dense,
out,
stride_dn=dense.stride(0),
stride_db=dense.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_K=triton.next_power_of_2(K),
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_dense_matmul_kernel(
sparse_indices_ptr,
sparse_values_ptr,
dense_ptr,
out_ptr,
stride_dn,
stride_db,
A,
B,
N,
K,
BLOCK_SIZE_K: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
sparse_indices is shape (A, K)
sparse_values is shape (A, K)
dense is shape (N, B), contiguous along B
out is shape (A, B)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
sparse_indices = tl.load(
sparse_indices_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
sparse_values = tl.load(
sparse_values_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
for k in range(K):
# workaround to do sparse_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
# workaround to do sparse_values[k]
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_values,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
)
)
tl.device_assert(i < N)
if v != 0:
accum += v * tl.load(
dense_ptr + i * stride_dn + offsets_b * stride_db, mask=offsets_b < B
)
tl.store(out_ptr + pid * B + offsets_b, accum.to(sparse_values.dtype), mask=offsets_b < B)
def triton_dense_dense_sparseout_matmul(
dense1: torch.Tensor,
dense2: torch.Tensor,
at_indices: torch.Tensor,
) -> torch.Tensor:
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
calculates dense1 @ dense2 only for the indices in at_indices
equivalent to (dense1 @ dense2).gather(1, at_indices)
"""
A, B = dense1.shape
N = dense2.shape[1]
assert dense2.shape[0] == B
assert at_indices.shape[0] == A
K = at_indices.shape[1]
assert at_indices.is_contiguous()
assert dense1.stride(1) == 1, "dense1 must be contiguous along B"
assert dense2.stride(0) == 1, "dense2 must be contiguous along B"
if K > 512:
# print("WARN - using naive matmul for large K")
# naive is more efficient for large K
return (dense1 @ dense2).gather(1, at_indices)
out = torch.zeros(A, K, device=dense1.device, dtype=dense1.dtype)
# grid = lambda META: (triton.cdiv(A, META['BLOCK_SIZE_A']),)
triton_dense_dense_sparseout_matmul_kernel[(A,)](
dense1,
dense2,
at_indices,
out,
stride_d1a=dense1.stride(0),
stride_d1b=dense1.stride(1),
stride_d2b=dense2.stride(0),
stride_d2n=dense2.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_B=triton.next_power_of_2(B),
BLOCK_SIZE_N=triton.next_power_of_2(N),
BLOCK_SIZE_K=triton.next_power_of_2(K),
)
return out
@triton.jit
def triton_dense_dense_sparseout_matmul_kernel(
dense1_ptr,
dense2_ptr,
at_indices_ptr,
out_ptr,
stride_d1a,
stride_d1b,
stride_d2b,
stride_d2n,
A,
B,
N,
K,
BLOCK_SIZE_B: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
at_indices = tl.load(at_indices_ptr + pid * K + offsets_k, mask=offsets_k < K) # shape (K,)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
dense1 = tl.load(
dense1_ptr + pid * stride_d1a + offsets_b * stride_d1b, mask=offsets_b < B
) # shape (B,)
accum = tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32)
for k in range(K):
        # workaround to do at_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
at_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
tl.device_assert(i < N)
dense2col = tl.load(
dense2_ptr + offsets_b * stride_d2b + i * stride_d2n, mask=offsets_b < B
) # shape (B,)
accum += tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
tl.sum(dense1 * dense2col),
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
tl.store(out_ptr + pid * K + offsets_k, accum, mask=offsets_k < K)
class TritonDecoderAutograd(torch.autograd.Function):
@staticmethod
def forward(ctx, sparse_indices, sparse_values, decoder_weight):
ctx.save_for_backward(sparse_indices, sparse_values, decoder_weight)
return triton_sparse_dense_matmul(sparse_indices, sparse_values, decoder_weight.T)
@staticmethod
def backward(ctx, grad_output):
sparse_indices, sparse_values, decoder_weight = ctx.saved_tensors
assert grad_output.is_contiguous(), "grad_output must be contiguous; this is probably because the subsequent op was a .sum() or something like that, which returns a non contiguous gradient"
decoder_grad = triton_sparse_transpose_dense_matmul(
sparse_indices, sparse_values, grad_output, N=decoder_weight.shape[1]
).T
return (
None,
triton_dense_dense_sparseout_matmul(grad_output, decoder_weight, sparse_indices),
# decoder is contiguous when transposed so this is a matching layout
decoder_grad,
None,
)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n]
b : [m, n]
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
@triton.jit
def triton_add_mul_kernel(
x_ptr,
a_ptr,
b_ptr,
c,
stride_x0,
stride_x1,
stride_a0,
stride_a1,
stride_b0,
stride_b1,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
M: tl.constexpr,
N: tl.constexpr,
):
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
offsets_m = tl.arange(0, BLOCK_SIZE_M) + pid_m * BLOCK_SIZE_M
offsets_n = tl.arange(0, BLOCK_SIZE_N) + pid_n * BLOCK_SIZE_N
x = tl.load(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
a = tl.load(
a_ptr + offsets_m[:, None] * stride_a0 + offsets_n[None, :] * stride_a1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
b = tl.load(
b_ptr + offsets_m[:, None] * stride_b0 + offsets_n[None, :] * stride_b1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
x_dtype = x.dtype
x = (x.to(tl.float32) + a.to(tl.float32) * b.to(tl.float32) * c).to(x_dtype)
tl.store(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
x,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
def triton_sum_dim0_in_fp32(xs):
a, b = xs.shape
assert xs.is_contiguous()
assert xs.dtype == torch.float16
BLOCK_SIZE_A = min(triton.next_power_of_2(a), 512)
BLOCK_SIZE_B = 64 # cache line is 128 bytes
out = torch.zeros(b, dtype=torch.float32, device=xs.device)
grid = lambda META: (triton.cdiv(b, META["BLOCK_SIZE_B"]),)
triton_sum_dim0_in_fp32_kernel[grid](
xs,
out,
stride_a=xs.stride(0),
a=a,
b=b,
BLOCK_SIZE_A=BLOCK_SIZE_A,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
@triton.jit
def triton_sum_dim0_in_fp32_kernel(
xs_ptr,
out_ptr,
stride_a,
a,
b,
BLOCK_SIZE_A: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
# each program handles 64 columns of xs
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B) + pid * BLOCK_SIZE_B
all_out = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for i in range(0, a, BLOCK_SIZE_A):
offsets_a = tl.arange(0, BLOCK_SIZE_A) + i
xs = tl.load(
xs_ptr + offsets_a[:, None] * stride_a + offsets_b[None, :],
mask=(offsets_a < a)[:, None] & (offsets_b < b)[None, :],
other=0,
)
xs = xs.to(tl.float32)
out = tl.sum(xs, axis=0)
all_out += out
tl.store(out_ptr + offsets_b, all_out, mask=offsets_b < b)
def mse(
output,
target,
): # fusing fp32 cast and MSE to save memory
assert output.shape == target.shape
assert len(output.shape) == 2
assert output.stride(1) == 1
assert target.stride(1) == 1
a, b = output.shape
BLOCK_SIZE_B = triton.next_power_of_2(b)
class _MSE(torch.autograd.Function):
@staticmethod
def forward(ctx, output, target):
ctx.save_for_backward(output, target)
out = torch.zeros(a, dtype=torch.float32, device=output.device)
triton_mse_loss_fp16_kernel[(a,)](
output,
target,
out,
stride_a_output=output.stride(0),
stride_a_target=target.stride(0),
a=a,
b=b,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
@staticmethod
def backward(ctx, grad_output):
output, target = ctx.saved_tensors
res = (output - target).float()
res *= grad_output[:, None] * 2 / b
return res, None
return _MSE.apply(output, target).mean()
def normalized_mse(recon: torch.Tensor, xs: torch.Tensor) -> torch.Tensor:
# only used for auxk
xs_mu = (
triton_sum_dim0_in_fp32(xs) / xs.shape[0]
if xs.dtype == torch.float16
else xs.mean(dim=0)
)
loss = mse(recon, xs) / mse(
xs_mu[None, :].broadcast_to(xs.shape), xs
)
return loss
@triton.jit
def triton_mse_loss_fp16_kernel(
output_ptr,
target_ptr,
out_ptr,
stride_a_output,
stride_a_target,
a,
b,
BLOCK_SIZE_B: tl.constexpr,
):
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
output = tl.load(
output_ptr + pid * stride_a_output + offsets_b,
mask=offsets_b < b,
)
target = tl.load(
target_ptr + pid * stride_a_target + offsets_b,
mask=offsets_b < b,
)
output = output.to(tl.float32)
target = target.to(tl.float32)
mse = tl.sum((output - target) * (output - target)) / b
tl.store(out_ptr + pid, mse)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n]
b : [m, n]
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
|
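The gather identity quoted in the docstring of triton_dense_dense_sparseout_matmul gives a direct way to sanity-check it. The sketch below keeps K at 16 so the Triton path (rather than the dense fallback used when K > 512) is exercised, and builds dense2 as a transposed tensor so that its dim 0 is contiguous, as the asserts require; the import path is a guess based on the file name.

import torch
from kernels import triton_dense_dense_sparseout_matmul  ## hypothetical import path

A, B, N, K = 8, 64, 256, 16
dense1 = torch.randn(A, B, device="cuda")
dense2 = torch.randn(N, B, device="cuda").T  ## shape (B, N) with stride(0) == 1
at_indices = torch.randint(0, N, (A, K), device="cuda")

out = triton_dense_dense_sparseout_matmul(dense1, dense2, at_indices)
ref = (dense1 @ dense2).gather(1, at_indices)
print(torch.allclose(out, ref, atol=1e-3))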
@triton.jit
def triton_sparse_transpose_dense_matmul_kernel(
coo_indices_ptr,
coo_values_ptr,
dense_ptr,
out_ptr,
stride_da,
stride_db,
B,
N,
AK,
BLOCK_SIZE_AK: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
coo_indices is shape (2, AK)
coo_values is shape (AK,)
dense is shape (A, B), contiguous along B
out is shape (N, B)
"""
pid_ak = tl.program_id(0)
pid_b = tl.program_id(1)
coo_offsets = tl.arange(0, BLOCK_SIZE_AK)
b_offsets = tl.arange(0, BLOCK_SIZE_B)
A_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
K_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets + AK,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
values = tl.load(
coo_values_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
last_k = tl.min(K_coords)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for ind in range(BLOCK_SIZE_AK):
if ind + pid_ak * BLOCK_SIZE_AK < AK:
# workaround to do A_coords[ind]
a = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
A_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
k = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
K_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
values,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.float32),
)
)
tl.device_assert(k < N)
if k != last_k:
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
accum *= 0
last_k = k
if v != 0:
accum += v * tl.load(dense_ptr + a * stride_da + b_offsets, mask=b_offsets < B)
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
def triton_sparse_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
"""
calculates sparse @ dense (i.e reducing along the uncollated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (N, B)
output is shape (A, B)
"""
N = dense.shape[0]
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
A = sparse_indices.shape[0]
K = sparse_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(A, B, device=dense.device, dtype=sparse_values.dtype)
triton_sparse_dense_matmul_kernel[(A,)](
sparse_indices,
sparse_values,
dense,
out,
stride_dn=dense.stride(0),
stride_db=dense.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_K=triton.next_power_of_2(K),
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
|
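For the sparse-times-dense path, the docstring semantics can likewise be checked against a small einsum reference: row a of the output is the sum over k of sparse_values[a, k] * dense[sparse_indices[a, k]]. The sizes below are illustrative and the import path is a guess based on the file name.

import torch
from kernels import triton_sparse_dense_matmul  ## hypothetical import path

A, K, N, B = 4, 8, 64, 32
sparse_indices = torch.randint(0, N, (A, K), device="cuda")
sparse_values = torch.randn(A, K, device="cuda")
dense = torch.randn(N, B, device="cuda")

out = triton_sparse_dense_matmul(sparse_indices, sparse_values, dense)
ref = torch.einsum('ak, akb -> ab', sparse_values, dense[sparse_indices])
print(torch.allclose(out, ref, atol=1e-4))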
jsalt2024-evaluating-llms-for-astronomy/retrieval
|
saerch/kernels.py
|
https://github.com/jsalt2024-evaluating-llms-for-astronomy/retrieval/blob/f8ec8914e49dbf4680b56efd7771264549b9a2da/saerch/kernels.py
|
### kernels.py ###
import torch
import triton
import triton.language as tl
## kernels
def triton_sparse_transpose_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
"""
calculates sparse.T @ dense (i.e reducing along the collated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (A, B)
output is shape (N, B)
"""
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
K = sparse_indices.shape[1]
A = dense.shape[0]
B = dense.shape[1]
assert sparse_indices.shape[0] == A
# COO-format and sorted
sorted_indices = sparse_indices.view(-1).sort()
coo_indices = torch.stack(
[
torch.arange(A, device=sparse_indices.device).repeat_interleave(K)[
sorted_indices.indices
],
sorted_indices.values,
]
) # shape (2, A * K)
coo_values = sparse_values.view(-1)[sorted_indices.indices] # shape (A * K,)
return triton_coo_sparse_dense_matmul(coo_indices, coo_values, dense, N, BLOCK_SIZE_AK)
def triton_coo_sparse_dense_matmul(
coo_indices: torch.Tensor,
coo_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
AK = coo_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(N, B, device=dense.device, dtype=coo_values.dtype)
grid = lambda META: (
triton.cdiv(AK, META["BLOCK_SIZE_AK"]),
1,
)
triton_sparse_transpose_dense_matmul_kernel[grid](
coo_indices,
coo_values,
dense,
out,
stride_da=dense.stride(0),
stride_db=dense.stride(1),
B=B,
N=N,
AK=AK,
BLOCK_SIZE_AK=BLOCK_SIZE_AK,
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_transpose_dense_matmul_kernel(
coo_indices_ptr,
coo_values_ptr,
dense_ptr,
out_ptr,
stride_da,
stride_db,
B,
N,
AK,
BLOCK_SIZE_AK: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
coo_indices is shape (2, AK)
coo_values is shape (AK,)
dense is shape (A, B), contiguous along B
out is shape (N, B)
"""
pid_ak = tl.program_id(0)
pid_b = tl.program_id(1)
coo_offsets = tl.arange(0, BLOCK_SIZE_AK)
b_offsets = tl.arange(0, BLOCK_SIZE_B)
A_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
K_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets + AK,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
values = tl.load(
coo_values_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
last_k = tl.min(K_coords)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for ind in range(BLOCK_SIZE_AK):
if ind + pid_ak * BLOCK_SIZE_AK < AK:
# workaround to do A_coords[ind]
a = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
A_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
k = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
K_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
values,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.float32),
)
)
tl.device_assert(k < N)
if k != last_k:
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
accum *= 0
last_k = k
if v != 0:
accum += v * tl.load(dense_ptr + a * stride_da + b_offsets, mask=b_offsets < B)
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
def triton_sparse_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
"""
calculates sparse @ dense (i.e reducing along the uncollated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (N, B)
output is shape (A, B)
"""
N = dense.shape[0]
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
A = sparse_indices.shape[0]
K = sparse_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(A, B, device=dense.device, dtype=sparse_values.dtype)
triton_sparse_dense_matmul_kernel[(A,)](
sparse_indices,
sparse_values,
dense,
out,
stride_dn=dense.stride(0),
stride_db=dense.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_K=triton.next_power_of_2(K),
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_dense_matmul_kernel(
sparse_indices_ptr,
sparse_values_ptr,
dense_ptr,
out_ptr,
stride_dn,
stride_db,
A,
B,
N,
K,
BLOCK_SIZE_K: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
sparse_indices is shape (A, K)
sparse_values is shape (A, K)
dense is shape (N, B), contiguous along B
out is shape (A, B)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
sparse_indices = tl.load(
sparse_indices_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
sparse_values = tl.load(
sparse_values_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
for k in range(K):
# workaround to do sparse_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
# workaround to do sparse_values[k]
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_values,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
)
)
tl.device_assert(i < N)
if v != 0:
accum += v * tl.load(
dense_ptr + i * stride_dn + offsets_b * stride_db, mask=offsets_b < B
)
tl.store(out_ptr + pid * B + offsets_b, accum.to(sparse_values.dtype), mask=offsets_b < B)
def triton_dense_dense_sparseout_matmul(
dense1: torch.Tensor,
dense2: torch.Tensor,
at_indices: torch.Tensor,
) -> torch.Tensor:
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
calculates dense1 @ dense2 only for the indices in at_indices
equivalent to (dense1 @ dense2).gather(1, at_indices)
"""
A, B = dense1.shape
N = dense2.shape[1]
assert dense2.shape[0] == B
assert at_indices.shape[0] == A
K = at_indices.shape[1]
assert at_indices.is_contiguous()
assert dense1.stride(1) == 1, "dense1 must be contiguous along B"
assert dense2.stride(0) == 1, "dense2 must be contiguous along B"
if K > 512:
# print("WARN - using naive matmul for large K")
# naive is more efficient for large K
return (dense1 @ dense2).gather(1, at_indices)
out = torch.zeros(A, K, device=dense1.device, dtype=dense1.dtype)
# grid = lambda META: (triton.cdiv(A, META['BLOCK_SIZE_A']),)
triton_dense_dense_sparseout_matmul_kernel[(A,)](
dense1,
dense2,
at_indices,
out,
stride_d1a=dense1.stride(0),
stride_d1b=dense1.stride(1),
stride_d2b=dense2.stride(0),
stride_d2n=dense2.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_B=triton.next_power_of_2(B),
BLOCK_SIZE_N=triton.next_power_of_2(N),
BLOCK_SIZE_K=triton.next_power_of_2(K),
)
return out
@triton.jit
def triton_dense_dense_sparseout_matmul_kernel(
dense1_ptr,
dense2_ptr,
at_indices_ptr,
out_ptr,
stride_d1a,
stride_d1b,
stride_d2b,
stride_d2n,
A,
B,
N,
K,
BLOCK_SIZE_B: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
at_indices = tl.load(at_indices_ptr + pid * K + offsets_k, mask=offsets_k < K) # shape (K,)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
dense1 = tl.load(
dense1_ptr + pid * stride_d1a + offsets_b * stride_d1b, mask=offsets_b < B
) # shape (B,)
accum = tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32)
for k in range(K):
        # workaround to do at_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
at_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
tl.device_assert(i < N)
dense2col = tl.load(
dense2_ptr + offsets_b * stride_d2b + i * stride_d2n, mask=offsets_b < B
) # shape (B,)
accum += tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
tl.sum(dense1 * dense2col),
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
tl.store(out_ptr + pid * K + offsets_k, accum, mask=offsets_k < K)
class TritonDecoderAutograd(torch.autograd.Function):
@staticmethod
def forward(ctx, sparse_indices, sparse_values, decoder_weight):
ctx.save_for_backward(sparse_indices, sparse_values, decoder_weight)
return triton_sparse_dense_matmul(sparse_indices, sparse_values, decoder_weight.T)
@staticmethod
def backward(ctx, grad_output):
sparse_indices, sparse_values, decoder_weight = ctx.saved_tensors
assert grad_output.is_contiguous(), "grad_output must be contiguous; this is probably because the subsequent op was a .sum() or something like that, which returns a non contiguous gradient"
decoder_grad = triton_sparse_transpose_dense_matmul(
sparse_indices, sparse_values, grad_output, N=decoder_weight.shape[1]
).T
return (
None,
triton_dense_dense_sparseout_matmul(grad_output, decoder_weight, sparse_indices),
# decoder is contiguous when transposed so this is a matching layout
decoder_grad,
None,
)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n]
b : [m, n]
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
@triton.jit
def triton_add_mul_kernel(
x_ptr,
a_ptr,
b_ptr,
c,
stride_x0,
stride_x1,
stride_a0,
stride_a1,
stride_b0,
stride_b1,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
M: tl.constexpr,
N: tl.constexpr,
):
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
offsets_m = tl.arange(0, BLOCK_SIZE_M) + pid_m * BLOCK_SIZE_M
offsets_n = tl.arange(0, BLOCK_SIZE_N) + pid_n * BLOCK_SIZE_N
x = tl.load(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
a = tl.load(
a_ptr + offsets_m[:, None] * stride_a0 + offsets_n[None, :] * stride_a1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
b = tl.load(
b_ptr + offsets_m[:, None] * stride_b0 + offsets_n[None, :] * stride_b1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
x_dtype = x.dtype
x = (x.to(tl.float32) + a.to(tl.float32) * b.to(tl.float32) * c).to(x_dtype)
tl.store(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
x,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
def triton_sum_dim0_in_fp32(xs):
a, b = xs.shape
assert xs.is_contiguous()
assert xs.dtype == torch.float16
BLOCK_SIZE_A = min(triton.next_power_of_2(a), 512)
BLOCK_SIZE_B = 64 # cache line is 128 bytes
out = torch.zeros(b, dtype=torch.float32, device=xs.device)
grid = lambda META: (triton.cdiv(b, META["BLOCK_SIZE_B"]),)
triton_sum_dim0_in_fp32_kernel[grid](
xs,
out,
stride_a=xs.stride(0),
a=a,
b=b,
BLOCK_SIZE_A=BLOCK_SIZE_A,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
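# --- illustrative sketch (not part of the original file) ----------------------------
# fp16 inputs are reduced over dim 0 with fp32 accumulation; compare against casting
# up front. The name and sizes below are assumptions. Requires a CUDA device.
def _demo_sum_dim0(a=1000, b=512):
    xs = torch.randn(a, b, device="cuda", dtype=torch.float16)
    got = triton_sum_dim0_in_fp32(xs)             # (b,) float32
    ref = xs.float().sum(dim=0)
    assert torch.allclose(got, ref, rtol=1e-4, atol=1e-3)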
@triton.jit
def triton_sum_dim0_in_fp32_kernel(
xs_ptr,
out_ptr,
stride_a,
a,
b,
BLOCK_SIZE_A: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
# each program handles 64 columns of xs
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B) + pid * BLOCK_SIZE_B
all_out = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for i in range(0, a, BLOCK_SIZE_A):
offsets_a = tl.arange(0, BLOCK_SIZE_A) + i
xs = tl.load(
xs_ptr + offsets_a[:, None] * stride_a + offsets_b[None, :],
mask=(offsets_a < a)[:, None] & (offsets_b < b)[None, :],
other=0,
)
xs = xs.to(tl.float32)
out = tl.sum(xs, axis=0)
all_out += out
tl.store(out_ptr + offsets_b, all_out, mask=offsets_b < b)
def mse(
output,
target,
): # fusing fp32 cast and MSE to save memory
assert output.shape == target.shape
assert len(output.shape) == 2
assert output.stride(1) == 1
assert target.stride(1) == 1
a, b = output.shape
BLOCK_SIZE_B = triton.next_power_of_2(b)
class _MSE(torch.autograd.Function):
@staticmethod
def forward(ctx, output, target):
ctx.save_for_backward(output, target)
out = torch.zeros(a, dtype=torch.float32, device=output.device)
triton_mse_loss_fp16_kernel[(a,)](
output,
target,
out,
stride_a_output=output.stride(0),
stride_a_target=target.stride(0),
a=a,
b=b,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
@staticmethod
def backward(ctx, grad_output):
output, target = ctx.saved_tensors
res = (output - target).float()
res *= grad_output[:, None] * 2 / b
return res, None
return _MSE.apply(output, target).mean()
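# --- illustrative sketch (not part of the original file) ----------------------------
# The fused kernel computes per-row MSE in fp32 and mse() returns the mean over rows,
# which equals the plain fp32 MSE over the whole matrix. The name and sizes below are
# assumptions. Requires a CUDA device.
def _demo_mse(a=64, b=512):
    output = torch.randn(a, b, device="cuda", dtype=torch.float16)
    target = torch.randn(a, b, device="cuda", dtype=torch.float16)
    got = mse(output, target)                     # fp32 scalar
    ref = (output.float() - target.float()).pow(2).mean()
    assert torch.allclose(got, ref, rtol=1e-4, atol=1e-4)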
def normalized_mse(recon: torch.Tensor, xs: torch.Tensor) -> torch.Tensor:
# only used for auxk
xs_mu = (
triton_sum_dim0_in_fp32(xs) / xs.shape[0]
if xs.dtype == torch.float16
else xs.mean(dim=0)
)
loss = mse(recon, xs) / mse(
xs_mu[None, :].broadcast_to(xs.shape), xs
)
return loss
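# --- illustrative sketch (not part of the original file) ----------------------------
# normalized_mse divides the reconstruction error by the error of the best constant
# (column-mean) predictor, so values below 1 mean the reconstruction beats predicting
# the mean. A perfect reconstruction gives exactly 0. The name below is an assumption.
def _demo_normalized_mse(a=64, b=512):
    xs = torch.randn(a, b, device="cuda", dtype=torch.float16)
    assert normalized_mse(xs.clone(), xs).item() == 0.0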
@triton.jit
def triton_mse_loss_fp16_kernel(
output_ptr,
target_ptr,
out_ptr,
stride_a_output,
stride_a_target,
a,
b,
BLOCK_SIZE_B: tl.constexpr,
):
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
output = tl.load(
output_ptr + pid * stride_a_output + offsets_b,
mask=offsets_b < b,
)
target = tl.load(
target_ptr + pid * stride_a_target + offsets_b,
mask=offsets_b < b,
)
output = output.to(tl.float32)
target = target.to(tl.float32)
mse = tl.sum((output - target) * (output - target)) / b
tl.store(out_ptr + pid, mse)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n]
b : [m, n]
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
|
@triton.jit
def triton_sparse_dense_matmul_kernel(
sparse_indices_ptr,
sparse_values_ptr,
dense_ptr,
out_ptr,
stride_dn,
stride_db,
A,
B,
N,
K,
BLOCK_SIZE_K: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
sparse_indices is shape (A, K)
sparse_values is shape (A, K)
dense is shape (N, B), contiguous along B
out is shape (A, B)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
sparse_indices = tl.load(
sparse_indices_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
sparse_values = tl.load(
sparse_values_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
for k in range(K):
# workaround to do sparse_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
# workaround to do sparse_values[k]
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_values,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
)
)
tl.device_assert(i < N)
if v != 0:
accum += v * tl.load(
dense_ptr + i * stride_dn + offsets_b * stride_db, mask=offsets_b < B
)
tl.store(out_ptr + pid * B + offsets_b, accum.to(sparse_values.dtype), mask=offsets_b < B)
def triton_dense_dense_sparseout_matmul(
dense1: torch.Tensor,
dense2: torch.Tensor,
at_indices: torch.Tensor,
) -> torch.Tensor:
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
calculates dense1 @ dense2 only for the indices in at_indices
equivalent to (dense1 @ dense2).gather(1, at_indices)
"""
A, B = dense1.shape
N = dense2.shape[1]
assert dense2.shape[0] == B
assert at_indices.shape[0] == A
K = at_indices.shape[1]
assert at_indices.is_contiguous()
assert dense1.stride(1) == 1, "dense1 must be contiguous along B"
assert dense2.stride(0) == 1, "dense2 must be contiguous along B"
if K > 512:
# print("WARN - using naive matmul for large K")
# naive is more efficient for large K
return (dense1 @ dense2).gather(1, at_indices)
out = torch.zeros(A, K, device=dense1.device, dtype=dense1.dtype)
# grid = lambda META: (triton.cdiv(A, META['BLOCK_SIZE_A']),)
triton_dense_dense_sparseout_matmul_kernel[(A,)](
dense1,
dense2,
at_indices,
out,
stride_d1a=dense1.stride(0),
stride_d1b=dense1.stride(1),
stride_d2b=dense2.stride(0),
stride_d2n=dense2.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_B=triton.next_power_of_2(B),
BLOCK_SIZE_N=triton.next_power_of_2(N),
BLOCK_SIZE_K=triton.next_power_of_2(K),
)
return out
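# --- illustrative sketch (not part of the original file) ----------------------------
# Checks the sparse-output matmul against the dense reference it is documented to match.
# dense2 is built so its dim 0 is contiguous, as the wrapper asserts; the loose tolerance
# allows for TF32 in the reference matmul. Names and sizes are assumptions; requires CUDA.
def _demo_dense_dense_sparseout(a=16, b=256, n=1024, k=32):
    dense1 = torch.randn(a, b, device="cuda")
    dense2 = torch.randn(n, b, device="cuda").T           # (b, n), stride(0) == 1
    at_indices = torch.randint(0, n, (a, k), device="cuda")
    got = triton_dense_dense_sparseout_matmul(dense1, dense2, at_indices)
    ref = (dense1 @ dense2).gather(1, at_indices)
    assert torch.allclose(got, ref, rtol=1e-3, atol=1e-2)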
|
jsalt2024-evaluating-llms-for-astronomy/retrieval
|
saerch/kernels.py
|
https://github.com/jsalt2024-evaluating-llms-for-astronomy/retrieval/blob/f8ec8914e49dbf4680b56efd7771264549b9a2da/saerch/kernels.py
|
### kernels.py ###
import torch
import triton
import triton.language as tl
## kernels
def triton_sparse_transpose_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
"""
calculates sparse.T @ dense (i.e. reducing along the collated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (A, B)
output is shape (N, B)
"""
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
K = sparse_indices.shape[1]
A = dense.shape[0]
B = dense.shape[1]
assert sparse_indices.shape[0] == A
# convert to COO format, sorted by feature index so entries sharing an output row are adjacent (fewer atomic flushes in the kernel)
sorted_indices = sparse_indices.view(-1).sort()
coo_indices = torch.stack(
[
torch.arange(A, device=sparse_indices.device).repeat_interleave(K)[
sorted_indices.indices
],
sorted_indices.values,
]
) # shape (2, A * K)
coo_values = sparse_values.view(-1)[sorted_indices.indices] # shape (A * K,)
return triton_coo_sparse_dense_matmul(coo_indices, coo_values, dense, N, BLOCK_SIZE_AK)
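# --- illustrative sketch (not part of the original file) ----------------------------
# Checks sparse.T @ dense against densifying the (A, N) sparse matrix first; scatter_add_
# matches the kernel's summation if a row happens to repeat an index. Names and sizes
# are assumptions; requires a CUDA device.
def _demo_sparse_transpose_dense(a=16, b=128, n=1024, k=32):
    indices = torch.randint(0, n, (a, k), device="cuda")
    values = torch.randn(a, k, device="cuda")
    dense = torch.randn(a, b, device="cuda")
    got = triton_sparse_transpose_dense_matmul(indices, values, dense, N=n)  # (n, b)
    dense_sparse = torch.zeros(a, n, device="cuda").scatter_add_(1, indices, values)
    ref = dense_sparse.T @ dense
    assert torch.allclose(got, ref, rtol=1e-3, atol=1e-2)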
def triton_coo_sparse_dense_matmul(
coo_indices: torch.Tensor,
coo_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
AK = coo_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(N, B, device=dense.device, dtype=coo_values.dtype)
grid = lambda META: (
triton.cdiv(AK, META["BLOCK_SIZE_AK"]),
1,
)
triton_sparse_transpose_dense_matmul_kernel[grid](
coo_indices,
coo_values,
dense,
out,
stride_da=dense.stride(0),
stride_db=dense.stride(1),
B=B,
N=N,
AK=AK,
BLOCK_SIZE_AK=BLOCK_SIZE_AK,
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
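# --- illustrative sketch (not part of the original file) ----------------------------
# Calling the COO entry point directly with a tiny hand-built matrix. Row 0 of
# coo_indices holds the A coordinates, row 1 the N coordinates, sorted by N as the
# wrapper above produces them. Here sparse = [[1, 0], [0, 2]], so sparse.T @ dense is
# [dense[0], 2 * dense[1]]. Names are assumptions; requires a CUDA device.
def _demo_coo_sparse_dense(b=64):
    dense = torch.randn(2, b, device="cuda")
    coo_indices = torch.tensor([[0, 1], [0, 1]], device="cuda")
    coo_values = torch.tensor([1.0, 2.0], device="cuda")
    out = triton_coo_sparse_dense_matmul(coo_indices, coo_values, dense, N=2)
    assert torch.allclose(out[0], dense[0]) and torch.allclose(out[1], 2 * dense[1])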
@triton.jit
def triton_sparse_transpose_dense_matmul_kernel(
coo_indices_ptr,
coo_values_ptr,
dense_ptr,
out_ptr,
stride_da,
stride_db,
B,
N,
AK,
BLOCK_SIZE_AK: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
coo_indices is shape (2, AK)
coo_values is shape (AK,)
dense is shape (A, B), contiguous along B
out is shape (N, B)
"""
pid_ak = tl.program_id(0)
pid_b = tl.program_id(1)
coo_offsets = tl.arange(0, BLOCK_SIZE_AK)
b_offsets = tl.arange(0, BLOCK_SIZE_B)
A_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
K_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets + AK,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
values = tl.load(
coo_values_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
last_k = tl.min(K_coords)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for ind in range(BLOCK_SIZE_AK):
if ind + pid_ak * BLOCK_SIZE_AK < AK:
# workaround to do A_coords[ind]
a = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
A_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
k = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
K_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
values,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.float32),
)
)
tl.device_assert(k < N)
if k != last_k:
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
accum *= 0
last_k = k
if v != 0:
accum += v * tl.load(dense_ptr + a * stride_da + b_offsets, mask=b_offsets < B)
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
def triton_sparse_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
"""
calculates sparse @ dense (i.e. reducing along the uncollated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (N, B)
output is shape (A, B)
"""
N = dense.shape[0]
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
A = sparse_indices.shape[0]
K = sparse_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(A, B, device=dense.device, dtype=sparse_values.dtype)
triton_sparse_dense_matmul_kernel[(A,)](
sparse_indices,
sparse_values,
dense,
out,
stride_dn=dense.stride(0),
stride_db=dense.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_K=triton.next_power_of_2(K),
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
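# --- illustrative sketch (not part of the original file) ----------------------------
# Checks sparse @ dense against densifying the (A, N) sparse matrix first; scatter_add_
# matches the kernel's summation if a row repeats an index. Names and sizes are
# assumptions; requires a CUDA device.
def _demo_sparse_dense(a=16, b=128, n=1024, k=32):
    indices = torch.randint(0, n, (a, k), device="cuda")
    values = torch.randn(a, k, device="cuda")
    dense = torch.randn(n, b, device="cuda")
    got = triton_sparse_dense_matmul(indices, values, dense)        # (a, b)
    dense_sparse = torch.zeros(a, n, device="cuda").scatter_add_(1, indices, values)
    ref = dense_sparse @ dense
    assert torch.allclose(got, ref, rtol=1e-3, atol=1e-2)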
@triton.jit
def triton_sparse_dense_matmul_kernel(
sparse_indices_ptr,
sparse_values_ptr,
dense_ptr,
out_ptr,
stride_dn,
stride_db,
A,
B,
N,
K,
BLOCK_SIZE_K: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
sparse_indices is shape (A, K)
sparse_values is shape (A, K)
dense is shape (N, B), contiguous along B
out is shape (A, B)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
sparse_indices = tl.load(
sparse_indices_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
sparse_values = tl.load(
sparse_values_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
for k in range(K):
# workaround to do sparse_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
# workaround to do sparse_values[k]
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_values,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
)
)
tl.device_assert(i < N)
if v != 0:
accum += v * tl.load(
dense_ptr + i * stride_dn + offsets_b * stride_db, mask=offsets_b < B
)
tl.store(out_ptr + pid * B + offsets_b, accum.to(sparse_values.dtype), mask=offsets_b < B)
def triton_dense_dense_sparseout_matmul(
dense1: torch.Tensor,
dense2: torch.Tensor,
at_indices: torch.Tensor,
) -> torch.Tensor:
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
calculates dense1 @ dense2 only for the indices in at_indices
equivalent to (dense1 @ dense2).gather(1, at_indices)
"""
A, B = dense1.shape
N = dense2.shape[1]
assert dense2.shape[0] == B
assert at_indices.shape[0] == A
K = at_indices.shape[1]
assert at_indices.is_contiguous()
assert dense1.stride(1) == 1, "dense1 must be contiguous along B"
assert dense2.stride(0) == 1, "dense2 must be contiguous along B"
if K > 512:
# print("WARN - using naive matmul for large K")
# naive is more efficient for large K
return (dense1 @ dense2).gather(1, at_indices)
out = torch.zeros(A, K, device=dense1.device, dtype=dense1.dtype)
# grid = lambda META: (triton.cdiv(A, META['BLOCK_SIZE_A']),)
triton_dense_dense_sparseout_matmul_kernel[(A,)](
dense1,
dense2,
at_indices,
out,
stride_d1a=dense1.stride(0),
stride_d1b=dense1.stride(1),
stride_d2b=dense2.stride(0),
stride_d2n=dense2.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_B=triton.next_power_of_2(B),
BLOCK_SIZE_N=triton.next_power_of_2(N),
BLOCK_SIZE_K=triton.next_power_of_2(K),
)
return out
@triton.jit
def triton_dense_dense_sparseout_matmul_kernel(
dense1_ptr,
dense2_ptr,
at_indices_ptr,
out_ptr,
stride_d1a,
stride_d1b,
stride_d2b,
stride_d2n,
A,
B,
N,
K,
BLOCK_SIZE_B: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
at_indices = tl.load(at_indices_ptr + pid * K + offsets_k, mask=offsets_k < K) # shape (K,)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
dense1 = tl.load(
dense1_ptr + pid * stride_d1a + offsets_b * stride_d1b, mask=offsets_b < B
) # shape (B,)
accum = tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32)
for k in range(K):
# workaround to do at_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
at_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
tl.device_assert(i < N)
dense2col = tl.load(
dense2_ptr + offsets_b * stride_d2b + i * stride_d2n, mask=offsets_b < B
) # shape (B,)
accum += tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
tl.sum(dense1 * dense2col),
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),  # float32 zeros to match accum's dtype
)
tl.store(out_ptr + pid * K + offsets_k, accum, mask=offsets_k < K)
class TritonDecoderAutograd(torch.autograd.Function):
@staticmethod
def forward(ctx, sparse_indices, sparse_values, decoder_weight):
ctx.save_for_backward(sparse_indices, sparse_values, decoder_weight)
return triton_sparse_dense_matmul(sparse_indices, sparse_values, decoder_weight.T)
@staticmethod
def backward(ctx, grad_output):
sparse_indices, sparse_values, decoder_weight = ctx.saved_tensors
assert grad_output.is_contiguous(), "grad_output must be contiguous; this usually means the subsequent op (e.g. a .sum()) returned a non-contiguous gradient"
decoder_grad = triton_sparse_transpose_dense_matmul(
sparse_indices, sparse_values, grad_output, N=decoder_weight.shape[1]
).T
return (
None,
triton_dense_dense_sparseout_matmul(grad_output, decoder_weight, sparse_indices),
# decoder is contiguous when transposed so this is a matching layout
decoder_grad,
None,
)
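# --- illustrative sketch (not part of the original file) ----------------------------
# Example of how TritonDecoderAutograd might be wired into a top-k sparse autoencoder
# decode step. The shapes, names and weight layout below are assumptions; note the
# decoder weight is created so that decoder_weight.T is contiguous, as the kernels
# require. Requires a CUDA device with triton installed.
def _demo_triton_decoder(batch=8, n_latents=4096, d_model=512, k=32):
    device = "cuda"
    latents = torch.randn(batch, n_latents, device=device)
    values, indices = latents.topk(k, dim=-1)                 # (batch, k) sparse code
    W = torch.randn(n_latents, d_model, device=device, requires_grad=True)
    decoder_weight = W.T                                       # (d_model, n_latents), .T contiguous
    recon = TritonDecoderAutograd.apply(indices, values, decoder_weight)  # (batch, d_model)
    recon.pow(2).mean().backward()                             # exercises the custom backward
    return recon, W.grad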
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n] or [n] (broadcast over rows)
b : [m, n] or [n] (broadcast over rows)
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
@triton.jit
def triton_add_mul_kernel(
x_ptr,
a_ptr,
b_ptr,
c,
stride_x0,
stride_x1,
stride_a0,
stride_a1,
stride_b0,
stride_b1,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
M: tl.constexpr,
N: tl.constexpr,
):
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
offsets_m = tl.arange(0, BLOCK_SIZE_M) + pid_m * BLOCK_SIZE_M
offsets_n = tl.arange(0, BLOCK_SIZE_N) + pid_n * BLOCK_SIZE_N
x = tl.load(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
a = tl.load(
a_ptr + offsets_m[:, None] * stride_a0 + offsets_n[None, :] * stride_a1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
b = tl.load(
b_ptr + offsets_m[:, None] * stride_b0 + offsets_n[None, :] * stride_b1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
x_dtype = x.dtype
x = (x.to(tl.float32) + a.to(tl.float32) * b.to(tl.float32) * c).to(x_dtype)
tl.store(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
x,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
def triton_sum_dim0_in_fp32(xs):
a, b = xs.shape
assert xs.is_contiguous()
assert xs.dtype == torch.float16
BLOCK_SIZE_A = min(triton.next_power_of_2(a), 512)
BLOCK_SIZE_B = 64 # cache line is 128 bytes
out = torch.zeros(b, dtype=torch.float32, device=xs.device)
grid = lambda META: (triton.cdiv(b, META["BLOCK_SIZE_B"]),)
triton_sum_dim0_in_fp32_kernel[grid](
xs,
out,
stride_a=xs.stride(0),
a=a,
b=b,
BLOCK_SIZE_A=BLOCK_SIZE_A,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
@triton.jit
def triton_sum_dim0_in_fp32_kernel(
xs_ptr,
out_ptr,
stride_a,
a,
b,
BLOCK_SIZE_A: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
# each program handles 64 columns of xs
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B) + pid * BLOCK_SIZE_B
all_out = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for i in range(0, a, BLOCK_SIZE_A):
offsets_a = tl.arange(0, BLOCK_SIZE_A) + i
xs = tl.load(
xs_ptr + offsets_a[:, None] * stride_a + offsets_b[None, :],
mask=(offsets_a < a)[:, None] & (offsets_b < b)[None, :],
other=0,
)
xs = xs.to(tl.float32)
out = tl.sum(xs, axis=0)
all_out += out
tl.store(out_ptr + offsets_b, all_out, mask=offsets_b < b)
def mse(
output,
target,
): # fusing fp32 cast and MSE to save memory
assert output.shape == target.shape
assert len(output.shape) == 2
assert output.stride(1) == 1
assert target.stride(1) == 1
a, b = output.shape
BLOCK_SIZE_B = triton.next_power_of_2(b)
class _MSE(torch.autograd.Function):
@staticmethod
def forward(ctx, output, target):
ctx.save_for_backward(output, target)
out = torch.zeros(a, dtype=torch.float32, device=output.device)
triton_mse_loss_fp16_kernel[(a,)](
output,
target,
out,
stride_a_output=output.stride(0),
stride_a_target=target.stride(0),
a=a,
b=b,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
@staticmethod
def backward(ctx, grad_output):
output, target = ctx.saved_tensors
res = (output - target).float()
res *= grad_output[:, None] * 2 / b
return res, None
return _MSE.apply(output, target).mean()
def normalized_mse(recon: torch.Tensor, xs: torch.Tensor) -> torch.Tensor:
# only used for auxk
xs_mu = (
triton_sum_dim0_in_fp32(xs) / xs.shape[0]
if xs.dtype == torch.float16
else xs.mean(dim=0)
)
loss = mse(recon, xs) / mse(
xs_mu[None, :].broadcast_to(xs.shape), xs
)
return loss
@triton.jit
def triton_mse_loss_fp16_kernel(
output_ptr,
target_ptr,
out_ptr,
stride_a_output,
stride_a_target,
a,
b,
BLOCK_SIZE_B: tl.constexpr,
):
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
output = tl.load(
output_ptr + pid * stride_a_output + offsets_b,
mask=offsets_b < b,
)
target = tl.load(
target_ptr + pid * stride_a_target + offsets_b,
mask=offsets_b < b,
)
output = output.to(tl.float32)
target = target.to(tl.float32)
mse = tl.sum((output - target) * (output - target)) / b
tl.store(out_ptr + pid, mse)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n] or [n] (broadcast over rows)
b : [m, n] or [n] (broadcast over rows)
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
|
@triton.jit
def triton_dense_dense_sparseout_matmul_kernel(
dense1_ptr,
dense2_ptr,
at_indices_ptr,
out_ptr,
stride_d1a,
stride_d1b,
stride_d2b,
stride_d2n,
A,
B,
N,
K,
BLOCK_SIZE_B: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
at_indices = tl.load(at_indices_ptr + pid * K + offsets_k, mask=offsets_k < K) # shape (K,)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
dense1 = tl.load(
dense1_ptr + pid * stride_d1a + offsets_b * stride_d1b, mask=offsets_b < B
) # shape (B,)
accum = tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32)
for k in range(K):
# workaround to do at_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
at_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
tl.device_assert(i < N)
dense2col = tl.load(
dense2_ptr + offsets_b * stride_d2b + i * stride_d2n, mask=offsets_b < B
) # shape (B,)
accum += tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
tl.sum(dense1 * dense2col),
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),  # float32 zeros to match accum's dtype
)
tl.store(out_ptr + pid * K + offsets_k, accum, mask=offsets_k < K)
class TritonDecoderAutograd(torch.autograd.Function):
@staticmethod
def forward(ctx, sparse_indices, sparse_values, decoder_weight):
ctx.save_for_backward(sparse_indices, sparse_values, decoder_weight)
return triton_sparse_dense_matmul(sparse_indices, sparse_values, decoder_weight.T)
@staticmethod
def backward(ctx, grad_output):
sparse_indices, sparse_values, decoder_weight = ctx.saved_tensors
assert grad_output.is_contiguous(), "grad_output must be contiguous; this usually means the subsequent op (e.g. a .sum()) returned a non-contiguous gradient"
decoder_grad = triton_sparse_transpose_dense_matmul(
sparse_indices, sparse_values, grad_output, N=decoder_weight.shape[1]
).T
return (
None,
triton_dense_dense_sparseout_matmul(grad_output, decoder_weight, sparse_indices),
# decoder is contiguous when transposed so this is a matching layout
decoder_grad,
None,
)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n] or [n] (broadcast over rows)
b : [m, n] or [n] (broadcast over rows)
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
|
jsalt2024-evaluating-llms-for-astronomy/retrieval
|
saerch/kernels.py
|
https://github.com/jsalt2024-evaluating-llms-for-astronomy/retrieval/blob/f8ec8914e49dbf4680b56efd7771264549b9a2da/saerch/kernels.py
|
### kernels.py ###
import torch
import triton
import triton.language as tl
## kernels
def triton_sparse_transpose_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
"""
calculates sparse.T @ dense (i.e. reducing along the collated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (A, B)
output is shape (N, B)
"""
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
K = sparse_indices.shape[1]
A = dense.shape[0]
B = dense.shape[1]
assert sparse_indices.shape[0] == A
# convert to COO format, sorted by feature index so entries sharing an output row are adjacent (fewer atomic flushes in the kernel)
sorted_indices = sparse_indices.view(-1).sort()
coo_indices = torch.stack(
[
torch.arange(A, device=sparse_indices.device).repeat_interleave(K)[
sorted_indices.indices
],
sorted_indices.values,
]
) # shape (2, A * K)
coo_values = sparse_values.view(-1)[sorted_indices.indices] # shape (A * K,)
return triton_coo_sparse_dense_matmul(coo_indices, coo_values, dense, N, BLOCK_SIZE_AK)
def triton_coo_sparse_dense_matmul(
coo_indices: torch.Tensor,
coo_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
AK = coo_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(N, B, device=dense.device, dtype=coo_values.dtype)
grid = lambda META: (
triton.cdiv(AK, META["BLOCK_SIZE_AK"]),
1,
)
triton_sparse_transpose_dense_matmul_kernel[grid](
coo_indices,
coo_values,
dense,
out,
stride_da=dense.stride(0),
stride_db=dense.stride(1),
B=B,
N=N,
AK=AK,
BLOCK_SIZE_AK=BLOCK_SIZE_AK,
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_transpose_dense_matmul_kernel(
coo_indices_ptr,
coo_values_ptr,
dense_ptr,
out_ptr,
stride_da,
stride_db,
B,
N,
AK,
BLOCK_SIZE_AK: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
coo_indices is shape (2, AK)
coo_values is shape (AK,)
dense is shape (A, B), contiguous along B
out is shape (N, B)
"""
pid_ak = tl.program_id(0)
pid_b = tl.program_id(1)
coo_offsets = tl.arange(0, BLOCK_SIZE_AK)
b_offsets = tl.arange(0, BLOCK_SIZE_B)
A_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
K_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets + AK,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
values = tl.load(
coo_values_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
last_k = tl.min(K_coords)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for ind in range(BLOCK_SIZE_AK):
if ind + pid_ak * BLOCK_SIZE_AK < AK:
# workaround to do A_coords[ind]
a = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
A_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
k = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
K_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
values,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.float32),
)
)
tl.device_assert(k < N)
if k != last_k:
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
accum *= 0
last_k = k
if v != 0:
accum += v * tl.load(dense_ptr + a * stride_da + b_offsets, mask=b_offsets < B)
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
def triton_sparse_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
"""
calculates sparse @ dense (i.e. reducing along the uncollated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (N, B)
output is shape (A, B)
"""
N = dense.shape[0]
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
A = sparse_indices.shape[0]
K = sparse_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(A, B, device=dense.device, dtype=sparse_values.dtype)
triton_sparse_dense_matmul_kernel[(A,)](
sparse_indices,
sparse_values,
dense,
out,
stride_dn=dense.stride(0),
stride_db=dense.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_K=triton.next_power_of_2(K),
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_dense_matmul_kernel(
sparse_indices_ptr,
sparse_values_ptr,
dense_ptr,
out_ptr,
stride_dn,
stride_db,
A,
B,
N,
K,
BLOCK_SIZE_K: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
sparse_indices is shape (A, K)
sparse_values is shape (A, K)
dense is shape (N, B), contiguous along B
out is shape (A, B)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
sparse_indices = tl.load(
sparse_indices_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
sparse_values = tl.load(
sparse_values_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
for k in range(K):
# workaround to do sparse_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
# workaround to do sparse_values[k]
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_values,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
)
)
tl.device_assert(i < N)
if v != 0:
accum += v * tl.load(
dense_ptr + i * stride_dn + offsets_b * stride_db, mask=offsets_b < B
)
tl.store(out_ptr + pid * B + offsets_b, accum.to(sparse_values.dtype), mask=offsets_b < B)
def triton_dense_dense_sparseout_matmul(
dense1: torch.Tensor,
dense2: torch.Tensor,
at_indices: torch.Tensor,
) -> torch.Tensor:
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
calculates dense1 @ dense2 only for the indices in at_indices
equivalent to (dense1 @ dense2).gather(1, at_indices)
"""
A, B = dense1.shape
N = dense2.shape[1]
assert dense2.shape[0] == B
assert at_indices.shape[0] == A
K = at_indices.shape[1]
assert at_indices.is_contiguous()
assert dense1.stride(1) == 1, "dense1 must be contiguous along B"
assert dense2.stride(0) == 1, "dense2 must be contiguous along B"
if K > 512:
# print("WARN - using naive matmul for large K")
# naive is more efficient for large K
return (dense1 @ dense2).gather(1, at_indices)
out = torch.zeros(A, K, device=dense1.device, dtype=dense1.dtype)
# grid = lambda META: (triton.cdiv(A, META['BLOCK_SIZE_A']),)
triton_dense_dense_sparseout_matmul_kernel[(A,)](
dense1,
dense2,
at_indices,
out,
stride_d1a=dense1.stride(0),
stride_d1b=dense1.stride(1),
stride_d2b=dense2.stride(0),
stride_d2n=dense2.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_B=triton.next_power_of_2(B),
BLOCK_SIZE_N=triton.next_power_of_2(N),
BLOCK_SIZE_K=triton.next_power_of_2(K),
)
return out
@triton.jit
def triton_dense_dense_sparseout_matmul_kernel(
dense1_ptr,
dense2_ptr,
at_indices_ptr,
out_ptr,
stride_d1a,
stride_d1b,
stride_d2b,
stride_d2n,
A,
B,
N,
K,
BLOCK_SIZE_B: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
at_indices = tl.load(at_indices_ptr + pid * K + offsets_k, mask=offsets_k < K) # shape (K,)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
dense1 = tl.load(
dense1_ptr + pid * stride_d1a + offsets_b * stride_d1b, mask=offsets_b < B
) # shape (B,)
accum = tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32)
for k in range(K):
# workaround to do at_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
at_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
tl.device_assert(i < N)
dense2col = tl.load(
dense2_ptr + offsets_b * stride_d2b + i * stride_d2n, mask=offsets_b < B
) # shape (B,)
accum += tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
tl.sum(dense1 * dense2col),
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),  # float32 zeros to match accum's dtype
)
tl.store(out_ptr + pid * K + offsets_k, accum, mask=offsets_k < K)
class TritonDecoderAutograd(torch.autograd.Function):
@staticmethod
def forward(ctx, sparse_indices, sparse_values, decoder_weight):
ctx.save_for_backward(sparse_indices, sparse_values, decoder_weight)
return triton_sparse_dense_matmul(sparse_indices, sparse_values, decoder_weight.T)
@staticmethod
def backward(ctx, grad_output):
sparse_indices, sparse_values, decoder_weight = ctx.saved_tensors
assert grad_output.is_contiguous(), "grad_output must be contiguous; this usually means the subsequent op (e.g. a .sum()) returned a non-contiguous gradient"
decoder_grad = triton_sparse_transpose_dense_matmul(
sparse_indices, sparse_values, grad_output, N=decoder_weight.shape[1]
).T
return (
None,
triton_dense_dense_sparseout_matmul(grad_output, decoder_weight, sparse_indices),
# decoder is contiguous when transposed so this is a matching layout
decoder_grad,
None,
)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n] or [n] (broadcast over rows)
b : [m, n] or [n] (broadcast over rows)
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
@triton.jit
def triton_add_mul_kernel(
x_ptr,
a_ptr,
b_ptr,
c,
stride_x0,
stride_x1,
stride_a0,
stride_a1,
stride_b0,
stride_b1,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
M: tl.constexpr,
N: tl.constexpr,
):
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
offsets_m = tl.arange(0, BLOCK_SIZE_M) + pid_m * BLOCK_SIZE_M
offsets_n = tl.arange(0, BLOCK_SIZE_N) + pid_n * BLOCK_SIZE_N
x = tl.load(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
a = tl.load(
a_ptr + offsets_m[:, None] * stride_a0 + offsets_n[None, :] * stride_a1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
b = tl.load(
b_ptr + offsets_m[:, None] * stride_b0 + offsets_n[None, :] * stride_b1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
x_dtype = x.dtype
x = (x.to(tl.float32) + a.to(tl.float32) * b.to(tl.float32) * c).to(x_dtype)
tl.store(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
x,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
def triton_sum_dim0_in_fp32(xs):
a, b = xs.shape
assert xs.is_contiguous()
assert xs.dtype == torch.float16
BLOCK_SIZE_A = min(triton.next_power_of_2(a), 512)
BLOCK_SIZE_B = 64 # cache line is 128 bytes
out = torch.zeros(b, dtype=torch.float32, device=xs.device)
grid = lambda META: (triton.cdiv(b, META["BLOCK_SIZE_B"]),)
triton_sum_dim0_in_fp32_kernel[grid](
xs,
out,
stride_a=xs.stride(0),
a=a,
b=b,
BLOCK_SIZE_A=BLOCK_SIZE_A,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
@triton.jit
def triton_sum_dim0_in_fp32_kernel(
xs_ptr,
out_ptr,
stride_a,
a,
b,
BLOCK_SIZE_A: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
# each program handles 64 columns of xs
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B) + pid * BLOCK_SIZE_B
all_out = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for i in range(0, a, BLOCK_SIZE_A):
offsets_a = tl.arange(0, BLOCK_SIZE_A) + i
xs = tl.load(
xs_ptr + offsets_a[:, None] * stride_a + offsets_b[None, :],
mask=(offsets_a < a)[:, None] & (offsets_b < b)[None, :],
other=0,
)
xs = xs.to(tl.float32)
out = tl.sum(xs, axis=0)
all_out += out
tl.store(out_ptr + offsets_b, all_out, mask=offsets_b < b)
def mse(
output,
target,
): # fusing fp32 cast and MSE to save memory
assert output.shape == target.shape
assert len(output.shape) == 2
assert output.stride(1) == 1
assert target.stride(1) == 1
a, b = output.shape
BLOCK_SIZE_B = triton.next_power_of_2(b)
class _MSE(torch.autograd.Function):
@staticmethod
def forward(ctx, output, target):
ctx.save_for_backward(output, target)
out = torch.zeros(a, dtype=torch.float32, device=output.device)
triton_mse_loss_fp16_kernel[(a,)](
output,
target,
out,
stride_a_output=output.stride(0),
stride_a_target=target.stride(0),
a=a,
b=b,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
@staticmethod
def backward(ctx, grad_output):
output, target = ctx.saved_tensors
res = (output - target).float()
res *= grad_output[:, None] * 2 / b
return res, None
return _MSE.apply(output, target).mean()
def normalized_mse(recon: torch.Tensor, xs: torch.Tensor) -> torch.Tensor:
# only used for auxk
xs_mu = (
triton_sum_dim0_in_fp32(xs) / xs.shape[0]
if xs.dtype == torch.float16
else xs.mean(dim=0)
)
loss = mse(recon, xs) / mse(
xs_mu[None, :].broadcast_to(xs.shape), xs
)
return loss
@triton.jit
def triton_mse_loss_fp16_kernel(
output_ptr,
target_ptr,
out_ptr,
stride_a_output,
stride_a_target,
a,
b,
BLOCK_SIZE_B: tl.constexpr,
):
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
output = tl.load(
output_ptr + pid * stride_a_output + offsets_b,
mask=offsets_b < b,
)
target = tl.load(
target_ptr + pid * stride_a_target + offsets_b,
mask=offsets_b < b,
)
output = output.to(tl.float32)
target = target.to(tl.float32)
mse = tl.sum((output - target) * (output - target)) / b
tl.store(out_ptr + pid, mse)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n] or [n] (broadcast over rows)
b : [m, n] or [n] (broadcast over rows)
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
|
@triton.jit
def triton_add_mul_kernel(
x_ptr,
a_ptr,
b_ptr,
c,
stride_x0,
stride_x1,
stride_a0,
stride_a1,
stride_b0,
stride_b1,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
M: tl.constexpr,
N: tl.constexpr,
):
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
offsets_m = tl.arange(0, BLOCK_SIZE_M) + pid_m * BLOCK_SIZE_M
offsets_n = tl.arange(0, BLOCK_SIZE_N) + pid_n * BLOCK_SIZE_N
x = tl.load(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
a = tl.load(
a_ptr + offsets_m[:, None] * stride_a0 + offsets_n[None, :] * stride_a1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
b = tl.load(
b_ptr + offsets_m[:, None] * stride_b0 + offsets_n[None, :] * stride_b1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
x_dtype = x.dtype
x = (x.to(tl.float32) + a.to(tl.float32) * b.to(tl.float32) * c).to(x_dtype)
tl.store(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
x,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
def triton_sum_dim0_in_fp32(xs):
a, b = xs.shape
assert xs.is_contiguous()
assert xs.dtype == torch.float16
BLOCK_SIZE_A = min(triton.next_power_of_2(a), 512)
BLOCK_SIZE_B = 64 # cache line is 128 bytes
out = torch.zeros(b, dtype=torch.float32, device=xs.device)
grid = lambda META: (triton.cdiv(b, META["BLOCK_SIZE_B"]),)
triton_sum_dim0_in_fp32_kernel[grid](
xs,
out,
stride_a=xs.stride(0),
a=a,
b=b,
BLOCK_SIZE_A=BLOCK_SIZE_A,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
|
jsalt2024-evaluating-llms-for-astronomy/retrieval
|
saerch/kernels.py
|
https://github.com/jsalt2024-evaluating-llms-for-astronomy/retrieval/blob/f8ec8914e49dbf4680b56efd7771264549b9a2da/saerch/kernels.py
|
### kernels.py ###
import torch
import triton
import triton.language as tl
## kernels
def triton_sparse_transpose_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
"""
calculates sparse.T @ dense (i.e. reducing along the collated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (A, B)
output is shape (N, B)
"""
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
K = sparse_indices.shape[1]
A = dense.shape[0]
B = dense.shape[1]
assert sparse_indices.shape[0] == A
# convert to COO format, sorted by feature index so entries sharing an output row are adjacent (fewer atomic flushes in the kernel)
sorted_indices = sparse_indices.view(-1).sort()
coo_indices = torch.stack(
[
torch.arange(A, device=sparse_indices.device).repeat_interleave(K)[
sorted_indices.indices
],
sorted_indices.values,
]
) # shape (2, A * K)
coo_values = sparse_values.view(-1)[sorted_indices.indices] # shape (A * K,)
return triton_coo_sparse_dense_matmul(coo_indices, coo_values, dense, N, BLOCK_SIZE_AK)
def triton_coo_sparse_dense_matmul(
coo_indices: torch.Tensor,
coo_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
AK = coo_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(N, B, device=dense.device, dtype=coo_values.dtype)
grid = lambda META: (
triton.cdiv(AK, META["BLOCK_SIZE_AK"]),
1,
)
triton_sparse_transpose_dense_matmul_kernel[grid](
coo_indices,
coo_values,
dense,
out,
stride_da=dense.stride(0),
stride_db=dense.stride(1),
B=B,
N=N,
AK=AK,
BLOCK_SIZE_AK=BLOCK_SIZE_AK,
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_transpose_dense_matmul_kernel(
coo_indices_ptr,
coo_values_ptr,
dense_ptr,
out_ptr,
stride_da,
stride_db,
B,
N,
AK,
BLOCK_SIZE_AK: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
coo_indices is shape (2, AK)
coo_values is shape (AK,)
dense is shape (A, B), contiguous along B
out is shape (N, B)
"""
pid_ak = tl.program_id(0)
pid_b = tl.program_id(1)
coo_offsets = tl.arange(0, BLOCK_SIZE_AK)
b_offsets = tl.arange(0, BLOCK_SIZE_B)
A_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
K_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets + AK,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
values = tl.load(
coo_values_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
last_k = tl.min(K_coords)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for ind in range(BLOCK_SIZE_AK):
if ind + pid_ak * BLOCK_SIZE_AK < AK:
# workaround to do A_coords[ind]
a = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
A_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
k = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
K_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
values,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.float32),
)
)
tl.device_assert(k < N)
if k != last_k:
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
accum *= 0
last_k = k
if v != 0:
accum += v * tl.load(dense_ptr + a * stride_da + b_offsets, mask=b_offsets < B)
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
def triton_sparse_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
"""
calculates sparse @ dense (i.e. reducing along the uncollated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (N, B)
output is shape (A, B)
"""
N = dense.shape[0]
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
A = sparse_indices.shape[0]
K = sparse_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(A, B, device=dense.device, dtype=sparse_values.dtype)
triton_sparse_dense_matmul_kernel[(A,)](
sparse_indices,
sparse_values,
dense,
out,
stride_dn=dense.stride(0),
stride_db=dense.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_K=triton.next_power_of_2(K),
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_dense_matmul_kernel(
sparse_indices_ptr,
sparse_values_ptr,
dense_ptr,
out_ptr,
stride_dn,
stride_db,
A,
B,
N,
K,
BLOCK_SIZE_K: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
sparse_indices is shape (A, K)
sparse_values is shape (A, K)
dense is shape (N, B), contiguous along B
out is shape (A, B)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
sparse_indices = tl.load(
sparse_indices_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
sparse_values = tl.load(
sparse_values_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
for k in range(K):
# workaround to do sparse_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
# workaround to do sparse_values[k]
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_values,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
)
)
tl.device_assert(i < N)
if v != 0:
accum += v * tl.load(
dense_ptr + i * stride_dn + offsets_b * stride_db, mask=offsets_b < B
)
tl.store(out_ptr + pid * B + offsets_b, accum.to(sparse_values.dtype), mask=offsets_b < B)
def triton_dense_dense_sparseout_matmul(
dense1: torch.Tensor,
dense2: torch.Tensor,
at_indices: torch.Tensor,
) -> torch.Tensor:
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
calculates dense1 @ dense2 only for the indices in at_indices
equivalent to (dense1 @ dense2).gather(1, at_indices)
"""
A, B = dense1.shape
N = dense2.shape[1]
assert dense2.shape[0] == B
assert at_indices.shape[0] == A
K = at_indices.shape[1]
assert at_indices.is_contiguous()
assert dense1.stride(1) == 1, "dense1 must be contiguous along B"
assert dense2.stride(0) == 1, "dense2 must be contiguous along B"
if K > 512:
# print("WARN - using naive matmul for large K")
# naive is more efficient for large K
return (dense1 @ dense2).gather(1, at_indices)
out = torch.zeros(A, K, device=dense1.device, dtype=dense1.dtype)
# grid = lambda META: (triton.cdiv(A, META['BLOCK_SIZE_A']),)
triton_dense_dense_sparseout_matmul_kernel[(A,)](
dense1,
dense2,
at_indices,
out,
stride_d1a=dense1.stride(0),
stride_d1b=dense1.stride(1),
stride_d2b=dense2.stride(0),
stride_d2n=dense2.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_B=triton.next_power_of_2(B),
BLOCK_SIZE_N=triton.next_power_of_2(N),
BLOCK_SIZE_K=triton.next_power_of_2(K),
)
return out
@triton.jit
def triton_dense_dense_sparseout_matmul_kernel(
dense1_ptr,
dense2_ptr,
at_indices_ptr,
out_ptr,
stride_d1a,
stride_d1b,
stride_d2b,
stride_d2n,
A,
B,
N,
K,
BLOCK_SIZE_B: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
at_indices = tl.load(at_indices_ptr + pid * K + offsets_k, mask=offsets_k < K) # shape (K,)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
dense1 = tl.load(
dense1_ptr + pid * stride_d1a + offsets_b * stride_d1b, mask=offsets_b < B
) # shape (B,)
accum = tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32)
for k in range(K):
# workaround to do at_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
at_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
tl.device_assert(i < N)
dense2col = tl.load(
dense2_ptr + offsets_b * stride_d2b + i * stride_d2n, mask=offsets_b < B
) # shape (B,)
accum += tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
tl.sum(dense1 * dense2col),
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),  # float32 zeros to match accum's dtype
)
tl.store(out_ptr + pid * K + offsets_k, accum, mask=offsets_k < K)
class TritonDecoderAutograd(torch.autograd.Function):
@staticmethod
def forward(ctx, sparse_indices, sparse_values, decoder_weight):
ctx.save_for_backward(sparse_indices, sparse_values, decoder_weight)
return triton_sparse_dense_matmul(sparse_indices, sparse_values, decoder_weight.T)
@staticmethod
def backward(ctx, grad_output):
sparse_indices, sparse_values, decoder_weight = ctx.saved_tensors
assert grad_output.is_contiguous(), "grad_output must be contiguous; this usually means the subsequent op (e.g. a .sum()) returned a non-contiguous gradient"
decoder_grad = triton_sparse_transpose_dense_matmul(
sparse_indices, sparse_values, grad_output, N=decoder_weight.shape[1]
).T
return (
None,
triton_dense_dense_sparseout_matmul(grad_output, decoder_weight, sparse_indices),
# decoder is contiguous when transposed so this is a matching layout
decoder_grad,
None,
)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n] or [n] (broadcast over rows)
b : [m, n] or [n] (broadcast over rows)
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
@triton.jit
def triton_add_mul_kernel(
x_ptr,
a_ptr,
b_ptr,
c,
stride_x0,
stride_x1,
stride_a0,
stride_a1,
stride_b0,
stride_b1,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
M: tl.constexpr,
N: tl.constexpr,
):
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
offsets_m = tl.arange(0, BLOCK_SIZE_M) + pid_m * BLOCK_SIZE_M
offsets_n = tl.arange(0, BLOCK_SIZE_N) + pid_n * BLOCK_SIZE_N
x = tl.load(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
a = tl.load(
a_ptr + offsets_m[:, None] * stride_a0 + offsets_n[None, :] * stride_a1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
b = tl.load(
b_ptr + offsets_m[:, None] * stride_b0 + offsets_n[None, :] * stride_b1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
x_dtype = x.dtype
x = (x.to(tl.float32) + a.to(tl.float32) * b.to(tl.float32) * c).to(x_dtype)
tl.store(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
x,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
def triton_sum_dim0_in_fp32(xs):
a, b = xs.shape
assert xs.is_contiguous()
assert xs.dtype == torch.float16
BLOCK_SIZE_A = min(triton.next_power_of_2(a), 512)
BLOCK_SIZE_B = 64 # cache line is 128 bytes
out = torch.zeros(b, dtype=torch.float32, device=xs.device)
grid = lambda META: (triton.cdiv(b, META["BLOCK_SIZE_B"]),)
triton_sum_dim0_in_fp32_kernel[grid](
xs,
out,
stride_a=xs.stride(0),
a=a,
b=b,
BLOCK_SIZE_A=BLOCK_SIZE_A,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
@triton.jit
def triton_sum_dim0_in_fp32_kernel(
xs_ptr,
out_ptr,
stride_a,
a,
b,
BLOCK_SIZE_A: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
# each program handles 64 columns of xs
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B) + pid * BLOCK_SIZE_B
all_out = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for i in range(0, a, BLOCK_SIZE_A):
offsets_a = tl.arange(0, BLOCK_SIZE_A) + i
xs = tl.load(
xs_ptr + offsets_a[:, None] * stride_a + offsets_b[None, :],
mask=(offsets_a < a)[:, None] & (offsets_b < b)[None, :],
other=0,
)
xs = xs.to(tl.float32)
out = tl.sum(xs, axis=0)
all_out += out
tl.store(out_ptr + offsets_b, all_out, mask=offsets_b < b)
def mse(
output,
target,
): # fusing fp32 cast and MSE to save memory
assert output.shape == target.shape
assert len(output.shape) == 2
assert output.stride(1) == 1
assert target.stride(1) == 1
a, b = output.shape
BLOCK_SIZE_B = triton.next_power_of_2(b)
class _MSE(torch.autograd.Function):
@staticmethod
def forward(ctx, output, target):
ctx.save_for_backward(output, target)
out = torch.zeros(a, dtype=torch.float32, device=output.device)
triton_mse_loss_fp16_kernel[(a,)](
output,
target,
out,
stride_a_output=output.stride(0),
stride_a_target=target.stride(0),
a=a,
b=b,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
@staticmethod
def backward(ctx, grad_output):
output, target = ctx.saved_tensors
res = (output - target).float()
res *= grad_output[:, None] * 2 / b
return res, None
return _MSE.apply(output, target).mean()
def normalized_mse(recon: torch.Tensor, xs: torch.Tensor) -> torch.Tensor:
# only used for auxk
xs_mu = (
triton_sum_dim0_in_fp32(xs) / xs.shape[0]
if xs.dtype == torch.float16
else xs.mean(dim=0)
)
loss = mse(recon, xs) / mse(
xs_mu[None, :].broadcast_to(xs.shape), xs
)
return loss
@triton.jit
def triton_mse_loss_fp16_kernel(
output_ptr,
target_ptr,
out_ptr,
stride_a_output,
stride_a_target,
a,
b,
BLOCK_SIZE_B: tl.constexpr,
):
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
output = tl.load(
output_ptr + pid * stride_a_output + offsets_b,
mask=offsets_b < b,
)
target = tl.load(
target_ptr + pid * stride_a_target + offsets_b,
mask=offsets_b < b,
)
output = output.to(tl.float32)
target = target.to(tl.float32)
mse = tl.sum((output - target) * (output - target)) / b
tl.store(out_ptr + pid, mse)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n] or [n] (broadcast over rows)
b : [m, n] or [n] (broadcast over rows)
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
|
@triton.jit
def triton_sum_dim0_in_fp32_kernel(
xs_ptr,
out_ptr,
stride_a,
a,
b,
BLOCK_SIZE_A: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
# each program handles 64 columns of xs
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B) + pid * BLOCK_SIZE_B
all_out = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for i in range(0, a, BLOCK_SIZE_A):
offsets_a = tl.arange(0, BLOCK_SIZE_A) + i
xs = tl.load(
xs_ptr + offsets_a[:, None] * stride_a + offsets_b[None, :],
mask=(offsets_a < a)[:, None] & (offsets_b < b)[None, :],
other=0,
)
xs = xs.to(tl.float32)
out = tl.sum(xs, axis=0)
all_out += out
tl.store(out_ptr + offsets_b, all_out, mask=offsets_b < b)
def mse(
output,
target,
): # fusing fp32 cast and MSE to save memory
assert output.shape == target.shape
assert len(output.shape) == 2
assert output.stride(1) == 1
assert target.stride(1) == 1
a, b = output.shape
BLOCK_SIZE_B = triton.next_power_of_2(b)
class _MSE(torch.autograd.Function):
@staticmethod
def forward(ctx, output, target):
ctx.save_for_backward(output, target)
out = torch.zeros(a, dtype=torch.float32, device=output.device)
triton_mse_loss_fp16_kernel[(a,)](
output,
target,
out,
stride_a_output=output.stride(0),
stride_a_target=target.stride(0),
a=a,
b=b,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
@staticmethod
def backward(ctx, grad_output):
output, target = ctx.saved_tensors
res = (output - target).float()
res *= grad_output[:, None] * 2 / b
return res, None
return _MSE.apply(output, target).mean()
def normalized_mse(recon: torch.Tensor, xs: torch.Tensor) -> torch.Tensor:
# only used for auxk
xs_mu = (
triton_sum_dim0_in_fp32(xs) / xs.shape[0]
if xs.dtype == torch.float16
else xs.mean(dim=0)
)
loss = mse(recon, xs) / mse(
xs_mu[None, :].broadcast_to(xs.shape), xs
)
return loss
|
jsalt2024-evaluating-llms-for-astronomy/retrieval
|
saerch/kernels.py
|
https://github.com/jsalt2024-evaluating-llms-for-astronomy/retrieval/blob/f8ec8914e49dbf4680b56efd7771264549b9a2da/saerch/kernels.py
|
### kernels.py ###
import torch
import triton
import triton.language as tl
## kernels
def triton_sparse_transpose_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
"""
calculates sparse.T @ dense (i.e. reducing along the collated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (A, B)
output is shape (N, B)
"""
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
K = sparse_indices.shape[1]
A = dense.shape[0]
B = dense.shape[1]
assert sparse_indices.shape[0] == A
# convert to COO format, sorted by feature index so entries sharing an output row are adjacent (fewer atomic flushes in the kernel)
sorted_indices = sparse_indices.view(-1).sort()
coo_indices = torch.stack(
[
torch.arange(A, device=sparse_indices.device).repeat_interleave(K)[
sorted_indices.indices
],
sorted_indices.values,
]
) # shape (2, A * K)
coo_values = sparse_values.view(-1)[sorted_indices.indices] # shape (A * K,)
return triton_coo_sparse_dense_matmul(coo_indices, coo_values, dense, N, BLOCK_SIZE_AK)
def triton_coo_sparse_dense_matmul(
coo_indices: torch.Tensor,
coo_values: torch.Tensor,
dense: torch.Tensor,
N: int,
BLOCK_SIZE_AK=128,
) -> torch.Tensor:
AK = coo_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(N, B, device=dense.device, dtype=coo_values.dtype)
grid = lambda META: (
triton.cdiv(AK, META["BLOCK_SIZE_AK"]),
1,
)
triton_sparse_transpose_dense_matmul_kernel[grid](
coo_indices,
coo_values,
dense,
out,
stride_da=dense.stride(0),
stride_db=dense.stride(1),
B=B,
N=N,
AK=AK,
BLOCK_SIZE_AK=BLOCK_SIZE_AK,
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_transpose_dense_matmul_kernel(
coo_indices_ptr,
coo_values_ptr,
dense_ptr,
out_ptr,
stride_da,
stride_db,
B,
N,
AK,
BLOCK_SIZE_AK: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
coo_indices is shape (2, AK)
coo_values is shape (AK,)
dense is shape (A, B), contiguous along B
out is shape (N, B)
"""
pid_ak = tl.program_id(0)
pid_b = tl.program_id(1)
coo_offsets = tl.arange(0, BLOCK_SIZE_AK)
b_offsets = tl.arange(0, BLOCK_SIZE_B)
A_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
K_coords = tl.load(
coo_indices_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets + AK,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
values = tl.load(
coo_values_ptr + pid_ak * BLOCK_SIZE_AK + coo_offsets,
mask=pid_ak * BLOCK_SIZE_AK + coo_offsets < AK,
)
last_k = tl.min(K_coords)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for ind in range(BLOCK_SIZE_AK):
if ind + pid_ak * BLOCK_SIZE_AK < AK:
# workaround to do A_coords[ind]
a = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
A_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
k = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
K_coords,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.int64),
)
)
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_AK) == ind,
values,
tl.zeros((BLOCK_SIZE_AK,), dtype=tl.float32),
)
)
tl.device_assert(k < N)
if k != last_k:
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
accum *= 0
last_k = k
if v != 0:
accum += v * tl.load(dense_ptr + a * stride_da + b_offsets, mask=b_offsets < B)
tl.atomic_add(
out_ptr + last_k * B + BLOCK_SIZE_B * pid_b + b_offsets,
accum,
mask=BLOCK_SIZE_B * pid_b + b_offsets < B,
)
def triton_sparse_dense_matmul(
sparse_indices: torch.Tensor,
sparse_values: torch.Tensor,
dense: torch.Tensor,
) -> torch.Tensor:
"""
    calculates sparse @ dense (i.e. reducing along the uncollated dimension of sparse)
dense must be contiguous along dim 0 (in other words, dense.T is contiguous)
sparse_indices is shape (A, k)
sparse_values is shape (A, k)
dense is shape (N, B)
output is shape (A, B)
"""
N = dense.shape[0]
assert sparse_indices.shape == sparse_values.shape
assert sparse_indices.is_contiguous()
assert sparse_values.is_contiguous()
assert dense.is_contiguous() # contiguous along B
A = sparse_indices.shape[0]
K = sparse_indices.shape[1]
B = dense.shape[1]
out = torch.zeros(A, B, device=dense.device, dtype=sparse_values.dtype)
triton_sparse_dense_matmul_kernel[(A,)](
sparse_indices,
sparse_values,
dense,
out,
stride_dn=dense.stride(0),
stride_db=dense.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_K=triton.next_power_of_2(K),
BLOCK_SIZE_B=triton.next_power_of_2(B),
)
return out
@triton.jit
def triton_sparse_dense_matmul_kernel(
sparse_indices_ptr,
sparse_values_ptr,
dense_ptr,
out_ptr,
stride_dn,
stride_db,
A,
B,
N,
K,
BLOCK_SIZE_K: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
"""
sparse_indices is shape (A, K)
sparse_values is shape (A, K)
dense is shape (N, B), contiguous along B
out is shape (A, B)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
sparse_indices = tl.load(
sparse_indices_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
sparse_values = tl.load(
sparse_values_ptr + pid * K + offsets_k, mask=offsets_k < K
) # shape (K,)
accum = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
for k in range(K):
# workaround to do sparse_indices[k]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
# workaround to do sparse_values[k]
v = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
sparse_values,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32),
)
)
tl.device_assert(i < N)
if v != 0:
accum += v * tl.load(
dense_ptr + i * stride_dn + offsets_b * stride_db, mask=offsets_b < B
)
tl.store(out_ptr + pid * B + offsets_b, accum.to(sparse_values.dtype), mask=offsets_b < B)
def triton_dense_dense_sparseout_matmul(
dense1: torch.Tensor,
dense2: torch.Tensor,
at_indices: torch.Tensor,
) -> torch.Tensor:
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
calculates dense1 @ dense2 only for the indices in at_indices
equivalent to (dense1 @ dense2).gather(1, at_indices)
"""
A, B = dense1.shape
N = dense2.shape[1]
assert dense2.shape[0] == B
assert at_indices.shape[0] == A
K = at_indices.shape[1]
assert at_indices.is_contiguous()
assert dense1.stride(1) == 1, "dense1 must be contiguous along B"
assert dense2.stride(0) == 1, "dense2 must be contiguous along B"
if K > 512:
# print("WARN - using naive matmul for large K")
# naive is more efficient for large K
return (dense1 @ dense2).gather(1, at_indices)
out = torch.zeros(A, K, device=dense1.device, dtype=dense1.dtype)
# grid = lambda META: (triton.cdiv(A, META['BLOCK_SIZE_A']),)
triton_dense_dense_sparseout_matmul_kernel[(A,)](
dense1,
dense2,
at_indices,
out,
stride_d1a=dense1.stride(0),
stride_d1b=dense1.stride(1),
stride_d2b=dense2.stride(0),
stride_d2n=dense2.stride(1),
A=A,
B=B,
N=N,
K=K,
BLOCK_SIZE_B=triton.next_power_of_2(B),
BLOCK_SIZE_N=triton.next_power_of_2(N),
BLOCK_SIZE_K=triton.next_power_of_2(K),
)
return out
@triton.jit
def triton_dense_dense_sparseout_matmul_kernel(
dense1_ptr,
dense2_ptr,
at_indices_ptr,
out_ptr,
stride_d1a,
stride_d1b,
stride_d2b,
stride_d2n,
A,
B,
N,
K,
BLOCK_SIZE_B: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
BLOCK_SIZE_K: tl.constexpr,
):
"""
dense1: shape (A, B)
dense2: shape (B, N)
at_indices: shape (A, K)
out values: shape (A, K)
"""
pid = tl.program_id(0)
offsets_k = tl.arange(0, BLOCK_SIZE_K)
at_indices = tl.load(at_indices_ptr + pid * K + offsets_k, mask=offsets_k < K) # shape (K,)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
dense1 = tl.load(
dense1_ptr + pid * stride_d1a + offsets_b * stride_d1b, mask=offsets_b < B
) # shape (B,)
accum = tl.zeros((BLOCK_SIZE_K,), dtype=tl.float32)
for k in range(K):
# workaround to do at_indices[b]
i = tl.sum(
tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
at_indices,
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
)
tl.device_assert(i < N)
dense2col = tl.load(
dense2_ptr + offsets_b * stride_d2b + i * stride_d2n, mask=offsets_b < B
) # shape (B,)
accum += tl.where(
tl.arange(0, BLOCK_SIZE_K) == k,
tl.sum(dense1 * dense2col),
tl.zeros((BLOCK_SIZE_K,), dtype=tl.int64),
)
tl.store(out_ptr + pid * K + offsets_k, accum, mask=offsets_k < K)
class TritonDecoderAutograd(torch.autograd.Function):
@staticmethod
def forward(ctx, sparse_indices, sparse_values, decoder_weight):
ctx.save_for_backward(sparse_indices, sparse_values, decoder_weight)
return triton_sparse_dense_matmul(sparse_indices, sparse_values, decoder_weight.T)
@staticmethod
def backward(ctx, grad_output):
sparse_indices, sparse_values, decoder_weight = ctx.saved_tensors
        assert grad_output.is_contiguous(), "grad_output must be contiguous; this is probably because the subsequent op was a .sum() or something like that, which returns a non-contiguous gradient"
decoder_grad = triton_sparse_transpose_dense_matmul(
sparse_indices, sparse_values, grad_output, N=decoder_weight.shape[1]
).T
return (
None,
triton_dense_dense_sparseout_matmul(grad_output, decoder_weight, sparse_indices),
# decoder is contiguous when transposed so this is a matching layout
decoder_grad,
None,
)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n]
b : [m, n]
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
@triton.jit
def triton_add_mul_kernel(
x_ptr,
a_ptr,
b_ptr,
c,
stride_x0,
stride_x1,
stride_a0,
stride_a1,
stride_b0,
stride_b1,
BLOCK_SIZE_M: tl.constexpr,
BLOCK_SIZE_N: tl.constexpr,
M: tl.constexpr,
N: tl.constexpr,
):
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
offsets_m = tl.arange(0, BLOCK_SIZE_M) + pid_m * BLOCK_SIZE_M
offsets_n = tl.arange(0, BLOCK_SIZE_N) + pid_n * BLOCK_SIZE_N
x = tl.load(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
a = tl.load(
a_ptr + offsets_m[:, None] * stride_a0 + offsets_n[None, :] * stride_a1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
b = tl.load(
b_ptr + offsets_m[:, None] * stride_b0 + offsets_n[None, :] * stride_b1,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
x_dtype = x.dtype
x = (x.to(tl.float32) + a.to(tl.float32) * b.to(tl.float32) * c).to(x_dtype)
tl.store(
x_ptr + offsets_m[:, None] * stride_x0 + offsets_n[None, :] * stride_x1,
x,
mask=(offsets_m[:, None] < M) & (offsets_n[None, :] < N),
)
def triton_sum_dim0_in_fp32(xs):
a, b = xs.shape
assert xs.is_contiguous()
assert xs.dtype == torch.float16
BLOCK_SIZE_A = min(triton.next_power_of_2(a), 512)
BLOCK_SIZE_B = 64 # cache line is 128 bytes
out = torch.zeros(b, dtype=torch.float32, device=xs.device)
grid = lambda META: (triton.cdiv(b, META["BLOCK_SIZE_B"]),)
triton_sum_dim0_in_fp32_kernel[grid](
xs,
out,
stride_a=xs.stride(0),
a=a,
b=b,
BLOCK_SIZE_A=BLOCK_SIZE_A,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
@triton.jit
def triton_sum_dim0_in_fp32_kernel(
xs_ptr,
out_ptr,
stride_a,
a,
b,
BLOCK_SIZE_A: tl.constexpr,
BLOCK_SIZE_B: tl.constexpr,
):
# each program handles 64 columns of xs
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B) + pid * BLOCK_SIZE_B
all_out = tl.zeros((BLOCK_SIZE_B,), dtype=tl.float32)
for i in range(0, a, BLOCK_SIZE_A):
offsets_a = tl.arange(0, BLOCK_SIZE_A) + i
xs = tl.load(
xs_ptr + offsets_a[:, None] * stride_a + offsets_b[None, :],
mask=(offsets_a < a)[:, None] & (offsets_b < b)[None, :],
other=0,
)
xs = xs.to(tl.float32)
out = tl.sum(xs, axis=0)
all_out += out
tl.store(out_ptr + offsets_b, all_out, mask=offsets_b < b)
def mse(
output,
target,
): # fusing fp32 cast and MSE to save memory
assert output.shape == target.shape
assert len(output.shape) == 2
assert output.stride(1) == 1
assert target.stride(1) == 1
a, b = output.shape
BLOCK_SIZE_B = triton.next_power_of_2(b)
class _MSE(torch.autograd.Function):
@staticmethod
def forward(ctx, output, target):
ctx.save_for_backward(output, target)
out = torch.zeros(a, dtype=torch.float32, device=output.device)
triton_mse_loss_fp16_kernel[(a,)](
output,
target,
out,
stride_a_output=output.stride(0),
stride_a_target=target.stride(0),
a=a,
b=b,
BLOCK_SIZE_B=BLOCK_SIZE_B,
)
return out
@staticmethod
def backward(ctx, grad_output):
output, target = ctx.saved_tensors
res = (output - target).float()
res *= grad_output[:, None] * 2 / b
return res, None
return _MSE.apply(output, target).mean()
def normalized_mse(recon: torch.Tensor, xs: torch.Tensor) -> torch.Tensor:
# only used for auxk
xs_mu = (
triton_sum_dim0_in_fp32(xs) / xs.shape[0]
if xs.dtype == torch.float16
else xs.mean(dim=0)
)
loss = mse(recon, xs) / mse(
xs_mu[None, :].broadcast_to(xs.shape), xs
)
return loss
@triton.jit
def triton_mse_loss_fp16_kernel(
output_ptr,
target_ptr,
out_ptr,
stride_a_output,
stride_a_target,
a,
b,
BLOCK_SIZE_B: tl.constexpr,
):
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
output = tl.load(
output_ptr + pid * stride_a_output + offsets_b,
mask=offsets_b < b,
)
target = tl.load(
target_ptr + pid * stride_a_target + offsets_b,
mask=offsets_b < b,
)
output = output.to(tl.float32)
target = target.to(tl.float32)
mse = tl.sum((output - target) * (output - target)) / b
tl.store(out_ptr + pid, mse)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n]
b : [m, n]
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
|
@triton.jit
def triton_mse_loss_fp16_kernel(
output_ptr,
target_ptr,
out_ptr,
stride_a_output,
stride_a_target,
a,
b,
BLOCK_SIZE_B: tl.constexpr,
):
pid = tl.program_id(0)
offsets_b = tl.arange(0, BLOCK_SIZE_B)
output = tl.load(
output_ptr + pid * stride_a_output + offsets_b,
mask=offsets_b < b,
)
target = tl.load(
target_ptr + pid * stride_a_target + offsets_b,
mask=offsets_b < b,
)
output = output.to(tl.float32)
target = target.to(tl.float32)
mse = tl.sum((output - target) * (output - target)) / b
tl.store(out_ptr + pid, mse)
def triton_add_mul_(
x: torch.Tensor,
a: torch.Tensor,
b: torch.Tensor,
c: float,
):
"""
does
x += a * b * c
x : [m, n]
a : [m, n]
b : [m, n]
c : float
"""
if len(a.shape) == 1:
a = a[None, :].broadcast_to(x.shape)
if len(b.shape) == 1:
b = b[None, :].broadcast_to(x.shape)
assert x.shape == a.shape == b.shape
BLOCK_SIZE_M = 64
BLOCK_SIZE_N = 64
grid = lambda META: (
triton.cdiv(x.shape[0], META["BLOCK_SIZE_M"]),
triton.cdiv(x.shape[1], META["BLOCK_SIZE_N"]),
)
triton_add_mul_kernel[grid](
x,
a,
b,
c,
x.stride(0),
x.stride(1),
a.stride(0),
a.stride(1),
b.stride(0),
b.stride(1),
BLOCK_SIZE_M,
BLOCK_SIZE_N,
x.shape[0],
x.shape[1],
)
|
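The docstrings above spell out the expected shapes for these kernels; the smoke test below is a sketch (not part of the repository), assuming a CUDA device and that the functions defined above are in scope. It checks triton_sparse_dense_matmul against a dense reference built by scattering the (index, value) pairs into a full matrix.
import torch
A, K, N, B = 8, 4, 64, 32  # rows, nnz per row, dictionary size, feature dim
idx = torch.rand(A, N, device="cuda").topk(K, dim=1).indices  # unique column indices per row
val = torch.randn(A, K, device="cuda", dtype=torch.float32)
dense = torch.randn(N, B, device="cuda", dtype=torch.float32)
out = triton_sparse_dense_matmul(idx, val, dense)  # shape (A, B)
# dense reference: materialize the (A, N) sparse matrix, then do a regular matmul
ref = torch.zeros(A, N, device="cuda").scatter_(1, idx, val) @ dense
torch.testing.assert_close(out, ref, rtol=1e-4, atol=1e-4)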
zjhellofss/KuiperTriton
|
kernel/rmsnorm.py
|
https://github.com/zjhellofss/KuiperTriton/blob/1abdb405768b4c2251ab259ffd34f1e853ed3e0c/kernel/rmsnorm.py
|
import torch
import triton
import triton.language as tl
import torch
from torch import nn
@triton.jit
def rmsnorm_triton(x_ptr, rms_w_ptr, output_ptr,
stride_x_batch, stride_x_m, stride_x_k,
stride_rms_w, stride_out_batch, stride_out_m, stride_out_k,
head_size, eps, BLOCK_N_SIZE: tl.constexpr):
pid_b = tl.program_id(0)
pid_m = tl.program_id(1)
offset_m = pid_b * stride_x_batch + pid_m * stride_x_m
block_n = tl.arange(0, BLOCK_N_SIZE)
var = tl.zeros((BLOCK_N_SIZE,), tl.float32)
for block_idx in range(0, head_size, BLOCK_N_SIZE):
offset_n = block_idx + block_n
x_ptr_mask = offset_n < head_size
x = tl.load(x_ptr + offset_m + offset_n * stride_x_k, mask=x_ptr_mask, other=0.0)
var += x * x
var = tl.sum(var, axis=0) / head_size
rstd = 1 / tl.sqrt(var + eps)
for block_idx in range(0, head_size, BLOCK_N_SIZE):
offset_n = block_idx + block_n
x_ptr_mask = offset_n < head_size
rms_w = tl.load(rms_w_ptr + offset_n * stride_rms_w, mask=x_ptr_mask)
x = tl.load(x_ptr + offset_m + offset_n * stride_x_k, mask=x_ptr_mask, other=0.0).to(tl.float32)
x_hat = x * rstd
out = x_hat * rms_w
out_off = pid_b * stride_out_batch + pid_m * stride_out_m + offset_n * stride_out_k
tl.store(output_ptr + out_off, out, mask=x_ptr_mask)
def rmsnorm(input: torch.Tensor, weight: torch.Tensor, output: torch.Tensor):
assert torch.cuda.is_available()
assert input.is_cuda
assert output.is_cuda
batch_size = input.size(0)
seq_len = input.size(1)
head_size = input.size(2)
stride_x_batch = input.stride(0)
stride_x_m = input.stride(1)
stride_x_k = input.stride(2)
stride_rms_w = weight.stride(0)
stride_out_batch = output.stride(0)
stride_out_m = output.stride(1)
stride_out_k = output.stride(2)
eps = 1e-6
BLOCK_N_SIZE = 128
def grid(meta): return batch_size, seq_len
rmsnorm_triton[grid](input, weight, output, stride_x_batch, stride_x_m, stride_x_k, stride_rms_w, stride_out_batch,
stride_out_m, stride_out_k, head_size, eps, BLOCK_N_SIZE)
|
@triton.jit
def rmsnorm_triton(x_ptr, rms_w_ptr, output_ptr,
stride_x_batch, stride_x_m, stride_x_k,
stride_rms_w, stride_out_batch, stride_out_m, stride_out_k,
head_size, eps, BLOCK_N_SIZE: tl.constexpr):
pid_b = tl.program_id(0)
pid_m = tl.program_id(1)
offset_m = pid_b * stride_x_batch + pid_m * stride_x_m
block_n = tl.arange(0, BLOCK_N_SIZE)
var = tl.zeros((BLOCK_N_SIZE,), tl.float32)
for block_idx in range(0, head_size, BLOCK_N_SIZE):
offset_n = block_idx + block_n
x_ptr_mask = offset_n < head_size
x = tl.load(x_ptr + offset_m + offset_n * stride_x_k, mask=x_ptr_mask, other=0.0)
var += x * x
var = tl.sum(var, axis=0) / head_size
rstd = 1 / tl.sqrt(var + eps)
for block_idx in range(0, head_size, BLOCK_N_SIZE):
offset_n = block_idx + block_n
x_ptr_mask = offset_n < head_size
rms_w = tl.load(rms_w_ptr + offset_n * stride_rms_w, mask=x_ptr_mask)
x = tl.load(x_ptr + offset_m + offset_n * stride_x_k, mask=x_ptr_mask, other=0.0).to(tl.float32)
x_hat = x * rstd
out = x_hat * rms_w
out_off = pid_b * stride_out_batch + pid_m * stride_out_m + offset_n * stride_out_k
tl.store(output_ptr + out_off, out, mask=x_ptr_mask)
def rmsnorm(input: torch.Tensor, weight: torch.Tensor, output: torch.Tensor):
assert torch.cuda.is_available()
assert input.is_cuda
assert output.is_cuda
batch_size = input.size(0)
seq_len = input.size(1)
head_size = input.size(2)
stride_x_batch = input.stride(0)
stride_x_m = input.stride(1)
stride_x_k = input.stride(2)
stride_rms_w = weight.stride(0)
stride_out_batch = output.stride(0)
stride_out_m = output.stride(1)
stride_out_k = output.stride(2)
eps = 1e-6
BLOCK_N_SIZE = 128
def grid(meta): return batch_size, seq_len
rmsnorm_triton[grid](input, weight, output, stride_x_batch, stride_x_m, stride_x_k, stride_rms_w, stride_out_batch,
stride_out_m, stride_out_k, head_size, eps, BLOCK_N_SIZE)
|
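A quick way to validate the wrapper above is against an eager RMSNorm that mirrors the kernel exactly (no mean subtraction, eps fixed at 1e-6); the following is a sketch with illustrative shapes, assuming a CUDA device and that rmsnorm is in scope.
import torch
x = torch.randn(2, 16, 128, device="cuda", dtype=torch.float32)  # (batch, seq, head_size)
w = torch.randn(128, device="cuda", dtype=torch.float32)
out = torch.empty_like(x)
rmsnorm(x, w, out)
# eager reference: scale each row by 1/sqrt(mean(x^2) + eps), then apply the weight
rstd = torch.rsqrt(x.pow(2).mean(dim=-1, keepdim=True) + 1e-6)
torch.testing.assert_close(out, x * rstd * w, rtol=1e-4, atol=1e-4)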
Jokeren/triton-samples
|
ops/full.py
|
https://github.com/Jokeren/triton-samples/blob/e8fb3733750056411da55a52bc4c1ed54faa416e/ops/full.py
|
"""Compare different methods of creating constant tensors in Triton."""
import torch
import triton
import triton.language as tl
@triton.jit
def kernel1(x_ptr, y_ptr, BLOCK_SIZE: tl.constexpr):
"""Matrix multiplication with tl.full constant matrix."""
tid = (
tl.arange(0, BLOCK_SIZE)[:, None] * BLOCK_SIZE
+ tl.arange(0, BLOCK_SIZE)[None, :]
)
x_offset = x_ptr + tid
y_offset = y_ptr + tid
lhs = tl.load(x_offset)
rhs = tl.full([BLOCK_SIZE, BLOCK_SIZE], 1.5, tl.float16)
tl.store(y_offset, tl.dot(lhs, rhs).to(tl.float32))
@triton.jit
def kernel2(x_ptr, y_ptr, BLOCK_SIZE: tl.constexpr):
"""Matrix multiplication with explicit constant matrix creation."""
tid = (
tl.arange(0, BLOCK_SIZE)[:, None] * BLOCK_SIZE
+ tl.arange(0, BLOCK_SIZE)[None, :]
)
x_offset = x_ptr + tid
y_offset = y_ptr + tid
lhs = tl.load(x_offset)
rhs = (tl.zeros([BLOCK_SIZE, BLOCK_SIZE], dtype=tl.float16) + 1.5).to(tl.float16)
tl.store(y_offset, tl.dot(lhs, rhs).to(tl.float32))
BLOCK_SIZE = 128
x = torch.randn((BLOCK_SIZE, BLOCK_SIZE), device="cuda", dtype=torch.float16)
y_0 = torch.zeros((BLOCK_SIZE, BLOCK_SIZE), device="cuda", dtype=torch.float32)
y_1 = torch.zeros((BLOCK_SIZE, BLOCK_SIZE), device="cuda", dtype=torch.float32)
for _ in range(100):
kernel1[(1024,)](x, y_0, BLOCK_SIZE)
kernel2[(1024,)](x, y_1, BLOCK_SIZE)
torch.allclose(y_0, y_1)
|
@triton.jit
def kernel1(x_ptr, y_ptr, BLOCK_SIZE: tl.constexpr):
"""Matrix multiplication with tl.full constant matrix."""
tid = (
tl.arange(0, BLOCK_SIZE)[:, None] * BLOCK_SIZE
+ tl.arange(0, BLOCK_SIZE)[None, :]
)
x_offset = x_ptr + tid
y_offset = y_ptr + tid
lhs = tl.load(x_offset)
rhs = tl.full([BLOCK_SIZE, BLOCK_SIZE], 1.5, tl.float16)
tl.store(y_offset, tl.dot(lhs, rhs).to(tl.float32))
|
Jokeren/triton-samples
|
ops/full.py
|
https://github.com/Jokeren/triton-samples/blob/e8fb3733750056411da55a52bc4c1ed54faa416e/ops/full.py
|
"""Compare different methods of creating constant tensors in Triton."""
import torch
import triton
import triton.language as tl
@triton.jit
def kernel1(x_ptr, y_ptr, BLOCK_SIZE: tl.constexpr):
"""Matrix multiplication with tl.full constant matrix."""
tid = (
tl.arange(0, BLOCK_SIZE)[:, None] * BLOCK_SIZE
+ tl.arange(0, BLOCK_SIZE)[None, :]
)
x_offset = x_ptr + tid
y_offset = y_ptr + tid
lhs = tl.load(x_offset)
rhs = tl.full([BLOCK_SIZE, BLOCK_SIZE], 1.5, tl.float16)
tl.store(y_offset, tl.dot(lhs, rhs).to(tl.float32))
@triton.jit
def kernel2(x_ptr, y_ptr, BLOCK_SIZE: tl.constexpr):
"""Matrix multiplication with explicit constant matrix creation."""
tid = (
tl.arange(0, BLOCK_SIZE)[:, None] * BLOCK_SIZE
+ tl.arange(0, BLOCK_SIZE)[None, :]
)
x_offset = x_ptr + tid
y_offset = y_ptr + tid
lhs = tl.load(x_offset)
rhs = (tl.zeros([BLOCK_SIZE, BLOCK_SIZE], dtype=tl.float16) + 1.5).to(tl.float16)
tl.store(y_offset, tl.dot(lhs, rhs).to(tl.float32))
BLOCK_SIZE = 128
x = torch.randn((BLOCK_SIZE, BLOCK_SIZE), device="cuda", dtype=torch.float16)
y_0 = torch.zeros((BLOCK_SIZE, BLOCK_SIZE), device="cuda", dtype=torch.float32)
y_1 = torch.zeros((BLOCK_SIZE, BLOCK_SIZE), device="cuda", dtype=torch.float32)
for _ in range(100):
kernel1[(1024,)](x, y_0, BLOCK_SIZE)
kernel2[(1024,)](x, y_1, BLOCK_SIZE)
torch.allclose(y_0, y_1)
|
@triton.jit
def kernel2(x_ptr, y_ptr, BLOCK_SIZE: tl.constexpr):
"""Matrix multiplication with explicit constant matrix creation."""
tid = (
tl.arange(0, BLOCK_SIZE)[:, None] * BLOCK_SIZE
+ tl.arange(0, BLOCK_SIZE)[None, :]
)
x_offset = x_ptr + tid
y_offset = y_ptr + tid
lhs = tl.load(x_offset)
rhs = (tl.zeros([BLOCK_SIZE, BLOCK_SIZE], dtype=tl.float16) + 1.5).to(tl.float16)
tl.store(y_offset, tl.dot(lhs, rhs).to(tl.float32))
BLOCK_SIZE = 128
x = torch.randn((BLOCK_SIZE, BLOCK_SIZE), device="cuda", dtype=torch.float16)
y_0 = torch.zeros((BLOCK_SIZE, BLOCK_SIZE), device="cuda", dtype=torch.float32)
y_1 = torch.zeros((BLOCK_SIZE, BLOCK_SIZE), device="cuda", dtype=torch.float32)
for _ in range(100):
kernel1[(1024,)](x, y_0, BLOCK_SIZE)
kernel2[(1024,)](x, y_1, BLOCK_SIZE)
torch.allclose(y_0, y_1)
|
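As written, the final torch.allclose call discards its result; when adapting this comparison, a few extra lines appended to the script (a sketch; do_bench is part of triton.testing) make the agreement check and the relative timings explicit.
assert torch.allclose(y_0, y_1), "kernel1 (tl.full) and kernel2 (zeros + add) disagree"
ms1 = triton.testing.do_bench(lambda: kernel1[(1024,)](x, y_0, BLOCK_SIZE))
ms2 = triton.testing.do_bench(lambda: kernel2[(1024,)](x, y_1, BLOCK_SIZE))
print(f"tl.full: {ms1:.3f} ms, zeros + add: {ms2:.3f} ms")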
fishmingyu/inductor_test_gather_scatter
|
gpu/spmm_kernel.py
|
https://github.com/fishmingyu/inductor_test_gather_scatter/blob/721a6967658a88d10be015f52133876ff421959c/gpu/spmm_kernel.py
|
import triton
import triton.language as tl
import numpy as np
@triton.jit
def spmm_csr(A_ptr, A_ind, B, C, feature_size: tl.constexpr):
# Global index corresponds to the node id
node_id = tl.program_id(0)
# Use tl.arange to get the feature id for each thread within the block
feature_id = tl.arange(0, feature_size)
# Using a local temporary variable to accumulate results
acc = tl.load(C + node_id * feature_size + feature_id)
# CSR loop for the specific node
start = tl.load(A_ptr + node_id)
end = tl.load(A_ptr + node_id + 1)
for j in range(start, end):
col = tl.load(A_ind + j)
acc += tl.load(B + col * feature_size + feature_id)
# Store the result back to C using tl.store
tl.store(C + node_id * feature_size + feature_id, acc)
@triton.jit
def spmm_atomic(edge_index, B, C, num_edges, feature_size: tl.constexpr, XBLOCK: tl.constexpr):
group_id = tl.program_id(0)
xoffset = group_id * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)
x1 = xindex // feature_size
x2 = xindex % feature_size
mask = x1 < num_edges
in_node = tl.load(edge_index + x1, mask)
out_node = tl.load(edge_index + x1 + num_edges, mask)
in_val = tl.load(B + in_node * feature_size + x2, mask)
tl.atomic_add(C + out_node * feature_size + x2, in_val, mask)
def spmm_atomic_wrapper(edge_index, B, C):
feature_size = B.shape[1]
num_edges = edge_index.shape[1]
XBLOCK = 128
spmm_atomic[(feature_size * num_edges // XBLOCK, )](edge_index, B, C, num_edges,
feature_size, XBLOCK=XBLOCK)
def spmm_csr_wrapper(rowptr, col, B, C):
feature_size = B.shape[1]
num_nodes = rowptr.shape[0] - 1
spmm_csr[(num_nodes,)](rowptr, col, B, C, feature_size)
|
@triton.jit
def spmm_csr(A_ptr, A_ind, B, C, feature_size: tl.constexpr):
# Global index corresponds to the node id
node_id = tl.program_id(0)
# Use tl.arange to get the feature id for each thread within the block
feature_id = tl.arange(0, feature_size)
# Using a local temporary variable to accumulate results
acc = tl.load(C + node_id * feature_size + feature_id)
# CSR loop for the specific node
start = tl.load(A_ptr + node_id)
end = tl.load(A_ptr + node_id + 1)
for j in range(start, end):
col = tl.load(A_ind + j)
acc += tl.load(B + col * feature_size + feature_id)
# Store the result back to C using tl.store
tl.store(C + node_id * feature_size + feature_id, acc)
|
fishmingyu/inductor_test_gather_scatter
|
gpu/spmm_kernel.py
|
https://github.com/fishmingyu/inductor_test_gather_scatter/blob/721a6967658a88d10be015f52133876ff421959c/gpu/spmm_kernel.py
|
import triton
import triton.language as tl
import numpy as np
@triton.jit
def spmm_csr(A_ptr, A_ind, B, C, feature_size: tl.constexpr):
# Global index corresponds to the node id
node_id = tl.program_id(0)
# Use tl.arange to get the feature id for each thread within the block
feature_id = tl.arange(0, feature_size)
# Using a local temporary variable to accumulate results
acc = tl.load(C + node_id * feature_size + feature_id)
# CSR loop for the specific node
start = tl.load(A_ptr + node_id)
end = tl.load(A_ptr + node_id + 1)
for j in range(start, end):
col = tl.load(A_ind + j)
acc += tl.load(B + col * feature_size + feature_id)
# Store the result back to C using tl.store
tl.store(C + node_id * feature_size + feature_id, acc)
@triton.jit
def spmm_atomic(edge_index, B, C, num_edges, feature_size: tl.constexpr, XBLOCK: tl.constexpr):
group_id = tl.program_id(0)
xoffset = group_id * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)
x1 = xindex // feature_size
x2 = xindex % feature_size
mask = x1 < num_edges
in_node = tl.load(edge_index + x1, mask)
out_node = tl.load(edge_index + x1 + num_edges, mask)
in_val = tl.load(B + in_node * feature_size + x2, mask)
tl.atomic_add(C + out_node * feature_size + x2, in_val, mask)
def spmm_atomic_wrapper(edge_index, B, C):
feature_size = B.shape[1]
num_edges = edge_index.shape[1]
XBLOCK = 128
spmm_atomic[(feature_size * num_edges // XBLOCK, )](edge_index, B, C, num_edges,
feature_size, XBLOCK=XBLOCK)
def spmm_csr_wrapper(rowptr, col, B, C):
feature_size = B.shape[1]
num_nodes = rowptr.shape[0] - 1
spmm_csr[(num_nodes,)](rowptr, col, B, C, feature_size)
|
@triton.jit
def spmm_atomic(edge_index, B, C, num_edges, feature_size: tl.constexpr, XBLOCK: tl.constexpr):
group_id = tl.program_id(0)
xoffset = group_id * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)
x1 = xindex // feature_size
x2 = xindex % feature_size
mask = x1 < num_edges
in_node = tl.load(edge_index + x1, mask)
out_node = tl.load(edge_index + x1 + num_edges, mask)
in_val = tl.load(B + in_node * feature_size + x2, mask)
tl.atomic_add(C + out_node * feature_size + x2, in_val, mask)
def spmm_atomic_wrapper(edge_index, B, C):
feature_size = B.shape[1]
num_edges = edge_index.shape[1]
XBLOCK = 128
spmm_atomic[(feature_size * num_edges // XBLOCK, )](edge_index, B, C, num_edges,
feature_size, XBLOCK=XBLOCK)
def spmm_csr_wrapper(rowptr, col, B, C):
feature_size = B.shape[1]
num_nodes = rowptr.shape[0] - 1
spmm_csr[(num_nodes,)](rowptr, col, B, C, feature_size)
|
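Note that feature_size is a tl.constexpr consumed by tl.arange, so it must be a power of two; the tiny end-to-end example below (a sketch, assuming a CUDA device and the wrappers above in scope) runs spmm_csr_wrapper on a 3-node chain graph and checks it against a dense adjacency matmul.
import torch
rowptr = torch.tensor([0, 1, 3, 4], dtype=torch.int64, device="cuda")  # per-node edge ranges
col = torch.tensor([1, 0, 2, 1], dtype=torch.int64, device="cuda")  # neighbour ids
B = torch.randn(3, 32, device="cuda")  # node features; feature_size = 32 is a power of two
C = torch.zeros(3, 32, device="cuda")  # output / accumulator
spmm_csr_wrapper(rowptr, col, B, C)
# dense reference: adjacency @ features for the chain graph 0 - 1 - 2
adj = torch.zeros(3, 3, device="cuda")
adj[0, 1] = adj[1, 0] = adj[1, 2] = adj[2, 1] = 1.0
torch.testing.assert_close(C, adj @ B, rtol=1e-5, atol=1e-5)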
yuezhouhu/2by4-pretrain
|
sparse/decay.py
|
https://github.com/yuezhouhu/2by4-pretrain/blob/9e330125dea71e5a3dee235f4efb8869f9e4cdd0/sparse/decay.py
|
import torch
import triton
import triton.language as tl
@triton.jit
def masked_add_kernel(grad_ptr,
p_ptr,
p_mask_ptr,
n_elements,
alpha,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
p_mask = tl.load(p_mask_ptr + offsets, mask=mask).to(tl.int1)
mask = mask & ~p_mask
p = tl.load(p_ptr + offsets, mask=mask)
grad = tl.load(grad_ptr + offsets, mask=mask)
grad += p * alpha
tl.store(grad_ptr + offsets, grad, mask=mask)
def masked_add_(grad: torch.Tensor, p_data: torch.Tensor, p_mask: torch.Tensor, alpha: float = 0):
'''
equivalent to
grad.add_(p.data * (1 - p.mask), alpha=decay)
'''
assert grad.is_cuda and p_data.is_cuda and p_mask.is_cuda
assert (grad.layout, p_data.layout, p_mask.layout) == (torch.strided, torch.strided, torch.strided)
assert grad.stride() == p_data.stride() == p_mask.stride()
n_elements = grad.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
masked_add_kernel[grid](grad, p_data, p_mask, n_elements, alpha, BLOCK_SIZE=1024)
if __name__ == "__main__":
grad = torch.tensor([1., 1., 1., 1.]).cuda()
p = torch.tensor([1., 2., 3., 4.]).cuda()
p_mask = torch.tensor([1., 0., 1., 0.]).cuda()
alpha = 0.03
masked_add_(grad, p, p_mask, alpha=0.03)
print(grad)
|
@triton.jit
def masked_add_kernel(grad_ptr,
p_ptr,
p_mask_ptr,
n_elements,
alpha,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
p_mask = tl.load(p_mask_ptr + offsets, mask=mask).to(tl.int1)
mask = mask & ~p_mask
p = tl.load(p_ptr + offsets, mask=mask)
grad = tl.load(grad_ptr + offsets, mask=mask)
grad += p * alpha
tl.store(grad_ptr + offsets, grad, mask=mask)
def masked_add_(grad: torch.Tensor, p_data: torch.Tensor, p_mask: torch.Tensor, alpha: float = 0):
'''
equivalent to
grad.add_(p.data * (1 - p.mask), alpha=decay)
'''
assert grad.is_cuda and p_data.is_cuda and p_mask.is_cuda
assert (grad.layout, p_data.layout, p_mask.layout) == (torch.strided, torch.strided, torch.strided)
assert grad.stride() == p_data.stride() == p_mask.stride()
n_elements = grad.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
masked_add_kernel[grid](grad, p_data, p_mask, n_elements, alpha, BLOCK_SIZE=1024)
if __name__ == "__main__":
grad = torch.tensor([1., 1., 1., 1.]).cuda()
p = torch.tensor([1., 2., 3., 4.]).cuda()
p_mask = torch.tensor([1., 0., 1., 0.]).cuda()
alpha = 0.03
masked_add_(grad, p, p_mask, alpha=0.03)
print(grad)
|
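Since only positions with p_mask == 0 are loaded and stored, the __main__ example should leave grad[0] and grad[2] at 1.0 and add p * alpha elsewhere: [1.0, 1.0 + 2*0.03, 1.0, 1.0 + 4*0.03] = [1.00, 1.06, 1.00, 1.12]. A plain-PyTorch reference mirroring the docstring (a sketch):
import torch
grad = torch.tensor([1., 1., 1., 1.]).cuda()
p = torch.tensor([1., 2., 3., 4.]).cuda()
p_mask = torch.tensor([1., 0., 1., 0.]).cuda()
# docstring equivalent: grad.add_(p * (1 - p_mask), alpha=0.03)
ref = grad + p * (1 - p_mask) * 0.03
print(ref)  # tensor([1.0000, 1.0600, 1.0000, 1.1200], device='cuda:0')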
FlagOpen/FlagGems
|
src/flag_gems/ops/ge.py
|
https://github.com/FlagOpen/FlagGems/blob/2437f4ffa2d644e38c26aacbf1249263a2016bb2/src/flag_gems/ops/ge.py
|
import logging
import triton
import triton.language as tl
from ..utils import pointwise_dynamic
@pointwise_dynamic(promotion_methods=[(0, 1, "ALWAYS_BOOL")])
@triton.jit
def ge_func(x, y):
return x.to(tl.float32) >= y
def ge(A, B):
logging.debug("GEMS GE")
return ge_func(A, B)
@pointwise_dynamic(is_tensor=[True, False], promotion_methods=[(0, 1, "ALWAYS_BOOL")])
@triton.jit
def ge_func_scalar(x, y):
return x.to(tl.float32) >= y
def ge_scalar(A, B):
logging.debug("GEMS GE SCALAR")
return ge_func_scalar(A, B)
|
@triton.jit
def ge_func(x, y):
return x.to(tl.float32) >= y
def ge(A, B):
logging.debug("GEMS GE")
return ge_func(A, B)
@pointwise_dynamic(is_tensor=[True, False], promotion_methods=[(0, 1, "ALWAYS_BOOL")])
|
FlagOpen/FlagGems
|
src/flag_gems/ops/ge.py
|
https://github.com/FlagOpen/FlagGems/blob/2437f4ffa2d644e38c26aacbf1249263a2016bb2/src/flag_gems/ops/ge.py
|
import logging
import triton
import triton.language as tl
from ..utils import pointwise_dynamic
@pointwise_dynamic(promotion_methods=[(0, 1, "ALWAYS_BOOL")])
@triton.jit
def ge_func(x, y):
return x.to(tl.float32) >= y
def ge(A, B):
logging.debug("GEMS GE")
return ge_func(A, B)
@pointwise_dynamic(is_tensor=[True, False], promotion_methods=[(0, 1, "ALWAYS_BOOL")])
@triton.jit
def ge_func_scalar(x, y):
return x.to(tl.float32) >= y
def ge_scalar(A, B):
logging.debug("GEMS GE SCALAR")
return ge_func_scalar(A, B)
|
@triton.jit
def ge_func_scalar(x, y):
return x.to(tl.float32) >= y
def ge_scalar(A, B):
logging.debug("GEMS GE SCALAR")
return ge_func_scalar(A, B)
|
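Both variants cast the tensor operand to float32 before comparing; for the half-precision tensors below this yields the same booleans as torch.ge, since the upcast is exact. A small eager-mode check of that equivalence (a sketch, independent of the flag_gems wrappers):
import torch
A = torch.randn(4, 4, device="cuda", dtype=torch.float16)
B = torch.randn(4, 4, device="cuda", dtype=torch.float16)
assert torch.equal(A.to(torch.float32) >= B, torch.ge(A, B))  # tensor >= tensor
assert torch.equal(A.to(torch.float32) >= 0.5, torch.ge(A, 0.5))  # tensor >= scalar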
jli943/transformer_triton
|
triton/dropout.py
|
https://github.com/jli943/transformer_triton/blob/e3821f6e302e1974fc7d8bed8602a105654725a4/triton/dropout.py
|
# https://triton-lang.org/main/getting-started/tutorials/04-low-memory-dropout.html#sphx-glr-getting-started-tutorials-04-low-memory-dropout-py
import torch
import triton
import triton.language as tl
@triton.jit
def dropout_kernel(
x_ptr, dropout_mask_ptr, out_ptr, n_elements, p, BLOCK_SIZE: tl.constexpr
):
pid=tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets<n_elements
x = tl.load(x_ptr + offsets, mask=mask)
dropout_mask=tl.load(dropout_mask_ptr + offsets, mask=mask)
output=tl.where(dropout_mask, x/(1-p), 0.0)
tl.store(out_ptr + offsets, output, mask=mask)
def dropout(x, dropout_mask, p):
output=torch.empty_like(x)
n_elements = x.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
dropout_kernel[grid](x, dropout_mask, output, n_elements, p, BLOCK_SIZE=1024)
return output
def test():
device="cuda"
x = torch.randn(4,4).to(device)
p = 0.5
dropout_mask = (torch.rand(size=(4, 4)) > p).to(torch.int32).to(device)
output = dropout(x, dropout_mask, p=p)
print(output)
if __name__ == "__main__":
test()
|
@triton.jit
def dropout_kernel(
x_ptr, dropout_mask_ptr, out_ptr, n_elements, p, BLOCK_SIZE: tl.constexpr
):
pid=tl.program_id(axis=0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets<n_elements
x = tl.load(x_ptr + offsets, mask=mask)
dropout_mask=tl.load(dropout_mask_ptr + offsets, mask=mask)
output=tl.where(dropout_mask, x/(1-p), 0.0)
tl.store(out_ptr + offsets, output, mask=mask)
def dropout(x, dropout_mask, p):
output=torch.empty_like(x)
n_elements = x.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
dropout_kernel[grid](x, dropout_mask, output, n_elements, p, BLOCK_SIZE=1024)
return output
def test():
device="cuda"
x = torch.randn(4,4).to(device)
p = 0.5
dropout_mask = (torch.rand(size=(4, 4)) > p).to(torch.int32).to(device)
output = dropout(x, dropout_mask, p=p)
print(output)
if __name__ == "__main__":
test()
|
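The wrapper implements inverted dropout with a precomputed keep-mask: surviving activations are scaled by 1/(1 - p) so the expected value is preserved. A plain-PyTorch reference using the same mask (a sketch, assuming a CUDA device and the dropout wrapper above in scope):
import torch
x = torch.randn(4, 4, device="cuda")
p = 0.5
keep = (torch.rand(4, 4, device="cuda") > p).to(torch.int32)
out = dropout(x, keep, p)
# eager reference: zero the dropped positions, rescale kept ones by 1/(1 - p)
ref = torch.where(keep.bool(), x / (1 - p), torch.zeros_like(x))
torch.testing.assert_close(out, ref)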
bjmsong/hands-on-kernels
|
fa/v2.py
|
https://github.com/bjmsong/hands-on-kernels/blob/c219e87282d2e2895e4218175f774cda757df022/fa/v2.py
|
import pytest
import torch
import triton
import triton.language as tl
from utils import _test_memory
def is_hip():
return triton.runtime.driver.active.get_current_target().backend == "hip"
@triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
qk = tl.dot(q, k)
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
if fp8_v:
p = p.to(tl.float8e5)
else:
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
return acc, l_i, m_i
# We don't run auto-tuning every time to keep the tutorial fast. Keeping
# the code below and commenting out the equivalent parameters is convenient for
# re-tuning.
configs = [
triton.Config({'BLOCK_M': BM, 'BLOCK_N': BN}, num_stages=s, num_warps=w) \
for BM in [64, 128]\
for BN in [32, 64]\
for s in ([1] if is_hip() else [3, 4, 7])\
for w in [4, 8]\
]
def keep(conf):
BLOCK_M = conf.kwargs["BLOCK_M"]
BLOCK_N = conf.kwargs["BLOCK_N"]
if BLOCK_M * BLOCK_N < 128 * 128 and conf.num_warps == 8:
return False
return True
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd(Q, K, V, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (0, 1) if V.dtype.element_ty == tl.float8e5 else (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
    qk_scale *= 1.44269504  # 1/ln(2): exponentials below are computed with exp2
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
        # barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
m_i += tl.math.log2(l_i)
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
@triton.jit
def _attn_bwd_preprocess(O, DO, #
Delta, #
Z, H, N_CTX, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr #
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_hz = tl.program_id(1)
off_n = tl.arange(0, HEAD_DIM)
# load
o = tl.load(O + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :])
do = tl.load(DO + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :]).to(tl.float32)
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_hz * N_CTX + off_m, delta)
# The main inner-loop logic for computing dK and dV.
@triton.jit
def _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
HEAD_DIM: tl.constexpr, #
# Filled in by the wrapper.
start_n, start_m, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M1)
offs_n = start_n + tl.arange(0, BLOCK_N1)
offs_k = tl.arange(0, HEAD_DIM)
qT_ptrs = Q + offs_m[None, :] * stride_tok + offs_k[:, None] * stride_d
do_ptrs = DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
# BLOCK_N1 must be a multiple of BLOCK_M1, otherwise the code wouldn't work.
tl.static_assert(BLOCK_N1 % BLOCK_M1 == 0)
curr_m = start_m
step_m = BLOCK_M1
for blk_idx in range(num_steps):
qT = tl.load(qT_ptrs)
# Load m before computing qk to reduce pipeline stall.
offs_m = curr_m + tl.arange(0, BLOCK_M1)
m = tl.load(M + offs_m)
qkT = tl.dot(k, qT)
pT = tl.math.exp2(qkT - m[None, :])
# Autoregressive masking.
if MASK:
mask = (offs_m[None, :] >= offs_n[:, None])
pT = tl.where(mask, pT, 0.0)
do = tl.load(do_ptrs)
# Compute dV.
ppT = pT
ppT = ppT.to(tl.float16)
dv += tl.dot(ppT, do)
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# Compute dP and dS.
dpT = tl.dot(v, tl.trans(do)).to(tl.float32)
dsT = pT * (dpT - Di[None, :])
dsT = dsT.to(tl.float16)
dk += tl.dot(dsT, tl.trans(qT))
# Increment pointers.
curr_m += step_m
qT_ptrs += step_m * stride_tok
do_ptrs += step_m * stride_tok
return dk, dv
# the main inner-loop logic for computing dQ
@triton.jit
def _attn_bwd_dq(dq, q, K, V, #
do, m, D,
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
HEAD_DIM: tl.constexpr,
# Filled in by the wrapper.
start_m, start_n, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M2)
offs_n = start_n + tl.arange(0, BLOCK_N2)
offs_k = tl.arange(0, HEAD_DIM)
kT_ptrs = K + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
vT_ptrs = V + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# BLOCK_M2 must be a multiple of BLOCK_N2, otherwise the code wouldn't work.
tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0)
curr_n = start_n
step_n = BLOCK_N2
for blk_idx in range(num_steps):
kT = tl.load(kT_ptrs)
vT = tl.load(vT_ptrs)
qk = tl.dot(q, kT)
p = tl.math.exp2(qk - m)
# Autoregressive masking.
if MASK:
offs_n = curr_n + tl.arange(0, BLOCK_N2)
mask = (offs_m[:, None] >= offs_n[None, :])
p = tl.where(mask, p, 0.0)
# Compute dP and dS.
dp = tl.dot(do, vT).to(tl.float32)
ds = p * (dp - Di[:, None])
ds = ds.to(tl.float16)
# Compute dQ.
# NOTE: We need to de-scale dq in the end, because kT was pre-scaled.
dq += tl.dot(ds, tl.trans(kT))
# Increment pointers.
curr_n += step_n
kT_ptrs += step_n * stride_tok
vT_ptrs += step_n * stride_tok
return dq
@triton.jit
def _attn_bwd(Q, K, V, sm_scale, #
DO, #
DQ, DK, DV, #
M, D,
# shared by Q/K/V/DO.
stride_z, stride_h, stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
BLK_SLICE_FACTOR: tl.constexpr, #
HEAD_DIM: tl.constexpr):
LN2: tl.constexpr = 0.6931471824645996 # = ln(2)
bhid = tl.program_id(2)
off_chz = (bhid * N_CTX).to(tl.int64)
adj = (stride_h * (bhid % H) + stride_z * (bhid // H)).to(tl.int64)
pid = tl.program_id(0)
# offset pointers for batch/head
Q += adj
K += adj
V += adj
DO += adj
DQ += adj
DK += adj
DV += adj
M += off_chz
D += off_chz
# load scales
offs_k = tl.arange(0, HEAD_DIM)
start_n = pid * BLOCK_N1
start_m = start_n
MASK_BLOCK_M1: tl.constexpr = BLOCK_M1 // BLK_SLICE_FACTOR
offs_n = start_n + tl.arange(0, BLOCK_N1)
dv = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
dk = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
# load K and V: they stay in SRAM throughout the inner loop.
k = tl.load(K + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
v = tl.load(V + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
num_steps = BLOCK_N1 // MASK_BLOCK_M1
dk, dv = _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
MASK_BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=True #
)
start_m += num_steps * MASK_BLOCK_M1
num_steps = (N_CTX - start_m) // BLOCK_M1
# Compute dK and dV for non-masked blocks.
dk, dv = _attn_bwd_dkdv( #
dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=False #
)
dv_ptrs = DV + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dv_ptrs, dv)
# Write back dK.
dk *= sm_scale
dk_ptrs = DK + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dk_ptrs, dk)
# THIS BLOCK DOES DQ:
start_m = pid * BLOCK_M2
end_n = start_m + BLOCK_M2
MASK_BLOCK_N2: tl.constexpr = BLOCK_N2 // BLK_SLICE_FACTOR
offs_m = start_m + tl.arange(0, BLOCK_M2)
q = tl.load(Q + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
dq = tl.zeros([BLOCK_M2, HEAD_DIM], dtype=tl.float32)
do = tl.load(DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
m = tl.load(M + offs_m)
m = m[:, None]
# Compute dQ for masked (diagonal) blocks.
# NOTE: This code scans each row of QK^T backward (from right to left,
# but inside each call to _attn_bwd_dq, from left to right), but that's
# not due to anything important. I just wanted to reuse the loop
# structure for dK & dV above as much as possible.
num_steps = BLOCK_M2 // MASK_BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, MASK_BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * MASK_BLOCK_N2, num_steps, #
MASK=True #
)
end_n -= num_steps * MASK_BLOCK_N2
# stage 2
num_steps = end_n // BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * BLOCK_N2, num_steps, #
MASK=False #
)
# Write back dQ.
dq_ptrs = DQ + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
dq *= LN2
tl.store(dq_ptrs, dq)
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
        stage = 3 if causal else 1  # STAGE=3 runs the off-band pass plus the masked on-band pass; STAGE=1 runs a single full, unmasked pass
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
# grid.x = seqlen/BLOCK_M
        # grid.y = batch_size * num_heads
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd[grid](
q, k, v, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.save_for_backward(q, k, v, o, M)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
q, k, v, o, M = ctx.saved_tensors
assert do.is_contiguous()
assert q.stride() == k.stride() == v.stride() == o.stride() == do.stride()
dq = torch.empty_like(q)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
BATCH, N_HEAD, N_CTX = q.shape[:3]
PRE_BLOCK = 128
NUM_WARPS, NUM_STAGES = 4, 5
BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32
BLK_SLICE_FACTOR = 2
RCP_LN2 = 1.4426950408889634 # = 1.0 / ln(2)
arg_k = k
arg_k = arg_k * (ctx.sm_scale * RCP_LN2)
PRE_BLOCK = 128
assert N_CTX % PRE_BLOCK == 0
pre_grid = (N_CTX // PRE_BLOCK, BATCH * N_HEAD)
delta = torch.empty_like(M)
_attn_bwd_preprocess[pre_grid](
o, do, #
delta, #
BATCH, N_HEAD, N_CTX, #
BLOCK_M=PRE_BLOCK, HEAD_DIM=ctx.HEAD_DIM #
)
grid = (N_CTX // BLOCK_N1, 1, BATCH * N_HEAD)
_attn_bwd[grid](
q, arg_k, v, ctx.sm_scale, do, dq, dk, dv, #
M, delta, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
N_HEAD, N_CTX, #
BLOCK_M1=BLOCK_M1, BLOCK_N1=BLOCK_N1, #
BLOCK_M2=BLOCK_M2, BLOCK_N2=BLOCK_N2, #
BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, #
HEAD_DIM=ctx.HEAD_DIM, #
num_warps=NUM_WARPS, #
num_stages=NUM_STAGES #
)
return dq, dk, dv, None, None
attention = _attention.apply
# batch size, head num, sequence length, head dim
@pytest.mark.parametrize("Z, H, N_CTX, HEAD_DIM", [(1, 2, 1024, 64)])
@pytest.mark.parametrize("causal", [True]) # causal mask
def test_op(Z, H, N_CTX, HEAD_DIM, causal, dtype=torch.float16):
torch.manual_seed(20)
q = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
k = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
v = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
sm_scale = 0.5
dout = torch.randn_like(q)
# reference implementation (PyTorch)
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# triton implementation
tri_out = attention(q, k, v, causal, sm_scale).half()
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
rtol = 0.0
# Relative tolerance workaround for known hardware limitation of MI200 GPU.
# For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
rtol = 1e-2
assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol)
try:
from flash_attn.flash_attn_interface import \
flash_attn_qkvpacked_func as flash_attn_func
HAS_FLASH = True
except BaseException:
HAS_FLASH = False
TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
BATCH, N_HEADS, HEAD_DIM = 4, 32, 64
# vary seq length for fixed head and batch=4
configs = []
for mode in ["fwd"]: # ["fwd", "bwd"]
for causal in [True]: # [True, False]
if mode == "bwd" and not causal:
continue
configs.append(
triton.testing.Benchmark(
x_names=["N_CTX"],
x_vals=[2**i for i in range(9, 13)],
line_arg="provider",
line_vals=["triton-fp16"] + (["triton-fp8"] if TORCH_HAS_FP8 else []) +
(["flash"] if HAS_FLASH else []) + ["torch"],
line_names=["Triton [FP16]"] + (["Triton [FP8]"] if TORCH_HAS_FP8 else []) +
(["Flash-2"] if HAS_FLASH else []) + ["torch"],
styles=[("red", "-"), ("blue", "-"), ("green", "-"), ("yellow", "-")],
ylabel="TFLOPS",
plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
args={
"H": N_HEADS,
"BATCH": BATCH,
"HEAD_DIM": HEAD_DIM,
"mode": mode,
"causal": causal,
},
))
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
assert mode in ["fwd", "bwd"]
dtype = torch.float16
if "triton" in provider:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
if mode == "fwd" and "fp8" in provider:
q = q.to(torch.float8_e5m2)
k = k.to(torch.float8_e5m2)
v = v.permute(0, 1, 3, 2).contiguous()
v = v.permute(0, 1, 3, 2)
v = v.to(torch.float8_e5m2)
sm_scale = 1.3
fn = lambda: attention(q, k, v, causal, sm_scale)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "flash":
qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
fn = lambda: flash_attn_func(qkv, causal=causal)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "torch":
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
total_flops = 2 * flops_per_matmul
if causal:
total_flops *= 0.5
if mode == "bwd":
total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
return total_flops * 1e-12 / (ms * 1e-3)
def peak_memory(backend):
dtype = torch.float16
device = 'cuda'
BATCH, H, HEAD_DIM = 4, 32, 64
for N_CTX in [2**i for i in range(9, 13)]:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
def torch_call():
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
def triton_call():
attention(q, k, v, causal, sm_scale)
QUANTILES = [0.5, 0.2, 0.8]
if backend == "triton":
mem_50, mem_20, mem_80 = _test_memory(triton_call, quantiles=QUANTILES)
print(f"Triton Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if backend == "torch":
mem_50, mem_20, mem_80 = _test_memory(torch_call, quantiles=QUANTILES)
print(f"Torch Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if __name__ == "__main__":
# only works on post-Ampere GPUs right now
bench_flash_attention.run(save_path=".", print_data=True)
# peak_memory("torch")
|
@triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
qk = tl.dot(q, k)
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
if fp8_v:
p = p.to(tl.float8e5)
else:
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
return acc, l_i, m_i
# We don't run auto-tuning every time to keep the tutorial fast. Keeping
# the code below and commenting out the equivalent parameters is convenient for
# re-tuning.
configs = [
triton.Config({'BLOCK_M': BM, 'BLOCK_N': BN}, num_stages=s, num_warps=w) \
for BM in [64, 128]\
for BN in [32, 64]\
for s in ([1] if is_hip() else [3, 4, 7])\
for w in [4, 8]\
]
def keep(conf):
BLOCK_M = conf.kwargs["BLOCK_M"]
BLOCK_N = conf.kwargs["BLOCK_N"]
if BLOCK_M * BLOCK_N < 128 * 128 and conf.num_warps == 8:
return False
return True
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
|
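Beyond the pytest reference above, the forward pass can also be sanity-checked against PyTorch's fused SDPA. The sketch below assumes a CUDA device, that the attention wrapper defined above is in scope, and a PyTorch version whose scaled_dot_product_attention accepts the scale argument (2.1+); it reuses the tolerances from test_op.
import torch
import torch.nn.functional as F
Z, H, N_CTX, HEAD_DIM = 1, 2, 1024, 64
q, k, v = (torch.randn(Z, H, N_CTX, HEAD_DIM, device="cuda", dtype=torch.float16) for _ in range(3))
sm_scale = 0.5
tri_out = attention(q, k, v, True, sm_scale)  # causal=True
ref_out = F.scaled_dot_product_attention(q, k, v, is_causal=True, scale=sm_scale)
torch.testing.assert_close(tri_out, ref_out, atol=1e-2, rtol=0)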
bjmsong/hands-on-kernels
|
fa/v2.py
|
https://github.com/bjmsong/hands-on-kernels/blob/c219e87282d2e2895e4218175f774cda757df022/fa/v2.py
|
import pytest
import torch
import triton
import triton.language as tl
from utils import _test_memory
def is_hip():
return triton.runtime.driver.active.get_current_target().backend == "hip"
@triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
qk = tl.dot(q, k)
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
if fp8_v:
p = p.to(tl.float8e5)
else:
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
return acc, l_i, m_i
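# Illustrative sketch (not part of the original file): the online-softmax update that
# _attn_fwd_inner performs per K/V tile, written in plain PyTorch for a single
# (BLOCK_M x HEAD_DIM) query block. All names below are illustrative assumptions.
def _online_softmax_reference(q, k_tiles, v_tiles, sm_scale):
    qk_scale = sm_scale * 1.4426950408889634            # log2(e): exp2(x*qk_scale) == exp(x*sm_scale)
    n_rows, head_dim = q.shape
    m_i = torch.full((n_rows,), float("-inf"), dtype=torch.float32, device=q.device)
    l_i = torch.ones(n_rows, dtype=torch.float32, device=q.device)
    acc = torch.zeros(n_rows, head_dim, dtype=torch.float32, device=q.device)
    for k, v in zip(k_tiles, v_tiles):
        qk = (q.float() @ k.float().T) * qk_scale        # scaled scores for this tile
        m_ij = torch.maximum(m_i, qk.max(dim=1).values)  # new running row max
        p = torch.exp2(qk - m_ij[:, None])               # unnormalized tile probabilities
        alpha = torch.exp2(m_i - m_ij)                   # rescales the stale accumulator state
        l_i = l_i * alpha + p.sum(dim=1)
        acc = acc * alpha[:, None] + p @ v.float()
        m_i = m_ij
    return acc / l_i[:, None]                            # matches the kernel epilogue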
# We don't run auto-tuning every time to keep the tutorial fast. Keeping
# the code below and commenting out the equivalent parameters is convenient for
# re-tuning.
configs = [
triton.Config({'BLOCK_M': BM, 'BLOCK_N': BN}, num_stages=s, num_warps=w) \
for BM in [64, 128]\
for BN in [32, 64]\
for s in ([1] if is_hip() else [3, 4, 7])\
for w in [4, 8]\
]
def keep(conf):
BLOCK_M = conf.kwargs["BLOCK_M"]
BLOCK_N = conf.kwargs["BLOCK_N"]
if BLOCK_M * BLOCK_N < 128 * 128 and conf.num_warps == 8:
return False
return True
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd(Q, K, V, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (0, 1) if V.dtype.element_ty == tl.float8e5 else (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
qk_scale *= 1.44269504 # = log2(e) = 1/ln(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
# barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
m_i += tl.math.log2(l_i)
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
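# Illustrative check (not part of the original file): the epilogue above stores
# M = m_i + log2(l_i), the per-row log2-sum-exp of the base-2-scaled scores, so the
# backward kernels can recompute probabilities as exp2(qk * qk_scale - M) without l_i.
def _lse_epilogue_check(scores, sm_scale):
    qk = scores.float() * sm_scale * 1.4426950408889634  # same scaling as the kernel
    m = qk.max(dim=-1).values
    l = torch.exp2(qk - m[:, None]).sum(dim=-1)
    lse2 = m + torch.log2(l)                              # what the kernel writes to M
    p = torch.exp2(qk - lse2[:, None])                    # recomputed probabilities
    return torch.allclose(p, torch.softmax(scores.float() * sm_scale, dim=-1), atol=1e-5)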
@triton.jit
def _attn_bwd_preprocess(O, DO, #
Delta, #
Z, H, N_CTX, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr #
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_hz = tl.program_id(1)
off_n = tl.arange(0, HEAD_DIM)
# load
o = tl.load(O + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :])
do = tl.load(DO + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :]).to(tl.float32)
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_hz * N_CTX + off_m, delta)
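# Illustrative check (not part of the original file): the Delta written above,
# Delta_i = sum_j O_ij * dO_ij, equals sum_j P_ij * dP_ij because O = P @ V and
# dP = dO @ V^T; that is exactly the term subtracted in the softmax backward below.
def _delta_identity_check(p, v, do):
    o = p @ v                                 # forward output for this block
    dp = do @ v.T                             # gradient w.r.t. attention probabilities
    return torch.allclose((o * do).sum(-1), (p * dp).sum(-1), atol=1e-5)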
# The main inner-loop logic for computing dK and dV.
@triton.jit
def _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
HEAD_DIM: tl.constexpr, #
# Filled in by the wrapper.
start_n, start_m, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M1)
offs_n = start_n + tl.arange(0, BLOCK_N1)
offs_k = tl.arange(0, HEAD_DIM)
qT_ptrs = Q + offs_m[None, :] * stride_tok + offs_k[:, None] * stride_d
do_ptrs = DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
# BLOCK_N1 must be a multiple of BLOCK_M1, otherwise the code wouldn't work.
tl.static_assert(BLOCK_N1 % BLOCK_M1 == 0)
curr_m = start_m
step_m = BLOCK_M1
for blk_idx in range(num_steps):
qT = tl.load(qT_ptrs)
# Load m before computing qk to reduce pipeline stall.
offs_m = curr_m + tl.arange(0, BLOCK_M1)
m = tl.load(M + offs_m)
qkT = tl.dot(k, qT)
pT = tl.math.exp2(qkT - m[None, :])
# Autoregressive masking.
if MASK:
mask = (offs_m[None, :] >= offs_n[:, None])
pT = tl.where(mask, pT, 0.0)
do = tl.load(do_ptrs)
# Compute dV.
ppT = pT
ppT = ppT.to(tl.float16)
dv += tl.dot(ppT, do)
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# Compute dP and dS.
dpT = tl.dot(v, tl.trans(do)).to(tl.float32)
dsT = pT * (dpT - Di[None, :])
dsT = dsT.to(tl.float16)
dk += tl.dot(dsT, tl.trans(qT))
# Increment pointers.
curr_m += step_m
qT_ptrs += step_m * stride_tok
do_ptrs += step_m * stride_tok
return dk, dv
# the main inner-loop logic for computing dQ
@triton.jit
def _attn_bwd_dq(dq, q, K, V, #
do, m, D,
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
HEAD_DIM: tl.constexpr,
# Filled in by the wrapper.
start_m, start_n, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M2)
offs_n = start_n + tl.arange(0, BLOCK_N2)
offs_k = tl.arange(0, HEAD_DIM)
kT_ptrs = K + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
vT_ptrs = V + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# BLOCK_M2 must be a multiple of BLOCK_N2, otherwise the code wouldn't work.
tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0)
curr_n = start_n
step_n = BLOCK_N2
for blk_idx in range(num_steps):
kT = tl.load(kT_ptrs)
vT = tl.load(vT_ptrs)
qk = tl.dot(q, kT)
p = tl.math.exp2(qk - m)
# Autoregressive masking.
if MASK:
offs_n = curr_n + tl.arange(0, BLOCK_N2)
mask = (offs_m[:, None] >= offs_n[None, :])
p = tl.where(mask, p, 0.0)
# Compute dP and dS.
dp = tl.dot(do, vT).to(tl.float32)
ds = p * (dp - Di[:, None])
ds = ds.to(tl.float16)
# Compute dQ.
# NOTE: We need to de-scale dq in the end, because kT was pre-scaled.
dq += tl.dot(ds, tl.trans(kT))
# Increment pointers.
curr_n += step_n
kT_ptrs += step_n * stride_tok
vT_ptrs += step_n * stride_tok
return dq
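# Illustrative reference (not part of the original file): the dense fp32 backward that
# the tiled _attn_bwd_dkdv/_attn_bwd_dq loops above reproduce, for one head without
# causal masking. All names below are illustrative assumptions.
def _attn_bwd_reference(q, k, v, do, sm_scale):
    s = (q.float() @ k.float().T) * sm_scale
    p = torch.softmax(s, dim=-1)
    dv = p.T @ do.float()
    dp = do.float() @ v.float().T
    delta = (p * dp).sum(dim=-1, keepdim=True)
    ds = p * (dp - delta)                     # softmax backward
    dq = (ds @ k.float()) * sm_scale
    dk = (ds.T @ q.float()) * sm_scale
    return dq, dk, dv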
@triton.jit
def _attn_bwd(Q, K, V, sm_scale, #
DO, #
DQ, DK, DV, #
M, D,
# shared by Q/K/V/DO.
stride_z, stride_h, stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
BLK_SLICE_FACTOR: tl.constexpr, #
HEAD_DIM: tl.constexpr):
LN2: tl.constexpr = 0.6931471824645996 # = ln(2)
bhid = tl.program_id(2)
off_chz = (bhid * N_CTX).to(tl.int64)
adj = (stride_h * (bhid % H) + stride_z * (bhid // H)).to(tl.int64)
pid = tl.program_id(0)
# offset pointers for batch/head
Q += adj
K += adj
V += adj
DO += adj
DQ += adj
DK += adj
DV += adj
M += off_chz
D += off_chz
# load scales
offs_k = tl.arange(0, HEAD_DIM)
start_n = pid * BLOCK_N1
start_m = start_n
MASK_BLOCK_M1: tl.constexpr = BLOCK_M1 // BLK_SLICE_FACTOR
offs_n = start_n + tl.arange(0, BLOCK_N1)
dv = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
dk = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
# load K and V: they stay in SRAM throughout the inner loop.
k = tl.load(K + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
v = tl.load(V + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
num_steps = BLOCK_N1 // MASK_BLOCK_M1
dk, dv = _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
MASK_BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=True #
)
start_m += num_steps * MASK_BLOCK_M1
num_steps = (N_CTX - start_m) // BLOCK_M1
# Compute dK and dV for non-masked blocks.
dk, dv = _attn_bwd_dkdv( #
dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=False #
)
dv_ptrs = DV + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dv_ptrs, dv)
# Write back dK.
dk *= sm_scale
dk_ptrs = DK + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dk_ptrs, dk)
# THIS BLOCK DOES DQ:
start_m = pid * BLOCK_M2
end_n = start_m + BLOCK_M2
MASK_BLOCK_N2: tl.constexpr = BLOCK_N2 // BLK_SLICE_FACTOR
offs_m = start_m + tl.arange(0, BLOCK_M2)
q = tl.load(Q + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
dq = tl.zeros([BLOCK_M2, HEAD_DIM], dtype=tl.float32)
do = tl.load(DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
m = tl.load(M + offs_m)
m = m[:, None]
# Compute dQ for masked (diagonal) blocks.
# NOTE: This code scans each row of QK^T backward (from right to left,
# but inside each call to _attn_bwd_dq, from left to right), but that's
# not due to anything important. I just wanted to reuse the loop
# structure for dK & dV above as much as possible.
num_steps = BLOCK_M2 // MASK_BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, MASK_BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * MASK_BLOCK_N2, num_steps, #
MASK=True #
)
end_n -= num_steps * MASK_BLOCK_N2
# stage 2
num_steps = end_n // BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * BLOCK_N2, num_steps, #
MASK=False #
)
# Write back dQ.
dq_ptrs = DQ + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
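# (Clarifying note, not in the original file: backward() pre-multiplies k by
# sm_scale * log2(e), so the extra log2(e) picked up while accumulating dq is
# cancelled by the LN2 = ln(2) factor below, leaving the intended sm_scale.)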
dq *= LN2
tl.store(dq_ptrs, dq)
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
stage = 3 if causal else 1  # STAGE=3: causal (off-band pass + on-band diagonal pass); STAGE=1: non-causal (single full pass)
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
# grid.x = seqlen/BLOCK_M
# grid.y = batch_size * num_heads
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd[grid](
q, k, v, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.save_for_backward(q, k, v, o, M)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
q, k, v, o, M = ctx.saved_tensors
assert do.is_contiguous()
assert q.stride() == k.stride() == v.stride() == o.stride() == do.stride()
dq = torch.empty_like(q)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
BATCH, N_HEAD, N_CTX = q.shape[:3]
PRE_BLOCK = 128
NUM_WARPS, NUM_STAGES = 4, 5
BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32
BLK_SLICE_FACTOR = 2
RCP_LN2 = 1.4426950408889634 # = 1.0 / ln(2)
arg_k = k
arg_k = arg_k * (ctx.sm_scale * RCP_LN2)
PRE_BLOCK = 128
assert N_CTX % PRE_BLOCK == 0
pre_grid = (N_CTX // PRE_BLOCK, BATCH * N_HEAD)
delta = torch.empty_like(M)
_attn_bwd_preprocess[pre_grid](
o, do, #
delta, #
BATCH, N_HEAD, N_CTX, #
BLOCK_M=PRE_BLOCK, HEAD_DIM=ctx.HEAD_DIM #
)
grid = (N_CTX // BLOCK_N1, 1, BATCH * N_HEAD)
_attn_bwd[grid](
q, arg_k, v, ctx.sm_scale, do, dq, dk, dv, #
M, delta, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
N_HEAD, N_CTX, #
BLOCK_M1=BLOCK_M1, BLOCK_N1=BLOCK_N1, #
BLOCK_M2=BLOCK_M2, BLOCK_N2=BLOCK_N2, #
BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, #
HEAD_DIM=ctx.HEAD_DIM, #
num_warps=NUM_WARPS, #
num_stages=NUM_STAGES #
)
return dq, dk, dv, None, None
attention = _attention.apply
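# Minimal usage sketch (not part of the original file); requires a CUDA device and
# fp16 inputs of shape (batch, heads, seq_len, head_dim). Names are illustrative.
def _example_usage():
    q = torch.randn(1, 2, 1024, 64, device="cuda", dtype=torch.float16, requires_grad=True)
    k = torch.randn(1, 2, 1024, 64, device="cuda", dtype=torch.float16, requires_grad=True)
    v = torch.randn(1, 2, 1024, 64, device="cuda", dtype=torch.float16, requires_grad=True)
    out = attention(q, k, v, True, 0.5)       # causal=True, sm_scale=0.5
    out.backward(torch.randn_like(out))       # exercises the Triton backward kernels
    return out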
# batch size, head num, sequence length, head dim
@pytest.mark.parametrize("Z, H, N_CTX, HEAD_DIM", [(1, 2, 1024, 64)])
@pytest.mark.parametrize("causal", [True]) # causal mask
def test_op(Z, H, N_CTX, HEAD_DIM, causal, dtype=torch.float16):
torch.manual_seed(20)
q = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
k = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
v = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
sm_scale = 0.5
dout = torch.randn_like(q)
# reference implementation (PyTorch)
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# triton implementation
tri_out = attention(q, k, v, causal, sm_scale).half()
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
rtol = 0.0
# Relative tolerance workaround for known hardware limitation of MI200 GPU.
# For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
rtol = 1e-2
assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol)
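# (Usage note, not in the original file: the correctness test above can be run in
# isolation with `pytest -q fa/v2.py -k test_op` on a CUDA machine.)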
try:
from flash_attn.flash_attn_interface import \
flash_attn_qkvpacked_func as flash_attn_func
HAS_FLASH = True
except BaseException:
HAS_FLASH = False
TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
BATCH, N_HEADS, HEAD_DIM = 4, 32, 64
# vary seq length for fixed head and batch=4
configs = []
for mode in ["fwd"]: # ["fwd", "bwd"]
for causal in [True]: # [True, False]
if mode == "bwd" and not causal:
continue
configs.append(
triton.testing.Benchmark(
x_names=["N_CTX"],
x_vals=[2**i for i in range(9, 13)],
line_arg="provider",
line_vals=["triton-fp16"] + (["triton-fp8"] if TORCH_HAS_FP8 else []) +
(["flash"] if HAS_FLASH else []) + ["torch"],
line_names=["Triton [FP16]"] + (["Triton [FP8]"] if TORCH_HAS_FP8 else []) +
(["Flash-2"] if HAS_FLASH else []) + ["torch"],
styles=[("red", "-"), ("blue", "-"), ("green", "-"), ("yellow", "-")],
ylabel="TFLOPS",
plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
args={
"H": N_HEADS,
"BATCH": BATCH,
"HEAD_DIM": HEAD_DIM,
"mode": mode,
"causal": causal,
},
))
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
assert mode in ["fwd", "bwd"]
dtype = torch.float16
if "triton" in provider:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
if mode == "fwd" and "fp8" in provider:
q = q.to(torch.float8_e5m2)
k = k.to(torch.float8_e5m2)
v = v.permute(0, 1, 3, 2).contiguous()
v = v.permute(0, 1, 3, 2)
v = v.to(torch.float8_e5m2)
sm_scale = 1.3
fn = lambda: attention(q, k, v, causal, sm_scale)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "flash":
qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
fn = lambda: flash_attn_func(qkv, causal=causal)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "torch":
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
total_flops = 2 * flops_per_matmul
if causal:
total_flops *= 0.5
if mode == "bwd":
total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
return total_flops * 1e-12 / (ms * 1e-3)
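# Worked example (not part of the original file): for BATCH=4, H=32, HEAD_DIM=64 and
# N_CTX=4096 in the causal fwd mode, flops_per_matmul = 2*4*32*4096*4096*64 ≈ 2.75e11,
# total_flops = 2 * 2.75e11 * 0.5 ≈ 2.75e11, so a measured ms of 1.0 would report
# 2.75e11 * 1e-12 / 1e-3 ≈ 275 TFLOPS.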
def peak_memory(backend):
dtype = torch.float16
device = 'cuda'
BATCH, H, HEAD_DIM = 4, 32, 64
for N_CTX in [2**i for i in range(9, 13)]:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
def torch_call():
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
def triton_call():
attention(q, k, v, causal, sm_scale)
QUANTILES = [0.5, 0.2, 0.8]
if backend == "triton":
mem_50, mem_20, mem_80 = _test_memory(triton_call, quantiles=QUANTILES)
print(f"Triton Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if backend == "torch":
mem_50, mem_20, mem_80 = _test_memory(torch_call, quantiles=QUANTILES)
print(f"Torch Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if __name__ == "__main__":
# only works on post-Ampere GPUs right now
bench_flash_attention.run(save_path=".", print_data=True)
# peak_memory("torch")
|
@triton.jit
def _attn_fwd(Q, K, V, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (0, 1) if V.dtype.element_ty == tl.float8e5 else (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
qk_scale *= 1.44269504 # = log2(e) = 1/ln(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
# barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
m_i += tl.math.log2(l_i)
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
|
bjmsong/hands-on-kernels
|
fa/v2.py
|
https://github.com/bjmsong/hands-on-kernels/blob/c219e87282d2e2895e4218175f774cda757df022/fa/v2.py
|
import pytest
import torch
import triton
import triton.language as tl
from utils import _test_memory
def is_hip():
return triton.runtime.driver.active.get_current_target().backend == "hip"
@triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
qk = tl.dot(q, k)
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
if fp8_v:
p = p.to(tl.float8e5)
else:
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
return acc, l_i, m_i
# We don't run auto-tuning every time to keep the tutorial fast. Keeping
# the code below and commenting out the equivalent parameters is convenient for
# re-tuning.
configs = [
triton.Config({'BLOCK_M': BM, 'BLOCK_N': BN}, num_stages=s, num_warps=w) \
for BM in [64, 128]\
for BN in [32, 64]\
for s in ([1] if is_hip() else [3, 4, 7])\
for w in [4, 8]\
]
def keep(conf):
BLOCK_M = conf.kwargs["BLOCK_M"]
BLOCK_N = conf.kwargs["BLOCK_N"]
if BLOCK_M * BLOCK_N < 128 * 128 and conf.num_warps == 8:
return False
return True
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd(Q, K, V, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (0, 1) if V.dtype.element_ty == tl.float8e5 else (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
qk_scale *= 1.44269504 # = log2(e) = 1/ln(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
# barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
m_i += tl.math.log2(l_i)
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
@triton.jit
def _attn_bwd_preprocess(O, DO, #
Delta, #
Z, H, N_CTX, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr #
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_hz = tl.program_id(1)
off_n = tl.arange(0, HEAD_DIM)
# load
o = tl.load(O + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :])
do = tl.load(DO + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :]).to(tl.float32)
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_hz * N_CTX + off_m, delta)
# The main inner-loop logic for computing dK and dV.
@triton.jit
def _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
HEAD_DIM: tl.constexpr, #
# Filled in by the wrapper.
start_n, start_m, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M1)
offs_n = start_n + tl.arange(0, BLOCK_N1)
offs_k = tl.arange(0, HEAD_DIM)
qT_ptrs = Q + offs_m[None, :] * stride_tok + offs_k[:, None] * stride_d
do_ptrs = DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
# BLOCK_N1 must be a multiple of BLOCK_M1, otherwise the code wouldn't work.
tl.static_assert(BLOCK_N1 % BLOCK_M1 == 0)
curr_m = start_m
step_m = BLOCK_M1
for blk_idx in range(num_steps):
qT = tl.load(qT_ptrs)
# Load m before computing qk to reduce pipeline stall.
offs_m = curr_m + tl.arange(0, BLOCK_M1)
m = tl.load(M + offs_m)
qkT = tl.dot(k, qT)
pT = tl.math.exp2(qkT - m[None, :])
# Autoregressive masking.
if MASK:
mask = (offs_m[None, :] >= offs_n[:, None])
pT = tl.where(mask, pT, 0.0)
do = tl.load(do_ptrs)
# Compute dV.
ppT = pT
ppT = ppT.to(tl.float16)
dv += tl.dot(ppT, do)
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# Compute dP and dS.
dpT = tl.dot(v, tl.trans(do)).to(tl.float32)
dsT = pT * (dpT - Di[None, :])
dsT = dsT.to(tl.float16)
dk += tl.dot(dsT, tl.trans(qT))
# Increment pointers.
curr_m += step_m
qT_ptrs += step_m * stride_tok
do_ptrs += step_m * stride_tok
return dk, dv
# the main inner-loop logic for computing dQ
@triton.jit
def _attn_bwd_dq(dq, q, K, V, #
do, m, D,
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
HEAD_DIM: tl.constexpr,
# Filled in by the wrapper.
start_m, start_n, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M2)
offs_n = start_n + tl.arange(0, BLOCK_N2)
offs_k = tl.arange(0, HEAD_DIM)
kT_ptrs = K + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
vT_ptrs = V + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# BLOCK_M2 must be a multiple of BLOCK_N2, otherwise the code wouldn't work.
tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0)
curr_n = start_n
step_n = BLOCK_N2
for blk_idx in range(num_steps):
kT = tl.load(kT_ptrs)
vT = tl.load(vT_ptrs)
qk = tl.dot(q, kT)
p = tl.math.exp2(qk - m)
# Autoregressive masking.
if MASK:
offs_n = curr_n + tl.arange(0, BLOCK_N2)
mask = (offs_m[:, None] >= offs_n[None, :])
p = tl.where(mask, p, 0.0)
# Compute dP and dS.
dp = tl.dot(do, vT).to(tl.float32)
ds = p * (dp - Di[:, None])
ds = ds.to(tl.float16)
# Compute dQ.
# NOTE: We need to de-scale dq in the end, because kT was pre-scaled.
dq += tl.dot(ds, tl.trans(kT))
# Increment pointers.
curr_n += step_n
kT_ptrs += step_n * stride_tok
vT_ptrs += step_n * stride_tok
return dq
@triton.jit
def _attn_bwd(Q, K, V, sm_scale, #
DO, #
DQ, DK, DV, #
M, D,
# shared by Q/K/V/DO.
stride_z, stride_h, stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
BLK_SLICE_FACTOR: tl.constexpr, #
HEAD_DIM: tl.constexpr):
LN2: tl.constexpr = 0.6931471824645996 # = ln(2)
bhid = tl.program_id(2)
off_chz = (bhid * N_CTX).to(tl.int64)
adj = (stride_h * (bhid % H) + stride_z * (bhid // H)).to(tl.int64)
pid = tl.program_id(0)
# offset pointers for batch/head
Q += adj
K += adj
V += adj
DO += adj
DQ += adj
DK += adj
DV += adj
M += off_chz
D += off_chz
# load scales
offs_k = tl.arange(0, HEAD_DIM)
start_n = pid * BLOCK_N1
start_m = start_n
MASK_BLOCK_M1: tl.constexpr = BLOCK_M1 // BLK_SLICE_FACTOR
offs_n = start_n + tl.arange(0, BLOCK_N1)
dv = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
dk = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
# load K and V: they stay in SRAM throughout the inner loop.
k = tl.load(K + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
v = tl.load(V + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
num_steps = BLOCK_N1 // MASK_BLOCK_M1
dk, dv = _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
MASK_BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=True #
)
start_m += num_steps * MASK_BLOCK_M1
num_steps = (N_CTX - start_m) // BLOCK_M1
# Compute dK and dV for non-masked blocks.
dk, dv = _attn_bwd_dkdv( #
dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=False #
)
dv_ptrs = DV + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dv_ptrs, dv)
# Write back dK.
dk *= sm_scale
dk_ptrs = DK + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dk_ptrs, dk)
# THIS BLOCK DOES DQ:
start_m = pid * BLOCK_M2
end_n = start_m + BLOCK_M2
MASK_BLOCK_N2: tl.constexpr = BLOCK_N2 // BLK_SLICE_FACTOR
offs_m = start_m + tl.arange(0, BLOCK_M2)
q = tl.load(Q + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
dq = tl.zeros([BLOCK_M2, HEAD_DIM], dtype=tl.float32)
do = tl.load(DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
m = tl.load(M + offs_m)
m = m[:, None]
# Compute dQ for masked (diagonal) blocks.
# NOTE: This code scans each row of QK^T backward (from right to left,
# but inside each call to _attn_bwd_dq, from left to right), but that's
# not due to anything important. I just wanted to reuse the loop
# structure for dK & dV above as much as possible.
num_steps = BLOCK_M2 // MASK_BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, MASK_BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * MASK_BLOCK_N2, num_steps, #
MASK=True #
)
end_n -= num_steps * MASK_BLOCK_N2
# stage 2
num_steps = end_n // BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * BLOCK_N2, num_steps, #
MASK=False #
)
# Write back dQ.
dq_ptrs = DQ + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
dq *= LN2
tl.store(dq_ptrs, dq)
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
stage = 3 if causal else 1  # STAGE=3: causal (off-band pass + on-band diagonal pass); STAGE=1: non-causal (single full pass)
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
# grid.x = seqlen/BLOCK_M
# grid.y = batch_size * num_heads
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd[grid](
q, k, v, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.save_for_backward(q, k, v, o, M)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
q, k, v, o, M = ctx.saved_tensors
assert do.is_contiguous()
assert q.stride() == k.stride() == v.stride() == o.stride() == do.stride()
dq = torch.empty_like(q)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
BATCH, N_HEAD, N_CTX = q.shape[:3]
PRE_BLOCK = 128
NUM_WARPS, NUM_STAGES = 4, 5
BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32
BLK_SLICE_FACTOR = 2
RCP_LN2 = 1.4426950408889634 # = 1.0 / ln(2)
arg_k = k
arg_k = arg_k * (ctx.sm_scale * RCP_LN2)
PRE_BLOCK = 128
assert N_CTX % PRE_BLOCK == 0
pre_grid = (N_CTX // PRE_BLOCK, BATCH * N_HEAD)
delta = torch.empty_like(M)
_attn_bwd_preprocess[pre_grid](
o, do, #
delta, #
BATCH, N_HEAD, N_CTX, #
BLOCK_M=PRE_BLOCK, HEAD_DIM=ctx.HEAD_DIM #
)
grid = (N_CTX // BLOCK_N1, 1, BATCH * N_HEAD)
_attn_bwd[grid](
q, arg_k, v, ctx.sm_scale, do, dq, dk, dv, #
M, delta, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
N_HEAD, N_CTX, #
BLOCK_M1=BLOCK_M1, BLOCK_N1=BLOCK_N1, #
BLOCK_M2=BLOCK_M2, BLOCK_N2=BLOCK_N2, #
BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, #
HEAD_DIM=ctx.HEAD_DIM, #
num_warps=NUM_WARPS, #
num_stages=NUM_STAGES #
)
return dq, dk, dv, None, None
attention = _attention.apply
# batch size, head num, sequence length, head dim
@pytest.mark.parametrize("Z, H, N_CTX, HEAD_DIM", [(1, 2, 1024, 64)])
@pytest.mark.parametrize("causal", [True]) # causal mask
def test_op(Z, H, N_CTX, HEAD_DIM, causal, dtype=torch.float16):
torch.manual_seed(20)
q = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
k = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
v = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
sm_scale = 0.5
dout = torch.randn_like(q)
# reference implementation (PyTorch)
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# triton implementation
tri_out = attention(q, k, v, causal, sm_scale).half()
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
rtol = 0.0
# Relative tolerance workaround for known hardware limitation of MI200 GPU.
# For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
rtol = 1e-2
assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol)
try:
from flash_attn.flash_attn_interface import \
flash_attn_qkvpacked_func as flash_attn_func
HAS_FLASH = True
except BaseException:
HAS_FLASH = False
TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
BATCH, N_HEADS, HEAD_DIM = 4, 32, 64
# vary seq length for fixed head and batch=4
configs = []
for mode in ["fwd"]: # ["fwd", "bwd"]
for causal in [True]: # [True, False]
if mode == "bwd" and not causal:
continue
configs.append(
triton.testing.Benchmark(
x_names=["N_CTX"],
x_vals=[2**i for i in range(9, 13)],
line_arg="provider",
line_vals=["triton-fp16"] + (["triton-fp8"] if TORCH_HAS_FP8 else []) +
(["flash"] if HAS_FLASH else []) + ["torch"],
line_names=["Triton [FP16]"] + (["Triton [FP8]"] if TORCH_HAS_FP8 else []) +
(["Flash-2"] if HAS_FLASH else []) + ["torch"],
styles=[("red", "-"), ("blue", "-"), ("green", "-"), ("yellow", "-")],
ylabel="TFLOPS",
plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
args={
"H": N_HEADS,
"BATCH": BATCH,
"HEAD_DIM": HEAD_DIM,
"mode": mode,
"causal": causal,
},
))
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
assert mode in ["fwd", "bwd"]
dtype = torch.float16
if "triton" in provider:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
if mode == "fwd" and "fp8" in provider:
q = q.to(torch.float8_e5m2)
k = k.to(torch.float8_e5m2)
v = v.permute(0, 1, 3, 2).contiguous()
v = v.permute(0, 1, 3, 2)
v = v.to(torch.float8_e5m2)
sm_scale = 1.3
fn = lambda: attention(q, k, v, causal, sm_scale)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "flash":
qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
fn = lambda: flash_attn_func(qkv, causal=causal)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "torch":
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
total_flops = 2 * flops_per_matmul
if causal:
total_flops *= 0.5
if mode == "bwd":
total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
return total_flops * 1e-12 / (ms * 1e-3)
def peak_memory(backend):
dtype = torch.float16
device = 'cuda'
BATCH, H, HEAD_DIM = 4, 32, 64
for N_CTX in [2**i for i in range(9, 13)]:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
def torch_call():
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
def triton_call():
attention(q, k, v, causal, sm_scale)
QUANTILES = [0.5, 0.2, 0.8]
if backend == "triton":
mem_50, mem_20, mem_80 = _test_memory(triton_call, quantiles=QUANTILES)
print(f"Triton Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if backend == "torch":
mem_50, mem_20, mem_80 = _test_memory(torch_call, quantiles=QUANTILES)
print(f"Torch Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if __name__ == "__main__":
# only works on post-Ampere GPUs right now
bench_flash_attention.run(save_path=".", print_data=True)
# peak_memory("torch")
|
@triton.jit
def _attn_bwd_preprocess(O, DO, #
Delta, #
Z, H, N_CTX, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr #
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_hz = tl.program_id(1)
off_n = tl.arange(0, HEAD_DIM)
# load
o = tl.load(O + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :])
do = tl.load(DO + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :]).to(tl.float32)
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_hz * N_CTX + off_m, delta)
# The main inner-loop logic for computing dK and dV.
|
bjmsong/hands-on-kernels
|
fa/v2.py
|
https://github.com/bjmsong/hands-on-kernels/blob/c219e87282d2e2895e4218175f774cda757df022/fa/v2.py
|
import pytest
import torch
import triton
import triton.language as tl
from utils import _test_memory
def is_hip():
return triton.runtime.driver.active.get_current_target().backend == "hip"
@triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
qk = tl.dot(q, k)
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
if fp8_v:
p = p.to(tl.float8e5)
else:
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
return acc, l_i, m_i
# We don't run auto-tuning every time to keep the tutorial fast. Keeping
# the code below and commenting out the equivalent parameters is convenient for
# re-tuning.
configs = [
triton.Config({'BLOCK_M': BM, 'BLOCK_N': BN}, num_stages=s, num_warps=w) \
for BM in [64, 128]\
for BN in [32, 64]\
for s in ([1] if is_hip() else [3, 4, 7])\
for w in [4, 8]\
]
def keep(conf):
BLOCK_M = conf.kwargs["BLOCK_M"]
BLOCK_N = conf.kwargs["BLOCK_N"]
if BLOCK_M * BLOCK_N < 128 * 128 and conf.num_warps == 8:
return False
return True
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd(Q, K, V, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (0, 1) if V.dtype.element_ty == tl.float8e5 else (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
qk_scale *= 1.44269504 # = log2(e) = 1/ln(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
# barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
m_i += tl.math.log2(l_i)
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
@triton.jit
def _attn_bwd_preprocess(O, DO, #
Delta, #
Z, H, N_CTX, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr #
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_hz = tl.program_id(1)
off_n = tl.arange(0, HEAD_DIM)
# load
o = tl.load(O + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :])
do = tl.load(DO + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :]).to(tl.float32)
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_hz * N_CTX + off_m, delta)
# The main inner-loop logic for computing dK and dV.
@triton.jit
def _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
HEAD_DIM: tl.constexpr, #
# Filled in by the wrapper.
start_n, start_m, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M1)
offs_n = start_n + tl.arange(0, BLOCK_N1)
offs_k = tl.arange(0, HEAD_DIM)
qT_ptrs = Q + offs_m[None, :] * stride_tok + offs_k[:, None] * stride_d
do_ptrs = DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
# BLOCK_N1 must be a multiple of BLOCK_M1, otherwise the code wouldn't work.
tl.static_assert(BLOCK_N1 % BLOCK_M1 == 0)
curr_m = start_m
step_m = BLOCK_M1
for blk_idx in range(num_steps):
qT = tl.load(qT_ptrs)
# Load m before computing qk to reduce pipeline stall.
offs_m = curr_m + tl.arange(0, BLOCK_M1)
m = tl.load(M + offs_m)
qkT = tl.dot(k, qT)
pT = tl.math.exp2(qkT - m[None, :])
# Autoregressive masking.
if MASK:
mask = (offs_m[None, :] >= offs_n[:, None])
pT = tl.where(mask, pT, 0.0)
do = tl.load(do_ptrs)
# Compute dV.
ppT = pT
ppT = ppT.to(tl.float16)
dv += tl.dot(ppT, do)
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# Compute dP and dS.
dpT = tl.dot(v, tl.trans(do)).to(tl.float32)
dsT = pT * (dpT - Di[None, :])
dsT = dsT.to(tl.float16)
dk += tl.dot(dsT, tl.trans(qT))
# Increment pointers.
curr_m += step_m
qT_ptrs += step_m * stride_tok
do_ptrs += step_m * stride_tok
return dk, dv
# the main inner-loop logic for computing dQ
@triton.jit
def _attn_bwd_dq(dq, q, K, V, #
do, m, D,
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
HEAD_DIM: tl.constexpr,
# Filled in by the wrapper.
start_m, start_n, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M2)
offs_n = start_n + tl.arange(0, BLOCK_N2)
offs_k = tl.arange(0, HEAD_DIM)
kT_ptrs = K + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
vT_ptrs = V + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# BLOCK_M2 must be a multiple of BLOCK_N2, otherwise the code wouldn't work.
tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0)
curr_n = start_n
step_n = BLOCK_N2
for blk_idx in range(num_steps):
kT = tl.load(kT_ptrs)
vT = tl.load(vT_ptrs)
qk = tl.dot(q, kT)
p = tl.math.exp2(qk - m)
# Autoregressive masking.
if MASK:
offs_n = curr_n + tl.arange(0, BLOCK_N2)
mask = (offs_m[:, None] >= offs_n[None, :])
p = tl.where(mask, p, 0.0)
# Compute dP and dS.
dp = tl.dot(do, vT).to(tl.float32)
ds = p * (dp - Di[:, None])
ds = ds.to(tl.float16)
# Compute dQ.
# NOTE: We need to de-scale dq in the end, because kT was pre-scaled.
dq += tl.dot(ds, tl.trans(kT))
# Increment pointers.
curr_n += step_n
kT_ptrs += step_n * stride_tok
vT_ptrs += step_n * stride_tok
return dq
@triton.jit
def _attn_bwd(Q, K, V, sm_scale, #
DO, #
DQ, DK, DV, #
M, D,
# shared by Q/K/V/DO.
stride_z, stride_h, stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
BLK_SLICE_FACTOR: tl.constexpr, #
HEAD_DIM: tl.constexpr):
LN2: tl.constexpr = 0.6931471824645996 # = ln(2)
bhid = tl.program_id(2)
off_chz = (bhid * N_CTX).to(tl.int64)
adj = (stride_h * (bhid % H) + stride_z * (bhid // H)).to(tl.int64)
pid = tl.program_id(0)
# offset pointers for batch/head
Q += adj
K += adj
V += adj
DO += adj
DQ += adj
DK += adj
DV += adj
M += off_chz
D += off_chz
# load scales
offs_k = tl.arange(0, HEAD_DIM)
start_n = pid * BLOCK_N1
start_m = start_n
MASK_BLOCK_M1: tl.constexpr = BLOCK_M1 // BLK_SLICE_FACTOR
offs_n = start_n + tl.arange(0, BLOCK_N1)
dv = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
dk = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
# load K and V: they stay in SRAM throughout the inner loop.
k = tl.load(K + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
v = tl.load(V + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
num_steps = BLOCK_N1 // MASK_BLOCK_M1
dk, dv = _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
MASK_BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=True #
)
start_m += num_steps * MASK_BLOCK_M1
num_steps = (N_CTX - start_m) // BLOCK_M1
# Compute dK and dV for non-masked blocks.
dk, dv = _attn_bwd_dkdv( #
dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=False #
)
dv_ptrs = DV + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dv_ptrs, dv)
# Write back dK.
dk *= sm_scale
dk_ptrs = DK + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dk_ptrs, dk)
# THIS BLOCK DOES DQ:
start_m = pid * BLOCK_M2
end_n = start_m + BLOCK_M2
MASK_BLOCK_N2: tl.constexpr = BLOCK_N2 // BLK_SLICE_FACTOR
offs_m = start_m + tl.arange(0, BLOCK_M2)
q = tl.load(Q + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
dq = tl.zeros([BLOCK_M2, HEAD_DIM], dtype=tl.float32)
do = tl.load(DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
m = tl.load(M + offs_m)
m = m[:, None]
# Compute dQ for masked (diagonal) blocks.
# NOTE: This code scans each row of QK^T backward (from right to left,
# but inside each call to _attn_bwd_dq, from left to right), but that's
# not due to anything important. I just wanted to reuse the loop
# structure for dK & dV above as much as possible.
num_steps = BLOCK_M2 // MASK_BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, MASK_BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * MASK_BLOCK_N2, num_steps, #
MASK=True #
)
end_n -= num_steps * MASK_BLOCK_N2
# stage 2
num_steps = end_n // BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * BLOCK_N2, num_steps, #
MASK=False #
)
# Write back dQ.
dq_ptrs = DQ + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
dq *= LN2
tl.store(dq_ptrs, dq)
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
stage = 3 if causal else 1  # STAGE=3: causal (off-band pass + on-band diagonal pass); STAGE=1: non-causal (single full pass)
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
# grid.x = seqlen/BLOCK_M
# grid.y = batch_size * num_heads
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd[grid](
q, k, v, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.save_for_backward(q, k, v, o, M)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
q, k, v, o, M = ctx.saved_tensors
assert do.is_contiguous()
assert q.stride() == k.stride() == v.stride() == o.stride() == do.stride()
dq = torch.empty_like(q)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
BATCH, N_HEAD, N_CTX = q.shape[:3]
PRE_BLOCK = 128
NUM_WARPS, NUM_STAGES = 4, 5
BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32
BLK_SLICE_FACTOR = 2
RCP_LN2 = 1.4426950408889634 # = 1.0 / ln(2)
arg_k = k
arg_k = arg_k * (ctx.sm_scale * RCP_LN2)
PRE_BLOCK = 128
assert N_CTX % PRE_BLOCK == 0
pre_grid = (N_CTX // PRE_BLOCK, BATCH * N_HEAD)
delta = torch.empty_like(M)
_attn_bwd_preprocess[pre_grid](
o, do, #
delta, #
BATCH, N_HEAD, N_CTX, #
BLOCK_M=PRE_BLOCK, HEAD_DIM=ctx.HEAD_DIM #
)
grid = (N_CTX // BLOCK_N1, 1, BATCH * N_HEAD)
_attn_bwd[grid](
q, arg_k, v, ctx.sm_scale, do, dq, dk, dv, #
M, delta, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
N_HEAD, N_CTX, #
BLOCK_M1=BLOCK_M1, BLOCK_N1=BLOCK_N1, #
BLOCK_M2=BLOCK_M2, BLOCK_N2=BLOCK_N2, #
BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, #
HEAD_DIM=ctx.HEAD_DIM, #
num_warps=NUM_WARPS, #
num_stages=NUM_STAGES #
)
return dq, dk, dv, None, None
attention = _attention.apply
# batch size, head num, sequence length, head dim
@pytest.mark.parametrize("Z, H, N_CTX, HEAD_DIM", [(1, 2, 1024, 64)])
@pytest.mark.parametrize("causal", [True]) # causal mask
def test_op(Z, H, N_CTX, HEAD_DIM, causal, dtype=torch.float16):
torch.manual_seed(20)
q = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
k = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
v = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
sm_scale = 0.5
dout = torch.randn_like(q)
# reference implementation (PyTorch)
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# triton implementation
tri_out = attention(q, k, v, causal, sm_scale).half()
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
rtol = 0.0
# Relative tolerance workaround for known hardware limitation of MI200 GPU.
# For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
rtol = 1e-2
assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol)
try:
from flash_attn.flash_attn_interface import \
flash_attn_qkvpacked_func as flash_attn_func
HAS_FLASH = True
except BaseException:
HAS_FLASH = False
TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
BATCH, N_HEADS, HEAD_DIM = 4, 32, 64
# vary seq length for fixed head and batch=4
configs = []
for mode in ["fwd"]: # ["fwd", "bwd"]
for causal in [True]: # [True, False]
if mode == "bwd" and not causal:
continue
configs.append(
triton.testing.Benchmark(
x_names=["N_CTX"],
x_vals=[2**i for i in range(9, 13)],
line_arg="provider",
line_vals=["triton-fp16"] + (["triton-fp8"] if TORCH_HAS_FP8 else []) +
(["flash"] if HAS_FLASH else []) + ["torch"],
line_names=["Triton [FP16]"] + (["Triton [FP8]"] if TORCH_HAS_FP8 else []) +
(["Flash-2"] if HAS_FLASH else []) + ["torch"],
styles=[("red", "-"), ("blue", "-"), ("green", "-"), ("yellow", "-")],
ylabel="TFLOPS",
plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
args={
"H": N_HEADS,
"BATCH": BATCH,
"HEAD_DIM": HEAD_DIM,
"mode": mode,
"causal": causal,
},
))
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
assert mode in ["fwd", "bwd"]
dtype = torch.float16
if "triton" in provider:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
if mode == "fwd" and "fp8" in provider:
q = q.to(torch.float8_e5m2)
k = k.to(torch.float8_e5m2)
v = v.permute(0, 1, 3, 2).contiguous()
v = v.permute(0, 1, 3, 2)
v = v.to(torch.float8_e5m2)
sm_scale = 1.3
fn = lambda: attention(q, k, v, causal, sm_scale)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "flash":
qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
fn = lambda: flash_attn_func(qkv, causal=causal)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "torch":
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
total_flops = 2 * flops_per_matmul
if causal:
total_flops *= 0.5
if mode == "bwd":
total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
return total_flops * 1e-12 / (ms * 1e-3)
def peak_memory(backend):
dtype = torch.float16
device = 'cuda'
BATCH, H, HEAD_DIM = 4, 32, 64
for N_CTX in [2**i for i in range(9, 13)]:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
def torch_call():
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
def triton_call():
attention(q, k, v, causal, sm_scale)
QUANTILES = [0.5, 0.2, 0.8]
if backend == "triton":
mem_50, mem_20, mem_80 = _test_memory(triton_call, quantiles=QUANTILES)
print(f"Triton Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if backend == "torch":
mem_50, mem_20, mem_80 = _test_memory(torch_call, quantiles=QUANTILES)
print(f"Torch Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if __name__ == "__main__":
# only works on post-Ampere GPUs right now
bench_flash_attention.run(save_path=".", print_data=True)
# peak_memory("torch")
|
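The TFLOPS figure returned by bench_flash_attention above comes from a simple operation count: two N_CTX x N_CTX x HEAD_DIM matmuls (QK^T and PV) per head, halved under a causal mask, and multiplied by 2.5 for the backward pass. A small standalone sketch of that arithmetic (my own helper, not part of the file):

def attention_tflops(batch, heads, n_ctx, head_dim, ms, causal=True, mode="fwd"):
    # 2*N*N*D multiply-adds per matmul; attention does QK^T and then PV
    flops_per_matmul = 2.0 * batch * heads * n_ctx * n_ctx * head_dim
    total_flops = 2 * flops_per_matmul
    if causal:
        total_flops *= 0.5   # only the lower triangle of scores is computed
    if mode == "bwd":
        total_flops *= 2.5   # 2.0 (backward matmuls) + 0.5 (recompute)
    return total_flops * 1e-12 / (ms * 1e-3)   # TFLOP/s from a runtime in ms

# e.g. BATCH=4, H=32, N_CTX=4096, HEAD_DIM=64 at a hypothetical 3.2 ms:
print(f"{attention_tflops(4, 32, 4096, 64, ms=3.2):.1f} TFLOP/s")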
@triton.jit
def _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
HEAD_DIM: tl.constexpr, #
# Filled in by the wrapper.
start_n, start_m, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M1)
offs_n = start_n + tl.arange(0, BLOCK_N1)
offs_k = tl.arange(0, HEAD_DIM)
qT_ptrs = Q + offs_m[None, :] * stride_tok + offs_k[:, None] * stride_d
do_ptrs = DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
# BLOCK_N1 must be a multiple of BLOCK_M1, otherwise the code wouldn't work.
tl.static_assert(BLOCK_N1 % BLOCK_M1 == 0)
curr_m = start_m
step_m = BLOCK_M1
for blk_idx in range(num_steps):
qT = tl.load(qT_ptrs)
# Load m before computing qk to reduce pipeline stall.
offs_m = curr_m + tl.arange(0, BLOCK_M1)
m = tl.load(M + offs_m)
qkT = tl.dot(k, qT)
pT = tl.math.exp2(qkT - m[None, :])
# Autoregressive masking.
if MASK:
mask = (offs_m[None, :] >= offs_n[:, None])
pT = tl.where(mask, pT, 0.0)
do = tl.load(do_ptrs)
# Compute dV.
ppT = pT
ppT = ppT.to(tl.float16)
dv += tl.dot(ppT, do)
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# Compute dP and dS.
dpT = tl.dot(v, tl.trans(do)).to(tl.float32)
dsT = pT * (dpT - Di[None, :])
dsT = dsT.to(tl.float16)
dk += tl.dot(dsT, tl.trans(qT))
# Increment pointers.
curr_m += step_m
qT_ptrs += step_m * stride_tok
do_ptrs += step_m * stride_tok
return dk, dv
# the main inner-loop logic for computing dQ
|
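As a cross-check on the block math in _attn_bwd_dkdv and _attn_bwd_preprocess, here is a minimal single-head PyTorch sketch (my addition, using the natural-base softmax rather than the kernel's exp2 form) that reproduces the same gradient formulas and compares them against autograd:

import torch

torch.manual_seed(0)
N, D, scale = 16, 8, 0.5
q = torch.randn(N, D, dtype=torch.float64, requires_grad=True)
k = torch.randn(N, D, dtype=torch.float64, requires_grad=True)
v = torch.randn(N, D, dtype=torch.float64, requires_grad=True)
do = torch.randn(N, D, dtype=torch.float64)

# autograd reference
p = torch.softmax(q @ k.T * scale, dim=-1)
(p @ v).backward(do)

# manual backward, mirroring what the kernels accumulate blockwise
with torch.no_grad():
    dp = do @ v.T                           # dP = dO V^T
    delta = (p * dp).sum(-1, keepdim=True)  # rowsum(P*dP) == rowsum(O*dO), the _attn_bwd_preprocess quantity
    ds = p * (dp - delta)                   # softmax backward: dS = P * (dP - delta)
    dv = p.T @ do                           # dV = P^T dO
    dk = scale * ds.T @ q                   # dK picks up the score scale
    dq = scale * ds @ k                     # dQ likewise

print(torch.allclose(dv, v.grad), torch.allclose(dk, k.grad), torch.allclose(dq, q.grad))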
bjmsong/hands-on-kernels
|
fa/v2.py
|
https://github.com/bjmsong/hands-on-kernels/blob/c219e87282d2e2895e4218175f774cda757df022/fa/v2.py
|
import pytest
import torch
import triton
import triton.language as tl
from utils import _test_memory
def is_hip():
return triton.runtime.driver.active.get_current_target().backend == "hip"
@triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
qk = tl.dot(q, k)
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
if fp8_v:
p = p.to(tl.float8e5)
else:
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
return acc, l_i, m_i
# We don't run auto-tuning every time to keep the tutorial fast. Keeping
# the code below and commenting out the equivalent parameters is convenient for
# re-tuning.
configs = [
triton.Config({'BLOCK_M': BM, 'BLOCK_N': BN}, num_stages=s, num_warps=w) \
for BM in [64, 128]\
for BN in [32, 64]\
for s in ([1] if is_hip() else [3, 4, 7])\
for w in [4, 8]\
]
def keep(conf):
BLOCK_M = conf.kwargs["BLOCK_M"]
BLOCK_N = conf.kwargs["BLOCK_N"]
if BLOCK_M * BLOCK_N < 128 * 128 and conf.num_warps == 8:
return False
return True
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd(Q, K, V, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (0, 1) if V.dtype.element_ty == tl.float8e5 else (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
    qk_scale *= 1.44269504  # = 1/ln(2) = log2(e): lets exp2 replace exp below
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
        # barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
m_i += tl.math.log2(l_i)
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
@triton.jit
def _attn_bwd_preprocess(O, DO, #
Delta, #
Z, H, N_CTX, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr #
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_hz = tl.program_id(1)
off_n = tl.arange(0, HEAD_DIM)
# load
o = tl.load(O + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :])
do = tl.load(DO + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :]).to(tl.float32)
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_hz * N_CTX + off_m, delta)
# The main inner-loop logic for computing dK and dV.
@triton.jit
def _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
HEAD_DIM: tl.constexpr, #
# Filled in by the wrapper.
start_n, start_m, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M1)
offs_n = start_n + tl.arange(0, BLOCK_N1)
offs_k = tl.arange(0, HEAD_DIM)
qT_ptrs = Q + offs_m[None, :] * stride_tok + offs_k[:, None] * stride_d
do_ptrs = DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
# BLOCK_N1 must be a multiple of BLOCK_M1, otherwise the code wouldn't work.
tl.static_assert(BLOCK_N1 % BLOCK_M1 == 0)
curr_m = start_m
step_m = BLOCK_M1
for blk_idx in range(num_steps):
qT = tl.load(qT_ptrs)
# Load m before computing qk to reduce pipeline stall.
offs_m = curr_m + tl.arange(0, BLOCK_M1)
m = tl.load(M + offs_m)
qkT = tl.dot(k, qT)
pT = tl.math.exp2(qkT - m[None, :])
# Autoregressive masking.
if MASK:
mask = (offs_m[None, :] >= offs_n[:, None])
pT = tl.where(mask, pT, 0.0)
do = tl.load(do_ptrs)
# Compute dV.
ppT = pT
ppT = ppT.to(tl.float16)
dv += tl.dot(ppT, do)
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# Compute dP and dS.
dpT = tl.dot(v, tl.trans(do)).to(tl.float32)
dsT = pT * (dpT - Di[None, :])
dsT = dsT.to(tl.float16)
dk += tl.dot(dsT, tl.trans(qT))
# Increment pointers.
curr_m += step_m
qT_ptrs += step_m * stride_tok
do_ptrs += step_m * stride_tok
return dk, dv
# the main inner-loop logic for computing dQ
@triton.jit
def _attn_bwd_dq(dq, q, K, V, #
do, m, D,
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
HEAD_DIM: tl.constexpr,
# Filled in by the wrapper.
start_m, start_n, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M2)
offs_n = start_n + tl.arange(0, BLOCK_N2)
offs_k = tl.arange(0, HEAD_DIM)
kT_ptrs = K + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
vT_ptrs = V + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# BLOCK_M2 must be a multiple of BLOCK_N2, otherwise the code wouldn't work.
tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0)
curr_n = start_n
step_n = BLOCK_N2
for blk_idx in range(num_steps):
kT = tl.load(kT_ptrs)
vT = tl.load(vT_ptrs)
qk = tl.dot(q, kT)
p = tl.math.exp2(qk - m)
# Autoregressive masking.
if MASK:
offs_n = curr_n + tl.arange(0, BLOCK_N2)
mask = (offs_m[:, None] >= offs_n[None, :])
p = tl.where(mask, p, 0.0)
# Compute dP and dS.
dp = tl.dot(do, vT).to(tl.float32)
ds = p * (dp - Di[:, None])
ds = ds.to(tl.float16)
# Compute dQ.
# NOTE: We need to de-scale dq in the end, because kT was pre-scaled.
dq += tl.dot(ds, tl.trans(kT))
# Increment pointers.
curr_n += step_n
kT_ptrs += step_n * stride_tok
vT_ptrs += step_n * stride_tok
return dq
@triton.jit
def _attn_bwd(Q, K, V, sm_scale, #
DO, #
DQ, DK, DV, #
M, D,
# shared by Q/K/V/DO.
stride_z, stride_h, stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
BLK_SLICE_FACTOR: tl.constexpr, #
HEAD_DIM: tl.constexpr):
LN2: tl.constexpr = 0.6931471824645996 # = ln(2)
bhid = tl.program_id(2)
off_chz = (bhid * N_CTX).to(tl.int64)
adj = (stride_h * (bhid % H) + stride_z * (bhid // H)).to(tl.int64)
pid = tl.program_id(0)
# offset pointers for batch/head
Q += adj
K += adj
V += adj
DO += adj
DQ += adj
DK += adj
DV += adj
M += off_chz
D += off_chz
# load scales
offs_k = tl.arange(0, HEAD_DIM)
start_n = pid * BLOCK_N1
start_m = start_n
MASK_BLOCK_M1: tl.constexpr = BLOCK_M1 // BLK_SLICE_FACTOR
offs_n = start_n + tl.arange(0, BLOCK_N1)
dv = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
dk = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
# load K and V: they stay in SRAM throughout the inner loop.
k = tl.load(K + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
v = tl.load(V + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
num_steps = BLOCK_N1 // MASK_BLOCK_M1
dk, dv = _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
MASK_BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=True #
)
start_m += num_steps * MASK_BLOCK_M1
num_steps = (N_CTX - start_m) // BLOCK_M1
# Compute dK and dV for non-masked blocks.
dk, dv = _attn_bwd_dkdv( #
dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=False #
)
dv_ptrs = DV + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dv_ptrs, dv)
# Write back dK.
dk *= sm_scale
dk_ptrs = DK + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dk_ptrs, dk)
# THIS BLOCK DOES DQ:
start_m = pid * BLOCK_M2
end_n = start_m + BLOCK_M2
MASK_BLOCK_N2: tl.constexpr = BLOCK_N2 // BLK_SLICE_FACTOR
offs_m = start_m + tl.arange(0, BLOCK_M2)
q = tl.load(Q + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
dq = tl.zeros([BLOCK_M2, HEAD_DIM], dtype=tl.float32)
do = tl.load(DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
m = tl.load(M + offs_m)
m = m[:, None]
# Compute dQ for masked (diagonal) blocks.
# NOTE: This code scans each row of QK^T backward (from right to left,
# but inside each call to _attn_bwd_dq, from left to right), but that's
# not due to anything important. I just wanted to reuse the loop
# structure for dK & dV above as much as possible.
num_steps = BLOCK_M2 // MASK_BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, MASK_BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * MASK_BLOCK_N2, num_steps, #
MASK=True #
)
end_n -= num_steps * MASK_BLOCK_N2
# stage 2
num_steps = end_n // BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * BLOCK_N2, num_steps, #
MASK=False #
)
# Write back dQ.
dq_ptrs = DQ + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
dq *= LN2
tl.store(dq_ptrs, dq)
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
        stage = 3 if causal else 1  # causal: STAGE=3 (off-band + masked on-band passes); non-causal: STAGE=1 (single unmasked pass)
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
        # grid.x = ceil(seqlen / BLOCK_M)
        # grid.y = batch_size * num_heads
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd[grid](
q, k, v, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.save_for_backward(q, k, v, o, M)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
q, k, v, o, M = ctx.saved_tensors
assert do.is_contiguous()
assert q.stride() == k.stride() == v.stride() == o.stride() == do.stride()
dq = torch.empty_like(q)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
BATCH, N_HEAD, N_CTX = q.shape[:3]
PRE_BLOCK = 128
NUM_WARPS, NUM_STAGES = 4, 5
BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32
BLK_SLICE_FACTOR = 2
RCP_LN2 = 1.4426950408889634 # = 1.0 / ln(2)
arg_k = k
arg_k = arg_k * (ctx.sm_scale * RCP_LN2)
PRE_BLOCK = 128
assert N_CTX % PRE_BLOCK == 0
pre_grid = (N_CTX // PRE_BLOCK, BATCH * N_HEAD)
delta = torch.empty_like(M)
_attn_bwd_preprocess[pre_grid](
o, do, #
delta, #
BATCH, N_HEAD, N_CTX, #
BLOCK_M=PRE_BLOCK, HEAD_DIM=ctx.HEAD_DIM #
)
grid = (N_CTX // BLOCK_N1, 1, BATCH * N_HEAD)
_attn_bwd[grid](
q, arg_k, v, ctx.sm_scale, do, dq, dk, dv, #
M, delta, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
N_HEAD, N_CTX, #
BLOCK_M1=BLOCK_M1, BLOCK_N1=BLOCK_N1, #
BLOCK_M2=BLOCK_M2, BLOCK_N2=BLOCK_N2, #
BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, #
HEAD_DIM=ctx.HEAD_DIM, #
num_warps=NUM_WARPS, #
num_stages=NUM_STAGES #
)
return dq, dk, dv, None, None
attention = _attention.apply
# batch size, head num, sequence length, head dim
@pytest.mark.parametrize("Z, H, N_CTX, HEAD_DIM", [(1, 2, 1024, 64)])
@pytest.mark.parametrize("causal", [True]) # causal mask
def test_op(Z, H, N_CTX, HEAD_DIM, causal, dtype=torch.float16):
torch.manual_seed(20)
q = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
k = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
v = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
sm_scale = 0.5
dout = torch.randn_like(q)
# reference implementation (PyTorch)
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# triton implementation
tri_out = attention(q, k, v, causal, sm_scale).half()
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
rtol = 0.0
# Relative tolerance workaround for known hardware limitation of MI200 GPU.
# For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
rtol = 1e-2
assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol)
try:
from flash_attn.flash_attn_interface import \
flash_attn_qkvpacked_func as flash_attn_func
HAS_FLASH = True
except BaseException:
HAS_FLASH = False
TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
BATCH, N_HEADS, HEAD_DIM = 4, 32, 64
# vary seq length for fixed head and batch=4
configs = []
for mode in ["fwd"]: # ["fwd", "bwd"]
for causal in [True]: # [True, False]
if mode == "bwd" and not causal:
continue
configs.append(
triton.testing.Benchmark(
x_names=["N_CTX"],
x_vals=[2**i for i in range(9, 13)],
line_arg="provider",
line_vals=["triton-fp16"] + (["triton-fp8"] if TORCH_HAS_FP8 else []) +
(["flash"] if HAS_FLASH else []) + ["torch"],
line_names=["Triton [FP16]"] + (["Triton [FP8]"] if TORCH_HAS_FP8 else []) +
(["Flash-2"] if HAS_FLASH else []) + ["torch"],
styles=[("red", "-"), ("blue", "-"), ("green", "-"), ("yellow", "-")],
ylabel="TFLOPS",
plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
args={
"H": N_HEADS,
"BATCH": BATCH,
"HEAD_DIM": HEAD_DIM,
"mode": mode,
"causal": causal,
},
))
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
assert mode in ["fwd", "bwd"]
dtype = torch.float16
if "triton" in provider:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
if mode == "fwd" and "fp8" in provider:
q = q.to(torch.float8_e5m2)
k = k.to(torch.float8_e5m2)
v = v.permute(0, 1, 3, 2).contiguous()
v = v.permute(0, 1, 3, 2)
v = v.to(torch.float8_e5m2)
sm_scale = 1.3
fn = lambda: attention(q, k, v, causal, sm_scale)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "flash":
qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
fn = lambda: flash_attn_func(qkv, causal=causal)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "torch":
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
total_flops = 2 * flops_per_matmul
if causal:
total_flops *= 0.5
if mode == "bwd":
total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
return total_flops * 1e-12 / (ms * 1e-3)
def peak_memory(backend):
dtype = torch.float16
device = 'cuda'
BATCH, H, HEAD_DIM = 4, 32, 64
for N_CTX in [2**i for i in range(9, 13)]:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
def torch_call():
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
def triton_call():
attention(q, k, v, causal, sm_scale)
QUANTILES = [0.5, 0.2, 0.8]
if backend == "triton":
mem_50, mem_20, mem_80 = _test_memory(triton_call, quantiles=QUANTILES)
print(f"Triton Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if backend == "torch":
mem_50, mem_20, mem_80 = _test_memory(torch_call, quantiles=QUANTILES)
print(f"Torch Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if __name__ == "__main__":
# only works on post-Ampere GPUs right now
bench_flash_attention.run(save_path=".", print_data=True)
# peak_memory("torch")
|
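One detail worth spelling out (my note, not part of the file): the forward kernel folds log2(e) into qk_scale so that tl.math.exp2 can stand in for exp, the backward pre-scales K by sm_scale * RCP_LN2, and dq is multiplied by LN2 at the very end to convert the base-2 gradient back to natural units. A quick numeric sketch of both identities:

import math
import numpy as np

rng = np.random.default_rng(0)
s = rng.standard_normal((4, 4))
sm_scale = 0.5
RCP_LN2 = 1.0 / math.log(2.0)   # 1.4426950408889634, as in backward()
LN2 = math.log(2.0)             # 0.6931471824645996, as in _attn_bwd

# exp(sm_scale * s) == 2 ** (sm_scale * RCP_LN2 * s)
print(np.allclose(np.exp(sm_scale * s), np.exp2(sm_scale * RCP_LN2 * s)))

# d/dx 2**(c*x) = c * ln(2) * 2**(c*x); with c = sm_scale * RCP_LN2 the extra
# factor c * ln(2) equals sm_scale, which is why dq *= LN2 restores the
# gradient of the natural-base softmax.
print(math.isclose(sm_scale * RCP_LN2 * LN2, sm_scale))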
@triton.jit
def _attn_bwd_dq(dq, q, K, V, #
do, m, D,
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
HEAD_DIM: tl.constexpr,
# Filled in by the wrapper.
start_m, start_n, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M2)
offs_n = start_n + tl.arange(0, BLOCK_N2)
offs_k = tl.arange(0, HEAD_DIM)
kT_ptrs = K + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
vT_ptrs = V + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# BLOCK_M2 must be a multiple of BLOCK_N2, otherwise the code wouldn't work.
tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0)
curr_n = start_n
step_n = BLOCK_N2
for blk_idx in range(num_steps):
kT = tl.load(kT_ptrs)
vT = tl.load(vT_ptrs)
qk = tl.dot(q, kT)
p = tl.math.exp2(qk - m)
# Autoregressive masking.
if MASK:
offs_n = curr_n + tl.arange(0, BLOCK_N2)
mask = (offs_m[:, None] >= offs_n[None, :])
p = tl.where(mask, p, 0.0)
# Compute dP and dS.
dp = tl.dot(do, vT).to(tl.float32)
ds = p * (dp - Di[:, None])
ds = ds.to(tl.float16)
# Compute dQ.
# NOTE: We need to de-scale dq in the end, because kT was pre-scaled.
dq += tl.dot(ds, tl.trans(kT))
# Increment pointers.
curr_n += step_n
kT_ptrs += step_n * stride_tok
vT_ptrs += step_n * stride_tok
return dq
|
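The forward kernel in this file relies on the streaming-softmax recurrence in _attn_fwd_inner. A NumPy sketch (added here for reference, natural-base rather than exp2) showing that the m_i / l_i / alpha bookkeeping reproduces the full softmax-weighted output for one query row:

import numpy as np

rng = np.random.default_rng(0)
N_CTX, HEAD_DIM, BLOCK_N = 128, 16, 32
s = rng.standard_normal((N_CTX,))            # one query row of scaled scores
v = rng.standard_normal((N_CTX, HEAD_DIM))

# reference: full softmax over the whole row
e = np.exp(s - s.max())
ref = (e / e.sum()) @ v

# streaming version: running max m_i, running denominator l_i, rescale by alpha
m_i, l_i, acc = -np.inf, 0.0, np.zeros(HEAD_DIM)
for start in range(0, N_CTX, BLOCK_N):
    blk = slice(start, start + BLOCK_N)
    m_ij = max(m_i, s[blk].max())
    p = np.exp(s[blk] - m_ij)
    alpha = np.exp(m_i - m_ij)               # correction for the new running max
    l_i = l_i * alpha + p.sum()
    acc = acc * alpha + p @ v[blk]
    m_i = m_ij

print(np.allclose(acc / l_i, ref))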
bjmsong/hands-on-kernels
|
fa/v2.py
|
https://github.com/bjmsong/hands-on-kernels/blob/c219e87282d2e2895e4218175f774cda757df022/fa/v2.py
|
import pytest
import torch
import triton
import triton.language as tl
from utils import _test_memory
def is_hip():
return triton.runtime.driver.active.get_current_target().backend == "hip"
@triton.jit
def _attn_fwd_inner(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
qk = tl.dot(q, k)
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
if fp8_v:
p = p.to(tl.float8e5)
else:
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
return acc, l_i, m_i
# We don't run auto-tuning every time to keep the tutorial fast. Keeping
# the code below and commenting out the equivalent parameters is convenient for
# re-tuning.
configs = [
triton.Config({'BLOCK_M': BM, 'BLOCK_N': BN}, num_stages=s, num_warps=w) \
for BM in [64, 128]\
for BN in [32, 64]\
for s in ([1] if is_hip() else [3, 4, 7])\
for w in [4, 8]\
]
def keep(conf):
BLOCK_M = conf.kwargs["BLOCK_M"]
BLOCK_N = conf.kwargs["BLOCK_N"]
if BLOCK_M * BLOCK_N < 128 * 128 and conf.num_warps == 8:
return False
return True
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd(Q, K, V, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (0, 1) if V.dtype.element_ty == tl.float8e5 else (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
    qk_scale *= 1.44269504  # = 1/ln(2) = log2(e): lets exp2 replace exp below
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
        # barrier makes it easier for the compiler to schedule the
# two loops independently
acc, l_i, m_i = _attn_fwd_inner(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
m_i += tl.math.log2(l_i)
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
@triton.jit
def _attn_bwd_preprocess(O, DO, #
Delta, #
Z, H, N_CTX, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr #
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_hz = tl.program_id(1)
off_n = tl.arange(0, HEAD_DIM)
# load
o = tl.load(O + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :])
do = tl.load(DO + off_hz * HEAD_DIM * N_CTX + off_m[:, None] * HEAD_DIM + off_n[None, :]).to(tl.float32)
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_hz * N_CTX + off_m, delta)
# The main inner-loop logic for computing dK and dV.
@triton.jit
def _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
HEAD_DIM: tl.constexpr, #
# Filled in by the wrapper.
start_n, start_m, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M1)
offs_n = start_n + tl.arange(0, BLOCK_N1)
offs_k = tl.arange(0, HEAD_DIM)
qT_ptrs = Q + offs_m[None, :] * stride_tok + offs_k[:, None] * stride_d
do_ptrs = DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
# BLOCK_N1 must be a multiple of BLOCK_M1, otherwise the code wouldn't work.
tl.static_assert(BLOCK_N1 % BLOCK_M1 == 0)
curr_m = start_m
step_m = BLOCK_M1
for blk_idx in range(num_steps):
qT = tl.load(qT_ptrs)
# Load m before computing qk to reduce pipeline stall.
offs_m = curr_m + tl.arange(0, BLOCK_M1)
m = tl.load(M + offs_m)
qkT = tl.dot(k, qT)
pT = tl.math.exp2(qkT - m[None, :])
# Autoregressive masking.
if MASK:
mask = (offs_m[None, :] >= offs_n[:, None])
pT = tl.where(mask, pT, 0.0)
do = tl.load(do_ptrs)
# Compute dV.
ppT = pT
ppT = ppT.to(tl.float16)
dv += tl.dot(ppT, do)
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# Compute dP and dS.
dpT = tl.dot(v, tl.trans(do)).to(tl.float32)
dsT = pT * (dpT - Di[None, :])
dsT = dsT.to(tl.float16)
dk += tl.dot(dsT, tl.trans(qT))
# Increment pointers.
curr_m += step_m
qT_ptrs += step_m * stride_tok
do_ptrs += step_m * stride_tok
return dk, dv
# the main inner-loop logic for computing dQ
@triton.jit
def _attn_bwd_dq(dq, q, K, V, #
do, m, D,
# shared by Q/K/V/DO.
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
HEAD_DIM: tl.constexpr,
# Filled in by the wrapper.
start_m, start_n, num_steps, #
MASK: tl.constexpr):
offs_m = start_m + tl.arange(0, BLOCK_M2)
offs_n = start_n + tl.arange(0, BLOCK_N2)
offs_k = tl.arange(0, HEAD_DIM)
kT_ptrs = K + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
vT_ptrs = V + offs_n[None, :] * stride_tok + offs_k[:, None] * stride_d
# D (= delta) is pre-divided by ds_scale.
Di = tl.load(D + offs_m)
# BLOCK_M2 must be a multiple of BLOCK_N2, otherwise the code wouldn't work.
tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0)
curr_n = start_n
step_n = BLOCK_N2
for blk_idx in range(num_steps):
kT = tl.load(kT_ptrs)
vT = tl.load(vT_ptrs)
qk = tl.dot(q, kT)
p = tl.math.exp2(qk - m)
# Autoregressive masking.
if MASK:
offs_n = curr_n + tl.arange(0, BLOCK_N2)
mask = (offs_m[:, None] >= offs_n[None, :])
p = tl.where(mask, p, 0.0)
# Compute dP and dS.
dp = tl.dot(do, vT).to(tl.float32)
ds = p * (dp - Di[:, None])
ds = ds.to(tl.float16)
# Compute dQ.
# NOTE: We need to de-scale dq in the end, because kT was pre-scaled.
dq += tl.dot(ds, tl.trans(kT))
# Increment pointers.
curr_n += step_n
kT_ptrs += step_n * stride_tok
vT_ptrs += step_n * stride_tok
return dq
@triton.jit
def _attn_bwd(Q, K, V, sm_scale, #
DO, #
DQ, DK, DV, #
M, D,
# shared by Q/K/V/DO.
stride_z, stride_h, stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
BLK_SLICE_FACTOR: tl.constexpr, #
HEAD_DIM: tl.constexpr):
LN2: tl.constexpr = 0.6931471824645996 # = ln(2)
bhid = tl.program_id(2)
off_chz = (bhid * N_CTX).to(tl.int64)
adj = (stride_h * (bhid % H) + stride_z * (bhid // H)).to(tl.int64)
pid = tl.program_id(0)
# offset pointers for batch/head
Q += adj
K += adj
V += adj
DO += adj
DQ += adj
DK += adj
DV += adj
M += off_chz
D += off_chz
# load scales
offs_k = tl.arange(0, HEAD_DIM)
start_n = pid * BLOCK_N1
start_m = start_n
MASK_BLOCK_M1: tl.constexpr = BLOCK_M1 // BLK_SLICE_FACTOR
offs_n = start_n + tl.arange(0, BLOCK_N1)
dv = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
dk = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
# load K and V: they stay in SRAM throughout the inner loop.
k = tl.load(K + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
v = tl.load(V + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
num_steps = BLOCK_N1 // MASK_BLOCK_M1
dk, dv = _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
MASK_BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=True #
)
start_m += num_steps * MASK_BLOCK_M1
num_steps = (N_CTX - start_m) // BLOCK_M1
# Compute dK and dV for non-masked blocks.
dk, dv = _attn_bwd_dkdv( #
dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=False #
)
dv_ptrs = DV + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dv_ptrs, dv)
# Write back dK.
dk *= sm_scale
dk_ptrs = DK + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dk_ptrs, dk)
# THIS BLOCK DOES DQ:
start_m = pid * BLOCK_M2
end_n = start_m + BLOCK_M2
MASK_BLOCK_N2: tl.constexpr = BLOCK_N2 // BLK_SLICE_FACTOR
offs_m = start_m + tl.arange(0, BLOCK_M2)
q = tl.load(Q + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
dq = tl.zeros([BLOCK_M2, HEAD_DIM], dtype=tl.float32)
do = tl.load(DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
m = tl.load(M + offs_m)
m = m[:, None]
# Compute dQ for masked (diagonal) blocks.
# NOTE: This code scans each row of QK^T backward (from right to left,
# but inside each call to _attn_bwd_dq, from left to right), but that's
# not due to anything important. I just wanted to reuse the loop
# structure for dK & dV above as much as possible.
num_steps = BLOCK_M2 // MASK_BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, MASK_BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * MASK_BLOCK_N2, num_steps, #
MASK=True #
)
end_n -= num_steps * MASK_BLOCK_N2
# stage 2
num_steps = end_n // BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * BLOCK_N2, num_steps, #
MASK=False #
)
# Write back dQ.
dq_ptrs = DQ + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
dq *= LN2
tl.store(dq_ptrs, dq)
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
        stage = 3 if causal else 1  # causal: STAGE=3 (off-band + masked on-band passes); non-causal: STAGE=1 (single unmasked pass)
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
        # grid.x = ceil(seqlen / BLOCK_M)
        # grid.y = batch_size * num_heads
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd[grid](
q, k, v, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.save_for_backward(q, k, v, o, M)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
q, k, v, o, M = ctx.saved_tensors
assert do.is_contiguous()
assert q.stride() == k.stride() == v.stride() == o.stride() == do.stride()
dq = torch.empty_like(q)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
BATCH, N_HEAD, N_CTX = q.shape[:3]
PRE_BLOCK = 128
NUM_WARPS, NUM_STAGES = 4, 5
BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32
BLK_SLICE_FACTOR = 2
RCP_LN2 = 1.4426950408889634 # = 1.0 / ln(2)
arg_k = k
arg_k = arg_k * (ctx.sm_scale * RCP_LN2)
PRE_BLOCK = 128
assert N_CTX % PRE_BLOCK == 0
pre_grid = (N_CTX // PRE_BLOCK, BATCH * N_HEAD)
delta = torch.empty_like(M)
_attn_bwd_preprocess[pre_grid](
o, do, #
delta, #
BATCH, N_HEAD, N_CTX, #
BLOCK_M=PRE_BLOCK, HEAD_DIM=ctx.HEAD_DIM #
)
grid = (N_CTX // BLOCK_N1, 1, BATCH * N_HEAD)
_attn_bwd[grid](
q, arg_k, v, ctx.sm_scale, do, dq, dk, dv, #
M, delta, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
N_HEAD, N_CTX, #
BLOCK_M1=BLOCK_M1, BLOCK_N1=BLOCK_N1, #
BLOCK_M2=BLOCK_M2, BLOCK_N2=BLOCK_N2, #
BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, #
HEAD_DIM=ctx.HEAD_DIM, #
num_warps=NUM_WARPS, #
num_stages=NUM_STAGES #
)
return dq, dk, dv, None, None
attention = _attention.apply
# batch size, head num, sequence length, head dim
@pytest.mark.parametrize("Z, H, N_CTX, HEAD_DIM", [(1, 2, 1024, 64)])
@pytest.mark.parametrize("causal", [True]) # causal mask
def test_op(Z, H, N_CTX, HEAD_DIM, causal, dtype=torch.float16):
torch.manual_seed(20)
q = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
k = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
v = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
sm_scale = 0.5
dout = torch.randn_like(q)
# reference implementation (PyTorch)
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# triton implementation
tri_out = attention(q, k, v, causal, sm_scale).half()
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
rtol = 0.0
# Relative tolerance workaround for known hardware limitation of MI200 GPU.
# For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
rtol = 1e-2
assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol)
try:
from flash_attn.flash_attn_interface import \
flash_attn_qkvpacked_func as flash_attn_func
HAS_FLASH = True
except BaseException:
HAS_FLASH = False
TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
BATCH, N_HEADS, HEAD_DIM = 4, 32, 64
# vary seq length for fixed head and batch=4
configs = []
for mode in ["fwd"]: # ["fwd", "bwd"]
for causal in [True]: # [True, False]
if mode == "bwd" and not causal:
continue
configs.append(
triton.testing.Benchmark(
x_names=["N_CTX"],
x_vals=[2**i for i in range(9, 13)],
line_arg="provider",
line_vals=["triton-fp16"] + (["triton-fp8"] if TORCH_HAS_FP8 else []) +
(["flash"] if HAS_FLASH else []) + ["torch"],
line_names=["Triton [FP16]"] + (["Triton [FP8]"] if TORCH_HAS_FP8 else []) +
(["Flash-2"] if HAS_FLASH else []) + ["torch"],
styles=[("red", "-"), ("blue", "-"), ("green", "-"), ("yellow", "-")],
ylabel="TFLOPS",
plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
args={
"H": N_HEADS,
"BATCH": BATCH,
"HEAD_DIM": HEAD_DIM,
"mode": mode,
"causal": causal,
},
))
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
assert mode in ["fwd", "bwd"]
dtype = torch.float16
if "triton" in provider:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
if mode == "fwd" and "fp8" in provider:
q = q.to(torch.float8_e5m2)
k = k.to(torch.float8_e5m2)
v = v.permute(0, 1, 3, 2).contiguous()
v = v.permute(0, 1, 3, 2)
v = v.to(torch.float8_e5m2)
sm_scale = 1.3
fn = lambda: attention(q, k, v, causal, sm_scale)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "flash":
qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
fn = lambda: flash_attn_func(qkv, causal=causal)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "torch":
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
total_flops = 2 * flops_per_matmul
if causal:
total_flops *= 0.5
if mode == "bwd":
total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
return total_flops * 1e-12 / (ms * 1e-3)
def peak_memory(backend):
dtype = torch.float16
device = 'cuda'
BATCH, H, HEAD_DIM = 4, 32, 64
for N_CTX in [2**i for i in range(9, 13)]:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
def torch_call():
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
def triton_call():
attention(q, k, v, causal, sm_scale)
QUANTILES = [0.5, 0.2, 0.8]
if backend == "triton":
mem_50, mem_20, mem_80 = _test_memory(triton_call, quantiles=QUANTILES)
print(f"Triton Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if backend == "torch":
mem_50, mem_20, mem_80 = _test_memory(torch_call, quantiles=QUANTILES)
print(f"Torch Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if __name__ == "__main__":
# only works on post-Ampere GPUs right now
bench_flash_attention.run(save_path=".", print_data=True)
# peak_memory("torch")
|
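For orientation, a short usage sketch of the autograd wrapper defined above (my own example; it assumes a CUDA device and that this file is importable, e.g. as fa.v2):

import torch
# from fa.v2 import attention   # assumed import path; adjust to your checkout

def run_once(attention, Z=1, H=2, N_CTX=1024, HEAD_DIM=64, causal=True, sm_scale=0.5):
    # q, k, v laid out as (batch, heads, seq_len, head_dim), fp16 on GPU
    q, k, v = (torch.randn(Z, H, N_CTX, HEAD_DIM, dtype=torch.float16,
                           device="cuda", requires_grad=True) for _ in range(3))
    out = attention(q, k, v, causal, sm_scale)   # launches the _attn_fwd kernel
    out.backward(torch.randn_like(out))          # _attn_bwd_preprocess + _attn_bwd
    return out, q.grad, k.grad, v.grad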
@triton.jit
def _attn_bwd(Q, K, V, sm_scale, #
DO, #
DQ, DK, DV, #
M, D,
# shared by Q/K/V/DO.
stride_z, stride_h, stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1: tl.constexpr, #
BLOCK_N1: tl.constexpr, #
BLOCK_M2: tl.constexpr, #
BLOCK_N2: tl.constexpr, #
BLK_SLICE_FACTOR: tl.constexpr, #
HEAD_DIM: tl.constexpr):
LN2: tl.constexpr = 0.6931471824645996 # = ln(2)
bhid = tl.program_id(2)
off_chz = (bhid * N_CTX).to(tl.int64)
adj = (stride_h * (bhid % H) + stride_z * (bhid // H)).to(tl.int64)
pid = tl.program_id(0)
# offset pointers for batch/head
Q += adj
K += adj
V += adj
DO += adj
DQ += adj
DK += adj
DV += adj
M += off_chz
D += off_chz
# load scales
offs_k = tl.arange(0, HEAD_DIM)
start_n = pid * BLOCK_N1
start_m = start_n
MASK_BLOCK_M1: tl.constexpr = BLOCK_M1 // BLK_SLICE_FACTOR
offs_n = start_n + tl.arange(0, BLOCK_N1)
dv = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
dk = tl.zeros([BLOCK_N1, HEAD_DIM], dtype=tl.float32)
# load K and V: they stay in SRAM throughout the inner loop.
k = tl.load(K + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
v = tl.load(V + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d)
num_steps = BLOCK_N1 // MASK_BLOCK_M1
dk, dv = _attn_bwd_dkdv(dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
MASK_BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=True #
)
start_m += num_steps * MASK_BLOCK_M1
num_steps = (N_CTX - start_m) // BLOCK_M1
# Compute dK and dV for non-masked blocks.
dk, dv = _attn_bwd_dkdv( #
dk, dv, #
Q, k, v, sm_scale, #
DO, #
M, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M1, BLOCK_N1, HEAD_DIM, #
start_n, start_m, num_steps, #
MASK=False #
)
dv_ptrs = DV + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dv_ptrs, dv)
# Write back dK.
dk *= sm_scale
dk_ptrs = DK + offs_n[:, None] * stride_tok + offs_k[None, :] * stride_d
tl.store(dk_ptrs, dk)
# THIS BLOCK DOES DQ:
start_m = pid * BLOCK_M2
end_n = start_m + BLOCK_M2
MASK_BLOCK_N2: tl.constexpr = BLOCK_N2 // BLK_SLICE_FACTOR
offs_m = start_m + tl.arange(0, BLOCK_M2)
q = tl.load(Q + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
dq = tl.zeros([BLOCK_M2, HEAD_DIM], dtype=tl.float32)
do = tl.load(DO + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d)
m = tl.load(M + offs_m)
m = m[:, None]
# Compute dQ for masked (diagonal) blocks.
# NOTE: This code scans each row of QK^T backward (from right to left,
# but inside each call to _attn_bwd_dq, from left to right), but that's
# not due to anything important. I just wanted to reuse the loop
# structure for dK & dV above as much as possible.
num_steps = BLOCK_M2 // MASK_BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, MASK_BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * MASK_BLOCK_N2, num_steps, #
MASK=True #
)
end_n -= num_steps * MASK_BLOCK_N2
# stage 2
num_steps = end_n // BLOCK_N2
dq = _attn_bwd_dq(dq, q, K, V, #
do, m, D, #
stride_tok, stride_d, #
H, N_CTX, #
BLOCK_M2, BLOCK_N2, HEAD_DIM, #
start_m, end_n - num_steps * BLOCK_N2, num_steps, #
MASK=False #
)
# Write back dQ.
dq_ptrs = DQ + offs_m[:, None] * stride_tok + offs_k[None, :] * stride_d
dq *= LN2
tl.store(dq_ptrs, dq)
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
        stage = 3 if causal else 1  # causal: STAGE=3 (off-band + masked on-band passes); non-causal: STAGE=1 (single unmasked pass)
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
        # grid.x = ceil(seqlen / BLOCK_M)
        # grid.y = batch_size * num_heads
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd[grid](
q, k, v, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.save_for_backward(q, k, v, o, M)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
q, k, v, o, M = ctx.saved_tensors
assert do.is_contiguous()
assert q.stride() == k.stride() == v.stride() == o.stride() == do.stride()
dq = torch.empty_like(q)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
BATCH, N_HEAD, N_CTX = q.shape[:3]
PRE_BLOCK = 128
NUM_WARPS, NUM_STAGES = 4, 5
BLOCK_M1, BLOCK_N1, BLOCK_M2, BLOCK_N2 = 32, 128, 128, 32
BLK_SLICE_FACTOR = 2
RCP_LN2 = 1.4426950408889634 # = 1.0 / ln(2)
arg_k = k
arg_k = arg_k * (ctx.sm_scale * RCP_LN2)
PRE_BLOCK = 128
assert N_CTX % PRE_BLOCK == 0
pre_grid = (N_CTX // PRE_BLOCK, BATCH * N_HEAD)
delta = torch.empty_like(M)
_attn_bwd_preprocess[pre_grid](
o, do, #
delta, #
BATCH, N_HEAD, N_CTX, #
BLOCK_M=PRE_BLOCK, HEAD_DIM=ctx.HEAD_DIM #
)
grid = (N_CTX // BLOCK_N1, 1, BATCH * N_HEAD)
_attn_bwd[grid](
q, arg_k, v, ctx.sm_scale, do, dq, dk, dv, #
M, delta, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
N_HEAD, N_CTX, #
BLOCK_M1=BLOCK_M1, BLOCK_N1=BLOCK_N1, #
BLOCK_M2=BLOCK_M2, BLOCK_N2=BLOCK_N2, #
BLK_SLICE_FACTOR=BLK_SLICE_FACTOR, #
HEAD_DIM=ctx.HEAD_DIM, #
num_warps=NUM_WARPS, #
num_stages=NUM_STAGES #
)
return dq, dk, dv, None, None
attention = _attention.apply
# batch size, head num, sequence length, head dim
@pytest.mark.parametrize("Z, H, N_CTX, HEAD_DIM", [(1, 2, 1024, 64)])
@pytest.mark.parametrize("causal", [True]) # causal mask
def test_op(Z, H, N_CTX, HEAD_DIM, causal, dtype=torch.float16):
torch.manual_seed(20)
q = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
k = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
v = (torch.empty((Z, H, N_CTX, HEAD_DIM), dtype=dtype, device="cuda").normal_(mean=0.0, std=0.5).requires_grad_())
sm_scale = 0.5
dout = torch.randn_like(q)
# reference implementation (PyTorch)
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# triton implementation
tri_out = attention(q, k, v, causal, sm_scale).half()
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
rtol = 0.0
# Relative tolerance workaround for known hardware limitation of MI200 GPU.
# For details see https://pytorch.org/docs/stable/notes/numerical_accuracy.html#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
if torch.version.hip is not None and triton.runtime.driver.active.get_current_target().arch == "gfx90a":
rtol = 1e-2
assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=rtol)
assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=rtol)
try:
from flash_attn.flash_attn_interface import \
flash_attn_qkvpacked_func as flash_attn_func
HAS_FLASH = True
except BaseException:
HAS_FLASH = False
TORCH_HAS_FP8 = hasattr(torch, 'float8_e5m2')
BATCH, N_HEADS, HEAD_DIM = 4, 32, 64
# vary seq length for fixed head and batch=4
configs = []
for mode in ["fwd"]: # ["fwd", "bwd"]
for causal in [True]: # [True, False]
if mode == "bwd" and not causal:
continue
configs.append(
triton.testing.Benchmark(
x_names=["N_CTX"],
x_vals=[2**i for i in range(9, 13)],
line_arg="provider",
line_vals=["triton-fp16"] + (["triton-fp8"] if TORCH_HAS_FP8 else []) +
(["flash"] if HAS_FLASH else []) + ["torch"],
line_names=["Triton [FP16]"] + (["Triton [FP8]"] if TORCH_HAS_FP8 else []) +
(["Flash-2"] if HAS_FLASH else []) + ["torch"],
styles=[("red", "-"), ("blue", "-"), ("green", "-"), ("yellow", "-")],
ylabel="TFLOPS",
plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{HEAD_DIM}-{mode}-causal={causal}",
args={
"H": N_HEADS,
"BATCH": BATCH,
"HEAD_DIM": HEAD_DIM,
"mode": mode,
"causal": causal,
},
))
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, HEAD_DIM, causal, mode, provider, device="cuda"):
assert mode in ["fwd", "bwd"]
dtype = torch.float16
if "triton" in provider:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
if mode == "fwd" and "fp8" in provider:
q = q.to(torch.float8_e5m2)
k = k.to(torch.float8_e5m2)
v = v.permute(0, 1, 3, 2).contiguous()
v = v.permute(0, 1, 3, 2)
v = v.to(torch.float8_e5m2)
sm_scale = 1.3
fn = lambda: attention(q, k, v, causal, sm_scale)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "flash":
qkv = torch.randn((BATCH, N_CTX, 3, H, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
fn = lambda: flash_attn_func(qkv, causal=causal)
if mode == "bwd":
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn)
if provider == "torch":
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * HEAD_DIM
total_flops = 2 * flops_per_matmul
if causal:
total_flops *= 0.5
if mode == "bwd":
total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
return total_flops * 1e-12 / (ms * 1e-3)
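# Worked example (added, hedged sketch): the FLOPs accounting above counts the two
# matmuls (QK^T and PV) at 2*B*H*N_CTX^2*D each, halves the total for a causal mask,
# and weights a backward pass as 2.5x the forward. `estimated_tflops` is an
# illustrative helper, not part of the original benchmark.
def estimated_tflops(batch, heads, n_ctx, head_dim, ms, causal=True, mode="fwd"):
    flops_per_matmul = 2.0 * batch * heads * n_ctx * n_ctx * head_dim
    total = 2 * flops_per_matmul          # QK^T and PV
    if causal:
        total *= 0.5                      # only the lower triangle is computed
    if mode == "bwd":
        total *= 2.5                      # 2.0 (backward) + 0.5 (recompute)
    return total * 1e-12 / (ms * 1e-3)    # TFLOP/s

# e.g. estimated_tflops(4, 32, 4096, 64, ms=10.0) ~= 27.5 TFLOPS for a 10 ms causal forward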
def peak_memory(backend):
dtype = torch.float16
device = 'cuda'
    BATCH, H, HEAD_DIM = 4, 32, 64
    causal = True  # added: `causal` was otherwise undefined in this function (assumed True)
for N_CTX in [2**i for i in range(9, 13)]:
q = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, HEAD_DIM), dtype=dtype, device=device, requires_grad=True)
sm_scale = 1.3
def torch_call():
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale # attention score
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
fn = lambda: torch.matmul(p, v)
ms = triton.testing.do_bench(fn)
def triton_call():
attention(q, k, v, causal, sm_scale)
QUANTILES = [0.5, 0.2, 0.8]
if backend == "triton":
mem_50, mem_20, mem_80 = _test_memory(triton_call, quantiles=QUANTILES)
print(f"Triton Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if backend == "torch":
mem_50, mem_20, mem_80 = _test_memory(torch_call, quantiles=QUANTILES)
print(f"Torch Peak Memory of {N_CTX} is {mem_50, mem_20, mem_80}")
if __name__ == "__main__":
# only works on post-Ampere GPUs right now
bench_flash_attention.run(save_path=".", print_data=True)
# peak_memory("torch")
|
FlagOpen/FlagGems
|
src/flag_gems/ops/tanh.py
|
https://github.com/FlagOpen/FlagGems/blob/2437f4ffa2d644e38c26aacbf1249263a2016bb2/src/flag_gems/ops/tanh.py
|
import logging
import torch
import triton
import triton.language as tl
from ..utils import pointwise_dynamic, tl_extra_shim
pow = tl_extra_shim.pow
_tanh = tl_extra_shim.tanh
@pointwise_dynamic(promotion_methods=[(0, "INT_TO_FLOAT")])
@triton.jit
def tanh_forward(x):
return _tanh(x.to(tl.float32))
@pointwise_dynamic(promotion_methods=[(0, "INT_TO_FLOAT")])
@triton.jit
def tanh_backward(y, dy):
return dy * (1.0 - pow(y.to(tl.float32), 2))
class Tanh(torch.autograd.Function):
@staticmethod
def forward(ctx, A):
logging.debug("GEMS TANH FORWARD")
if A.requires_grad is True:
out = tanh_forward(A.to(torch.float32))
ctx.save_for_backward(out)
return out.to(A.dtype)
else:
out = tanh_forward(A)
return out
@staticmethod
def backward(ctx, out_grad):
logging.debug("GEMS TANH BACKWARD")
(out,) = ctx.saved_tensors
in_grad = tanh_backward(out, out_grad)
return in_grad
def tanh(A):
return Tanh.apply(A)
class InplaceTanh(torch.autograd.Function):
@staticmethod
def forward(ctx, A):
logging.debug("GEMS TANH_ FORWARD")
if A.requires_grad is True:
out = tanh_forward(A.to(torch.float32))
ctx.save_for_backward(out)
A.copy_(out.to(A.dtype))
ctx.mark_dirty(A)
else:
tanh_forward(A, out0=A)
return A
@staticmethod
def backward(ctx, out_grad):
logging.debug("GEMS TANH_ BACKWARD")
(out,) = ctx.saved_tensors
in_grad = tanh_backward(out, out_grad)
return in_grad
def tanh_(A):
InplaceTanh.apply(A)
return A
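# Hedged usage sketch (added, not part of the original file): Tanh saves the fp32
# forward output y and reuses dtanh(x) = 1 - y**2 in backward, so its gradient
# should match torch.tanh. Assumes a CUDA device and a working flag_gems build.
if __name__ == "__main__" and torch.cuda.is_available():
    x = torch.randn(1024, device="cuda", dtype=torch.float32, requires_grad=True)
    tanh(x).sum().backward()
    x_ref = x.detach().clone().requires_grad_()
    torch.tanh(x_ref).sum().backward()
    print("max grad diff:", (x.grad - x_ref.grad).abs().max().item())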
|
@triton.jit
def tanh_forward(x):
return _tanh(x.to(tl.float32))
@pointwise_dynamic(promotion_methods=[(0, "INT_TO_FLOAT")])
|
FlagOpen/FlagGems
|
src/flag_gems/ops/tanh.py
|
https://github.com/FlagOpen/FlagGems/blob/2437f4ffa2d644e38c26aacbf1249263a2016bb2/src/flag_gems/ops/tanh.py
|
import logging
import torch
import triton
import triton.language as tl
from ..utils import pointwise_dynamic, tl_extra_shim
pow = tl_extra_shim.pow
_tanh = tl_extra_shim.tanh
@pointwise_dynamic(promotion_methods=[(0, "INT_TO_FLOAT")])
@triton.jit
def tanh_forward(x):
return _tanh(x.to(tl.float32))
@pointwise_dynamic(promotion_methods=[(0, "INT_TO_FLOAT")])
@triton.jit
def tanh_backward(y, dy):
return dy * (1.0 - pow(y.to(tl.float32), 2))
class Tanh(torch.autograd.Function):
@staticmethod
def forward(ctx, A):
logging.debug("GEMS TANH FORWARD")
if A.requires_grad is True:
out = tanh_forward(A.to(torch.float32))
ctx.save_for_backward(out)
return out.to(A.dtype)
else:
out = tanh_forward(A)
return out
@staticmethod
def backward(ctx, out_grad):
logging.debug("GEMS TANH BACKWARD")
(out,) = ctx.saved_tensors
in_grad = tanh_backward(out, out_grad)
return in_grad
def tanh(A):
return Tanh.apply(A)
class InplaceTanh(torch.autograd.Function):
@staticmethod
def forward(ctx, A):
logging.debug("GEMS TANH_ FORWARD")
if A.requires_grad is True:
out = tanh_forward(A.to(torch.float32))
ctx.save_for_backward(out)
A.copy_(out.to(A.dtype))
ctx.mark_dirty(A)
else:
tanh_forward(A, out0=A)
return A
@staticmethod
def backward(ctx, out_grad):
logging.debug("GEMS TANH_ BACKWARD")
(out,) = ctx.saved_tensors
in_grad = tanh_backward(out, out_grad)
return in_grad
def tanh_(A):
InplaceTanh.apply(A)
return A
|
@triton.jit
def tanh_backward(y, dy):
return dy * (1.0 - pow(y.to(tl.float32), 2))
class Tanh(torch.autograd.Function):
@staticmethod
def forward(ctx, A):
logging.debug("GEMS TANH FORWARD")
if A.requires_grad is True:
out = tanh_forward(A.to(torch.float32))
ctx.save_for_backward(out)
return out.to(A.dtype)
else:
out = tanh_forward(A)
return out
@staticmethod
def backward(ctx, out_grad):
logging.debug("GEMS TANH BACKWARD")
(out,) = ctx.saved_tensors
in_grad = tanh_backward(out, out_grad)
return in_grad
def tanh(A):
return Tanh.apply(A)
class InplaceTanh(torch.autograd.Function):
@staticmethod
def forward(ctx, A):
logging.debug("GEMS TANH_ FORWARD")
if A.requires_grad is True:
out = tanh_forward(A.to(torch.float32))
ctx.save_for_backward(out)
A.copy_(out.to(A.dtype))
ctx.mark_dirty(A)
else:
tanh_forward(A, out0=A)
return A
@staticmethod
def backward(ctx, out_grad):
logging.debug("GEMS TANH_ BACKWARD")
(out,) = ctx.saved_tensors
in_grad = tanh_backward(out, out_grad)
return in_grad
def tanh_(A):
InplaceTanh.apply(A)
return A
|
LordXX505/SleepGPT
|
main/utils/triton.py
|
https://github.com/LordXX505/SleepGPT/blob/e6a4d375172e009a9753e541e8b22223028e200f/main/utils/triton.py
|
import torch
try:
import triton
import triton.language as tl
except ImportError as e:
print('triton is not installed, please install by running `pip install triton -U --pre`')
exit()
# clone param and exp_avg before autotuning takes place
# as those are updated in-place
def clone_inplace_updated_params(nargs):
nargs['p_ptr'] = nargs['p_ptr'].clone()
nargs['exp_avg_ptr'] = nargs['exp_avg_ptr'].clone()
# triton cuda kernel
@triton.autotune(configs = [
triton.Config({'BLOCK_SIZE': 128}, num_warps = 4, pre_hook = clone_inplace_updated_params),
triton.Config({'BLOCK_SIZE': 1024}, num_warps = 8, pre_hook = clone_inplace_updated_params),
], key = ['n_elements'])
@triton.jit
def update_fn_kernel(
p_ptr,
grad_ptr,
exp_avg_ptr,
lr,
wd,
beta1,
beta2,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis = 0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# offsetted pointers
offset_p_ptr = p_ptr + offsets
offset_grad_ptr = grad_ptr + offsets
offset_exp_avg_ptr = exp_avg_ptr + offsets
# load
p = tl.load(offset_p_ptr, mask = mask)
grad = tl.load(offset_grad_ptr, mask = mask)
exp_avg = tl.load(offset_exp_avg_ptr, mask = mask)
# stepweight decay
p = p * (1 - lr * wd)
# diff between momentum running average and grad
diff = exp_avg - grad
# weight update
update = diff * beta1 + grad
# torch.sign
can_update = update != 0
update_sign = tl.where(update > 0, -lr, lr)
p = p + update_sign * can_update
# decay the momentum running average coefficient
exp_avg = diff * beta2 + grad
# store new params and momentum running average coefficient
tl.store(offset_p_ptr, p, mask = mask)
tl.store(offset_exp_avg_ptr, exp_avg, mask = mask)
def update_fn(
p: torch.Tensor,
grad: torch.Tensor,
exp_avg: torch.Tensor,
lr: float,
wd: float,
beta1: float,
beta2: float
):
assert all([t.is_cuda for t in (p, grad, exp_avg)])
n_elements = p.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
update_fn_kernel[grid](
p,
grad,
exp_avg,
lr,
wd,
beta1,
beta2,
n_elements
)
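# Hedged sanity-check sketch (added, not part of the original file): the kernel
# applies the Lion update in place, so a plain PyTorch restatement of the same
# formula should agree closely. `lion_reference` is an illustrative name, not an
# existing API.
def lion_reference(p, grad, exp_avg, lr, wd, beta1, beta2):
    p = p * (1 - lr * wd)                           # stepweight (decoupled) decay
    update = exp_avg * beta1 + grad * (1 - beta1)   # interpolate momentum and grad
    p = p - lr * torch.sign(update)                 # sign-based step
    exp_avg = exp_avg * beta2 + grad * (1 - beta2)  # decay the momentum running average
    return p, exp_avg

if __name__ == "__main__" and torch.cuda.is_available():
    p = torch.randn(4096, device="cuda")
    grad = torch.randn_like(p)
    exp_avg = torch.randn_like(p)
    ref_p, ref_m = lion_reference(p.clone(), grad, exp_avg.clone(), 1e-4, 1e-2, 0.9, 0.99)
    update_fn(p, grad, exp_avg, 1e-4, 1e-2, 0.9, 0.99)  # updates p and exp_avg in place
    print((p - ref_p).abs().max().item(), (exp_avg - ref_m).abs().max().item())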
|
@triton.jit
def update_fn_kernel(
p_ptr,
grad_ptr,
exp_avg_ptr,
lr,
wd,
beta1,
beta2,
n_elements,
BLOCK_SIZE: tl.constexpr,
):
pid = tl.program_id(axis = 0)
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
mask = offsets < n_elements
# offsetted pointers
offset_p_ptr = p_ptr + offsets
offset_grad_ptr = grad_ptr + offsets
offset_exp_avg_ptr = exp_avg_ptr + offsets
# load
p = tl.load(offset_p_ptr, mask = mask)
grad = tl.load(offset_grad_ptr, mask = mask)
exp_avg = tl.load(offset_exp_avg_ptr, mask = mask)
# stepweight decay
p = p * (1 - lr * wd)
# diff between momentum running average and grad
diff = exp_avg - grad
# weight update
update = diff * beta1 + grad
# torch.sign
can_update = update != 0
update_sign = tl.where(update > 0, -lr, lr)
p = p + update_sign * can_update
# decay the momentum running average coefficient
exp_avg = diff * beta2 + grad
# store new params and momentum running average coefficient
tl.store(offset_p_ptr, p, mask = mask)
tl.store(offset_exp_avg_ptr, exp_avg, mask = mask)
def update_fn(
p: torch.Tensor,
grad: torch.Tensor,
exp_avg: torch.Tensor,
lr: float,
wd: float,
beta1: float,
beta2: float
):
assert all([t.is_cuda for t in (p, grad, exp_avg)])
n_elements = p.numel()
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
update_fn_kernel[grid](
p,
grad,
exp_avg,
lr,
wd,
beta1,
beta2,
n_elements
)
|
LiuTaowen-Tony/flash-qlora
|
merged_forward_v1.py
|
https://github.com/LiuTaowen-Tony/flash-qlora/blob/57ce887d668430693e3b2d76cc707223ee0c4b82/merged_forward_v1.py
|
import torch
import triton
import triton.language as tl
import common
def get_configs_io_bound():
configs = []
for block_n in [256, 128, 64, 32, 16]:
for block_m in [256, 128, 64, 32]:
for block_k in [256, 128, 64]:
for num_stages in [5, 4, 3]:
for num_warps in [4, 8]:
for num_ctas in [1]:
if block_m * block_n * block_k >= 16 * 64 * 64 and block_m * block_n * block_k <= 128 * 128 * 256:
configs.append(
triton.Config({'block_M': block_m, 'block_N': block_n, 'block_K': block_k, 'R': 16, 'GROUP_SIZE_M': 8},
num_stages=num_stages, num_warps=num_warps, num_ctas=num_ctas))
# for split_k in [2, 4, 8, 16]:
# configs.append(triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k},
# num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C')))
return configs
@triton.autotune(
configs=common.get_autotune_config(),
# configs=get_configs_io_bound(),
key=["M", "N", "K"],
)
@triton.jit
def merged_qlora_forward_kernel(
x_ptr,
w_ptr,
u_ptr,
v_ptr,
c_ptr,
stride_xm,
stride_xk,
stride_wk,
stride_wn,
stride_uk,
stride_ur,
stride_vr,
stride_vn,
M: int,
N: int,
K: int,
R: tl.constexpr,
block_M: tl.constexpr,
block_N: tl.constexpr,
block_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
pid_m, pid_n = common.reorder_pid(pid_m, pid_n, M, N, block_M, block_N, GROUP_SIZE_M)
# Block starting positions
offs_m = pid_m * block_M
offs_n = pid_n * block_N
# Initialize fp and int accumulators
fp_acc = tl.zeros((block_M, block_N), dtype=tl.float32)
# Load block of V
v_blk = tl.load(
v_ptr + tl.arange(0, R)[:, None] * stride_vr + tl.arange(0, block_N)
)
# R: 16 block_N: 256 block_K: 32 block_M: 64
for i in range(0, K, block_K):
# Load blocks of X, W, and U
x_blk = tl.load(
x_ptr
+ (offs_m + tl.arange(0, block_M))[:, None] * stride_xm
+ (i + tl.arange(0, block_K))
)
w_blk = tl.load(
w_ptr
+ (i + tl.arange(0, block_K))[:, None] * stride_wk
+ tl.arange(0, block_N)
)
fp_acc = tl.dot(x_blk, w_blk, fp_acc)
u_blk = tl.load(
u_ptr + (i + tl.arange(0, block_K))[:, None] * stride_uk + tl.arange(0, R)
)
xu_blk = tl.dot(x_blk, u_blk)
xu_blk2 = tl.cast(xu_blk, dtype=tl.bfloat16)
fp_acc = tl.dot(xu_blk2, v_blk, fp_acc)
tl.store(
c_ptr + (offs_m + tl.arange(0, block_M))[:, None] * N + tl.arange(0, block_N),
fp_acc,
)
def merged_qlora_forward(
x: torch.Tensor, w: torch.Tensor, u: torch.Tensor, v: torch.Tensor
) -> torch.Tensor:
# Allocate result tensor on the GPU
m, k = x.shape
r, n = v.shape
assert k == u.shape[0]
assert u.shape[1] == r
assert w.shape[0] == k
assert w.shape[1] == n
c = torch.empty((m, n), dtype=x.dtype, device="cuda")
grid = lambda opt: (triton.cdiv(m, opt["block_M"]), triton.cdiv(n, opt["block_N"]))
# Launch the Triton kernel with auto-tuned configurations
merged_qlora_forward_kernel[grid](
x,
w,
u,
v,
c,
x.stride(0),
x.stride(1),
w.stride(0),
w.stride(1),
u.stride(0),
u.stride(1),
v.stride(0),
v.stride(1),
M=m,
N=n,
K=k,
)
return c
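# Hedged reference sketch (added, not part of the original file): the kernel is
# intended to fuse C = X @ W + (X @ U) @ V in one pass over K, casting X @ U to
# bfloat16 before the second dot as the kernel does with xu_blk2. The name
# `merged_qlora_reference` is illustrative, not an existing API.
def merged_qlora_reference(x, w, u, v):
    xw = x.float() @ w.float()                       # dense base-weight path
    xu = (x.float() @ u.float()).to(torch.bfloat16)  # low-rank projection, cast like the kernel
    return xw + xu.float() @ v.float()               # add the low-rank correction

# e.g. torch.testing.assert_close(merged_qlora_forward(x, w, u, v).float(),
#                                 merged_qlora_reference(x, w, u, v), rtol=1e-2, atol=1e-1)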
|
@triton.jit
def merged_qlora_forward_kernel(
x_ptr,
w_ptr,
u_ptr,
v_ptr,
c_ptr,
stride_xm,
stride_xk,
stride_wk,
stride_wn,
stride_uk,
stride_ur,
stride_vr,
stride_vn,
M: int,
N: int,
K: int,
R: tl.constexpr,
block_M: tl.constexpr,
block_N: tl.constexpr,
block_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr,
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
pid_m, pid_n = common.reorder_pid(pid_m, pid_n, M, N, block_M, block_N, GROUP_SIZE_M)
# Block starting positions
offs_m = pid_m * block_M
offs_n = pid_n * block_N
# Initialize fp and int accumulators
fp_acc = tl.zeros((block_M, block_N), dtype=tl.float32)
# Load block of V
v_blk = tl.load(
v_ptr + tl.arange(0, R)[:, None] * stride_vr + tl.arange(0, block_N)
)
# R: 16 block_N: 256 block_K: 32 block_M: 64
for i in range(0, K, block_K):
# Load blocks of X, W, and U
x_blk = tl.load(
x_ptr
+ (offs_m + tl.arange(0, block_M))[:, None] * stride_xm
+ (i + tl.arange(0, block_K))
)
w_blk = tl.load(
w_ptr
+ (i + tl.arange(0, block_K))[:, None] * stride_wk
+ tl.arange(0, block_N)
)
fp_acc = tl.dot(x_blk, w_blk, fp_acc)
u_blk = tl.load(
u_ptr + (i + tl.arange(0, block_K))[:, None] * stride_uk + tl.arange(0, R)
)
xu_blk = tl.dot(x_blk, u_blk)
xu_blk2 = tl.cast(xu_blk, dtype=tl.bfloat16)
fp_acc = tl.dot(xu_blk2, v_blk, fp_acc)
tl.store(
c_ptr + (offs_m + tl.arange(0, block_M))[:, None] * N + tl.arange(0, block_N),
fp_acc,
)
def merged_qlora_forward(
x: torch.Tensor, w: torch.Tensor, u: torch.Tensor, v: torch.Tensor
) -> torch.Tensor:
# Allocate result tensor on the GPU
m, k = x.shape
r, n = v.shape
assert k == u.shape[0]
assert u.shape[1] == r
assert w.shape[0] == k
assert w.shape[1] == n
c = torch.empty((m, n), dtype=x.dtype, device="cuda")
grid = lambda opt: (triton.cdiv(m, opt["block_M"]), triton.cdiv(n, opt["block_N"]))
# Launch the Triton kernel with auto-tuned configurations
merged_qlora_forward_kernel[grid](
x,
w,
u,
v,
c,
x.stride(0),
x.stride(1),
w.stride(0),
w.stride(1),
u.stride(0),
u.stride(1),
v.stride(0),
v.stride(1),
M=m,
N=n,
K=k,
)
return c
|
Ezio-csm/EzAtten
|
flash_atten_int8.py
|
https://github.com/Ezio-csm/EzAtten/blob/0c27c92cb468e012f206b3d385ecdd7e9ec13c52/flash_atten_int8.py
|
import pytest
import torch
import triton
import triton.language as tl
from configs import *
@triton.jit
def _attn_fwd_inner_int8(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
q_scale, K_block_scale_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
K_block_scale_ptr = tl.advance(K_block_scale_ptr, (lo,))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
k_scale = tl.load(K_block_scale_ptr)
qk = tl.dot(q, k).to(tl.float32)
qk = qk * q_scale[:, None]
qk = qk * k_scale
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
if fp8_v:
p = p.to(tl.float8e5)
else:
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
K_block_scale_ptr = tl.advance(K_block_scale_ptr, (BLOCK_N,))
return acc, l_i, m_i
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd_int8(Q, K, V, Q_scale, K_scale, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
stride_s1, stride_s2, stride_s3, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
scl_offset = off_z.to(tl.int64) * stride_s1 + off_h.to(tl.int64) * stride_s2
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (0, 1) if V.dtype.element_ty == tl.float8e5 else (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# scale vector pointers
Q_block_scale_ptr = tl.make_block_ptr(
base=Q_scale + scl_offset,
shape=(N_CTX,),
strides=(stride_s3,),
offsets=(start_m * BLOCK_M,),
block_shape=(BLOCK_M,),
order=(0,),
)
K_block_scale_ptr = tl.make_block_ptr(
base=K_scale + scl_offset,
shape=(N_CTX,),
strides=(stride_s3,),
offsets=(0,),
block_shape=(BLOCK_N,),
order=(0,),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
    qk_scale *= 1.44269504  # log2(e) = 1 / ln(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
q_scale = tl.load(Q_block_scale_ptr)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i, m_i = _attn_fwd_inner_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
        # barrier makes it easier for the compiler to schedule the
        # two loops independently
acc, l_i, m_i = _attn_fwd_inner_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
m_i += tl.math.log2(l_i)
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
class _attention_int8(torch.autograd.Function):
@staticmethod # q, k: int8, v: float16, q_scale, k_scale: float16
def forward(ctx, q, k, v, q_scale, k_scale, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
stage = 3 if causal else 1
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd_int8[grid](
q, k, v, q_scale, k_scale, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q_scale.stride(0), q_scale.stride(1), q_scale.stride(2), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
attention_int8 = _attention_int8.apply
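# Hypothetical calling-convention sketch (added, an assumption rather than part of
# the original file): the kernel consumes int8 q/k plus per-token fp16 scales of
# shape (Z, H, N_CTX) and rescales qk by q_scale[:, None] * k_scale inside the loop.
# `quantize_per_token` is an illustrative helper, not an existing API.
def quantize_per_token(x):
    scale = x.abs().amax(dim=-1).clamp(min=1e-6) / 127.0                      # (Z, H, N_CTX)
    x_int8 = torch.round(x / scale[..., None]).clamp(-127, 127).to(torch.int8)
    return x_int8, scale.to(torch.float16)

# q_i8, q_scale = quantize_per_token(q)   # q, k: (Z, H, N_CTX, HEAD_DIM) in float16
# k_i8, k_scale = quantize_per_token(k)
# out = attention_int8(q_i8, k_i8, v, q_scale, k_scale, True, sm_scale)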
|
@triton.jit
def _attn_fwd_inner_int8(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
q_scale, K_block_scale_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
K_block_scale_ptr = tl.advance(K_block_scale_ptr, (lo,))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
k_scale = tl.load(K_block_scale_ptr)
qk = tl.dot(q, k).to(tl.float32)
qk = qk * q_scale[:, None]
qk = qk * k_scale
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
if fp8_v:
p = p.to(tl.float8e5)
else:
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
K_block_scale_ptr = tl.advance(K_block_scale_ptr, (BLOCK_N,))
return acc, l_i, m_i
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
|
Ezio-csm/EzAtten
|
flash_atten_int8.py
|
https://github.com/Ezio-csm/EzAtten/blob/0c27c92cb468e012f206b3d385ecdd7e9ec13c52/flash_atten_int8.py
|
import pytest
import torch
import triton
import triton.language as tl
from configs import *
@triton.jit
def _attn_fwd_inner_int8(acc, l_i, m_i, q, #
K_block_ptr, V_block_ptr, #
q_scale, K_block_scale_ptr, #
start_m, qk_scale, #
BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr, #
N_CTX: tl.constexpr, fp8_v: tl.constexpr):
# range of values handled by this stage
if STAGE == 1:
lo, hi = 0, start_m * BLOCK_M
elif STAGE == 2:
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
lo = tl.multiple_of(lo, BLOCK_M)
# causal = False
else:
lo, hi = 0, N_CTX
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
K_block_scale_ptr = tl.advance(K_block_scale_ptr, (lo,))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr)
k_scale = tl.load(K_block_scale_ptr)
qk = tl.dot(q, k).to(tl.float32)
qk = qk * q_scale[:, None]
qk = qk * k_scale
if STAGE == 2:
mask = offs_m[:, None] >= (start_n + offs_n[None, :])
qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
m_ij = tl.maximum(m_i, tl.max(qk, 1))
qk -= m_ij[:, None]
else:
m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
qk = qk * qk_scale - m_ij[:, None]
p = tl.math.exp2(qk)
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
alpha = tl.math.exp2(m_i - m_ij)
l_i = l_i * alpha + l_ij
# -- update output accumulator --
acc = acc * alpha[:, None]
# update acc
v = tl.load(V_block_ptr)
if fp8_v:
p = p.to(tl.float8e5)
else:
p = p.to(tl.float16)
acc = tl.dot(p, v, acc)
# update m_i and l_i
m_i = m_ij
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
K_block_scale_ptr = tl.advance(K_block_scale_ptr, (BLOCK_N,))
return acc, l_i, m_i
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd_int8(Q, K, V, Q_scale, K_scale, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
stride_s1, stride_s2, stride_s3, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
scl_offset = off_z.to(tl.int64) * stride_s1 + off_h.to(tl.int64) * stride_s2
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (0, 1) if V.dtype.element_ty == tl.float8e5 else (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# scale vector pointers
Q_block_scale_ptr = tl.make_block_ptr(
base=Q_scale + scl_offset,
shape=(N_CTX,),
strides=(stride_s3,),
offsets=(start_m * BLOCK_M,),
block_shape=(BLOCK_M,),
order=(0,),
)
K_block_scale_ptr = tl.make_block_ptr(
base=K_scale + scl_offset,
shape=(N_CTX,),
strides=(stride_s3,),
offsets=(0,),
block_shape=(BLOCK_N,),
order=(0,),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
    qk_scale *= 1.44269504  # log2(e) = 1 / ln(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
q_scale = tl.load(Q_block_scale_ptr)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i, m_i = _attn_fwd_inner_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
        # barrier makes it easier for the compiler to schedule the
        # two loops independently
acc, l_i, m_i = _attn_fwd_inner_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
m_i += tl.math.log2(l_i)
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
class _attention_int8(torch.autograd.Function):
@staticmethod # q, k: int8, v: float16, q_scale, k_scale: float16
def forward(ctx, q, k, v, q_scale, k_scale, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
stage = 3 if causal else 1
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd_int8[grid](
q, k, v, q_scale, k_scale, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q_scale.stride(0), q_scale.stride(1), q_scale.stride(2), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
attention_int8 = _attention_int8.apply
|
@triton.jit
def _attn_fwd_int8(Q, K, V, Q_scale, K_scale, sm_scale, M, Out, #
stride_qz, stride_qh, stride_qm, stride_qk, #
stride_kz, stride_kh, stride_kn, stride_kk, #
stride_vz, stride_vh, stride_vk, stride_vn, #
stride_oz, stride_oh, stride_om, stride_on, #
stride_s1, stride_s2, stride_s3, #
Z, H, N_CTX, #
HEAD_DIM: tl.constexpr, #
BLOCK_M: tl.constexpr, #
BLOCK_N: tl.constexpr, #
STAGE: tl.constexpr #
):
tl.static_assert(BLOCK_N <= HEAD_DIM)
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
off_z = off_hz // H
off_h = off_hz % H
qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
scl_offset = off_z.to(tl.int64) * stride_s1 + off_h.to(tl.int64) * stride_s2
# block pointers
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
v_order: tl.constexpr = (0, 1) if V.dtype.element_ty == tl.float8e5 else (1, 0)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, HEAD_DIM),
order=v_order,
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(HEAD_DIM, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(HEAD_DIM, BLOCK_N),
order=(0, 1),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, HEAD_DIM),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, HEAD_DIM),
order=(1, 0),
)
# scale vector pointers
Q_block_scale_ptr = tl.make_block_ptr(
base=Q_scale + scl_offset,
shape=(N_CTX,),
strides=(stride_s3,),
offsets=(start_m * BLOCK_M,),
block_shape=(BLOCK_M,),
order=(0,),
)
K_block_scale_ptr = tl.make_block_ptr(
base=K_scale + scl_offset,
shape=(N_CTX,),
strides=(stride_s3,),
offsets=(0,),
block_shape=(BLOCK_N,),
order=(0,),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
# load scales
qk_scale = sm_scale
    qk_scale *= 1.44269504  # log2(e) = 1 / ln(2)
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
q_scale = tl.load(Q_block_scale_ptr)
# stage 1: off-band
# For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
# For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
if STAGE & 1:
acc, l_i, m_i = _attn_fwd_inner_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# stage 2: on-band
if STAGE & 2:
        # barrier makes it easier for the compiler to schedule the
        # two loops independently
acc, l_i, m_i = _attn_fwd_inner_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, #
start_m, qk_scale, #
BLOCK_M, HEAD_DIM, BLOCK_N, #
2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5 #
)
# epilogue
m_i += tl.math.log2(l_i)
acc = acc / l_i[:, None]
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(m_ptrs, m_i)
tl.store(O_block_ptr, acc.to(Out.type.element_ty))
class _attention_int8(torch.autograd.Function):
@staticmethod # q, k: int8, v: float16, q_scale, k_scale: float16
def forward(ctx, q, k, v, q_scale, k_scale, causal, sm_scale):
# shape constraints
HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
# when v is in float8_e5m2 it is transposed.
HEAD_DIM_V = v.shape[-1]
assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
assert HEAD_DIM_K in {16, 32, 64, 128, 256}
o = torch.empty_like(q)
stage = 3 if causal else 1
extra_kern_args = {}
# Tuning for AMD target
if is_hip():
waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
M = torch.empty((q.shape[0], q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
_attn_fwd_int8[grid](
q, k, v, q_scale, k_scale, sm_scale, M, o, #
q.stride(0), q.stride(1), q.stride(2), q.stride(3), #
k.stride(0), k.stride(1), k.stride(2), k.stride(3), #
v.stride(0), v.stride(1), v.stride(2), v.stride(3), #
o.stride(0), o.stride(1), o.stride(2), o.stride(3), #
q_scale.stride(0), q_scale.stride(1), q_scale.stride(2), #
q.shape[0], q.shape[1], #
N_CTX=q.shape[2], #
HEAD_DIM=HEAD_DIM_K, #
STAGE=stage, #
**extra_kern_args)
ctx.sm_scale = sm_scale
ctx.HEAD_DIM = HEAD_DIM_K
ctx.causal = causal
return o
attention_int8 = _attention_int8.apply
|
tongzhou80/TritonTests
|
blocksparse_csr_mm.py
|
https://github.com/tongzhou80/TritonTests/blob/9e0ceb996ee9caa332384f1e79d4f2f6e5df9124/blocksparse_csr_mm.py
|
import sys
import torch
print('imported torch')
import triton
import triton.language as tl
import utils
from utils import *
@triton.jit
def _kernel_mcsr_mm(a_rowptrs, a_cols, a_vals, b_vals, c_vals,
BM: tl.constexpr, BK: tl.constexpr, BN: tl.constexpr,
nBM: tl.constexpr, nBK: tl.constexpr, nBN: tl.constexpr,
):
m = tl.program_id(0)
n = tl.program_id(1)
a_block_size = BM * BK
b_block_size = BK * BN
a_ptrs = a_vals + a_block_size * nBK * m + \
tl.arange(0, BM)[:, None] * BK + tl.arange(0, BK)[None, :]
b_ptrs = b_vals + b_block_size * n + \
tl.arange(0, BK)[:, None] * BN + tl.arange(0, BN)[None, :]
# a_rowptrs_m = a_rowptrs + m
k_start = tl.load(a_rowptrs+m)
k_end = tl.load(a_rowptrs+m+1)
c = tl.zeros((BM, BN), dtype=tl.float32)
# for k in range(nBK):
# a = tl.load(a_ptrs)
# b = tl.load(b_ptrs)
# c += tl.dot(a, b)
# a_ptrs += a_block_size
# b_ptrs += b_block_size * nBN
for kp in range(k_start, k_end):
k = tl.load(a_cols+kp)
a = tl.load(a_ptrs+a_block_size*k)
b = tl.load(b_ptrs+b_block_size * nBN*k)
c += tl.dot(a, b)
c = c.to(tl.float16)
c_ptrs = c_vals + (m * nBN + n) * BM * BN + \
tl.arange(0, BM)[:, None] * BN + tl.arange(0, BN)[None, :]
tl.store(c_ptrs, c)
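# Illustrative sketch (added, not part of the original file) of the block-CSR
# metadata the kernel walks: a_rowptrs/a_cols index blocks, not scalars, so
# block-row m multiplies only the block-columns in a_cols[a_rowptrs[m]:a_rowptrs[m+1]].
# `block_mask_to_csr` is a stand-in for whatever utils.to_csr_ptrs produces.
def block_mask_to_csr(mask):
    rowptrs = torch.cat([torch.zeros(1, dtype=torch.int64), mask.sum(dim=1).cumsum(0)])
    cols = mask.nonzero(as_tuple=False)[:, 1].contiguous()
    return rowptrs, cols

# block_mask_to_csr(torch.tensor([[1, 0, 0],
#                                 [1, 1, 0],
#                                 [1, 1, 1]]))
# -> rowptrs = [0, 1, 3, 6], cols = [0, 0, 1, 0, 1, 2]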
def mcsr_mm_inner(a_rowptrs, a_cols, a_vals, b_vals, c, num_warps=4, num_stages=3):
nBM, nBK, BM, BK = a_vals.shape
nBK, nBN, BK, BN = b_vals.shape
    # TODO: this may not work when BM does not evenly divide M
    # (or maybe it does, because C will also need to be padded)
M = nBM * BM
N = nBN * BN
grid = (nBM, nBN)
binary = _kernel_mcsr_mm[grid](a_rowptrs, a_cols, a_vals, b_vals, c,
BM, BK, BN, nBM, nBK, nBN,
num_warps=num_warps, num_stages=num_stages
)
#print(binary.asm['ptx'])
return c
def mcsr_mm(a: MCSR, b: MCSR, c, num_warps=4, num_stages=3):
nBM, nBK, BM, BK = a.vals.shape
nBK, nBN, BK, BN = b.vals.shape
    # TODO: this may not work when BM does not evenly divide M
    # (or maybe it does, because C will also need to be padded)
M = nBM * BM
N = nBN * BN
grid = (nBM, nBN)
#print(grid)
binary = _kernel_mcsr_mm[grid](a.rowptrs, a.cols, a.vals, b.vals, c[1],
BM, BK, BN, nBM, nBK, nBN,
num_warps=num_warps, num_stages=num_stages
)
#print(binary.asm['ptx'])
return c
def verify_run():
M = 32
K = M
N = M
BM = 16
BK = BM
BN = BM
a = gen_lower_triangular_mcsr_matrix(M, K, BM, BK)
b = gen_random_mcsr_matrix(K, N, BK, BN, density=1)
c = gen_empty_matrix_dense_blocks(M, N, BM, BN)
a_ref = from_block_format(a.vals)
b_ref = from_block_format(b.vals)
c_ref = torch.mm(a_ref, b_ref)
c = mcsr_mm(a, b, c)
print('verify passes:', torch.allclose(c_ref, from_block_format(c[1])))
def test_random():
M = 1024
K = 1024
N = M
BMs = [32, 64, 128, 256]
BKs = [32, 64, 128, 256]
BNs = [32, 64, 128, 256]
#stages = [1,2,3,4,5]
#warps = [1,2,4,8]
stages = [2,3,4,5]
warps = [1,2,4]
TEST_RUN = False
if TEST_RUN:
BMs, BKs, BNs = [64], [64], [64]
stages, warps = [1,2,3,4,5], [1,2,4]
for BM in BMs:
for BK in BKs:
for BN in BNs:
#if BM * K != BK * M:
#continue
a = gen_random_mcsr_matrix(M, K, BM, BK, density=1)
#a = gen_lower_triangular_mcsr_matrix(M, K, BM, BK)
#a = gen_lower_half_mcsr_matrix(M, K, BM, BK)
b = gen_random_mcsr_matrix(K, N, BK, BN, density=1)
c = gen_empty_matrix_dense_blocks(M, N, BM, BN)
a_ref = from_block_format(a.vals)
b_ref = from_block_format(b.vals)
c_ref = torch.empty(M, N, dtype=torch.float16, device='cuda')
ms, _, _ = triton.testing.do_bench(lambda: torch.mm(a_ref, b_ref, out=c_ref))
print(f'torch mm: {ms:.4f}')
times = []
ms = torch.inf
try:
for num_stages in stages:
for num_warps in warps:
ms, _, _ = triton.testing.do_bench(lambda: mcsr_mm(a, b, c, num_warps, num_stages), rep=50)
times.append((ms, BM, BK, BN, num_stages, num_warps))
except Exception as e:
print('run triton failed')
continue
print('verify passes:', torch.allclose(c_ref, from_block_format(c[1])))
times.sort(key=lambda x: x[0])
print(times[0])
print(f'blocksparse mm: {times[0][0]:.4f} ({BM} x {BK} x {BN})')
def test_lower_triangular():
M = 3072
K = M
N = M
dtype = torch.float16
a = torch.randn([M, K], dtype=dtype, device='cuda')
a[M//2:, :] = 0
#a[:, K//2:] = 0
#a = torch.tril(a)
b = torch.randn([K, N], dtype=dtype, device='cuda')
c_ref = torch.empty(M, N, dtype=dtype, device='cuda')
ms, _, _ = triton.testing.do_bench(lambda: torch.mm(a, b, out=c_ref))
print(f'torch mm: {ms:.4f}')
BMs = [32, 64, 128, 256]
BKs = [16, 32, 64, 128, 256]
BNs = [32, 64, 128]
#stages = [1,2,3,4,5]
#warps = [1,2,4,8]
stages = [2,3,4,5]
warps = [1,2,4]
TEST_RUN = False
if TEST_RUN:
s = 16
BMs, BKs, BNs = [s], [s], [s]
stages, warps = [1,2,3,4,5], [1,2,4]
best_time = torch.inf
for BM in BMs:
for BK in BKs:
for BN in BNs:
if BM * K != BK * M:
continue
a_block, a_mask = utils.to_block_format_with_mask(a, BM, BK)
#print(a_mask)
a_mask_rowptrs, a_mask_cols = utils.to_csr_ptrs(a_mask)
b_block = utils.to_block_format(b, BK, BN)
#print(a_mask_rowptrs, a_mask_cols)
c = gen_empty_matrix_dense_blocks(M, N, BM, BN)
times = []
ms = torch.inf
try:
for num_stages in stages:
for num_warps in warps:
ms, _, _ = triton.testing.do_bench(lambda: mcsr_mm_inner(a_mask_rowptrs, a_mask_cols, a_block, b_block, c[1], num_warps, num_stages), rep=50)
times.append((ms, BM, BK, BN, num_stages, num_warps))
except Exception as e:
print('run triton failed')
continue
verified = torch.allclose(c_ref, utils.from_block_format(c[1]))
print('verify passes:', verified)
if verified:
times.sort(key=lambda x: x[0])
print(f'info: blocksparse mm: {times[0][0]:.4f} ({BM} x {BK} x {BN})')
if times[0][0] < best_time:
best_time = times[0][0]
print(f'blocksparse mm: {best_time:.5f}')
#test_random()
test_lower_triangular()
|
@triton.jit
def _kernel_mcsr_mm(a_rowptrs, a_cols, a_vals, b_vals, c_vals,
BM: tl.constexpr, BK: tl.constexpr, BN: tl.constexpr,
nBM: tl.constexpr, nBK: tl.constexpr, nBN: tl.constexpr,
):
m = tl.program_id(0)
n = tl.program_id(1)
a_block_size = BM * BK
b_block_size = BK * BN
a_ptrs = a_vals + a_block_size * nBK * m + \
tl.arange(0, BM)[:, None] * BK + tl.arange(0, BK)[None, :]
b_ptrs = b_vals + b_block_size * n + \
tl.arange(0, BK)[:, None] * BN + tl.arange(0, BN)[None, :]
# a_rowptrs_m = a_rowptrs + m
k_start = tl.load(a_rowptrs+m)
k_end = tl.load(a_rowptrs+m+1)
c = tl.zeros((BM, BN), dtype=tl.float32)
# for k in range(nBK):
# a = tl.load(a_ptrs)
# b = tl.load(b_ptrs)
# c += tl.dot(a, b)
# a_ptrs += a_block_size
# b_ptrs += b_block_size * nBN
for kp in range(k_start, k_end):
k = tl.load(a_cols+kp)
a = tl.load(a_ptrs+a_block_size*k)
b = tl.load(b_ptrs+b_block_size * nBN*k)
c += tl.dot(a, b)
c = c.to(tl.float16)
c_ptrs = c_vals + (m * nBN + n) * BM * BN + \
tl.arange(0, BM)[:, None] * BN + tl.arange(0, BN)[None, :]
tl.store(c_ptrs, c)
def mcsr_mm_inner(a_rowptrs, a_cols, a_vals, b_vals, c, num_warps=4, num_stages=3):
nBM, nBK, BM, BK = a_vals.shape
nBK, nBN, BK, BN = b_vals.shape
    # TODO: this may not work when BM does not evenly divide M
    # (or maybe it does, because C will also need to be padded)
M = nBM * BM
N = nBN * BN
grid = (nBM, nBN)
binary = _kernel_mcsr_mm[grid](a_rowptrs, a_cols, a_vals, b_vals, c,
BM, BK, BN, nBM, nBK, nBN,
num_warps=num_warps, num_stages=num_stages
)
#print(binary.asm['ptx'])
return c
def mcsr_mm(a: MCSR, b: MCSR, c, num_warps=4, num_stages=3):
nBM, nBK, BM, BK = a.vals.shape
nBK, nBN, BK, BN = b.vals.shape
    # TODO: this may not work when BM does not evenly divide M
    # (or maybe it does, because C will also need to be padded)
M = nBM * BM
N = nBN * BN
grid = (nBM, nBN)
#print(grid)
binary = _kernel_mcsr_mm[grid](a.rowptrs, a.cols, a.vals, b.vals, c[1],
BM, BK, BN, nBM, nBK, nBN,
num_warps=num_warps, num_stages=num_stages
)
#print(binary.asm['ptx'])
return c
def verify_run():
M = 32
K = M
N = M
BM = 16
BK = BM
BN = BM
a = gen_lower_triangular_mcsr_matrix(M, K, BM, BK)
b = gen_random_mcsr_matrix(K, N, BK, BN, density=1)
c = gen_empty_matrix_dense_blocks(M, N, BM, BN)
a_ref = from_block_format(a.vals)
b_ref = from_block_format(b.vals)
c_ref = torch.mm(a_ref, b_ref)
c = mcsr_mm(a, b, c)
print('verify passes:', torch.allclose(c_ref, from_block_format(c[1])))
def test_random():
M = 1024
K = 1024
N = M
BMs = [32, 64, 128, 256]
BKs = [32, 64, 128, 256]
BNs = [32, 64, 128, 256]
#stages = [1,2,3,4,5]
#warps = [1,2,4,8]
stages = [2,3,4,5]
warps = [1,2,4]
TEST_RUN = False
if TEST_RUN:
BMs, BKs, BNs = [64], [64], [64]
stages, warps = [1,2,3,4,5], [1,2,4]
for BM in BMs:
for BK in BKs:
for BN in BNs:
#if BM * K != BK * M:
#continue
a = gen_random_mcsr_matrix(M, K, BM, BK, density=1)
#a = gen_lower_triangular_mcsr_matrix(M, K, BM, BK)
#a = gen_lower_half_mcsr_matrix(M, K, BM, BK)
b = gen_random_mcsr_matrix(K, N, BK, BN, density=1)
c = gen_empty_matrix_dense_blocks(M, N, BM, BN)
a_ref = from_block_format(a.vals)
b_ref = from_block_format(b.vals)
c_ref = torch.empty(M, N, dtype=torch.float16, device='cuda')
ms, _, _ = triton.testing.do_bench(lambda: torch.mm(a_ref, b_ref, out=c_ref))
print(f'torch mm: {ms:.4f}')
times = []
ms = torch.inf
try:
for num_stages in stages:
for num_warps in warps:
ms, _, _ = triton.testing.do_bench(lambda: mcsr_mm(a, b, c, num_warps, num_stages), rep=50)
times.append((ms, BM, BK, BN, num_stages, num_warps))
except Exception as e:
print('run triton failed')
continue
print('verify passes:', torch.allclose(c_ref, from_block_format(c[1])))
times.sort(key=lambda x: x[0])
print(times[0])
print(f'blocksparse mm: {times[0][0]:.4f} ({BM} x {BK} x {BN})')
def test_lower_triangular():
M = 3072
K = M
N = M
dtype = torch.float16
a = torch.randn([M, K], dtype=dtype, device='cuda')
a[M//2:, :] = 0
#a[:, K//2:] = 0
#a = torch.tril(a)
b = torch.randn([K, N], dtype=dtype, device='cuda')
c_ref = torch.empty(M, N, dtype=dtype, device='cuda')
ms, _, _ = triton.testing.do_bench(lambda: torch.mm(a, b, out=c_ref))
print(f'torch mm: {ms:.4f}')
BMs = [32, 64, 128, 256]
BKs = [16, 32, 64, 128, 256]
BNs = [32, 64, 128]
#stages = [1,2,3,4,5]
#warps = [1,2,4,8]
stages = [2,3,4,5]
warps = [1,2,4]
TEST_RUN = False
if TEST_RUN:
s = 16
BMs, BKs, BNs = [s], [s], [s]
stages, warps = [1,2,3,4,5], [1,2,4]
best_time = torch.inf
for BM in BMs:
for BK in BKs:
for BN in BNs:
if BM * K != BK * M:
continue
a_block, a_mask = utils.to_block_format_with_mask(a, BM, BK)
#print(a_mask)
a_mask_rowptrs, a_mask_cols = utils.to_csr_ptrs(a_mask)
b_block = utils.to_block_format(b, BK, BN)
#print(a_mask_rowptrs, a_mask_cols)
c = gen_empty_matrix_dense_blocks(M, N, BM, BN)
times = []
ms = torch.inf
try:
for num_stages in stages:
for num_warps in warps:
ms, _, _ = triton.testing.do_bench(lambda: mcsr_mm_inner(a_mask_rowptrs, a_mask_cols, a_block, b_block, c[1], num_warps, num_stages), rep=50)
times.append((ms, BM, BK, BN, num_stages, num_warps))
except Exception as e:
print('run triton failed')
continue
verified = torch.allclose(c_ref, utils.from_block_format(c[1]))
print('verify passes:', verified)
if verified:
times.sort(key=lambda x: x[0])
print(f'info: blocksparse mm: {times[0][0]:.4f} ({BM} x {BK} x {BN})')
if times[0][0] < best_time:
best_time = times[0][0]
print(f'blocksparse mm: {best_time:.5f}')
#test_random()
test_lower_triangular()
|
itsdaniele/kernels
|
flash_attn_v1.py
|
https://github.com/itsdaniele/kernels/blob/afb01fb032ebe64a9fb1a4ada8d0e3d1df79615a/flash_attn_v1.py
|
"""
Fused Attention
===============
This is a Triton implementation of the Flash Attention algorithm
(see: Dao et al., https://arxiv.org/pdf/2205.14135v2.pdf; Rabe and Staats https://arxiv.org/pdf/2112.05682v2.pdf)
"""
import pytest
import torch
import triton
import triton.language as tl
@triton.jit
def _fwd_kernel(
Q,
K,
V,
sm_scale,
L,
M,
Out,
stride_qz,
stride_qh,
stride_qm,
stride_qk,
stride_kz,
stride_kh,
stride_kn,
stride_kk,
stride_vz,
stride_vh,
stride_vk,
stride_vn,
stride_oz,
stride_oh,
stride_om,
stride_on,
Z,
H,
N_CTX,
BLOCK_M: tl.constexpr, # how many queries on a query block?
BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr, # how many keys on a key block?
MODE: tl.constexpr,
):
start_m = tl.program_id(0) # which query are we starting from?
off_hz = tl.program_id(1) # which head are we processing in the batch?
    # offset that takes us from the pointer to the query, key or value tensor to the current query/key/value.
    # stride_qh is the number of elements it takes to get to the next head.
qvk_offset = off_hz * stride_qh
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset, # start from current head.
shape=(
N_CTX,
BLOCK_DMODEL,
), # This is the shape of the underlying tensor. We are going to be processing BLOCK_M queries at a time.
strides=(stride_qm, stride_qk),
offsets=(
start_m * BLOCK_M,
0,
), # start from the current query, depending on the program id.
block_shape=(
BLOCK_M,
BLOCK_DMODEL,
), # each block of query has shape (BLOCK_M, BLOCK_DMODEL)
order=(1, 0),
)
    # Here, I am not sure why the shape is (BLOCK_DMODEL, N_CTX) and not (N_CTX, BLOCK_DMODEL); I suppose there is some optimization purpose.
    # In any case, this works because order is (0, 1), so Triton knows the block is laid out in memory with the first axis as the inner dimension.
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset, # start from current head.
shape=(BLOCK_DMODEL, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(BLOCK_DMODEL, BLOCK_N),
order=(0, 1),
)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL),
order=(1, 0),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)  # [0, 1, ..., BLOCK_N - 1]
# initialize pointer to m and l
    m_i = (
        tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    )  # shape is (BLOCK_M,). In the context of attention, m is the running maximum of the query-key dot products.
    l_i = tl.zeros(
        [BLOCK_M], dtype=tl.float32
    )  # in flash attention, l is the running softmax denominator (sum of exponentiated scores).
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# causal check on every loop iteration can be expensive
# and peeling the last iteration of the loop does not work well with ptxas
# so we have a mode to do the causal check in a separate kernel entirely
if MODE == 0: # entire non-causal attention
lo, hi = 0, N_CTX
if MODE == 1: # entire causal attention
lo, hi = (
0,
(start_m + 1) * BLOCK_M,
) # if working with causal attention, we only need to look at the first start_m blocks.
if MODE == 2: # off band-diagonal
lo, hi = 0, start_m * BLOCK_M
if MODE == 3: # on band-diagonal
l_ptrs = L + off_hz * N_CTX + offs_m
m_ptrs = M + off_hz * N_CTX + offs_m
m_i = tl.load(m_ptrs)
l_i = tl.load(l_ptrs)
acc += tl.load(O_block_ptr).to(tl.float32)
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
# credits to: Adam P. Goucher (https://github.com/apgoucher):
    # scale sm_scale by log_2(e) and use
# 2^x instead of exp in the loop because CSE and LICM
# don't work as expected with `exp` in the loop
qk_scale = sm_scale * 1.44269504
# load q: it will stay in SRAM throughout
q = tl.load(
Q_block_ptr
) # load block of queries. This has shape (BLOCK_M, BLOCK_DMODEL).
q = (q * qk_scale).to(tl.float16)
# advance block pointers to first iteration of the loop
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr) # load block of keys. Shape is (BLOCK_DMODEL, BLOCK_N)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k)
if MODE == 1 or MODE == 3: # causal masking within the block
qk = tl.where(
offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf")
) # if we are in the causal mode, we need to mask the values that are in the future.
# -- compute m_ij, p, l_ij
m_ij = tl.max(qk, 1)
p = tl.math.exp2(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.math.exp2(m_i - m_i_new)
beta = tl.math.exp2(m_ij - m_i_new)
l_i *= alpha
l_i_new = l_i + beta * l_ij
# scale p
p_scale = beta / l_i_new
p = p * p_scale[:, None]
# scale acc
acc_scale = l_i / l_i_new
acc = acc * acc_scale[:, None]
# update acc
v = tl.load(V_block_ptr)
p = p.to(tl.float16)
acc += tl.dot(p, v)
# update m_i and l_i
l_i = l_i_new
m_i = m_i_new
# update pointers
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
# write back l and m
l_ptrs = L + off_hz * N_CTX + offs_m
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(l_ptrs, l_i)
tl.store(m_ptrs, m_i)
# write back O
tl.store(O_block_ptr, acc.to(tl.float16))
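# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original file): a plain-PyTorch reference
# for the running (m_i, l_i, acc) update performed in the loop above, to make
# the rescaling bookkeeping concrete. The kernel uses exp2 with a log2(e)
# prescale; plain exp is used here for clarity, and all names are illustrative.
# Assumes `scores` is a finite (M, N) matrix and `v` is (N, D); after every
# chunk, `acc` equals softmax(scores_so_far) @ v_so_far, so the function
# returns softmax(scores, dim=-1) @ v.
# ---------------------------------------------------------------------------
def _online_softmax_reference(scores, v, chunk=64):
    m_rows = scores.shape[0]
    m_i = torch.full((m_rows,), float("-inf"), device=scores.device)
    l_i = torch.zeros(m_rows, device=scores.device)
    acc = torch.zeros(m_rows, v.shape[1], device=scores.device)
    for start_n in range(0, scores.shape[1], chunk):
        qk = scores[:, start_n:start_n + chunk].float()
        m_ij = qk.max(dim=1).values
        p = torch.exp(qk - m_ij[:, None])
        l_ij = p.sum(dim=1)
        m_i_new = torch.maximum(m_i, m_ij)
        alpha = torch.exp(m_i - m_i_new)        # rescales the old running stats
        beta = torch.exp(m_ij - m_i_new)        # rescales the new chunk
        l_i = l_i * alpha
        l_i_new = l_i + beta * l_ij
        acc = acc * (l_i / l_i_new)[:, None]    # renormalize the running output
        acc = acc + (p * (beta / l_i_new)[:, None]) @ v[start_n:start_n + chunk].float()
        l_i, m_i = l_i_new, m_i_new
    return acc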
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, causal, sm_scale):
#BLOCK = 64
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
assert Lq == Lk and Lk == Lv
assert Lk in {16, 32, 64, 128}
BLOCK_M = 64
BLOCK_N = 64 if Lk <= 64 else 32
num_stages = 4 if Lk <= 64 else 3
o = torch.empty_like(q)
        # We launch ceil(N_CTX / BLOCK_M) query-block programs along grid axis 0; tl.program_id(0) tells the kernel which query block it handles.
        # For each of those there is one program per (batch, head) pair along axis 1; tl.program_id(1) indexes them.
grid = (triton.cdiv(q.shape[2], BLOCK_M), q.shape[0] * q.shape[1], 1)
L = torch.empty(
(q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32
)
m = torch.empty(
(q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32
)
num_warps = 4 if Lk <= 64 else 8
if causal:
modes = [1] if q.shape[2] <= 2048 else [2, 3]
else:
modes = [0]
for mode in modes:
_fwd_kernel[grid](
q,
k,
v,
sm_scale,
L,
m,
o,
q.stride(0),
q.stride(1),
q.stride(2),
q.stride(3),
k.stride(0),
k.stride(1),
k.stride(2),
k.stride(3),
v.stride(0),
v.stride(1),
v.stride(2),
v.stride(3),
o.stride(0),
o.stride(1),
o.stride(2),
o.stride(3),
q.shape[0],
q.shape[1],
q.shape[2],
BLOCK_M=BLOCK_M,
BLOCK_N=BLOCK_N,
BLOCK_DMODEL=Lk,
MODE=mode,
num_warps=num_warps,
num_stages=num_stages,
)
ctx.save_for_backward(q, k, v, o, L, m)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = Lk
ctx.causal = causal
return o
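# Editorial worked example (illustrative, not part of the original file): for q
# of shape (Z, H, N_CTX, D_HEAD) = (6, 9, 1024, 64) and BLOCK_M = 64, the launch
# grid above is (triton.cdiv(1024, 64), 6 * 9, 1) = (16, 54, 1), i.e. 16 query
# blocks per (batch, head) pair and 54 such pairs.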
attention = _attention.apply
@pytest.mark.parametrize("Z, H, N_CTX, D_HEAD", [(6, 9, 1024, 64)])
@pytest.mark.parametrize("causal", [False, True])
def test_op(Z, H, N_CTX, D_HEAD, causal, dtype=torch.float16):
torch.manual_seed(20)
q = (
torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda")
.normal_(mean=0.0, std=0.5)
.requires_grad_()
)
k = (
torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda")
.normal_(mean=0.0, std=0.5)
.requires_grad_()
)
v = (
torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda")
.normal_(mean=0.0, std=0.5)
.requires_grad_()
)
sm_scale = 0.5
# reference implementation
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
# triton implementation
tri_out = attention(q, k, v, causal, sm_scale).half()
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64
# vary seq length for fixed head and batch=4
configs = [
triton.testing.Benchmark(
x_names=["N_CTX"],
x_vals=[2**i for i in range(10, 15)],
line_arg="provider",
line_vals=["triton", "flash"],
line_names=["Triton", "Flash"],
styles=[("red", "-"), ("blue", "-")],
ylabel="ms",
plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-fwd",
args={
"H": N_HEADS,
"BATCH": BATCH,
"D_HEAD": D_HEAD,
"dtype": torch.float16,
"mode": "fwd",
"causal": causal,
},
)
for causal in [False, True]
]
@triton.testing.perf_report(configs)
def bench_flash_attention(
BATCH,
H,
N_CTX,
D_HEAD,
causal,
mode,
provider,
dtype=torch.float16,
):
assert mode == "fwd"
warmup = 25
rep = 100
q = torch.randn(
(BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True
)
k = torch.randn(
(BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True
)
v = torch.randn(
(BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True
)
sm_scale = 1.3
if provider == "triton":
fn = lambda: attention(q, k, v, causal, sm_scale) # noqa: E731
else:
fn = lambda: torch.nn.functional.scaled_dot_product_attention( # noqa: E731
q, k, v, is_causal=causal, scale=sm_scale
)
# Benchmark
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * D_HEAD
total_flops = 2 * flops_per_matmul
if causal:
total_flops *= 0.5
return total_flops / ms * 1e-9
# bench_flash_attention.run(save_path=".", print_data=True)
|
@triton.jit
def _fwd_kernel(
Q,
K,
V,
sm_scale,
L,
M,
Out,
stride_qz,
stride_qh,
stride_qm,
stride_qk,
stride_kz,
stride_kh,
stride_kn,
stride_kk,
stride_vz,
stride_vh,
stride_vk,
stride_vn,
stride_oz,
stride_oh,
stride_om,
stride_on,
Z,
H,
N_CTX,
BLOCK_M: tl.constexpr, # how many queries on a query block?
BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr, # how many keys on a key block?
MODE: tl.constexpr,
):
start_m = tl.program_id(0) # which query are we starting from?
off_hz = tl.program_id(1) # which head are we processing in the batch?
    # offset that takes us from the base pointer of the query, key or value tensor to the current (batch, head).
    # stride_qh is the number of elements to skip to move to the next head.
qvk_offset = off_hz * stride_qh
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset, # start from current head.
shape=(
N_CTX,
BLOCK_DMODEL,
), # This is the shape of the underlying tensor. We are going to be processing BLOCK_M queries at a time.
strides=(stride_qm, stride_qk),
offsets=(
start_m * BLOCK_M,
0,
), # start from the current query, depending on the program id.
block_shape=(
BLOCK_M,
BLOCK_DMODEL,
), # each block of query has shape (BLOCK_M, BLOCK_DMODEL)
order=(1, 0),
)
    # The shape here is (BLOCK_DMODEL, N_CTX) rather than (N_CTX, BLOCK_DMODEL): K is declared transposed so that tl.dot(q, k) computes Q @ K^T without an explicit transpose.
    # This works because order is (0, 1), which tells Triton the block is laid out in memory with the first axis being the inner (fastest-varying) dimension.
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset, # start from current head.
shape=(BLOCK_DMODEL, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(BLOCK_DMODEL, BLOCK_N),
order=(0, 1),
)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL),
order=(1, 0),
)
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0),
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)  # [0, 1, ..., BLOCK_N - 1]
# initialize pointer to m and l
m_i = (
tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    )  # shape is (BLOCK_M,). m is the running row-wise maximum of the query-key dot products, kept for a numerically stable softmax.
l_i = tl.zeros(
[BLOCK_M], dtype=tl.float32
    )  # in flash attention, l is the running row-wise sum of exponentials, i.e. the softmax normalizer.
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# causal check on every loop iteration can be expensive
# and peeling the last iteration of the loop does not work well with ptxas
# so we have a mode to do the causal check in a separate kernel entirely
if MODE == 0: # entire non-causal attention
lo, hi = 0, N_CTX
if MODE == 1: # entire causal attention
lo, hi = (
0,
(start_m + 1) * BLOCK_M,
        )  # with causal attention this program only needs the first start_m + 1 key blocks, up to and including the diagonal block.
if MODE == 2: # off band-diagonal
lo, hi = 0, start_m * BLOCK_M
if MODE == 3: # on band-diagonal
l_ptrs = L + off_hz * N_CTX + offs_m
m_ptrs = M + off_hz * N_CTX + offs_m
m_i = tl.load(m_ptrs)
l_i = tl.load(l_ptrs)
acc += tl.load(O_block_ptr).to(tl.float32)
lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
# credits to: Adam P. Goucher (https://github.com/apgoucher):
# scale sm_scale by 1/log_2(e) and use
# 2^x instead of exp in the loop because CSE and LICM
# don't work as expected with `exp` in the loop
qk_scale = sm_scale * 1.44269504
# load q: it will stay in SRAM throughout
q = tl.load(
Q_block_ptr
) # load block of queries. This has shape (BLOCK_M, BLOCK_DMODEL).
q = (q * qk_scale).to(tl.float16)
# advance block pointers to first iteration of the loop
K_block_ptr = tl.advance(K_block_ptr, (0, lo))
V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
# loop over k, v and update accumulator
for start_n in range(lo, hi, BLOCK_N):
start_n = tl.multiple_of(start_n, BLOCK_N)
# -- compute qk ----
k = tl.load(K_block_ptr) # load block of keys. Shape is (BLOCK_DMODEL, BLOCK_N)
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, k)
if MODE == 1 or MODE == 3: # causal masking within the block
qk = tl.where(
offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf")
) # if we are in the causal mode, we need to mask the values that are in the future.
# -- compute m_ij, p, l_ij
m_ij = tl.max(qk, 1)
p = tl.math.exp2(qk - m_ij[:, None])
l_ij = tl.sum(p, 1)
# -- update m_i and l_i
m_i_new = tl.maximum(m_i, m_ij)
alpha = tl.math.exp2(m_i - m_i_new)
beta = tl.math.exp2(m_ij - m_i_new)
l_i *= alpha
l_i_new = l_i + beta * l_ij
# scale p
p_scale = beta / l_i_new
p = p * p_scale[:, None]
# scale acc
acc_scale = l_i / l_i_new
acc = acc * acc_scale[:, None]
# update acc
v = tl.load(V_block_ptr)
p = p.to(tl.float16)
acc += tl.dot(p, v)
# update m_i and l_i
l_i = l_i_new
m_i = m_i_new
# update pointers
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
# write back l and m
l_ptrs = L + off_hz * N_CTX + offs_m
m_ptrs = M + off_hz * N_CTX + offs_m
tl.store(l_ptrs, l_i)
tl.store(m_ptrs, m_i)
# write back O
tl.store(O_block_ptr, acc.to(tl.float16))
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, causal, sm_scale):
#BLOCK = 64
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
assert Lq == Lk and Lk == Lv
assert Lk in {16, 32, 64, 128}
BLOCK_M = 64
BLOCK_N = 64 if Lk <= 64 else 32
num_stages = 4 if Lk <= 64 else 3
o = torch.empty_like(q)
        # We launch ceil(N_CTX / BLOCK_M) query-block programs along grid axis 0; tl.program_id(0) tells the kernel which query block it handles.
        # For each of those there is one program per (batch, head) pair along axis 1; tl.program_id(1) indexes them.
grid = (triton.cdiv(q.shape[2], BLOCK_M), q.shape[0] * q.shape[1], 1)
L = torch.empty(
(q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32
)
m = torch.empty(
(q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32
)
num_warps = 4 if Lk <= 64 else 8
if causal:
modes = [1] if q.shape[2] <= 2048 else [2, 3]
else:
modes = [0]
for mode in modes:
_fwd_kernel[grid](
q,
k,
v,
sm_scale,
L,
m,
o,
q.stride(0),
q.stride(1),
q.stride(2),
q.stride(3),
k.stride(0),
k.stride(1),
k.stride(2),
k.stride(3),
v.stride(0),
v.stride(1),
v.stride(2),
v.stride(3),
o.stride(0),
o.stride(1),
o.stride(2),
o.stride(3),
q.shape[0],
q.shape[1],
q.shape[2],
BLOCK_M=BLOCK_M,
BLOCK_N=BLOCK_N,
BLOCK_DMODEL=Lk,
MODE=mode,
num_warps=num_warps,
num_stages=num_stages,
)
ctx.save_for_backward(q, k, v, o, L, m)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = Lk
ctx.causal = causal
return o
attention = _attention.apply
@pytest.mark.parametrize("Z, H, N_CTX, D_HEAD", [(6, 9, 1024, 64)])
@pytest.mark.parametrize("causal", [False, True])
def test_op(Z, H, N_CTX, D_HEAD, causal, dtype=torch.float16):
torch.manual_seed(20)
q = (
torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda")
.normal_(mean=0.0, std=0.5)
.requires_grad_()
)
k = (
torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda")
.normal_(mean=0.0, std=0.5)
.requires_grad_()
)
v = (
torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda")
.normal_(mean=0.0, std=0.5)
.requires_grad_()
)
sm_scale = 0.5
# reference implementation
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
# triton implementation
tri_out = attention(q, k, v, causal, sm_scale).half()
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64
# vary seq length for fixed head and batch=4
configs = [
triton.testing.Benchmark(
x_names=["N_CTX"],
x_vals=[2**i for i in range(10, 15)],
line_arg="provider",
line_vals=["triton", "flash"],
line_names=["Triton", "Flash"],
styles=[("red", "-"), ("blue", "-")],
ylabel="ms",
plot_name=f"fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-fwd",
args={
"H": N_HEADS,
"BATCH": BATCH,
"D_HEAD": D_HEAD,
"dtype": torch.float16,
"mode": "fwd",
"causal": causal,
},
)
for causal in [False, True]
]
@triton.testing.perf_report(configs)
def bench_flash_attention(
BATCH,
H,
N_CTX,
D_HEAD,
causal,
mode,
provider,
dtype=torch.float16,
):
assert mode == "fwd"
warmup = 25
rep = 100
q = torch.randn(
(BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True
)
k = torch.randn(
(BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True
)
v = torch.randn(
(BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True
)
sm_scale = 1.3
if provider == "triton":
fn = lambda: attention(q, k, v, causal, sm_scale) # noqa: E731
else:
fn = lambda: torch.nn.functional.scaled_dot_product_attention( # noqa: E731
q, k, v, is_causal=causal, scale=sm_scale
)
# Benchmark
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
flops_per_matmul = 2.0 * BATCH * H * N_CTX * N_CTX * D_HEAD
total_flops = 2 * flops_per_matmul
if causal:
total_flops *= 0.5
return total_flops / ms * 1e-9
# bench_flash_attention.run(save_path=".", print_data=True)
|
hahnyuan/MiscTritonCuda
|
triton_ops/add.py
|
https://github.com/hahnyuan/MiscTritonCuda/blob/e2418b68f26d8b07701b7a23249c89d55ddd868b/triton_ops/add.py
|
import torch
import triton
import triton.language as tl
"""
https://triton-lang.org/main/getting-started/tutorials/01-vector-add.html
"""
@triton.jit
def add_kernel(x_ptr, # *Pointer* to first input vector.
y_ptr, # *Pointer* to second input vector.
output_ptr, # *Pointer* to output vector.
n_elements, # Size of the vector.
BLOCK_SIZE: tl.constexpr, # Number of elements each program should process.
# NOTE: `constexpr` so it can be used as a shape value.
):
# There are multiple 'programs' processing different data. We identify which program
# we are here:
pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.
# This program will process inputs that are offset from the initial data.
# For instance, if you had a vector of length 256 and block_size of 64, the programs
# would each access the elements [0:64, 64:128, 128:192, 192:256].
# Note that offsets is a list of pointers:
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
# Create a mask to guard memory operations against out-of-bounds accesses.
mask = offsets < n_elements
# Load x and y from DRAM, masking out any extra elements in case the input is not a
# multiple of the block size.
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
# Write x + y back to DRAM.
tl.store(output_ptr + offsets, output, mask=mask)
def add(x: torch.Tensor, y: torch.Tensor):
# We need to preallocate the output.
output = torch.empty_like(x)
assert x.is_cuda and y.is_cuda and output.is_cuda
n_elements = output.numel()
# The SPMD launch grid denotes the number of kernel instances that run in parallel.
# It is analogous to CUDA launch grids. It can be either Tuple[int], or Callable(metaparameters) -> Tuple[int].
# In this case, we use a 1D grid where the size is the number of blocks:
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
# NOTE:
# - Each torch.tensor object is implicitly converted into a pointer to its first element.
# - `triton.jit`'ed functions can be indexed with a launch grid to obtain a callable GPU kernel.
# - Don't forget to pass meta-parameters as keywords arguments.
add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=1024)
    # We return a handle to the output but, since `torch.cuda.synchronize()` hasn't been called, the kernel is still
    # running asynchronously at this point.
return output
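# Editorial sketch (illustrative, not part of the original file): the grid above
# amounts to ceil(n_elements / BLOCK_SIZE) program instances, e.g. 97 programs
# for n_elements = 98432 and BLOCK_SIZE = 1024. A plain-PyTorch emulation of
# what a single program instance does, for intuition only:
def _emulate_one_program(x, y, output, pid, block_size=1024):
    n_elements = output.numel()
    block_start = pid * block_size
    offsets = block_start + torch.arange(block_size, device=x.device)
    mask = offsets < n_elements              # guard the ragged last block
    idx = offsets[mask]
    output[idx] = x[idx] + y[idx]            # masked load, add, masked store
    return output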
if __name__=='__main__':
torch.manual_seed(0)
size = 98432
x = torch.rand(size, device='cuda')
y = torch.rand(size, device='cuda')
output_torch = x + y
output_triton = add(x, y)
print(output_torch)
print(output_triton)
print(f'The maximum difference between torch and triton is '
f'{torch.max(torch.abs(output_torch - output_triton))}')
# benchmark
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['size'], # Argument names to use as an x-axis for the plot.
x_vals=[2**i for i in range(12, 28, 1)], # Different possible values for `x_name`.
x_log=True, # x axis is logarithmic.
line_arg='provider', # Argument name whose value corresponds to a different line in the plot.
line_vals=['triton', 'torch'], # Possible values for `line_arg`.
line_names=['Triton', 'Torch'], # Label name for the lines.
styles=[('blue', '-'), ('green', '-')], # Line styles.
ylabel='GB/s', # Label name for the y-axis.
plot_name='vector-add-performance', # Name for the plot. Used also as a file name for saving the plot.
args={}, # Values for function arguments not in `x_names` and `y_name`.
))
def benchmark(size, provider):
x = torch.rand(size, device='cuda', dtype=torch.float32)
y = torch.rand(size, device='cuda', dtype=torch.float32)
quantiles = [0.5, 0.2, 0.8]
if provider == 'torch':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: x + y, quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: add(x, y), quantiles=quantiles)
gbps = lambda ms: 12 * size / ms * 1e-6
return gbps(ms), gbps(max_ms), gbps(min_ms)
benchmark.run(print_data=True, show_plots=True)
|
@triton.jit
def add_kernel(x_ptr, # *Pointer* to first input vector.
y_ptr, # *Pointer* to second input vector.
output_ptr, # *Pointer* to output vector.
n_elements, # Size of the vector.
BLOCK_SIZE: tl.constexpr, # Number of elements each program should process.
# NOTE: `constexpr` so it can be used as a shape value.
):
# There are multiple 'programs' processing different data. We identify which program
# we are here:
pid = tl.program_id(axis=0) # We use a 1D launch grid so axis is 0.
# This program will process inputs that are offset from the initial data.
# For instance, if you had a vector of length 256 and block_size of 64, the programs
# would each access the elements [0:64, 64:128, 128:192, 192:256].
# Note that offsets is a list of pointers:
block_start = pid * BLOCK_SIZE
offsets = block_start + tl.arange(0, BLOCK_SIZE)
# Create a mask to guard memory operations against out-of-bounds accesses.
mask = offsets < n_elements
# Load x and y from DRAM, masking out any extra elements in case the input is not a
# multiple of the block size.
x = tl.load(x_ptr + offsets, mask=mask)
y = tl.load(y_ptr + offsets, mask=mask)
output = x + y
# Write x + y back to DRAM.
tl.store(output_ptr + offsets, output, mask=mask)
def add(x: torch.Tensor, y: torch.Tensor):
# We need to preallocate the output.
output = torch.empty_like(x)
assert x.is_cuda and y.is_cuda and output.is_cuda
n_elements = output.numel()
# The SPMD launch grid denotes the number of kernel instances that run in parallel.
# It is analogous to CUDA launch grids. It can be either Tuple[int], or Callable(metaparameters) -> Tuple[int].
# In this case, we use a 1D grid where the size is the number of blocks:
grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']), )
# NOTE:
# - Each torch.tensor object is implicitly converted into a pointer to its first element.
# - `triton.jit`'ed functions can be indexed with a launch grid to obtain a callable GPU kernel.
# - Don't forget to pass meta-parameters as keywords arguments.
add_kernel[grid](x, y, output, n_elements, BLOCK_SIZE=1024)
    # We return a handle to the output but, since `torch.cuda.synchronize()` hasn't been called, the kernel is still
    # running asynchronously at this point.
return output
if __name__=='__main__':
torch.manual_seed(0)
size = 98432
x = torch.rand(size, device='cuda')
y = torch.rand(size, device='cuda')
output_torch = x + y
output_triton = add(x, y)
print(output_torch)
print(output_triton)
print(f'The maximum difference between torch and triton is '
f'{torch.max(torch.abs(output_torch - output_triton))}')
# benchmark
@triton.testing.perf_report(
triton.testing.Benchmark(
x_names=['size'], # Argument names to use as an x-axis for the plot.
x_vals=[2**i for i in range(12, 28, 1)], # Different possible values for `x_name`.
x_log=True, # x axis is logarithmic.
line_arg='provider', # Argument name whose value corresponds to a different line in the plot.
line_vals=['triton', 'torch'], # Possible values for `line_arg`.
line_names=['Triton', 'Torch'], # Label name for the lines.
styles=[('blue', '-'), ('green', '-')], # Line styles.
ylabel='GB/s', # Label name for the y-axis.
plot_name='vector-add-performance', # Name for the plot. Used also as a file name for saving the plot.
args={}, # Values for function arguments not in `x_names` and `y_name`.
))
def benchmark(size, provider):
x = torch.rand(size, device='cuda', dtype=torch.float32)
y = torch.rand(size, device='cuda', dtype=torch.float32)
quantiles = [0.5, 0.2, 0.8]
if provider == 'torch':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: x + y, quantiles=quantiles)
if provider == 'triton':
ms, min_ms, max_ms = triton.testing.do_bench(lambda: add(x, y), quantiles=quantiles)
gbps = lambda ms: 12 * size / ms * 1e-6
return gbps(ms), gbps(max_ms), gbps(min_ms)
benchmark.run(print_data=True, show_plots=True)
|
aum-labs/dhi
|
kernels/rope2d.py
|
https://github.com/aum-labs/dhi/blob/e162c8f8cfb052fa2428a40a4242b3f913b7971e/kernels/rope2d.py
|
import triton
import triton.language as tl
import torch
from configure import calculate_settings
# b * c * h * w -> so dim is the channel dimension (d_model)
# flattened to b * (hw) * c; say the x coordinate corresponds to w and the y coordinate corresponds to h
# the original RoPE2D paper proposed two ways to rotate the coordinates
# (ø below denotes the rotation frequency, i.e. theta)
# 1. r(n, 2t) = cos(p(x)*ø_t) + i * sin(p(x)*ø_t), r(n, 2t+1) = cos(p(y)*ø_t) + i * sin(p(y)*ø_t) --> axial frequency
# 2. r(n, 2t) = exp(i * (ø(x) * p(x) + ø(y) * p(y))), where ø(x) and ø(y) are learnable params
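# Editorial sketch (illustrative, not part of the original file): the axial
# variant implemented below splits each head in half, rotating the first half
# with angles derived from the y (height) position and the second half with
# angles derived from the x (width) position. Each rotation acts on a channel
# pair (a, b) as the ordinary 2-D rotation by an angle phi,
#     (a * cos(phi) - b * sin(phi), b * cos(phi) + a * sin(phi)),
# i.e. multiplying the complex number a + i*b by exp(i * phi); this is exactly
# what both the PyTorch reference and the Triton kernels below compute.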
def rope2d(x, dim, width, n_heads):
b, hw, c = x.shape
head_dim = c // n_heads
h = hw // width
w = width
dim_half = head_dim // 2
theta = 1 / (100 ** (torch.arange(0, dim_half//2, dtype=torch.float32) / (dim_half)))
theta = theta.to(x.device)
h_pos = torch.arange(h, dtype=torch.float32).to(x.device)
w_pos = torch.arange(w, dtype=torch.float32).to(x.device)
freqs_h = torch.outer(h_pos, theta)
freqs_w = torch.outer(w_pos, theta)
freqs_h = torch.cat((freqs_h,freqs_h), dim = -1)
freqs_w = torch.cat((freqs_w,freqs_w), dim = -1)
x = x.view(b, n_heads, h, w, head_dim)
x_h = x[..., :dim_half]
x_w = x[..., dim_half:]
cos_h = torch.cos(freqs_h)[None, None, :, None, :]
sin_h = torch.sin(freqs_h)[None, None, :, None, :]
r1_h = x_h * cos_h
r2_h = torch.cat((-x_h[..., dim_half//2:], x_h[..., :dim_half//2]), dim=-1) * sin_h
x_h_rotated = r1_h + r2_h
cos_w = torch.cos(freqs_w)[None, None, None, :, :]
sin_w = torch.sin(freqs_w)[None, None, None, :, :]
r1_w = x_w * cos_w
r2_w = torch.cat((-x_w[..., dim_half//2:], x_w[..., :dim_half//2]), dim=-1) * sin_w
x_w_rotated = r1_w + r2_w
x_out = torch.cat([x_h_rotated, x_w_rotated], dim=-1)
return x_out.view(b, h*w, c)
def get_cis_mat_2d(head_dim, hw, width):
h = hw // width
w = width
dim_half = head_dim // 2
theta = 1 / (100 ** (torch.arange(0, dim_half//2, dtype=torch.float32) / (dim_half)))
h_pos = torch.arange(h, dtype=torch.float32)
w_pos = torch.arange(w, dtype=torch.float32)
freqs_h = torch.outer(h_pos, theta)
freqs_w = torch.outer(w_pos, theta)
cos_h = torch.cos(freqs_h) # h * head_dim/2
sin_h = torch.sin(freqs_h)
cos_w = torch.cos(freqs_w) # w * head_dim/2
sin_w = torch.sin(freqs_w)
return cos_h, sin_h, cos_w, sin_w
@triton.jit
def _rope2d_fwd_kernel(
inp_ptr,
cos_h_ptr,
sin_h_ptr,
cos_w_ptr,
sin_w_ptr,
out_ptr,
inp_stride_batch,
inp_stride_hw,
inp_stride_head,
cos_stride_hw,
cos_stride_dim,
head_dim,
batch_size,
height,
width,
n_heads,
BLOCK_SIZE: tl.constexpr,
):
# 3D grid: (batch_size, n_heads, height*width)
b = tl.program_id(0) # batch index
n = tl.program_id(1) # head index
h_w = tl.program_id(2) # spatial position index
# height_coordinate hc = y, width_coordinate wc = x
# say h_w = 0 1
# 2 3
# so for point 2, y = 1, x = 0
y = h_w // width
x = h_w % width
dim_fourth = head_dim // 4
inp_offset = (b * inp_stride_batch + n * inp_stride_head + h_w * inp_stride_hw)
h_offset = (y * cos_stride_hw)
w_offset = (x * cos_stride_hw)
cols = tl.arange(0, BLOCK_SIZE)
mask = cols < dim_fourth
inp1 = tl.load(inp_ptr + inp_offset + cols, mask=mask)
inp2 = tl.load(inp_ptr + inp_offset + cols + dim_fourth, mask=mask)
inp3 = tl.load(inp_ptr + inp_offset + cols + 2 * dim_fourth, mask=mask)
inp4 = tl.load(inp_ptr + inp_offset + cols + 3 * dim_fourth, mask=mask)
cos_h = tl.load(cos_h_ptr + h_offset + cols * cos_stride_dim, mask=mask)
sin_h = tl.load(sin_h_ptr + h_offset + cols * cos_stride_dim, mask=mask)
cos_w = tl.load(cos_w_ptr + w_offset + cols * cos_stride_dim, mask=mask)
sin_w = tl.load(sin_w_ptr + w_offset + cols * cos_stride_dim, mask=mask)
out1h = inp1 * cos_h - inp2 * sin_h
out2h = inp2 * cos_h + inp1 * sin_h
out1w = inp3 * cos_w - inp4 * sin_w
out2w = inp4 * cos_w + inp3 * sin_w
tl.store(out_ptr + inp_offset + cols, out1h, mask=mask)
tl.store(out_ptr + inp_offset + cols + dim_fourth, out2h, mask=mask)
tl.store(out_ptr + inp_offset + cols + 2 * dim_fourth, out1w, mask=mask)
tl.store(out_ptr + inp_offset + cols + 3 * dim_fourth, out2w, mask=mask)
@triton.jit
def _rope2d_bwd_kernel(
grad_ptr,
cos_h_ptr,
sin_h_ptr,
cos_w_ptr,
sin_w_ptr,
out_ptr,
grad_stride_batch,
grad_stride_head,
grad_stride_hw,
cos_stride_hw,
cos_stride_dim,
head_dim,
batch_size,
height,
width,
n_heads,
BLOCK_SIZE: tl.constexpr,
):
# 3D grid: (batch_size, n_heads, height*width)
b = tl.program_id(0) # batch index
n = tl.program_id(1) # head index
h_w = tl.program_id(2) # spatial position index
y = h_w // width
x = h_w % width
dim_fourth = head_dim // 4
grad_offset = (b * grad_stride_batch + n * grad_stride_head + h_w * grad_stride_hw)
h_offset = (y * cos_stride_hw)
w_offset = (x * cos_stride_hw)
cols = tl.arange(0, BLOCK_SIZE)
mask = cols < dim_fourth
grad1h = tl.load(grad_ptr + grad_offset + cols * 1, mask=mask)
grad2h = tl.load(grad_ptr + grad_offset + (cols + dim_fourth)*1, mask=mask)
grad3w = tl.load(grad_ptr + grad_offset + (cols + 2 * dim_fourth)*1, mask=mask)
grad4w = tl.load(grad_ptr + grad_offset + (cols + 3 * dim_fourth)*1, mask=mask)
cos_h = tl.load(cos_h_ptr + h_offset + cols * cos_stride_dim, mask=mask)
sin_h = tl.load(sin_h_ptr + h_offset + cols * cos_stride_dim, mask=mask)
cos_w = tl.load(cos_w_ptr + w_offset + cols * cos_stride_dim, mask=mask)
sin_w = tl.load(sin_w_ptr + w_offset + cols * cos_stride_dim, mask=mask)
# For height dimension:
# Forward: out1h = inp1 * cos_h - inp2 * sin_h
# out2h = inp2 * cos_h + inp1 * sin_h
# Backward derivation: 'do' is option + d
# ðL/ðinp1 = ðL/ðout1h * ðout1h/ðinp1 + ðL/ðout2h * ðout2h/ðinp1
# = grad1h * cos_h + grad2h * sin_h
# ðL/ðinp2 = ðL/ðout1h * ðout1h/ðinp2 + ðL/ðout2h * ðout2h/ðinp2
# = -grad1h * sin_h + grad2h * cos_h
out1h = grad1h * cos_h + grad2h * sin_h
out2h = -grad1h * sin_h + grad2h * cos_h
# For width dimension:
# Forward: out1w = inp3 * cos_w - inp4 * sin_w
# out2w = inp4 * cos_w + inp3 * sin_w
# Backward derivation follows same pattern as height
out1w = grad3w * cos_w + grad4w * sin_w
out2w = -grad3w * sin_w + grad4w * cos_w
tl.store(out_ptr + grad_offset + cols * 1, out1h, mask=mask)
tl.store(out_ptr + grad_offset + (cols + dim_fourth)*1, out2h, mask=mask)
tl.store(out_ptr + grad_offset + (cols + 2 * dim_fourth)*1, out1w, mask=mask)
tl.store(out_ptr + grad_offset + (cols + 3 * dim_fourth)*1, out2w, mask=mask)
class RoPE2D_triton(torch.autograd.Function):
@staticmethod
def forward(ctx, x, cos_h, sin_h, cos_w, sin_w, width):
b, n, hw, head_dim = x.shape
height = hw // width
out = torch.empty_like(x)
BLOCK_SIZE, num_warps = calculate_settings(head_dim//4)
_rope2d_fwd_kernel[(b, n, hw)](
x,
cos_h, sin_h,
cos_w, sin_w,
out,
x.stride(0),
x.stride(2),
x.stride(1),
cos_h.stride(0),
cos_h.stride(1),
head_dim,
b, height, width, n,
BLOCK_SIZE,
num_warps=num_warps,
)
ctx.save_for_backward(cos_h, sin_h, cos_w, sin_w)
ctx.width = width
return out
@staticmethod
def backward(ctx, grad_output):
cos_h, sin_h, cos_w, sin_w = ctx.saved_tensors
width = ctx.width
b, n, hw, head_dim = grad_output.shape
height = hw // width
grad_input = torch.empty_like(grad_output)
BLOCK_SIZE, num_warps = calculate_settings(head_dim//4)
# Use 3D grid
_rope2d_bwd_kernel[(b, n, hw)](
grad_output,
cos_h, sin_h,
cos_w, sin_w,
grad_input,
grad_output.stride(0),
grad_output.stride(1),
grad_output.stride(2),
cos_h.stride(0),
cos_h.stride(1),
head_dim,
b, height, width, n,
BLOCK_SIZE,
num_warps=num_warps,
)
return grad_input, None, None, None, None, None
# phew! man that was exhausting, took one whole day to understand and implement this
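# Editorial usage sketch (illustrative, not part of the original file; assumes a
# CUDA device and that `calculate_settings` is importable from configure.py):
#
#   b, n_heads, h, w, head_dim = 2, 4, 8, 8, 64
#   x = torch.randn(b, n_heads, h * w, head_dim, device="cuda", requires_grad=True)
#   cos_h, sin_h, cos_w, sin_w = [t.cuda() for t in get_cis_mat_2d(head_dim, h * w, w)]
#   out = RoPE2D_triton.apply(x, cos_h, sin_h, cos_w, sin_w, w)
#   out.sum().backward()  # exercises _rope2d_bwd_kernel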
|
@triton.jit
def _rope2d_fwd_kernel(
inp_ptr,
cos_h_ptr,
sin_h_ptr,
cos_w_ptr,
sin_w_ptr,
out_ptr,
inp_stride_batch,
inp_stride_hw,
inp_stride_head,
cos_stride_hw,
cos_stride_dim,
head_dim,
batch_size,
height,
width,
n_heads,
BLOCK_SIZE: tl.constexpr,
):
# 3D grid: (batch_size, n_heads, height*width)
b = tl.program_id(0) # batch index
n = tl.program_id(1) # head index
h_w = tl.program_id(2) # spatial position index
# height_coordinate hc = y, width_coordinate wc = x
# say h_w = 0 1
# 2 3
# so for point 2, y = 1, x = 0
y = h_w // width
x = h_w % width
dim_fourth = head_dim // 4
inp_offset = (b * inp_stride_batch + n * inp_stride_head + h_w * inp_stride_hw)
h_offset = (y * cos_stride_hw)
w_offset = (x * cos_stride_hw)
cols = tl.arange(0, BLOCK_SIZE)
mask = cols < dim_fourth
inp1 = tl.load(inp_ptr + inp_offset + cols, mask=mask)
inp2 = tl.load(inp_ptr + inp_offset + cols + dim_fourth, mask=mask)
inp3 = tl.load(inp_ptr + inp_offset + cols + 2 * dim_fourth, mask=mask)
inp4 = tl.load(inp_ptr + inp_offset + cols + 3 * dim_fourth, mask=mask)
cos_h = tl.load(cos_h_ptr + h_offset + cols * cos_stride_dim, mask=mask)
sin_h = tl.load(sin_h_ptr + h_offset + cols * cos_stride_dim, mask=mask)
cos_w = tl.load(cos_w_ptr + w_offset + cols * cos_stride_dim, mask=mask)
sin_w = tl.load(sin_w_ptr + w_offset + cols * cos_stride_dim, mask=mask)
out1h = inp1 * cos_h - inp2 * sin_h
out2h = inp2 * cos_h + inp1 * sin_h
out1w = inp3 * cos_w - inp4 * sin_w
out2w = inp4 * cos_w + inp3 * sin_w
tl.store(out_ptr + inp_offset + cols, out1h, mask=mask)
tl.store(out_ptr + inp_offset + cols + dim_fourth, out2h, mask=mask)
tl.store(out_ptr + inp_offset + cols + 2 * dim_fourth, out1w, mask=mask)
tl.store(out_ptr + inp_offset + cols + 3 * dim_fourth, out2w, mask=mask)
|