Dataset Viewer
| repository (stringclasses, 166 values) | file_path (stringlengths, 6–125) | url (stringlengths, 89–210) | code (stringlengths, 413–290k) | chunk (stringlengths, 56–175k) |
|---|---|---|---|---|
| 
	lucidrains/lion-pytorch | 
	lion_pytorch/triton.py | 
	https://github.com/lucidrains/lion-pytorch/blob/6a74fdc0ba572ab5683dc0270c66c20ecbc02d09/lion_pytorch/triton.py | 
	import torch
try:
    import triton
    import triton.language as tl
except ImportError as e:
    print('triton is not installed, please install by running `pip install triton>=2.2.0`')
    exit()
# triton cuda kernel
@triton.autotune(configs = [
    triton.Config({'BLOCK_SIZE': 128}, num_warps = 4),
    triton.Config({'BLOCK_SIZE': 1024}, num_warps = 8),
], key = ['n_elements'], restore_value=['p_ptr', 'exp_avg_ptr'])
@triton.jit
def update_fn_kernel(
    p_ptr,
    grad_ptr,
    exp_avg_ptr,
    lr,
    wd,
    beta1,
    beta2,
    n_elements,
    BLOCK_SIZE: tl.constexpr,
):
    pid = tl.program_id(axis = 0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    # offset pointers
    offset_p_ptr = p_ptr + offsets
    offset_grad_ptr = grad_ptr + offsets
    offset_exp_avg_ptr = exp_avg_ptr + offsets
    # load
    p = tl.load(offset_p_ptr, mask = mask)
    grad = tl.load(offset_grad_ptr, mask = mask)
    exp_avg = tl.load(offset_exp_avg_ptr, mask = mask)
    # stepweight decay
    p = p * (1 - lr * wd)
    # diff between momentum running average and grad
    diff = exp_avg - grad
    # weight update
    update = diff * beta1 + grad
    # torch.sign
    can_update = update != 0
    update_sign = tl.where(update > 0, -lr, lr)
    p = p + update_sign * can_update
    # decay the momentum running average coefficient
    exp_avg = diff * beta2 + grad
    # store new params and momentum running average coefficient
    tl.store(offset_p_ptr, p, mask = mask)
    tl.store(offset_exp_avg_ptr, exp_avg, mask = mask)
def update_fn(
    p: torch.Tensor,
    grad: torch.Tensor,
    exp_avg: torch.Tensor,
    lr: float,
    wd: float,
    beta1: float,
    beta2: float
):
    assert all([t.is_cuda for t in (p, grad, exp_avg)])
    n_elements = p.numel()
    grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)    
    update_fn_kernel[grid](
        p,
        grad,
        exp_avg,
        lr,
        wd,
        beta1,
        beta2,
        n_elements
    )
 | 
	@triton.jit
def update_fn_kernel(
    p_ptr,
    grad_ptr,
    exp_avg_ptr,
    lr,
    wd,
    beta1,
    beta2,
    n_elements,
    BLOCK_SIZE: tl.constexpr,
):
    pid = tl.program_id(axis = 0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    # offset pointers
    offset_p_ptr = p_ptr + offsets
    offset_grad_ptr = grad_ptr + offsets
    offset_exp_avg_ptr = exp_avg_ptr + offsets
    # load
    p = tl.load(offset_p_ptr, mask = mask)
    grad = tl.load(offset_grad_ptr, mask = mask)
    exp_avg = tl.load(offset_exp_avg_ptr, mask = mask)
    # stepweight decay
    p = p * (1 - lr * wd)
    # diff between momentum running average and grad
    diff = exp_avg - grad
    # weight update
    update = diff * beta1 + grad
    # torch.sign
    can_update = update != 0
    update_sign = tl.where(update > 0, -lr, lr)
    p = p + update_sign * can_update
    # decay the momentum running average coefficient
    exp_avg = diff * beta2 + grad
    # store new params and momentum running average coefficient
    tl.store(offset_p_ptr, p, mask = mask)
    tl.store(offset_exp_avg_ptr, exp_avg, mask = mask)
def update_fn(
    p: torch.Tensor,
    grad: torch.Tensor,
    exp_avg: torch.Tensor,
    lr: float,
    wd: float,
    beta1: float,
    beta2: float
):
    assert all([t.is_cuda for t in (p, grad, exp_avg)])
    n_elements = p.numel()
    grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)    
    update_fn_kernel[grid](
        p,
        grad,
        exp_avg,
        lr,
        wd,
        beta1,
        beta2,
        n_elements
    )
 | 
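The `update_fn` wrapper above launches one fused Lion step over flattened CUDA tensors. As a minimal usage sketch (editor's illustration, assuming the package is installed so that `lion_pytorch.triton` is importable; shapes and hyperparameters are arbitrary):

```python
import torch

# Hypothetical usage of the wrapper defined in lion_pytorch/triton.py above.
from lion_pytorch.triton import update_fn

p = torch.randn(4096, device='cuda')   # parameter tensor
grad = torch.randn_like(p)             # gradient for the current step
exp_avg = torch.zeros_like(p)          # Lion momentum buffer (updated in place)

# One fused Lion step: decoupled weight decay, sign-based parameter update,
# and refresh of the momentum buffer, all inside a single Triton kernel launch.
update_fn(p, grad, exp_avg, lr=1e-4, wd=1e-2, beta1=0.9, beta2=0.99)
```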
| 
	jax-ml/jax-triton | 
	examples/add.py | 
	https://github.com/jax-ml/jax-triton/blob/9aff06677a24d07e510f3632532a88b6804324dc/examples/add.py | 
	# Copyright 2024 The jax_triton Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Addition example."""
import jax
import jax.numpy as jnp
import jax_triton as jt
import triton
import triton.language as tl
@triton.jit
def add_kernel(
    x_ptr,
    y_ptr,
    output_ptr,
    block_size: tl.constexpr,
):
  """Adds two vectors."""
  pid = tl.program_id(axis=0)
  block_start = pid * block_size
  offsets = block_start + tl.arange(0, block_size)
  mask = offsets < 8
  x = tl.load(x_ptr + offsets, mask=mask)
  y = tl.load(y_ptr + offsets, mask=mask)
  output = x + y
  tl.store(output_ptr + offsets, output, mask=mask)
def add(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:
  out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
  block_size = 8
  grid = (triton.cdiv(x.size, block_size),)
  return jt.triton_call(
      x,
      y,
      kernel=add_kernel,
      out_shape=out_shape,
      grid=grid,
      block_size=block_size)
def main(unused_argv):
  x_val = jnp.arange(8)
  y_val = jnp.arange(8, 16)
  print(add(x_val, y_val))
  print(jax.jit(add)(x_val, y_val))
if __name__ == "__main__":
  from absl import app
  app.run(main)
 | 
	@triton.jit
def add_kernel(
    x_ptr,
    y_ptr,
    output_ptr,
    block_size: tl.constexpr,
):
  """Adds two vectors."""
  pid = tl.program_id(axis=0)
  block_start = pid * block_size
  offsets = block_start + tl.arange(0, block_size)
  mask = offsets < 8
  x = tl.load(x_ptr + offsets, mask=mask)
  y = tl.load(y_ptr + offsets, mask=mask)
  output = x + y
  tl.store(output_ptr + offsets, output, mask=mask)
def add(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:
  out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
  block_size = 8
  grid = (triton.cdiv(x.size, block_size),)
  return jt.triton_call(
      x,
      y,
      kernel=add_kernel,
      out_shape=out_shape,
      grid=grid,
      block_size=block_size)
def main(unused_argv):
  x_val = jnp.arange(8)
  y_val = jnp.arange(8, 16)
  print(add(x_val, y_val))
  print(jax.jit(add)(x_val, y_val))
if __name__ == "__main__":
  from absl import app
  app.run(main)
 | 
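The `add_kernel` above hard-codes `mask = offsets < 8`, so it is only correct for length-8 inputs. A hedged sketch of a size-agnostic variant (editor's illustration; `add_any_kernel` and `add_any` are not part of the repository, and it assumes `jt.triton_call` forwards the extra `n_elements` keyword to the kernel's `tl.constexpr` parameter the same way it forwards `block_size`):

```python
import jax
import jax.numpy as jnp
import jax_triton as jt
import triton
import triton.language as tl


@triton.jit
def add_any_kernel(x_ptr, y_ptr, output_ptr,
                   n_elements: tl.constexpr, block_size: tl.constexpr):
  """Adds two vectors of arbitrary length, masking the ragged last block."""
  pid = tl.program_id(axis=0)
  offsets = pid * block_size + tl.arange(0, block_size)
  mask = offsets < n_elements
  x = tl.load(x_ptr + offsets, mask=mask)
  y = tl.load(y_ptr + offsets, mask=mask)
  tl.store(output_ptr + offsets, x + y, mask=mask)


def add_any(x: jnp.ndarray, y: jnp.ndarray, block_size: int = 128) -> jnp.ndarray:
  out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
  grid = (triton.cdiv(x.size, block_size),)
  return jt.triton_call(x, y, kernel=add_any_kernel, out_shape=out_shape,
                        grid=grid, n_elements=x.size, block_size=block_size)
```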
| 
	josStorer/RWKV-Runner | 
	finetune/lora/v6/fla/ops/hgrn/chunk.py | 
	https://github.com/josStorer/RWKV-Runner/blob/ad6170816a776bfc312837aafc9a3ff889a3cdd3/finetune/lora/v6/fla/ops/hgrn/chunk.py | 
	# -*- coding: utf-8 -*-
# Copyright (c) 2024, Yu Zhang, Songlin Yang
# this function implements the chunkwise form of HGRN, inspired by
# [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html)
# also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan
# from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent:
#
# Performance:
#    seq_len     chunk  recurrent  chunk_bwd  recurrent_bwd
# 0    128.0  0.039360   0.061056   0.312160       0.205008
# 1    256.0  0.045824   0.123712   0.308784       0.297696
# 2    512.0  0.058688   0.241952   0.310720       0.626528
# 3   1024.0  0.088288   0.476992   0.313184       1.333152
# 4   2048.0  0.169472   0.943264   0.452464       2.724864
# 5   4096.0  0.329920   1.886144   0.881600       5.551520
# 6   8192.0  0.647872   3.755040   1.740496      11.117184
# 7  16384.0  1.272064   7.520576   3.446608      22.362528
from typing import Tuple
import torch
import triton
import triton.language as tl
from fla.utils import contiguous
@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
@triton.jit
def chunk_hgrn_fwd_kernel_h(
    x,
    g,
    gc,
    o,
    h0,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr,
    USE_INITIAL_STATE: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    p_x = x + i_bh * T * D + i_t * BT * D + o_d
    p_g = g + i_bh * T * D + i_t * BT * D + o_d
    p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
    p_o = o + i_bh * T * D + i_t * BT * D + o_d
    b_h = tl.zeros([BD], dtype=tl.float32)
    b_gc = tl.zeros([BD], dtype=tl.float32)
    if USE_INITIAL_STATE:
        if i_t == 0:
            b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
    for i in range(0, BT):
        mask_t = mask & ((i_t * BT + i) < T)
        b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
        b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
        b_h = tl.exp(b_g) * b_h + b_x
        b_gc = b_gc + b_g
        tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
        tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)
        p_x += D
        p_g += D
        p_gc += D
        p_o += D
@triton.jit
def chunk_hgrn_fwd_kernel_o(
    gc,
    o,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    for i_t in range(1, tl.cdiv(T, BT)):
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        # [BD,]
        b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
        # [BT, BD]
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
        tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
@triton.jit
def chunk_hgrn_bwd_kernel_h(
    g,
    gc,
    dx,
    do,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    BC = min(BT, T - i_t * BT)
    NT = tl.num_programs(1)
    p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    if i_t == NT - 1:
        b_gc = tl.zeros([BD], dtype=tl.float32)
    else:
        b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
    b_dh = tl.zeros([BD], dtype=tl.float32)
    for _ in range(BC - 1, -1, -1):
        tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)
        b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
        b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)
        b_gc = b_gc + b_g
        b_dh = b_dh + b_do
        b_dx = b_dh
        b_dh = b_dh * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)
        p_g -= D
        p_gc -= D
        p_dx -= D
        p_do -= D
@triton.jit
def chunk_hgrn_bwd_kernel_o(
    g,
    gc,
    o,
    dx,
    dg,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    for i_t in range(tl.cdiv(T, BT) - 1, -1, -1):
        p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0))
        p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        # [BD,]
        mask_t = mask & ((i_t + 1) * BT < T)
        b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32)
        # [BT, BD]
        b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32)
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32)
        b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32)
        b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :]
        b_dg = b_o * b_dx * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1))
        tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
class ChunkHGRNFunction(torch.autograd.Function):
    @staticmethod
    @contiguous
    def forward(ctx, x, g, initial_state=None, output_final_state=False):
        B, H, T, D = x.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4
        gc = torch.empty_like(g, dtype=torch.float)
        o = torch.empty_like(x, dtype=torch.float)
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_fwd_kernel_h[grid](
            x, g, gc, o, initial_state,
            T, D,
            BT=BT,
            USE_INITIAL_STATE=initial_state is not None
        )
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_fwd_kernel_o[grid](
            gc, o,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        final_state = None
        if output_final_state:
            final_state = o[:, :, -1].clone()
        o = o.to(x.dtype)
        ctx.save_for_backward(g, o, initial_state)
        return o, final_state
    @staticmethod
    @contiguous
    def backward(ctx, do, dht=None):
        g, o, initial_state = ctx.saved_tensors
        B, H, T, D = do.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4
        gc = torch.empty_like(g, dtype=torch.float)
        dx = torch.empty_like(o)
        dg = torch.empty_like(g)
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_bwd_kernel_h[grid](
            g, gc, dx, do,
            T, D,
            BT=BT
        )
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_bwd_kernel_o[grid](
            g, gc, o, dx, dg,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        if initial_state is not None:
            dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp()
        return dx, dg, None, None
def chunk_hgrn(
    x: torch.Tensor,
    g: torch.Tensor,
    initial_state: torch.Tensor = None,
    output_final_state: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
    if initial_state is not None:
        initial_state = initial_state.detach()
    o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state)
    return o, final_state
if __name__ == '__main__':
    import torch.nn.functional as F
    from fla.ops.hgrn.naive import naive_recurrent_hgrn
    from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn
    B, H, T, D = 8, 4, 512, 128
    dtype = torch.bfloat16
    torch.manual_seed(42)
    # [batch_size, n_heads, seq_len, d_head]
    x = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    g = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g)
    print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}')
    print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}')
    x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
    print(f"DTYPE:\t{x.dtype}")
    do = torch.randn_like(x)
    h0 = torch.randn_like(x[:, :, 0])
    ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True)
    ref.backward(do)
    ref_dx, x.grad = x.grad.clone(), None
    ref_dg, g.grad = g.grad.clone(), None
    tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True)
    tri.backward(do)
    tri_dx, x.grad = x.grad.clone(), None
    tri_dg, g.grad = g.grad.clone(), None
    print("  \t    DIFF\t    MAX")
    print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}")
    print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}")
    print('Done!')
    @triton.testing.perf_report(
        triton.testing.Benchmark(
            # argument names to use as an x-axis for the plot
            x_names=['seq_len'],
            # different possible values for `x_name`
            x_vals=[128 * 2 ** i for i in range(0, 8)],
            # argument name whose value corresponds to a different line in the plot
            line_arg='provider',
            # possible values for `line_arg`
            line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # label name for the lines
            line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # line styles
            styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')],
            ylabel="Execution Time (ms)",  # label name for the y-axis
            # name for the plot. Used also as a file name for saving the plot.
            plot_name="Performance",
            args={},
        )
    )
    def benchmark(seq_len, provider):
        dtype = torch.bfloat16
        B, H, D = 16, 4, 128
        x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda')
        g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid()
        x = (1 - g) * x
        x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
        do = torch.randn_like(x, dtype=dtype)
        quantiles = [0.5, 0.2, 0.8]
        results = 0, 0, 0
        if provider == 'chunk':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles)
        if provider == 'recurrent':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles)
        if provider == 'chunk_bwd':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        if provider == 'recurrent_bwd':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        return results
    benchmark.run(print_data=True)
 | 
	@triton.jit
def chunk_hgrn_fwd_kernel_h(
    x,
    g,
    gc,
    o,
    h0,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr,
    USE_INITIAL_STATE: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    p_x = x + i_bh * T * D + i_t * BT * D + o_d
    p_g = g + i_bh * T * D + i_t * BT * D + o_d
    p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
    p_o = o + i_bh * T * D + i_t * BT * D + o_d
    b_h = tl.zeros([BD], dtype=tl.float32)
    b_gc = tl.zeros([BD], dtype=tl.float32)
    if USE_INITIAL_STATE:
        if i_t == 0:
            b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
    for i in range(0, BT):
        mask_t = mask & ((i_t * BT + i) < T)
        b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
        b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
        b_h = tl.exp(b_g) * b_h + b_x
        b_gc = b_gc + b_g
        tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
        tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)
        p_x += D
        p_g += D
        p_gc += D
        p_o += D
 | 
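For readers checking the chunked kernels against the underlying recurrence, here is a minimal PyTorch reference (editor's sketch, not the repository's `naive_recurrent_hgrn`) of what the two forward kernels compute together: `chunk_hgrn_fwd_kernel_h` evaluates h_t = exp(g_t) * h_{t-1} + x_t within each BT-length chunk while accumulating the cumulative log-gate `gc`, and `chunk_hgrn_fwd_kernel_o` then propagates each chunk's final state into the next chunk through exp(gc).

```python
from typing import Optional

import torch


def hgrn_reference(x: torch.Tensor, g: torch.Tensor,
                   h0: Optional[torch.Tensor] = None) -> torch.Tensor:
    """Naive O(T) scan of h_t = exp(g_t) * h_{t-1} + x_t over [B, H, T, D] inputs."""
    B, H, T, D = x.shape
    h = h0.float() if h0 is not None else torch.zeros(B, H, D, device=x.device)
    out = torch.empty(B, H, T, D, dtype=torch.float32, device=x.device)
    for t in range(T):
        h = g[:, :, t].float().exp() * h + x[:, :, t].float()
        out[:, :, t] = h
    return out.to(x.dtype)
```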
| 
	josStorer/RWKV-Runner | 
	finetune/lora/v6/fla/ops/hgrn/chunk.py | 
	https://github.com/josStorer/RWKV-Runner/blob/ad6170816a776bfc312837aafc9a3ff889a3cdd3/finetune/lora/v6/fla/ops/hgrn/chunk.py | 
	# -*- coding: utf-8 -*-
# Copyright (c) 2024, Yu Zhang, Songlin Yang
# this function implements the chunkwise form of HGRN, inspired by
# [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html)
# also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan
# from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent:
#
# Performance:
#    seq_len     chunk  recurrent  chunk_bwd  recurrent_bwd
# 0    128.0  0.039360   0.061056   0.312160       0.205008
# 1    256.0  0.045824   0.123712   0.308784       0.297696
# 2    512.0  0.058688   0.241952   0.310720       0.626528
# 3   1024.0  0.088288   0.476992   0.313184       1.333152
# 4   2048.0  0.169472   0.943264   0.452464       2.724864
# 5   4096.0  0.329920   1.886144   0.881600       5.551520
# 6   8192.0  0.647872   3.755040   1.740496      11.117184
# 7  16384.0  1.272064   7.520576   3.446608      22.362528
from typing import Tuple
import torch
import triton
import triton.language as tl
from fla.utils import contiguous
@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
@triton.jit
def chunk_hgrn_fwd_kernel_h(
    x,
    g,
    gc,
    o,
    h0,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr,
    USE_INITIAL_STATE: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    p_x = x + i_bh * T * D + i_t * BT * D + o_d
    p_g = g + i_bh * T * D + i_t * BT * D + o_d
    p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
    p_o = o + i_bh * T * D + i_t * BT * D + o_d
    b_h = tl.zeros([BD], dtype=tl.float32)
    b_gc = tl.zeros([BD], dtype=tl.float32)
    if USE_INITIAL_STATE:
        if i_t == 0:
            b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
    for i in range(0, BT):
        mask_t = mask & ((i_t * BT + i) < T)
        b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
        b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
        b_h = tl.exp(b_g) * b_h + b_x
        b_gc = b_gc + b_g
        tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
        tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)
        p_x += D
        p_g += D
        p_gc += D
        p_o += D
@triton.jit
def chunk_hgrn_fwd_kernel_o(
    gc,
    o,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    for i_t in range(1, tl.cdiv(T, BT)):
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        # [BD,]
        b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
        # [BT, BD]
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
        tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
@triton.jit
def chunk_hgrn_bwd_kernel_h(
    g,
    gc,
    dx,
    do,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    BC = min(BT, T - i_t * BT)
    NT = tl.num_programs(1)
    p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    if i_t == NT - 1:
        b_gc = tl.zeros([BD], dtype=tl.float32)
    else:
        b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
    b_dh = tl.zeros([BD], dtype=tl.float32)
    for _ in range(BC - 1, -1, -1):
        tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)
        b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
        b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)
        b_gc = b_gc + b_g
        b_dh = b_dh + b_do
        b_dx = b_dh
        b_dh = b_dh * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)
        p_g -= D
        p_gc -= D
        p_dx -= D
        p_do -= D
@triton.jit
def chunk_hgrn_bwd_kernel_o(
    g,
    gc,
    o,
    dx,
    dg,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    for i_t in range(tl.cdiv(T, BT) - 1, -1, -1):
        p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0))
        p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        # [BD,]
        mask_t = mask & ((i_t + 1) * BT < T)
        b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32)
        # [BT, BD]
        b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32)
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32)
        b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32)
        b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :]
        b_dg = b_o * b_dx * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1))
        tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
class ChunkHGRNFunction(torch.autograd.Function):
    @staticmethod
    @contiguous
    def forward(ctx, x, g, initial_state=None, output_final_state=False):
        B, H, T, D = x.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4
        gc = torch.empty_like(g, dtype=torch.float)
        o = torch.empty_like(x, dtype=torch.float)
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_fwd_kernel_h[grid](
            x, g, gc, o, initial_state,
            T, D,
            BT=BT,
            USE_INITIAL_STATE=initial_state is not None
        )
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_fwd_kernel_o[grid](
            gc, o,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        final_state = None
        if output_final_state:
            final_state = o[:, :, -1].clone()
        o = o.to(x.dtype)
        ctx.save_for_backward(g, o, initial_state)
        return o, final_state
    @staticmethod
    @contiguous
    def backward(ctx, do, dht=None):
        g, o, initial_state = ctx.saved_tensors
        B, H, T, D = do.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4
        gc = torch.empty_like(g, dtype=torch.float)
        dx = torch.empty_like(o)
        dg = torch.empty_like(g)
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_bwd_kernel_h[grid](
            g, gc, dx, do,
            T, D,
            BT=BT
        )
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_bwd_kernel_o[grid](
            g, gc, o, dx, dg,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        if initial_state is not None:
            dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp()
        return dx, dg, None, None
def chunk_hgrn(
    x: torch.Tensor,
    g: torch.Tensor,
    initial_state: torch.Tensor = None,
    output_final_state: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
    if initial_state is not None:
        initial_state = initial_state.detach()
    o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state)
    return o, final_state
if __name__ == '__main__':
    import torch.nn.functional as F
    from fla.ops.hgrn.naive import naive_recurrent_hgrn
    from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn
    B, H, T, D = 8, 4, 512, 128
    dtype = torch.bfloat16
    torch.manual_seed(42)
    # [batch_size, n_heads, seq_len, d_head]
    x = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    g = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g)
    print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}')
    print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}')
    x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
    print(f"DTYPE:\t{x.dtype}")
    do = torch.randn_like(x)
    h0 = torch.randn_like(x[:, :, 0])
    ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True)
    ref.backward(do)
    ref_dx, x.grad = x.grad.clone(), None
    ref_dg, g.grad = g.grad.clone(), None
    tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True)
    tri.backward(do)
    tri_dx, x.grad = x.grad.clone(), None
    tri_dg, g.grad = g.grad.clone(), None
    print("  \t    DIFF\t    MAX")
    print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}")
    print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}")
    print('Done!')
    @triton.testing.perf_report(
        triton.testing.Benchmark(
            # argument names to use as an x-axis for the plot
            x_names=['seq_len'],
            # different possible values for `x_name`
            x_vals=[128 * 2 ** i for i in range(0, 8)],
            # argument name whose value corresponds to a different line in the plot
            line_arg='provider',
            # possible values for `line_arg`
            line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # label name for the lines
            line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # line styles
            styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')],
            ylabel="Execution Time (ms)",  # label name for the y-axis
            # name for the plot. Used also as a file name for saving the plot.
            plot_name="Performance",
            args={},
        )
    )
    def benchmark(seq_len, provider):
        dtype = torch.bfloat16
        B, H, D = 16, 4, 128
        x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda')
        g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid()
        x = (1 - g) * x
        x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
        do = torch.randn_like(x, dtype=dtype)
        quantiles = [0.5, 0.2, 0.8]
        results = 0, 0, 0
        if provider == 'chunk':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles)
        if provider == 'recurrent':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles)
        if provider == 'chunk_bwd':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        if provider == 'recurrent_bwd':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        return results
    benchmark.run(print_data=True)
 | 
	@triton.jit
def chunk_hgrn_fwd_kernel_o(
    gc,
    o,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    for i_t in range(1, tl.cdiv(T, BT)):
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        # [BD,]
        b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
        # [BT, BD]
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
        tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
 | 
| 
	josStorer/RWKV-Runner | 
	finetune/lora/v6/fla/ops/hgrn/chunk.py | 
	https://github.com/josStorer/RWKV-Runner/blob/ad6170816a776bfc312837aafc9a3ff889a3cdd3/finetune/lora/v6/fla/ops/hgrn/chunk.py | 
	# -*- coding: utf-8 -*-
# Copyright (c) 2024, Yu Zhang, Songlin Yang
# this function implements the chunkwise form of HGRN, inspired by
# [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html)
# also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan
# from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent:
#
# Performance:
#    seq_len     chunk  recurrent  chunk_bwd  recurrent_bwd
# 0    128.0  0.039360   0.061056   0.312160       0.205008
# 1    256.0  0.045824   0.123712   0.308784       0.297696
# 2    512.0  0.058688   0.241952   0.310720       0.626528
# 3   1024.0  0.088288   0.476992   0.313184       1.333152
# 4   2048.0  0.169472   0.943264   0.452464       2.724864
# 5   4096.0  0.329920   1.886144   0.881600       5.551520
# 6   8192.0  0.647872   3.755040   1.740496      11.117184
# 7  16384.0  1.272064   7.520576   3.446608      22.362528
from typing import Tuple
import torch
import triton
import triton.language as tl
from fla.utils import contiguous
@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
@triton.jit
def chunk_hgrn_fwd_kernel_h(
    x,
    g,
    gc,
    o,
    h0,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr,
    USE_INITIAL_STATE: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    p_x = x + i_bh * T * D + i_t * BT * D + o_d
    p_g = g + i_bh * T * D + i_t * BT * D + o_d
    p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
    p_o = o + i_bh * T * D + i_t * BT * D + o_d
    b_h = tl.zeros([BD], dtype=tl.float32)
    b_gc = tl.zeros([BD], dtype=tl.float32)
    if USE_INITIAL_STATE:
        if i_t == 0:
            b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
    for i in range(0, BT):
        mask_t = mask & ((i_t * BT + i) < T)
        b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
        b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
        b_h = tl.exp(b_g) * b_h + b_x
        b_gc = b_gc + b_g
        tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
        tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)
        p_x += D
        p_g += D
        p_gc += D
        p_o += D
@triton.jit
def chunk_hgrn_fwd_kernel_o(
    gc,
    o,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    for i_t in range(1, tl.cdiv(T, BT)):
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        # [BD,]
        b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
        # [BT, BD]
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
        tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
@triton.jit
def chunk_hgrn_bwd_kernel_h(
    g,
    gc,
    dx,
    do,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    BC = min(BT, T - i_t * BT)
    NT = tl.num_programs(1)
    p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    if i_t == NT - 1:
        b_gc = tl.zeros([BD], dtype=tl.float32)
    else:
        b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
    b_dh = tl.zeros([BD], dtype=tl.float32)
    for _ in range(BC - 1, -1, -1):
        tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)
        b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
        b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)
        b_gc = b_gc + b_g
        b_dh = b_dh + b_do
        b_dx = b_dh
        b_dh = b_dh * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)
        p_g -= D
        p_gc -= D
        p_dx -= D
        p_do -= D
@triton.jit
def chunk_hgrn_bwd_kernel_o(
    g,
    gc,
    o,
    dx,
    dg,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    for i_t in range(tl.cdiv(T, BT) - 1, -1, -1):
        p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0))
        p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        # [BD,]
        mask_t = mask & ((i_t + 1) * BT < T)
        b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32)
        # [BT, BD]
        b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32)
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32)
        b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32)
        b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :]
        b_dg = b_o * b_dx * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1))
        tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
class ChunkHGRNFunction(torch.autograd.Function):
    @staticmethod
    @contiguous
    def forward(ctx, x, g, initial_state=None, output_final_state=False):
        B, H, T, D = x.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4
        gc = torch.empty_like(g, dtype=torch.float)
        o = torch.empty_like(x, dtype=torch.float)
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_fwd_kernel_h[grid](
            x, g, gc, o, initial_state,
            T, D,
            BT=BT,
            USE_INITIAL_STATE=initial_state is not None
        )
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_fwd_kernel_o[grid](
            gc, o,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        final_state = None
        if output_final_state:
            final_state = o[:, :, -1].clone()
        o = o.to(x.dtype)
        ctx.save_for_backward(g, o, initial_state)
        return o, final_state
    @staticmethod
    @contiguous
    def backward(ctx, do, dht=None):
        g, o, initial_state = ctx.saved_tensors
        B, H, T, D = do.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4
        gc = torch.empty_like(g, dtype=torch.float)
        dx = torch.empty_like(o)
        dg = torch.empty_like(g)
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_bwd_kernel_h[grid](
            g, gc, dx, do,
            T, D,
            BT=BT
        )
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_bwd_kernel_o[grid](
            g, gc, o, dx, dg,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        if initial_state is not None:
            dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp()
        return dx, dg, None, None
def chunk_hgrn(
    x: torch.Tensor,
    g: torch.Tensor,
    initial_state: torch.Tensor = None,
    output_final_state: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
    if initial_state is not None:
        initial_state = initial_state.detach()
    o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state)
    return o, final_state
if __name__ == '__main__':
    import torch.nn.functional as F
    from fla.ops.hgrn.naive import naive_recurrent_hgrn
    from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn
    B, H, T, D = 8, 4, 512, 128
    dtype = torch.bfloat16
    torch.manual_seed(42)
    # [batch_size, n_heads, seq_len, d_head]
    x = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    g = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g)
    print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}')
    print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}')
    x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
    print(f"DTYPE:\t{x.dtype}")
    do = torch.randn_like(x)
    h0 = torch.randn_like(x[:, :, 0])
    ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True)
    ref.backward(do)
    ref_dx, x.grad = x.grad.clone(), None
    ref_dg, g.grad = g.grad.clone(), None
    tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True)
    tri.backward(do)
    tri_dx, x.grad = x.grad.clone(), None
    tri_dg, g.grad = g.grad.clone(), None
    print("  \t    DIFF\t    MAX")
    print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}")
    print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}")
    print('Done!')
    @triton.testing.perf_report(
        triton.testing.Benchmark(
            # argument names to use as an x-axis for the plot
            x_names=['seq_len'],
            # different possible values for `x_name`
            x_vals=[128 * 2 ** i for i in range(0, 8)],
            # argument name whose value corresponds to a different line in the plot
            line_arg='provider',
            # possible values for `line_arg`
            line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # label name for the lines
            line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # line styles
            styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')],
            ylabel="Execution Time (ms)",  # label name for the y-axis
            # name for the plot. Used also as a file name for saving the plot.
            plot_name="Performance",
            args={},
        )
    )
    def benchmark(seq_len, provider):
        dtype = torch.bfloat16
        B, H, D = 16, 4, 128
        x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda')
        g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid()
        x = (1 - g) * x
        x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
        do = torch.randn_like(x, dtype=dtype)
        quantiles = [0.5, 0.2, 0.8]
        results = 0, 0, 0
        if provider == 'chunk':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles)
        if provider == 'recurrent':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles)
        if provider == 'chunk_bwd':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        if provider == 'recurrent_bwd':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        return results
    benchmark.run(print_data=True)
 | 
	@triton.jit
def chunk_hgrn_bwd_kernel_h(
    g,
    gc,
    dx,
    do,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    BC = min(BT, T - i_t * BT)
    NT = tl.num_programs(1)
    p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    if i_t == NT - 1:
        b_gc = tl.zeros([BD], dtype=tl.float32)
    else:
        b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
    b_dh = tl.zeros([BD], dtype=tl.float32)
    for _ in range(BC - 1, -1, -1):
        tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)
        b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
        b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)
        b_gc = b_gc + b_g
        b_dh = b_dh + b_do
        b_dx = b_dh
        b_dh = b_dh * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)
        p_g -= D
        p_gc -= D
        p_dx -= D
        p_do -= D
 | 
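As a call-pattern sketch (editor's illustration mirroring the file's own `__main__` test; it assumes the `fla` package from this finetune tree is importable and a CUDA device is available), `chunk_hgrn` takes the input `x` and the log-gate `g` and returns the chunked outputs plus an optional final state:

```python
import torch
import torch.nn.functional as F

# Hypothetical import path, following the file location fla/ops/hgrn/chunk.py above.
from fla.ops.hgrn.chunk import chunk_hgrn

B, H, T, D = 2, 4, 512, 128
x = torch.randn(B, H, T, D, dtype=torch.bfloat16, device='cuda', requires_grad=True)
g = F.logsigmoid(torch.randn(B, H, T, D, device='cuda')).to(torch.bfloat16).requires_grad_()

o, final_state = chunk_hgrn(x, g, output_final_state=True)  # o: [B, H, T, D]
o.sum().backward()                                          # populates x.grad and g.grad
```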
| 
	josStorer/RWKV-Runner | 
	finetune/lora/v6/fla/ops/hgrn/chunk.py | 
	https://github.com/josStorer/RWKV-Runner/blob/ad6170816a776bfc312837aafc9a3ff889a3cdd3/finetune/lora/v6/fla/ops/hgrn/chunk.py | 
	# -*- coding: utf-8 -*-
# Copyright (c) 2024, Yu Zhang, Songlin Yang
# this function implements the chunkwise form of HGRN, inspired by
# [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html)
# also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan
# from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent:
#
# Performance:
#    seq_len     chunk  recurrent  chunk_bwd  recurrent_bwd
# 0    128.0  0.039360   0.061056   0.312160       0.205008
# 1    256.0  0.045824   0.123712   0.308784       0.297696
# 2    512.0  0.058688   0.241952   0.310720       0.626528
# 3   1024.0  0.088288   0.476992   0.313184       1.333152
# 4   2048.0  0.169472   0.943264   0.452464       2.724864
# 5   4096.0  0.329920   1.886144   0.881600       5.551520
# 6   8192.0  0.647872   3.755040   1.740496      11.117184
# 7  16384.0  1.272064   7.520576   3.446608      22.362528
from typing import Tuple
import torch
import triton
import triton.language as tl
from fla.utils import contiguous
@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
@triton.jit
def chunk_hgrn_fwd_kernel_h(
    x,
    g,
    gc,
    o,
    h0,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr,
    USE_INITIAL_STATE: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    p_x = x + i_bh * T * D + i_t * BT * D + o_d
    p_g = g + i_bh * T * D + i_t * BT * D + o_d
    p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
    p_o = o + i_bh * T * D + i_t * BT * D + o_d
    b_h = tl.zeros([BD], dtype=tl.float32)
    b_gc = tl.zeros([BD], dtype=tl.float32)
    if USE_INITIAL_STATE:
        if i_t == 0:
            b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
    for i in range(0, BT):
        mask_t = mask & ((i_t * BT + i) < T)
        b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
        b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
        b_h = tl.exp(b_g) * b_h + b_x
        b_gc = b_gc + b_g
        tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
        tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)
        p_x += D
        p_g += D
        p_gc += D
        p_o += D
@triton.jit
def chunk_hgrn_fwd_kernel_o(
    gc,
    o,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    for i_t in range(1, tl.cdiv(T, BT)):
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        # [BD,]
        b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
        # [BT, BD]
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
        tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))
@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
@triton.jit
def chunk_hgrn_bwd_kernel_h(
    g,
    gc,
    dx,
    do,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    BC = min(BT, T - i_t * BT)
    NT = tl.num_programs(1)
    p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    if i_t == NT - 1:
        b_gc = tl.zeros([BD], dtype=tl.float32)
    else:
        b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
    b_dh = tl.zeros([BD], dtype=tl.float32)
    for _ in range(BC - 1, -1, -1):
        tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)
        b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
        b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)
        b_gc = b_gc + b_g
        b_dh = b_dh + b_do
        b_dx = b_dh
        b_dh = b_dh * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)
        p_g -= D
        p_gc -= D
        p_dx -= D
        p_do -= D
@triton.jit
def chunk_hgrn_bwd_kernel_o(
    g,
    gc,
    o,
    dx,
    dg,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    for i_t in range(tl.cdiv(T, BT) - 1, -1, -1):
        p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0))
        p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        # [BD,]
        mask_t = mask & ((i_t + 1) * BT < T)
        b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32)
        # [BT, BD]
        b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32)
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32)
        b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32)
        b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :]
        b_dg = b_o * b_dx * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1))
        tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
class ChunkHGRNFunction(torch.autograd.Function):
    @staticmethod
    @contiguous
    def forward(ctx, x, g, initial_state=None, output_final_state=False):
        B, H, T, D = x.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4
        gc = torch.empty_like(g, dtype=torch.float)
        o = torch.empty_like(x, dtype=torch.float)
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_fwd_kernel_h[grid](
            x, g, gc, o, initial_state,
            T, D,
            BT=BT,
            USE_INITIAL_STATE=initial_state is not None
        )
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_fwd_kernel_o[grid](
            gc, o,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        final_state = None
        if output_final_state:
            final_state = o[:, :, -1].clone()
        o = o.to(x.dtype)
        ctx.save_for_backward(g, o, initial_state)
        return o, final_state
    @staticmethod
    @contiguous
    def backward(ctx, do, dht=None):
        g, o, initial_state = ctx.saved_tensors
        B, H, T, D = do.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4
        gc = torch.empty_like(g, dtype=torch.float)
        dx = torch.empty_like(o)
        dg = torch.empty_like(g)
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_bwd_kernel_h[grid](
            g, gc, dx, do,
            T, D,
            BT=BT
        )
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_bwd_kernel_o[grid](
            g, gc, o, dx, dg,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        if initial_state is not None:
            dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp()
        return dx, dg, None, None
def chunk_hgrn(
    x: torch.Tensor,
    g: torch.Tensor,
    initial_state: torch.Tensor = None,
    output_final_state: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
    if initial_state is not None:
        initial_state = initial_state.detach()
    o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state)
    return o, final_state
if __name__ == '__main__':
    import torch.nn.functional as F
    from fla.ops.hgrn.naive import naive_recurrent_hgrn
    from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn
    B, H, T, D = 8, 4, 512, 128
    dtype = torch.bfloat16
    torch.manual_seed(42)
    # [batch_size, n_heads, seq_len, d_head]
    x = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    g = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g)
    print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}')
    print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}')
    x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
    print(f"DTYPE:\t{x.dtype}")
    do = torch.randn_like(x)
    h0 = torch.randn_like(x[:, :, 0])
    ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True)
    ref.backward(do)
    ref_dx, x.grad = x.grad.clone(), None
    ref_dg, g.grad = g.grad.clone(), None
    tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True)
    tri.backward(do)
    tri_dx, x.grad = x.grad.clone(), None
    tri_dg, g.grad = g.grad.clone(), None
    print("  \t    DIFF\t    MAX")
    print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}")
    print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}")
    print('Done!')
    @triton.testing.perf_report(
        triton.testing.Benchmark(
            # argument names to use as an x-axis for the plot
            x_names=['seq_len'],
            # different possible values for `x_name`
            x_vals=[128 * 2 ** i for i in range(0, 8)],
            # argument name whose value corresponds to a different line in the plot
            line_arg='provider',
            # possible values for `line_arg`
            line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # label name for the lines
            line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # line styles
            styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')],
            ylabel="Execution Time (ms)",  # label name for the y-axis
            # name for the plot. Used also as a file name for saving the plot.
            plot_name="Performance",
            args={},
        )
    )
    def benchmark(seq_len, provider):
        dtype = torch.bfloat16
        B, H, D = 16, 4, 128
        x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda')
        g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid()
        x = (1 - g) * x
        x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
        do = torch.randn_like(x, dtype=dtype)
        quantiles = [0.5, 0.2, 0.8]
        results = 0, 0, 0
        if provider == 'chunk':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles)
        if provider == 'recurrent':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles)
        if provider == 'chunk_bwd':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        if provider == 'recurrent_bwd':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        return results
    benchmark.run(print_data=True)
 | 
	@triton.jit
def chunk_hgrn_bwd_kernel_o(
    g,
    gc,
    o,
    dx,
    dg,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    for i_t in range(tl.cdiv(T, BT) - 1, -1, -1):
        p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0))
        p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        # [BD,]
        mask_t = mask & ((i_t + 1) * BT < T)
        b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32)
        # [BT, BD]
        b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32)
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32)
        b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32)
        b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :]
        b_dg = b_o * b_dx * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1))
        tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))
class ChunkHGRNFunction(torch.autograd.Function):
    @staticmethod
    @contiguous
    def forward(ctx, x, g, initial_state=None, output_final_state=False):
        B, H, T, D = x.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4
        gc = torch.empty_like(g, dtype=torch.float)
        o = torch.empty_like(x, dtype=torch.float)
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_fwd_kernel_h[grid](
            x, g, gc, o, initial_state,
            T, D,
            BT=BT,
            USE_INITIAL_STATE=initial_state is not None
        )
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_fwd_kernel_o[grid](
            gc, o,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        final_state = None
        if output_final_state:
            final_state = o[:, :, -1].clone()
        o = o.to(x.dtype)
        ctx.save_for_backward(g, o, initial_state)
        return o, final_state
    @staticmethod
    @contiguous
    def backward(ctx, do, dht=None):
        g, o, initial_state = ctx.saved_tensors
        B, H, T, D = do.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4
        gc = torch.empty_like(g, dtype=torch.float)
        dx = torch.empty_like(o)
        dg = torch.empty_like(g)
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_bwd_kernel_h[grid](
            g, gc, dx, do,
            T, D,
            BT=BT
        )
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_bwd_kernel_o[grid](
            g, gc, o, dx, dg,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        if initial_state is not None:
            dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp()
        return dx, dg, None, None
def chunk_hgrn(
    x: torch.Tensor,
    g: torch.Tensor,
    initial_state: torch.Tensor = None,
    output_final_state: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
    if initial_state is not None:
        initial_state = initial_state.detach()
    o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state)
    return o, final_state
if __name__ == '__main__':
    import torch.nn.functional as F
    from fla.ops.hgrn.naive import naive_recurrent_hgrn
    from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn
    B, H, T, D = 8, 4, 512, 128
    dtype = torch.bfloat16
    torch.manual_seed(42)
    # [batch_size, n_heads, seq_len, d_head]
    x = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    g = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g)
    print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}')
    print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}')
    x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
    print(f"DTYPE:\t{x.dtype}")
    do = torch.randn_like(x)
    h0 = torch.randn_like(x[:, :, 0])
    ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True)
    ref.backward(do)
    ref_dx, x.grad = x.grad.clone(), None
    ref_dg, g.grad = g.grad.clone(), None
    tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True)
    tri.backward(do)
    tri_dx, x.grad = x.grad.clone(), None
    tri_dg, g.grad = g.grad.clone(), None
    print("  \t    DIFF\t    MAX")
    print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}")
    print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}")
    print('Done!')
    @triton.testing.perf_report(
        triton.testing.Benchmark(
            # argument names to use as an x-axis for the plot
            x_names=['seq_len'],
            # different possible values for `x_name`
            x_vals=[128 * 2 ** i for i in range(0, 8)],
            # argument name whose value corresponds to a different line in the plot
            line_arg='provider',
            # possible values for `line_arg`
            line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # label name for the lines
            line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # line styles
            styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')],
            ylabel="Execution Time (ms)",  # label name for the y-axis
            # name for the plot. Used also as a file name for saving the plot.
            plot_name="Performance",
            args={},
        )
    )
    def benchmark(seq_len, provider):
        dtype = torch.bfloat16
        B, H, D = 16, 4, 128
        x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda')
        g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid()
        x = (1 - g) * x
        x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
        do = torch.randn_like(x, dtype=dtype)
        quantiles = [0.5, 0.2, 0.8]
        results = 0, 0, 0
        if provider == 'chunk':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles)
        if provider == 'recurrent':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles)
        if provider == 'chunk_bwd':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        if provider == 'recurrent_bwd':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        return results
    benchmark.run(print_data=True)
 | 
| 
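As a quick illustration of the `chunk_hgrn` wrapper in the row above, here is a minimal forward-only usage sketch. The `fla.ops.hgrn.chunk` import path is an assumption (the file's own test imports sibling modules from `fla.ops.hgrn`), and the gate preprocessing simply mirrors the `__main__` block; this is not an official example from the repository.

import torch
import torch.nn.functional as F

from fla.ops.hgrn.chunk import chunk_hgrn  # assumed module path for the file above

B, H, T, D = 2, 4, 256, 64
x = torch.randn(B, H, T, D, dtype=torch.bfloat16, device='cuda')
g = torch.randn(B, H, T, D, dtype=torch.bfloat16, device='cuda')
# gates are consumed in log space; this mirrors the file's own test setup
x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g)
h0 = torch.randn(B, H, D, dtype=torch.bfloat16, device='cuda')

o, ht = chunk_hgrn(x, g, initial_state=h0, output_final_state=True)
print(o.shape, ht.shape)  # (B, H, T, D) and (B, H, D)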
	INT-FlashAttention2024/INT-FlashAttention | 
	flash_atten_full_int8.py | 
	https://github.com/INT-FlashAttention2024/INT-FlashAttention/blob/7f7bfb00bcd26b2cef49e7783f51ef610e05abf7/flash_atten_full_int8.py | 
	import pytest
import torch
import triton
import triton.language as tl
from configs import *
@triton.jit
def _attn_fwd_inner_full_int8(acc, l_i, m_i, q,  #
                    K_block_ptr, V_block_ptr,  #
                    q_scale, K_block_scale_ptr, v_scale,#
                    start_m, qk_scale,  #
                    BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr,  #
                    STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr,  #
                    N_CTX: tl.constexpr, fp8_v: tl.constexpr):
    # range of values handled by this stage
    if STAGE == 1:
        lo, hi = 0, start_m * BLOCK_M
    elif STAGE == 2:
        lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
        lo = tl.multiple_of(lo, BLOCK_M)
    # causal = False
    else:
        lo, hi = 0, N_CTX
    K_block_ptr = tl.advance(K_block_ptr, (0, lo))
    K_block_scale_ptr = tl.advance(K_block_scale_ptr, (lo,))
    V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
    # loop over k, v and update accumulator
    for start_n in range(lo, hi, BLOCK_N):
    # for start_n in range(0, 32, BLOCK_N):
        start_n = tl.multiple_of(start_n, BLOCK_N)
        # -- compute qk ----
        k = tl.load(K_block_ptr)
        k_scale = tl.load(K_block_scale_ptr)
        qk = tl.dot(q, k).to(tl.float32)
        qk = qk * q_scale[:, None]
        qk = qk * k_scale
        if STAGE == 2:
            mask = offs_m[:, None] >= (start_n + offs_n[None, :])
            qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
            m_ij = tl.maximum(m_i, tl.max(qk, 1))
            qk -= m_ij[:, None]
        else:
            m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
            qk = qk * qk_scale - m_ij[:, None]
        p = tl.math.exp2(qk)
        l_ij = tl.sum(p, 1)
        p = p.to(tl.float16)
        p = p * 127
        p = (p+0.5).to(tl.int8)
        # -- update m_i and l_i
        alpha = tl.math.exp2(m_i - m_ij)
        l_i = l_i * alpha + l_ij
        # -- update output accumulator --
        acc = acc * alpha[:, None]
        # update acc
        v = tl.load(V_block_ptr)
        tmp = tl.dot(p, v)
        tmp = tmp.to(tl.float32)
        tmp = tmp * v_scale / 127
        acc = acc + tmp
        # tmp = tl.dot(p, v)
        # tl.device_print("tmp", tmp)
        
        # update m_i and l_i
        m_i = m_ij
        V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
        K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
        K_block_scale_ptr = tl.advance(K_block_scale_ptr, (BLOCK_N,))
    return acc, l_i
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd_full_int8(Q, K, V, Q_scale, K_scale, V_scale, sm_scale, Out,  #
              stride_qz, stride_qh, stride_qm, stride_qk,  #
              stride_kz, stride_kh, stride_kn, stride_kk,  #
              stride_vz, stride_vh, stride_vk, stride_vn,  #
              stride_oz, stride_oh, stride_om, stride_on,  #
              stride_s1, stride_s2, stride_s3,  #
              stride_v1, stride_v2,  #
              Z, H, N_CTX,  #
              HEAD_DIM: tl.constexpr,  #
              BLOCK_M: tl.constexpr,  #
              BLOCK_N: tl.constexpr,  #
              STAGE: tl.constexpr  #
              ):
    tl.static_assert(BLOCK_N <= HEAD_DIM)
    start_m = tl.program_id(0)
    off_hz = tl.program_id(1)
    off_z = off_hz // H
    off_h = off_hz % H
    qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
    scl_offset = off_z.to(tl.int64) * stride_s1 + off_h.to(tl.int64) * stride_s2
    vscl_offset = off_z.to(tl.int64) * stride_v1 + off_h.to(tl.int64) * stride_v2
    # block pointers
    Q_block_ptr = tl.make_block_ptr(
        base=Q + qvk_offset,
        shape=(N_CTX, HEAD_DIM),
        strides=(stride_qm, stride_qk),
        offsets=(start_m * BLOCK_M, 0),
        block_shape=(BLOCK_M, HEAD_DIM),
        order=(1, 0),
    )
    V_block_ptr = tl.make_block_ptr(
        base=V + qvk_offset,
        shape=(N_CTX, HEAD_DIM),
        strides=(stride_vk, stride_vn),
        offsets=(0, 0),
        block_shape=(BLOCK_N, HEAD_DIM),
        order=(1, 0),
    )
    K_block_ptr = tl.make_block_ptr(
        base=K + qvk_offset,
        shape=(HEAD_DIM, N_CTX),
        strides=(stride_kk, stride_kn),
        offsets=(0, 0),
        block_shape=(HEAD_DIM, BLOCK_N),
        order=(0, 1),
    )
    O_block_ptr = tl.make_block_ptr(
        base=Out + qvk_offset,
        shape=(N_CTX, HEAD_DIM),
        strides=(stride_om, stride_on),
        offsets=(start_m * BLOCK_M, 0),
        block_shape=(BLOCK_M, HEAD_DIM),
        order=(1, 0),
    )
    # scale vector pointers
    Q_block_scale_ptr = tl.make_block_ptr(
        base=Q_scale + scl_offset,
        shape=(N_CTX,),
        strides=(stride_s3,),
        offsets=(start_m * BLOCK_M,),
        block_shape=(BLOCK_M,),
        order=(0,),
    )
    K_block_scale_ptr = tl.make_block_ptr(
        base=K_scale + scl_offset,
        shape=(N_CTX,),
        strides=(stride_s3,),
        offsets=(0,),
        block_shape=(BLOCK_N,),
        order=(0,),
    )
    # initialize offsets
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    # initialize pointer to m and l
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
    acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
    # load scales
    qk_scale = sm_scale
    qk_scale *= 1.44269504  # 1/log(2)
    # load q: it will stay in SRAM throughout
    q = tl.load(Q_block_ptr)
    q_scale = tl.load(Q_block_scale_ptr)
    v_scale = tl.load(V_scale + vscl_offset)
    # stage 1: off-band
    # For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
    # For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
    if STAGE & 1:
        acc, l_i = _attn_fwd_inner_full_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, v_scale, #
                                        start_m, qk_scale,  #
                                        BLOCK_M, HEAD_DIM, BLOCK_N,  #
                                        4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5  #
                                        )
    # stage 2: on-band
    if STAGE & 2:
        # barrier makes it easier for the compiler to schedule the
        # two loops independently
        acc, l_i = _attn_fwd_inner_full_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, v_scale, #
                                        start_m, qk_scale,  #
                                        BLOCK_M, HEAD_DIM, BLOCK_N,  #
                                        2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5  #
                                        )
    # epilogue
    acc = acc / l_i[:, None]
    tl.store(O_block_ptr, acc.to(Out.type.element_ty))
class _attention_full_int8(torch.autograd.Function):
    @staticmethod # q, k, v: int8,  q_scale, k_scale: float16
    def forward(ctx, q, k, v, q_scale, k_scale, v_scale, causal, sm_scale):
        # shape constraints
        HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
        # when v is in float8_e5m2 it is transposed.
        HEAD_DIM_V = v.shape[-1]
        assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
        assert HEAD_DIM_K in {16, 32, 64, 128, 256}
        o = torch.empty_like(q)
        o = o.to(torch.float16)
        stage = 3 if causal else 1
        extra_kern_args = {}
        # Tuning for AMD target
        if is_hip():
            waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
            extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
        grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
        _attn_fwd_full_int8[grid](
            q, k, v, q_scale, k_scale, v_scale, sm_scale, o,  #
            q.stride(0), q.stride(1), q.stride(2), q.stride(3),  #
            k.stride(0), k.stride(1), k.stride(2), k.stride(3),  #
            v.stride(0), v.stride(1), v.stride(2), v.stride(3),  #
            o.stride(0), o.stride(1), o.stride(2), o.stride(3),  #
            q_scale.stride(0), q_scale.stride(1), q_scale.stride(2),  #
            v_scale.stride(0), v_scale.stride(1), #
            q.shape[0], q.shape[1],  #
            N_CTX=q.shape[2],  #
            HEAD_DIM=HEAD_DIM_K,  #
            STAGE=stage,  #
            **extra_kern_args)
        ctx.sm_scale = sm_scale
        ctx.HEAD_DIM = HEAD_DIM_K
        ctx.causal = causal
        return o
attention_full_int8 = _attention_full_int8.apply | 
	@triton.jit
def _attn_fwd_inner_full_int8(acc, l_i, m_i, q,  #
                    K_block_ptr, V_block_ptr,  #
                    q_scale, K_block_scale_ptr, v_scale,#
                    start_m, qk_scale,  #
                    BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr,  #
                    STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr,  #
                    N_CTX: tl.constexpr, fp8_v: tl.constexpr):
    # range of values handled by this stage
    if STAGE == 1:
        lo, hi = 0, start_m * BLOCK_M
    elif STAGE == 2:
        lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
        lo = tl.multiple_of(lo, BLOCK_M)
    # causal = False
    else:
        lo, hi = 0, N_CTX
    K_block_ptr = tl.advance(K_block_ptr, (0, lo))
    K_block_scale_ptr = tl.advance(K_block_scale_ptr, (lo,))
    V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
    # loop over k, v and update accumulator
    for start_n in range(lo, hi, BLOCK_N):
    # for start_n in range(0, 32, BLOCK_N):
        start_n = tl.multiple_of(start_n, BLOCK_N)
        # -- compute qk ----
        k = tl.load(K_block_ptr)
        k_scale = tl.load(K_block_scale_ptr)
        qk = tl.dot(q, k).to(tl.float32)
        qk = qk * q_scale[:, None]
        qk = qk * k_scale
        if STAGE == 2:
            mask = offs_m[:, None] >= (start_n + offs_n[None, :])
            qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
            m_ij = tl.maximum(m_i, tl.max(qk, 1))
            qk -= m_ij[:, None]
        else:
            m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
            qk = qk * qk_scale - m_ij[:, None]
        p = tl.math.exp2(qk)
        l_ij = tl.sum(p, 1)
        p = p.to(tl.float16)
        p = p * 127
        p = (p+0.5).to(tl.int8)
        # -- update m_i and l_i
        alpha = tl.math.exp2(m_i - m_ij)
        l_i = l_i * alpha + l_ij
        # -- update output accumulator --
        acc = acc * alpha[:, None]
        # update acc
        v = tl.load(V_block_ptr)
        tmp = tl.dot(p, v)
        tmp = tmp.to(tl.float32)
        tmp = tmp * v_scale / 127
        acc = acc + tmp
        # tmp = tl.dot(p, v)
        # tl.device_print("tmp", tmp)
        
        # update m_i and l_i
        m_i = m_ij
        V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
        K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
        K_block_scale_ptr = tl.advance(K_block_scale_ptr, (BLOCK_N,))
    return acc, l_i
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
 | 
| 
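`_attention_full_int8.forward` above expects already-quantized int8 tensors plus scale tensors whose strides suggest per-token scales for q/k and a per-head scale for v. The helper below is a hedged sketch of that preparation; the shapes `(B, H, N_CTX)` and `(B, H)` and the symmetric absmax scheme are assumptions inferred from how the kernel applies `q_scale`, `k_scale` and `v_scale`, not the repository's own quantization code.

import torch

def quantize_per_token(x):
    # x: (B, H, N_CTX, HEAD_DIM) float; one scale per (batch, head, token)
    scale = (x.abs().amax(dim=-1) / 127.0).clamp_min(1e-6)        # (B, H, N_CTX)
    q = torch.round(x / scale[..., None]).clamp(-127, 127).to(torch.int8)
    return q, scale.to(torch.float16)

def quantize_per_head(x):
    # x: (B, H, N_CTX, HEAD_DIM) float; one scale per (batch, head)
    scale = (x.abs().amax(dim=(-1, -2)) / 127.0).clamp_min(1e-6)  # (B, H)
    q = torch.round(x / scale[..., None, None]).clamp(-127, 127).to(torch.int8)
    return q, scale.to(torch.float16)

B, H, N_CTX, HEAD_DIM = 1, 2, 1024, 64
qf = torch.randn(B, H, N_CTX, HEAD_DIM, device='cuda', dtype=torch.float16)
kf, vf = torch.randn_like(qf), torch.randn_like(qf)
q, q_scale = quantize_per_token(qf)
k, k_scale = quantize_per_token(kf)
v, v_scale = quantize_per_head(vf)
out = attention_full_int8(q, k, v, q_scale, k_scale, v_scale, True, HEAD_DIM ** -0.5)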
	INT-FlashAttention2024/INT-FlashAttention | 
	flash_atten_full_int8.py | 
	https://github.com/INT-FlashAttention2024/INT-FlashAttention/blob/7f7bfb00bcd26b2cef49e7783f51ef610e05abf7/flash_atten_full_int8.py | 
	import pytest
import torch
import triton
import triton.language as tl
from configs import *
@triton.jit
def _attn_fwd_inner_full_int8(acc, l_i, m_i, q,  #
                    K_block_ptr, V_block_ptr,  #
                    q_scale, K_block_scale_ptr, v_scale,#
                    start_m, qk_scale,  #
                    BLOCK_M: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_N: tl.constexpr,  #
                    STAGE: tl.constexpr, offs_m: tl.constexpr, offs_n: tl.constexpr,  #
                    N_CTX: tl.constexpr, fp8_v: tl.constexpr):
    # range of values handled by this stage
    if STAGE == 1:
        lo, hi = 0, start_m * BLOCK_M
    elif STAGE == 2:
        lo, hi = start_m * BLOCK_M, (start_m + 1) * BLOCK_M
        lo = tl.multiple_of(lo, BLOCK_M)
    # causal = False
    else:
        lo, hi = 0, N_CTX
    K_block_ptr = tl.advance(K_block_ptr, (0, lo))
    K_block_scale_ptr = tl.advance(K_block_scale_ptr, (lo,))
    V_block_ptr = tl.advance(V_block_ptr, (lo, 0))
    # loop over k, v and update accumulator
    for start_n in range(lo, hi, BLOCK_N):
    # for start_n in range(0, 32, BLOCK_N):
        start_n = tl.multiple_of(start_n, BLOCK_N)
        # -- compute qk ----
        k = tl.load(K_block_ptr)
        k_scale = tl.load(K_block_scale_ptr)
        qk = tl.dot(q, k).to(tl.float32)
        qk = qk * q_scale[:, None]
        qk = qk * k_scale
        if STAGE == 2:
            mask = offs_m[:, None] >= (start_n + offs_n[None, :])
            qk = qk * qk_scale + tl.where(mask, 0, -1.0e6)
            m_ij = tl.maximum(m_i, tl.max(qk, 1))
            qk -= m_ij[:, None]
        else:
            m_ij = tl.maximum(m_i, tl.max(qk, 1) * qk_scale)
            qk = qk * qk_scale - m_ij[:, None]
        p = tl.math.exp2(qk)
        l_ij = tl.sum(p, 1)
        p = p.to(tl.float16)
        p = p * 127
        p = (p+0.5).to(tl.int8)
        # -- update m_i and l_i
        alpha = tl.math.exp2(m_i - m_ij)
        l_i = l_i * alpha + l_ij
        # -- update output accumulator --
        acc = acc * alpha[:, None]
        # update acc
        v = tl.load(V_block_ptr)
        tmp = tl.dot(p, v)
        tmp = tmp.to(tl.float32)
        tmp = tmp * v_scale / 127
        acc = acc + tmp
        # tmp = tl.dot(p, v)
        # tl.device_print("tmp", tmp)
        
        # update m_i and l_i
        m_i = m_ij
        V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
        K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
        K_block_scale_ptr = tl.advance(K_block_scale_ptr, (BLOCK_N,))
    return acc, l_i
@triton.autotune(list(filter(keep, configs)), key=["N_CTX", "HEAD_DIM"])
@triton.jit
def _attn_fwd_full_int8(Q, K, V, Q_scale, K_scale, V_scale, sm_scale, Out,  #
              stride_qz, stride_qh, stride_qm, stride_qk,  #
              stride_kz, stride_kh, stride_kn, stride_kk,  #
              stride_vz, stride_vh, stride_vk, stride_vn,  #
              stride_oz, stride_oh, stride_om, stride_on,  #
              stride_s1, stride_s2, stride_s3,  #
              stride_v1, stride_v2,  #
              Z, H, N_CTX,  #
              HEAD_DIM: tl.constexpr,  #
              BLOCK_M: tl.constexpr,  #
              BLOCK_N: tl.constexpr,  #
              STAGE: tl.constexpr  #
              ):
    tl.static_assert(BLOCK_N <= HEAD_DIM)
    start_m = tl.program_id(0)
    off_hz = tl.program_id(1)
    off_z = off_hz // H
    off_h = off_hz % H
    qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
    scl_offset = off_z.to(tl.int64) * stride_s1 + off_h.to(tl.int64) * stride_s2
    vscl_offset = off_z.to(tl.int64) * stride_v1 + off_h.to(tl.int64) * stride_v2
    # block pointers
    Q_block_ptr = tl.make_block_ptr(
        base=Q + qvk_offset,
        shape=(N_CTX, HEAD_DIM),
        strides=(stride_qm, stride_qk),
        offsets=(start_m * BLOCK_M, 0),
        block_shape=(BLOCK_M, HEAD_DIM),
        order=(1, 0),
    )
    V_block_ptr = tl.make_block_ptr(
        base=V + qvk_offset,
        shape=(N_CTX, HEAD_DIM),
        strides=(stride_vk, stride_vn),
        offsets=(0, 0),
        block_shape=(BLOCK_N, HEAD_DIM),
        order=(1, 0),
    )
    K_block_ptr = tl.make_block_ptr(
        base=K + qvk_offset,
        shape=(HEAD_DIM, N_CTX),
        strides=(stride_kk, stride_kn),
        offsets=(0, 0),
        block_shape=(HEAD_DIM, BLOCK_N),
        order=(0, 1),
    )
    O_block_ptr = tl.make_block_ptr(
        base=Out + qvk_offset,
        shape=(N_CTX, HEAD_DIM),
        strides=(stride_om, stride_on),
        offsets=(start_m * BLOCK_M, 0),
        block_shape=(BLOCK_M, HEAD_DIM),
        order=(1, 0),
    )
    # scale vector pointers
    Q_block_scale_ptr = tl.make_block_ptr(
        base=Q_scale + scl_offset,
        shape=(N_CTX,),
        strides=(stride_s3,),
        offsets=(start_m * BLOCK_M,),
        block_shape=(BLOCK_M,),
        order=(0,),
    )
    K_block_scale_ptr = tl.make_block_ptr(
        base=K_scale + scl_offset,
        shape=(N_CTX,),
        strides=(stride_s3,),
        offsets=(0,),
        block_shape=(BLOCK_N,),
        order=(0,),
    )
    # initialize offsets
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    # initialize pointer to m and l
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
    acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
    # load scales
    qk_scale = sm_scale
    qk_scale *= 1.44269504  # 1/log(2)
    # load q: it will stay in SRAM throughout
    q = tl.load(Q_block_ptr)
    q_scale = tl.load(Q_block_scale_ptr)
    v_scale = tl.load(V_scale + vscl_offset)
    # stage 1: off-band
    # For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
    # For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
    if STAGE & 1:
        acc, l_i = _attn_fwd_inner_full_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, v_scale, #
                                        start_m, qk_scale,  #
                                        BLOCK_M, HEAD_DIM, BLOCK_N,  #
                                        4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5  #
                                        )
    # stage 2: on-band
    if STAGE & 2:
        # barrier makes it easier for the compiler to schedule the
        # two loops independently
        acc, l_i = _attn_fwd_inner_full_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, v_scale, #
                                        start_m, qk_scale,  #
                                        BLOCK_M, HEAD_DIM, BLOCK_N,  #
                                        2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5  #
                                        )
    # epilogue
    acc = acc / l_i[:, None]
    tl.store(O_block_ptr, acc.to(Out.type.element_ty))
class _attention_full_int8(torch.autograd.Function):
    @staticmethod # q, k, v: int8,  q_scale, k_scale: float16
    def forward(ctx, q, k, v, q_scale, k_scale, v_scale, causal, sm_scale):
        # shape constraints
        HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
        # when v is in float8_e5m2 it is transposed.
        HEAD_DIM_V = v.shape[-1]
        assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
        assert HEAD_DIM_K in {16, 32, 64, 128, 256}
        o = torch.empty_like(q)
        o = o.to(torch.float16)
        stage = 3 if causal else 1
        extra_kern_args = {}
        # Tuning for AMD target
        if is_hip():
            waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
            extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
        grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
        _attn_fwd_full_int8[grid](
            q, k, v, q_scale, k_scale, v_scale, sm_scale, o,  #
            q.stride(0), q.stride(1), q.stride(2), q.stride(3),  #
            k.stride(0), k.stride(1), k.stride(2), k.stride(3),  #
            v.stride(0), v.stride(1), v.stride(2), v.stride(3),  #
            o.stride(0), o.stride(1), o.stride(2), o.stride(3),  #
            q_scale.stride(0), q_scale.stride(1), q_scale.stride(2),  #
            v_scale.stride(0), v_scale.stride(1), #
            q.shape[0], q.shape[1],  #
            N_CTX=q.shape[2],  #
            HEAD_DIM=HEAD_DIM_K,  #
            STAGE=stage,  #
            **extra_kern_args)
        ctx.sm_scale = sm_scale
        ctx.HEAD_DIM = HEAD_DIM_K
        ctx.causal = causal
        return o
attention_full_int8 = _attention_full_int8.apply | 
	@triton.jit
def _attn_fwd_full_int8(Q, K, V, Q_scale, K_scale, V_scale, sm_scale, Out,  #
              stride_qz, stride_qh, stride_qm, stride_qk,  #
              stride_kz, stride_kh, stride_kn, stride_kk,  #
              stride_vz, stride_vh, stride_vk, stride_vn,  #
              stride_oz, stride_oh, stride_om, stride_on,  #
              stride_s1, stride_s2, stride_s3,  #
              stride_v1, stride_v2,  #
              Z, H, N_CTX,  #
              HEAD_DIM: tl.constexpr,  #
              BLOCK_M: tl.constexpr,  #
              BLOCK_N: tl.constexpr,  #
              STAGE: tl.constexpr  #
              ):
    tl.static_assert(BLOCK_N <= HEAD_DIM)
    start_m = tl.program_id(0)
    off_hz = tl.program_id(1)
    off_z = off_hz // H
    off_h = off_hz % H
    qvk_offset = off_z.to(tl.int64) * stride_qz + off_h.to(tl.int64) * stride_qh
    scl_offset = off_z.to(tl.int64) * stride_s1 + off_h.to(tl.int64) * stride_s2
    vscl_offset = off_z.to(tl.int64) * stride_v1 + off_h.to(tl.int64) * stride_v2
    # block pointers
    Q_block_ptr = tl.make_block_ptr(
        base=Q + qvk_offset,
        shape=(N_CTX, HEAD_DIM),
        strides=(stride_qm, stride_qk),
        offsets=(start_m * BLOCK_M, 0),
        block_shape=(BLOCK_M, HEAD_DIM),
        order=(1, 0),
    )
    V_block_ptr = tl.make_block_ptr(
        base=V + qvk_offset,
        shape=(N_CTX, HEAD_DIM),
        strides=(stride_vk, stride_vn),
        offsets=(0, 0),
        block_shape=(BLOCK_N, HEAD_DIM),
        order=(1, 0),
    )
    K_block_ptr = tl.make_block_ptr(
        base=K + qvk_offset,
        shape=(HEAD_DIM, N_CTX),
        strides=(stride_kk, stride_kn),
        offsets=(0, 0),
        block_shape=(HEAD_DIM, BLOCK_N),
        order=(0, 1),
    )
    O_block_ptr = tl.make_block_ptr(
        base=Out + qvk_offset,
        shape=(N_CTX, HEAD_DIM),
        strides=(stride_om, stride_on),
        offsets=(start_m * BLOCK_M, 0),
        block_shape=(BLOCK_M, HEAD_DIM),
        order=(1, 0),
    )
    # scale vector pointers
    Q_block_scale_ptr = tl.make_block_ptr(
        base=Q_scale + scl_offset,
        shape=(N_CTX,),
        strides=(stride_s3,),
        offsets=(start_m * BLOCK_M,),
        block_shape=(BLOCK_M,),
        order=(0,),
    )
    K_block_scale_ptr = tl.make_block_ptr(
        base=K_scale + scl_offset,
        shape=(N_CTX,),
        strides=(stride_s3,),
        offsets=(0,),
        block_shape=(BLOCK_N,),
        order=(0,),
    )
    # initialize offsets
    offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = tl.arange(0, BLOCK_N)
    # initialize pointer to m and l
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    l_i = tl.zeros([BLOCK_M], dtype=tl.float32) + 1.0
    acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32)
    # load scales
    qk_scale = sm_scale
    qk_scale *= 1.44269504  # 1/log(2)
    # load q: it will stay in SRAM throughout
    q = tl.load(Q_block_ptr)
    q_scale = tl.load(Q_block_scale_ptr)
    v_scale = tl.load(V_scale + vscl_offset)
    # stage 1: off-band
    # For causal = True, STAGE = 3 and _attn_fwd_inner gets 1 as its STAGE
    # For causal = False, STAGE = 1, and _attn_fwd_inner gets 3 as its STAGE
    if STAGE & 1:
        acc, l_i = _attn_fwd_inner_full_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, v_scale, #
                                        start_m, qk_scale,  #
                                        BLOCK_M, HEAD_DIM, BLOCK_N,  #
                                        4 - STAGE, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5  #
                                        )
    # stage 2: on-band
    if STAGE & 2:
        # barrier makes it easier for the compiler to schedule the
        # two loops independently
        acc, l_i = _attn_fwd_inner_full_int8(acc, l_i, m_i, q, K_block_ptr, V_block_ptr, q_scale, K_block_scale_ptr, v_scale, #
                                        start_m, qk_scale,  #
                                        BLOCK_M, HEAD_DIM, BLOCK_N,  #
                                        2, offs_m, offs_n, N_CTX, V.dtype.element_ty == tl.float8e5  #
                                        )
    # epilogue
    acc = acc / l_i[:, None]
    tl.store(O_block_ptr, acc.to(Out.type.element_ty))
class _attention_full_int8(torch.autograd.Function):
    @staticmethod # q, k, v: int8,  q_scale, k_scale: float16
    def forward(ctx, q, k, v, q_scale, k_scale, v_scale, causal, sm_scale):
        # shape constraints
        HEAD_DIM_Q, HEAD_DIM_K = q.shape[-1], k.shape[-1]
        # when v is in float8_e5m2 it is transposed.
        HEAD_DIM_V = v.shape[-1]
        assert HEAD_DIM_Q == HEAD_DIM_K and HEAD_DIM_K == HEAD_DIM_V
        assert HEAD_DIM_K in {16, 32, 64, 128, 256}
        o = torch.empty_like(q)
        o = o.to(torch.float16)
        stage = 3 if causal else 1
        extra_kern_args = {}
        # Tuning for AMD target
        if is_hip():
            waves_per_eu = 3 if HEAD_DIM_K <= 64 else 2
            extra_kern_args = {"waves_per_eu": waves_per_eu, "allow_flush_denorm": True}
        grid = lambda args: (triton.cdiv(q.shape[2], args["BLOCK_M"]), q.shape[0] * q.shape[1], 1)
        _attn_fwd_full_int8[grid](
            q, k, v, q_scale, k_scale, v_scale, sm_scale, o,  #
            q.stride(0), q.stride(1), q.stride(2), q.stride(3),  #
            k.stride(0), k.stride(1), k.stride(2), k.stride(3),  #
            v.stride(0), v.stride(1), v.stride(2), v.stride(3),  #
            o.stride(0), o.stride(1), o.stride(2), o.stride(3),  #
            q_scale.stride(0), q_scale.stride(1), q_scale.stride(2),  #
            v_scale.stride(0), v_scale.stride(1), #
            q.shape[0], q.shape[1],  #
            N_CTX=q.shape[2],  #
            HEAD_DIM=HEAD_DIM_K,  #
            STAGE=stage,  #
            **extra_kern_args)
        ctx.sm_scale = sm_scale
        ctx.HEAD_DIM = HEAD_DIM_K
        ctx.causal = causal
        return o
attention_full_int8 = _attention_full_int8.apply | 
| 
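The inner loop in the rows above is the standard online-softmax recurrence (running max `m_i`, running normalizer `l_i`, rescaled accumulator `acc`) with int8 scaling layered on top. The plain PyTorch sketch below shows just that recurrence, ignoring quantization and causal masking; it is a didactic reference for reading the kernel, not code from the repository.

import torch

def online_softmax_attention(q, k, v, block=64):
    # q: (M, D), k/v: (N, D); float32, non-causal, for clarity only
    M, D = q.shape
    m_i = torch.full((M,), float('-inf'))
    l_i = torch.zeros(M)
    acc = torch.zeros(M, v.shape[1])
    for s in range(0, k.shape[0], block):
        kb, vb = k[s:s + block], v[s:s + block]
        qk = q @ kb.T / D ** 0.5
        m_new = torch.maximum(m_i, qk.max(dim=1).values)   # running max
        p = torch.exp(qk - m_new[:, None])
        alpha = torch.exp(m_i - m_new)                      # rescale old state
        l_i = l_i * alpha + p.sum(dim=1)
        acc = acc * alpha[:, None] + p @ vb
        m_i = m_new
    return acc / l_i[:, None]

q, k, v = (torch.randn(128, 64) for _ in range(3))
ref = torch.softmax(q @ k.T / 64 ** 0.5, dim=-1) @ v
print(torch.allclose(online_softmax_attention(q, k, v), ref, atol=1e-5))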
	TD87/triton-kernels | 
	gemm_matmul.py | 
	https://github.com/TD87/triton-kernels/blob/17a97ede7b6d0ca7356db68b56d0e5b6a9080ad4/gemm_matmul.py | 
	import math
import torch # type: ignore
import triton # type: ignore
import triton.language as tl # type: ignore
@triton.jit()
def matmul_kernel(x_ptr, y_ptr, out_ptr, M, N, K, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, 
                  BLOCK_K: tl.constexpr):
    pid_r = tl.program_id(0)
    pid_c = tl.program_id(1)
    row_start = pid_r * BLOCK_M
    row_offsets = row_start + tl.arange(0, BLOCK_M)
    col_start = pid_c * BLOCK_N
    col_offsets = col_start + tl.arange(0, BLOCK_N)
    out = tl.zeros((BLOCK_M, BLOCK_N), dtype = tl.float32)
    for k in tl.range(0, K, BLOCK_K):
        k_offsets = k + tl.arange(0, BLOCK_K)
        row = row_offsets[:, None] * K + k_offsets[None, :]
        mask = (row_offsets[:, None] < M) & (k_offsets[None, :] < K)
        x = tl.load(x_ptr + row, mask = mask)
        col = col_offsets[None, :] + k_offsets[:, None] * N
        mask = (col_offsets[None, :] < N) & (k_offsets[:, None] < K)
        y = tl.load(y_ptr + col, mask = mask)
        out = tl.dot(x, y, out)
    out_offsets = row_offsets[:, None] * N + col_offsets[None, :]
    mask = (row_offsets[:, None] < M) & (col_offsets[None, :] < N)
    tl.store(out_ptr + out_offsets, out, mask = mask)
def matmul(x, y, BLOCK_M = 128, BLOCK_N = 64, BLOCK_K = 64):
    M, K = x.size()
    N = y.size(1)
    assert K == y.size(0)
    out = torch.empty(M, N, device = 'cuda', dtype = torch.float32)
    grid = (math.ceil(M / BLOCK_M), math.ceil(N / BLOCK_N))
    matmul_kernel[grid](x, y, out, M, N, K, BLOCK_M, BLOCK_N, BLOCK_K)
    return out
@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names = ["M", "N", "K"],
        x_vals = [128 * i for i in range(2, 33)],
        line_arg = "provider",
        line_vals = ["triton", "torch"],
        line_names = ["Triton", "Torch"],
        styles = [("green", "-"), ("blue", "-")],
        ylabel = "TFLOPS",
        plot_name = "matmul-performance",
        args = {},
    ))
def benchmark(M, N, K, provider):
    x = torch.randn(M, K, device = 'cuda', dtype = torch.float32)
    y = torch.randn(K, N, device = 'cuda', dtype = torch.float32)
    if provider == "torch":
        ms = triton.testing.do_bench(lambda: torch.matmul(x, y))
    else:
        ms = triton.testing.do_bench(lambda: matmul(x, y))
    tflops = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)
    return tflops(ms)
benchmark.run(print_data = True, save_path = "plots") | 
	@triton.jit()
def matmul_kernel(x_ptr, y_ptr, out_ptr, M, N, K, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, 
                  BLOCK_K: tl.constexpr):
    pid_r = tl.program_id(0)
    pid_c = tl.program_id(1)
    row_start = pid_r * BLOCK_M
    row_offsets = row_start + tl.arange(0, BLOCK_M)
    col_start = pid_c * BLOCK_N
    col_offsets = col_start + tl.arange(0, BLOCK_N)
    out = tl.zeros((BLOCK_M, BLOCK_N), dtype = tl.float32)
    for k in tl.range(0, K, BLOCK_K):
        k_offsets = k + tl.arange(0, BLOCK_K)
        row = row_offsets[:, None] * K + k_offsets[None, :]
        mask = (row_offsets[:, None] < M) & (k_offsets[None, :] < K)
        x = tl.load(x_ptr + row, mask = mask)
        col = col_offsets[None, :] + k_offsets[:, None] * N
        mask = (col_offsets[None, :] < N) & (k_offsets[:, None] < K)
        y = tl.load(y_ptr + col, mask = mask)
        out = tl.dot(x, y, out)
    out_offsets = row_offsets[:, None] * N + col_offsets[None, :]
    mask = (row_offsets[:, None] < M) & (col_offsets[None, :] < N)
    tl.store(out_ptr + out_offsets, out, mask = mask)
def matmul(x, y, BLOCK_M = 128, BLOCK_N = 64, BLOCK_K = 64):
    M, K = x.size()
    N = y.size(1)
    assert K == y.size(0)
    out = torch.empty(M, N, device = 'cuda', dtype = torch.float32)
    grid = (math.ceil(M / BLOCK_M), math.ceil(N / BLOCK_N))
    matmul_kernel[grid](x, y, out, M, N, K, BLOCK_M, BLOCK_N, BLOCK_K)
    return out
@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names = ["M", "N", "K"],
        x_vals = [128 * i for i in range(2, 33)],
        line_arg = "provider",
        line_vals = ["triton", "torch"],
        line_names = ["Triton", "Torch"],
        styles = [("green", "-"), ("blue", "-")],
        ylabel = "TFLOPS",
        plot_name = "matmul-performance",
        args = {},
    ))
def benchmark(M, N, K, provider):
    x = torch.randn(M, K, device = 'cuda', dtype = torch.float32)
    y = torch.randn(K, N, device = 'cuda', dtype = torch.float32)
    if provider == "torch":
        ms = triton.testing.do_bench(lambda: torch.matmul(x, y))
    else:
        ms = triton.testing.do_bench(lambda: matmul(x, y))
    tflops = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3)
    return tflops(ms)
benchmark.run(print_data = True, save_path = "plots") | 
| 
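A small correctness check for the `matmul` wrapper in the row above against `torch.matmul`. The shapes and the loose tolerance are illustrative only; `tl.dot` may use TF32 for float32 inputs on recent GPUs, so an exact match should not be expected.

import torch

M, N, K = 512, 384, 256
x = torch.randn(M, K, device='cuda', dtype=torch.float32)
y = torch.randn(K, N, device='cuda', dtype=torch.float32)
out = matmul(x, y)              # Triton kernel defined in the row above
ref = torch.matmul(x, y)
print(torch.allclose(out, ref, atol=1e-2, rtol=1e-2))
print((out - ref).abs().max())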
	xiaonans/triton-gemm-benchmark | 
	kernels/basic_matmul.py | 
	https://github.com/xiaonans/triton-gemm-benchmark/blob/436ee5a77e01ede7e4a1fe015f533dfdc53b31d3/kernels/basic_matmul.py | 
	import triton
import triton.language as tl
import torch
from .autotune_config import get_autotune_config
# `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes:
#   - A list of `triton.Config` objects that define different configurations of
#       meta-parameters (e.g., `BLOCK_SIZE_M`) and compilation options (e.g., `num_warps`) to try
#   - An auto-tuning *key* whose change in values will trigger evaluation of all the
#       provided configs
@triton.autotune(
    configs=get_autotune_config(),
    key=['M', 'N', 'K'],
)
@triton.jit
def matmul_kernel(
        # Pointers to matrices
        a_ptr, b_ptr, c_ptr,
        # Matrix dimensions
        M, N, K,
        # The stride variables represent how much to increase the ptr by when moving by 1
        # element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
        # by to get the element one row down (A has M rows).
        stride_am, stride_ak,  #
        stride_bk, stride_bn,  #
        stride_cm, stride_cn,
        # Meta-parameters
        BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,  #
        GROUP_SIZE_M: tl.constexpr,  #
        ACTIVATION: tl.constexpr  #
):
    """Kernel for computing the matmul C = A x B.
    A has shape (M, K), B has shape (K, N) and C has shape (M, N)
    """
    # -----------------------------------------------------------
    # Map program ids `pid` to the block of C it should compute.
    # This is done in a grouped ordering to promote L2 data reuse.
    # See above `L2 Cache Optimizations` section for details.
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    # ----------------------------------------------------------
    # Create pointers for the first blocks of A and B.
    # We will advance this pointer as we move in the K direction
    # and accumulate
    # `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
    # `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
    # See above `Pointer Arithmetic` section for details
    offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
    offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
    b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
    # -----------------------------------------------------------
    # Iterate to compute a block of the C matrix.
    # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
    # of fp32 values for higher accuracy.
    # `accumulator` will be converted back to fp16 after the loop.
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
        # Load the next block of A and B, generate a mask by checking the K dimension.
        # If it is out of bounds, set it to 0.
        a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
        b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
        # We accumulate along the K dimension.
        accumulator = tl.dot(a, b, accumulator)
        # Advance the ptrs to the next K block.
        a_ptrs += BLOCK_SIZE_K * stride_ak
        b_ptrs += BLOCK_SIZE_K * stride_bk
    # You can fuse arbitrary activation functions here
    # while the accumulator is still in FP32!
    if ACTIVATION == "leaky_relu":
        accumulator = leaky_relu(accumulator)
    c = accumulator.to(tl.float16)
    # -----------------------------------------------------------
    # Write back the block of the output matrix C with masks.
    offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
    c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
    tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul_kernel`.
@triton.jit
def leaky_relu(x):
    return tl.where(x >= 0, x, 0.01 * x)
def matmul(a, b, activation=""):
    # Check constraints.
    assert a.shape[1] == b.shape[0], "Incompatible dimensions"
    assert a.is_contiguous(), "Matrix A must be contiguous"
    M, K = a.shape
    K, N = b.shape
    # Allocates output.
    c = torch.empty((M, N), device=a.device, dtype=torch.float16)
    # 1D launch kernel where each block gets its own program.
    grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), )
    matmul_kernel[grid](
        a, b, c,  #
        M, N, K,  #
        a.stride(0), a.stride(1),  #
        b.stride(0), b.stride(1),  #
        c.stride(0), c.stride(1),  #
        ACTIVATION=activation  #
    )
    return c | 
	@triton.jit
def matmul_kernel(
        # Pointers to matrices
        a_ptr, b_ptr, c_ptr,
        # Matrix dimensions
        M, N, K,
        # The stride variables represent how much to increase the ptr by when moving by 1
        # element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
        # by to get the element one row down (A has M rows).
        stride_am, stride_ak,  #
        stride_bk, stride_bn,  #
        stride_cm, stride_cn,
        # Meta-parameters
        BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,  #
        GROUP_SIZE_M: tl.constexpr,  #
        ACTIVATION: tl.constexpr  #
):
    """Kernel for computing the matmul C = A x B.
    A has shape (M, K), B has shape (K, N) and C has shape (M, N)
    """
    # -----------------------------------------------------------
    # Map program ids `pid` to the block of C it should compute.
    # This is done in a grouped ordering to promote L2 data reuse.
    # See above `L2 Cache Optimizations` section for details.
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    # ----------------------------------------------------------
    # Create pointers for the first blocks of A and B.
    # We will advance this pointer as we move in the K direction
    # and accumulate
    # `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
    # `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
    # See above `Pointer Arithmetic` section for details
    offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
    offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
    b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
    # -----------------------------------------------------------
    # Iterate to compute a block of the C matrix.
    # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
    # of fp32 values for higher accuracy.
    # `accumulator` will be converted back to fp16 after the loop.
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
        # Load the next block of A and B, generate a mask by checking the K dimension.
        # If it is out of bounds, set it to 0.
        a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
        b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
        # We accumulate along the K dimension.
        accumulator = tl.dot(a, b, accumulator)
        # Advance the ptrs to the next K block.
        a_ptrs += BLOCK_SIZE_K * stride_ak
        b_ptrs += BLOCK_SIZE_K * stride_bk
    # You can fuse arbitrary activation functions here
    # while the accumulator is still in FP32!
    if ACTIVATION == "leaky_relu":
        accumulator = leaky_relu(accumulator)
    c = accumulator.to(tl.float16)
    # -----------------------------------------------------------
    # Write back the block of the output matrix C with masks.
    offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
    c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
    tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul_kernel`.
 | 
| 
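The chunk above ends on the note that an activation can be fused through the `ACTIVATION` meta-parameter. As a sketch only, assuming the `matmul` wrapper and the `leaky_relu` jit function from the file in this row are in scope, fusing and checking the result could look like this (shapes and tolerances are illustrative assumptions):

import torch
import torch.nn.functional as F

# illustrative fp16 inputs; the wrapper allocates an fp16 output
a = torch.randn((512, 512), device='cuda', dtype=torch.float16)
b = torch.randn((512, 512), device='cuda', dtype=torch.float16)

# the string is compared against ACTIVATION inside the kernel, so the
# activation is applied to the fp32 accumulator before the fp16 down-cast
c_fused = matmul(a, b, activation="leaky_relu")

# reference: unfused matmul followed by the same activation on the host side
c_ref = F.leaky_relu(torch.matmul(a, b), negative_slope=0.01)
print(torch.allclose(c_fused, c_ref, atol=1e-2, rtol=0))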
	xiaonans/triton-gemm-benchmark | 
	kernels/basic_matmul.py | 
	https://github.com/xiaonans/triton-gemm-benchmark/blob/436ee5a77e01ede7e4a1fe015f533dfdc53b31d3/kernels/basic_matmul.py | 
	import triton
import triton.language as tl
import torch
from .autotune_config import get_autotune_config
# `triton.jit`'ed functions can be auto-tuned by using the `triton.autotune` decorator, which consumes:
#   - A list of `triton.Config` objects that define different configurations of
#       meta-parameters (e.g., `BLOCK_SIZE_M`) and compilation options (e.g., `num_warps`) to try
#   - An auto-tuning *key* whose change in values will trigger evaluation of all the
#       provided configs
@triton.autotune(
    configs=get_autotune_config(),
    key=['M', 'N', 'K'],
)
@triton.jit
def matmul_kernel(
        # Pointers to matrices
        a_ptr, b_ptr, c_ptr,
        # Matrix dimensions
        M, N, K,
        # The stride variables represent how much to increase the ptr by when moving by 1
        # element in a particular dimension. E.g. `stride_am` is how much to increase `a_ptr`
        # by to get the element one row down (A has M rows).
        stride_am, stride_ak,  #
        stride_bk, stride_bn,  #
        stride_cm, stride_cn,
        # Meta-parameters
        BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,  #
        GROUP_SIZE_M: tl.constexpr,  #
        ACTIVATION: tl.constexpr  #
):
    """Kernel for computing the matmul C = A x B.
    A has shape (M, K), B has shape (K, N) and C has shape (M, N)
    """
    # -----------------------------------------------------------
    # Map program ids `pid` to the block of C it should compute.
    # This is done in a grouped ordering to promote L2 data reuse.
    # See above `L2 Cache Optimizations` section for details.
    pid = tl.program_id(axis=0)
    num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
    num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
    num_pid_in_group = GROUP_SIZE_M * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * GROUP_SIZE_M
    group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M)
    pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    # ----------------------------------------------------------
    # Create pointers for the first blocks of A and B.
    # We will advance this pointer as we move in the K direction
    # and accumulate
    # `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers
    # `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers
    # See above `Pointer Arithmetic` section for details
    offs_am = (pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)) % M
    offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N
    offs_k = tl.arange(0, BLOCK_SIZE_K)
    a_ptrs = a_ptr + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak)
    b_ptrs = b_ptr + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)
    # -----------------------------------------------------------
    # Iterate to compute a block of the C matrix.
    # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block
    # of fp32 values for higher accuracy.
    # `accumulator` will be converted back to fp16 after the loop.
    accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
    for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)):
        # Load the next block of A and B, generate a mask by checking the K dimension.
        # If it is out of bounds, set it to 0.
        a = tl.load(a_ptrs, mask=offs_k[None, :] < K - k * BLOCK_SIZE_K, other=0.0)
        b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0)
        # We accumulate along the K dimension.
        accumulator = tl.dot(a, b, accumulator)
        # Advance the ptrs to the next K block.
        a_ptrs += BLOCK_SIZE_K * stride_ak
        b_ptrs += BLOCK_SIZE_K * stride_bk
    # You can fuse arbitrary activation functions here
    # while the accumulator is still in FP32!
    if ACTIVATION == "leaky_relu":
        accumulator = leaky_relu(accumulator)
    c = accumulator.to(tl.float16)
    # -----------------------------------------------------------
    # Write back the block of the output matrix C with masks.
    offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
    offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
    c_ptrs = c_ptr + stride_cm * offs_cm[:, None] + stride_cn * offs_cn[None, :]
    c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
    tl.store(c_ptrs, c, mask=c_mask)
# We can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `matmul_kernel`.
@triton.jit
def leaky_relu(x):
    return tl.where(x >= 0, x, 0.01 * x)
def matmul(a, b, activation=""):
    # Check constraints.
    assert a.shape[1] == b.shape[0], "Incompatible dimensions"
    assert a.is_contiguous(), "Matrix A must be contiguous"
    M, K = a.shape
    K, N = b.shape
    # Allocates output.
    c = torch.empty((M, N), device=a.device, dtype=torch.float16)
    # 1D launch kernel where each block gets its own program.
    grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), )
    matmul_kernel[grid](
        a, b, c,  #
        M, N, K,  #
        a.stride(0), a.stride(1),  #
        b.stride(0), b.stride(1),  #
        c.stride(0), c.stride(1),  #
        ACTIVATION=activation  #
    )
    return c | 
	@triton.jit
def leaky_relu(x):
    return tl.where(x >= 0, x, 0.01 * x)
def matmul(a, b, activation=""):
    # Check constraints.
    assert a.shape[1] == b.shape[0], "Incompatible dimensions"
    assert a.is_contiguous(), "Matrix A must be contiguous"
    M, K = a.shape
    K, N = b.shape
    # Allocates output.
    c = torch.empty((M, N), device=a.device, dtype=torch.float16)
    # 1D launch kernel where each block gets its own program.
    grid = lambda META: (triton.cdiv(M, META['BLOCK_SIZE_M']) * triton.cdiv(N, META['BLOCK_SIZE_N']), )
    matmul_kernel[grid](
        a, b, c,  #
        M, N, K,  #
        a.stride(0), a.stride(1),  #
        b.stride(0), b.stride(1),  #
        c.stride(0), c.stride(1),  #
        ACTIVATION=activation  #
    )
    return c | 
| 
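The file above pulls its tuning space from `kernels/autotune_config.py`, which is not shown in this row. Purely to illustrate what the `@triton.autotune` decorator consumes, here is a sketch of a `get_autotune_config` returning a couple of `triton.Config` objects; the block sizes, stage counts and warp counts are assumptions, not the repository's actual values:

import triton

def get_autotune_config():
    # each Config pairs meta-parameters (BLOCK_SIZE_*, GROUP_SIZE_M)
    # with compilation options (num_stages, num_warps)
    return [
        triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8},
                      num_stages=3, num_warps=8),
        triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8},
                      num_stages=4, num_warps=4),
    ]

Because the decorator's key is ['M', 'N', 'K'], each distinct problem size triggers one benchmarking pass over this list, after which the fastest config is cached and reused.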
	xiaohuguo2023/scripts | 
	others/tune_gemm1.py | 
	https://github.com/xiaohuguo2023/scripts/blob/b6de80a590c78e78a4f8d64346c34ef445e2aa17/others/tune_gemm1.py | 
	import argparse
import sys
import yaml
import os
import glob
import subprocess
import torch
import triton
import triton.language as tl
from matmul_kernel import matmul_kernel
from datetime import datetime
import pandas as pd
import torch.distributed as dist
from torch.multiprocessing import spawn
def get_full_tuning_space():
    configs = []
    block_mn_range = [16, 32, 64, 128, 256]
    block_k_range = [16, 32, 64, 128, 256]
    split_k_range = [1, 2, 4, 5, 6, 8, 10, 12, 16, 18, 24]
    num_warps_range = [1, 2, 4, 8]
    group_m_range = [1, 4, 8, 16, 32]
    num_stage_range = [0]
    waves_per_eu_range = [0]
    matrix_instr_nonkdim_range = [16, 32]
    kpack_range = [1, 2]
    for block_m in block_mn_range:
        for block_n in block_mn_range:
            for block_k in block_k_range:
                for num_warps in num_warps_range:
                    for group_m in group_m_range:
                        for split_k in split_k_range:
                            for num_stages in num_stage_range:
                                for waves_per_eu in waves_per_eu_range:
                                    for matrix_instr_nonkdim in matrix_instr_nonkdim_range:
                                        for kpack in kpack_range:
                                            configs.append({
                                                'BLOCK_SIZE_M': block_m, 'BLOCK_SIZE_N': block_n, 'BLOCK_SIZE_K': block_k,
                                                'GROUP_SIZE_M': group_m, 'SPLIT_K': split_k, 'num_warps': num_warps,
                                                'num_stages': num_stages, 'waves_per_eu': waves_per_eu,
                                                'matrix_instr_nonkdim': matrix_instr_nonkdim, 'kpack': kpack
                                            })
    return configs
def prune_configs(M, N, K, configs, elemBytes_a, elemBytes_b):
    pruned_configs = []
    if M < 32 or N < 32:
        mfma = 16
    else:
        mfma = 32
    large_gemm = False
    if M >= 2048 and N >=2048:
        large_gemm = True
    for config in configs:
        BLOCK_SIZE_M = config.get("BLOCK_SIZE_M")
        BLOCK_SIZE_N = config.get("BLOCK_SIZE_N")
        BLOCK_SIZE_K = config.get("BLOCK_SIZE_K")
        num_warps = config.get("num_warps")
        matrix_instr_nonkdim = config.get("matrix_instr_nonkdim")
        kpack = config.get("kpack")
        if matrix_instr_nonkdim > mfma:
            continue
        if mfma == 4 and BLOCK_SIZE_K < 64:
            continue
        if BLOCK_SIZE_M * BLOCK_SIZE_N < 64:
            continue
        SPLIT_K = config.get("SPLIT_K")
        GROUP_M = config.get("GROUP_SIZE_M")
        if BLOCK_SIZE_M < matrix_instr_nonkdim or BLOCK_SIZE_N < matrix_instr_nonkdim:
            continue
        if M <= matrix_instr_nonkdim and BLOCK_SIZE_M != matrix_instr_nonkdim:
            continue
        if N <= matrix_instr_nonkdim and BLOCK_SIZE_N != matrix_instr_nonkdim:
            continue
        if BLOCK_SIZE_M > M * 2 and BLOCK_SIZE_M != 16:
            continue
        if BLOCK_SIZE_N > N * 2 and BLOCK_SIZE_N != 16:
            continue
        if SPLIT_K != 1 and not need_split_k(M, N, K):
            continue
        leap = SPLIT_K * BLOCK_SIZE_K
        modv = K % leap
        if modv != 0:
            continue
        if GROUP_M * BLOCK_SIZE_M > M and GROUP_M != 1:
            continue
        LDS = BLOCK_SIZE_K * BLOCK_SIZE_M * elemBytes_a + BLOCK_SIZE_K * BLOCK_SIZE_N * elemBytes_b
        if LDS > 65536:
            continue
        if large_gemm:
            if BLOCK_SIZE_M < 64 or BLOCK_SIZE_N < 64:
                continue
            if BLOCK_SIZE_K < 64:
                continue
            if num_warps < 4:
                continue
        pruned_configs.append(config)
    return pruned_configs
def need_split_k(SIZE_M, SIZE_N, SIZE_K):
    return (SIZE_M < 64 or SIZE_N < 64) and SIZE_K > 1024
def run_bash_command_wrapper(commandstring, capture=True):
    try:
        run_bash_command(commandstring, capture)
    except subprocess.CalledProcessError as e:
        if not capture:
            print(f"running {commandstring} one more time")
        run_bash_command(commandstring, capture)
def run_bash_command(commandstring, capture=True):
    if capture:
        proc = subprocess.run(commandstring, shell=True, check=True, executable='/bin/bash', stdout=subprocess.PIPE)
        return proc.stdout.splitlines()
    proc = subprocess.run(commandstring, shell=True, check=True, executable='/bin/bash')
    return None
def read_config(config):
    block_m = config.get('BLOCK_SIZE_M')
    block_n = config.get('BLOCK_SIZE_N')
    block_k = config.get('BLOCK_SIZE_K')
    group_m = config.get('GROUP_SIZE_M')
    split_k = config.get('SPLIT_K')
    num_warps = config.get('num_warps')
    num_stages = config.get('num_stages')
    waves_per_eu = config.get('waves_per_eu')
    mfma_instr_size = config.get('matrix_instr_nonkdim')
    kpack = config.get('kpack')
    return block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfma_instr_size, kpack
def gen_kernel_and_configStr_from_config(M, N, K, config, dtype_a, dtype_b, dtype_c):
    block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack = read_config(config)
    torch_dtype_a = 'fp16'
    torch_dtype_b = 'fp16'
    torch_dtype_c = 'fp16'
    if dtype_a:
        torch_dtype_a = tl_to_torch_types[name_to_tl_types[dtype_a]]
    if dtype_b:
        torch_dtype_b = tl_to_torch_types[name_to_tl_types[dtype_b]]
    if dtype_c:
        torch_dtype_c = tl_to_torch_types[name_to_tl_types[dtype_c]]
    configStr = f"M{M}_N{N}_K{K}_BM{block_m}_BN{block_n}_BK{block_k}_GM{group_m}_SK{split_k}_nW{num_warps}_nS{num_stages}_EU{waves_per_eu}_kP{kpack}_mfma{mfmaInstrSize}"
    matmul_def_str = f"""
def matmul_{configStr}(a, b, c, M, N, K, am, ak, bk, bn, cm, cn, warmup=False):
    grid = triton.cdiv(M, {block_m}) * triton.cdiv(N, {block_n}), {split_k}
    if warmup:
        matmul_kernel_{configStr}.warmup(
            {torch_dtype_a}, {torch_dtype_b}, {torch_dtype_c},
            M, N, K,
            am, ak, bk, bn, cm, cn,
            BLOCK_SIZE_M = {block_m},
            BLOCK_SIZE_N = {block_n},
            BLOCK_SIZE_K = {block_k},
            GROUP_SIZE_M = {group_m},
            SPLIT_K = {split_k},
            num_warps = {num_warps},
            num_stages = {num_stages},
            waves_per_eu = {waves_per_eu},
            matrix_instr_nonkdim = {mfmaInstrSize},
            kpack = {kpack},
            grid=(1,)
        )
        return None
    else:
        matmul_kernel_{configStr}[grid](
            a, b, c,
            M, N, K,
            am, ak, bk, bn, cm, cn,
            BLOCK_SIZE_M = {block_m},
            BLOCK_SIZE_N = {block_n},
            BLOCK_SIZE_K = {block_k},
            GROUP_SIZE_M = {group_m},
            SPLIT_K = {split_k},
            num_warps = {num_warps},
            num_stages = {num_stages},
            waves_per_eu = {waves_per_eu},
            matrix_instr_nonkdim = {mfmaInstrSize},
            kpack = {kpack}
        )
        return c
def try_config_{configStr}(M, N, K, am, ak, bk, bn, cm, cn):
    try:
        matmul_{configStr}(None, None, None, M, N, K, am, ak, bk, bn, cm, cn, True)
        return True
    except Exception as e:
        print(f'invalid config(compilation): {configStr}: ', e, flush=True)
        return False
"""
    return configStr, matmul_def_str
def generated_kernel_name(M, N, K, gpu_id):
    path = os.path.dirname(os.path.abspath(__file__))
    return f"{path}/generated_kernel{M}-{N}-{K}-{gpu_id}.py"
def generate_kernel(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, configs, jobs, iters, run_bench):
    filenames = [generated_kernel_name(M, N, K, i) for i in range(jobs)]
    f_kernel = [open(path, 'w') for path in filenames]
    import_str = """import torch
import triton
import triton.language as tl
import argparse
import sys
import torch.multiprocessing as mp
from tune_gemm import gen_input
"""
    for fi in range(jobs):
        f_kernel[fi].write(import_str + "\n")
    with open(os.path.dirname(os.path.abspath(__file__))+"/matmul_kernel.py") as file:
        matmul_kernel_code = file.read()
    idx = 0
    for config in configs:
        file_idx = idx % jobs
        configStr, matmul_def_str = gen_kernel_and_configStr_from_config(M, N, K, config, dtype_a, dtype_b, dtype_c)
        matmul_kernel_config = matmul_kernel_code.replace("matmul_kernel", f"matmul_kernel_{configStr}")
        matmul_kernel_config = matmul_kernel_config.replace("import triton.language as tl", "")
        matmul_kernel_config = matmul_kernel_config.replace("import triton", "")
        f_kernel[file_idx].write(matmul_kernel_config + "\n\n")
        f_kernel[file_idx].write(matmul_def_str + "\n")
        idx += 1
    test_gemm_pre_str = f"""def test_gemm(M, N, K, num_threads):
    results = []
    config_names = []
    a, a_fp16 = gen_input(M, K, '{dtype_a}', {col_a}, 1, '{init_type}', device='cuda')
    b, b_fp16 = gen_input(K, N, '{dtype_b}', {col_b}, 2, '{init_type}', device='cuda')
    c = torch.zeros((M, N), device=a.device, dtype={tl_to_torch_types[name_to_tl_types[dtype_c]]})
    task_args = (M, N, K,
                 a.stride(0), a.stride(1),
                 b.stride(0), b.stride(1),
                 c.stride(0), c.stride(1))
"""
    for fi in range(jobs):
        f_kernel[fi].write(test_gemm_pre_str + "\n")
    idx = 0
    for config in configs:
        configStr, _ = gen_kernel_and_configStr_from_config(M, N, K, config, None, None, None)
        task_str = f"    results.append(try_config_{configStr}(*task_args))\n"
        task_str += f"    config_names.append('{configStr}')\n"
        f_kernel[idx % jobs].write(task_str)
        idx += 1
    for fi in range(jobs):
        threadpool_str = """
    failed_configs = []
    for i in range(len(results)):
        if not results[i]:
            failed_configs.append(config_names[i])
    if failed_configs:
        with open("{filename}.failed_configs", "w") as f:
            for cfg in failed_configs:
                f.write(cfg + "\\n")
    else:
        try:
            with open("{filename}.failed_configs", "r") as f:
                failed_configs = [cfg.strip() for cfg in f.readlines()]
        except Exception:
            failed_configs = []
        """.format(filename=filenames[fi])
        f_kernel[fi].write(threadpool_str)
    idx = 0
    runs = iters if run_bench else 200
    for config in configs:
        configStr, _ = gen_kernel_and_configStr_from_config(M, N, K, config, None, None, None)
        matmul_call_str = f"""
    if '{configStr}' not in failed_configs:
        for i in range({runs}):
            d = matmul_{configStr}(a, b, c, M, N, K, a.stride(0), a.stride(1), b.stride(0), b.stride(1), c.stride(0), c.stride(1))"""
        f_kernel[idx % jobs].write(matmul_call_str + "\n")
        idx += 1
    for fi in range(jobs):
        f_kernel[fi].write("        return d\n")
    def_main_str = """
def main():
    parser = argparse.ArgumentParser(
        prog="tune a specific gemm size",
        allow_abbrev=False,)
    parser.add_argument("-n", type=int, default=1, help='number of threads')
    args = parser.parse_args()
    numThreads = args.n
    """
    test_gemm_call_str = f'test_gemm({M}, {N}, {K}, numThreads)'
    for fi in range(jobs):
        f_kernel[fi].write(def_main_str)
        f_kernel[fi].write(test_gemm_call_str + "\n\n")
        f_kernel[fi].write("""if __name__ == '__main__':
   sys.exit(main())""")
        f_kernel[fi].close()
def extract_kernel_time(M, N, K, config, df):
    configStr, _ = gen_kernel_and_configStr_from_config(M, N, K, config, None, None, None)
    df = df[df['KernelName'].str.contains(configStr)]
    meanTime = df['DurationNs'].tail(100).mean()
    return config, meanTime
def profile_batch_kernels(rank, world_size, M, N, K, jobs, verbose):
    ngpus = world_size
    gpu_id = rank
    os.environ['ROCR_VISIBLE_DEVICES'] = str(gpu_id)
    jobId = gpu_id
    while jobId < jobs:
        if verbose:
            print(f"profiling {generated_kernel_name(M, N, K, jobId)} on GPU {gpu_id}")
        run_bash_command_wrapper(f"rocprof --stats -o results-{jobId}.csv python {generated_kernel_name(M, N, K, jobId)}", capture=(verbose < 2))
        jobId += ngpus
def tune_gemm_config(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, configs, run_bench, jobs, iters, skipWarmup, verbose=0, num_threads=16):
    setup(rank, world_size)
    
    if rank == 0:
        generate_kernel(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, configs, jobs, iters, run_bench)
        run_bash_command("rm -rf ~/.triton/cache")
        start_time = datetime.now()
        if not skipWarmup:
            for i in range(jobs):
                run_bash_command(f"python {generated_kernel_name(M, N, K, i)} -n {num_threads}", capture=(verbose < 2))
        compile_end = datetime.now()
        compile_time = compile_end - start_time
        if verbose:
            print(f"compile time: {compile_time}", flush=True)
    dist.barrier()
    profile_batch_kernels(rank, world_size, M, N, K, jobs, verbose)
    dist.barrier()
    if rank == 0:
        profile_end = datetime.now()
        profile_time = profile_end - compile_end
        if verbose:
            print(f"profile time: {profile_time}", flush=True)
        minTime = 1024 * 1024 * 1024
        tasks = []
        idx = 0
        df_prof = [pd.read_csv(f"results-{i}.csv") for i in range(jobs)]
        for config in configs:
            file_idx = idx % jobs
            tasks.append((M, N, K, config, df_prof[file_idx]))
            idx += 1
        for task in tasks:
            config, myTime = extract_kernel_time(*task)
            if myTime:
                min_us = myTime / 1000
                if min_us < minTime:
                    minTime = min_us
                    bestConfig = config
            else:
                min_us = -1
                print(f"invalid config(post processing): SIZE {M} {N} {K}: {config}", flush=True)
        post_end = datetime.now()
        post_time = post_end - profile_end
        if verbose:
            print(f"post processing time: {post_time}", flush=True)
        return minTime, bestConfig, compile_time, profile_time, post_time
    cleanup()
def gen_input(M, N, ty_name, needTrans, seed, init_type, device='cuda'):
    d_type = name_to_tl_types[ty_name]
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    @triton.jit
    def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
        offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        input = tl.load(input_ptr + offsets, mask=mask)
        output = input
        tl.store(output_ptr + offsets, output, mask=mask)
    def init_by_size_and_type(size, dtype, init_type):
        if init_type == 'hpl':
            return torch.empty(size, device='cuda', dtype=dtype).uniform_(-0.5, 0.5)
        # This init type has element[i] in row[j] equal to sin(i+j*N)
        elif init_type == 'trig_float':
            M, N = size
            return torch.reshape(torch.arange(0, M*N), (M, N)).sin().to(dtype=dtype, device='cuda')
        elif init_type == 'zeros':
            return torch.zeros(size, dtype=dtype, device='cuda')
        elif init_type == "randn":
            temp = torch.randn(size, dtype=dtype, device='cuda')
            return temp
        else:
            raise ValueError("Bad matrix initialization type.")
    raw_data = init_by_size_and_type((N,M) if needTrans else (M,N), torch.float32, init_type)
    if needTrans:
        raw_data = raw_data.T
    if (d_type == tl.float8e4b8 and TORCH_HAS_FP8E4B8) or \
        (d_type == tl.float8e5b16 and TORCH_HAS_FP8E5B16) or not d_type.is_fp8():
        input = raw_data.to(tl_to_torch_types[d_type])
        input_f16 = input.to(torch.float16)
    else:
        f8_tensor = raw_data.to(torch.int8)
        # keep only two bits of exponent to avoid overflow
        f8_tensor = f8_tensor & 0b00111111
        input = triton.reinterpret(f8_tensor, d_type)
        input_f16 = torch.empty_like(f8_tensor, dtype=torch.float16)
        grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
        n_elements = raw_data.numel()
        copy_kernel[grid](input, input_f16, n_elements, BLOCK_SIZE=1024)
    return input, input_f16
def matmul(a, b, c, block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack):
    # Check constraints.
    assert a.shape[1] == b.shape[0], "Incompatible dimensions"
    #assert a.is_contiguous(), "Matrix A must be contiguous"
    #assert b.is_contiguous(), "Matrix B must be contiguous"
    M, K = a.shape
    K, N = b.shape
    # 1D launch kernel where each block gets its own program.
    grid = triton.cdiv(M, block_m) * triton.cdiv(N, block_n), split_k
    matmul_kernel[grid](
        a, b, c,
        M, N, K,
        a.stride(0), a.stride(1),
        b.stride(0), b.stride(1),
        c.stride(0), c.stride(1),
        BLOCK_SIZE_M=block_m,
        BLOCK_SIZE_N=block_n,
        BLOCK_SIZE_K=block_k,
        GROUP_SIZE_M=group_m,
        SPLIT_K=split_k,
        num_warps=num_warps,
        num_stages=num_stages,
        waves_per_eu=waves_per_eu,
        matrix_instr_nonkdim = mfmaInstrSize,
        kpack = kpack
    )
    return c
def test_correctness(M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, config, verbose):
    block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack = read_config(config)
    torch.manual_seed(0)
    #a = torch.randn((M, K), device='cuda', dtype=datatype)
    #b = torch.randn((K, N), device='cuda', dtype=datatype)
    a, a_fp16 = gen_input(M, K, dtype_a, col_a, 1, init_type, device='cuda')
    b, b_fp16 = gen_input(K, N, dtype_b, col_b, 2, init_type, device='cuda')
    # Allocates output.
    c = torch.zeros((M, N), device=a.device, dtype=tl_to_torch_types[name_to_tl_types[dtype_c]])
    triton_output = matmul(a, b, c, block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack)
    torch_output = torch.matmul(a_fp16, b_fp16)
    # print(f"triton_output={triton_output}")
    # print(f"torch_output={torch_output}")
    rtol = 0 if torch.version.hip is None else 1e-2
    atol = 1e-3 if split_k == 1 else 4e-2
    row_a_str = 'N' if col_a else 'T'
    row_b_str = 'N' if col_b else 'T'
    size_str = ''
    if verbose:
        size_str = f'SIZE M: {M}, N: {N}, K: {K}, trans: {row_a_str}{row_b_str}'
    if torch.allclose(triton_output.to(torch.float16), torch_output, atol=atol, rtol=rtol):
        print(f'{size_str} Correct✅')
    else:
        print(f'{size_str} Incorrect❌')
def get_default_tuning_result_filename():
    git_branch_name = run_bash_command("git rev-parse --abbrev-ref HEAD")
    git_branch_name = git_branch_name[0].decode()
    git_commit_hash = run_bash_command("git rev-parse --short HEAD")
    git_commit_hash = git_commit_hash[0].decode()
    dt_string = datetime.now().strftime("%m-%d-%Y-%H:%M:%S")
    defaultName = f"tuning_results_{git_branch_name}@{git_commit_hash}_{dt_string}.yaml"
    return defaultName
def parse_args():
    parser = argparse.ArgumentParser(
        prog="tune a specific gemm size",
        allow_abbrev=False,
    )
    parser.add_argument("-m", type=int, default=0)
    parser.add_argument("-n", type=int, default=0)
    parser.add_argument("-k", type=int, default=0)
    parser.add_argument("-col_a", action='store_true', default=False, help='whether matrix a is column major')
    parser.add_argument("-col_b", action='store_true', default=False, help='whether matrix b is column major')
    parser.add_argument("-dtype_a", type=str, default='fp16', help="matrix a element data type")
    parser.add_argument("-dtype_b", type=str, default='fp16', help="matrix b element data type")
    parser.add_argument("-dtype_c", type=str, default='fp16', help="output element data type")
    parser.add_argument("--ngpus", type=int, default=0, help='number of GPUs used in the profiling step')
    parser.add_argument("--gpu_ids", type=lambda s: [int(id) for id in s.split(',')], default=[], help='list of gpu ids to use for tuning')
    parser.add_argument("--gemm_size_file", type=str, default="", help='yaml file to indicate matrix size')
    parser.add_argument("--o", type=str, default='', help='yaml file to store tuning results')
    parser.add_argument("--keep", action='store_true', default=False, help='keep generated files')
    parser.add_argument("--compare", action='store_true', default=False, help="Whether to check result correctness of the best config after tuning")
    parser.add_argument("--compare_wo_tuning", action='store_true', default=False, help="Whether to check result correctness of the given config without tuning")
    parser.add_argument("--benchmark", action='store_true', default=False, help="Benchmark the given config")
    parser.add_argument("--time_breakdown", action='store_true', default=False, help="Show detailed time breakdown of each step during the tuning")
    parser.add_argument("--verbose", action='store_true', default=False, help="enables time_breakdown and additional logging messages")
    parser.add_argument("--num_threads", type=int, default=16, help="number of threads to use for kernel compilation and post processing")
    parser.add_argument("--jobs", type=int, default=1, help="number of generated files")
    parser.add_argument("--iters", type=int, default=1000, help="number of benchmark iterations per config")
    parser.add_argument("--init_type", type=str, default='randn', help="Initialization type for input matrices (default: randn)")
    parser.add_argument("--no_warmup", action='store_true', default=False, help="Do not call the warmup kernel")
    args = parser.parse_args()
    if not args.o:
        if args.benchmark:
            args.o = "benchmarking_results.csv"
        else:
            args.o = get_default_tuning_result_filename()
    return args
TORCH_HAS_FP8E5B16 = hasattr(torch, 'float8_e5m2fnuz')
TORCH_HAS_FP8E4B8 = hasattr(torch, 'float8_e4m3fnuz')
tl_to_torch_types = {
    tl.float16: torch.float16,
    tl.bfloat16: torch.bfloat16,
    tl.float32: torch.float32,
    tl.int8: torch.int8,
    tl.int32: torch.int32,
}
if TORCH_HAS_FP8E5B16:
    tl_to_torch_types[tl.float8e5b16] = torch.float8_e5m2fnuz
if TORCH_HAS_FP8E4B8:
    tl_to_torch_types[tl.float8e4b8] = torch.float8_e4m3fnuz
name_to_tl_types = {
    'int8': tl.int8,
    'int32': tl.int32,
    'fp16': tl.float16,
    'fp32': tl.float32,
    'bf16': tl.bfloat16,
    'fp8': tl.float8e4b8,
    'bf8': tl.float8e5b16,
}
def process_item(item):
    M = item['M']
    N = item['N']
    K = item['K']
    col_a = False if item['rowMajorA'] == 'T' else True
    col_b = False if item['rowMajorB'] == 'T' else True
    del item['M']
    del item['N']
    del item['K']
    del item['rowMajorA']
    del item['rowMajorB']
    return M, N, K, col_a, col_b, item
def type_name_to_bytes(ty_name):
    if '32' in ty_name:
        return 4
    if '16' in ty_name:
        return 2
    if '8' in ty_name:
        return 1
    else:
        print(f"Unrecognized input type name {ty_name}")
        sys.exit(1)
def format_output(unformatted):
    if unformatted < 0.0001:
        formatted = "{:.3e}".format(unformatted)
    elif unformatted > 1000:
        formatted = "{:.1f}".format(unformatted)
    else:
        formatted = "{:.2f}".format(unformatted)
    return formatted
def setup(rank, world_size):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    print(f"Rank {rank}/{world_size} process initialized.")
def cleanup():
    dist.destroy_process_group()
    print("Process group destroyed.")
def main():
    args = parse_args()
    world_size = len(args.gpu_ids) if args.gpu_ids else torch.cuda.device_count()
    print(f"Number of GPUs available: {world_size}")
    matrix_size_file = args.gemm_size_file
    output_file = args.o
    keepTmp = args.keep
    run_bench = args.benchmark
    jobs = args.jobs
    iters = args.iters
    skipWarmup = args.no_warmup
    # Get GPU ids
    ngpus = args.ngpus
    gpu_ids = args.gpu_ids
    if ngpus != 0 and gpu_ids:
        print("--ngpus and --gpu_ids are mutually exclusive options")
        return os.EX_USAGE
    if ngpus == 0 and not gpu_ids:
        ngpus = 1
    if ngpus != 0:
        gpus = list(range(ngpus))
    if gpu_ids:
        gpus = gpu_ids
    if run_bench:
        gpus = [gpus[0]]
        jobs = 1
    # Get element type
    dtype_a = args.dtype_a
    dtype_b = args.dtype_b
    dtype_c = args.dtype_c
    if not dtype_a in name_to_tl_types or not dtype_b in name_to_tl_types or not dtype_c in name_to_tl_types:
        print(f"Unsupported dtype_a {args.dtype_a} or dtype_b {args.dtype_b} or dtype_c {args.dtype_c}")
        print("Supported types: ", list(name_to_tl_types.keys()))
        sys.exit(1)
    mnks = []
    # TODO: make it more robust to get user input
    init_type = args.init_type
    if matrix_size_file == "" or not os.path.isfile(matrix_size_file):
        M = args.m
        N = args.n
        K = args.k
        col_a = args.col_a
        col_b = args.col_b
        mnks = [(M, N, K, col_a, col_b, None)]
    else:
        with open(matrix_size_file) as file:
            matrix_sizes = yaml.safe_load(file)
        for item in matrix_sizes:
            M, N, K, col_a, col_b, item = process_item(item)
            mnks.append((M, N, K, col_a, col_b, item))
    # Check correctness from given configs
    if args.compare_wo_tuning:
        for (M, N, K, col_a, col_b, myConfig) in mnks:
            test_correctness(M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, myConfig, True)
        return
    configs_full = get_full_tuning_space()
    start_time = datetime.now()
    # Append to the output file so that we can save all results into one file
    f_results = open(output_file, 'a')
    if run_bench:
        print(f"Benchmarking gemm with {dtype_a} inputs")
        print("trans     M      N      K    TFLOPS   us")
        f_results.write("trans,M,N,K,TFLOPS,us\n")
    else:
        print(f"Tuning {len(mnks)} gemm sizes starts at: {start_time}", flush=True)
    for (M, N, K, col_a, col_b, myConfig) in mnks:
        start_local_time = datetime.now()
        # Obtain a pruned tuning space according to gemm size
        # If running benchmark, use the provided config
        pruned_configs = [myConfig] if run_bench else prune_configs(M, N, K, configs_full, type_name_to_bytes(dtype_a), type_name_to_bytes(dtype_b))
        row_a_str = 'N' if col_a else 'T'
        row_b_str = 'N' if col_b else 'T'
        size_str = f'SIZE: {M} {N} {K} {row_a_str}{row_b_str}'
        if not run_bench:
            print(f"{size_str} nConfigs: {len(pruned_configs)}", end=" ", flush=True)
        else:
            print(f"{row_a_str}{row_b_str}    {M:5d}  {N:5d}  {K:5d}    ", end="")
            f_results.write(f"{row_a_str}{row_b_str},{M},{N},{K},")
        # The main tuning function for one gemm size
        verbose_level = 0
        if args.time_breakdown:
            verbose_level = 1
        if args.verbose:
            verbose_level = 2
        def tune_gemm(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, pruned_configs, run_bench, jobs, iters, skipWarmup, verbose, num_threads):
            return tune_gemm_config(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, pruned_configs, run_bench, jobs, iters, skipWarmup, verbose, num_threads)
        minTime, bestConfig, compile_time, profile_time, post_time = spawn(
            tune_gemm,
            args=(world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, pruned_configs, run_bench, jobs, iters, skipWarmup, verbose_level, num_threads),
            nprocs=world_size,
            join=True
        )
        # post processing the numbers
        perf_tflops = lambda us: 2 * M * N * K * 1e-12 / (us * 1e-6)
        tri_tflops = perf_tflops(minTime)
        formatted_tflops = format_output(tri_tflops)
        minTime = format_output(minTime)
        if not run_bench:
            print(f'TFLOPS: {formatted_tflops} time(us): {minTime}', end=" ", flush=True)
        bestConfig_compact_str, _ = gen_kernel_and_configStr_from_config(M, N, K, bestConfig, None, None, None)
        if not run_bench:
            print(f'best_config: {bestConfig_compact_str}', end=" ", flush=True)
        # write best config to tuning_results.yaml
        if run_bench:
            print(f"{formatted_tflops}     {minTime}")
            f_results.write(f"{formatted_tflops},{minTime}\n")
        sizeDict = {'M': M, 'N': N, 'K': K, 'rowMajorA': row_a_str, 'rowMajorB': row_b_str}
        sizeDict.update(bestConfig)
        if not run_bench:
            f_results.write("- " + str(sizeDict) + " ")
            f_results.write(f'# TFLOPS: {formatted_tflops} time(us): {minTime}\n')
        # remove generated files if asked to
        if not keepTmp:
            for i in range(jobs):
                generated_script = generated_kernel_name(M, N, K, i)
                os.remove(generated_script)
                if not skipWarmup:
                    os.remove(generated_script + ".failed_configs")
                for f in glob.glob(f"results-{i}.*"):
                    os.remove(f)
        # Check correctness if asked to
        if args.compare:
            print("correctness: ", end=" ", flush=True)
            test_correctness(M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, bestConfig, False)
        elif not run_bench:
            print("", flush=True)
        end_local_time = datetime.now()
        if not run_bench:
            print(f">>> Elapsed time: {end_local_time - start_local_time} = {compile_time} (compile) + {profile_time} (profile) + {post_time} (post processing)", flush=True)
    f_results.close()
    end_time = datetime.now()
    tuning_time = end_time - start_time
    if not run_bench:
        print(f"Tuning ends at: {end_time}")
        print(f"Total tuning time (h:m:s): {tuning_time}")
if __name__ == '__main__':
    main()
 | 
	@triton.jit
    def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
        offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
        mask = offsets < n_elements
        input = tl.load(input_ptr + offsets, mask=mask)
        output = input
        tl.store(output_ptr + offsets, output, mask=mask)
    def init_by_size_and_type(size, dtype, init_type):
        if init_type == 'hpl':
            return torch.empty(size, device='cuda', dtype=dtype).uniform_(-0.5, 0.5)
        # This init type has element[i] in row[j] equal to sin(i+j*N)
        elif init_type == 'trig_float':
            M, N = size
            return torch.reshape(torch.arange(0, M*N), (M, N)).sin().to(dtype=dtype, device='cuda')
        elif init_type == 'zeros':
            return torch.zeros(size, dtype=dtype, device='cuda')
        elif init_type == "randn":
            temp = torch.randn(size, dtype=dtype, device='cuda')
            return temp
        else:
            raise ValueError("Bad matrix initialization type.")
    raw_data = init_by_size_and_type((N,M) if needTrans else (M,N), torch.float32, init_type)
    if needTrans:
        raw_data = raw_data.T
    if (d_type == tl.float8e4b8 and TORCH_HAS_FP8E4B8) or \
        (d_type == tl.float8e5b16 and TORCH_HAS_FP8E5B16) or not d_type.is_fp8():
        input = raw_data.to(tl_to_torch_types[d_type])
        input_f16 = input.to(torch.float16)
    else:
        f8_tensor = raw_data.to(torch.int8)
        # keep only two bits of exponent to avoid overflow
        f8_tensor = f8_tensor & 0b00111111
        input = triton.reinterpret(f8_tensor, d_type)
        input_f16 = torch.empty_like(f8_tensor, dtype=torch.float16)
        grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
        n_elements = raw_data.numel()
        copy_kernel[grid](input, input_f16, n_elements, BLOCK_SIZE=1024)
    return input, input_f16
def matmul(a, b, c, block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack):
    # Check constraints.
    assert a.shape[1] == b.shape[0], "Incompatible dimensions"
    #assert a.is_contiguous(), "Matrix A must be contiguous"
    #assert b.is_contiguous(), "Matrix B must be contiguous"
    M, K = a.shape
    K, N = b.shape
    # 1D launch kernel where each block gets its own program.
    grid = triton.cdiv(M, block_m) * triton.cdiv(N, block_n), split_k
    matmul_kernel[grid](
        a, b, c,
        M, N, K,
        a.stride(0), a.stride(1),
        b.stride(0), b.stride(1),
        c.stride(0), c.stride(1),
        BLOCK_SIZE_M=block_m,
        BLOCK_SIZE_N=block_n,
        BLOCK_SIZE_K=block_k,
        GROUP_SIZE_M=group_m,
        SPLIT_K=split_k,
        num_warps=num_warps,
        num_stages=num_stages,
        waves_per_eu=waves_per_eu,
        matrix_instr_nonkdim = mfmaInstrSize,
        kpack = kpack
    )
    return c
def test_correctness(M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, config, verbose):
    block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack = read_config(config)
    torch.manual_seed(0)
    #a = torch.randn((M, K), device='cuda', dtype=datatype)
    #b = torch.randn((K, N), device='cuda', dtype=datatype)
    a, a_fp16 = gen_input(M, K, dtype_a, col_a, 1, init_type, device='cuda')
    b, b_fp16 = gen_input(K, N, dtype_b, col_b, 2, init_type, device='cuda')
    # Allocates output.
    c = torch.zeros((M, N), device=a.device, dtype=tl_to_torch_types[name_to_tl_types[dtype_c]])
    triton_output = matmul(a, b, c, block_m, block_n, block_k, group_m, split_k, num_warps, num_stages, waves_per_eu, mfmaInstrSize, kpack)
    torch_output = torch.matmul(a_fp16, b_fp16)
    # print(f"triton_output={triton_output}")
    # print(f"torch_output={torch_output}")
    rtol = 0 if torch.version.hip is None else 1e-2
    atol = 1e-3 if split_k == 1 else 4e-2
    row_a_str = 'N' if col_a else 'T'
    row_b_str = 'N' if col_b else 'T'
    size_str = ''
    if verbose:
        size_str = f'SIZE M: {M}, N: {N}, K: {K}, trans: {row_a_str}{row_b_str}'
    if torch.allclose(triton_output.to(torch.float16), torch_output, atol=atol, rtol=rtol):
        print(f'{size_str} Correct✅')
    else:
        print(f'{size_str} Incorrect❌')
def get_default_tuning_result_filename():
    git_branch_name = run_bash_command("git rev-parse --abbrev-ref HEAD")
    git_branch_name = git_branch_name[0].decode()
    git_commit_hash = run_bash_command("git rev-parse --short HEAD")
    git_commit_hash = git_commit_hash[0].decode()
    dt_string = datetime.now().strftime("%m-%d-%Y-%H:%M:%S")
    defaultName = f"tuning_results_{git_branch_name}@{git_commit_hash}_{dt_string}.yaml"
    return defaultName
def parse_args():
    parser = argparse.ArgumentParser(
        prog="tune a specific gemm size",
        allow_abbrev=False,
    )
    parser.add_argument("-m", type=int, default=0)
    parser.add_argument("-n", type=int, default=0)
    parser.add_argument("-k", type=int, default=0)
    parser.add_argument("-col_a", action='store_true', default=False, help='whether matrix a is column major')
    parser.add_argument("-col_b", action='store_true', default=False, help='whether matrix b is column major')
    parser.add_argument("-dtype_a", type=str, default='fp16', help="matrix a element data type")
    parser.add_argument("-dtype_b", type=str, default='fp16', help="matrix b element data type")
    parser.add_argument("-dtype_c", type=str, default='fp16', help="output element data type")
    parser.add_argument("--ngpus", type=int, default=0, help='number of GPUs used in the profiling step')
    parser.add_argument("--gpu_ids", type=lambda s: [int(id) for id in s.split(',')], default=[], help='list of gpu ids to use for tuning')
    parser.add_argument("--gemm_size_file", type=str, default="", help='yaml file to indicate matrix size')
    parser.add_argument("--o", type=str, default='', help='yaml file to store tuning results')
    parser.add_argument("--keep", action='store_true', default=False, help='keep generated files')
    parser.add_argument("--compare", action='store_true', default=False, help="Whether to check result correctness of the best config after tuning")
    parser.add_argument("--compare_wo_tuning", action='store_true', default=False, help="Whether to check result correctness of the given config without tuning")
    parser.add_argument("--benchmark", action='store_true', default=False, help="Benchmark the given config")
    parser.add_argument("--time_breakdown", action='store_true', default=False, help="Show detailed time breakdown of each step during the tuning")
    parser.add_argument("--verbose", action='store_true', default=False, help="enables time_breakdown and additional logging messages")
    parser.add_argument("--num_threads", type=int, default=16, help="number of threads to use for kernel compilation and post processing")
    parser.add_argument("--jobs", type=int, default=1, help="number of generated files")
    parser.add_argument("--iters", type=int, default=1000, help="number of benchmark iterations per config")
    parser.add_argument("--init_type", type=str, default='randn', help="Initialization type for input matrices (default: randn)")
    parser.add_argument("--no_warmup", action='store_true', default=False, help="Do not call the warmup kernel")
    args = parser.parse_args()
    if not args.o:
        if args.benchmark:
            args.o = "benchmarking_results.csv"
        else:
            args.o = get_default_tuning_result_filename()
    return args
TORCH_HAS_FP8E5B16 = hasattr(torch, 'float8_e5m2fnuz')
TORCH_HAS_FP8E4B8 = hasattr(torch, 'float8_e4m3fnuz')
tl_to_torch_types = {
    tl.float16: torch.float16,
    tl.bfloat16: torch.bfloat16,
    tl.float32: torch.float32,
    tl.int8: torch.int8,
    tl.int32: torch.int32,
}
if TORCH_HAS_FP8E5B16:
    tl_to_torch_types[tl.float8e5b16] = torch.float8_e5m2fnuz
if TORCH_HAS_FP8E4B8:
    tl_to_torch_types[tl.float8e4b8] = torch.float8_e4m3fnuz
name_to_tl_types = {
    'int8': tl.int8,
    'int32': tl.int32,
    'fp16': tl.float16,
    'fp32': tl.float32,
    'bf16': tl.bfloat16,
    'fp8': tl.float8e4b8,
    'bf8': tl.float8e5b16,
}
def process_item(item):
    M = item['M']
    N = item['N']
    K = item['K']
    col_a = False if item['rowMajorA'] == 'T' else True
    col_b = False if item['rowMajorB'] == 'T' else True
    del item['M']
    del item['N']
    del item['K']
    del item['rowMajorA']
    del item['rowMajorB']
    return M, N, K, col_a, col_b, item
def type_name_to_bytes(ty_name):
    if '32' in ty_name:
        return 4
    if '16' in ty_name:
        return 2
    if '8' in ty_name:
        return 1
    else:
        print(f"Unrecognized input type name {ty_name}")
        sys.exit(1)
def format_output(unformatted):
    if unformatted < 0.0001:
        formatted = "{:.3e}".format(unformatted)
    elif unformatted > 1000:
        formatted = "{:.1f}".format(unformatted)
    else:
        formatted = "{:.2f}".format(unformatted)
    return formatted
def setup(rank, world_size):
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    print(f"Rank {rank}/{world_size} process initialized.")
def cleanup():
    dist.destroy_process_group()
    print("Process group destroyed.")
def main():
    args = parse_args()
    world_size = len(args.gpu_ids) if args.gpu_ids else torch.cuda.device_count()
    print(f"Number of GPUs available: {world_size}")
    matrix_size_file = args.gemm_size_file
    output_file = args.o
    keepTmp = args.keep
    run_bench = args.benchmark
    jobs = args.jobs
    iters = args.iters
    skipWarmup = args.no_warmup
    # Get GPU ids
    ngpus = args.ngpus
    gpu_ids = args.gpu_ids
    if ngpus != 0 and gpu_ids:
        print("--ngpus and --gpu_ids are mutually exclusive options")
        return os.EX_USAGE
    if ngpus == 0 and not gpu_ids:
        ngpus = 1
    if ngpus != 0:
        gpus = list(range(ngpus))
    if gpu_ids:
        gpus = gpu_ids
    if run_bench:
        gpus = [gpus[0]]
        jobs = 1
    # Get element type
    dtype_a = args.dtype_a
    dtype_b = args.dtype_b
    dtype_c = args.dtype_c
    if not dtype_a in name_to_tl_types or not dtype_b in name_to_tl_types or not dtype_c in name_to_tl_types:
        print(f"Unsupported dtype_a {args.dtype_a} or dtype_b {args.dtype_b} or dtype_c {args.dtype_c}")
        print("Supported types: ", list(name_to_tl_types.keys()))
        sys.exit(1)
    mnks = []
    # TODO: make it more robust to get user input
    init_type = args.init_type
    if matrix_size_file == "" or not os.path.isfile(matrix_size_file):
        M = args.m
        N = args.n
        K = args.k
        col_a = args.col_a
        col_b = args.col_b
        mnks = [(M, N, K, col_a, col_b, None)]
    else:
        with open(matrix_size_file) as file:
            matrix_sizes = yaml.safe_load(file)
        for item in matrix_sizes:
            M, N, K, col_a, col_b, item = process_item(item)
            mnks.append((M, N, K, col_a, col_b, item))
    # Check correctness from given configs
    if args.compare_wo_tuning:
        for (M, N, K, col_a, col_b, myConfig) in mnks:
            test_correctness(M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, myConfig, True)
        return
    configs_full = get_full_tuning_space()
    start_time = datetime.now()
    # Append to the output file so that we can save all results into one file
    f_results = open(output_file, 'a')
    if run_bench:
        print(f"Benchmarking gemm with {dtype_a} inputs")
        print("trans     M      N      K    TFLOPS   us")
        f_results.write("trans,M,N,K,TFLOPS,us\n")
    else:
        print(f"Tuning {len(mnks)} gemm sizes starts at: {start_time}", flush=True)
    for (M, N, K, col_a, col_b, myConfig) in mnks:
        start_local_time = datetime.now()
        # Obtain a pruned tuning space according to gemm size
        # If running benchmark, use the provided config
        pruned_configs = [myConfig] if run_bench else prune_configs(M, N, K, configs_full, type_name_to_bytes(dtype_a), type_name_to_bytes(dtype_b))
        row_a_str = 'N' if col_a else 'T'
        row_b_str = 'N' if col_b else 'T'
        size_str = f'SIZE: {M} {N} {K} {row_a_str}{row_b_str}'
        if not run_bench:
            print(f"{size_str} nConfigs: {len(pruned_configs)}", end=" ", flush=True)
        else:
            print(f"{row_a_str}{row_b_str}    {M:5d}  {N:5d}  {K:5d}    ", end="")
            f_results.write(f"{row_a_str}{row_b_str},{M},{N},{K},")
        # The main tuning function for one gemm size
        verbose_level = 0
        if args.time_breakdown:
            verbose_level = 1
        if args.verbose:
            verbose_level = 2
        def tune_gemm(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, pruned_configs, run_bench, jobs, iters, skipWarmup, verbose, num_threads):
            return tune_gemm_config(rank, world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, pruned_configs, run_bench, jobs, iters, skipWarmup, verbose, num_threads)
        minTime, bestConfig, compile_time, profile_time, post_time = spawn(
            tune_gemm,
            args=(world_size, M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, pruned_configs, run_bench, jobs, iters, skipWarmup, verbose_level, num_threads),
            nprocs=world_size,
            join=True
        )
        # post processing the numbers
        perf_tflops = lambda us: 2 * M * N * K * 1e-12 / (us * 1e-6)
        tri_tflops = perf_tflops(minTime)
        formatted_tflops = format_output(tri_tflops)
        minTime = format_output(minTime)
        if not run_bench:
            print(f'TFLOPS: {formatted_tflops} time(us): {minTime}', end=" ", flush=True)
        bestConfig_compact_str, _ = gen_kernel_and_configStr_from_config(M, N, K, bestConfig, None, None, None)
        if not run_bench:
            print(f'best_config: {bestConfig_compact_str}', end=" ", flush=True)
        # write best config to tuning_results.yaml
        if run_bench:
            print(f"{formatted_tflops}     {minTime}")
            f_results.write(f"{formatted_tflops},{minTime}\n")
        sizeDict = {'M': M, 'N': N, 'K': K, 'rowMajorA': row_a_str, 'rowMajorB': row_b_str}
        sizeDict.update(bestConfig)
        if not run_bench:
            f_results.write("- " + str(sizeDict) + " ")
            f_results.write(f'# TFLOPS: {formatted_tflops} time(us): {minTime}\n')
        # remove generated files if asked to
        if not keepTmp:
            for i in range(jobs):
                generated_script = generated_kernel_name(M, N, K, i)
                os.remove(generated_script)
                if not skipWarmup:
                    os.remove(generated_script + ".failed_configs")
                for f in glob.glob(f"results-{i}.*"):
                    os.remove(f)
        # Check correctness if asked to
        if args.compare:
            print("correctness: ", end=" ", flush=True)
            test_correctness(M, N, K, col_a, col_b, dtype_a, dtype_b, dtype_c, init_type, bestConfig, False)
        elif not run_bench:
            print("", flush=True)
        end_local_time = datetime.now()
        if not run_bench:
            print(f">>> Elapsed time: {end_local_time - start_local_time} = {compile_time} (compile) + {profile_time} (profile) + {post_time} (post processing)", flush=True)
    f_results.close()
    end_time = datetime.now()
    tuning_time = end_time - start_time
    if not run_bench:
        print(f"Tuning ends at: {end_time}")
        print(f"Total tuning time (h:m:s): {tuning_time}")
if __name__ == '__main__':
    main()
 | 
| 
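Most of the pruning rules in `prune_configs` above are direct size comparisons; the least obvious one is the LDS check, which rejects any config whose A and B tiles would not fit in the 64 KiB of shared memory (LDS) per workgroup. A standalone rendering of that arithmetic, assuming fp16 operands at 2 bytes per element:

def fits_in_lds(block_m, block_n, block_k, elem_bytes_a=2, elem_bytes_b=2, lds_budget=65536):
    # one K-slice of the A tile and one K-slice of the B tile are resident at the same time
    lds = block_k * block_m * elem_bytes_a + block_k * block_n * elem_bytes_b
    return lds <= lds_budget

print(fits_in_lds(256, 256, 64))    # 32768 + 32768 = 65536 bytes -> True, exactly at the budget
print(fits_in_lds(256, 256, 128))   # 65536 + 65536 = 131072 bytes -> False, the config is pruned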
	phlippe/liger_kernels | 
	liger_kernels/utils.py | 
	https://github.com/phlippe/liger_kernels/blob/0abb152b752e66e1c3e0c78a7eb56daea9a07f42/liger_kernels/utils.py | 
	import jax
import numpy as np
import triton
import triton.language as tl
@triton.jit
def element_mul_kernel(
    _,  # alias for X_ptr
    grad_output_ptr,
    X_ptr,
    X_stride,
    n_cols,
    BLOCK_SIZE: tl.constexpr,
):
    """
    This function multiplies each element of the tensor pointed to by X_ptr with the value pointed to by grad_output_ptr.
    The multiplication is performed in-place on the tensor pointed to by X_ptr.
    Parameters:
    X_ptr: Pointer to the input tensor.
    X_stride (int): The stride of the input tensor.
    grad_output_ptr: Pointer to the gradient output value.
    n_cols (int): The number of columns in the input tensor.
    BLOCK_SIZE (int): The block size for Triton operations.
    """
    # Get the program ID and convert it to int64 to avoid overflow
    program_id = tl.program_id(0).to(tl.int64)
    # Locate the start index
    X_ptr += program_id * X_stride
    # Load the gradient output value
    grad_output = tl.load(grad_output_ptr)
    # Perform the element-wise multiplication
    for i in range(0, n_cols, BLOCK_SIZE):
        X_offsets = i + tl.arange(0, BLOCK_SIZE)
        X_block = tl.load(X_ptr + X_offsets, mask=X_offsets < n_cols)
        tl.store(X_ptr + X_offsets, X_block * grad_output, mask=X_offsets < n_cols)
def get_stride(array: jax.Array | jax.ShapeDtypeStruct, axis: int) -> int:
    """
    Returns the stride of a JAX array at a given axis.
    To calculate all strides, use get_strides.
    Args:
        array: JAX array or shape-dtype struct.
        axis: The axis at which to calculate the stride.
    Returns:
        The stride of the array at the given axis.
    """
    if axis < 0:
        axis += len(array.shape)
    shape = array.shape
    size = array.size
    stride = size // np.prod(shape[: axis + 1])
    return int(stride)
 | 
	@triton.jit
def element_mul_kernel(
    _,  # alias for X_ptr
    grad_output_ptr,
    X_ptr,
    X_stride,
    n_cols,
    BLOCK_SIZE: tl.constexpr,
):
    """
    This function multiplies each element of the tensor pointed to by X_ptr with the value pointed to by grad_output_ptr.
    The multiplication is performed in-place on the tensor pointed to by X_ptr.
    Parameters:
    X_ptr: Pointer to the input tensor.
    X_stride (int): The stride of the input tensor.
    grad_output_ptr: Pointer to the gradient output value.
    n_cols (int): The number of columns in the input tensor.
    BLOCK_SIZE (int): The block size for Triton operations.
    """
    # Get the program ID and convert it to int64 to avoid overflow
    program_id = tl.program_id(0).to(tl.int64)
    # Locate the start index
    X_ptr += program_id * X_stride
    # Load the gradient output value
    grad_output = tl.load(grad_output_ptr)
    # Perform the element-wise multiplication
    for i in range(0, n_cols, BLOCK_SIZE):
        X_offsets = i + tl.arange(0, BLOCK_SIZE)
        X_block = tl.load(X_ptr + X_offsets, mask=X_offsets < n_cols)
        tl.store(X_ptr + X_offsets, X_block * grad_output, mask=X_offsets < n_cols)
def get_stride(array: jax.Array | jax.ShapeDtypeStruct, axis: int) -> int:
    """
    Returns the stride of a JAX array at a given axis.
    To calculate all strides, use get_strides.
    Args:
        array: JAX array or shape-dtype struct.
        axis: The axis at which to calculate the stride.
    Returns:
        The stride of the array at the given axis.
    """
    if axis < 0:
        axis += len(array.shape)
    shape = array.shape
    size = array.size
    stride = size // np.prod(shape[: axis + 1])
    return int(stride)
 | 
| 
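A minimal usage sketch (not a dataset row) of the get_stride helper from the liger_kernels/utils.py record above, assuming a contiguous row-major array; the expected values follow directly from size // prod(shape[:axis + 1]):

import jax.numpy as jnp

x = jnp.zeros((2, 3, 4))  # size 24, contiguous row-major layout
# axis 0: 24 // 2 = 12, axis 1: 24 // 6 = 4, axis 2: 24 // 24 = 1
assert [get_stride(x, a) for a in range(3)] == [12, 4, 1]
# element_mul_kernel advances X_ptr by program_id * X_stride, so the axis-0
# stride (12 here) is the per-program row offset it would typically receive.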
	yifuwang/symm-mem-recipes | 
	triton_utils.py | 
	https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py | 
	import triton
import triton.language as tl
@triton.jit
def get_tid():
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %tid.x;
        mov.u32 $1, %tid.y;
        mov.u32 $2, %tid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
@triton.jit
def get_ntid():
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %ntid.x;
        mov.u32 $1, %ntid.y;
        mov.u32 $2, %ntid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
@triton.jit
def get_flat_tid():
    tid_x, tid_y, tid_z = get_tid()
    ntid_x, ntid_y, _ = get_ntid()
    return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x
@triton.jit
def get_flat_bid():
    return (
        tl.program_id(2) * tl.num_programs(1) * tl.num_programs(0)
        + tl.program_id(1) * tl.num_programs(0)
        + tl.program_id(0)
    )
@triton.jit
def sync_threads():
    tl.inline_asm_elementwise(
        "bar.sync 0;", "=r", [], dtype=tl.int32, is_pure=False, pack=1
    )
 | 
	@triton.jit
def get_tid():
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %tid.x;
        mov.u32 $1, %tid.y;
        mov.u32 $2, %tid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
 | 
| 
	yifuwang/symm-mem-recipes | 
	triton_utils.py | 
	https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py | 
	import triton
import triton.language as tl
@triton.jit
def get_tid():
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %tid.x;
        mov.u32 $1, %tid.y;
        mov.u32 $2, %tid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
@triton.jit
def get_ntid():
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %ntid.x;
        mov.u32 $1, %ntid.y;
        mov.u32 $2, %ntid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
@triton.jit
def get_flat_tid():
    tid_x, tid_y, tid_z = get_tid()
    ntid_x, ntid_y, _ = get_ntid()
    return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x
@triton.jit
def get_flat_bid():
    return (
        tl.program_id(2) * tl.num_programs(1) * tl.num_programs(0)
        + tl.program_id(1) * tl.num_programs(0)
        + tl.program_id(0)
    )
@triton.jit
def sync_threads():
    tl.inline_asm_elementwise(
        "bar.sync 0;", "=r", [], dtype=tl.int32, is_pure=False, pack=1
    )
 | 
	@triton.jit
def get_ntid():
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %ntid.x;
        mov.u32 $1, %ntid.y;
        mov.u32 $2, %ntid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
 | 
| 
	yifuwang/symm-mem-recipes | 
	triton_utils.py | 
	https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py | 
	import triton
import triton.language as tl
@triton.jit
def get_tid():
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %tid.x;
        mov.u32 $1, %tid.y;
        mov.u32 $2, %tid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
@triton.jit
def get_ntid():
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %ntid.x;
        mov.u32 $1, %ntid.y;
        mov.u32 $2, %ntid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
@triton.jit
def get_flat_tid():
    tid_x, tid_y, tid_z = get_tid()
    ntid_x, ntid_y, _ = get_ntid()
    return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x
@triton.jit
def get_flat_bid():
    return (
        tl.program_id(2) * tl.num_programs(1) * tl.num_programs(0)
        + tl.program_id(1) * tl.num_programs(0)
        + tl.program_id(0)
    )
@triton.jit
def sync_threads():
    tl.inline_asm_elementwise(
        "bar.sync 0;", "=r", [], dtype=tl.int32, is_pure=False, pack=1
    )
 | 
	@triton.jit
def get_flat_tid():
    tid_x, tid_y, tid_z = get_tid()
    ntid_x, ntid_y, _ = get_ntid()
    return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x
 | 
| 
	yifuwang/symm-mem-recipes | 
	triton_utils.py | 
	https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py | 
	import triton
import triton.language as tl
@triton.jit
def get_tid():
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %tid.x;
        mov.u32 $1, %tid.y;
        mov.u32 $2, %tid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
@triton.jit
def get_ntid():
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %ntid.x;
        mov.u32 $1, %ntid.y;
        mov.u32 $2, %ntid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
@triton.jit
def get_flat_tid():
    tid_x, tid_y, tid_z = get_tid()
    ntid_x, ntid_y, _ = get_ntid()
    return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x
@triton.jit
def get_flat_bid():
    return (
        tl.program_id(2) * tl.num_programs(1) * tl.num_programs(0)
        + tl.program_id(1) * tl.num_programs(0)
        + tl.program_id(0)
    )
@triton.jit
def sync_threads():
    tl.inline_asm_elementwise(
        "bar.sync 0;", "=r", [], dtype=tl.int32, is_pure=False, pack=1
    )
 | 
	@triton.jit
def get_flat_bid():
    return (
        tl.program_id(2) * tl.num_programs(1) * tl.num_programs(0)
        + tl.program_id(1) * tl.num_programs(0)
        + tl.program_id(0)
    )
 | 
| 
	yifuwang/symm-mem-recipes | 
	triton_utils.py | 
	https://github.com/yifuwang/symm-mem-recipes/blob/8ee5d5b8f53efb9c051e6cdf0ca62270c2b43c34/triton_utils.py | 
	import triton
import triton.language as tl
@triton.jit
def get_tid():
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %tid.x;
        mov.u32 $1, %tid.y;
        mov.u32 $2, %tid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
@triton.jit
def get_ntid():
    return tl.inline_asm_elementwise(
        """
        mov.u32 $0, %ntid.x;
        mov.u32 $1, %ntid.y;
        mov.u32 $2, %ntid.z;
        """,
        "=r,=r,=r",
        [],
        dtype=(tl.uint32, tl.uint32, tl.uint32),
        is_pure=True,
        pack=1,
    )
@triton.jit
def get_flat_tid():
    tid_x, tid_y, tid_z = get_tid()
    ntid_x, ntid_y, _ = get_ntid()
    return tid_z * ntid_y * ntid_x + tid_y * ntid_x + tid_x
@triton.jit
def get_flat_bid():
    return (
        tl.program_id(2) * tl.num_programs(1) * tl.num_programs(0)
        + tl.program_id(1) * tl.num_programs(0)
        + tl.program_id(0)
    )
@triton.jit
def sync_threads():
    tl.inline_asm_elementwise(
        "bar.sync 0;", "=r", [], dtype=tl.int32, is_pure=False, pack=1
    )
 | 
	@triton.jit
def sync_threads():
    tl.inline_asm_elementwise(
        "bar.sync 0;", "=r", [], dtype=tl.int32, is_pure=False, pack=1
    )
 | 
| 
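The repeated triton_utils.py records above all flatten CUDA's 3-D thread and block indices into a single linear id. A host-side sketch of the same arithmetic, for illustration only; flatten_3d is a hypothetical helper, not part of the repository:

def flatten_3d(x, y, z, nx, ny):
    # Same formula as get_flat_tid / get_flat_bid: x varies fastest, then y, then z.
    return z * ny * nx + y * nx + x

# e.g. thread (x=5, y=2, z=1) in a (32, 4, 2)-shaped block maps to 1*4*32 + 2*32 + 5 = 197
assert flatten_3d(5, 2, 1, nx=32, ny=4) == 197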
	Terapines/AI-Benchmark | 
	src/triton/resize.py | 
	https://github.com/Terapines/AI-Benchmark/blob/0ae8cd849a833d4c35a4b25b722ce98c5af2fe34/src/triton/resize.py | 
	import torch
import triton
import triton.language as tl
import os
USE_GPU = False
triton.runtime.driver.set_active_to_cpu()
def get_resize_kernel_autotune_config():
    configs = [
        triton.Config({'BLOCK_SIZE_W': 1}),
        triton.Config({'BLOCK_SIZE_W': 2}),
        triton.Config({'BLOCK_SIZE_W': 4}),
        triton.Config({'BLOCK_SIZE_W': 8}),
        triton.Config({'BLOCK_SIZE_W': 16}),
        triton.Config({'BLOCK_SIZE_W': 32}),
        triton.Config({'BLOCK_SIZE_W': 64}),
        triton.Config({'BLOCK_SIZE_W': 128}),
    ]
    if(os.getenv("ENABLE_AUTOTUNING") == "resize_kernel"):
      assert (len(configs) > 1), "Autotuning config size need be larger than 1"
      return configs
    return [triton.Config({'BLOCK_SIZE_W': 32})]
@triton.autotune(
    configs=get_resize_kernel_autotune_config(),
    key=[],
)
@triton.jit
def resize_kernel(
    src_ptr,
    out_ptr,
    channel,
    height,
    width,
    BLOCK_SIZE_W: tl.constexpr,
):
    pid_h = tl.program_id(axis=0)
    pid_c = tl.program_id(axis=1)
    dst_height = 2 * height  # 2x upsample
    dst_width = 2 * width
    hw_fl = 7
    h_idx = pid_h
    input_y = h_idx << (hw_fl - 1)
    y0 = input_y >> hw_fl
    h1_lambda = input_y - (y0 << hw_fl)
    factor = 1 << hw_fl
    h0_lambda = factor - h1_lambda
    y1 = tl.minimum(y0 + 1, height - 1)
    src_offset = pid_c * height * width
    src_ptrs0 = src_ptr + src_offset + y0 * width
    src_ptrs1 = src_ptr + src_offset + y1 * width
    out_ptrs =  out_ptr + (pid_c * dst_height * dst_width + h_idx * dst_width)
    for off in range(0, width * 2, BLOCK_SIZE_W):
        w_idx = off + tl.arange(0, BLOCK_SIZE_W) # [1, BLOCK_SIZE_W]
        mask = (w_idx < dst_width)
        input_x = w_idx << (hw_fl - 1)
        x0 = input_x >> hw_fl
        y0x0 = tl.load(src_ptrs0 + x0, mask=mask, other=0).to(tl.int16)
        y1x0 = tl.load(src_ptrs1 + x0, mask=mask, other=0).to(tl.int16)
        x1 = tl.minimum(x0 + 1, width - 1)
        y0x1 = tl.load(src_ptrs0 + x1, mask=mask, other=0).to(tl.int16)
        y1x1 = tl.load(src_ptrs1 + x1, mask=mask, other=0).to(tl.int16)
        w1_lambda = input_x - (x0 << hw_fl)
        w0_lambda = factor - w1_lambda
        sum1 = (y0x0 * w0_lambda + y0x1 * w1_lambda) >> hw_fl
        sum2 = (y1x0 * w0_lambda + y1x1 * w1_lambda) >> hw_fl
        sum = (sum1 * h0_lambda + sum2 * h1_lambda) >> hw_fl
        sum = sum.to(tl.int8)
        tl.store(out_ptrs + w_idx, sum, mask=mask)
def resize(src_arr, out_arr):
    src_arr = src_arr.contiguous()
    out_arr = out_arr.contiguous()
    # Get dimensions
    channel, height, width = src_arr.shape
    # BLOCK_H = 32
    # BLOCK_W = 32
    # Compute grid dimensions
    grid = lambda meta: (height * 2, channel, 1)
    # Launch the Triton kernel
    resize_kernel[grid](
        src_arr, out_arr, channel, height, width
    )
C, H, W = 3, 512, 512
src = torch.ones((C, H, W), dtype=torch.int8, device='cpu')
out = torch.empty((C, 2 * H, 2 * W), dtype=torch.int8, device='cpu')
resize(src, out)
# print(src)
# print(out)
 | 
	@triton.jit
def resize_kernel(
    src_ptr,
    out_ptr,
    channel,
    height,
    width,
    BLOCK_SIZE_W: tl.constexpr,
):
    pid_h = tl.program_id(axis=0)
    pid_c = tl.program_id(axis=1)
    dst_height = 2 * height  # 2x upsample
    dst_width = 2 * width
    hw_fl = 7
    h_idx = pid_h
    input_y = h_idx << (hw_fl - 1)
    y0 = input_y >> hw_fl
    h1_lambda = input_y - (y0 << hw_fl)
    factor = 1 << hw_fl
    h0_lambda = factor - h1_lambda
    y1 = tl.minimum(y0 + 1, height - 1)
    src_offset = pid_c * height * width
    src_ptrs0 = src_ptr + src_offset + y0 * width
    src_ptrs1 = src_ptr + src_offset + y1 * width
    out_ptrs =  out_ptr + (pid_c * dst_height * dst_width + h_idx * dst_width)
    for off in range(0, width * 2, BLOCK_SIZE_W):
        w_idx = off + tl.arange(0, BLOCK_SIZE_W) # [1, BLOCK_SIZE_W]
        mask = (w_idx < dst_width)
        input_x = w_idx << (hw_fl - 1)
        x0 = input_x >> hw_fl
        y0x0 = tl.load(src_ptrs0 + x0, mask=mask, other=0).to(tl.int16)
        y1x0 = tl.load(src_ptrs1 + x0, mask=mask, other=0).to(tl.int16)
        x1 = tl.minimum(x0 + 1, width - 1)
        y0x1 = tl.load(src_ptrs0 + x1, mask=mask, other=0).to(tl.int16)
        y1x1 = tl.load(src_ptrs1 + x1, mask=mask, other=0).to(tl.int16)
        w1_lambda = input_x - (x0 << hw_fl)
        w0_lambda = factor - w1_lambda
        sum1 = (y0x0 * w0_lambda + y0x1 * w1_lambda) >> hw_fl
        sum2 = (y1x0 * w0_lambda + y1x1 * w1_lambda) >> hw_fl
        sum = (sum1 * h0_lambda + sum2 * h1_lambda) >> hw_fl
        sum = sum.to(tl.int8)
        tl.store(out_ptrs + w_idx, sum, mask=mask)
def resize(src_arr, out_arr):
    src_arr = src_arr.contiguous()
    out_arr = out_arr.contiguous()
    # Get dimensions
    channel, height, width = src_arr.shape
    # BLOCK_H = 32
    # BLOCK_W = 32
    # Compute grid dimensions
    grid = lambda meta: (height * 2, channel, 1)
    # Launch the Triton kernel
    resize_kernel[grid](
        src_arr, out_arr, channel, height, width
    )
C, H, W = 3, 512, 512
src = torch.ones((C, H, W), dtype=torch.int8, device='cpu')
out = torch.empty((C, 2 * H, 2 * W), dtype=torch.int8, device='cpu')
resize(src, out)
# print(src)
# print(out)
 | 
| 
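resize_kernel above performs 2x bilinear upsampling in Q7 fixed point (hw_fl = 7, so the interpolation weights sum to factor = 128). A hedged NumPy reference for a single output pixel that mirrors the kernel's shifts and border clamping; ref_pixel is a hypothetical name used only for this sketch:

import numpy as np

def ref_pixel(src, c, dy, dx, hw_fl=7):
    # src: int8 array of shape (C, H, W); (dy, dx) index the 2x-upsampled output.
    H, W = src.shape[1], src.shape[2]
    factor = 1 << hw_fl
    iy, ix = dy << (hw_fl - 1), dx << (hw_fl - 1)    # source coords in Q7 (dst * 0.5)
    y0, x0 = iy >> hw_fl, ix >> hw_fl
    y1, x1 = min(y0 + 1, H - 1), min(x0 + 1, W - 1)  # clamp at the border
    wy1, wx1 = iy - (y0 << hw_fl), ix - (x0 << hw_fl)
    wy0, wx0 = factor - wy1, factor - wx1
    top = (int(src[c, y0, x0]) * wx0 + int(src[c, y0, x1]) * wx1) >> hw_fl
    bot = (int(src[c, y1, x0]) * wx0 + int(src[c, y1, x1]) * wx1) >> hw_fl
    return np.int8((top * wy0 + bot * wy1) >> hw_fl)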
	khulnasoft/divest | 
	divest/kernels/swiglu.py | 
	https://github.com/khulnasoft/divest/blob/53b878ed6cf9f8e172a496bf26a2b22ff3a30a51/divest/kernels/swiglu.py | 
	import triton
import triton.language as tl
import torch
from .utils import calculate_settings
@triton.jit
def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,):
    block_idx = tl.program_id(0)
    offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32)
    g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32)
    # f = e * sigmoid(e)
    f_row = e_row * tl.sigmoid(e_row) # e_row / (1 + tl.exp(-e_row))
    f_row = f_row.to(g_row.dtype) # Exact copy from HF
    # h = f * g
    h_row = f_row * g_row
    # Store h
    tl.store(h + offsets, h_row, mask = mask)
pass
def swiglu_fg_kernel(e, g):
    batch, seq_len, hd = e.shape
    n_elements = e.numel()
    h = torch.empty((batch, seq_len, hd), dtype = e.dtype, device = "cuda")
    grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
    _fg_kernel[grid](e, g, h, n_elements, BLOCK_SIZE = 1024,)
    return h
pass
@triton.jit
def _DWf_DW_dfg_kernel(DW, e, g, n_elements, BLOCK_SIZE : tl.constexpr,):
    """
    e = e.float()
    se = 1.0 / (1.0 + torch.exp(-e))
    f = (se * e).to(dtype)
    h = f * g
    df = DW * f
    dg = DW * g
    de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype)
    """
    block_idx = tl.program_id(0)
    offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    DW_row = tl.load(DW + offsets, mask = mask, other = 0)#.to(tl.float32)
    e_row  = tl.load(e  + offsets, mask = mask, other = 0).to(tl.float32)
    g_row  = tl.load(g  + offsets, mask = mask, other = 0)#.to(tl.float32)
    # e = e.float()
    # se = 1.0 / (1.0 + torch.exp(-e))
    se_row = tl.sigmoid(e_row) # 1.0 / (1.0 + tl.exp(-e_row))
    # f = (se * e).to(dtype)
    f_row = se_row * e_row
    f_row = f_row.to(DW_row.dtype)
    # h = f * g
    h_row  =  f_row * g_row
    # df = DW * f
    df_row = DW_row * f_row
    # dg = DW * g
    dg_row = DW_row * g_row
    # de = (dg.float() * se * (1.0 + e * (1.0 - se))).to(dtype)
    de_row = dg_row.to(tl.float32) * se_row * (1.0 + e_row * (1.0 - se_row))
    de_row = de_row.to(DW_row.dtype)
    # Store derivatives in buffers
    tl.store(DW + offsets, h_row,  mask = mask) # h  = f * g
    tl.store(e  + offsets, df_row, mask = mask) # df = DW * f
    tl.store(g  + offsets, de_row, mask = mask) # de
pass
def swiglu_DWf_DW_dfg_kernel(DW, e, g):
    batch_seq_len, hd = e.shape
    n_elements = e.numel()
    grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
    _DWf_DW_dfg_kernel[grid](DW, e, g, n_elements, BLOCK_SIZE = 1024,)
    return DW, e, g
pass | 
	@triton.jit
def _fg_kernel(e, g, h, n_elements, BLOCK_SIZE : tl.constexpr,):
    block_idx = tl.program_id(0)
    offsets = block_idx*BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    e_row = tl.load(e + offsets, mask = mask, other = 0).to(tl.float32)
    g_row = tl.load(g + offsets, mask = mask, other = 0)#.to(tl.float32)
    # f = e * sigmoid(e)
    f_row = e_row * tl.sigmoid(e_row) # e_row / (1 + tl.exp(-e_row))
    f_row = f_row.to(g_row.dtype) # Exact copy from HF
    # h = f * g
    h_row = f_row * g_row
    # Store h
    tl.store(h + offsets, h_row, mask = mask)
pass
def swiglu_fg_kernel(e, g):
    batch, seq_len, hd = e.shape
    n_elements = e.numel()
    h = torch.empty((batch, seq_len, hd), dtype = e.dtype, device = "cuda")
    grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)
    _fg_kernel[grid](e, g, h, n_elements, BLOCK_SIZE = 1024,)
    return h
pass
 | 
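The _fg_kernel forward above computes the elementwise SwiGLU gating product SiLU(e) * g. A plain PyTorch sketch of the same math for comparison; swiglu_forward_reference is an illustrative name, not part of the repository:

import torch

def swiglu_forward_reference(e: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
    # f = e * sigmoid(e) in float32, cast back to g's dtype, then h = f * g,
    # matching the precision handling inside _fg_kernel.
    f = (e.float() * torch.sigmoid(e.float())).to(g.dtype)
    return f * g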