Dataset columns:

  repository   stringclasses   166 values
  file_path    stringlengths   6-125
  url          stringlengths   89-210
  code         stringlengths   413-290k
  chunk        stringlengths   56-175k
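Each row pairs a source file's metadata and full contents with one extracted chunk of that file. As a minimal sketch of how rows with this schema might be inspected, assuming the data is published on the Hugging Face Hub (the dataset identifier below is a hypothetical placeholder, not the real name):

from datasets import load_dataset  # pip install datasets

# "someuser/triton-kernel-chunks" is a hypothetical placeholder for the real dataset ID.
ds = load_dataset("someuser/triton-kernel-chunks", split="train")

row = ds[0]
print(row["repository"], row["file_path"])  # e.g. BobMcDear/attorch attorch/act_kernels.py
print(row["url"])                           # pinned GitHub blob URL
print(len(row["code"]), len(row["chunk"]))  # full-file text vs. one extracted chunk

The fields printed correspond to the columns above; the first row, shown next, draws its chunk from attorch/act_kernels.py.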
repository: BobMcDear/attorch
file_path: attorch/act_kernels.py
url: https://github.com/BobMcDear/attorch/blob/fdd7c33c9476f19488b9025404112f56212dcb05/attorch/act_kernels.py
""" Kernels for activation functions with fused dropout. """ import triton import triton.language as tl from .dropout_kernels import apply_dropout, apply_dropout_grad from .utils import element_wise_kernel_configs @triton.jit def sigmoid(input): """ Applies sigmoid to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by sigmoid. """ return (1 / (1 + tl.exp(-input))) @triton.jit def sigmoid_grad(input): """ Calculates the gradient of sigmoid. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of sigmoid. """ output_sigmoid = sigmoid(input) return output_sigmoid * (1 - output_sigmoid) @triton.jit def logsigmoid(input): """ Applies the log of sigmoid to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by the log of sigmoid. """ return tl.log(sigmoid(input)) @triton.jit def logsigmoid_grad(input): """ Calculates the gradient of the log of sigmoid. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of the log of sigmoid. """ return (1 / (1 + tl.exp(input))) @triton.jit def tanh(input): """ Applies tanh to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by tanh. """ return 2 * sigmoid(2 * input) - 1 @triton.jit def tanh_grad(input): """ Calculates the gradient of tanh. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of tanh. """ output_tanh = tanh(input) return 1 - output_tanh * output_tanh @triton.jit def relu(input): """ Applies ReLU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by ReLU. """ return tl.maximum(0, input) @triton.jit def relu_grad(input): """ Calculates the gradient of ReLU. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of ReLU. """ return tl.where(input <= 0, 0, 1) @triton.jit def gelu(input): """ Applies GELU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by GELU. """ cdf = 0.5 * (1 + tl.math.erf(0.707106781 * input)) return cdf * input @triton.jit def gelu_grad(input): """ Calculates the gradient of GELU. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of GELU. """ cdf = 0.5 * (1 + tl.math.erf(0.707106781 * input)) cdf_grad = 0.39894228 * tl.exp(-0.5 * input * input) return (cdf_grad * input + cdf) @triton.jit def silu(input): """ Applies SiLU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by SiLU. """ return (input * sigmoid(input)) @triton.jit def silu_grad(input): """ Calculates the gradient of SiLU. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of SiLU. """ output_sigmoid = sigmoid(input) return (output_sigmoid * (input * (1 - output_sigmoid) + 1)) @triton.jit def relu6(input): """ Applies ReLU6 to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by ReLU6. """ return tl.minimum(relu(input), 6) @triton.jit def relu6_grad(input): """ Calculates the gradient of ReLU6. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of ReLU6. """ return tl.where((0 < input) & (input < 6), 1, 0) @triton.jit def hardsigmoid(input): """ Applies hard sigmoid to the input. Args: input: Input. 
The input must be loaded and cannot be a pointer. Returns: Input transformed by hard sigmoid. """ return tl.maximum(0, tl.minimum(1, input / 6 + 0.5)) @triton.jit def hardsigmoid_grad(input): """ Calculates the gradient of hard sigmoid. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of hard sigmoid. """ return tl.where((-3 < input) & (input < 3), 1 / 6, 0) @triton.jit def hardtanh(input): """ Applies hard tanh to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by hard tanh. """ return tl.maximum(-1, tl.minimum(1, input)) @triton.jit def hardtanh_grad(input): """ Calculates the gradient of hard tanh. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of hard tanh. """ return tl.where((-1 < input) & (input < 1), 1, 0) @triton.jit def hardswish(input): """ Applies hard Swish to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by hard Swish. """ return input * relu6(input + 3) / 6 @triton.jit def hardswish_grad(input): """ Calculates the gradient of hard Swish. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of hard Swish. """ return (relu6(input + 3) + input * relu6_grad(input + 3)) / 6 @triton.jit def selu(input): """ Applies SELU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by SELU. """ scale = 1.0507009873554804934193349852946 alpha = 1.6732632423543772848170429916717 return scale * (tl.maximum(0, input) + tl.minimum(0, alpha * (tl.exp(input) - 1))) @triton.jit def selu_grad(input): """ Calculates the gradient of SELU. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of SELU. """ scale = 1.0507009873554804934193349852946 alpha = 1.6732632423543772848170429916717 return scale * tl.where(input <= 0, alpha * tl.exp(input), 1) @triton.jit def mish(input): """ Applies Mish to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by Mish. """ return input * tanh(tl.log(1 + tl.exp(input))) @triton.jit def mish_grad(input): """ Calculates the gradient of Mish. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of Mish. """ exp = tl.exp(input) delta = exp * (exp + 2) + 2 return (exp * (exp * ((4 * input + 6) + exp * (exp + 4)) + 4 * (input + 1)) / (delta * delta)) @triton.jit def softplus(input): """ Applies softplus to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by softplus. """ return tl.log(1 + tl.exp(input)) @triton.jit def softplus_grad(input): """ Calculates the gradient of softplus. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of softplus. """ return sigmoid(input) @triton.jit def softsign(input): """ Applies softsign to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by softsign. """ return input / (1 + tl.abs(input)) @triton.jit def softsign_grad(input): """ Calculates the gradient of softsign. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of softsign. """ denom = 1 + tl.abs(input) return 1 / (denom * denom) @triton.jit def tanhshrink(input): """ Applies tanh shrink to the input. Args: input: Input. The input must be loaded and cannot be a pointer. 
Returns: Input transformed by tanh shrink. """ return input - tanh(input) @triton.jit def tanhshrink_grad(input): """ Calculates the gradient of tanh shrink. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of tanh shrink. """ return 1 - tanh_grad(input) @triton.jit def leaky_relu(input, negative_slope): """ Applies leaky ReLU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. negative_slope: Slope of the negative component. Returns: Input transformed by leaky ReLU. """ return relu(input) + negative_slope * tl.minimum(0, input) @triton.jit def leaky_relu_grad(input, negative_slope): """ Calculates the gradient of leaky ReLU. Args: input: Input. The input must be loaded and cannot be a pointer. negative_slope: Slope of the negative component. Returns: Gradient of leaky ReLU. """ return tl.where(input <= 0, negative_slope, 1) @triton.jit def elu(input, alpha): """ Applies ELU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. alpha: Alpha value. Returns: Input transformed by ELU. """ return tl.where(input <= 0, alpha * (tl.exp(input) - 1), input) @triton.jit def elu_grad(input, alpha): """ Calculates the gradient of ELU. Args: input: Input. The input must be loaded and cannot be a pointer. alpha: Alpha value. Returns: Gradient of ELU. """ return tl.where(input <= 0, alpha * tl.exp(input), 1) @triton.jit def celu(input, alpha): """ Applies CELU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. alpha: Alpha value. Returns: Input transformed by CELU. """ return relu(input) + tl.minimum(0, alpha * (tl.exp(input / alpha) - 1)) @triton.jit def celu_grad(input, alpha): """ Calculates the gradient of CELU. Args: input: Input. The input must be loaded and cannot be a pointer. alpha: Alpha value. Returns: Gradient of CELU. """ return tl.where(input <= 0, tl.exp(input / alpha), 1) @triton.jit def apply_act_func(input, drop_p, seed, offset, param, act_func: tl.constexpr, dropout: tl.constexpr): """ Applies an activation function to the input, optionally fusing dropout. Args: input: Input. The input must be loaded and cannot be a pointer. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. offset: Offset to generate the dropout mask for if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. Returns: Input transformed by the desired activation function, potentially with fused dropout. 
""" if act_func == 'sigmoid': input = input.to(tl.float32) output = sigmoid(input) if act_func == 'logsigmoid': input = input.to(tl.float32) output = logsigmoid(input) elif act_func == 'tanh': input = input.to(tl.float32) output = tanh(input) elif act_func == 'relu': output = relu(input) elif act_func == 'gelu': input = input.to(tl.float32) output = gelu(input) elif act_func == 'silu': input = input.to(tl.float32) output = silu(input) elif act_func == 'relu6': output = relu6(input) elif act_func == 'hardsigmoid': output = hardsigmoid(input) elif act_func == 'hardtanh': output = hardtanh(input) elif act_func == 'hardswish': output = hardswish(input) elif act_func == 'selu': input = input.to(tl.float32) output = selu(input) elif act_func == 'mish': input = input.to(tl.float32) output = mish(input) elif act_func == 'softplus': input = input.to(tl.float32) output = softplus(input) elif act_func == 'softsign': output = softsign(input) elif act_func == 'tanhshrink': input = input.to(tl.float32) output = tanhshrink(input) elif act_func == 'leaky_relu': output = leaky_relu(input, param) elif act_func == 'elu': input = input.to(tl.float32) output = elu(input, param) elif act_func == 'celu': input = input.to(tl.float32) output = celu(input, param) if dropout: output = apply_dropout(output, drop_p, seed, offset) return output @triton.jit def apply_act_func_grad(output_grad, input, drop_p, seed, offset, param, act_func: tl.constexpr, dropout: tl.constexpr): """ Calculates the gradient of an activation function. Args: output_grad: Output gradients. The output gradients must be loaded and cannot be a pointer. input: Input. The input must be loaded and cannot be a pointer. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. offset: Offset to generate the dropout mask for if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. Returns: Gradient of the desired activation function. 
""" if act_func == 'sigmoid': input = input.to(tl.float32) output = sigmoid_grad(input) if act_func == 'logsigmoid': input = input.to(tl.float32) output = logsigmoid_grad(input) elif act_func == 'tanh': input = input.to(tl.float32) output = tanh_grad(input) elif act_func == 'relu': output = relu_grad(input) elif act_func == 'gelu': input = input.to(tl.float32) output = gelu_grad(input) elif act_func == 'silu': input = input.to(tl.float32) output = silu_grad(input) elif act_func == 'relu6': output = relu6_grad(input) elif act_func == 'hardsigmoid': output = hardsigmoid_grad(input) elif act_func == 'hardtanh': output = hardtanh_grad(input) elif act_func == 'hardswish': output = hardswish_grad(input) elif act_func == 'selu': input = input.to(tl.float32) output = selu_grad(input) elif act_func == 'mish': input = input.to(tl.float32) output = mish_grad(input) elif act_func == 'softplus': input = input.to(tl.float32) output = softplus_grad(input) elif act_func == 'softsign': output = softsign_grad(input) elif act_func == 'tanhshrink': input = input.to(tl.float32) output = tanhshrink_grad(input) elif act_func == 'leaky_relu': output = leaky_relu_grad(input, param) elif act_func == 'elu': input = input.to(tl.float32) output = elu_grad(input, param) elif act_func == 'celu': input = input.to(tl.float32) output = celu_grad(input, param) if dropout: output_grad = apply_dropout_grad(output_grad, drop_p, seed, offset) return output_grad * output @triton.autotune( configs=element_wise_kernel_configs(), key=['size'], ) @triton.jit def act_func_forward_kernel( input_pointer, output_pointer, size, drop_p, seed, param, act_func: tl.constexpr, dropout: tl.constexpr, BLOCK_SIZE: tl.constexpr, ): """ Applies an activation function to the input, optionally fusing dropout. Args: input_pointer: Pointer to the input to transform. The input must be of shape [size]. output_pointer: Pointer to a container the result is written to. The container must be of shape [size]. size: Number of elements in the input. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. BLOCK_SIZE: Block size. """ # This program processes BLOCK_SIZE rows. pid = tl.program_id(axis=0) offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offset < size input = tl.load(input_pointer + offset, mask=mask) tl.store(output_pointer + offset, apply_act_func(input, drop_p, seed, offset, param, act_func, dropout), mask=mask) @triton.autotune( configs=element_wise_kernel_configs(), key=['size'], ) @triton.jit def act_func_backward_kernel( output_grad_pointer, input_pointer, input_grad_pointer, size, drop_p, seed, param, act_func: tl.constexpr, dropout: tl.constexpr, BLOCK_SIZE: tl.constexpr, ): """ Calculates the input gradient of an activation function. Args: output_grad_pointer: Pointer to the activation's output gradients. The output gradients must be of shape [size]. input_pointer: Pointer to the activation's input. The input must be of shape [size]. input_grad_pointer: Pointer to a container the input's gradients are written to. The container must be of shape [size]. 
size: Number of elements in the input. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. BLOCK_SIZE: Block size. """ # This program processes BLOCK_SIZE rows. pid = tl.program_id(axis=0) offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offset < size output_grad = tl.load(output_grad_pointer + offset, mask=mask) input = tl.load(input_pointer + offset, mask=mask) tl.store(input_grad_pointer + offset, apply_act_func_grad(output_grad, input, drop_p, seed, offset, param, act_func, dropout), mask=mask)
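Both autotuned kernels operate element-wise over a flattened tensor with a 1D launch grid. As a rough usage sketch (not taken from the repository's own wrappers), a forward launch from PyTorch could look like the following, assuming a CUDA device and that act_func_forward_kernel is imported from attorch.act_kernels; the GELU choice and tensor shape are illustrative:

import torch
import triton

from attorch.act_kernels import act_func_forward_kernel

x = torch.randn(8, 1024, device='cuda')
y = torch.empty_like(x)
size = x.numel()

# One program handles BLOCK_SIZE elements; BLOCK_SIZE itself is chosen by @triton.autotune.
grid = lambda meta: (triton.cdiv(size, meta['BLOCK_SIZE']),)
act_func_forward_kernel[grid](
    x, y, size,
    0.0,  # drop_p (unused since dropout=False)
    0,    # seed (unused since dropout=False)
    0.0,  # param (only read by the leaky_relu, elu, and celu branches)
    act_func='gelu',
    dropout=False,
)

The backward kernel follows the same pattern, taking the output gradients, the saved input, and a container for the input gradients.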
chunk:

@triton.jit
def celu_grad(input, alpha):
    """
    Calculates the gradient of CELU.

    Args:
        input: Input. The input must be loaded and cannot be a pointer.
        alpha: Alpha value.

    Returns:
        Gradient of CELU.
    """
    return tl.where(input <= 0, tl.exp(input / alpha), 1)
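For reference, the value this chunk returns is the derivative of the CELU formula used by celu in the file above (a standard calculus step, not part of the repository):

\[
\mathrm{CELU}_\alpha(x) = \max(0, x) + \min\!\bigl(0,\ \alpha\,(e^{x/\alpha} - 1)\bigr)
\quad\Longrightarrow\quad
\frac{d}{dx}\,\mathrm{CELU}_\alpha(x) =
\begin{cases}
1, & x > 0,\\
\alpha\, e^{x/\alpha}\cdot\tfrac{1}{\alpha} = e^{x/\alpha}, & x \le 0,
\end{cases}
\]

which is exactly what tl.where(input <= 0, tl.exp(input / alpha), 1) computes.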
""" if act_func == 'sigmoid': input = input.to(tl.float32) output = sigmoid(input) if act_func == 'logsigmoid': input = input.to(tl.float32) output = logsigmoid(input) elif act_func == 'tanh': input = input.to(tl.float32) output = tanh(input) elif act_func == 'relu': output = relu(input) elif act_func == 'gelu': input = input.to(tl.float32) output = gelu(input) elif act_func == 'silu': input = input.to(tl.float32) output = silu(input) elif act_func == 'relu6': output = relu6(input) elif act_func == 'hardsigmoid': output = hardsigmoid(input) elif act_func == 'hardtanh': output = hardtanh(input) elif act_func == 'hardswish': output = hardswish(input) elif act_func == 'selu': input = input.to(tl.float32) output = selu(input) elif act_func == 'mish': input = input.to(tl.float32) output = mish(input) elif act_func == 'softplus': input = input.to(tl.float32) output = softplus(input) elif act_func == 'softsign': output = softsign(input) elif act_func == 'tanhshrink': input = input.to(tl.float32) output = tanhshrink(input) elif act_func == 'leaky_relu': output = leaky_relu(input, param) elif act_func == 'elu': input = input.to(tl.float32) output = elu(input, param) elif act_func == 'celu': input = input.to(tl.float32) output = celu(input, param) if dropout: output = apply_dropout(output, drop_p, seed, offset) return output @triton.jit def apply_act_func_grad(output_grad, input, drop_p, seed, offset, param, act_func: tl.constexpr, dropout: tl.constexpr): """ Calculates the gradient of an activation function. Args: output_grad: Output gradients. The output gradients must be loaded and cannot be a pointer. input: Input. The input must be loaded and cannot be a pointer. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. offset: Offset to generate the dropout mask for if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. Returns: Gradient of the desired activation function. 
""" if act_func == 'sigmoid': input = input.to(tl.float32) output = sigmoid_grad(input) if act_func == 'logsigmoid': input = input.to(tl.float32) output = logsigmoid_grad(input) elif act_func == 'tanh': input = input.to(tl.float32) output = tanh_grad(input) elif act_func == 'relu': output = relu_grad(input) elif act_func == 'gelu': input = input.to(tl.float32) output = gelu_grad(input) elif act_func == 'silu': input = input.to(tl.float32) output = silu_grad(input) elif act_func == 'relu6': output = relu6_grad(input) elif act_func == 'hardsigmoid': output = hardsigmoid_grad(input) elif act_func == 'hardtanh': output = hardtanh_grad(input) elif act_func == 'hardswish': output = hardswish_grad(input) elif act_func == 'selu': input = input.to(tl.float32) output = selu_grad(input) elif act_func == 'mish': input = input.to(tl.float32) output = mish_grad(input) elif act_func == 'softplus': input = input.to(tl.float32) output = softplus_grad(input) elif act_func == 'softsign': output = softsign_grad(input) elif act_func == 'tanhshrink': input = input.to(tl.float32) output = tanhshrink_grad(input) elif act_func == 'leaky_relu': output = leaky_relu_grad(input, param) elif act_func == 'elu': input = input.to(tl.float32) output = elu_grad(input, param) elif act_func == 'celu': input = input.to(tl.float32) output = celu_grad(input, param) if dropout: output_grad = apply_dropout_grad(output_grad, drop_p, seed, offset) return output_grad * output @triton.autotune( configs=element_wise_kernel_configs(), key=['size'], ) @triton.jit def act_func_forward_kernel( input_pointer, output_pointer, size, drop_p, seed, param, act_func: tl.constexpr, dropout: tl.constexpr, BLOCK_SIZE: tl.constexpr, ): """ Applies an activation function to the input, optionally fusing dropout. Args: input_pointer: Pointer to the input to transform. The input must be of shape [size]. output_pointer: Pointer to a container the result is written to. The container must be of shape [size]. size: Number of elements in the input. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. BLOCK_SIZE: Block size. """ # This program processes BLOCK_SIZE rows. pid = tl.program_id(axis=0) offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offset < size input = tl.load(input_pointer + offset, mask=mask) tl.store(output_pointer + offset, apply_act_func(input, drop_p, seed, offset, param, act_func, dropout), mask=mask) @triton.autotune( configs=element_wise_kernel_configs(), key=['size'], ) @triton.jit def act_func_backward_kernel( output_grad_pointer, input_pointer, input_grad_pointer, size, drop_p, seed, param, act_func: tl.constexpr, dropout: tl.constexpr, BLOCK_SIZE: tl.constexpr, ): """ Calculates the input gradient of an activation function. Args: output_grad_pointer: Pointer to the activation's output gradients. The output gradients must be of shape [size]. input_pointer: Pointer to the activation's input. The input must be of shape [size]. input_grad_pointer: Pointer to a container the input's gradients are written to. The container must be of shape [size]. 
size: Number of elements in the input. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. BLOCK_SIZE: Block size. """ # This program processes BLOCK_SIZE rows. pid = tl.program_id(axis=0) offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offset < size output_grad = tl.load(output_grad_pointer + offset, mask=mask) input = tl.load(input_pointer + offset, mask=mask) tl.store(input_grad_pointer + offset, apply_act_func_grad(output_grad, input, drop_p, seed, offset, param, act_func, dropout), mask=mask)
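The device functions above implement tanh through the identity 2*sigmoid(2x) - 1 and GELU through the exact erf form with a closed-form gradient. A minimal host-side PyTorch check of those two identities (a sketch, not part of attorch itself) is:

import torch

# Verify the tanh identity and the closed-form GELU gradient used by the device
# functions above, in float64 on the host.
x = torch.randn(4096, dtype=torch.float64, requires_grad=True)

# tanh(x) == 2 * sigmoid(2x) - 1
assert torch.allclose(torch.tanh(x), 2 * torch.sigmoid(2 * x) - 1)

# d/dx [x * Phi(x)] = Phi(x) + x * phi(x), i.e. gelu_grad's cdf + input * cdf_grad
gelu = 0.5 * x * (1 + torch.erf(0.707106781 * x))
gelu.sum().backward()
cdf = 0.5 * (1 + torch.erf(0.707106781 * x))
cdf_grad = 0.39894228 * torch.exp(-0.5 * x * x)  # standard normal pdf
assert torch.allclose(x.grad, (cdf + x * cdf_grad).detach(), atol=1e-6)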
@triton.jit def act_func_forward_kernel( input_pointer, output_pointer, size, drop_p, seed, param, act_func: tl.constexpr, dropout: tl.constexpr, BLOCK_SIZE: tl.constexpr, ): """ Applies an activation function to the input, optionally fusing dropout. Args: input_pointer: Pointer to the input to transform. The input must be of shape [size]. output_pointer: Pointer to a container the result is written to. The container must be of shape [size]. size: Number of elements in the input. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. BLOCK_SIZE: Block size. """ # This program processes BLOCK_SIZE rows. pid = tl.program_id(axis=0) offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offset < size input = tl.load(input_pointer + offset, mask=mask) tl.store(output_pointer + offset, apply_act_func(input, drop_p, seed, offset, param, act_func, dropout), mask=mask) @triton.autotune( configs=element_wise_kernel_configs(), key=['size'], )
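A host-side launch of act_func_forward_kernel could look like the sketch below: the tensor is flattened, a 1D grid covers the element count, and BLOCK_SIZE comes from the autotuner via element_wise_kernel_configs, so it is not passed explicitly. The wrapper name and defaults are illustrative; attorch's real public wrappers live elsewhere in the library.

import torch
import triton

def launch_act_forward(x: torch.Tensor, act_func: str = 'gelu', drop_p: float = 0.0,
                       param: float = 0.01, training: bool = False) -> torch.Tensor:
    # Hypothetical launcher: flatten, allocate the output, draw a dropout seed, and
    # let the autotuned BLOCK_SIZE determine the grid size.
    flat = x.contiguous().view(-1)
    out = torch.empty_like(flat)
    seed = int(torch.randint(0, 2**31 - 1, (1,)).item())
    grid = lambda META: (triton.cdiv(flat.numel(), META['BLOCK_SIZE']),)
    act_func_forward_kernel[grid](flat, out, flat.numel(), drop_p, seed, param,
                                  act_func=act_func,
                                  dropout=training and drop_p > 0.0)
    return out.view_as(x)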
BobMcDear/attorch
attorch/act_kernels.py
https://github.com/BobMcDear/attorch/blob/fdd7c33c9476f19488b9025404112f56212dcb05/attorch/act_kernels.py
""" Kernels for activation functions with fused dropout. """ import triton import triton.language as tl from .dropout_kernels import apply_dropout, apply_dropout_grad from .utils import element_wise_kernel_configs @triton.jit def sigmoid(input): """ Applies sigmoid to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by sigmoid. """ return (1 / (1 + tl.exp(-input))) @triton.jit def sigmoid_grad(input): """ Calculates the gradient of sigmoid. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of sigmoid. """ output_sigmoid = sigmoid(input) return output_sigmoid * (1 - output_sigmoid) @triton.jit def logsigmoid(input): """ Applies the log of sigmoid to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by the log of sigmoid. """ return tl.log(sigmoid(input)) @triton.jit def logsigmoid_grad(input): """ Calculates the gradient of the log of sigmoid. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of the log of sigmoid. """ return (1 / (1 + tl.exp(input))) @triton.jit def tanh(input): """ Applies tanh to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by tanh. """ return 2 * sigmoid(2 * input) - 1 @triton.jit def tanh_grad(input): """ Calculates the gradient of tanh. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of tanh. """ output_tanh = tanh(input) return 1 - output_tanh * output_tanh @triton.jit def relu(input): """ Applies ReLU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by ReLU. """ return tl.maximum(0, input) @triton.jit def relu_grad(input): """ Calculates the gradient of ReLU. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of ReLU. """ return tl.where(input <= 0, 0, 1) @triton.jit def gelu(input): """ Applies GELU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by GELU. """ cdf = 0.5 * (1 + tl.math.erf(0.707106781 * input)) return cdf * input @triton.jit def gelu_grad(input): """ Calculates the gradient of GELU. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of GELU. """ cdf = 0.5 * (1 + tl.math.erf(0.707106781 * input)) cdf_grad = 0.39894228 * tl.exp(-0.5 * input * input) return (cdf_grad * input + cdf) @triton.jit def silu(input): """ Applies SiLU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by SiLU. """ return (input * sigmoid(input)) @triton.jit def silu_grad(input): """ Calculates the gradient of SiLU. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of SiLU. """ output_sigmoid = sigmoid(input) return (output_sigmoid * (input * (1 - output_sigmoid) + 1)) @triton.jit def relu6(input): """ Applies ReLU6 to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by ReLU6. """ return tl.minimum(relu(input), 6) @triton.jit def relu6_grad(input): """ Calculates the gradient of ReLU6. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of ReLU6. """ return tl.where((0 < input) & (input < 6), 1, 0) @triton.jit def hardsigmoid(input): """ Applies hard sigmoid to the input. Args: input: Input. 
The input must be loaded and cannot be a pointer. Returns: Input transformed by hard sigmoid. """ return tl.maximum(0, tl.minimum(1, input / 6 + 0.5)) @triton.jit def hardsigmoid_grad(input): """ Calculates the gradient of hard sigmoid. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of hard sigmoid. """ return tl.where((-3 < input) & (input < 3), 1 / 6, 0) @triton.jit def hardtanh(input): """ Applies hard tanh to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by hard tanh. """ return tl.maximum(-1, tl.minimum(1, input)) @triton.jit def hardtanh_grad(input): """ Calculates the gradient of hard tanh. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of hard tanh. """ return tl.where((-1 < input) & (input < 1), 1, 0) @triton.jit def hardswish(input): """ Applies hard Swish to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by hard Swish. """ return input * relu6(input + 3) / 6 @triton.jit def hardswish_grad(input): """ Calculates the gradient of hard Swish. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of hard Swish. """ return (relu6(input + 3) + input * relu6_grad(input + 3)) / 6 @triton.jit def selu(input): """ Applies SELU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by SELU. """ scale = 1.0507009873554804934193349852946 alpha = 1.6732632423543772848170429916717 return scale * (tl.maximum(0, input) + tl.minimum(0, alpha * (tl.exp(input) - 1))) @triton.jit def selu_grad(input): """ Calculates the gradient of SELU. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of SELU. """ scale = 1.0507009873554804934193349852946 alpha = 1.6732632423543772848170429916717 return scale * tl.where(input <= 0, alpha * tl.exp(input), 1) @triton.jit def mish(input): """ Applies Mish to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by Mish. """ return input * tanh(tl.log(1 + tl.exp(input))) @triton.jit def mish_grad(input): """ Calculates the gradient of Mish. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of Mish. """ exp = tl.exp(input) delta = exp * (exp + 2) + 2 return (exp * (exp * ((4 * input + 6) + exp * (exp + 4)) + 4 * (input + 1)) / (delta * delta)) @triton.jit def softplus(input): """ Applies softplus to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by softplus. """ return tl.log(1 + tl.exp(input)) @triton.jit def softplus_grad(input): """ Calculates the gradient of softplus. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of softplus. """ return sigmoid(input) @triton.jit def softsign(input): """ Applies softsign to the input. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Input transformed by softsign. """ return input / (1 + tl.abs(input)) @triton.jit def softsign_grad(input): """ Calculates the gradient of softsign. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of softsign. """ denom = 1 + tl.abs(input) return 1 / (denom * denom) @triton.jit def tanhshrink(input): """ Applies tanh shrink to the input. Args: input: Input. The input must be loaded and cannot be a pointer. 
Returns: Input transformed by tanh shrink. """ return input - tanh(input) @triton.jit def tanhshrink_grad(input): """ Calculates the gradient of tanh shrink. Args: input: Input. The input must be loaded and cannot be a pointer. Returns: Gradient of tanh shrink. """ return 1 - tanh_grad(input) @triton.jit def leaky_relu(input, negative_slope): """ Applies leaky ReLU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. negative_slope: Slope of the negative component. Returns: Input transformed by leaky ReLU. """ return relu(input) + negative_slope * tl.minimum(0, input) @triton.jit def leaky_relu_grad(input, negative_slope): """ Calculates the gradient of leaky ReLU. Args: input: Input. The input must be loaded and cannot be a pointer. negative_slope: Slope of the negative component. Returns: Gradient of leaky ReLU. """ return tl.where(input <= 0, negative_slope, 1) @triton.jit def elu(input, alpha): """ Applies ELU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. alpha: Alpha value. Returns: Input transformed by ELU. """ return tl.where(input <= 0, alpha * (tl.exp(input) - 1), input) @triton.jit def elu_grad(input, alpha): """ Calculates the gradient of ELU. Args: input: Input. The input must be loaded and cannot be a pointer. alpha: Alpha value. Returns: Gradient of ELU. """ return tl.where(input <= 0, alpha * tl.exp(input), 1) @triton.jit def celu(input, alpha): """ Applies CELU to the input. Args: input: Input. The input must be loaded and cannot be a pointer. alpha: Alpha value. Returns: Input transformed by CELU. """ return relu(input) + tl.minimum(0, alpha * (tl.exp(input / alpha) - 1)) @triton.jit def celu_grad(input, alpha): """ Calculates the gradient of CELU. Args: input: Input. The input must be loaded and cannot be a pointer. alpha: Alpha value. Returns: Gradient of CELU. """ return tl.where(input <= 0, tl.exp(input / alpha), 1) @triton.jit def apply_act_func(input, drop_p, seed, offset, param, act_func: tl.constexpr, dropout: tl.constexpr): """ Applies an activation function to the input, optionally fusing dropout. Args: input: Input. The input must be loaded and cannot be a pointer. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. offset: Offset to generate the dropout mask for if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. Returns: Input transformed by the desired activation function, potentially with fused dropout. 
""" if act_func == 'sigmoid': input = input.to(tl.float32) output = sigmoid(input) if act_func == 'logsigmoid': input = input.to(tl.float32) output = logsigmoid(input) elif act_func == 'tanh': input = input.to(tl.float32) output = tanh(input) elif act_func == 'relu': output = relu(input) elif act_func == 'gelu': input = input.to(tl.float32) output = gelu(input) elif act_func == 'silu': input = input.to(tl.float32) output = silu(input) elif act_func == 'relu6': output = relu6(input) elif act_func == 'hardsigmoid': output = hardsigmoid(input) elif act_func == 'hardtanh': output = hardtanh(input) elif act_func == 'hardswish': output = hardswish(input) elif act_func == 'selu': input = input.to(tl.float32) output = selu(input) elif act_func == 'mish': input = input.to(tl.float32) output = mish(input) elif act_func == 'softplus': input = input.to(tl.float32) output = softplus(input) elif act_func == 'softsign': output = softsign(input) elif act_func == 'tanhshrink': input = input.to(tl.float32) output = tanhshrink(input) elif act_func == 'leaky_relu': output = leaky_relu(input, param) elif act_func == 'elu': input = input.to(tl.float32) output = elu(input, param) elif act_func == 'celu': input = input.to(tl.float32) output = celu(input, param) if dropout: output = apply_dropout(output, drop_p, seed, offset) return output @triton.jit def apply_act_func_grad(output_grad, input, drop_p, seed, offset, param, act_func: tl.constexpr, dropout: tl.constexpr): """ Calculates the gradient of an activation function. Args: output_grad: Output gradients. The output gradients must be loaded and cannot be a pointer. input: Input. The input must be loaded and cannot be a pointer. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. offset: Offset to generate the dropout mask for if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. Returns: Gradient of the desired activation function. 
""" if act_func == 'sigmoid': input = input.to(tl.float32) output = sigmoid_grad(input) if act_func == 'logsigmoid': input = input.to(tl.float32) output = logsigmoid_grad(input) elif act_func == 'tanh': input = input.to(tl.float32) output = tanh_grad(input) elif act_func == 'relu': output = relu_grad(input) elif act_func == 'gelu': input = input.to(tl.float32) output = gelu_grad(input) elif act_func == 'silu': input = input.to(tl.float32) output = silu_grad(input) elif act_func == 'relu6': output = relu6_grad(input) elif act_func == 'hardsigmoid': output = hardsigmoid_grad(input) elif act_func == 'hardtanh': output = hardtanh_grad(input) elif act_func == 'hardswish': output = hardswish_grad(input) elif act_func == 'selu': input = input.to(tl.float32) output = selu_grad(input) elif act_func == 'mish': input = input.to(tl.float32) output = mish_grad(input) elif act_func == 'softplus': input = input.to(tl.float32) output = softplus_grad(input) elif act_func == 'softsign': output = softsign_grad(input) elif act_func == 'tanhshrink': input = input.to(tl.float32) output = tanhshrink_grad(input) elif act_func == 'leaky_relu': output = leaky_relu_grad(input, param) elif act_func == 'elu': input = input.to(tl.float32) output = elu_grad(input, param) elif act_func == 'celu': input = input.to(tl.float32) output = celu_grad(input, param) if dropout: output_grad = apply_dropout_grad(output_grad, drop_p, seed, offset) return output_grad * output @triton.autotune( configs=element_wise_kernel_configs(), key=['size'], ) @triton.jit def act_func_forward_kernel( input_pointer, output_pointer, size, drop_p, seed, param, act_func: tl.constexpr, dropout: tl.constexpr, BLOCK_SIZE: tl.constexpr, ): """ Applies an activation function to the input, optionally fusing dropout. Args: input_pointer: Pointer to the input to transform. The input must be of shape [size]. output_pointer: Pointer to a container the result is written to. The container must be of shape [size]. size: Number of elements in the input. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. BLOCK_SIZE: Block size. """ # This program processes BLOCK_SIZE rows. pid = tl.program_id(axis=0) offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offset < size input = tl.load(input_pointer + offset, mask=mask) tl.store(output_pointer + offset, apply_act_func(input, drop_p, seed, offset, param, act_func, dropout), mask=mask) @triton.autotune( configs=element_wise_kernel_configs(), key=['size'], ) @triton.jit def act_func_backward_kernel( output_grad_pointer, input_pointer, input_grad_pointer, size, drop_p, seed, param, act_func: tl.constexpr, dropout: tl.constexpr, BLOCK_SIZE: tl.constexpr, ): """ Calculates the input gradient of an activation function. Args: output_grad_pointer: Pointer to the activation's output gradients. The output gradients must be of shape [size]. input_pointer: Pointer to the activation's input. The input must be of shape [size]. input_grad_pointer: Pointer to a container the input's gradients are written to. The container must be of shape [size]. 
size: Number of elements in the input. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. BLOCK_SIZE: Block size. """ # This program processes BLOCK_SIZE rows. pid = tl.program_id(axis=0) offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offset < size output_grad = tl.load(output_grad_pointer + offset, mask=mask) input = tl.load(input_pointer + offset, mask=mask) tl.store(input_grad_pointer + offset, apply_act_func_grad(output_grad, input, drop_p, seed, offset, param, act_func, dropout), mask=mask)
@triton.jit def act_func_backward_kernel( output_grad_pointer, input_pointer, input_grad_pointer, size, drop_p, seed, param, act_func: tl.constexpr, dropout: tl.constexpr, BLOCK_SIZE: tl.constexpr, ): """ Calculates the input gradient of an activation function. Args: output_grad_pointer: Pointer to the activation's output gradients. The output gradients must be of shape [size]. input_pointer: Pointer to the activation's input. The input must be of shape [size]. input_grad_pointer: Pointer to a container the input's gradients are written to. The container must be of shape [size]. size: Number of elements in the input. drop_p: Probability of dropping an element if dropout is True. seed: Seed for generating the dropout mask if dropout is True. param: Parameter in the case of parameterized activation functions. act_func: Name of activation function to apply. Options are 'sigmoid', 'logsigmoid', 'tanh', 'relu', 'gelu', 'silu', 'relu6', 'hardsigmoid', 'hardtanh', 'hardswish', 'selu', 'mish', 'softplus', 'softsign', 'tanhshrink', 'leaky_relu', 'elu', and 'celu'. dropout: Flag for performing dropout on the activation output. BLOCK_SIZE: Block size. """ # This program processes BLOCK_SIZE rows. pid = tl.program_id(axis=0) offset = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) mask = offset < size output_grad = tl.load(output_grad_pointer + offset, mask=mask) input = tl.load(input_pointer + offset, mask=mask) tl.store(input_grad_pointer + offset, apply_act_func_grad(output_grad, input, drop_p, seed, offset, param, act_func, dropout), mask=mask)
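The backward kernel is handed the same seed and per-element offsets as the forward pass, so apply_dropout_grad can regenerate the identical mask before the result is scaled by the activation derivative at the saved input. Assuming apply_dropout implements standard inverted dropout, the ordering matches this plain-PyTorch reference (a sketch for ReLU only, not Triton code):

import torch

def fused_relu_dropout_reference(x, grad_out, drop_p, mask):
    # Forward: activation, then inverted dropout; backward: mask the incoming
    # gradient, then multiply by the activation derivative at the saved input.
    keep = mask.to(x.dtype) / (1.0 - drop_p)
    out = torch.relu(x) * keep
    grad_in = (grad_out * keep) * (x > 0).to(x.dtype)
    return out, grad_in

x = torch.randn(8, requires_grad=True)
mask = torch.rand(8) > 0.3
out, grad_ref = fused_relu_dropout_reference(x, torch.ones(8), 0.3, mask)
out.backward(torch.ones(8))
assert torch.allclose(x.grad, grad_ref)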
ThinamXx/cuda-mode
triton/main.py
https://github.com/ThinamXx/cuda-mode/blob/43ecf1c4813af135b277e18dbd7bc84a059ebb78/triton/main.py
import torch import triton import triton.language as tl @triton.jit def _attn_fwd( Q, # (batch_size, num_heads, seq_len, head_dim) K, # (batch_size, num_heads, seq_len, head_dim) V, # (batch_size, num_heads, seq_len, head_dim) softmax_scale, output, # (batch_size, num_heads, seq_len, head_dim) M, # (batch_size, num_heads, seq_len) stride_Q_batch, stride_Q_head, stride_Q_seq, stride_Q_dim, stride_K_batch, stride_K_head, stride_K_seq, stride_K_dim, stride_V_batch, stride_V_head, stride_V_seq, stride_V_dim, stride_output_batch, stride_output_head, stride_output_seq, stride_output_dim, batch_size, num_heads: tl.constexpr, seq_len: tl.constexpr, head_dim: tl.constexpr, block_size_Q: tl.constexpr, block_size_KV: tl.constexpr, stage: tl.constexpr, ): tl.static_assert(block_size_KV <= head_dim) # this indicate which block of queries to process. block_index_q = tl.program_id(axis=0) # this indicate which head of which batch to process. index_batch_head = tl.program_id(axis=1) # this indicate which batch to process i.e. each batch has num_heads. index_batch = index_batch_head // num_heads # this indicate which head to process. index_head = index_batch_head % num_heads class TritonAttention(torch.autograd.Function): @staticmethod def forward(ctx, Q, K, V, causal, softmax_scale): head_dim_Q, head_dim_K, head_dim_V = Q.shape[-1], K.shape[-1], V.shape[-1] assert head_dim_Q == head_dim_K and head_dim_K == head_dim_V batch_size, num_heads, seq_len, head_dim = Q.shape output = torch.empty_like(Q) stage = 3 if causal else 1 grid = lambda meta: ( triton.cdiv( seq_len, meta["BLOCK_SIZE"] ), # which block of queries we are working with. batch_size * num_heads, # which heads of which batch we are working with. 1, # Z dimension. ) # used to save logsumexp for the backward pass. 
M = torch.empty( (batch_size, num_heads, seq_len), device=output.device, dtype=torch.float32 ) _attn_fwd[grid]( Q=Q, K=K, V=V, softmax_scale=softmax_scale, output=output, M=M, stride_Q_batch=Q.stride(0), stride_Q_head=Q.stride(1), stride_Q_seq=Q.stride(2), stride_Q_dim=Q.stride(3), stride_K_batch=K.stride(0), stride_K_head=K.stride(1), stride_K_seq=K.stride(2), stride_K_dim=K.stride(3), stride_V_batch=V.stride(0), stride_V_head=V.stride(1), stride_V_seq=V.stride(2), stride_V_dim=V.stride(3), stride_output_batch=output.stride(0), stride_output_head=output.stride(1), stride_output_seq=output.stride(2), stride_output_dim=output.stride(3), batch_size=batch_size, num_heads=num_heads, seq_len=seq_len, head_dim=head_dim, stage=stage, ) ctx.save_for_backward(Q, K, V, output, M) ctx.grid = grid ctx.head_dim = head_dim ctx.causal = causal ctx.softmax_scale = softmax_scale return output def test_ops(batch_size, num_heads, seq_len, head_dim, causal, dtype=torch.float16): Q = ( torch.empty( (batch_size, num_heads, seq_len, head_dim), dtype=dtype, device="cuda" ) .normal_(mean=0.0, std=0.5) .requires_grad_() ) K = ( torch.empty( (batch_size, num_heads, seq_len, head_dim), dtype=dtype, device="cuda" ) .normal_(mean=0.0, std=0.5) .requires_grad_() ) V = ( torch.empty( (batch_size, num_heads, seq_len, head_dim), dtype=dtype, device="cuda" ) .normal_(mean=0.0, std=0.5) .requires_grad_() ) softmax_scale = 1 / (head_dim**0.5) dO = torch.randn_like(Q) # reference implementation: mask = torch.tril(torch.ones((seq_len, seq_len), device="cuda")) P = torch.matmul(Q, K.transpose(-2, -1)) * softmax_scale if causal: P[:, :, mask == 0] = float("-inf") P = torch.softmax(P.float(), dim=-1).half() ref_O = torch.matmul(P, V) ref_O.backward(dO) ref_dQ, Q.grad = Q.grad.clone(), None ref_dK, K.grad = K.grad.clone(), None ref_dV, V.grad = V.grad.clone(), None # triton implementation: tri_O = TritonAttention.apply(Q, K, V, causal, softmax_scale).half() tri_O.backward(dO) tri_dQ, Q.grad = Q.grad.clone(), None tri_dK, K.grad = K.grad.clone(), None tri_dV, V.grad = V.grad.clone(), None # comparison of the outputs: atol = 1e-2 rtol = 0.0 assert torch.allclose(ref_O, tri_O, rtol=rtol, atol=atol) assert torch.allclose(ref_dQ, tri_dQ, rtol=rtol, atol=atol) assert torch.allclose(ref_dK, tri_dK, rtol=rtol, atol=atol) assert torch.allclose(ref_dV, tri_dV, rtol=rtol, atol=atol)
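The M buffer filled here is the per-row logsumexp saved for the backward pass: storing it lets the backward recompute the attention probabilities as exp(S - M) without redoing the softmax normalization. A plain-PyTorch sketch (single head, non-causal, illustrative shapes):

import torch

Q, K = torch.randn(16, 64), torch.randn(16, 64)
softmax_scale = 1 / (64 ** 0.5)
S = (Q @ K.T) * softmax_scale
M = torch.logsumexp(S, dim=-1, keepdim=True)  # what the kernel writes per query row
P_recomputed = torch.exp(S - M)               # rows already sum to 1
assert torch.allclose(P_recomputed, torch.softmax(S, dim=-1), atol=1e-6)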
@triton.jit def _attn_fwd( Q, # (batch_size, num_heads, seq_len, head_dim) K, # (batch_size, num_heads, seq_len, head_dim) V, # (batch_size, num_heads, seq_len, head_dim) softmax_scale, output, # (batch_size, num_heads, seq_len, head_dim) M, # (batch_size, num_heads, seq_len) stride_Q_batch, stride_Q_head, stride_Q_seq, stride_Q_dim, stride_K_batch, stride_K_head, stride_K_seq, stride_K_dim, stride_V_batch, stride_V_head, stride_V_seq, stride_V_dim, stride_output_batch, stride_output_head, stride_output_seq, stride_output_dim, batch_size, num_heads: tl.constexpr, seq_len: tl.constexpr, head_dim: tl.constexpr, block_size_Q: tl.constexpr, block_size_KV: tl.constexpr, stage: tl.constexpr, ): tl.static_assert(block_size_KV <= head_dim) # this indicate which block of queries to process. block_index_q = tl.program_id(axis=0) # this indicate which head of which batch to process. index_batch_head = tl.program_id(axis=1) # this indicate which batch to process i.e. each batch has num_heads. index_batch = index_batch_head // num_heads # this indicate which head to process. index_head = index_batch_head % num_heads class TritonAttention(torch.autograd.Function): @staticmethod def forward(ctx, Q, K, V, causal, softmax_scale): head_dim_Q, head_dim_K, head_dim_V = Q.shape[-1], K.shape[-1], V.shape[-1] assert head_dim_Q == head_dim_K and head_dim_K == head_dim_V batch_size, num_heads, seq_len, head_dim = Q.shape output = torch.empty_like(Q) stage = 3 if causal else 1 grid = lambda meta: ( triton.cdiv( seq_len, meta["BLOCK_SIZE"] ), # which block of queries we are working with. batch_size * num_heads, # which heads of which batch we are working with. 1, # Z dimension. ) # used to save logsumexp for the backward pass. M = torch.empty( (batch_size, num_heads, seq_len), device=output.device, dtype=torch.float32 ) _attn_fwd[grid]( Q=Q, K=K, V=V, softmax_scale=softmax_scale, output=output, M=M, stride_Q_batch=Q.stride(0), stride_Q_head=Q.stride(1), stride_Q_seq=Q.stride(2), stride_Q_dim=Q.stride(3), stride_K_batch=K.stride(0), stride_K_head=K.stride(1), stride_K_seq=K.stride(2), stride_K_dim=K.stride(3), stride_V_batch=V.stride(0), stride_V_head=V.stride(1), stride_V_seq=V.stride(2), stride_V_dim=V.stride(3), stride_output_batch=output.stride(0), stride_output_head=output.stride(1), stride_output_seq=output.stride(2), stride_output_dim=output.stride(3), batch_size=batch_size, num_heads=num_heads, seq_len=seq_len, head_dim=head_dim, stage=stage, ) ctx.save_for_backward(Q, K, V, output, M) ctx.grid = grid ctx.head_dim = head_dim ctx.causal = causal ctx.softmax_scale = softmax_scale return output def test_ops(batch_size, num_heads, seq_len, head_dim, causal, dtype=torch.float16): Q = ( torch.empty( (batch_size, num_heads, seq_len, head_dim), dtype=dtype, device="cuda" ) .normal_(mean=0.0, std=0.5) .requires_grad_() ) K = ( torch.empty( (batch_size, num_heads, seq_len, head_dim), dtype=dtype, device="cuda" ) .normal_(mean=0.0, std=0.5) .requires_grad_() ) V = ( torch.empty( (batch_size, num_heads, seq_len, head_dim), dtype=dtype, device="cuda" ) .normal_(mean=0.0, std=0.5) .requires_grad_() ) softmax_scale = 1 / (head_dim**0.5) dO = torch.randn_like(Q) # reference implementation: mask = torch.tril(torch.ones((seq_len, seq_len), device="cuda")) P = torch.matmul(Q, K.transpose(-2, -1)) * softmax_scale if causal: P[:, :, mask == 0] = float("-inf") P = torch.softmax(P.float(), dim=-1).half() ref_O = torch.matmul(P, V) ref_O.backward(dO) ref_dQ, Q.grad = Q.grad.clone(), None ref_dK, K.grad = K.grad.clone(), None ref_dV, 
V.grad = V.grad.clone(), None # triton implementation: tri_O = TritonAttention.apply(Q, K, V, causal, softmax_scale).half() tri_O.backward(dO) tri_dQ, Q.grad = Q.grad.clone(), None tri_dK, K.grad = K.grad.clone(), None tri_dV, V.grad = V.grad.clone(), None # comparison of the outputs: atol = 1e-2 rtol = 0.0 assert torch.allclose(ref_O, tri_O, rtol=rtol, atol=atol) assert torch.allclose(ref_dQ, tri_dQ, rtol=rtol, atol=atol) assert torch.allclose(ref_dK, tri_dK, rtol=rtol, atol=atol) assert torch.allclose(ref_dV, tri_dV, rtol=rtol, atol=atol)
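The index arithmetic at the top of _attn_fwd maps grid axis 1 onto (batch, head) pairs; since the launch above still leaves block_size_Q and block_size_KV to be supplied, the round trip is shown in plain Python rather than by invoking the kernel:

# Each program on grid axis 1 handles one (batch, head) pair; // and % recover the indices.
batch_size, num_heads = 2, 4
for index_batch_head in range(batch_size * num_heads):
    index_batch = index_batch_head // num_heads
    index_head = index_batch_head % num_heads
    assert index_batch_head == index_batch * num_heads + index_head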
zjhellofss/KuiperTriton
kernel/repeat_kv.py
https://github.com/zjhellofss/KuiperTriton/blob/1abdb405768b4c2251ab259ffd34f1e853ed3e0c/kernel/repeat_kv.py
import torch import triton import triton.language as tl @triton.jit def repeat_kernel_triton(input, output, repeat, head_dim, block_size: tl.constexpr): tid = tl.program_id(0) input_ptr = input + tid * head_dim output_ptr = output + tid * repeat * head_dim block_n = tl.arange(0, block_size) for block_idx in range(0, head_dim, block_size): offset = block_idx + block_n mask = offset < head_dim input_ = tl.load(input_ptr + offset, mask) for r in range(repeat): output_ptr_repeat = output_ptr + r * head_dim + offset tl.store(output_ptr_repeat, input_, mask) def repeat_kv(input, output, repeat): bs, seq_len, kv_heads, head_dim = input.shape head_dim_blocks = bs * seq_len * kv_heads block_size = 32 repeat_kernel_triton[head_dim_blocks,](input, output, repeat, head_dim, block_size) if __name__ == '__main__': bs = 12 seq_len = 5 kv_heads = 16 repeat = 4 head_dim = 1024 input = torch.randn((bs, seq_len, kv_heads, head_dim)).cuda() output1 = torch.randn((bs, seq_len, kv_heads * repeat, head_dim)).cuda() import time # repeat_kv(input, output1, repeat=repeat) for i in range(5): t1 = time.time() repeat_kv(input, output1, repeat=repeat) t2 = time.time() t3 = time.time() output2 = input[:, :, :, None, :].expand(bs, seq_len, kv_heads, repeat, head_dim).contiguous() t4 = time.time() print('triton time:{}'.format(t2 - t1)) print('torch time:{}'.format(t4 - t3)) print('-' * 32)
@triton.jit def repeat_kernel_triton(input, output, repeat, head_dim, block_size: tl.constexpr): tid = tl.program_id(0) input_ptr = input + tid * head_dim output_ptr = output + tid * repeat * head_dim block_n = tl.arange(0, block_size) for block_idx in range(0, head_dim, block_size): offset = block_idx + block_n mask = offset < head_dim input_ = tl.load(input_ptr + offset, mask) for r in range(repeat): output_ptr_repeat = output_ptr + r * head_dim + offset tl.store(output_ptr_repeat, input_, mask) def repeat_kv(input, output, repeat): bs, seq_len, kv_heads, head_dim = input.shape head_dim_blocks = bs * seq_len * kv_heads block_size = 32 repeat_kernel_triton[head_dim_blocks,](input, output, repeat, head_dim, block_size) if __name__ == '__main__': bs = 12 seq_len = 5 kv_heads = 16 repeat = 4 head_dim = 1024 input = torch.randn((bs, seq_len, kv_heads, head_dim)).cuda() output1 = torch.randn((bs, seq_len, kv_heads * repeat, head_dim)).cuda() import time # repeat_kv(input, output1, repeat=repeat) for i in range(5): t1 = time.time() repeat_kv(input, output1, repeat=repeat) t2 = time.time() t3 = time.time() output2 = input[:, :, :, None, :].expand(bs, seq_len, kv_heads, repeat, head_dim).contiguous() t4 = time.time() print('triton time:{}'.format(t2 - t1)) print('torch time:{}'.format(t4 - t3)) print('-' * 32)
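The __main__ benchmark builds the expand-based output2 but never compares it with the Triton result, and it times asynchronous kernel launches without synchronizing. A small follow-up along these lines (illustrative, appended after the timing loop and reusing its variables) would close both gaps:

# Kernel launches are asynchronous, so torch.cuda.synchronize() must bracket any
# timed region for the printed timings to be meaningful.
torch.cuda.synchronize()
output2 = (input[:, :, :, None, :]
           .expand(bs, seq_len, kv_heads, repeat, head_dim)
           .reshape(bs, seq_len, kv_heads * repeat, head_dim))
print(torch.allclose(output1, output2))  # expected: True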
HaiShaw/ater
ater/fused_moe.py
https://github.com/HaiShaw/ater/blob/32eea1253ba59b0685ec81774dc8f64712859fe9/ater/fused_moe.py
"""Fused MoE kernel.""" import functools import json import os from typing import Any, Callable, Dict, Optional, Tuple import math import torch import triton import triton.language as tl import ater as moe_kernels logger = moe_kernels.getLogger() VLLM_MOE_PADDING = bool(int(os.getenv("VLLM_MOE_PADDING", "1"))) FUSED_MOE_PERSISTENT = bool(int(os.getenv("FUSED_MOE_PERSISTENT", "0"))) ENABLE_MOE_LDS_BYPASS = bool(int(os.getenv("ENABLE_MOE_LDS_BYPASS", "1"))) print(f'{FUSED_MOE_PERSISTENT=}, {ENABLE_MOE_LDS_BYPASS=}, {VLLM_MOE_PADDING=}') VLLM_FUSED_MOE_CHUNK_SIZE = 65536 padding_size = 128 if VLLM_MOE_PADDING else 0 @triton.jit def fused_moe_kernel( # Pointers to matrices a_ptr, b_ptr, c_ptr, a_scale_ptr, b_scale_ptr, topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr, token_nums_ptr, num_tokens_post_padded_ptr, # Matrix dimensions N, K, EM, num_valid_tokens, # The stride variables represent how much to increase the ptr by when # moving by 1 element in a particular dimension. E.g. `stride_am` is # how much to increase `a_ptr` by to get the element one row down # (A has M rows). stride_am, stride_ak, stride_be, stride_bk, stride_bn, stride_cm, stride_cn, stride_bse, stride_bsn, # Meta-parameters BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr, compute_type: tl.constexpr, use_fp8_w8a8: tl.constexpr, use_int8_w8a16: tl.constexpr): """ Implements the fused computation for a Mixture of Experts (MOE) using token and expert matrices. Key Parameters: - A: The input tensor representing tokens with shape (*, K), where '*' can be any shape representing batches and K is the feature dimension of each token. - B: The stacked MOE weight tensor with shape (E, N, K), where E is the number of experts, K is the input feature dimension, and N is the output feature dimension. - C: The output cache tensor with shape (M, topk, N), where M is the total number of tokens post padding, topk is the number of times each token is repeated, and N is the output feature dimension. - sorted_token_ids: A tensor containing the sorted indices of tokens, repeated topk times and arranged by the expert index they are assigned to. - expert_ids: A tensor containing the indices of the expert for each block. It determines which expert matrix from B should be used for each block in A. This kernel performs the multiplication of a token by its corresponding expert matrix as determined by `expert_ids`. The sorting of `sorted_token_ids` by expert index and padding ensures divisibility by BLOCK_SIZE_M, which is necessary to maintain consistency in block matrix multiplication across different blocks processed by the same expert. """ # ----------------------------------------------------------- # Map program ids `pid` to the block of C it should compute. # This is done in a grouped ordering to promote L2 data reuse. pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m) pid_n = (pid % num_pid_in_group) // group_size_m # ---------------------------------------------------------- # Create pointers for the first blocks of A and B. 
# We will advance this pointer as we move in the K direction # and accumulate # `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers # `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers block_token_num = tl.load(token_nums_ptr + pid_m) num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr) if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded: return blk_m_range = tl.arange(0, BLOCK_SIZE_M) token_mask = blk_m_range < block_token_num offs_token_id = pid_m * BLOCK_SIZE_M + blk_m_range offs_token = tl.load(sorted_token_ids_ptr + offs_token_id, mask=token_mask) # token_mask = offs_token < num_valid_tokens offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = a_ptr + (offs_token[:, None] // top_k * stride_am + offs_k[None, :] * stride_ak) off_experts = tl.load(expert_ids_ptr + pid_m) b_ptrs = b_ptr + off_experts * stride_be + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn) if use_int8_w8a16: b_scale_ptrs = b_scale_ptr + off_experts * stride_bse + offs_bn[ None, :] * stride_bsn b_scale = tl.load(b_scale_ptrs) if use_fp8_w8a8: a_scale = tl.load(a_scale_ptr) b_scale = tl.load(b_scale_ptr + off_experts) # ----------------------------------------------------------- # Iterate to compute a block of the C matrix. # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block # of fp32 values for higher accuracy. # `accumulator` will be converted back to fp16 after the loop. accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)): # Load the next block of A and B, generate a mask by checking the # K dimension. a = tl.load(a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K - k * BLOCK_SIZE_K), other=0.0) b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) # We accumulate along the K dimension. if use_int8_w8a16: accumulator = tl.dot(a, b.to(compute_type), acc=accumulator) elif use_fp8_w8a8: accumulator = tl.dot(a, b, acc=accumulator) else: accumulator = tl.dot(a, b, acc=accumulator) # Advance the ptrs to the next K block. a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk if MUL_ROUTED_WEIGHT: moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask, other=0) accumulator = accumulator * moe_weight[:, None] if use_int8_w8a16: accumulator = (accumulator * b_scale).to(compute_type) elif use_fp8_w8a8: accumulator = (accumulator * a_scale * b_scale).to(compute_type) else: accumulator = accumulator.to(compute_type) # ----------------------------------------------------------- # Write back the block of the output offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) c_ptrs = c_ptr + stride_cm * offs_token[:, None] + stride_cn * offs_cn[ None, :] c_mask = token_mask[:, None] & (offs_cn[None, :] < N) tl.store(c_ptrs, accumulator, mask=c_mask) @triton.heuristics({ 'EVEN_K': lambda args: args['K'] % args['BLOCK_SIZE_K'] == 0, }) @triton.jit def fused_moe_persistent_kernel( # Pointers to matrices a_ptr, b_ptr, c_ptr, a_scale_ptr, b_scale_ptr, topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr, num_tokens_post_padded_ptr, # Matrix dimensions N, K, EM, num_valid_tokens, # The stride variables represent how much to increase the ptr by when # moving by 1 element in a particular dimension. E.g. `stride_am` is # how much to increase `a_ptr` by to get the element one row down # (A has M rows). 
stride_am, stride_ak, stride_be, stride_bk, stride_bn, stride_cm, stride_cn, # Meta-parameters BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, EVEN_K: tl.constexpr, NUM_SMS: tl.constexpr, MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr, compute_type: tl.constexpr, use_fp8: tl.constexpr, ): """ Implements the fused computation for a Mixture of Experts (MOE) using token and expert matrices. This is the persistent version of the fused_moe kernel. Key Parameters: - A: The input tensor representing tokens with shape (*, K), where '*' can be any shape representing batches and K is the feature dimension of each token. - B: The stacked MOE weight tensor with shape (E, N, K), where E is the number of experts, K is the input feature dimension, and N is the output feature dimension. - C: The output cache tensor with shape (M, topk, N), where M is the total number of tokens post padding, topk is the number of times each token is repeated, and N is the output feature dimension. - sorted_token_ids: A tensor containing the sorted indices of tokens, repeated topk times and arranged by the expert index they are assigned to. - expert_ids: A tensor containing the indices of the expert for each block. It determines which expert matrix from B should be used for each block in A. This kernel performs the multiplication of a token by its corresponding expert matrix as determined by `expert_ids`. The sorting of `sorted_token_ids` by expert index and padding ensures divisibility by BLOCK_SIZE_M, which is necessary to maintain consistency in block matrix multiplication across different blocks processed by the same expert. """ # ----------------------------------------------------------- # Simply compute how many iterations each persistent block needs to do start_pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) # num_tiles = num_pid_m * num_pid_n tile_id = start_pid offs_k = tl.arange(0, BLOCK_SIZE_K) # offs_token = tl.zeros((BLOCK_SIZE_M,), dtype=tl.int32) # token_mask = tl.zeros((BLOCK_SIZE_M,), dtype=tl.int1) # Load tile-invariant runtime constant num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr) # Compute how many tiles are outside the padding region num_pid_in_group = GROUP_SIZE_M * num_pid_n pid_m = 0 tile_id2 = start_pid - NUM_SMS num_valid_tiles = -1 while pid_m * BLOCK_SIZE_M < num_tokens_post_padded: num_valid_tiles += 1 tile_id2 += NUM_SMS group_id = tile_id2 // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + ((tile_id2 % num_pid_in_group) % group_size_m) for _ in range(0, num_valid_tiles): if GROUP_SIZE_M == 1: pid_m = tile_id // num_pid_n pid_n = tile_id % num_pid_n else: group_id = tile_id // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + ((tile_id % num_pid_in_group) % group_size_m) pid_n = (tile_id % num_pid_in_group) // group_size_m # Compute the mask offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_token = tl.load(sorted_token_ids_ptr + offs_token_id) token_mask = offs_token < num_valid_tokens # Compute the A pointer a_ptrs = a_ptr + (offs_token[:, None] // top_k * stride_am + offs_k[None, :] * stride_ak) # Compute the B pointer offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N off_experts = tl.load(expert_ids_ptr + pid_m) b_ptrs = (b_ptr + off_experts * 
stride_be + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)) if use_fp8: a_scale = tl.load(a_scale_ptr) b_scale = tl.load(b_scale_ptr + off_experts) accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)): # Load the next block of A and B, generate a mask by checking the # K dimension. if EVEN_K: a = tl.load(a_ptrs, mask=token_mask[:, None], other=0.0) b = tl.load(b_ptrs) else: a = tl.load( a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K - k * BLOCK_SIZE_K), other=0.0 ) b = tl.load( b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0 ) # We accumulate along the K dimension. if use_fp8: accumulator = tl.dot(a, b, acc=accumulator) else: accumulator += tl.dot(a, b) # Advance the ptrs to the next K block. a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk if MUL_ROUTED_WEIGHT: moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask, other=0) accumulator = accumulator * moe_weight[:, None] if use_fp8: accumulator = (accumulator * a_scale * b_scale).to(compute_type) else: accumulator = accumulator.to(compute_type) # ----------------------------------------------------------- # Write back the block of the output offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) c_ptrs = (c_ptr + stride_cm * offs_token[:, None] + stride_cn * offs_cn[None, :]) c_mask = token_mask[:, None] & (offs_cn[None, :] < N) tl.store(c_ptrs, accumulator, mask=c_mask) # advance tile_id tile_id += NUM_SMS def moe_align_block_size( topk_ids: torch.Tensor, block_size: int, num_experts: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Aligns the token distribution across experts to be compatible with block size for matrix multiplication. Parameters: - topk_ids: A tensor of shape [total_tokens, top_k] representing the top-k expert indices for each token. - block_size: The block size used in block matrix multiplication. - num_experts: The total number of experts. Returns: - sorted_token_ids: A tensor containing the sorted token indices according to their allocated expert. - expert_ids: A tensor indicating the assigned expert index for each block. - num_tokens_post_padded: The total number of tokens after padding, ensuring divisibility by block_size. This function pads the number of tokens that each expert needs to process so that it is divisible by block_size. Padding ensures that during block matrix multiplication, the dimensions align correctly. Example: Given topk_ids = [[2, 3, 4], [1, 2, 4], [1, 3, 4], [1, 2, 3]], block_size = 4, and num_experts = 4: - We initially have 12 tokens (after repeating 'top_k' times) and 4 experts, with each expert needing to process 3 tokens. - As block_size is 4, we pad 1 token for each expert. - First, flatten topk_ids to [2, 3, 4, 1, 2, 4, 1, 3, 4, 1, 2, 3]. - Then append padding tokens [12, 12, 12, 12] for each block. - After sorting by expert index, we obtain token_ids [3, 6, 9, 12, 0, 4, 10, 12, 1, 7, 11, 12, 2, 5, 8, 12]. Tokens 12 are non-existent (padding) and are ignored in the subsequent matrix multiplication. - The padding ensures that the total number of tokens is now divisible by block_size for proper block matrix operations. 
""" max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1) sorted_ids = torch.empty((max_num_tokens_padded, ), dtype=torch.int32, device=topk_ids.device) # sorted_ids.fill_(topk_ids.numel()) max_num_m_blocks = triton.cdiv(max_num_tokens_padded, block_size) expert_ids = torch.empty((max_num_m_blocks, ), dtype=torch.int32, device=topk_ids.device) token_nums = torch.empty((max_num_m_blocks, ), dtype=torch.int32, device=topk_ids.device) num_tokens_post_pad = torch.empty((1), dtype=torch.int32, device=topk_ids.device) moe_kernels.moe_align_block_size(topk_ids, num_experts, block_size, sorted_ids, expert_ids, token_nums, num_tokens_post_pad) return sorted_ids, expert_ids, token_nums, num_tokens_post_pad def invoke_fused_moe_kernel(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, A_scale: Optional[torch.Tensor], B_scale: Optional[torch.Tensor], topk_weights: torch.Tensor, topk_ids: torch.Tensor, sorted_token_ids: torch.Tensor, expert_ids: torch.Tensor, token_nums: torch.Tensor, num_tokens_post_padded: torch.Tensor, mul_routed_weight: bool, top_k: int, config: Dict[str, Any], compute_type: tl.dtype, use_fp8_w8a8: bool, use_int8_w8a16: bool) -> None: assert topk_weights.stride(1) == 1 assert sorted_token_ids.stride(0) == 1 if use_fp8_w8a8: A, A_scale = moe_kernels.scaled_fp8_quant(A, A_scale) assert B_scale is not None elif use_int8_w8a16: assert B_scale is not None else: assert A_scale is None assert B_scale is None if not FUSED_MOE_PERSISTENT: grid = lambda META: (triton.cdiv(sorted_token_ids.shape[0], META[ "BLOCK_SIZE_M"]) * triton.cdiv(B.shape[1], META["BLOCK_SIZE_N"]), ) fused_moe_kernel[grid]( A, B, C, A_scale, B_scale, topk_weights, sorted_token_ids, expert_ids, token_nums, num_tokens_post_padded, B.shape[1], B.shape[2] - padding_size, sorted_token_ids.shape[0], topk_ids.numel(), A.stride(0), A.stride(1), B.stride(0), B.stride(2), B.stride(1), C.stride(1), C.stride(2), B_scale.stride(0) if B_scale is not None and use_int8_w8a16 else 0, B_scale.stride(1) if B_scale is not None and use_int8_w8a16 else 0, MUL_ROUTED_WEIGHT=mul_routed_weight, top_k=top_k, compute_type=compute_type, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16, **config, enable_moe_lds_bypass=ENABLE_MOE_LDS_BYPASS ) else: NUM_SMS = torch.cuda.get_device_properties("cuda").multi_processor_count * 2 grid = lambda META: (min( NUM_SMS, triton.cdiv(sorted_token_ids.shape[0], META["BLOCK_SIZE_M"]) * triton.cdiv(B.shape[1], META["BLOCK_SIZE_N"]) ), ) fused_moe_persistent_kernel[grid]( A, B, C, A_scale, B_scale, topk_weights, sorted_token_ids, expert_ids, num_tokens_post_padded, B.shape[1], B.shape[2] - padding_size, sorted_token_ids.shape[0], topk_ids.numel(), A.stride(0), A.stride(1), B.stride(0), B.stride(2), B.stride(1), C.stride(1), C.stride(2), NUM_SMS=NUM_SMS, MUL_ROUTED_WEIGHT=mul_routed_weight, top_k=top_k, compute_type=compute_type, use_fp8=use_fp8_w8a8, **config, enable_moe_lds_bypass=ENABLE_MOE_LDS_BYPASS ) def get_config_file_name(E: int, N: int, dtype: Optional[str]) -> str: # device_name = current_platform.get_device_name().replace(" ", "_") device_name = 'AMD_Instinct_MI308X_OAM' # TODO: need to update dtype_selector = "" if not dtype else f",dtype={dtype}" return f"E={E},N={N},device_name={device_name}{dtype_selector}.json" @functools.lru_cache def get_moe_configs(E: int, N: int, dtype: Optional[str]) -> Optional[Dict[int, Any]]: """ Return optimized configurations for the fused MoE kernel. 
The return value will be a dictionary that maps an irregular grid of batch sizes to configurations of the fused_moe kernel. To evaluate the kernel on a given batch size bs, the closest batch size in the grid should be picked and the associated configuration chosen to invoke the kernel. """ # First look up if an optimized configuration is available in the configs # directory json_file_name = get_config_file_name(E, N, dtype) config_file_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "configs", json_file_name) if os.path.exists(config_file_path): with open(config_file_path) as f: logger.info("Using configuration from %s for MoE layer.", config_file_path) # If a configuration has been found, return it return {int(key): val for key, val in json.load(f).items()} # If no optimized configuration is available, we will use the default # configuration logger.info("---> MOE tuned file not found at %s",config_file_path) return None def get_default_config( M: int, E: int, N: int, K: int, topk: int, dtype: Optional[str], is_marlin: bool, ) -> Dict[str, int]: config = { 'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, # reqd. for MOE shuffle 'BLOCK_SIZE_K': 128, # reqd. for MOE shuffle 'GROUP_SIZE_M': 8 } # A heuristic: fused marlin works faster with this config for small M if M <= E or (is_marlin and M <= 32): config = { 'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 128, # reqd. for MOE shuffle 'BLOCK_SIZE_K': 128, # reqd. for MOE shuffle 'GROUP_SIZE_M': 1 } return config def try_get_optimal_moe_config( w1_shape: Tuple[int, ...], w2_shape: Tuple[int, ...], top_k: int, dtype: Optional[str], M: int, override_config: Optional[Dict[str, Any]] = None, is_marlin: bool = False, ): if override_config: config = override_config else: # First try to load optimal config from the file E, _, N = w2_shape configs = get_moe_configs(E, N, dtype) if configs: # If an optimal configuration map has been found, look up the # optimal config config = configs[min(configs.keys(), key=lambda x: abs(x - M))] else: # Else use the default config config = get_default_config(M, E, N, w1_shape[2], top_k, dtype, is_marlin) return config def fused_topk( hidden_states: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, ): assert hidden_states.shape[0] == gating_output.shape[0], ( "Number of tokens mismatch") M, _ = hidden_states.shape topk_weights = torch.empty(M, topk, dtype=torch.float32, device=hidden_states.device) topk_ids = torch.empty(M, topk, dtype=torch.int32, device=hidden_states.device) token_expert_indicies = torch.empty(M, topk, dtype=torch.int32, device=hidden_states.device) moe_kernels.topk_softmax( topk_weights, topk_ids, token_expert_indicies, gating_output.float(), # TODO(woosuk): Optimize this. renormalize ) del token_expert_indicies # Not used. Will be used in the future. 
# if renormalize: # topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True) return topk_weights, topk_ids # This is used by the Deepseek-V2 model def grouped_topk(hidden_states: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, num_expert_group: int = 0, topk_group: int = 0): assert hidden_states.shape[0] == gating_output.shape[0], ( "Number of tokens mismatch") scores = torch.softmax(gating_output, dim=-1) num_token = scores.shape[0] group_scores = scores.view(num_token, num_expert_group, -1).max(dim=-1).values # [n, n_group] group_idx = torch.topk(group_scores, k=topk_group, dim=-1, sorted=False)[1] # [n, top_k_group] group_mask = torch.zeros_like(group_scores) # [n, n_group] group_mask.scatter_(1, group_idx, 1) # [n, n_group] score_mask = group_mask.unsqueeze(-1).expand( num_token, num_expert_group, scores.shape[-1] // num_expert_group).reshape(num_token, -1) # [n, e] tmp_scores = scores.masked_fill(~score_mask.bool(), 0.0) # [n, e] topk_weights, topk_ids = torch.topk(tmp_scores, k=topk, dim=-1, sorted=False) if renormalize: topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True) return topk_weights.to(torch.float32), topk_ids.to(torch.int32) def get_config_dtype_str(dtype: torch.dtype, use_int8_w8a16: Optional[bool] = False, use_fp8_w8a8: Optional[bool] = False): if use_fp8_w8a8: return "fp8_w8a8" elif use_int8_w8a16: return "int8_w8a16" elif dtype == torch.float: # avoiding cases where kernel fails when float32 MoE # use fp16/bfloat16 configs return "float32" return None def fused_experts(hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, topk_weights: torch.Tensor, topk_ids: torch.Tensor, inplace: bool = False, override_config: Optional[Dict[str, Any]] = None, use_fp8_w8a8: bool = False, use_int8_w8a16: bool = False, w1_scale: Optional[torch.Tensor] = None, w2_scale: Optional[torch.Tensor] = None, a1_scale: Optional[torch.Tensor] = None, a2_scale: Optional[torch.Tensor] = None): # Check constraints. 
assert hidden_states.shape[1] == w1.shape[2] - padding_size, "Hidden size mismatch" assert topk_weights.shape == topk_ids.shape, "topk shape mismatch" assert hidden_states.is_contiguous(), "Hidden_states must be contiguous" assert w1.is_contiguous(), "Expert weights1 must be contiguous" assert w2.is_contiguous(), "Expert weights2 must be contiguous" assert hidden_states.dtype in [ torch.float32, torch.float16, torch.bfloat16 ] num_tokens, _ = hidden_states.shape E, N, _ = w1.shape # We execute the fused_moe kernel in chunks to circumvent this issue: # https://github.com/vllm-project/vllm/issues/5938 CHUNK_SIZE = VLLM_FUSED_MOE_CHUNK_SIZE M = min(num_tokens, CHUNK_SIZE) config_dtype = get_config_dtype_str(use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16, dtype=hidden_states.dtype) get_config_func = functools.partial( try_get_optimal_moe_config, w1.shape, (w2.shape[0], w2.shape[1], w2.shape[2] - padding_size), topk_ids.shape[1], config_dtype, override_config=override_config, ) config = get_config_func(M) intermediate_cache1 = torch.empty((M, topk_ids.shape[1], N), device=hidden_states.device, dtype=hidden_states.dtype) intermediate_cache2 = torch.empty((M * topk_ids.shape[1], N // 2), device=hidden_states.device, dtype=hidden_states.dtype) intermediate_cache3 = torch.empty((M, topk_ids.shape[1], w2.shape[1]), device=hidden_states.device, dtype=hidden_states.dtype) compute_type = (tl.bfloat16 if hidden_states.dtype == torch.bfloat16 else tl.float16) if inplace: out_hidden_states = hidden_states else: out_hidden_states = torch.empty_like(hidden_states) # print("init config:", config) for chunk in range((num_tokens // CHUNK_SIZE) + 1): begin_chunk_idx, end_chunk_idx = (chunk * CHUNK_SIZE, min((chunk + 1) * CHUNK_SIZE, num_tokens)) curr_hidden_states = hidden_states[begin_chunk_idx:end_chunk_idx] tokens_in_chunk, _ = curr_hidden_states.shape if tokens_in_chunk == 0: break if tokens_in_chunk < CHUNK_SIZE and chunk > 0: # Adjust the intermediate cache size and config for the last # chunk. Note that in most cases we only have one chunk # so the cache size and config are already set correctly and # do not need to be adjusted. 
intermediate_cache1 = intermediate_cache1[:tokens_in_chunk] intermediate_cache2 = intermediate_cache2[:tokens_in_chunk] intermediate_cache3 = intermediate_cache3[:tokens_in_chunk] config = get_config_func(tokens_in_chunk) # print("inside config:", config) curr_topk_ids = topk_ids[begin_chunk_idx:end_chunk_idx] curr_topk_weights = topk_weights[begin_chunk_idx:end_chunk_idx] sorted_token_ids, expert_ids, token_nums, num_tokens_post_padded = ( moe_align_block_size(curr_topk_ids, config['BLOCK_SIZE_M'], E)) invoke_fused_moe_kernel(curr_hidden_states, w1, intermediate_cache1, a1_scale, w1_scale, curr_topk_weights, curr_topk_ids, sorted_token_ids, expert_ids, token_nums, num_tokens_post_padded, False, topk_ids.shape[1], config, compute_type=compute_type, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16) moe_kernels.silu_and_mul(intermediate_cache2, intermediate_cache1.view(-1, N)) invoke_fused_moe_kernel(intermediate_cache2, w2, intermediate_cache3, a2_scale, w2_scale, curr_topk_weights, curr_topk_ids, sorted_token_ids, expert_ids, token_nums, num_tokens_post_padded, True, 1, config, compute_type=compute_type, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16) moe_kernels.moe_sum(intermediate_cache3.view(*intermediate_cache3.shape), out_hidden_states[begin_chunk_idx:end_chunk_idx]) return out_hidden_states def fused_moe( hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, inplace: bool = False, override_config: Optional[Dict[str, Any]] = None, use_grouped_topk: bool = False, num_expert_group: Optional[int] = None, topk_group: Optional[int] = None, custom_routing_function: Optional[Callable] = None, use_fp8_w8a8: bool = False, use_int8_w8a16: bool = False, w1_scale: Optional[torch.Tensor] = None, w2_scale: Optional[torch.Tensor] = None, a1_scale: Optional[torch.Tensor] = None, a2_scale: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ This function computes a Mixture of Experts (MoE) layer using two sets of weights, w1 and w2, and top-k gating mechanism. Parameters: - hidden_states (torch.Tensor): The input tensor to the MoE layer. - w1 (torch.Tensor): The first set of expert weights. - w2 (torch.Tensor): The second set of expert weights. - gating_output (torch.Tensor): The output of the gating operation (before softmax). - topk (int): The number of top-k experts to select. - renormalize (bool): If True, renormalize the top-k weights to sum to 1. - inplace (bool): If True, perform the operation in-place. Defaults to False. - override_config (Optional[Dict[str, Any]]): Optional override for the kernel configuration. - num_expert_group: Optional[int]: additional parameter for grouped_topk - topk_group: Optional[int]: additional parameter for grouped_topk - use_grouped_topk: If True, use grouped_topk instead of fused_topk note: Deepseekv2 model uses grouped_topk - use_fp8_w8a8 (bool): If True, use fp8 arithmetic to compute the inner products for w1 and w2. Defaults to False. - use_int8_w8a16 (bool): If True, use fp8 arithmetic to compute the inner products for w1 and w2. Defaults to False. - w1_scale (Optional[torch.Tensor]): Optional scale to be used for w1. - w2_scale (Optional[torch.Tensor]): Optional scale to be used for w2. Returns: - torch.Tensor: The output tensor after applying the MoE layer. """ # Check constraints. 
assert gating_output.shape[1] == w1.shape[0], "Number of experts mismatch" if use_grouped_topk: assert num_expert_group is not None and topk_group is not None topk_weights, topk_ids = grouped_topk(hidden_states, gating_output, topk, renormalize, num_expert_group, topk_group) elif custom_routing_function is None: topk_weights, topk_ids = fused_topk(hidden_states, gating_output, topk, renormalize) else: topk_weights, topk_ids = custom_routing_function( hidden_states, gating_output, topk, renormalize) return fused_experts(hidden_states, w1, w2, topk_weights, topk_ids, inplace=inplace, override_config=override_config, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16, w1_scale=w1_scale, w2_scale=w2_scale, a1_scale=a1_scale, a2_scale=a2_scale) def fused_experts_ck( hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, topk_weights: torch.Tensor, topk_ids: torch.Tensor, use_fp8_w8a8: bool = False, use_int8_w8a16: bool = False, w1_scale: Optional[torch.Tensor] = None, w2_scale: Optional[torch.Tensor] = None, a1_scale: Optional[torch.Tensor] = None, a2_scale: Optional[torch.Tensor] = None, ): block_m = 32 tokens = hidden_states.shape[0] experts = w1.shape[0] topk = topk_ids.shape[1] out_hidden_states = torch.empty_like(hidden_states) max_num_tokens_padded = topk * tokens + experts * block_m - topk sorted_token_ids = torch.empty( (max_num_tokens_padded,), dtype=torch.int32, device=topk_ids.device ) sorted_weight = torch.empty( (max_num_tokens_padded,), dtype=topk_weights.dtype, device=topk_ids.device ) max_num_m_blocks = math.floor((max_num_tokens_padded + block_m - 1) / block_m) sorted_expert_ids = torch.empty( (max_num_m_blocks,), dtype=torch.int32, device=topk_ids.device ) num_tokens_post_pad = torch.empty((1), dtype=torch.int32, device=topk_ids.device) moe_kernels.moe_fused_experts_ck( hidden_states, w1, w2, topk_weights, topk_ids, w1_scale, w2_scale, a1_scale, a2_scale, sorted_token_ids, sorted_weight, sorted_expert_ids, num_tokens_post_pad, out_hidden_states, 32, (use_fp8_w8a8 or use_int8_w8a16), 0) return out_hidden_states
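The moe_align_block_size docstring above walks through the padding and sorting scheme; the short host-side sketch below reproduces that bookkeeping in plain PyTorch so the worked example can be checked by hand. It illustrates the semantics only, not the GPU implementation behind moe_kernels.moe_align_block_size; the helper name ref_moe_align_block_size is made up for this sketch, and it skips experts that receive no tokens, which the real kernel may handle differently.

import torch


def ref_moe_align_block_size(topk_ids: torch.Tensor, block_size: int,
                             num_experts: int):
    # Host-side reference of the alignment step (illustrative only).
    flat = topk_ids.flatten()
    pad_id = flat.numel()  # sentinel id marking padding slots
    sorted_ids, expert_ids, token_nums = [], [], []
    for e in range(num_experts):
        ids = torch.nonzero(flat == e, as_tuple=False).flatten().tolist()
        if not ids:
            continue
        # Pad this expert's token list up to a multiple of block_size.
        padded_len = -(-len(ids) // block_size) * block_size
        ids = ids + [pad_id] * (padded_len - len(ids))
        for start in range(0, padded_len, block_size):
            block = ids[start:start + block_size]
            sorted_ids.extend(block)
            expert_ids.append(e)  # expert owning this block
            token_nums.append(sum(i != pad_id for i in block))  # real tokens in the block
    num_tokens_post_pad = torch.tensor([len(sorted_ids)], dtype=torch.int32)
    return (torch.tensor(sorted_ids, dtype=torch.int32),
            torch.tensor(expert_ids, dtype=torch.int32),
            torch.tensor(token_nums, dtype=torch.int32),
            num_tokens_post_pad)


# The docstring example (passing num_experts=5 so that expert index 4 is in range):
topk_ids = torch.tensor([[2, 3, 4], [1, 2, 4], [1, 3, 4], [1, 2, 3]])
print(ref_moe_align_block_size(topk_ids, block_size=4, num_experts=5))
# sorted ids -> [3, 6, 9, 12, 0, 4, 10, 12, 1, 7, 11, 12, 2, 5, 8, 12]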
@triton.jit def fused_moe_kernel( # Pointers to matrices a_ptr, b_ptr, c_ptr, a_scale_ptr, b_scale_ptr, topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr, token_nums_ptr, num_tokens_post_padded_ptr, # Matrix dimensions N, K, EM, num_valid_tokens, # The stride variables represent how much to increase the ptr by when # moving by 1 element in a particular dimension. E.g. `stride_am` is # how much to increase `a_ptr` by to get the element one row down # (A has M rows). stride_am, stride_ak, stride_be, stride_bk, stride_bn, stride_cm, stride_cn, stride_bse, stride_bsn, # Meta-parameters BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr, compute_type: tl.constexpr, use_fp8_w8a8: tl.constexpr, use_int8_w8a16: tl.constexpr): """ Implements the fused computation for a Mixture of Experts (MOE) using token and expert matrices. Key Parameters: - A: The input tensor representing tokens with shape (*, K), where '*' can be any shape representing batches and K is the feature dimension of each token. - B: The stacked MOE weight tensor with shape (E, N, K), where E is the number of experts, K is the input feature dimension, and N is the output feature dimension. - C: The output cache tensor with shape (M, topk, N), where M is the total number of tokens post padding, topk is the number of times each token is repeated, and N is the output feature dimension. - sorted_token_ids: A tensor containing the sorted indices of tokens, repeated topk times and arranged by the expert index they are assigned to. - expert_ids: A tensor containing the indices of the expert for each block. It determines which expert matrix from B should be used for each block in A. This kernel performs the multiplication of a token by its corresponding expert matrix as determined by `expert_ids`. The sorting of `sorted_token_ids` by expert index and padding ensures divisibility by BLOCK_SIZE_M, which is necessary to maintain consistency in block matrix multiplication across different blocks processed by the same expert. """ # ----------------------------------------------------------- # Map program ids `pid` to the block of C it should compute. # This is done in a grouped ordering to promote L2 data reuse. pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m) pid_n = (pid % num_pid_in_group) // group_size_m # ---------------------------------------------------------- # Create pointers for the first blocks of A and B. 
# We will advance this pointer as we move in the K direction # and accumulate # `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers # `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers block_token_num = tl.load(token_nums_ptr + pid_m) num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr) if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded: return blk_m_range = tl.arange(0, BLOCK_SIZE_M) token_mask = blk_m_range < block_token_num offs_token_id = pid_m * BLOCK_SIZE_M + blk_m_range offs_token = tl.load(sorted_token_ids_ptr + offs_token_id, mask=token_mask) # token_mask = offs_token < num_valid_tokens offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = a_ptr + (offs_token[:, None] // top_k * stride_am + offs_k[None, :] * stride_ak) off_experts = tl.load(expert_ids_ptr + pid_m) b_ptrs = b_ptr + off_experts * stride_be + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn) if use_int8_w8a16: b_scale_ptrs = b_scale_ptr + off_experts * stride_bse + offs_bn[ None, :] * stride_bsn b_scale = tl.load(b_scale_ptrs) if use_fp8_w8a8: a_scale = tl.load(a_scale_ptr) b_scale = tl.load(b_scale_ptr + off_experts) # ----------------------------------------------------------- # Iterate to compute a block of the C matrix. # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block # of fp32 values for higher accuracy. # `accumulator` will be converted back to fp16 after the loop. accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)): # Load the next block of A and B, generate a mask by checking the # K dimension. a = tl.load(a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K - k * BLOCK_SIZE_K), other=0.0) b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) # We accumulate along the K dimension. if use_int8_w8a16: accumulator = tl.dot(a, b.to(compute_type), acc=accumulator) elif use_fp8_w8a8: accumulator = tl.dot(a, b, acc=accumulator) else: accumulator = tl.dot(a, b, acc=accumulator) # Advance the ptrs to the next K block. a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk if MUL_ROUTED_WEIGHT: moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask, other=0) accumulator = accumulator * moe_weight[:, None] if use_int8_w8a16: accumulator = (accumulator * b_scale).to(compute_type) elif use_fp8_w8a8: accumulator = (accumulator * a_scale * b_scale).to(compute_type) else: accumulator = accumulator.to(compute_type) # ----------------------------------------------------------- # Write back the block of the output offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) c_ptrs = c_ptr + stride_cm * offs_token[:, None] + stride_cn * offs_cn[ None, :] c_mask = token_mask[:, None] & (offs_cn[None, :] < N) tl.store(c_ptrs, accumulator, mask=c_mask) @triton.heuristics({ 'EVEN_K': lambda args: args['K'] % args['BLOCK_SIZE_K'] == 0, })
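The pid-to-tile mapping at the top of fused_moe_kernel (and of the persistent variant) groups output tiles into bands of GROUP_SIZE_M rows so that consecutive programs reuse the same rows of A while sweeping across N, which is what promotes L2 reuse. The snippet below is a small host-side sketch of exactly that index arithmetic; the helper name grouped_tile is illustrative and not part of the kernel API.

def grouped_tile(pid: int, num_pid_m: int, num_pid_n: int, group_size_m: int):
    # Mirrors the grouped pid -> (pid_m, pid_n) ordering used by the kernels above.
    num_pid_in_group = group_size_m * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * group_size_m
    group_size = min(num_pid_m - first_pid_m, group_size_m)
    pid_m = first_pid_m + (pid % num_pid_in_group) % group_size
    pid_n = (pid % num_pid_in_group) // group_size
    return pid_m, pid_n


# With a 4x4 tile grid and GROUP_SIZE_M=2, consecutive pids visit
# (0,0) (1,0) (0,1) (1,1) ...: two M-rows stay resident while N is swept.
for pid in range(16):
    print(pid, grouped_tile(pid, num_pid_m=4, num_pid_n=4, group_size_m=2))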
HaiShaw/ater
ater/fused_moe.py
https://github.com/HaiShaw/ater/blob/32eea1253ba59b0685ec81774dc8f64712859fe9/ater/fused_moe.py
"""Fused MoE kernel.""" import functools import json import os from typing import Any, Callable, Dict, Optional, Tuple import math import torch import triton import triton.language as tl import ater as moe_kernels logger = moe_kernels.getLogger() VLLM_MOE_PADDING = bool(int(os.getenv("VLLM_MOE_PADDING", "1"))) FUSED_MOE_PERSISTENT = bool(int(os.getenv("FUSED_MOE_PERSISTENT", "0"))) ENABLE_MOE_LDS_BYPASS = bool(int(os.getenv("ENABLE_MOE_LDS_BYPASS", "1"))) print(f'{FUSED_MOE_PERSISTENT=}, {ENABLE_MOE_LDS_BYPASS=}, {VLLM_MOE_PADDING=}') VLLM_FUSED_MOE_CHUNK_SIZE = 65536 padding_size = 128 if VLLM_MOE_PADDING else 0 @triton.jit def fused_moe_kernel( # Pointers to matrices a_ptr, b_ptr, c_ptr, a_scale_ptr, b_scale_ptr, topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr, token_nums_ptr, num_tokens_post_padded_ptr, # Matrix dimensions N, K, EM, num_valid_tokens, # The stride variables represent how much to increase the ptr by when # moving by 1 element in a particular dimension. E.g. `stride_am` is # how much to increase `a_ptr` by to get the element one row down # (A has M rows). stride_am, stride_ak, stride_be, stride_bk, stride_bn, stride_cm, stride_cn, stride_bse, stride_bsn, # Meta-parameters BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr, compute_type: tl.constexpr, use_fp8_w8a8: tl.constexpr, use_int8_w8a16: tl.constexpr): """ Implements the fused computation for a Mixture of Experts (MOE) using token and expert matrices. Key Parameters: - A: The input tensor representing tokens with shape (*, K), where '*' can be any shape representing batches and K is the feature dimension of each token. - B: The stacked MOE weight tensor with shape (E, N, K), where E is the number of experts, K is the input feature dimension, and N is the output feature dimension. - C: The output cache tensor with shape (M, topk, N), where M is the total number of tokens post padding, topk is the number of times each token is repeated, and N is the output feature dimension. - sorted_token_ids: A tensor containing the sorted indices of tokens, repeated topk times and arranged by the expert index they are assigned to. - expert_ids: A tensor containing the indices of the expert for each block. It determines which expert matrix from B should be used for each block in A. This kernel performs the multiplication of a token by its corresponding expert matrix as determined by `expert_ids`. The sorting of `sorted_token_ids` by expert index and padding ensures divisibility by BLOCK_SIZE_M, which is necessary to maintain consistency in block matrix multiplication across different blocks processed by the same expert. """ # ----------------------------------------------------------- # Map program ids `pid` to the block of C it should compute. # This is done in a grouped ordering to promote L2 data reuse. pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + ((pid % num_pid_in_group) % group_size_m) pid_n = (pid % num_pid_in_group) // group_size_m # ---------------------------------------------------------- # Create pointers for the first blocks of A and B. 
# We will advance this pointer as we move in the K direction # and accumulate # `a_ptrs` is a block of [BLOCK_SIZE_M, BLOCK_SIZE_K] pointers # `b_ptrs` is a block of [BLOCK_SIZE_K, BLOCK_SIZE_N] pointers block_token_num = tl.load(token_nums_ptr + pid_m) num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr) if pid_m * BLOCK_SIZE_M >= num_tokens_post_padded: return blk_m_range = tl.arange(0, BLOCK_SIZE_M) token_mask = blk_m_range < block_token_num offs_token_id = pid_m * BLOCK_SIZE_M + blk_m_range offs_token = tl.load(sorted_token_ids_ptr + offs_token_id, mask=token_mask) # token_mask = offs_token < num_valid_tokens offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = a_ptr + (offs_token[:, None] // top_k * stride_am + offs_k[None, :] * stride_ak) off_experts = tl.load(expert_ids_ptr + pid_m) b_ptrs = b_ptr + off_experts * stride_be + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn) if use_int8_w8a16: b_scale_ptrs = b_scale_ptr + off_experts * stride_bse + offs_bn[ None, :] * stride_bsn b_scale = tl.load(b_scale_ptrs) if use_fp8_w8a8: a_scale = tl.load(a_scale_ptr) b_scale = tl.load(b_scale_ptr + off_experts) # ----------------------------------------------------------- # Iterate to compute a block of the C matrix. # We accumulate into a `[BLOCK_SIZE_M, BLOCK_SIZE_N]` block # of fp32 values for higher accuracy. # `accumulator` will be converted back to fp16 after the loop. accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)): # Load the next block of A and B, generate a mask by checking the # K dimension. a = tl.load(a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K - k * BLOCK_SIZE_K), other=0.0) b = tl.load(b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0) # We accumulate along the K dimension. if use_int8_w8a16: accumulator = tl.dot(a, b.to(compute_type), acc=accumulator) elif use_fp8_w8a8: accumulator = tl.dot(a, b, acc=accumulator) else: accumulator = tl.dot(a, b, acc=accumulator) # Advance the ptrs to the next K block. a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk if MUL_ROUTED_WEIGHT: moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask, other=0) accumulator = accumulator * moe_weight[:, None] if use_int8_w8a16: accumulator = (accumulator * b_scale).to(compute_type) elif use_fp8_w8a8: accumulator = (accumulator * a_scale * b_scale).to(compute_type) else: accumulator = accumulator.to(compute_type) # ----------------------------------------------------------- # Write back the block of the output offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) c_ptrs = c_ptr + stride_cm * offs_token[:, None] + stride_cn * offs_cn[ None, :] c_mask = token_mask[:, None] & (offs_cn[None, :] < N) tl.store(c_ptrs, accumulator, mask=c_mask) @triton.heuristics({ 'EVEN_K': lambda args: args['K'] % args['BLOCK_SIZE_K'] == 0, }) @triton.jit def fused_moe_persistent_kernel( # Pointers to matrices a_ptr, b_ptr, c_ptr, a_scale_ptr, b_scale_ptr, topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr, num_tokens_post_padded_ptr, # Matrix dimensions N, K, EM, num_valid_tokens, # The stride variables represent how much to increase the ptr by when # moving by 1 element in a particular dimension. E.g. `stride_am` is # how much to increase `a_ptr` by to get the element one row down # (A has M rows). 
stride_am, stride_ak, stride_be, stride_bk, stride_bn, stride_cm, stride_cn, # Meta-parameters BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, EVEN_K: tl.constexpr, NUM_SMS: tl.constexpr, MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr, compute_type: tl.constexpr, use_fp8: tl.constexpr, ): """ Implements the fused computation for a Mixture of Experts (MOE) using token and expert matrices. This is the persistent version of the fused_moe kernel. Key Parameters: - A: The input tensor representing tokens with shape (*, K), where '*' can be any shape representing batches and K is the feature dimension of each token. - B: The stacked MOE weight tensor with shape (E, N, K), where E is the number of experts, K is the input feature dimension, and N is the output feature dimension. - C: The output cache tensor with shape (M, topk, N), where M is the total number of tokens post padding, topk is the number of times each token is repeated, and N is the output feature dimension. - sorted_token_ids: A tensor containing the sorted indices of tokens, repeated topk times and arranged by the expert index they are assigned to. - expert_ids: A tensor containing the indices of the expert for each block. It determines which expert matrix from B should be used for each block in A. This kernel performs the multiplication of a token by its corresponding expert matrix as determined by `expert_ids`. The sorting of `sorted_token_ids` by expert index and padding ensures divisibility by BLOCK_SIZE_M, which is necessary to maintain consistency in block matrix multiplication across different blocks processed by the same expert. """ # ----------------------------------------------------------- # Simply compute how many iterations each persistent block needs to do start_pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) # num_tiles = num_pid_m * num_pid_n tile_id = start_pid offs_k = tl.arange(0, BLOCK_SIZE_K) # offs_token = tl.zeros((BLOCK_SIZE_M,), dtype=tl.int32) # token_mask = tl.zeros((BLOCK_SIZE_M,), dtype=tl.int1) # Load tile-invariant runtime constant num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr) # Compute how many tiles are outside the padding region num_pid_in_group = GROUP_SIZE_M * num_pid_n pid_m = 0 tile_id2 = start_pid - NUM_SMS num_valid_tiles = -1 while pid_m * BLOCK_SIZE_M < num_tokens_post_padded: num_valid_tiles += 1 tile_id2 += NUM_SMS group_id = tile_id2 // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + ((tile_id2 % num_pid_in_group) % group_size_m) for _ in range(0, num_valid_tiles): if GROUP_SIZE_M == 1: pid_m = tile_id // num_pid_n pid_n = tile_id % num_pid_n else: group_id = tile_id // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + ((tile_id % num_pid_in_group) % group_size_m) pid_n = (tile_id % num_pid_in_group) // group_size_m # Compute the mask offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_token = tl.load(sorted_token_ids_ptr + offs_token_id) token_mask = offs_token < num_valid_tokens # Compute the A pointer a_ptrs = a_ptr + (offs_token[:, None] // top_k * stride_am + offs_k[None, :] * stride_ak) # Compute the B pointer offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N off_experts = tl.load(expert_ids_ptr + pid_m) b_ptrs = (b_ptr + off_experts * 
stride_be + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)) if use_fp8: a_scale = tl.load(a_scale_ptr) b_scale = tl.load(b_scale_ptr + off_experts) accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)): # Load the next block of A and B, generate a mask by checking the # K dimension. if EVEN_K: a = tl.load(a_ptrs, mask=token_mask[:, None], other=0.0) b = tl.load(b_ptrs) else: a = tl.load( a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K - k * BLOCK_SIZE_K), other=0.0 ) b = tl.load( b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0 ) # We accumulate along the K dimension. if use_fp8: accumulator = tl.dot(a, b, acc=accumulator) else: accumulator += tl.dot(a, b) # Advance the ptrs to the next K block. a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk if MUL_ROUTED_WEIGHT: moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask, other=0) accumulator = accumulator * moe_weight[:, None] if use_fp8: accumulator = (accumulator * a_scale * b_scale).to(compute_type) else: accumulator = accumulator.to(compute_type) # ----------------------------------------------------------- # Write back the block of the output offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) c_ptrs = (c_ptr + stride_cm * offs_token[:, None] + stride_cn * offs_cn[None, :]) c_mask = token_mask[:, None] & (offs_cn[None, :] < N) tl.store(c_ptrs, accumulator, mask=c_mask) # advance tile_id tile_id += NUM_SMS def moe_align_block_size( topk_ids: torch.Tensor, block_size: int, num_experts: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Aligns the token distribution across experts to be compatible with block size for matrix multiplication. Parameters: - topk_ids: A tensor of shape [total_tokens, top_k] representing the top-k expert indices for each token. - block_size: The block size used in block matrix multiplication. - num_experts: The total number of experts. Returns: - sorted_token_ids: A tensor containing the sorted token indices according to their allocated expert. - expert_ids: A tensor indicating the assigned expert index for each block. - num_tokens_post_padded: The total number of tokens after padding, ensuring divisibility by block_size. This function pads the number of tokens that each expert needs to process so that it is divisible by block_size. Padding ensures that during block matrix multiplication, the dimensions align correctly. Example: Given topk_ids = [[2, 3, 4], [1, 2, 4], [1, 3, 4], [1, 2, 3]], block_size = 4, and num_experts = 4: - We initially have 12 tokens (after repeating 'top_k' times) and 4 experts, with each expert needing to process 3 tokens. - As block_size is 4, we pad 1 token for each expert. - First, flatten topk_ids to [2, 3, 4, 1, 2, 4, 1, 3, 4, 1, 2, 3]. - Then append padding tokens [12, 12, 12, 12] for each block. - After sorting by expert index, we obtain token_ids [3, 6, 9, 12, 0, 4, 10, 12, 1, 7, 11, 12, 2, 5, 8, 12]. Tokens 12 are non-existent (padding) and are ignored in the subsequent matrix multiplication. - The padding ensures that the total number of tokens is now divisible by block_size for proper block matrix operations. 
""" max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1) sorted_ids = torch.empty((max_num_tokens_padded, ), dtype=torch.int32, device=topk_ids.device) # sorted_ids.fill_(topk_ids.numel()) max_num_m_blocks = triton.cdiv(max_num_tokens_padded, block_size) expert_ids = torch.empty((max_num_m_blocks, ), dtype=torch.int32, device=topk_ids.device) token_nums = torch.empty((max_num_m_blocks, ), dtype=torch.int32, device=topk_ids.device) num_tokens_post_pad = torch.empty((1), dtype=torch.int32, device=topk_ids.device) moe_kernels.moe_align_block_size(topk_ids, num_experts, block_size, sorted_ids, expert_ids, token_nums, num_tokens_post_pad) return sorted_ids, expert_ids, token_nums, num_tokens_post_pad def invoke_fused_moe_kernel(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, A_scale: Optional[torch.Tensor], B_scale: Optional[torch.Tensor], topk_weights: torch.Tensor, topk_ids: torch.Tensor, sorted_token_ids: torch.Tensor, expert_ids: torch.Tensor, token_nums: torch.Tensor, num_tokens_post_padded: torch.Tensor, mul_routed_weight: bool, top_k: int, config: Dict[str, Any], compute_type: tl.dtype, use_fp8_w8a8: bool, use_int8_w8a16: bool) -> None: assert topk_weights.stride(1) == 1 assert sorted_token_ids.stride(0) == 1 if use_fp8_w8a8: A, A_scale = moe_kernels.scaled_fp8_quant(A, A_scale) assert B_scale is not None elif use_int8_w8a16: assert B_scale is not None else: assert A_scale is None assert B_scale is None if not FUSED_MOE_PERSISTENT: grid = lambda META: (triton.cdiv(sorted_token_ids.shape[0], META[ "BLOCK_SIZE_M"]) * triton.cdiv(B.shape[1], META["BLOCK_SIZE_N"]), ) fused_moe_kernel[grid]( A, B, C, A_scale, B_scale, topk_weights, sorted_token_ids, expert_ids, token_nums, num_tokens_post_padded, B.shape[1], B.shape[2] - padding_size, sorted_token_ids.shape[0], topk_ids.numel(), A.stride(0), A.stride(1), B.stride(0), B.stride(2), B.stride(1), C.stride(1), C.stride(2), B_scale.stride(0) if B_scale is not None and use_int8_w8a16 else 0, B_scale.stride(1) if B_scale is not None and use_int8_w8a16 else 0, MUL_ROUTED_WEIGHT=mul_routed_weight, top_k=top_k, compute_type=compute_type, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16, **config, enable_moe_lds_bypass=ENABLE_MOE_LDS_BYPASS ) else: NUM_SMS = torch.cuda.get_device_properties("cuda").multi_processor_count * 2 grid = lambda META: (min( NUM_SMS, triton.cdiv(sorted_token_ids.shape[0], META["BLOCK_SIZE_M"]) * triton.cdiv(B.shape[1], META["BLOCK_SIZE_N"]) ), ) fused_moe_persistent_kernel[grid]( A, B, C, A_scale, B_scale, topk_weights, sorted_token_ids, expert_ids, num_tokens_post_padded, B.shape[1], B.shape[2] - padding_size, sorted_token_ids.shape[0], topk_ids.numel(), A.stride(0), A.stride(1), B.stride(0), B.stride(2), B.stride(1), C.stride(1), C.stride(2), NUM_SMS=NUM_SMS, MUL_ROUTED_WEIGHT=mul_routed_weight, top_k=top_k, compute_type=compute_type, use_fp8=use_fp8_w8a8, **config, enable_moe_lds_bypass=ENABLE_MOE_LDS_BYPASS ) def get_config_file_name(E: int, N: int, dtype: Optional[str]) -> str: # device_name = current_platform.get_device_name().replace(" ", "_") device_name = 'AMD_Instinct_MI308X_OAM' # TODO: need to update dtype_selector = "" if not dtype else f",dtype={dtype}" return f"E={E},N={N},device_name={device_name}{dtype_selector}.json" @functools.lru_cache def get_moe_configs(E: int, N: int, dtype: Optional[str]) -> Optional[Dict[int, Any]]: """ Return optimized configurations for the fused MoE kernel. 
The return value will be a dictionary that maps an irregular grid of batch sizes to configurations of the fused_moe kernel. To evaluate the kernel on a given batch size bs, the closest batch size in the grid should be picked and the associated configuration chosen to invoke the kernel. """ # First look up if an optimized configuration is available in the configs # directory json_file_name = get_config_file_name(E, N, dtype) config_file_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "configs", json_file_name) if os.path.exists(config_file_path): with open(config_file_path) as f: logger.info("Using configuration from %s for MoE layer.", config_file_path) # If a configuration has been found, return it return {int(key): val for key, val in json.load(f).items()} # If no optimized configuration is available, we will use the default # configuration logger.info("---> MOE tuned file not found at %s",config_file_path) return None def get_default_config( M: int, E: int, N: int, K: int, topk: int, dtype: Optional[str], is_marlin: bool, ) -> Dict[str, int]: config = { 'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, # reqd. for MOE shuffle 'BLOCK_SIZE_K': 128, # reqd. for MOE shuffle 'GROUP_SIZE_M': 8 } # A heuristic: fused marlin works faster with this config for small M if M <= E or (is_marlin and M <= 32): config = { 'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 128, # reqd. for MOE shuffle 'BLOCK_SIZE_K': 128, # reqd. for MOE shuffle 'GROUP_SIZE_M': 1 } return config def try_get_optimal_moe_config( w1_shape: Tuple[int, ...], w2_shape: Tuple[int, ...], top_k: int, dtype: Optional[str], M: int, override_config: Optional[Dict[str, Any]] = None, is_marlin: bool = False, ): if override_config: config = override_config else: # First try to load optimal config from the file E, _, N = w2_shape configs = get_moe_configs(E, N, dtype) if configs: # If an optimal configuration map has been found, look up the # optimal config config = configs[min(configs.keys(), key=lambda x: abs(x - M))] else: # Else use the default config config = get_default_config(M, E, N, w1_shape[2], top_k, dtype, is_marlin) return config def fused_topk( hidden_states: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, ): assert hidden_states.shape[0] == gating_output.shape[0], ( "Number of tokens mismatch") M, _ = hidden_states.shape topk_weights = torch.empty(M, topk, dtype=torch.float32, device=hidden_states.device) topk_ids = torch.empty(M, topk, dtype=torch.int32, device=hidden_states.device) token_expert_indicies = torch.empty(M, topk, dtype=torch.int32, device=hidden_states.device) moe_kernels.topk_softmax( topk_weights, topk_ids, token_expert_indicies, gating_output.float(), # TODO(woosuk): Optimize this. renormalize ) del token_expert_indicies # Not used. Will be used in the future. 
# if renormalize: # topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True) return topk_weights, topk_ids # This is used by the Deepseek-V2 model def grouped_topk(hidden_states: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, num_expert_group: int = 0, topk_group: int = 0): assert hidden_states.shape[0] == gating_output.shape[0], ( "Number of tokens mismatch") scores = torch.softmax(gating_output, dim=-1) num_token = scores.shape[0] group_scores = scores.view(num_token, num_expert_group, -1).max(dim=-1).values # [n, n_group] group_idx = torch.topk(group_scores, k=topk_group, dim=-1, sorted=False)[1] # [n, top_k_group] group_mask = torch.zeros_like(group_scores) # [n, n_group] group_mask.scatter_(1, group_idx, 1) # [n, n_group] score_mask = group_mask.unsqueeze(-1).expand( num_token, num_expert_group, scores.shape[-1] // num_expert_group).reshape(num_token, -1) # [n, e] tmp_scores = scores.masked_fill(~score_mask.bool(), 0.0) # [n, e] topk_weights, topk_ids = torch.topk(tmp_scores, k=topk, dim=-1, sorted=False) if renormalize: topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True) return topk_weights.to(torch.float32), topk_ids.to(torch.int32) def get_config_dtype_str(dtype: torch.dtype, use_int8_w8a16: Optional[bool] = False, use_fp8_w8a8: Optional[bool] = False): if use_fp8_w8a8: return "fp8_w8a8" elif use_int8_w8a16: return "int8_w8a16" elif dtype == torch.float: # avoiding cases where kernel fails when float32 MoE # use fp16/bfloat16 configs return "float32" return None def fused_experts(hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, topk_weights: torch.Tensor, topk_ids: torch.Tensor, inplace: bool = False, override_config: Optional[Dict[str, Any]] = None, use_fp8_w8a8: bool = False, use_int8_w8a16: bool = False, w1_scale: Optional[torch.Tensor] = None, w2_scale: Optional[torch.Tensor] = None, a1_scale: Optional[torch.Tensor] = None, a2_scale: Optional[torch.Tensor] = None): # Check constraints. 
assert hidden_states.shape[1] == w1.shape[2] - padding_size, "Hidden size mismatch" assert topk_weights.shape == topk_ids.shape, "topk shape mismatch" assert hidden_states.is_contiguous(), "Hidden_states must be contiguous" assert w1.is_contiguous(), "Expert weights1 must be contiguous" assert w2.is_contiguous(), "Expert weights2 must be contiguous" assert hidden_states.dtype in [ torch.float32, torch.float16, torch.bfloat16 ] num_tokens, _ = hidden_states.shape E, N, _ = w1.shape # We execute the fused_moe kernel in chunks to circumvent this issue: # https://github.com/vllm-project/vllm/issues/5938 CHUNK_SIZE = VLLM_FUSED_MOE_CHUNK_SIZE M = min(num_tokens, CHUNK_SIZE) config_dtype = get_config_dtype_str(use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16, dtype=hidden_states.dtype) get_config_func = functools.partial( try_get_optimal_moe_config, w1.shape, (w2.shape[0], w2.shape[1], w2.shape[2] - padding_size), topk_ids.shape[1], config_dtype, override_config=override_config, ) config = get_config_func(M) intermediate_cache1 = torch.empty((M, topk_ids.shape[1], N), device=hidden_states.device, dtype=hidden_states.dtype) intermediate_cache2 = torch.empty((M * topk_ids.shape[1], N // 2), device=hidden_states.device, dtype=hidden_states.dtype) intermediate_cache3 = torch.empty((M, topk_ids.shape[1], w2.shape[1]), device=hidden_states.device, dtype=hidden_states.dtype) compute_type = (tl.bfloat16 if hidden_states.dtype == torch.bfloat16 else tl.float16) if inplace: out_hidden_states = hidden_states else: out_hidden_states = torch.empty_like(hidden_states) # print("init config:", config) for chunk in range((num_tokens // CHUNK_SIZE) + 1): begin_chunk_idx, end_chunk_idx = (chunk * CHUNK_SIZE, min((chunk + 1) * CHUNK_SIZE, num_tokens)) curr_hidden_states = hidden_states[begin_chunk_idx:end_chunk_idx] tokens_in_chunk, _ = curr_hidden_states.shape if tokens_in_chunk == 0: break if tokens_in_chunk < CHUNK_SIZE and chunk > 0: # Adjust the intermediate cache size and config for the last # chunk. Note that in most cases we only have one chunk # so the cache size and config are already set correctly and # do not need to be adjusted. 
intermediate_cache1 = intermediate_cache1[:tokens_in_chunk] intermediate_cache2 = intermediate_cache2[:tokens_in_chunk] intermediate_cache3 = intermediate_cache3[:tokens_in_chunk] config = get_config_func(tokens_in_chunk) # print("inside config:", config) curr_topk_ids = topk_ids[begin_chunk_idx:end_chunk_idx] curr_topk_weights = topk_weights[begin_chunk_idx:end_chunk_idx] sorted_token_ids, expert_ids, token_nums, num_tokens_post_padded = ( moe_align_block_size(curr_topk_ids, config['BLOCK_SIZE_M'], E)) invoke_fused_moe_kernel(curr_hidden_states, w1, intermediate_cache1, a1_scale, w1_scale, curr_topk_weights, curr_topk_ids, sorted_token_ids, expert_ids, token_nums, num_tokens_post_padded, False, topk_ids.shape[1], config, compute_type=compute_type, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16) moe_kernels.silu_and_mul(intermediate_cache2, intermediate_cache1.view(-1, N)) invoke_fused_moe_kernel(intermediate_cache2, w2, intermediate_cache3, a2_scale, w2_scale, curr_topk_weights, curr_topk_ids, sorted_token_ids, expert_ids, token_nums, num_tokens_post_padded, True, 1, config, compute_type=compute_type, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16) moe_kernels.moe_sum(intermediate_cache3.view(*intermediate_cache3.shape), out_hidden_states[begin_chunk_idx:end_chunk_idx]) return out_hidden_states def fused_moe( hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, inplace: bool = False, override_config: Optional[Dict[str, Any]] = None, use_grouped_topk: bool = False, num_expert_group: Optional[int] = None, topk_group: Optional[int] = None, custom_routing_function: Optional[Callable] = None, use_fp8_w8a8: bool = False, use_int8_w8a16: bool = False, w1_scale: Optional[torch.Tensor] = None, w2_scale: Optional[torch.Tensor] = None, a1_scale: Optional[torch.Tensor] = None, a2_scale: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ This function computes a Mixture of Experts (MoE) layer using two sets of weights, w1 and w2, and top-k gating mechanism. Parameters: - hidden_states (torch.Tensor): The input tensor to the MoE layer. - w1 (torch.Tensor): The first set of expert weights. - w2 (torch.Tensor): The second set of expert weights. - gating_output (torch.Tensor): The output of the gating operation (before softmax). - topk (int): The number of top-k experts to select. - renormalize (bool): If True, renormalize the top-k weights to sum to 1. - inplace (bool): If True, perform the operation in-place. Defaults to False. - override_config (Optional[Dict[str, Any]]): Optional override for the kernel configuration. - num_expert_group: Optional[int]: additional parameter for grouped_topk - topk_group: Optional[int]: additional parameter for grouped_topk - use_grouped_topk: If True, use grouped_topk instead of fused_topk note: Deepseekv2 model uses grouped_topk - use_fp8_w8a8 (bool): If True, use fp8 arithmetic to compute the inner products for w1 and w2. Defaults to False. - use_int8_w8a16 (bool): If True, use fp8 arithmetic to compute the inner products for w1 and w2. Defaults to False. - w1_scale (Optional[torch.Tensor]): Optional scale to be used for w1. - w2_scale (Optional[torch.Tensor]): Optional scale to be used for w2. Returns: - torch.Tensor: The output tensor after applying the MoE layer. """ # Check constraints. 
assert gating_output.shape[1] == w1.shape[0], "Number of experts mismatch" if use_grouped_topk: assert num_expert_group is not None and topk_group is not None topk_weights, topk_ids = grouped_topk(hidden_states, gating_output, topk, renormalize, num_expert_group, topk_group) elif custom_routing_function is None: topk_weights, topk_ids = fused_topk(hidden_states, gating_output, topk, renormalize) else: topk_weights, topk_ids = custom_routing_function( hidden_states, gating_output, topk, renormalize) return fused_experts(hidden_states, w1, w2, topk_weights, topk_ids, inplace=inplace, override_config=override_config, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16, w1_scale=w1_scale, w2_scale=w2_scale, a1_scale=a1_scale, a2_scale=a2_scale) def fused_experts_ck( hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, topk_weights: torch.Tensor, topk_ids: torch.Tensor, use_fp8_w8a8: bool = False, use_int8_w8a16: bool = False, w1_scale: Optional[torch.Tensor] = None, w2_scale: Optional[torch.Tensor] = None, a1_scale: Optional[torch.Tensor] = None, a2_scale: Optional[torch.Tensor] = None, ): block_m = 32 tokens = hidden_states.shape[0] experts = w1.shape[0] topk = topk_ids.shape[1] out_hidden_states = torch.empty_like(hidden_states) max_num_tokens_padded = topk * tokens + experts * block_m - topk sorted_token_ids = torch.empty( (max_num_tokens_padded,), dtype=torch.int32, device=topk_ids.device ) sorted_weight = torch.empty( (max_num_tokens_padded,), dtype=topk_weights.dtype, device=topk_ids.device ) max_num_m_blocks = math.floor((max_num_tokens_padded + block_m - 1) / block_m) sorted_expert_ids = torch.empty( (max_num_m_blocks,), dtype=torch.int32, device=topk_ids.device ) num_tokens_post_pad = torch.empty((1), dtype=torch.int32, device=topk_ids.device) moe_kernels.moe_fused_experts_ck( hidden_states, w1, w2, topk_weights, topk_ids, w1_scale, w2_scale, a1_scale, a2_scale, sorted_token_ids, sorted_weight, sorted_expert_ids, num_tokens_post_pad, out_hidden_states, 32, (use_fp8_w8a8 or use_int8_w8a16), 0) return out_hidden_states
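As a correctness aid, the plain-PyTorch routine below spells out what the fused path computes end to end: top-k routing over gating_output, a SwiGLU expert MLP (matching silu_and_mul), routed-weight scaling applied after the second matmul, and a final sum over the top-k slots. It is a sketch under explicit assumptions: unpadded weights (i.e. VLLM_MOE_PADDING=0), renormalize=True, and the gate half of w1 stored first; torch_moe_reference is an illustrative name, not part of this module.

import torch
import torch.nn.functional as F


def torch_moe_reference(hidden_states: torch.Tensor, w1: torch.Tensor,
                        w2: torch.Tensor, gating_output: torch.Tensor,
                        topk: int) -> torch.Tensor:
    # hidden_states: (M, K), w1: (E, 2N, K), w2: (E, K, N), gating_output: (M, E)
    E = w1.shape[0]
    scores = torch.softmax(gating_output.float(), dim=-1)
    topk_weights, topk_ids = torch.topk(scores, topk, dim=-1)
    topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True)
    out = torch.zeros_like(hidden_states)
    for e in range(E):
        token_idx, slot = torch.nonzero(topk_ids == e, as_tuple=True)
        if token_idx.numel() == 0:
            continue
        x = hidden_states[token_idx]                  # (t, K)
        gate, up = (x @ w1[e].t()).chunk(2, dim=-1)   # (t, N) each
        act = F.silu(gate) * up                       # same as silu_and_mul
        y = act @ w2[e].t()                           # (t, K)
        weight = topk_weights[token_idx, slot, None].to(y.dtype)
        out.index_add_(0, token_idx, y * weight)      # accumulate over top-k slots
    return out

For small shapes this should track fused_moe(hidden_states, w1, w2, gating_output, topk, renormalize=True) up to differences in accumulation order and reduced-precision arithmetic.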
@triton.jit def fused_moe_persistent_kernel( # Pointers to matrices a_ptr, b_ptr, c_ptr, a_scale_ptr, b_scale_ptr, topk_weights_ptr, sorted_token_ids_ptr, expert_ids_ptr, num_tokens_post_padded_ptr, # Matrix dimensions N, K, EM, num_valid_tokens, # The stride variables represent how much to increase the ptr by when # moving by 1 element in a particular dimension. E.g. `stride_am` is # how much to increase `a_ptr` by to get the element one row down # (A has M rows). stride_am, stride_ak, stride_be, stride_bk, stride_bn, stride_cm, stride_cn, # Meta-parameters BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr, EVEN_K: tl.constexpr, NUM_SMS: tl.constexpr, MUL_ROUTED_WEIGHT: tl.constexpr, top_k: tl.constexpr, compute_type: tl.constexpr, use_fp8: tl.constexpr, ): """ Implements the fused computation for a Mixture of Experts (MOE) using token and expert matrices. This is the persistent version of the fused_moe kernel. Key Parameters: - A: The input tensor representing tokens with shape (*, K), where '*' can be any shape representing batches and K is the feature dimension of each token. - B: The stacked MOE weight tensor with shape (E, N, K), where E is the number of experts, K is the input feature dimension, and N is the output feature dimension. - C: The output cache tensor with shape (M, topk, N), where M is the total number of tokens post padding, topk is the number of times each token is repeated, and N is the output feature dimension. - sorted_token_ids: A tensor containing the sorted indices of tokens, repeated topk times and arranged by the expert index they are assigned to. - expert_ids: A tensor containing the indices of the expert for each block. It determines which expert matrix from B should be used for each block in A. This kernel performs the multiplication of a token by its corresponding expert matrix as determined by `expert_ids`. The sorting of `sorted_token_ids` by expert index and padding ensures divisibility by BLOCK_SIZE_M, which is necessary to maintain consistency in block matrix multiplication across different blocks processed by the same expert. 
""" # ----------------------------------------------------------- # Simply compute how many iterations each persistent block needs to do start_pid = tl.program_id(axis=0) num_pid_m = tl.cdiv(EM, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) # num_tiles = num_pid_m * num_pid_n tile_id = start_pid offs_k = tl.arange(0, BLOCK_SIZE_K) # offs_token = tl.zeros((BLOCK_SIZE_M,), dtype=tl.int32) # token_mask = tl.zeros((BLOCK_SIZE_M,), dtype=tl.int1) # Load tile-invariant runtime constant num_tokens_post_padded = tl.load(num_tokens_post_padded_ptr) # Compute how many tiles are outside the padding region num_pid_in_group = GROUP_SIZE_M * num_pid_n pid_m = 0 tile_id2 = start_pid - NUM_SMS num_valid_tiles = -1 while pid_m * BLOCK_SIZE_M < num_tokens_post_padded: num_valid_tiles += 1 tile_id2 += NUM_SMS group_id = tile_id2 // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + ((tile_id2 % num_pid_in_group) % group_size_m) for _ in range(0, num_valid_tiles): if GROUP_SIZE_M == 1: pid_m = tile_id // num_pid_n pid_n = tile_id % num_pid_n else: group_id = tile_id // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + ((tile_id % num_pid_in_group) % group_size_m) pid_n = (tile_id % num_pid_in_group) // group_size_m # Compute the mask offs_token_id = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_token = tl.load(sorted_token_ids_ptr + offs_token_id) token_mask = offs_token < num_valid_tokens # Compute the A pointer a_ptrs = a_ptr + (offs_token[:, None] // top_k * stride_am + offs_k[None, :] * stride_ak) # Compute the B pointer offs_bn = (pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)) % N off_experts = tl.load(expert_ids_ptr + pid_m) b_ptrs = (b_ptr + off_experts * stride_be + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn)) if use_fp8: a_scale = tl.load(a_scale_ptr) b_scale = tl.load(b_scale_ptr + off_experts) accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, tl.cdiv(K, BLOCK_SIZE_K)): # Load the next block of A and B, generate a mask by checking the # K dimension. if EVEN_K: a = tl.load(a_ptrs, mask=token_mask[:, None], other=0.0) b = tl.load(b_ptrs) else: a = tl.load( a_ptrs, mask=token_mask[:, None] & (offs_k[None, :] < K - k * BLOCK_SIZE_K), other=0.0 ) b = tl.load( b_ptrs, mask=offs_k[:, None] < K - k * BLOCK_SIZE_K, other=0.0 ) # We accumulate along the K dimension. if use_fp8: accumulator = tl.dot(a, b, acc=accumulator) else: accumulator += tl.dot(a, b) # Advance the ptrs to the next K block. 
a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk if MUL_ROUTED_WEIGHT: moe_weight = tl.load(topk_weights_ptr + offs_token, mask=token_mask, other=0) accumulator = accumulator * moe_weight[:, None] if use_fp8: accumulator = (accumulator * a_scale * b_scale).to(compute_type) else: accumulator = accumulator.to(compute_type) # ----------------------------------------------------------- # Write back the block of the output offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) c_ptrs = (c_ptr + stride_cm * offs_token[:, None] + stride_cn * offs_cn[None, :]) c_mask = token_mask[:, None] & (offs_cn[None, :] < N) tl.store(c_ptrs, accumulator, mask=c_mask) # advance tile_id tile_id += NUM_SMS def moe_align_block_size( topk_ids: torch.Tensor, block_size: int, num_experts: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Aligns the token distribution across experts to be compatible with block size for matrix multiplication. Parameters: - topk_ids: A tensor of shape [total_tokens, top_k] representing the top-k expert indices for each token. - block_size: The block size used in block matrix multiplication. - num_experts: The total number of experts. Returns: - sorted_token_ids: A tensor containing the sorted token indices according to their allocated expert. - expert_ids: A tensor indicating the assigned expert index for each block. - num_tokens_post_padded: The total number of tokens after padding, ensuring divisibility by block_size. This function pads the number of tokens that each expert needs to process so that it is divisible by block_size. Padding ensures that during block matrix multiplication, the dimensions align correctly. Example: Given topk_ids = [[2, 3, 4], [1, 2, 4], [1, 3, 4], [1, 2, 3]], block_size = 4, and num_experts = 4: - We initially have 12 tokens (after repeating 'top_k' times) and 4 experts, with each expert needing to process 3 tokens. - As block_size is 4, we pad 1 token for each expert. - First, flatten topk_ids to [2, 3, 4, 1, 2, 4, 1, 3, 4, 1, 2, 3]. - Then append padding tokens [12, 12, 12, 12] for each block. - After sorting by expert index, we obtain token_ids [3, 6, 9, 12, 0, 4, 10, 12, 1, 7, 11, 12, 2, 5, 8, 12]. Tokens 12 are non-existent (padding) and are ignored in the subsequent matrix multiplication. - The padding ensures that the total number of tokens is now divisible by block_size for proper block matrix operations. 
""" max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1) sorted_ids = torch.empty((max_num_tokens_padded, ), dtype=torch.int32, device=topk_ids.device) # sorted_ids.fill_(topk_ids.numel()) max_num_m_blocks = triton.cdiv(max_num_tokens_padded, block_size) expert_ids = torch.empty((max_num_m_blocks, ), dtype=torch.int32, device=topk_ids.device) token_nums = torch.empty((max_num_m_blocks, ), dtype=torch.int32, device=topk_ids.device) num_tokens_post_pad = torch.empty((1), dtype=torch.int32, device=topk_ids.device) moe_kernels.moe_align_block_size(topk_ids, num_experts, block_size, sorted_ids, expert_ids, token_nums, num_tokens_post_pad) return sorted_ids, expert_ids, token_nums, num_tokens_post_pad def invoke_fused_moe_kernel(A: torch.Tensor, B: torch.Tensor, C: torch.Tensor, A_scale: Optional[torch.Tensor], B_scale: Optional[torch.Tensor], topk_weights: torch.Tensor, topk_ids: torch.Tensor, sorted_token_ids: torch.Tensor, expert_ids: torch.Tensor, token_nums: torch.Tensor, num_tokens_post_padded: torch.Tensor, mul_routed_weight: bool, top_k: int, config: Dict[str, Any], compute_type: tl.dtype, use_fp8_w8a8: bool, use_int8_w8a16: bool) -> None: assert topk_weights.stride(1) == 1 assert sorted_token_ids.stride(0) == 1 if use_fp8_w8a8: A, A_scale = moe_kernels.scaled_fp8_quant(A, A_scale) assert B_scale is not None elif use_int8_w8a16: assert B_scale is not None else: assert A_scale is None assert B_scale is None if not FUSED_MOE_PERSISTENT: grid = lambda META: (triton.cdiv(sorted_token_ids.shape[0], META[ "BLOCK_SIZE_M"]) * triton.cdiv(B.shape[1], META["BLOCK_SIZE_N"]), ) fused_moe_kernel[grid]( A, B, C, A_scale, B_scale, topk_weights, sorted_token_ids, expert_ids, token_nums, num_tokens_post_padded, B.shape[1], B.shape[2] - padding_size, sorted_token_ids.shape[0], topk_ids.numel(), A.stride(0), A.stride(1), B.stride(0), B.stride(2), B.stride(1), C.stride(1), C.stride(2), B_scale.stride(0) if B_scale is not None and use_int8_w8a16 else 0, B_scale.stride(1) if B_scale is not None and use_int8_w8a16 else 0, MUL_ROUTED_WEIGHT=mul_routed_weight, top_k=top_k, compute_type=compute_type, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16, **config, enable_moe_lds_bypass=ENABLE_MOE_LDS_BYPASS ) else: NUM_SMS = torch.cuda.get_device_properties("cuda").multi_processor_count * 2 grid = lambda META: (min( NUM_SMS, triton.cdiv(sorted_token_ids.shape[0], META["BLOCK_SIZE_M"]) * triton.cdiv(B.shape[1], META["BLOCK_SIZE_N"]) ), ) fused_moe_persistent_kernel[grid]( A, B, C, A_scale, B_scale, topk_weights, sorted_token_ids, expert_ids, num_tokens_post_padded, B.shape[1], B.shape[2] - padding_size, sorted_token_ids.shape[0], topk_ids.numel(), A.stride(0), A.stride(1), B.stride(0), B.stride(2), B.stride(1), C.stride(1), C.stride(2), NUM_SMS=NUM_SMS, MUL_ROUTED_WEIGHT=mul_routed_weight, top_k=top_k, compute_type=compute_type, use_fp8=use_fp8_w8a8, **config, enable_moe_lds_bypass=ENABLE_MOE_LDS_BYPASS ) def get_config_file_name(E: int, N: int, dtype: Optional[str]) -> str: # device_name = current_platform.get_device_name().replace(" ", "_") device_name = 'AMD_Instinct_MI308X_OAM' # TODO: need to update dtype_selector = "" if not dtype else f",dtype={dtype}" return f"E={E},N={N},device_name={device_name}{dtype_selector}.json" @functools.lru_cache def get_moe_configs(E: int, N: int, dtype: Optional[str]) -> Optional[Dict[int, Any]]: """ Return optimized configurations for the fused MoE kernel. 
The return value will be a dictionary that maps an irregular grid of batch sizes to configurations of the fused_moe kernel. To evaluate the kernel on a given batch size bs, the closest batch size in the grid should be picked and the associated configuration chosen to invoke the kernel. """ # First look up if an optimized configuration is available in the configs # directory json_file_name = get_config_file_name(E, N, dtype) config_file_path = os.path.join( os.path.dirname(os.path.realpath(__file__)), "configs", json_file_name) if os.path.exists(config_file_path): with open(config_file_path) as f: logger.info("Using configuration from %s for MoE layer.", config_file_path) # If a configuration has been found, return it return {int(key): val for key, val in json.load(f).items()} # If no optimized configuration is available, we will use the default # configuration logger.info("---> MOE tuned file not found at %s",config_file_path) return None def get_default_config( M: int, E: int, N: int, K: int, topk: int, dtype: Optional[str], is_marlin: bool, ) -> Dict[str, int]: config = { 'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, # reqd. for MOE shuffle 'BLOCK_SIZE_K': 128, # reqd. for MOE shuffle 'GROUP_SIZE_M': 8 } # A heuristic: fused marlin works faster with this config for small M if M <= E or (is_marlin and M <= 32): config = { 'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 128, # reqd. for MOE shuffle 'BLOCK_SIZE_K': 128, # reqd. for MOE shuffle 'GROUP_SIZE_M': 1 } return config def try_get_optimal_moe_config( w1_shape: Tuple[int, ...], w2_shape: Tuple[int, ...], top_k: int, dtype: Optional[str], M: int, override_config: Optional[Dict[str, Any]] = None, is_marlin: bool = False, ): if override_config: config = override_config else: # First try to load optimal config from the file E, _, N = w2_shape configs = get_moe_configs(E, N, dtype) if configs: # If an optimal configuration map has been found, look up the # optimal config config = configs[min(configs.keys(), key=lambda x: abs(x - M))] else: # Else use the default config config = get_default_config(M, E, N, w1_shape[2], top_k, dtype, is_marlin) return config def fused_topk( hidden_states: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, ): assert hidden_states.shape[0] == gating_output.shape[0], ( "Number of tokens mismatch") M, _ = hidden_states.shape topk_weights = torch.empty(M, topk, dtype=torch.float32, device=hidden_states.device) topk_ids = torch.empty(M, topk, dtype=torch.int32, device=hidden_states.device) token_expert_indicies = torch.empty(M, topk, dtype=torch.int32, device=hidden_states.device) moe_kernels.topk_softmax( topk_weights, topk_ids, token_expert_indicies, gating_output.float(), # TODO(woosuk): Optimize this. renormalize ) del token_expert_indicies # Not used. Will be used in the future. 
# if renormalize: # topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True) return topk_weights, topk_ids # This is used by the Deepseek-V2 model def grouped_topk(hidden_states: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, num_expert_group: int = 0, topk_group: int = 0): assert hidden_states.shape[0] == gating_output.shape[0], ( "Number of tokens mismatch") scores = torch.softmax(gating_output, dim=-1) num_token = scores.shape[0] group_scores = scores.view(num_token, num_expert_group, -1).max(dim=-1).values # [n, n_group] group_idx = torch.topk(group_scores, k=topk_group, dim=-1, sorted=False)[1] # [n, top_k_group] group_mask = torch.zeros_like(group_scores) # [n, n_group] group_mask.scatter_(1, group_idx, 1) # [n, n_group] score_mask = group_mask.unsqueeze(-1).expand( num_token, num_expert_group, scores.shape[-1] // num_expert_group).reshape(num_token, -1) # [n, e] tmp_scores = scores.masked_fill(~score_mask.bool(), 0.0) # [n, e] topk_weights, topk_ids = torch.topk(tmp_scores, k=topk, dim=-1, sorted=False) if renormalize: topk_weights = topk_weights / topk_weights.sum(dim=-1, keepdim=True) return topk_weights.to(torch.float32), topk_ids.to(torch.int32) def get_config_dtype_str(dtype: torch.dtype, use_int8_w8a16: Optional[bool] = False, use_fp8_w8a8: Optional[bool] = False): if use_fp8_w8a8: return "fp8_w8a8" elif use_int8_w8a16: return "int8_w8a16" elif dtype == torch.float: # avoiding cases where kernel fails when float32 MoE # use fp16/bfloat16 configs return "float32" return None def fused_experts(hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, topk_weights: torch.Tensor, topk_ids: torch.Tensor, inplace: bool = False, override_config: Optional[Dict[str, Any]] = None, use_fp8_w8a8: bool = False, use_int8_w8a16: bool = False, w1_scale: Optional[torch.Tensor] = None, w2_scale: Optional[torch.Tensor] = None, a1_scale: Optional[torch.Tensor] = None, a2_scale: Optional[torch.Tensor] = None): # Check constraints. 
assert hidden_states.shape[1] == w1.shape[2] - padding_size, "Hidden size mismatch" assert topk_weights.shape == topk_ids.shape, "topk shape mismatch" assert hidden_states.is_contiguous(), "Hidden_states must be contiguous" assert w1.is_contiguous(), "Expert weights1 must be contiguous" assert w2.is_contiguous(), "Expert weights2 must be contiguous" assert hidden_states.dtype in [ torch.float32, torch.float16, torch.bfloat16 ] num_tokens, _ = hidden_states.shape E, N, _ = w1.shape # We execute the fused_moe kernel in chunks to circumvent this issue: # https://github.com/vllm-project/vllm/issues/5938 CHUNK_SIZE = VLLM_FUSED_MOE_CHUNK_SIZE M = min(num_tokens, CHUNK_SIZE) config_dtype = get_config_dtype_str(use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16, dtype=hidden_states.dtype) get_config_func = functools.partial( try_get_optimal_moe_config, w1.shape, (w2.shape[0], w2.shape[1], w2.shape[2] - padding_size), topk_ids.shape[1], config_dtype, override_config=override_config, ) config = get_config_func(M) intermediate_cache1 = torch.empty((M, topk_ids.shape[1], N), device=hidden_states.device, dtype=hidden_states.dtype) intermediate_cache2 = torch.empty((M * topk_ids.shape[1], N // 2), device=hidden_states.device, dtype=hidden_states.dtype) intermediate_cache3 = torch.empty((M, topk_ids.shape[1], w2.shape[1]), device=hidden_states.device, dtype=hidden_states.dtype) compute_type = (tl.bfloat16 if hidden_states.dtype == torch.bfloat16 else tl.float16) if inplace: out_hidden_states = hidden_states else: out_hidden_states = torch.empty_like(hidden_states) # print("init config:", config) for chunk in range((num_tokens // CHUNK_SIZE) + 1): begin_chunk_idx, end_chunk_idx = (chunk * CHUNK_SIZE, min((chunk + 1) * CHUNK_SIZE, num_tokens)) curr_hidden_states = hidden_states[begin_chunk_idx:end_chunk_idx] tokens_in_chunk, _ = curr_hidden_states.shape if tokens_in_chunk == 0: break if tokens_in_chunk < CHUNK_SIZE and chunk > 0: # Adjust the intermediate cache size and config for the last # chunk. Note that in most cases we only have one chunk # so the cache size and config are already set correctly and # do not need to be adjusted. 
intermediate_cache1 = intermediate_cache1[:tokens_in_chunk] intermediate_cache2 = intermediate_cache2[:tokens_in_chunk] intermediate_cache3 = intermediate_cache3[:tokens_in_chunk] config = get_config_func(tokens_in_chunk) # print("inside config:", config) curr_topk_ids = topk_ids[begin_chunk_idx:end_chunk_idx] curr_topk_weights = topk_weights[begin_chunk_idx:end_chunk_idx] sorted_token_ids, expert_ids, token_nums, num_tokens_post_padded = ( moe_align_block_size(curr_topk_ids, config['BLOCK_SIZE_M'], E)) invoke_fused_moe_kernel(curr_hidden_states, w1, intermediate_cache1, a1_scale, w1_scale, curr_topk_weights, curr_topk_ids, sorted_token_ids, expert_ids, token_nums, num_tokens_post_padded, False, topk_ids.shape[1], config, compute_type=compute_type, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16) moe_kernels.silu_and_mul(intermediate_cache2, intermediate_cache1.view(-1, N)) invoke_fused_moe_kernel(intermediate_cache2, w2, intermediate_cache3, a2_scale, w2_scale, curr_topk_weights, curr_topk_ids, sorted_token_ids, expert_ids, token_nums, num_tokens_post_padded, True, 1, config, compute_type=compute_type, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16) moe_kernels.moe_sum(intermediate_cache3.view(*intermediate_cache3.shape), out_hidden_states[begin_chunk_idx:end_chunk_idx]) return out_hidden_states def fused_moe( hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, gating_output: torch.Tensor, topk: int, renormalize: bool, inplace: bool = False, override_config: Optional[Dict[str, Any]] = None, use_grouped_topk: bool = False, num_expert_group: Optional[int] = None, topk_group: Optional[int] = None, custom_routing_function: Optional[Callable] = None, use_fp8_w8a8: bool = False, use_int8_w8a16: bool = False, w1_scale: Optional[torch.Tensor] = None, w2_scale: Optional[torch.Tensor] = None, a1_scale: Optional[torch.Tensor] = None, a2_scale: Optional[torch.Tensor] = None, ) -> torch.Tensor: """ This function computes a Mixture of Experts (MoE) layer using two sets of weights, w1 and w2, and top-k gating mechanism. Parameters: - hidden_states (torch.Tensor): The input tensor to the MoE layer. - w1 (torch.Tensor): The first set of expert weights. - w2 (torch.Tensor): The second set of expert weights. - gating_output (torch.Tensor): The output of the gating operation (before softmax). - topk (int): The number of top-k experts to select. - renormalize (bool): If True, renormalize the top-k weights to sum to 1. - inplace (bool): If True, perform the operation in-place. Defaults to False. - override_config (Optional[Dict[str, Any]]): Optional override for the kernel configuration. - num_expert_group: Optional[int]: additional parameter for grouped_topk - topk_group: Optional[int]: additional parameter for grouped_topk - use_grouped_topk: If True, use grouped_topk instead of fused_topk note: Deepseekv2 model uses grouped_topk - use_fp8_w8a8 (bool): If True, use fp8 arithmetic to compute the inner products for w1 and w2. Defaults to False. - use_int8_w8a16 (bool): If True, use fp8 arithmetic to compute the inner products for w1 and w2. Defaults to False. - w1_scale (Optional[torch.Tensor]): Optional scale to be used for w1. - w2_scale (Optional[torch.Tensor]): Optional scale to be used for w2. Returns: - torch.Tensor: The output tensor after applying the MoE layer. """ # Check constraints. 
assert gating_output.shape[1] == w1.shape[0], "Number of experts mismatch" if use_grouped_topk: assert num_expert_group is not None and topk_group is not None topk_weights, topk_ids = grouped_topk(hidden_states, gating_output, topk, renormalize, num_expert_group, topk_group) elif custom_routing_function is None: topk_weights, topk_ids = fused_topk(hidden_states, gating_output, topk, renormalize) else: topk_weights, topk_ids = custom_routing_function( hidden_states, gating_output, topk, renormalize) return fused_experts(hidden_states, w1, w2, topk_weights, topk_ids, inplace=inplace, override_config=override_config, use_fp8_w8a8=use_fp8_w8a8, use_int8_w8a16=use_int8_w8a16, w1_scale=w1_scale, w2_scale=w2_scale, a1_scale=a1_scale, a2_scale=a2_scale) def fused_experts_ck( hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, topk_weights: torch.Tensor, topk_ids: torch.Tensor, use_fp8_w8a8: bool = False, use_int8_w8a16: bool = False, w1_scale: Optional[torch.Tensor] = None, w2_scale: Optional[torch.Tensor] = None, a1_scale: Optional[torch.Tensor] = None, a2_scale: Optional[torch.Tensor] = None, ): block_m = 32 tokens = hidden_states.shape[0] experts = w1.shape[0] topk = topk_ids.shape[1] out_hidden_states = torch.empty_like(hidden_states) max_num_tokens_padded = topk * tokens + experts * block_m - topk sorted_token_ids = torch.empty( (max_num_tokens_padded,), dtype=torch.int32, device=topk_ids.device ) sorted_weight = torch.empty( (max_num_tokens_padded,), dtype=topk_weights.dtype, device=topk_ids.device ) max_num_m_blocks = math.floor((max_num_tokens_padded + block_m - 1) / block_m) sorted_expert_ids = torch.empty( (max_num_m_blocks,), dtype=torch.int32, device=topk_ids.device ) num_tokens_post_pad = torch.empty((1), dtype=torch.int32, device=topk_ids.device) moe_kernels.moe_fused_experts_ck( hidden_states, w1, w2, topk_weights, topk_ids, w1_scale, w2_scale, a1_scale, a2_scale, sorted_token_ids, sorted_weight, sorted_expert_ids, num_tokens_post_pad, out_hidden_states, 32, (use_fp8_w8a8 or use_int8_w8a16), 0) return out_hidden_states
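The moe_align_block_size docstring above walks through a worked example; the short pure-PyTorch sketch below reproduces that example as a reference. It is illustrative only: the helper name moe_align_block_size_ref is ours, it assumes per-expert padding to a multiple of block_size with a sentinel index equal to topk_ids.numel(), and it omits the token_nums output of the actual moe_kernels.moe_align_block_size op, whose layout for empty experts may differ.

import torch

def moe_align_block_size_ref(topk_ids: torch.Tensor, block_size: int, num_experts: int):
    flat = topk_ids.flatten()
    pad_id = flat.numel()  # sentinel index used for padding slots
    sorted_ids, expert_ids = [], []
    for e in range(num_experts):
        tok = (flat == e).nonzero(as_tuple=True)[0].tolist()
        if not tok:
            continue
        n_blocks = -(-len(tok) // block_size)  # ceil division
        tok += [pad_id] * (n_blocks * block_size - len(tok))
        sorted_ids += tok
        expert_ids += [e] * n_blocks
    num_tokens_post_pad = len(sorted_ids)
    return (torch.tensor(sorted_ids, dtype=torch.int32),
            torch.tensor(expert_ids, dtype=torch.int32),
            torch.tensor([num_tokens_post_pad], dtype=torch.int32))

# Reproduces the docstring example (expert ids there run 1..4, so num_experts=5 covers them):
topk_ids = torch.tensor([[2, 3, 4], [1, 2, 4], [1, 3, 4], [1, 2, 3]])
sorted_ids, expert_ids, n_post_pad = moe_align_block_size_ref(topk_ids, block_size=4, num_experts=5)
# sorted_ids -> [3, 6, 9, 12, 0, 4, 10, 12, 1, 7, 11, 12, 2, 5, 8, 12]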
CentML/Sylva
sylva/optimizer.py
https://github.com/CentML/Sylva/blob/8e6fa57c87babafe80623ac885bade397848b706/sylva/optimizer.py
# Copyright 2024 CentML Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import triton import triton.language as tl @torch.no_grad def batch_sparse_optimize(self, model, adam_buf, scope, block_size, m_masks): if not hasattr(self, "sparse_stats"): weights = [] weight_indices = [] block_x = [] block_y = [] total_nnz_blocks = 0 for n, m in model.named_modules(): if scope in n and m.__class__.__name__ == "BlockSparseLinear": weights.append(m.w0) num_nnz_blocks = torch.sum(m_masks[m]) total_nnz_blocks += num_nnz_blocks weight_indices += [len(weights) - 1 for _ in range(num_nnz_blocks)] for i in range(m_masks[m].size(0)): for j in range(m_masks[m].size(1)): if m_masks[m][i][j] == 1: block_x.append(i * block_size) block_y.append(j * block_size) self.sparse_stats = weights, weight_indices, block_x, block_y, total_nnz_blocks step_size = 0 alpha = 0 exp_avgs = [] denoms = [] for n, m in model.named_modules(): if scope in n and m.__class__.__name__ == "BlockSparseLinear": p = m.w1 step_size, exp_avg, denom, alpha = ( adam_buf[p]["step_size"], adam_buf[p]["exp_avg"], adam_buf[p]["denom"], adam_buf[p]["alpha"], ) block_num_bytes = block_size**2 * (16 // 8) # float 16 / 1 byte start_data_ptr = exp_avg[:, 0, :, :].data_ptr() exp_avgs += [ start_data_ptr + block_num_bytes * i for i in range(exp_avg.size(1)) ] start_data_ptr = denom[:, 0, :, :].data_ptr() denoms += [ start_data_ptr + block_num_bytes * i for i in range(denom.size(1)) ] weights, weight_indices, block_x, block_y, total_nnz_blocks = self.sparse_stats add( weights, weight_indices, block_x, block_y, step_size, exp_avgs, denoms, alpha, size_x=4096, size_y=4096, block_size=64, ) @triton.jit def _add( weights_ptr, weight_indices_ptr, step_size, exp_avgs_ptr, denoms_ptr, alpha, block_x_ptr, block_y_ptr, size_x, size_y, block_size: tl.constexpr, ): pid = tl.program_id(axis=0) weight_idx = tl.load(weight_indices_ptr + pid) weight_ptr = tl.load(weights_ptr + weight_idx).to(tl.pointer_type(tl.bfloat16)) blk_x = tl.load(block_x_ptr + pid) blk_y = tl.load(block_y_ptr + pid) exp_avg_ptr = tl.load(exp_avgs_ptr + pid).to(tl.pointer_type(tl.bfloat16)) denom_ptr = tl.load(denoms_ptr + pid).to(tl.pointer_type(tl.bfloat16)) for i in range(block_size): offsets = (blk_x + i) * size_y + blk_y + tl.arange(0, block_size) mask = offsets < size_x * size_y w = tl.load(weight_ptr + offsets, mask=mask) block_offsets = i * block_size + tl.arange(0, block_size) block_mask = block_offsets < block_size * block_size exp_avg = tl.load(exp_avg_ptr + block_offsets, mask=block_mask) denom = tl.load(denom_ptr + block_offsets, mask=block_mask) output = w + step_size * exp_avg / denom + w * alpha tl.store(weight_ptr + offsets, output, mask=mask) def add( weights, weight_indices, block_x, block_y, step_size, exp_avgs, denoms, alpha, size_x, size_y, block_size, ): num_nnz_blocks = len(block_x) grid = (num_nnz_blocks,) weights_ptr = torch.tensor( [w.data_ptr() for w in weights], device="cuda" ).contiguous() weight_indices_ptr = torch.tensor(weight_indices, device="cuda").contiguous() 
exp_avgs_ptr = torch.tensor(exp_avgs, device="cuda").contiguous() denoms_ptr = torch.tensor(denoms, device="cuda").contiguous() block_x_ptr = torch.tensor(block_x, device="cuda").contiguous() block_y_ptr = torch.tensor(block_y, device="cuda").contiguous() _add[grid]( weights_ptr, weight_indices_ptr, step_size, exp_avgs_ptr, denoms_ptr, alpha, block_x_ptr, block_y_ptr, size_x, size_y, block_size, ) return weights
@triton.jit
def _add(
    weights_ptr,
    weight_indices_ptr,
    step_size,
    exp_avgs_ptr,
    denoms_ptr,
    alpha,
    block_x_ptr,
    block_y_ptr,
    size_x,
    size_y,
    block_size: tl.constexpr,
):
    pid = tl.program_id(axis=0)
    weight_idx = tl.load(weight_indices_ptr + pid)
    weight_ptr = tl.load(weights_ptr + weight_idx).to(tl.pointer_type(tl.bfloat16))
    blk_x = tl.load(block_x_ptr + pid)
    blk_y = tl.load(block_y_ptr + pid)
    exp_avg_ptr = tl.load(exp_avgs_ptr + pid).to(tl.pointer_type(tl.bfloat16))
    denom_ptr = tl.load(denoms_ptr + pid).to(tl.pointer_type(tl.bfloat16))
    for i in range(block_size):
        offsets = (blk_x + i) * size_y + blk_y + tl.arange(0, block_size)
        mask = offsets < size_x * size_y
        w = tl.load(weight_ptr + offsets, mask=mask)
        block_offsets = i * block_size + tl.arange(0, block_size)
        block_mask = block_offsets < block_size * block_size
        exp_avg = tl.load(exp_avg_ptr + block_offsets, mask=block_mask)
        denom = tl.load(denom_ptr + block_offsets, mask=block_mask)
        output = w + step_size * exp_avg / denom + w * alpha
        tl.store(weight_ptr + offsets, output, mask=mask)


def add(
    weights,
    weight_indices,
    block_x,
    block_y,
    step_size,
    exp_avgs,
    denoms,
    alpha,
    size_x,
    size_y,
    block_size,
):
    num_nnz_blocks = len(block_x)
    grid = (num_nnz_blocks,)
    weights_ptr = torch.tensor(
        [w.data_ptr() for w in weights], device="cuda"
    ).contiguous()
    weight_indices_ptr = torch.tensor(weight_indices, device="cuda").contiguous()
    exp_avgs_ptr = torch.tensor(exp_avgs, device="cuda").contiguous()
    denoms_ptr = torch.tensor(denoms, device="cuda").contiguous()
    block_x_ptr = torch.tensor(block_x, device="cuda").contiguous()
    block_y_ptr = torch.tensor(block_y, device="cuda").contiguous()
    _add[grid](
        weights_ptr,
        weight_indices_ptr,
        step_size,
        exp_avgs_ptr,
        denoms_ptr,
        alpha,
        block_x_ptr,
        block_y_ptr,
        size_x,
        size_y,
        block_size,
    )
    return weights
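For reference, the per-block arithmetic that _add performs (output = w + step_size * exp_avg / denom + w * alpha, one block_size x block_size tile per program) has a simple dense-PyTorch equivalent, sketched below. This is illustrative only: block_update_ref and its toy shapes are our assumptions, not part of the Sylva repository, and the kernel's pointer-table indirection across many weight tensors is deliberately left out.

import torch

def block_update_ref(w, exp_avg_blk, denom_blk, blk_x, blk_y, step_size, alpha, block_size):
    # The kernel addresses this tile through flat offsets; here it is a plain view of w.
    tile = w[blk_x:blk_x + block_size, blk_y:blk_y + block_size]
    tile += step_size * exp_avg_blk / denom_blk + tile * alpha
    return w

w = torch.zeros(256, 256, dtype=torch.bfloat16)
exp_avg_blk = torch.ones(64, 64, dtype=torch.bfloat16)
denom_blk = torch.full((64, 64), 2.0, dtype=torch.bfloat16)
block_update_ref(w, exp_avg_blk, denom_blk, blk_x=64, blk_y=128,
                 step_size=0.5, alpha=0.0, block_size=64)
assert torch.all(w[64:128, 128:192] == 0.25)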
Gregory-Pereira/split_k_reduce
split_k_reduce.py
https://github.com/Gregory-Pereira/split_k_reduce/blob/2e5cd101541ef0e0305e05b754b827978d57e354/split_k_reduce.py
import torch import triton import triton.language as tl def get_cuda_autotune_config(): return [ triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2), triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2), # Good config for fp8 inputs. triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8), triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 128, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4) ] def get_hip_autotune_config(): return [ triton.Config( {'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 1, 'waves_per_eu': 2}, num_warps=4, num_stages=2), triton.Config( {'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 16, 'GROUP_SIZE_M': 4, 'waves_per_eu': 2}, num_warps=8, num_stages=2), triton.Config( {'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 1, 'waves_per_eu': 2}, num_warps=8, num_stages=2), triton.Config( {'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8, 'waves_per_eu': 3}, num_warps=4, num_stages=2), triton.Config( {'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 1, 'waves_per_eu': 8}, num_warps=4, num_stages=2), ] @triton.jit def split_k_reduce_kernel( A_ptr, B_ptr, C_ptr, M, N, K, stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr, SPLIT_K: tl.constexpr ): pid_m = tl.program_id(0) pid_n = tl.program_id(1) pid_k = tl.program_id(2) offs_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N) offs_k = pid_k * BLOCK_K + tl.arange(0, BLOCK_K) A = tl.load(A_ptr + (offs_m[:, None] * stride_am + offs_k[None, :] * stride_ak), mask=(offs_m[:, None] < M) & (offs_k[None, :] < K), other=0.0) B = tl.load(B_ptr + (offs_k[:, None] * stride_bk + offs_n[None, :] * 
stride_bn), mask=(offs_k[:, None] < K) & (offs_n[None, :] < N), other=0.0) partial = tl.dot(A, B) C_offset = offs_m[:, None] * stride_cm + offs_n[None, :] * stride_cn + pid_k * M * N tl.store(C_ptr + C_offset, partial, mask=(offs_m[:, None] < M) & (offs_n[None] < N)) # Problem sizes M, N, K = 1024, 1024, 1024 # Input matrices A = torch.randn((M, K), dtype=torch.float32, device="cuda") B = torch.randn((K, N), dtype=torch.float32, device="cuda") C = torch.zeros((M, N), dtype=torch.float32, device="cuda") # Strides stride_am, stride_ak = A.stride() stride_bk, stride_bn = B.stride() stride_cm, stride_cn = C.stride() # Kernel call grid = lambda META: ( (M + META["BLOCK_M"] - 1) // META["BLOCK_M"], (N + META["BLOCK_N"] - 1) // META["BLOCK_N"], META["SPLIT_K"] ) split_k_reduce_kernel[grid]( A.data_ptr(), B.data_ptr(), C.data_ptr(), M, N, K, stride_am, stride_ak, stride_bk, stride_bn, stride_cm, stride_cn ) # Verify the result C_ref = torch.matmul(A, B) print("Max difference:", torch.max(torch.abs(C - C_ref)).item())
@triton.jit
def split_k_reduce_kernel(
    A_ptr, B_ptr, C_ptr,
    M, N, K,
    stride_am, stride_ak,
    stride_bk, stride_bn,
    stride_cm, stride_cn,
    BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_K: tl.constexpr,
    SPLIT_K: tl.constexpr
):
    pid_m = tl.program_id(0)
    pid_n = tl.program_id(1)
    pid_k = tl.program_id(2)

    offs_m = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
    offs_n = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
    offs_k = pid_k * BLOCK_K + tl.arange(0, BLOCK_K)

    A = tl.load(A_ptr + (offs_m[:, None] * stride_am + offs_k[None, :] * stride_ak),
                mask=(offs_m[:, None] < M) & (offs_k[None, :] < K), other=0.0)
    B = tl.load(B_ptr + (offs_k[:, None] * stride_bk + offs_n[None, :] * stride_bn),
                mask=(offs_k[:, None] < K) & (offs_n[None, :] < N), other=0.0)

    partial = tl.dot(A, B)

    C_offset = offs_m[:, None] * stride_cm + offs_n[None, :] * stride_cn + pid_k * M * N
    tl.store(C_ptr + C_offset, partial,
             mask=(offs_m[:, None] < M) & (offs_n[None] < N))


# Problem sizes
M, N, K = 1024, 1024, 1024

# Input matrices
A = torch.randn((M, K), dtype=torch.float32, device="cuda")
B = torch.randn((K, N), dtype=torch.float32, device="cuda")
C = torch.zeros((M, N), dtype=torch.float32, device="cuda")

# Strides
stride_am, stride_ak = A.stride()
stride_bk, stride_bn = B.stride()
stride_cm, stride_cn = C.stride()

# Kernel call
grid = lambda META: (
    (M + META["BLOCK_M"] - 1) // META["BLOCK_M"],
    (N + META["BLOCK_N"] - 1) // META["BLOCK_N"],
    META["SPLIT_K"]
)
split_k_reduce_kernel[grid](
    A.data_ptr(), B.data_ptr(), C.data_ptr(),
    M, N, K,
    stride_am, stride_ak,
    stride_bk, stride_bn,
    stride_cm, stride_cn
)

# Verify the result
C_ref = torch.matmul(A, B)
print("Max difference:", torch.max(torch.abs(C - C_ref)).item())
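As written, the driver above is unlikely to run or verify: the grid lambda reads META["BLOCK_M"] and META["SPLIT_K"] even though no meta-parameters are supplied at launch and no autotuner is attached to the kernel, raw data_ptr() integers are passed where Triton normally receives tensors, and each split writes its partial tile at pid_k * M * N into a buffer that is only (M, N) and is never reduced. The sketch below is one possible corrected launch under our own assumptions (block sizes, a C_partial buffer of shape (SPLIT_K, M, N)); it is illustrative, not the repository's intended configuration.

BLOCK_M, BLOCK_N, BLOCK_K = 64, 64, 64
SPLIT_K = triton.cdiv(K, BLOCK_K)  # the kernel reads one BLOCK_K slice per split
C_partial = torch.zeros((SPLIT_K, M, N), dtype=torch.float32, device="cuda")

grid = (triton.cdiv(M, BLOCK_M), triton.cdiv(N, BLOCK_N), SPLIT_K)
split_k_reduce_kernel[grid](
    A, B, C_partial, M, N, K,
    A.stride(0), A.stride(1),
    B.stride(0), B.stride(1),
    N, 1,  # row/column strides of each (M, N) slab; pid_k * M * N selects the slab
    BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_K=BLOCK_K, SPLIT_K=SPLIT_K,
)
C = C_partial.sum(dim=0)  # the split-K reduction itself
print("Max difference:", (C - torch.matmul(A, B)).abs().max().item())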
JL-er/RWKV-PEFT
rwkvt/operator/rwkvop.py
https://github.com/JL-er/RWKV-PEFT/blob/5a311b2eb112cc361c451fc48bf16bea0a71c668/rwkvt/operator/rwkvop.py
from einops import rearrange import os, math, gc, importlib import torch ######################################################################################################## # CUDA Kernel ######################################################################################################## def RUN_CUDA_RWKV7g(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_RWKV7_STATE(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_RWKV7_INFCTX(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_CUDA_RWKV6(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_CUDA_RWKV6_STATE(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_CUDA_RWKV5(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') if os.environ["WKV"] == 'fla': if 'x070' in os.environ["RWKV_MY_TESTING"]: from rwkvfla.ops.rwkv7 import chunk_rwkv7 if os.environ["RWKV_TRAIN_TYPE"] == 'infctx': def RUN_RWKV7_INFCTX(r, k, v, w, a, b, s, HEAD_SIZE=64): # for State-tuning, infctx B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] # when use w=, -exp(w) is not needed, w=-torch.exp(w), otherwise, use log_w = -torch.exp(w) o, state = chunk_rwkv7(r=r, w=w, k=k, v=v, a=a, b=b, scale=1.0, initial_state=s, output_final_state=True, head_first=False) return o, state if os.environ["RWKV_TRAIN_TYPE"] == 'state': def RUN_RWKV7_STATE(r, k, v, w, a, b, s, HEAD_SIZE=64): # for State-tuning, infctx B,T,HC = w.shape C = HEAD_SIZE H = HC//C s = s.transpose(1, 2).expand(B,*s.shape) r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] # when use w=, -exp(w) is not needed, w=-torch.exp(w), otherwise, use log_w = -torch.exp(w) o, state = chunk_rwkv7(r=r, w=w, k=k, v=v, a=a, b=b, scale=1.0, initial_state=s, output_final_state=True, head_first=False) return o, state else: def RUN_CUDA_RWKV7g(r,w,k,v,a,b, HEAD_SIZE=64): #compatible with cuda implement B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] # when use w=, -exp(w) is not needed, w=-torch.exp(w), otherwise, use log_w = -torch.exp(w) o, _ = chunk_rwkv7(r=r, w=w, k=k, v=v, a=a, b=b, scale=1.0, initial_state=None, output_final_state=False, head_first=False) return o if 'x060' in os.environ["RWKV_MY_TESTING"]: from rwkvfla.ops.rwkv6 import chunk_rwkv6 if os.environ["RWKV_TRAIN_TYPE"] == 'infctx': def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): r = rearrange(r, 'b l (h d) -> b h l d', h = H) k = rearrange(k, 'b l (h d) -> b h l d', h = H) v = rearrange(v, 'b l (h d) -> b h l d', h = H) w = rearrange(-torch.exp(w), 'b l (h d) -> b h l d', h = H) o, state = chunk_rwkv6(r, k, v, w, u=u, scale=1., initial_state=s, output_final_state=True) x = rearrange(o, 'b h l d -> b l (h d)') return x, state elif os.environ["RWKV_TRAIN_TYPE"] == 'state': def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): r = rearrange(r, 'b l (h d) -> b h l d', h = H) k = rearrange(k, 'b l (h d) -> b h l d', h = H) v = rearrange(v, 'b l (h d) -> b h l d', h = H) w = rearrange(-torch.exp(w), 'b l (h d) -> b h l d', h = H) s = s.transpose(1, 2).expand(B,*s.shape) o,_ = chunk_rwkv6(r, k, v, w, u=u, scale=1., initial_state=s, output_final_state=False) x = rearrange(o, 'b h l d -> b l (h d)') return x else: def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u): r = rearrange(r, 'b l (h d) -> b h l d', h = H) k = rearrange(k, 'b l (h d) -> b h l d', h = H) v = rearrange(v, 'b l (h d) -> b h l d', h = H) w = rearrange(-torch.exp(w), 'b l (h d) -> b h 
l d', h = H) o,_ = chunk_rwkv6(r, k, v, w, u=u, scale=1., initial_state=None, output_final_state=False) x = rearrange(o, 'b h l d -> b l (h d)') return x elif os.environ["WKV"] == 'triton': print('x070 Wind Triton Kernel Mode') import torch as th import triton import triton.language as tl @triton.jit def IND4(a,b,c,d,nb,nc,nd): return ((a*nb+b)*nc+c)*nd+d @triton.jit def IND5(a,b,c,d,e,nb,nc,nd,ne): return (((a*nb+b)*nc+c)*nd+d)*ne+e @triton.jit def _prod(a,b): return a*b # inv(I-A) where A is a strictly lower triangular nxn matrix @triton.jit def tri_minv(A, n:tl.constexpr, prec:tl.constexpr): i = tl.arange(0,n) prod = (i[None,:]==i[:,None]).to(tl.float32) for j in range(n-1): prod += tl_dot(prec, prod, (A*((i[None,:]==j)*(i[:,None]>i[None,:]))).trans()) return prod.trans() @triton.jit def fw_attn_triton(w_,q_,k_,v_,a_,b_, s0_,y_,s_,sT_, B:tl.constexpr,T:tl.constexpr,H:tl.constexpr,C:tl.constexpr,dT:tl.constexpr, prec:tl.constexpr): bi = tl.program_id(1) hi = tl.program_id(0) i = tl.arange(0,C)[None,:] state = tl.load(s0_+IND4(bi,hi,i.trans(),i, H,C,C)).to(tl.float32) for t0 in range(T//dT): t = t0*dT+tl.arange(0,dT)[:,None] sw = tl.load(w_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sq = tl.load(q_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sk = tl.load(k_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sv = tl.load(v_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sa = tl.load(a_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sb = tl.load(b_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) w = (-sw.exp()).exp() fw = tl.reduce(w, 0, _prod, keep_dims=True) incl_pref = tl.cumprod(w,axis=0) non_incl_pref = incl_pref / w inv_incl_pref = 1 / incl_pref wq = sq * incl_pref wa = sa * non_incl_pref kwi = sk * inv_incl_pref bwi = sb * inv_incl_pref mask1 = (t > t.trans()) ab = tl_dot(prec, wa, bwi.trans()) * mask1 ak = tl_dot(prec, wa, kwi.trans()) * mask1 ab_inv = tri_minv(ab, dT, prec) ab_u = tl_dot(prec, ak, sv) + tl_dot(prec, wa, state.trans()) u = tl_dot(prec, ab_inv, ab_u) mask2 = (t >= t.trans()) qk = tl_dot(prec, wq, kwi.trans()) * mask2 qb = tl_dot(prec, wq, bwi.trans()) * mask2 yy = tl_dot(prec, qk, sv) + tl_dot(prec, qb, u) + tl_dot(prec, wq, state.trans()) tl.store(y_+IND4(bi,t,hi,i, T,H,C), yy.to(tl.bfloat16)) tl.store(s_+IND5(bi,hi,t0,i.trans(),i, H,T//dT,C,C), state.to(tl.float32)) state = state * fw + tl_dot(prec, sv.trans(), kwi*fw) + tl_dot(prec, u.trans(), bwi*fw) tl.store(sT_+IND4(bi,hi,i.trans(),i, H,C,C), state.to(tl.bfloat16)) @triton.jit def bw_attn_triton(w_,q_,k_,v_,a_,b_, dy_,s_,dsT_, dw_,dq_,dk_,dv_,da_,db_,ds0_, B:tl.constexpr,T:tl.constexpr,H:tl.constexpr,C:tl.constexpr,dT:tl.constexpr, prec:tl.constexpr): bi = tl.program_id(1) hi = tl.program_id(0) i = tl.arange(0,C)[None,:] dstate = tl.load(dsT_+IND4(bi,hi,i.trans(),i, H,C,C)).to(tl.float32) for t0 in range(T//dT-1,-1,-1): t = t0*dT+tl.arange(0,dT)[:,None] state = tl.load(s_+IND5(bi,hi,t0,i.trans(),i, H,T//dT,C,C)).to(tl.float32) sw = tl.load(w_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sq = tl.load(q_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sk = tl.load(k_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sv = tl.load(v_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sa = tl.load(a_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sb = tl.load(b_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sdy = tl.load(dy_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) dw_fac = -sw.exp() w = dw_fac.exp() fw = tl.reduce(w, 0, _prod, keep_dims=True) incl_pref = tl.cumprod(w,axis=0) non_incl_pref = incl_pref / w inv_incl_pref = 1 / incl_pref wq = sq * incl_pref wa = sa * non_incl_pref kwi = sk * inv_incl_pref bwi = sb * 
inv_incl_pref mask1 = (t > t.trans()) ab = tl_dot(prec, wa, bwi.trans()) * mask1 ak = tl_dot(prec, wa, kwi.trans()) * mask1 ab_inv = tri_minv(ab, dT, prec) ab_u = tl_dot(prec, ak, sv) + tl_dot(prec, wa, state.trans()) u = tl_dot(prec, ab_inv, ab_u) mask2 = (t >= t.trans()) qk = tl_dot(prec, wq, kwi.trans()) * mask2 qb = tl_dot(prec, wq, bwi.trans()) * mask2 du = tl_dot(prec, qb.trans(), sdy) + tl_dot(prec, bwi*fw, dstate.trans()) dab_u = tl_dot(prec, ab_inv.trans(), du) dv = tl_dot(prec, qk.trans(), sdy) + tl_dot(prec, kwi*fw, dstate.trans()) + tl_dot(prec, ak.trans(), dab_u) tl.store(dv_+IND4(bi,t,hi,i, T,H,C), dv.to(tl.bfloat16)) dab = tl_dot(prec, tl_dot(prec, ab_inv.trans(), du), u.trans()) * mask1 dak = tl_dot(prec, dab_u, sv.trans()) * mask1 dab_u_state = tl_dot(prec, dab_u, state) da = non_incl_pref * (tl_dot(prec, dab, bwi) + tl_dot(prec, dak, kwi) + dab_u_state) tl.store(da_+IND4(bi,t,hi,i, T,H,C), da.to(tl.bfloat16)) dqb = tl_dot(prec, sdy, u.trans()) * mask2 dqk = tl_dot(prec, sdy, sv.trans()) * mask2 dy_state = tl_dot(prec, sdy, state) dq = incl_pref * (tl_dot(prec, dqb, bwi) + tl_dot(prec, dqk, kwi) + dy_state) tl.store(dq_+IND4(bi,t,hi,i, T,H,C), dq.to(tl.bfloat16)) fw_u_dstate = fw * tl_dot(prec, u, dstate) db = inv_incl_pref * (tl_dot(prec, dab.trans(), wa) + tl_dot(prec, dqb.trans(), wq) + fw_u_dstate) tl.store(db_+IND4(bi,t,hi,i, T,H,C), db.to(tl.bfloat16)) fw_v_dstate = fw * tl_dot(prec, sv, dstate) dk = inv_incl_pref * (tl_dot(prec, dak.trans(), wa) + tl_dot(prec, dqk.trans(), wq) + fw_v_dstate) tl.store(dk_+IND4(bi,t,hi,i, T,H,C), dk.to(tl.bfloat16)) dw0 = fw * tl.sum(state*dstate, axis=0,keep_dims=True) for k in range(t0*dT,t0*dT+dT): lmask = (t<k).trans() A = (tl_dot(prec, dab*lmask, bwi) + tl_dot(prec, dak*lmask, kwi)) * wa * (t>k) A += (tl_dot(prec, dqb*lmask, bwi) + tl_dot(prec, dqk*lmask, kwi)) * wq * (t>=k) A += (fw_v_dstate*kwi + fw_u_dstate*bwi) * (t<k) A += dab_u_state*wa * (t>k) + dy_state*wq * (t>=k) dw = tl.sum(A, axis=0,keep_dims=True) + dw0 wk = tl.load(w_+IND4(bi,k,hi,i, T,H,C)).to(tl.float32) dw *= -wk.exp() tl.store(dw_+IND4(bi,k,hi,i, T,H,C), dw.to(tl.bfloat16)) dstate = dstate * fw + tl_dot(prec, sdy.trans(), wq) + tl_dot(prec, dab_u.trans(), wa) tl.store(ds0_+IND4(bi,hi,i.trans(),i, H,C,C), dstate.to(tl.bfloat16)) class TritonRWKV7(th.autograd.Function): @staticmethod def forward(ctx, w,q,k,v,z,b,s0, dot_prec): K = 16 B,T,H,C = w.shape s0 = th.zeros(B,H,C,C, dtype=w.dtype,device=w.device) if s0 is None else s0 y = th.empty_like(v) sT = th.empty_like(s0) s = th.zeros(B,H,T//K,C,C, dtype=th.float32,device=w.device) fw_attn_triton[(H,B)](w,q,k,v,z,b, s0,y,s,sT, B,T,H,C,K, dot_prec) ctx.dot_prec = dot_prec ctx.save_for_backward(w,q,k,v,z,b,s) return y, sT @staticmethod def backward(ctx, dy, dsT): K = 16 w,q,k,v,z,b,s = ctx.saved_tensors B,T,H,C = w.shape dw,dq,dk,dv,dz,db,ds0 = [th.empty_like(x) for x in [w,q,k,v,z,b,dsT]] bw_attn_triton[(H,B)](w,q,k,v,z,b, dy,s,dsT, dw,dq,dk,dv,dz,db,ds0, B,T,H,C,K, ctx.dot_prec) return dw,dq,dk,dv,dz,db,ds0,None @triton.jit def tl_dot(prec:tl.constexpr, a, b): if prec == 'fp32': return tl.dot(a.to(tl.float32),b.trans().to(tl.float32).trans(), allow_tf32=False) elif prec == 'tf32': return tl.dot(a.to(tl.float32),b.trans().to(tl.float32).trans(), allow_tf32=True) elif prec == 'bf16': return tl.dot(a.to(tl.bfloat16),b.trans().to(tl.bfloat16).trans(), allow_tf32=True) else: tl.static_assert(False) def RUN_CUDA_RWKV7g(r,w,k,v,a,b, HEAD_SIZE=64, dot_prec = 'fp32'): B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = 
[i.view(B,T,H,C) for i in [r,w,k,v,a,b]] s0 = th.zeros(B,H,C,C, dtype=th.bfloat16,device=w.device) return TritonRWKV7.apply(w,r,k,v,a,b,s0,dot_prec)[0].view(B,T,HC) def RUN_RWKV7_STATE(r, k, v, w, a, b, s, HEAD_SIZE=64, dot_prec = 'fp32'): B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] s0 = s return TritonRWKV7.apply(w,r,k,v,a,b,s0,dot_prec)[0].view(B,T,HC), None else: from torch.utils.cpp_extension import load HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) if 'x070' in os.environ["RWKV_MY_TESTING"]: CHUNK_LEN = 16 flags = ['-res-usage', f'-D_C_={HEAD_SIZE}', f"-D_CHUNK_LEN_={CHUNK_LEN}", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization"] load(name="wind_backstepping", sources=[f'cuda/wkv7_cuda.cu', 'cuda/wkv7_op.cpp'], is_python_module=False, verbose=True, extra_cuda_cflags=flags) class WindBackstepping(torch.autograd.Function): @staticmethod def forward(ctx, w,q,k,v,z,b): B,T,H,C = w.shape assert T%CHUNK_LEN == 0 assert all(i.dtype==torch.bfloat16 for i in [w,q,k,v,z,b]) assert all(i.is_contiguous() for i in [w,q,k,v,z,b]) y = torch.empty_like(v) s = torch.empty(B,H,T//CHUNK_LEN,C,C, dtype=torch.float32,device=w.device) sa = torch.empty(B,T,H,C, dtype=torch.float32,device=w.device) torch.ops.wind_backstepping.forward(w,q,k,v,z,b, y,s,sa) ctx.save_for_backward(w,q,k,v,z,b,s,sa) return y @staticmethod def backward(ctx, dy): assert all(i.dtype==torch.bfloat16 for i in [dy]) assert all(i.is_contiguous() for i in [dy]) w,q,k,v,z,b,s,sa = ctx.saved_tensors dw,dq,dk,dv,dz,db = [torch.empty_like(x) for x in [w,q,k,v,z,b]] torch.ops.wind_backstepping.backward(w,q,k,v,z,b, dy,s,sa, dw,dq,dk,dv,dz,db) return dw,dq,dk,dv,dz,db def RUN_CUDA_RWKV7g(q,w,k,v,a,b): B,T,HC = q.shape q,w,k,v,a,b = [i.view(B,T,HC//64,64) for i in [q,w,k,v,a,b]] return WindBackstepping.apply(w,q,k,v,a,b).view(B,T,HC) elif 'x060' in os.environ["RWKV_MY_TESTING"]: if os.environ["RWKV_TRAIN_TYPE"] == 'infctx': wkv6state_cuda = load(name="wkv6infctx", sources=["cuda/wkv6infctx_op.cpp", f"cuda/wkv6infctx_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6STATE(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u, s): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert s.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() assert s.is_contiguous() ctx.save_for_backward(r, k, v, w, u, s) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.forward(B, T, C, H, r, k, v, w, u, s, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, w, u, s = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = 
torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gs = torch.empty((B, H, C//H, C//H), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.backward(B, T, C, H, r, k, v, w, u, s, gy, gr, gk, gv, gw, gu, gs) gu = torch.sum(gu, 0).view(H, C//H) gs = torch.sum(gs, 0).view(H, C//H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu, gs) def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): x = WKV_6STATE.apply(B, T, C, H, r, k, v, w, u, s) return x, s elif os.environ["RWKV_TRAIN_TYPE"] == 'state': wkv6state_cuda = load(name="wkv6state", sources=["cuda/wkv6state_op.cpp", f"cuda/wkv6state_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6STATE(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u, s): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert s.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() assert s.is_contiguous() ctx.save_for_backward(r, k, v, w, u, s) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.forward(B, T, C, H, r, k, v, w, u, s, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, w, u, s = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gs = torch.empty((B, H, C//H, C//H), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.backward(B, T, C, H, r, k, v, w, u, s, gy, gr, gk, gv, gw, gu, gs) gu = torch.sum(gu, 0).view(H, C//H) gs = torch.sum(gs, 0).view(H, C//H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu, gs) def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): return WKV_6STATE.apply(B, T, C, H, r, k, v, w, u, s) else: wkv6_cuda = load(name="wkv6", sources=["cuda/wkv6_op.cpp", f"cuda/wkv6_cuda.cu"], 
verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ew = (-torch.exp(w.float())).contiguous() ctx.save_for_backward(r, k, v, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6_cuda.forward(B, T, C, H, r, k, v, ew, u, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, ew, u = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6_cuda.backward(B, T, C, H, r, k, v, ew, u, gy, gr, gk, gv, gw, gu) gu = torch.sum(gu, 0).view(H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu) def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u): return WKV_6.apply(B, T, C, H, r, k, v, w, u) else: wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) class WKV_5(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ew = (-torch.exp(w.float())).contiguous() eew = (torch.exp(ew)).contiguous() ctx.save_for_backward(r, k, v, eew, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, eew, ew, u = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, 
dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gw = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu) gw = torch.sum(gw, 0).view(H, C//H) gu = torch.sum(gu, 0).view(H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu) def RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w, u): return WKV_5.apply(B, T, C, H, r, k, v, w, u) ########################################################################################################
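Both code paths above rely on the same head-splitting convention: a (B, T, H*HEAD_SIZE) activation is either .view(B, T, H, HEAD_SIZE)'d (x070 / Triton paths) or rearranged to 'b h l d' (x060 / fla paths). The tiny check below, with made-up shapes, only confirms that the two layouts agree up to a transpose; it is illustrative and not part of the repository.

import torch
from einops import rearrange

B, T, H, HEAD_SIZE = 2, 8, 3, 64
x = torch.randn(B, T, H * HEAD_SIZE)
a = x.view(B, T, H, HEAD_SIZE)                 # layout used by the x070 paths
b = rearrange(x, 'b l (h d) -> b h l d', h=H)  # layout used by the x060 paths
assert torch.equal(a.permute(0, 2, 1, 3), b)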
@triton.jit
def IND4(a,b,c,d,nb,nc,nd):
    return ((a*nb+b)*nc+c)*nd+d
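The IND4 helper above is simply the row-major flat index of element [a, b, c, d] in a contiguous tensor of shape (na, nb, nc, nd), which is how the kernels address (B, T, H, C) and (B, H, C, C) buffers through a single pointer. A small host-side sanity check (illustrative, with made-up shapes):

import torch

def ind4(a, b, c, d, nb, nc, nd):
    return ((a * nb + b) * nc + c) * nd + d

B, T, H, C = 2, 3, 4, 5
x = torch.arange(B * T * H * C).reshape(B, T, H, C)
assert x.flatten()[ind4(1, 2, 3, 4, T, H, C)] == x[1, 2, 3, 4]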
JL-er/RWKV-PEFT
rwkvt/operator/rwkvop.py
https://github.com/JL-er/RWKV-PEFT/blob/5a311b2eb112cc361c451fc48bf16bea0a71c668/rwkvt/operator/rwkvop.py
from einops import rearrange import os, math, gc, importlib import torch ######################################################################################################## # CUDA Kernel ######################################################################################################## def RUN_CUDA_RWKV7g(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_RWKV7_STATE(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_RWKV7_INFCTX(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_CUDA_RWKV6(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_CUDA_RWKV6_STATE(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_CUDA_RWKV5(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') if os.environ["WKV"] == 'fla': if 'x070' in os.environ["RWKV_MY_TESTING"]: from rwkvfla.ops.rwkv7 import chunk_rwkv7 if os.environ["RWKV_TRAIN_TYPE"] == 'infctx': def RUN_RWKV7_INFCTX(r, k, v, w, a, b, s, HEAD_SIZE=64): # for State-tuning, infctx B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] # when use w=, -exp(w) is not needed, w=-torch.exp(w), otherwise, use log_w = -torch.exp(w) o, state = chunk_rwkv7(r=r, w=w, k=k, v=v, a=a, b=b, scale=1.0, initial_state=s, output_final_state=True, head_first=False) return o, state if os.environ["RWKV_TRAIN_TYPE"] == 'state': def RUN_RWKV7_STATE(r, k, v, w, a, b, s, HEAD_SIZE=64): # for State-tuning, infctx B,T,HC = w.shape C = HEAD_SIZE H = HC//C s = s.transpose(1, 2).expand(B,*s.shape) r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] # when use w=, -exp(w) is not needed, w=-torch.exp(w), otherwise, use log_w = -torch.exp(w) o, state = chunk_rwkv7(r=r, w=w, k=k, v=v, a=a, b=b, scale=1.0, initial_state=s, output_final_state=True, head_first=False) return o, state else: def RUN_CUDA_RWKV7g(r,w,k,v,a,b, HEAD_SIZE=64): #compatible with cuda implement B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] # when use w=, -exp(w) is not needed, w=-torch.exp(w), otherwise, use log_w = -torch.exp(w) o, _ = chunk_rwkv7(r=r, w=w, k=k, v=v, a=a, b=b, scale=1.0, initial_state=None, output_final_state=False, head_first=False) return o if 'x060' in os.environ["RWKV_MY_TESTING"]: from rwkvfla.ops.rwkv6 import chunk_rwkv6 if os.environ["RWKV_TRAIN_TYPE"] == 'infctx': def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): r = rearrange(r, 'b l (h d) -> b h l d', h = H) k = rearrange(k, 'b l (h d) -> b h l d', h = H) v = rearrange(v, 'b l (h d) -> b h l d', h = H) w = rearrange(-torch.exp(w), 'b l (h d) -> b h l d', h = H) o, state = chunk_rwkv6(r, k, v, w, u=u, scale=1., initial_state=s, output_final_state=True) x = rearrange(o, 'b h l d -> b l (h d)') return x, state elif os.environ["RWKV_TRAIN_TYPE"] == 'state': def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): r = rearrange(r, 'b l (h d) -> b h l d', h = H) k = rearrange(k, 'b l (h d) -> b h l d', h = H) v = rearrange(v, 'b l (h d) -> b h l d', h = H) w = rearrange(-torch.exp(w), 'b l (h d) -> b h l d', h = H) s = s.transpose(1, 2).expand(B,*s.shape) o,_ = chunk_rwkv6(r, k, v, w, u=u, scale=1., initial_state=s, output_final_state=False) x = rearrange(o, 'b h l d -> b l (h d)') return x else: def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u): r = rearrange(r, 'b l (h d) -> b h l d', h = H) k = rearrange(k, 'b l (h d) -> b h l d', h = H) v = rearrange(v, 'b l (h d) -> b h l d', h = H) w = rearrange(-torch.exp(w), 'b l (h d) -> b h 
l d', h = H) o,_ = chunk_rwkv6(r, k, v, w, u=u, scale=1., initial_state=None, output_final_state=False) x = rearrange(o, 'b h l d -> b l (h d)') return x elif os.environ["WKV"] == 'triton': print('x070 Wind Triton Kernel Mode') import torch as th import triton import triton.language as tl @triton.jit def IND4(a,b,c,d,nb,nc,nd): return ((a*nb+b)*nc+c)*nd+d @triton.jit def IND5(a,b,c,d,e,nb,nc,nd,ne): return (((a*nb+b)*nc+c)*nd+d)*ne+e @triton.jit def _prod(a,b): return a*b # inv(I-A) where A is a strictly lower triangular nxn matrix @triton.jit def tri_minv(A, n:tl.constexpr, prec:tl.constexpr): i = tl.arange(0,n) prod = (i[None,:]==i[:,None]).to(tl.float32) for j in range(n-1): prod += tl_dot(prec, prod, (A*((i[None,:]==j)*(i[:,None]>i[None,:]))).trans()) return prod.trans() @triton.jit def fw_attn_triton(w_,q_,k_,v_,a_,b_, s0_,y_,s_,sT_, B:tl.constexpr,T:tl.constexpr,H:tl.constexpr,C:tl.constexpr,dT:tl.constexpr, prec:tl.constexpr): bi = tl.program_id(1) hi = tl.program_id(0) i = tl.arange(0,C)[None,:] state = tl.load(s0_+IND4(bi,hi,i.trans(),i, H,C,C)).to(tl.float32) for t0 in range(T//dT): t = t0*dT+tl.arange(0,dT)[:,None] sw = tl.load(w_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sq = tl.load(q_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sk = tl.load(k_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sv = tl.load(v_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sa = tl.load(a_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sb = tl.load(b_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) w = (-sw.exp()).exp() fw = tl.reduce(w, 0, _prod, keep_dims=True) incl_pref = tl.cumprod(w,axis=0) non_incl_pref = incl_pref / w inv_incl_pref = 1 / incl_pref wq = sq * incl_pref wa = sa * non_incl_pref kwi = sk * inv_incl_pref bwi = sb * inv_incl_pref mask1 = (t > t.trans()) ab = tl_dot(prec, wa, bwi.trans()) * mask1 ak = tl_dot(prec, wa, kwi.trans()) * mask1 ab_inv = tri_minv(ab, dT, prec) ab_u = tl_dot(prec, ak, sv) + tl_dot(prec, wa, state.trans()) u = tl_dot(prec, ab_inv, ab_u) mask2 = (t >= t.trans()) qk = tl_dot(prec, wq, kwi.trans()) * mask2 qb = tl_dot(prec, wq, bwi.trans()) * mask2 yy = tl_dot(prec, qk, sv) + tl_dot(prec, qb, u) + tl_dot(prec, wq, state.trans()) tl.store(y_+IND4(bi,t,hi,i, T,H,C), yy.to(tl.bfloat16)) tl.store(s_+IND5(bi,hi,t0,i.trans(),i, H,T//dT,C,C), state.to(tl.float32)) state = state * fw + tl_dot(prec, sv.trans(), kwi*fw) + tl_dot(prec, u.trans(), bwi*fw) tl.store(sT_+IND4(bi,hi,i.trans(),i, H,C,C), state.to(tl.bfloat16)) @triton.jit def bw_attn_triton(w_,q_,k_,v_,a_,b_, dy_,s_,dsT_, dw_,dq_,dk_,dv_,da_,db_,ds0_, B:tl.constexpr,T:tl.constexpr,H:tl.constexpr,C:tl.constexpr,dT:tl.constexpr, prec:tl.constexpr): bi = tl.program_id(1) hi = tl.program_id(0) i = tl.arange(0,C)[None,:] dstate = tl.load(dsT_+IND4(bi,hi,i.trans(),i, H,C,C)).to(tl.float32) for t0 in range(T//dT-1,-1,-1): t = t0*dT+tl.arange(0,dT)[:,None] state = tl.load(s_+IND5(bi,hi,t0,i.trans(),i, H,T//dT,C,C)).to(tl.float32) sw = tl.load(w_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sq = tl.load(q_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sk = tl.load(k_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sv = tl.load(v_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sa = tl.load(a_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sb = tl.load(b_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sdy = tl.load(dy_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) dw_fac = -sw.exp() w = dw_fac.exp() fw = tl.reduce(w, 0, _prod, keep_dims=True) incl_pref = tl.cumprod(w,axis=0) non_incl_pref = incl_pref / w inv_incl_pref = 1 / incl_pref wq = sq * incl_pref wa = sa * non_incl_pref kwi = sk * inv_incl_pref bwi = sb * 
inv_incl_pref mask1 = (t > t.trans()) ab = tl_dot(prec, wa, bwi.trans()) * mask1 ak = tl_dot(prec, wa, kwi.trans()) * mask1 ab_inv = tri_minv(ab, dT, prec) ab_u = tl_dot(prec, ak, sv) + tl_dot(prec, wa, state.trans()) u = tl_dot(prec, ab_inv, ab_u) mask2 = (t >= t.trans()) qk = tl_dot(prec, wq, kwi.trans()) * mask2 qb = tl_dot(prec, wq, bwi.trans()) * mask2 du = tl_dot(prec, qb.trans(), sdy) + tl_dot(prec, bwi*fw, dstate.trans()) dab_u = tl_dot(prec, ab_inv.trans(), du) dv = tl_dot(prec, qk.trans(), sdy) + tl_dot(prec, kwi*fw, dstate.trans()) + tl_dot(prec, ak.trans(), dab_u) tl.store(dv_+IND4(bi,t,hi,i, T,H,C), dv.to(tl.bfloat16)) dab = tl_dot(prec, tl_dot(prec, ab_inv.trans(), du), u.trans()) * mask1 dak = tl_dot(prec, dab_u, sv.trans()) * mask1 dab_u_state = tl_dot(prec, dab_u, state) da = non_incl_pref * (tl_dot(prec, dab, bwi) + tl_dot(prec, dak, kwi) + dab_u_state) tl.store(da_+IND4(bi,t,hi,i, T,H,C), da.to(tl.bfloat16)) dqb = tl_dot(prec, sdy, u.trans()) * mask2 dqk = tl_dot(prec, sdy, sv.trans()) * mask2 dy_state = tl_dot(prec, sdy, state) dq = incl_pref * (tl_dot(prec, dqb, bwi) + tl_dot(prec, dqk, kwi) + dy_state) tl.store(dq_+IND4(bi,t,hi,i, T,H,C), dq.to(tl.bfloat16)) fw_u_dstate = fw * tl_dot(prec, u, dstate) db = inv_incl_pref * (tl_dot(prec, dab.trans(), wa) + tl_dot(prec, dqb.trans(), wq) + fw_u_dstate) tl.store(db_+IND4(bi,t,hi,i, T,H,C), db.to(tl.bfloat16)) fw_v_dstate = fw * tl_dot(prec, sv, dstate) dk = inv_incl_pref * (tl_dot(prec, dak.trans(), wa) + tl_dot(prec, dqk.trans(), wq) + fw_v_dstate) tl.store(dk_+IND4(bi,t,hi,i, T,H,C), dk.to(tl.bfloat16)) dw0 = fw * tl.sum(state*dstate, axis=0,keep_dims=True) for k in range(t0*dT,t0*dT+dT): lmask = (t<k).trans() A = (tl_dot(prec, dab*lmask, bwi) + tl_dot(prec, dak*lmask, kwi)) * wa * (t>k) A += (tl_dot(prec, dqb*lmask, bwi) + tl_dot(prec, dqk*lmask, kwi)) * wq * (t>=k) A += (fw_v_dstate*kwi + fw_u_dstate*bwi) * (t<k) A += dab_u_state*wa * (t>k) + dy_state*wq * (t>=k) dw = tl.sum(A, axis=0,keep_dims=True) + dw0 wk = tl.load(w_+IND4(bi,k,hi,i, T,H,C)).to(tl.float32) dw *= -wk.exp() tl.store(dw_+IND4(bi,k,hi,i, T,H,C), dw.to(tl.bfloat16)) dstate = dstate * fw + tl_dot(prec, sdy.trans(), wq) + tl_dot(prec, dab_u.trans(), wa) tl.store(ds0_+IND4(bi,hi,i.trans(),i, H,C,C), dstate.to(tl.bfloat16)) class TritonRWKV7(th.autograd.Function): @staticmethod def forward(ctx, w,q,k,v,z,b,s0, dot_prec): K = 16 B,T,H,C = w.shape s0 = th.zeros(B,H,C,C, dtype=w.dtype,device=w.device) if s0 is None else s0 y = th.empty_like(v) sT = th.empty_like(s0) s = th.zeros(B,H,T//K,C,C, dtype=th.float32,device=w.device) fw_attn_triton[(H,B)](w,q,k,v,z,b, s0,y,s,sT, B,T,H,C,K, dot_prec) ctx.dot_prec = dot_prec ctx.save_for_backward(w,q,k,v,z,b,s) return y, sT @staticmethod def backward(ctx, dy, dsT): K = 16 w,q,k,v,z,b,s = ctx.saved_tensors B,T,H,C = w.shape dw,dq,dk,dv,dz,db,ds0 = [th.empty_like(x) for x in [w,q,k,v,z,b,dsT]] bw_attn_triton[(H,B)](w,q,k,v,z,b, dy,s,dsT, dw,dq,dk,dv,dz,db,ds0, B,T,H,C,K, ctx.dot_prec) return dw,dq,dk,dv,dz,db,ds0,None @triton.jit def tl_dot(prec:tl.constexpr, a, b): if prec == 'fp32': return tl.dot(a.to(tl.float32),b.trans().to(tl.float32).trans(), allow_tf32=False) elif prec == 'tf32': return tl.dot(a.to(tl.float32),b.trans().to(tl.float32).trans(), allow_tf32=True) elif prec == 'bf16': return tl.dot(a.to(tl.bfloat16),b.trans().to(tl.bfloat16).trans(), allow_tf32=True) else: tl.static_assert(False) def RUN_CUDA_RWKV7g(r,w,k,v,a,b, HEAD_SIZE=64, dot_prec = 'fp32'): B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = 
[i.view(B,T,H,C) for i in [r,w,k,v,a,b]] s0 = th.zeros(B,H,C,C, dtype=th.bfloat16,device=w.device) return TritonRWKV7.apply(w,r,k,v,a,b,s0,dot_prec)[0].view(B,T,HC) def RUN_RWKV7_STATE(r, k, v, w, a, b, s, HEAD_SIZE=64, dot_prec = 'fp32'): B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] s0 = s return TritonRWKV7.apply(w,r,k,v,a,b,s0,dot_prec)[0].view(B,T,HC), None else: from torch.utils.cpp_extension import load HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) if 'x070' in os.environ["RWKV_MY_TESTING"]: CHUNK_LEN = 16 flags = ['-res-usage', f'-D_C_={HEAD_SIZE}', f"-D_CHUNK_LEN_={CHUNK_LEN}", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization"] load(name="wind_backstepping", sources=[f'cuda/wkv7_cuda.cu', 'cuda/wkv7_op.cpp'], is_python_module=False, verbose=True, extra_cuda_cflags=flags) class WindBackstepping(torch.autograd.Function): @staticmethod def forward(ctx, w,q,k,v,z,b): B,T,H,C = w.shape assert T%CHUNK_LEN == 0 assert all(i.dtype==torch.bfloat16 for i in [w,q,k,v,z,b]) assert all(i.is_contiguous() for i in [w,q,k,v,z,b]) y = torch.empty_like(v) s = torch.empty(B,H,T//CHUNK_LEN,C,C, dtype=torch.float32,device=w.device) sa = torch.empty(B,T,H,C, dtype=torch.float32,device=w.device) torch.ops.wind_backstepping.forward(w,q,k,v,z,b, y,s,sa) ctx.save_for_backward(w,q,k,v,z,b,s,sa) return y @staticmethod def backward(ctx, dy): assert all(i.dtype==torch.bfloat16 for i in [dy]) assert all(i.is_contiguous() for i in [dy]) w,q,k,v,z,b,s,sa = ctx.saved_tensors dw,dq,dk,dv,dz,db = [torch.empty_like(x) for x in [w,q,k,v,z,b]] torch.ops.wind_backstepping.backward(w,q,k,v,z,b, dy,s,sa, dw,dq,dk,dv,dz,db) return dw,dq,dk,dv,dz,db def RUN_CUDA_RWKV7g(q,w,k,v,a,b): B,T,HC = q.shape q,w,k,v,a,b = [i.view(B,T,HC//64,64) for i in [q,w,k,v,a,b]] return WindBackstepping.apply(w,q,k,v,a,b).view(B,T,HC) elif 'x060' in os.environ["RWKV_MY_TESTING"]: if os.environ["RWKV_TRAIN_TYPE"] == 'infctx': wkv6state_cuda = load(name="wkv6infctx", sources=["cuda/wkv6infctx_op.cpp", f"cuda/wkv6infctx_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6STATE(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u, s): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert s.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() assert s.is_contiguous() ctx.save_for_backward(r, k, v, w, u, s) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.forward(B, T, C, H, r, k, v, w, u, s, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, w, u, s = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = 
torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gs = torch.empty((B, H, C//H, C//H), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.backward(B, T, C, H, r, k, v, w, u, s, gy, gr, gk, gv, gw, gu, gs) gu = torch.sum(gu, 0).view(H, C//H) gs = torch.sum(gs, 0).view(H, C//H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu, gs) def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): x = WKV_6STATE.apply(B, T, C, H, r, k, v, w, u, s) return x, s elif os.environ["RWKV_TRAIN_TYPE"] == 'state': wkv6state_cuda = load(name="wkv6state", sources=["cuda/wkv6state_op.cpp", f"cuda/wkv6state_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6STATE(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u, s): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert s.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() assert s.is_contiguous() ctx.save_for_backward(r, k, v, w, u, s) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.forward(B, T, C, H, r, k, v, w, u, s, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, w, u, s = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gs = torch.empty((B, H, C//H, C//H), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.backward(B, T, C, H, r, k, v, w, u, s, gy, gr, gk, gv, gw, gu, gs) gu = torch.sum(gu, 0).view(H, C//H) gs = torch.sum(gs, 0).view(H, C//H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu, gs) def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): return WKV_6STATE.apply(B, T, C, H, r, k, v, w, u, s) else: wkv6_cuda = load(name="wkv6", sources=["cuda/wkv6_op.cpp", f"cuda/wkv6_cuda.cu"], 
verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ew = (-torch.exp(w.float())).contiguous() ctx.save_for_backward(r, k, v, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6_cuda.forward(B, T, C, H, r, k, v, ew, u, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, ew, u = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6_cuda.backward(B, T, C, H, r, k, v, ew, u, gy, gr, gk, gv, gw, gu) gu = torch.sum(gu, 0).view(H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu) def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u): return WKV_6.apply(B, T, C, H, r, k, v, w, u) else: wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) class WKV_5(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ew = (-torch.exp(w.float())).contiguous() eew = (torch.exp(ew)).contiguous() ctx.save_for_backward(r, k, v, eew, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, eew, ew, u = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, 
dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gw = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu) gw = torch.sum(gw, 0).view(H, C//H) gu = torch.sum(gu, 0).view(H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu) def RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w, u): return WKV_5.apply(B, T, C, H, r, k, v, w, u) ########################################################################################################
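The module above picks one of three backends at import time from environment variables: WKV ('fla' for the flash-linear-attention ops, 'triton' for the Wind Triton kernels, anything else compiles the CUDA extension), RWKV_MY_TESTING (model generation, e.g. 'x070' or 'x060') and RWKV_TRAIN_TYPE ('infctx', 'state', or empty). A minimal usage sketch follows; it assumes the file is importable as rwkvt.operator.rwkvop (the file_path recorded in this dump), that the rwkvfla package is installed, and the tensor shapes are purely illustrative.

import os

# Assumed configuration: select the fla backend for RWKV-7 ("x070"), plain training.
# These variables must be set before importing the module, because the backend
# branch is chosen at import time.
os.environ["WKV"] = "fla"
os.environ["RWKV_MY_TESTING"] = "x070"
os.environ["RWKV_TRAIN_TYPE"] = ""   # neither 'infctx' nor 'state'

import torch
from rwkvt.operator.rwkvop import RUN_CUDA_RWKV7g   # assumed module path

B, T, H, C = 2, 32, 8, 64            # batch, sequence length, heads, head size (illustrative)
r, w, k, v, a, b = (torch.randn(B, T, H * C, device="cuda", dtype=torch.bfloat16)
                    for _ in range(6))
out = RUN_CUDA_RWKV7g(r, w, k, v, a, b, HEAD_SIZE=C)   # (B, T, H*C) output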
@triton.jit
def IND5(a,b,c,d,e,nb,nc,nd,ne): return (((a*nb+b)*nc+c)*nd+d)*ne+e
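IND4 and IND5 compute row-major flat offsets into logically 4-D and 5-D arrays that the Triton kernels address through raw pointers; only the trailing extents enter the formula, the leading one never does. A quick sanity check of the arithmetic outside Triton, added here as an illustration and not part of the repository:

import numpy as np

def ind5(a, b, c, d, e, nb, nc, nd, ne):
    # Same arithmetic as the Triton IND5 above, in plain Python.
    return (((a * nb + b) * nc + c) * nd + d) * ne + e

shape = (3, 4, 5, 6, 7)      # arbitrary 5-D shape; the leading extent 3 is never needed
idx = (2, 1, 3, 0, 5)
assert ind5(*idx, *shape[1:]) == np.ravel_multi_index(idx, shape)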
JL-er/RWKV-PEFT
rwkvt/operator/rwkvop.py
https://github.com/JL-er/RWKV-PEFT/blob/5a311b2eb112cc361c451fc48bf16bea0a71c668/rwkvt/operator/rwkvop.py
@triton.jit
def _prod(a,b): return a*b

# inv(I-A) where A is a strictly lower triangular nxn matrix
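_prod is the binary combine function handed to tl.reduce in the kernels above: reducing the decay tensor w over the chunk (time) axis with multiplication yields fw, the total decay the running state picks up across one chunk. Outside Triton the same reduction is an ordinary product along a dimension; the shapes below are made up for illustration.

import torch

dT, C = 16, 64                       # chunk length and head size (illustrative)
w = torch.rand(dT, C) * 0.5 + 0.5    # per-step decay factors in (0, 1), as exp(-exp(.)) would give

# fw = tl.reduce(w, 0, _prod, keep_dims=True) corresponds to a product over the chunk axis.
fw = w.prod(dim=0, keepdim=True)     # shape (1, C)
assert torch.allclose(fw, torch.exp(torch.log(w).sum(dim=0, keepdim=True)))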
JL-er/RWKV-PEFT
rwkvt/operator/rwkvop.py
https://github.com/JL-er/RWKV-PEFT/blob/5a311b2eb112cc361c451fc48bf16bea0a71c668/rwkvt/operator/rwkvop.py
@triton.jit
def tri_minv(A, n:tl.constexpr, prec:tl.constexpr):
    i = tl.arange(0,n)
    prod = (i[None,:]==i[:,None]).to(tl.float32)
    for j in range(n-1):
        prod += tl_dot(prec, prod, (A*((i[None,:]==j)*(i[:,None]>i[None,:]))).trans())
    return prod.trans()
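For readers checking the algebra: tri_minv builds inv(I - A) for a strictly lower-triangular A (the comment in the full source above says as much) by accumulating a series that terminates because A is nilpotent. The dense PyTorch sketch below is not part of the repository; it is only a CPU reference, under the assumption of a small n x n strictly lower-triangular input, that makes the identity easy to verify.

import torch

def tri_minv_reference(A: torch.Tensor) -> torch.Tensor:
    # Dense reference for what tri_minv computes: inv(I - A) for strictly lower-triangular A.
    # Since A is nilpotent (A^n == 0), the Neumann series I + A + A^2 + ... + A^(n-1) is exact.
    n = A.shape[0]
    out = torch.eye(n, dtype=A.dtype)
    power = torch.eye(n, dtype=A.dtype)
    for _ in range(n - 1):
        power = power @ A
        out = out + power
    return out

n = 16
A = torch.randn(n, n, dtype=torch.float64).tril(-1)   # strictly lower triangular
inv = tri_minv_reference(A)
assert torch.allclose(inv @ (torch.eye(n, dtype=torch.float64) - A), torch.eye(n, dtype=torch.float64))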
JL-er/RWKV-PEFT
rwkvt/operator/rwkvop.py
https://github.com/JL-er/RWKV-PEFT/blob/5a311b2eb112cc361c451fc48bf16bea0a71c668/rwkvt/operator/rwkvop.py
from einops import rearrange import os, math, gc, importlib import torch ######################################################################################################## # CUDA Kernel ######################################################################################################## def RUN_CUDA_RWKV7g(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_RWKV7_STATE(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_RWKV7_INFCTX(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_CUDA_RWKV6(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_CUDA_RWKV6_STATE(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_CUDA_RWKV5(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') if os.environ["WKV"] == 'fla': if 'x070' in os.environ["RWKV_MY_TESTING"]: from rwkvfla.ops.rwkv7 import chunk_rwkv7 if os.environ["RWKV_TRAIN_TYPE"] == 'infctx': def RUN_RWKV7_INFCTX(r, k, v, w, a, b, s, HEAD_SIZE=64): # for State-tuning, infctx B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] # when use w=, -exp(w) is not needed, w=-torch.exp(w), otherwise, use log_w = -torch.exp(w) o, state = chunk_rwkv7(r=r, w=w, k=k, v=v, a=a, b=b, scale=1.0, initial_state=s, output_final_state=True, head_first=False) return o, state if os.environ["RWKV_TRAIN_TYPE"] == 'state': def RUN_RWKV7_STATE(r, k, v, w, a, b, s, HEAD_SIZE=64): # for State-tuning, infctx B,T,HC = w.shape C = HEAD_SIZE H = HC//C s = s.transpose(1, 2).expand(B,*s.shape) r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] # when use w=, -exp(w) is not needed, w=-torch.exp(w), otherwise, use log_w = -torch.exp(w) o, state = chunk_rwkv7(r=r, w=w, k=k, v=v, a=a, b=b, scale=1.0, initial_state=s, output_final_state=True, head_first=False) return o, state else: def RUN_CUDA_RWKV7g(r,w,k,v,a,b, HEAD_SIZE=64): #compatible with cuda implement B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] # when use w=, -exp(w) is not needed, w=-torch.exp(w), otherwise, use log_w = -torch.exp(w) o, _ = chunk_rwkv7(r=r, w=w, k=k, v=v, a=a, b=b, scale=1.0, initial_state=None, output_final_state=False, head_first=False) return o if 'x060' in os.environ["RWKV_MY_TESTING"]: from rwkvfla.ops.rwkv6 import chunk_rwkv6 if os.environ["RWKV_TRAIN_TYPE"] == 'infctx': def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): r = rearrange(r, 'b l (h d) -> b h l d', h = H) k = rearrange(k, 'b l (h d) -> b h l d', h = H) v = rearrange(v, 'b l (h d) -> b h l d', h = H) w = rearrange(-torch.exp(w), 'b l (h d) -> b h l d', h = H) o, state = chunk_rwkv6(r, k, v, w, u=u, scale=1., initial_state=s, output_final_state=True) x = rearrange(o, 'b h l d -> b l (h d)') return x, state elif os.environ["RWKV_TRAIN_TYPE"] == 'state': def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): r = rearrange(r, 'b l (h d) -> b h l d', h = H) k = rearrange(k, 'b l (h d) -> b h l d', h = H) v = rearrange(v, 'b l (h d) -> b h l d', h = H) w = rearrange(-torch.exp(w), 'b l (h d) -> b h l d', h = H) s = s.transpose(1, 2).expand(B,*s.shape) o,_ = chunk_rwkv6(r, k, v, w, u=u, scale=1., initial_state=s, output_final_state=False) x = rearrange(o, 'b h l d -> b l (h d)') return x else: def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u): r = rearrange(r, 'b l (h d) -> b h l d', h = H) k = rearrange(k, 'b l (h d) -> b h l d', h = H) v = rearrange(v, 'b l (h d) -> b h l d', h = H) w = rearrange(-torch.exp(w), 'b l (h d) -> b h 
l d', h = H) o,_ = chunk_rwkv6(r, k, v, w, u=u, scale=1., initial_state=None, output_final_state=False) x = rearrange(o, 'b h l d -> b l (h d)') return x elif os.environ["WKV"] == 'triton': print('x070 Wind Triton Kernel Mode') import torch as th import triton import triton.language as tl @triton.jit def IND4(a,b,c,d,nb,nc,nd): return ((a*nb+b)*nc+c)*nd+d @triton.jit def IND5(a,b,c,d,e,nb,nc,nd,ne): return (((a*nb+b)*nc+c)*nd+d)*ne+e @triton.jit def _prod(a,b): return a*b # inv(I-A) where A is a strictly lower triangular nxn matrix @triton.jit def tri_minv(A, n:tl.constexpr, prec:tl.constexpr): i = tl.arange(0,n) prod = (i[None,:]==i[:,None]).to(tl.float32) for j in range(n-1): prod += tl_dot(prec, prod, (A*((i[None,:]==j)*(i[:,None]>i[None,:]))).trans()) return prod.trans() @triton.jit def fw_attn_triton(w_,q_,k_,v_,a_,b_, s0_,y_,s_,sT_, B:tl.constexpr,T:tl.constexpr,H:tl.constexpr,C:tl.constexpr,dT:tl.constexpr, prec:tl.constexpr): bi = tl.program_id(1) hi = tl.program_id(0) i = tl.arange(0,C)[None,:] state = tl.load(s0_+IND4(bi,hi,i.trans(),i, H,C,C)).to(tl.float32) for t0 in range(T//dT): t = t0*dT+tl.arange(0,dT)[:,None] sw = tl.load(w_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sq = tl.load(q_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sk = tl.load(k_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sv = tl.load(v_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sa = tl.load(a_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sb = tl.load(b_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) w = (-sw.exp()).exp() fw = tl.reduce(w, 0, _prod, keep_dims=True) incl_pref = tl.cumprod(w,axis=0) non_incl_pref = incl_pref / w inv_incl_pref = 1 / incl_pref wq = sq * incl_pref wa = sa * non_incl_pref kwi = sk * inv_incl_pref bwi = sb * inv_incl_pref mask1 = (t > t.trans()) ab = tl_dot(prec, wa, bwi.trans()) * mask1 ak = tl_dot(prec, wa, kwi.trans()) * mask1 ab_inv = tri_minv(ab, dT, prec) ab_u = tl_dot(prec, ak, sv) + tl_dot(prec, wa, state.trans()) u = tl_dot(prec, ab_inv, ab_u) mask2 = (t >= t.trans()) qk = tl_dot(prec, wq, kwi.trans()) * mask2 qb = tl_dot(prec, wq, bwi.trans()) * mask2 yy = tl_dot(prec, qk, sv) + tl_dot(prec, qb, u) + tl_dot(prec, wq, state.trans()) tl.store(y_+IND4(bi,t,hi,i, T,H,C), yy.to(tl.bfloat16)) tl.store(s_+IND5(bi,hi,t0,i.trans(),i, H,T//dT,C,C), state.to(tl.float32)) state = state * fw + tl_dot(prec, sv.trans(), kwi*fw) + tl_dot(prec, u.trans(), bwi*fw) tl.store(sT_+IND4(bi,hi,i.trans(),i, H,C,C), state.to(tl.bfloat16)) @triton.jit def bw_attn_triton(w_,q_,k_,v_,a_,b_, dy_,s_,dsT_, dw_,dq_,dk_,dv_,da_,db_,ds0_, B:tl.constexpr,T:tl.constexpr,H:tl.constexpr,C:tl.constexpr,dT:tl.constexpr, prec:tl.constexpr): bi = tl.program_id(1) hi = tl.program_id(0) i = tl.arange(0,C)[None,:] dstate = tl.load(dsT_+IND4(bi,hi,i.trans(),i, H,C,C)).to(tl.float32) for t0 in range(T//dT-1,-1,-1): t = t0*dT+tl.arange(0,dT)[:,None] state = tl.load(s_+IND5(bi,hi,t0,i.trans(),i, H,T//dT,C,C)).to(tl.float32) sw = tl.load(w_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sq = tl.load(q_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sk = tl.load(k_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sv = tl.load(v_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sa = tl.load(a_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sb = tl.load(b_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sdy = tl.load(dy_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) dw_fac = -sw.exp() w = dw_fac.exp() fw = tl.reduce(w, 0, _prod, keep_dims=True) incl_pref = tl.cumprod(w,axis=0) non_incl_pref = incl_pref / w inv_incl_pref = 1 / incl_pref wq = sq * incl_pref wa = sa * non_incl_pref kwi = sk * inv_incl_pref bwi = sb * 
inv_incl_pref mask1 = (t > t.trans()) ab = tl_dot(prec, wa, bwi.trans()) * mask1 ak = tl_dot(prec, wa, kwi.trans()) * mask1 ab_inv = tri_minv(ab, dT, prec) ab_u = tl_dot(prec, ak, sv) + tl_dot(prec, wa, state.trans()) u = tl_dot(prec, ab_inv, ab_u) mask2 = (t >= t.trans()) qk = tl_dot(prec, wq, kwi.trans()) * mask2 qb = tl_dot(prec, wq, bwi.trans()) * mask2 du = tl_dot(prec, qb.trans(), sdy) + tl_dot(prec, bwi*fw, dstate.trans()) dab_u = tl_dot(prec, ab_inv.trans(), du) dv = tl_dot(prec, qk.trans(), sdy) + tl_dot(prec, kwi*fw, dstate.trans()) + tl_dot(prec, ak.trans(), dab_u) tl.store(dv_+IND4(bi,t,hi,i, T,H,C), dv.to(tl.bfloat16)) dab = tl_dot(prec, tl_dot(prec, ab_inv.trans(), du), u.trans()) * mask1 dak = tl_dot(prec, dab_u, sv.trans()) * mask1 dab_u_state = tl_dot(prec, dab_u, state) da = non_incl_pref * (tl_dot(prec, dab, bwi) + tl_dot(prec, dak, kwi) + dab_u_state) tl.store(da_+IND4(bi,t,hi,i, T,H,C), da.to(tl.bfloat16)) dqb = tl_dot(prec, sdy, u.trans()) * mask2 dqk = tl_dot(prec, sdy, sv.trans()) * mask2 dy_state = tl_dot(prec, sdy, state) dq = incl_pref * (tl_dot(prec, dqb, bwi) + tl_dot(prec, dqk, kwi) + dy_state) tl.store(dq_+IND4(bi,t,hi,i, T,H,C), dq.to(tl.bfloat16)) fw_u_dstate = fw * tl_dot(prec, u, dstate) db = inv_incl_pref * (tl_dot(prec, dab.trans(), wa) + tl_dot(prec, dqb.trans(), wq) + fw_u_dstate) tl.store(db_+IND4(bi,t,hi,i, T,H,C), db.to(tl.bfloat16)) fw_v_dstate = fw * tl_dot(prec, sv, dstate) dk = inv_incl_pref * (tl_dot(prec, dak.trans(), wa) + tl_dot(prec, dqk.trans(), wq) + fw_v_dstate) tl.store(dk_+IND4(bi,t,hi,i, T,H,C), dk.to(tl.bfloat16)) dw0 = fw * tl.sum(state*dstate, axis=0,keep_dims=True) for k in range(t0*dT,t0*dT+dT): lmask = (t<k).trans() A = (tl_dot(prec, dab*lmask, bwi) + tl_dot(prec, dak*lmask, kwi)) * wa * (t>k) A += (tl_dot(prec, dqb*lmask, bwi) + tl_dot(prec, dqk*lmask, kwi)) * wq * (t>=k) A += (fw_v_dstate*kwi + fw_u_dstate*bwi) * (t<k) A += dab_u_state*wa * (t>k) + dy_state*wq * (t>=k) dw = tl.sum(A, axis=0,keep_dims=True) + dw0 wk = tl.load(w_+IND4(bi,k,hi,i, T,H,C)).to(tl.float32) dw *= -wk.exp() tl.store(dw_+IND4(bi,k,hi,i, T,H,C), dw.to(tl.bfloat16)) dstate = dstate * fw + tl_dot(prec, sdy.trans(), wq) + tl_dot(prec, dab_u.trans(), wa) tl.store(ds0_+IND4(bi,hi,i.trans(),i, H,C,C), dstate.to(tl.bfloat16)) class TritonRWKV7(th.autograd.Function): @staticmethod def forward(ctx, w,q,k,v,z,b,s0, dot_prec): K = 16 B,T,H,C = w.shape s0 = th.zeros(B,H,C,C, dtype=w.dtype,device=w.device) if s0 is None else s0 y = th.empty_like(v) sT = th.empty_like(s0) s = th.zeros(B,H,T//K,C,C, dtype=th.float32,device=w.device) fw_attn_triton[(H,B)](w,q,k,v,z,b, s0,y,s,sT, B,T,H,C,K, dot_prec) ctx.dot_prec = dot_prec ctx.save_for_backward(w,q,k,v,z,b,s) return y, sT @staticmethod def backward(ctx, dy, dsT): K = 16 w,q,k,v,z,b,s = ctx.saved_tensors B,T,H,C = w.shape dw,dq,dk,dv,dz,db,ds0 = [th.empty_like(x) for x in [w,q,k,v,z,b,dsT]] bw_attn_triton[(H,B)](w,q,k,v,z,b, dy,s,dsT, dw,dq,dk,dv,dz,db,ds0, B,T,H,C,K, ctx.dot_prec) return dw,dq,dk,dv,dz,db,ds0,None @triton.jit def tl_dot(prec:tl.constexpr, a, b): if prec == 'fp32': return tl.dot(a.to(tl.float32),b.trans().to(tl.float32).trans(), allow_tf32=False) elif prec == 'tf32': return tl.dot(a.to(tl.float32),b.trans().to(tl.float32).trans(), allow_tf32=True) elif prec == 'bf16': return tl.dot(a.to(tl.bfloat16),b.trans().to(tl.bfloat16).trans(), allow_tf32=True) else: tl.static_assert(False) def RUN_CUDA_RWKV7g(r,w,k,v,a,b, HEAD_SIZE=64, dot_prec = 'fp32'): B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = 
[i.view(B,T,H,C) for i in [r,w,k,v,a,b]] s0 = th.zeros(B,H,C,C, dtype=th.bfloat16,device=w.device) return TritonRWKV7.apply(w,r,k,v,a,b,s0,dot_prec)[0].view(B,T,HC) def RUN_RWKV7_STATE(r, k, v, w, a, b, s, HEAD_SIZE=64, dot_prec = 'fp32'): B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] s0 = s return TritonRWKV7.apply(w,r,k,v,a,b,s0,dot_prec)[0].view(B,T,HC), None else: from torch.utils.cpp_extension import load HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) if 'x070' in os.environ["RWKV_MY_TESTING"]: CHUNK_LEN = 16 flags = ['-res-usage', f'-D_C_={HEAD_SIZE}', f"-D_CHUNK_LEN_={CHUNK_LEN}", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization"] load(name="wind_backstepping", sources=[f'cuda/wkv7_cuda.cu', 'cuda/wkv7_op.cpp'], is_python_module=False, verbose=True, extra_cuda_cflags=flags) class WindBackstepping(torch.autograd.Function): @staticmethod def forward(ctx, w,q,k,v,z,b): B,T,H,C = w.shape assert T%CHUNK_LEN == 0 assert all(i.dtype==torch.bfloat16 for i in [w,q,k,v,z,b]) assert all(i.is_contiguous() for i in [w,q,k,v,z,b]) y = torch.empty_like(v) s = torch.empty(B,H,T//CHUNK_LEN,C,C, dtype=torch.float32,device=w.device) sa = torch.empty(B,T,H,C, dtype=torch.float32,device=w.device) torch.ops.wind_backstepping.forward(w,q,k,v,z,b, y,s,sa) ctx.save_for_backward(w,q,k,v,z,b,s,sa) return y @staticmethod def backward(ctx, dy): assert all(i.dtype==torch.bfloat16 for i in [dy]) assert all(i.is_contiguous() for i in [dy]) w,q,k,v,z,b,s,sa = ctx.saved_tensors dw,dq,dk,dv,dz,db = [torch.empty_like(x) for x in [w,q,k,v,z,b]] torch.ops.wind_backstepping.backward(w,q,k,v,z,b, dy,s,sa, dw,dq,dk,dv,dz,db) return dw,dq,dk,dv,dz,db def RUN_CUDA_RWKV7g(q,w,k,v,a,b): B,T,HC = q.shape q,w,k,v,a,b = [i.view(B,T,HC//64,64) for i in [q,w,k,v,a,b]] return WindBackstepping.apply(w,q,k,v,a,b).view(B,T,HC) elif 'x060' in os.environ["RWKV_MY_TESTING"]: if os.environ["RWKV_TRAIN_TYPE"] == 'infctx': wkv6state_cuda = load(name="wkv6infctx", sources=["cuda/wkv6infctx_op.cpp", f"cuda/wkv6infctx_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6STATE(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u, s): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert s.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() assert s.is_contiguous() ctx.save_for_backward(r, k, v, w, u, s) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.forward(B, T, C, H, r, k, v, w, u, s, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, w, u, s = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = 
torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gs = torch.empty((B, H, C//H, C//H), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.backward(B, T, C, H, r, k, v, w, u, s, gy, gr, gk, gv, gw, gu, gs) gu = torch.sum(gu, 0).view(H, C//H) gs = torch.sum(gs, 0).view(H, C//H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu, gs) def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): x = WKV_6STATE.apply(B, T, C, H, r, k, v, w, u, s) return x, s elif os.environ["RWKV_TRAIN_TYPE"] == 'state': wkv6state_cuda = load(name="wkv6state", sources=["cuda/wkv6state_op.cpp", f"cuda/wkv6state_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6STATE(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u, s): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert s.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() assert s.is_contiguous() ctx.save_for_backward(r, k, v, w, u, s) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.forward(B, T, C, H, r, k, v, w, u, s, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, w, u, s = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gs = torch.empty((B, H, C//H, C//H), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.backward(B, T, C, H, r, k, v, w, u, s, gy, gr, gk, gv, gw, gu, gs) gu = torch.sum(gu, 0).view(H, C//H) gs = torch.sum(gs, 0).view(H, C//H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu, gs) def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): return WKV_6STATE.apply(B, T, C, H, r, k, v, w, u, s) else: wkv6_cuda = load(name="wkv6", sources=["cuda/wkv6_op.cpp", f"cuda/wkv6_cuda.cu"], 
verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ew = (-torch.exp(w.float())).contiguous() ctx.save_for_backward(r, k, v, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6_cuda.forward(B, T, C, H, r, k, v, ew, u, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, ew, u = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6_cuda.backward(B, T, C, H, r, k, v, ew, u, gy, gr, gk, gv, gw, gu) gu = torch.sum(gu, 0).view(H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu) def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u): return WKV_6.apply(B, T, C, H, r, k, v, w, u) else: wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) class WKV_5(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ew = (-torch.exp(w.float())).contiguous() eew = (torch.exp(ew)).contiguous() ctx.save_for_backward(r, k, v, eew, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, eew, ew, u = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, 
dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gw = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu) gw = torch.sum(gw, 0).view(H, C//H) gu = torch.sum(gu, 0).view(H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu) def RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w, u): return WKV_5.apply(B, T, C, H, r, k, v, w, u) ########################################################################################################
@triton.jit
def fw_attn_triton(w_,q_,k_,v_,a_,b_, s0_,y_,s_,sT_,
                   B:tl.constexpr,T:tl.constexpr,H:tl.constexpr,C:tl.constexpr,dT:tl.constexpr,
                   prec:tl.constexpr):
    bi = tl.program_id(1)
    hi = tl.program_id(0)

    i = tl.arange(0,C)[None,:]
    state = tl.load(s0_+IND4(bi,hi,i.trans(),i, H,C,C)).to(tl.float32)

    for t0 in range(T//dT):
        t = t0*dT+tl.arange(0,dT)[:,None]

        sw = tl.load(w_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)
        sq = tl.load(q_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)
        sk = tl.load(k_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)
        sv = tl.load(v_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)
        sa = tl.load(a_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)
        sb = tl.load(b_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)

        w = (-sw.exp()).exp()
        fw = tl.reduce(w, 0, _prod, keep_dims=True)
        incl_pref = tl.cumprod(w,axis=0)
        non_incl_pref = incl_pref / w
        inv_incl_pref = 1 / incl_pref

        wq = sq * incl_pref
        wa = sa * non_incl_pref
        kwi = sk * inv_incl_pref
        bwi = sb * inv_incl_pref

        mask1 = (t > t.trans())
        ab = tl_dot(prec, wa, bwi.trans()) * mask1
        ak = tl_dot(prec, wa, kwi.trans()) * mask1

        ab_inv = tri_minv(ab, dT, prec)

        ab_u = tl_dot(prec, ak, sv) + tl_dot(prec, wa, state.trans())
        u = tl_dot(prec, ab_inv, ab_u)
        mask2 = (t >= t.trans())
        qk = tl_dot(prec, wq, kwi.trans()) * mask2
        qb = tl_dot(prec, wq, bwi.trans()) * mask2
        yy = tl_dot(prec, qk, sv) + tl_dot(prec, qb, u) + tl_dot(prec, wq, state.trans())
        tl.store(y_+IND4(bi,t,hi,i, T,H,C), yy.to(tl.bfloat16))

        tl.store(s_+IND5(bi,hi,t0,i.trans(),i, H,T//dT,C,C), state.to(tl.float32))
        state = state * fw + tl_dot(prec, sv.trans(), kwi*fw) + tl_dot(prec, u.trans(), bwi*fw)

    tl.store(sT_+IND4(bi,hi,i.trans(),i, H,C,C), state.to(tl.bfloat16))
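The first few lines of the loop in fw_attn_triton turn the raw decay logits of one chunk into four prefix-product terms. The snippet below mirrors just that arithmetic with dense PyTorch ops so the roles of fw, incl_pref, non_incl_pref and inv_incl_pref are easy to see; the shapes are hypothetical and the code is an illustration, not part of the repository.

import torch

dT, C = 16, 64                                    # chunk length and head size (hypothetical)
sw = torch.randn(dT, C, dtype=torch.float64)      # raw decay logits for one chunk of one head
w = (-sw.exp()).exp()                             # per-step decay factors in (0, 1)

fw = w.prod(dim=0, keepdim=True)                  # total decay across the chunk (tl.reduce with _prod)
incl_pref = torch.cumprod(w, dim=0)               # decay from the chunk start through step t, inclusive
non_incl_pref = incl_pref / w                     # decay up to, but excluding, step t
inv_incl_pref = 1 / incl_pref                     # undoes the accumulated decay (applied to the k and b terms)

assert torch.allclose(incl_pref[-1:], fw)         # the last cumulative product equals the full-chunk decay
assert torch.allclose(non_incl_pref[0], torch.ones(C, dtype=torch.float64))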
JL-er/RWKV-PEFT
rwkvt/operator/rwkvop.py
https://github.com/JL-er/RWKV-PEFT/blob/5a311b2eb112cc361c451fc48bf16bea0a71c668/rwkvt/operator/rwkvop.py
@triton.jit
def bw_attn_triton(w_,q_,k_,v_,a_,b_, dy_,s_,dsT_, dw_,dq_,dk_,dv_,da_,db_,ds0_,
                   B:tl.constexpr,T:tl.constexpr,H:tl.constexpr,C:tl.constexpr,dT:tl.constexpr,
                   prec:tl.constexpr):
    bi = tl.program_id(1)
    hi = tl.program_id(0)

    i = tl.arange(0,C)[None,:]
    dstate = tl.load(dsT_+IND4(bi,hi,i.trans(),i, H,C,C)).to(tl.float32)

    for t0 in range(T//dT-1,-1,-1):
        t = t0*dT+tl.arange(0,dT)[:,None]

        state = tl.load(s_+IND5(bi,hi,t0,i.trans(),i, H,T//dT,C,C)).to(tl.float32)

        sw = tl.load(w_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)
        sq = tl.load(q_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)
        sk = tl.load(k_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)
        sv = tl.load(v_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)
        sa = tl.load(a_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)
        sb = tl.load(b_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)
        sdy = tl.load(dy_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32)

        dw_fac = -sw.exp()
        w = dw_fac.exp()
        fw = tl.reduce(w, 0, _prod, keep_dims=True)
        incl_pref = tl.cumprod(w,axis=0)
        non_incl_pref = incl_pref / w
        inv_incl_pref = 1 / incl_pref

        wq = sq * incl_pref
        wa = sa * non_incl_pref
        kwi = sk * inv_incl_pref
        bwi = sb * inv_incl_pref

        mask1 = (t > t.trans())
        ab = tl_dot(prec, wa, bwi.trans()) * mask1
        ak = tl_dot(prec, wa, kwi.trans()) * mask1

        ab_inv = tri_minv(ab, dT, prec)

        ab_u = tl_dot(prec, ak, sv) + tl_dot(prec, wa, state.trans())
        u = tl_dot(prec, ab_inv, ab_u)
        mask2 = (t >= t.trans())
        qk = tl_dot(prec, wq, kwi.trans()) * mask2
        qb = tl_dot(prec, wq, bwi.trans()) * mask2

        du = tl_dot(prec, qb.trans(), sdy) + tl_dot(prec, bwi*fw, dstate.trans())
        dab_u = tl_dot(prec, ab_inv.trans(), du)

        dv = tl_dot(prec, qk.trans(), sdy) + tl_dot(prec, kwi*fw, dstate.trans()) + tl_dot(prec, ak.trans(), dab_u)
        tl.store(dv_+IND4(bi,t,hi,i, T,H,C), dv.to(tl.bfloat16))

        dab = tl_dot(prec, tl_dot(prec, ab_inv.trans(), du), u.trans()) * mask1
        dak = tl_dot(prec, dab_u, sv.trans()) * mask1
        dab_u_state = tl_dot(prec, dab_u, state)
        da = non_incl_pref * (tl_dot(prec, dab, bwi) + tl_dot(prec, dak, kwi) + dab_u_state)
        tl.store(da_+IND4(bi,t,hi,i, T,H,C), da.to(tl.bfloat16))

        dqb = tl_dot(prec, sdy, u.trans()) * mask2
        dqk = tl_dot(prec, sdy, sv.trans()) * mask2
        dy_state = tl_dot(prec, sdy, state)
        dq = incl_pref * (tl_dot(prec, dqb, bwi) + tl_dot(prec, dqk, kwi) + dy_state)
        tl.store(dq_+IND4(bi,t,hi,i, T,H,C), dq.to(tl.bfloat16))

        fw_u_dstate = fw * tl_dot(prec, u, dstate)
        db = inv_incl_pref * (tl_dot(prec, dab.trans(), wa) + tl_dot(prec, dqb.trans(), wq) + fw_u_dstate)
        tl.store(db_+IND4(bi,t,hi,i, T,H,C), db.to(tl.bfloat16))

        fw_v_dstate = fw * tl_dot(prec, sv, dstate)
        dk = inv_incl_pref * (tl_dot(prec, dak.trans(), wa) + tl_dot(prec, dqk.trans(), wq) + fw_v_dstate)
        tl.store(dk_+IND4(bi,t,hi,i, T,H,C), dk.to(tl.bfloat16))

        dw0 = fw * tl.sum(state*dstate, axis=0,keep_dims=True)
        for k in range(t0*dT,t0*dT+dT):
            lmask = (t<k).trans()
            A = (tl_dot(prec, dab*lmask, bwi) + tl_dot(prec, dak*lmask, kwi)) * wa * (t>k)
            A += (tl_dot(prec, dqb*lmask, bwi) + tl_dot(prec, dqk*lmask, kwi)) * wq * (t>=k)
            A += (fw_v_dstate*kwi + fw_u_dstate*bwi) * (t<k)
            A += dab_u_state*wa * (t>k) + dy_state*wq * (t>=k)
            dw = tl.sum(A, axis=0,keep_dims=True) + dw0

            wk = tl.load(w_+IND4(bi,k,hi,i, T,H,C)).to(tl.float32)
            dw *= -wk.exp()
            tl.store(dw_+IND4(bi,k,hi,i, T,H,C), dw.to(tl.bfloat16))

        dstate = dstate * fw + tl_dot(prec, sdy.trans(), wq) + tl_dot(prec, dab_u.trans(), wa)

    tl.store(ds0_+IND4(bi,hi,i.trans(),i, H,C,C), dstate.to(tl.bfloat16))


class TritonRWKV7(th.autograd.Function):
    @staticmethod
    def forward(ctx, w,q,k,v,z,b,s0, dot_prec):
        K = 16
        B,T,H,C = w.shape
        s0 = th.zeros(B,H,C,C, dtype=w.dtype,device=w.device) if s0 is None else s0
        y = th.empty_like(v)
        sT = th.empty_like(s0)
        s = th.zeros(B,H,T//K,C,C, dtype=th.float32,device=w.device)
        fw_attn_triton[(H,B)](w,q,k,v,z,b, s0,y,s,sT, B,T,H,C,K, dot_prec)
        ctx.dot_prec = dot_prec
        ctx.save_for_backward(w,q,k,v,z,b,s)
        return y, sT

    @staticmethod
    def backward(ctx, dy, dsT):
        K = 16
        w,q,k,v,z,b,s = ctx.saved_tensors
        B,T,H,C = w.shape
        dw,dq,dk,dv,dz,db,ds0 = [th.empty_like(x) for x in [w,q,k,v,z,b,dsT]]
        bw_attn_triton[(H,B)](w,q,k,v,z,b, dy,s,dsT, dw,dq,dk,dv,dz,db,ds0, B,T,H,C,K, ctx.dot_prec)
        return dw,dq,dk,dv,dz,db,ds0,None
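A usage sketch for the wrapper above. It assumes the Triton kernels in this file are already defined, a CUDA device is available, and inputs follow the layout used by RUN_CUDA_RWKV7g in the same file (B, T, H, C with T a multiple of the chunk size K = 16); the concrete shapes and random values are placeholders, not from the repository.

import torch as th

B, T, H, C = 1, 32, 2, 64                          # T must be divisible by the chunk size K = 16
w, q, k, v, z, b = [th.randn(B, T, H, C, device='cuda', dtype=th.bfloat16, requires_grad=True)
                    for _ in range(6)]
s0 = th.zeros(B, H, C, C, device='cuda', dtype=th.bfloat16)   # initial per-head state

y, sT = TritonRWKV7.apply(w, q, k, v, z, b, s0, 'fp32')       # forward launches fw_attn_triton
y.float().sum().backward()                                    # backward launches bw_attn_triton
print(y.shape, sT.shape, w.grad.shape)                        # (B, T, H, C), (B, H, C, C), (B, T, H, C)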
JL-er/RWKV-PEFT
rwkvt/operator/rwkvop.py
https://github.com/JL-er/RWKV-PEFT/blob/5a311b2eb112cc361c451fc48bf16bea0a71c668/rwkvt/operator/rwkvop.py
from einops import rearrange import os, math, gc, importlib import torch ######################################################################################################## # CUDA Kernel ######################################################################################################## def RUN_CUDA_RWKV7g(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_RWKV7_STATE(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_RWKV7_INFCTX(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_CUDA_RWKV6(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_CUDA_RWKV6_STATE(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') def RUN_CUDA_RWKV5(): raise NotImplementedError('RUN_CUDA_RUN_KV not implemented') if os.environ["WKV"] == 'fla': if 'x070' in os.environ["RWKV_MY_TESTING"]: from rwkvfla.ops.rwkv7 import chunk_rwkv7 if os.environ["RWKV_TRAIN_TYPE"] == 'infctx': def RUN_RWKV7_INFCTX(r, k, v, w, a, b, s, HEAD_SIZE=64): # for State-tuning, infctx B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] # when use w=, -exp(w) is not needed, w=-torch.exp(w), otherwise, use log_w = -torch.exp(w) o, state = chunk_rwkv7(r=r, w=w, k=k, v=v, a=a, b=b, scale=1.0, initial_state=s, output_final_state=True, head_first=False) return o, state if os.environ["RWKV_TRAIN_TYPE"] == 'state': def RUN_RWKV7_STATE(r, k, v, w, a, b, s, HEAD_SIZE=64): # for State-tuning, infctx B,T,HC = w.shape C = HEAD_SIZE H = HC//C s = s.transpose(1, 2).expand(B,*s.shape) r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] # when use w=, -exp(w) is not needed, w=-torch.exp(w), otherwise, use log_w = -torch.exp(w) o, state = chunk_rwkv7(r=r, w=w, k=k, v=v, a=a, b=b, scale=1.0, initial_state=s, output_final_state=True, head_first=False) return o, state else: def RUN_CUDA_RWKV7g(r,w,k,v,a,b, HEAD_SIZE=64): #compatible with cuda implement B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] # when use w=, -exp(w) is not needed, w=-torch.exp(w), otherwise, use log_w = -torch.exp(w) o, _ = chunk_rwkv7(r=r, w=w, k=k, v=v, a=a, b=b, scale=1.0, initial_state=None, output_final_state=False, head_first=False) return o if 'x060' in os.environ["RWKV_MY_TESTING"]: from rwkvfla.ops.rwkv6 import chunk_rwkv6 if os.environ["RWKV_TRAIN_TYPE"] == 'infctx': def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): r = rearrange(r, 'b l (h d) -> b h l d', h = H) k = rearrange(k, 'b l (h d) -> b h l d', h = H) v = rearrange(v, 'b l (h d) -> b h l d', h = H) w = rearrange(-torch.exp(w), 'b l (h d) -> b h l d', h = H) o, state = chunk_rwkv6(r, k, v, w, u=u, scale=1., initial_state=s, output_final_state=True) x = rearrange(o, 'b h l d -> b l (h d)') return x, state elif os.environ["RWKV_TRAIN_TYPE"] == 'state': def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): r = rearrange(r, 'b l (h d) -> b h l d', h = H) k = rearrange(k, 'b l (h d) -> b h l d', h = H) v = rearrange(v, 'b l (h d) -> b h l d', h = H) w = rearrange(-torch.exp(w), 'b l (h d) -> b h l d', h = H) s = s.transpose(1, 2).expand(B,*s.shape) o,_ = chunk_rwkv6(r, k, v, w, u=u, scale=1., initial_state=s, output_final_state=False) x = rearrange(o, 'b h l d -> b l (h d)') return x else: def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u): r = rearrange(r, 'b l (h d) -> b h l d', h = H) k = rearrange(k, 'b l (h d) -> b h l d', h = H) v = rearrange(v, 'b l (h d) -> b h l d', h = H) w = rearrange(-torch.exp(w), 'b l (h d) -> b h 
l d', h = H) o,_ = chunk_rwkv6(r, k, v, w, u=u, scale=1., initial_state=None, output_final_state=False) x = rearrange(o, 'b h l d -> b l (h d)') return x elif os.environ["WKV"] == 'triton': print('x070 Wind Triton Kernel Mode') import torch as th import triton import triton.language as tl @triton.jit def IND4(a,b,c,d,nb,nc,nd): return ((a*nb+b)*nc+c)*nd+d @triton.jit def IND5(a,b,c,d,e,nb,nc,nd,ne): return (((a*nb+b)*nc+c)*nd+d)*ne+e @triton.jit def _prod(a,b): return a*b # inv(I-A) where A is a strictly lower triangular nxn matrix @triton.jit def tri_minv(A, n:tl.constexpr, prec:tl.constexpr): i = tl.arange(0,n) prod = (i[None,:]==i[:,None]).to(tl.float32) for j in range(n-1): prod += tl_dot(prec, prod, (A*((i[None,:]==j)*(i[:,None]>i[None,:]))).trans()) return prod.trans() @triton.jit def fw_attn_triton(w_,q_,k_,v_,a_,b_, s0_,y_,s_,sT_, B:tl.constexpr,T:tl.constexpr,H:tl.constexpr,C:tl.constexpr,dT:tl.constexpr, prec:tl.constexpr): bi = tl.program_id(1) hi = tl.program_id(0) i = tl.arange(0,C)[None,:] state = tl.load(s0_+IND4(bi,hi,i.trans(),i, H,C,C)).to(tl.float32) for t0 in range(T//dT): t = t0*dT+tl.arange(0,dT)[:,None] sw = tl.load(w_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sq = tl.load(q_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sk = tl.load(k_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sv = tl.load(v_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sa = tl.load(a_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sb = tl.load(b_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) w = (-sw.exp()).exp() fw = tl.reduce(w, 0, _prod, keep_dims=True) incl_pref = tl.cumprod(w,axis=0) non_incl_pref = incl_pref / w inv_incl_pref = 1 / incl_pref wq = sq * incl_pref wa = sa * non_incl_pref kwi = sk * inv_incl_pref bwi = sb * inv_incl_pref mask1 = (t > t.trans()) ab = tl_dot(prec, wa, bwi.trans()) * mask1 ak = tl_dot(prec, wa, kwi.trans()) * mask1 ab_inv = tri_minv(ab, dT, prec) ab_u = tl_dot(prec, ak, sv) + tl_dot(prec, wa, state.trans()) u = tl_dot(prec, ab_inv, ab_u) mask2 = (t >= t.trans()) qk = tl_dot(prec, wq, kwi.trans()) * mask2 qb = tl_dot(prec, wq, bwi.trans()) * mask2 yy = tl_dot(prec, qk, sv) + tl_dot(prec, qb, u) + tl_dot(prec, wq, state.trans()) tl.store(y_+IND4(bi,t,hi,i, T,H,C), yy.to(tl.bfloat16)) tl.store(s_+IND5(bi,hi,t0,i.trans(),i, H,T//dT,C,C), state.to(tl.float32)) state = state * fw + tl_dot(prec, sv.trans(), kwi*fw) + tl_dot(prec, u.trans(), bwi*fw) tl.store(sT_+IND4(bi,hi,i.trans(),i, H,C,C), state.to(tl.bfloat16)) @triton.jit def bw_attn_triton(w_,q_,k_,v_,a_,b_, dy_,s_,dsT_, dw_,dq_,dk_,dv_,da_,db_,ds0_, B:tl.constexpr,T:tl.constexpr,H:tl.constexpr,C:tl.constexpr,dT:tl.constexpr, prec:tl.constexpr): bi = tl.program_id(1) hi = tl.program_id(0) i = tl.arange(0,C)[None,:] dstate = tl.load(dsT_+IND4(bi,hi,i.trans(),i, H,C,C)).to(tl.float32) for t0 in range(T//dT-1,-1,-1): t = t0*dT+tl.arange(0,dT)[:,None] state = tl.load(s_+IND5(bi,hi,t0,i.trans(),i, H,T//dT,C,C)).to(tl.float32) sw = tl.load(w_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sq = tl.load(q_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sk = tl.load(k_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sv = tl.load(v_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sa = tl.load(a_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sb = tl.load(b_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) sdy = tl.load(dy_+IND4(bi,t,hi,i, T,H,C)).to(tl.float32) dw_fac = -sw.exp() w = dw_fac.exp() fw = tl.reduce(w, 0, _prod, keep_dims=True) incl_pref = tl.cumprod(w,axis=0) non_incl_pref = incl_pref / w inv_incl_pref = 1 / incl_pref wq = sq * incl_pref wa = sa * non_incl_pref kwi = sk * inv_incl_pref bwi = sb * 
inv_incl_pref mask1 = (t > t.trans()) ab = tl_dot(prec, wa, bwi.trans()) * mask1 ak = tl_dot(prec, wa, kwi.trans()) * mask1 ab_inv = tri_minv(ab, dT, prec) ab_u = tl_dot(prec, ak, sv) + tl_dot(prec, wa, state.trans()) u = tl_dot(prec, ab_inv, ab_u) mask2 = (t >= t.trans()) qk = tl_dot(prec, wq, kwi.trans()) * mask2 qb = tl_dot(prec, wq, bwi.trans()) * mask2 du = tl_dot(prec, qb.trans(), sdy) + tl_dot(prec, bwi*fw, dstate.trans()) dab_u = tl_dot(prec, ab_inv.trans(), du) dv = tl_dot(prec, qk.trans(), sdy) + tl_dot(prec, kwi*fw, dstate.trans()) + tl_dot(prec, ak.trans(), dab_u) tl.store(dv_+IND4(bi,t,hi,i, T,H,C), dv.to(tl.bfloat16)) dab = tl_dot(prec, tl_dot(prec, ab_inv.trans(), du), u.trans()) * mask1 dak = tl_dot(prec, dab_u, sv.trans()) * mask1 dab_u_state = tl_dot(prec, dab_u, state) da = non_incl_pref * (tl_dot(prec, dab, bwi) + tl_dot(prec, dak, kwi) + dab_u_state) tl.store(da_+IND4(bi,t,hi,i, T,H,C), da.to(tl.bfloat16)) dqb = tl_dot(prec, sdy, u.trans()) * mask2 dqk = tl_dot(prec, sdy, sv.trans()) * mask2 dy_state = tl_dot(prec, sdy, state) dq = incl_pref * (tl_dot(prec, dqb, bwi) + tl_dot(prec, dqk, kwi) + dy_state) tl.store(dq_+IND4(bi,t,hi,i, T,H,C), dq.to(tl.bfloat16)) fw_u_dstate = fw * tl_dot(prec, u, dstate) db = inv_incl_pref * (tl_dot(prec, dab.trans(), wa) + tl_dot(prec, dqb.trans(), wq) + fw_u_dstate) tl.store(db_+IND4(bi,t,hi,i, T,H,C), db.to(tl.bfloat16)) fw_v_dstate = fw * tl_dot(prec, sv, dstate) dk = inv_incl_pref * (tl_dot(prec, dak.trans(), wa) + tl_dot(prec, dqk.trans(), wq) + fw_v_dstate) tl.store(dk_+IND4(bi,t,hi,i, T,H,C), dk.to(tl.bfloat16)) dw0 = fw * tl.sum(state*dstate, axis=0,keep_dims=True) for k in range(t0*dT,t0*dT+dT): lmask = (t<k).trans() A = (tl_dot(prec, dab*lmask, bwi) + tl_dot(prec, dak*lmask, kwi)) * wa * (t>k) A += (tl_dot(prec, dqb*lmask, bwi) + tl_dot(prec, dqk*lmask, kwi)) * wq * (t>=k) A += (fw_v_dstate*kwi + fw_u_dstate*bwi) * (t<k) A += dab_u_state*wa * (t>k) + dy_state*wq * (t>=k) dw = tl.sum(A, axis=0,keep_dims=True) + dw0 wk = tl.load(w_+IND4(bi,k,hi,i, T,H,C)).to(tl.float32) dw *= -wk.exp() tl.store(dw_+IND4(bi,k,hi,i, T,H,C), dw.to(tl.bfloat16)) dstate = dstate * fw + tl_dot(prec, sdy.trans(), wq) + tl_dot(prec, dab_u.trans(), wa) tl.store(ds0_+IND4(bi,hi,i.trans(),i, H,C,C), dstate.to(tl.bfloat16)) class TritonRWKV7(th.autograd.Function): @staticmethod def forward(ctx, w,q,k,v,z,b,s0, dot_prec): K = 16 B,T,H,C = w.shape s0 = th.zeros(B,H,C,C, dtype=w.dtype,device=w.device) if s0 is None else s0 y = th.empty_like(v) sT = th.empty_like(s0) s = th.zeros(B,H,T//K,C,C, dtype=th.float32,device=w.device) fw_attn_triton[(H,B)](w,q,k,v,z,b, s0,y,s,sT, B,T,H,C,K, dot_prec) ctx.dot_prec = dot_prec ctx.save_for_backward(w,q,k,v,z,b,s) return y, sT @staticmethod def backward(ctx, dy, dsT): K = 16 w,q,k,v,z,b,s = ctx.saved_tensors B,T,H,C = w.shape dw,dq,dk,dv,dz,db,ds0 = [th.empty_like(x) for x in [w,q,k,v,z,b,dsT]] bw_attn_triton[(H,B)](w,q,k,v,z,b, dy,s,dsT, dw,dq,dk,dv,dz,db,ds0, B,T,H,C,K, ctx.dot_prec) return dw,dq,dk,dv,dz,db,ds0,None @triton.jit def tl_dot(prec:tl.constexpr, a, b): if prec == 'fp32': return tl.dot(a.to(tl.float32),b.trans().to(tl.float32).trans(), allow_tf32=False) elif prec == 'tf32': return tl.dot(a.to(tl.float32),b.trans().to(tl.float32).trans(), allow_tf32=True) elif prec == 'bf16': return tl.dot(a.to(tl.bfloat16),b.trans().to(tl.bfloat16).trans(), allow_tf32=True) else: tl.static_assert(False) def RUN_CUDA_RWKV7g(r,w,k,v,a,b, HEAD_SIZE=64, dot_prec = 'fp32'): B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = 
[i.view(B,T,H,C) for i in [r,w,k,v,a,b]] s0 = th.zeros(B,H,C,C, dtype=th.bfloat16,device=w.device) return TritonRWKV7.apply(w,r,k,v,a,b,s0,dot_prec)[0].view(B,T,HC) def RUN_RWKV7_STATE(r, k, v, w, a, b, s, HEAD_SIZE=64, dot_prec = 'fp32'): B,T,HC = w.shape C = HEAD_SIZE H = HC//C r,w,k,v,a,b = [i.view(B,T,H,C) for i in [r,w,k,v,a,b]] s0 = s return TritonRWKV7.apply(w,r,k,v,a,b,s0,dot_prec)[0].view(B,T,HC), None else: from torch.utils.cpp_extension import load HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) if 'x070' in os.environ["RWKV_MY_TESTING"]: CHUNK_LEN = 16 flags = ['-res-usage', f'-D_C_={HEAD_SIZE}', f"-D_CHUNK_LEN_={CHUNK_LEN}", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization"] load(name="wind_backstepping", sources=[f'cuda/wkv7_cuda.cu', 'cuda/wkv7_op.cpp'], is_python_module=False, verbose=True, extra_cuda_cflags=flags) class WindBackstepping(torch.autograd.Function): @staticmethod def forward(ctx, w,q,k,v,z,b): B,T,H,C = w.shape assert T%CHUNK_LEN == 0 assert all(i.dtype==torch.bfloat16 for i in [w,q,k,v,z,b]) assert all(i.is_contiguous() for i in [w,q,k,v,z,b]) y = torch.empty_like(v) s = torch.empty(B,H,T//CHUNK_LEN,C,C, dtype=torch.float32,device=w.device) sa = torch.empty(B,T,H,C, dtype=torch.float32,device=w.device) torch.ops.wind_backstepping.forward(w,q,k,v,z,b, y,s,sa) ctx.save_for_backward(w,q,k,v,z,b,s,sa) return y @staticmethod def backward(ctx, dy): assert all(i.dtype==torch.bfloat16 for i in [dy]) assert all(i.is_contiguous() for i in [dy]) w,q,k,v,z,b,s,sa = ctx.saved_tensors dw,dq,dk,dv,dz,db = [torch.empty_like(x) for x in [w,q,k,v,z,b]] torch.ops.wind_backstepping.backward(w,q,k,v,z,b, dy,s,sa, dw,dq,dk,dv,dz,db) return dw,dq,dk,dv,dz,db def RUN_CUDA_RWKV7g(q,w,k,v,a,b): B,T,HC = q.shape q,w,k,v,a,b = [i.view(B,T,HC//64,64) for i in [q,w,k,v,a,b]] return WindBackstepping.apply(w,q,k,v,a,b).view(B,T,HC) elif 'x060' in os.environ["RWKV_MY_TESTING"]: if os.environ["RWKV_TRAIN_TYPE"] == 'infctx': wkv6state_cuda = load(name="wkv6infctx", sources=["cuda/wkv6infctx_op.cpp", f"cuda/wkv6infctx_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6STATE(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u, s): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert s.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() assert s.is_contiguous() ctx.save_for_backward(r, k, v, w, u, s) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.forward(B, T, C, H, r, k, v, w, u, s, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, w, u, s = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = 
torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gs = torch.empty((B, H, C//H, C//H), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.backward(B, T, C, H, r, k, v, w, u, s, gy, gr, gk, gv, gw, gu, gs) gu = torch.sum(gu, 0).view(H, C//H) gs = torch.sum(gs, 0).view(H, C//H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu, gs) def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): x = WKV_6STATE.apply(B, T, C, H, r, k, v, w, u, s) return x, s elif os.environ["RWKV_TRAIN_TYPE"] == 'state': wkv6state_cuda = load(name="wkv6state", sources=["cuda/wkv6state_op.cpp", f"cuda/wkv6state_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6STATE(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u, s): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert s.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() assert s.is_contiguous() ctx.save_for_backward(r, k, v, w, u, s) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.forward(B, T, C, H, r, k, v, w, u, s, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, w, u, s = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gs = torch.empty((B, H, C//H, C//H), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.backward(B, T, C, H, r, k, v, w, u, s, gy, gr, gk, gv, gw, gu, gs) gu = torch.sum(gu, 0).view(H, C//H) gs = torch.sum(gs, 0).view(H, C//H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu, gs) def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): return WKV_6STATE.apply(B, T, C, H, r, k, v, w, u, s) else: wkv6_cuda = load(name="wkv6", sources=["cuda/wkv6_op.cpp", f"cuda/wkv6_cuda.cu"], 
verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ew = (-torch.exp(w.float())).contiguous() ctx.save_for_backward(r, k, v, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6_cuda.forward(B, T, C, H, r, k, v, ew, u, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, ew, u = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6_cuda.backward(B, T, C, H, r, k, v, ew, u, gy, gr, gk, gv, gw, gu) gu = torch.sum(gu, 0).view(H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu) def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u): return WKV_6.apply(B, T, C, H, r, k, v, w, u) else: wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) class WKV_5(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ew = (-torch.exp(w.float())).contiguous() eew = (torch.exp(ew)).contiguous() ctx.save_for_backward(r, k, v, eew, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, eew, ew, u = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, 
dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gw = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu) gw = torch.sum(gw, 0).view(H, C//H) gu = torch.sum(gu, 0).view(H, C//H) return (None, None, None, None, gr, gk, gv, gw, gu) def RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w, u): return WKV_5.apply(B, T, C, H, r, k, v, w, u) ########################################################################################################
amitkumarj441/mlperf_benchmark
perfs.py
https://github.com/amitkumarj441/mlperf_benchmark/blob/8d42d1be049de8e68551359fe3aae4997561073b/perfs.py
from typing import Optional, Tuple import torch import torch.nn as nn import triton import triton.language as tl from transformers.models.llama.modeling_llama import (LlamaAttention, apply_rotary_pos_emb, repeat_kv) from transformers.utils import logging logger = logging.get_logger(__name__) @triton.jit def abc_fwd_kernel( q, k, v, o, sk, sv, stride_qb, stride_qh, stride_qt, stride_qd, stride_skb, stride_skh, stride_skt, stride_skd, B, H, T, D, M, scale, BD: tl.constexpr, BM: tl.constexpr ): i_bh = tl.program_id(0) p_q = tl.make_block_ptr(base=q + i_bh * stride_qh, shape=(T * D,), strides=(stride_qd,), offsets=(0,), block_shape=(BD,), order=(0,)) p_k = tl.make_block_ptr(base=k + i_bh * stride_qh, shape=(T * D,), strides=(stride_qd,), offsets=(0,), block_shape=(BD,), order=(0,)) p_v = tl.make_block_ptr(base=v + i_bh * stride_qh, shape=(T * D,), strides=(stride_qd,), offsets=(0,), block_shape=(BD,), order=(0,)) p_o = tl.make_block_ptr(base=o + i_bh * stride_qh, shape=(T * D,), strides=(stride_qd,), offsets=(0,), block_shape=(BD,), order=(0,)) p_sk = tl.make_block_ptr(base=sk + i_bh * stride_skh, shape=(T * M,), strides=(stride_skd,), offsets=(0,), block_shape=(BM,), order=(0,)) p_sv = tl.make_block_ptr(base=sv + i_bh * stride_skh, shape=(T * M,), strides=(stride_skd,), offsets=(0,), block_shape=(BM,), order=(0,)) m_sk, m_sv = tl.full([BM,], float('-inf'), dtype=tl.float32), tl.full([BM,], float('-inf'), dtype=tl.float32) a_sk, a_sv = tl.zeros([BM,], dtype=tl.float32), tl.zeros([BM,], dtype=tl.float32) a_k = tl.zeros([BM, BD], dtype=tl.float32) a_v = tl.zeros([BM, BD], dtype=tl.float32) for _ in range(T): # [BM,] b_sk = tl.load(p_sk) m_ski = tl.maximum(m_sk, b_sk) b_sk = tl.exp(b_sk - m_ski) a_sk = a_sk * tl.exp(m_sk - m_ski) a_ski = b_sk + a_sk # [BM, BD] a_k = a_k * (a_sk / a_ski)[:, None] + (b_sk / a_ski)[:, None] * tl.load(p_k)[None, :] # [BM,] b_sv = tl.load(p_sv) m_svi = tl.maximum(m_sv, b_sv) b_sv = tl.exp(b_sv - m_svi) a_sv = a_sv * tl.exp(m_sv - m_svi) a_svi = b_sv + a_sv # [BM, BD] a_v = a_v * (a_sv / a_svi)[:, None] + (b_sv / a_svi)[:, None] * tl.load(p_v)[None, :] # [BD,] b_q = tl.load(p_q) * scale # [BD,] b_o = tl.sum(tl.softmax(tl.sum(b_q[None, :] * a_k, 1), 0)[:, None] * a_v, 0) tl.store(p_o, b_o.to(p_q.dtype.element_ty)) m_sk, m_sv = m_ski, m_svi a_sk, a_sv = a_ski, a_svi p_q = tl.advance(p_q, (BD,)) p_k = tl.advance(p_k, (BD,)) p_v = tl.advance(p_v, (BD,)) p_o = tl.advance(p_o, (BD,)) p_sk = tl.advance(p_sk, (BM,)) p_sv = tl.advance(p_sv, (BM,)) class ABCAttention(torch.autograd.Function): @staticmethod def forward(ctx, q, k, v, sk, sv): BD, BM = q.shape[-1], sk.shape[-1] batch_size, n_heads, seq_len, d_head = q.shape num_stages = 3 if d_head <= 64 else 2 num_warps = 4 grid = (batch_size * n_heads,) scale = d_head ** -0.5 assert d_head in {16, 32, 64, 128} o = torch.empty_like(q) abc_fwd_kernel[grid]( q, k, v, o, sk, sv, q.stride(0), q.stride(1), q.stride(2), q.stride(3), sk.stride(0), sk.stride(1), sk.stride(2), sk.stride(3), batch_size, n_heads, seq_len, d_head, sk.shape[-1], scale, BD=BD, BM=BM, num_warps=num_warps, num_stages=num_stages ) ctx.save_for_backward(q, k, v, sk, sv, o) ctx.grid = grid ctx.scale = scale return o @staticmethod def backward(ctx, do): def reversed_cumsum(x, dim=-1): c = x.cumsum(dim) return x + c.index_select(dim, x.new_tensor([c.shape[dim]-1], dtype=torch.long)) - c q, k, v, ek, ev, ak, av, p, o = ctx.saved_tensors scale = ctx.scale K = (ek.unsqueeze(-1) * k.unsqueeze(-2)).cumsum(2) / ak.unsqueeze(-1) V = (ev.unsqueeze(-1) * 
v.unsqueeze(-2)).cumsum(2) / av.unsqueeze(-1) dq, dk, dv, dsk, dsv = None, None, None, None, None dp = (p * (torch.einsum('...qd,...qmd->...qm', do, V) - (do * o).sum(-1, True))) * scale dq = torch.einsum('...qm,...qmd->...qd', dp, K) dK = torch.einsum('...qm,...qd->...qmd', dp / ak, q) dK1 = reversed_cumsum(dK, 2) dk = torch.einsum('...qm,...qmd->...qd', ek, dK1) dsk = ek * (torch.einsum('...qd,...qmd->...qm', k, dK1) - reversed_cumsum((dK * K).sum(-1), 2)) dV = torch.einsum('...qd,...qm->...qmd', do, p / av) dV1 = reversed_cumsum(dV, 2) dv = torch.einsum('...qm,...qmd->...qd', ev, dV1) dsv = ev * (torch.einsum('...qd,...qmd->...qm', v, dV1) - reversed_cumsum((dV * V).sum(-1), 2)) return dq, dk, dv, dsk, dsv def naive_attention(q, k, v, sk, sv): dtype = q.dtype *_, d_head = q.shape # [batch_size, n_heads, seq_len, 64] ek = (sk - sk.max(2, True)[0]).exp() ev = (sv - sv.max(2, True)[0]).exp() ak, av = ek.cumsum(2), ev.cumsum(2) # [batch_size, n_heads, seq_len, 64, d_head] K = (ek.unsqueeze(-1) * k.unsqueeze(-2)).cumsum(2) / ak.unsqueeze(-1) V = (ev.unsqueeze(-1) * v.unsqueeze(-2)).cumsum(2) / av.unsqueeze(-1) # [batch_size, n_heads, seq_len, 64] p = torch.einsum('...d,...md->...m', q * d_head ** -0.5, K).softmax(-1, dtype=torch.float).to(dtype) # [batch_size, n_heads, seq_len, d_head] o = torch.einsum('...m,...md->...d', p, V) return o class NaiveAttention1(torch.autograd.Function): @staticmethod def forward(ctx, q, k, v, sk, sv): *_, d_head = q.shape dtype, scale = q.dtype, d_head ** -0.5 # [batch_size, n_heads, seq_len, 64] ek = (sk - sk.max(2, True)[0]).to(torch.float).exp() ev = (sv - sv.max(2, True)[0]).to(torch.float).exp() ak, av = ek.cumsum(2), ev.cumsum(2) # [batch_size, n_heads, seq_len, 64, d_head] K = (ek.unsqueeze(-1) * k.unsqueeze(-2)).cumsum(2) / ak.unsqueeze(-1) V = ((ev.unsqueeze(-1) * v.unsqueeze(-2)).cumsum(2) / av.unsqueeze(-1)).to(dtype) # [batch_size, n_heads, seq_len, 64] p = torch.einsum('...d,...md->...m', q.to(torch.float) * scale, K).softmax(-1).to(dtype) # [batch_size, n_heads, seq_len, d_head] o = torch.einsum('...m,...md->...d', p, V) ctx.save_for_backward(q, k, v, ek, ev, ak, av, p, o) ctx.dtype, ctx.scale = dtype, scale return o @staticmethod def backward(ctx, do): def reversed_cumsum(x, dim=-1): c = x.cumsum(dim) return x + c.index_select(dim, x.new_tensor([c.shape[dim]-1], dtype=torch.long)) - c q, k, v, ek, ev, ak, av, p, o = ctx.saved_tensors dtype, scale = ctx.dtype, ctx.scale K = ((ek.unsqueeze(-1) * k.unsqueeze(-2)).cumsum(2) / ak.unsqueeze(-1)).to(dtype) V = ((ev.unsqueeze(-1) * v.unsqueeze(-2)).cumsum(2) / av.unsqueeze(-1)).to(dtype) dq, dk, dv, dsk, dsv = None, None, None, None, None dp = (p * (torch.einsum('...qd,...qmd->...qm', do, V) - (do * o).sum(-1, True))) * scale dq = torch.einsum('...qm,...qmd->...qd', dp, K) dK = torch.einsum('...qm,...qd->...qmd', (dp / ak).to(dtype), q) dK1 = reversed_cumsum(dK, 2) dk = torch.einsum('...qm,...qmd->...qd', ek.to(dtype), dK1) dsk = ek * (torch.einsum('...qd,...qmd->...qm', k, dK1) - reversed_cumsum((dK * K).sum(-1), 2)) dV = torch.einsum('...qd,...qm->...qmd', do, (p / av).to(dtype)) dV1 = reversed_cumsum(dV, 2) dv = torch.einsum('...qm,...qmd->...qd', ev.to(dtype), dV1) dsv = ev * (torch.einsum('...qd,...qmd->...qm', v, dV1) - reversed_cumsum((dV * V).sum(-1), 2)) return dq, dk, dv, dsk, dsv naive_attention1 = NaiveAttention1.apply abc_attention = ABCAttention.apply class LLaMAABCAttention(LlamaAttention): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.w_k = 
nn.Linear(self.hidden_size, 64, bias=False) self.w_v = nn.Linear(self.hidden_size, 64, bias=False) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: # LlamaFlashAttention2 attention does not support output_attentions output_attentions = False batch_size, seq_len, _ = hidden_states.shape # [batch_size, seq_len, n_heads * d_head] q = self.q_proj(hidden_states) k = self.k_proj(hidden_states) v = self.v_proj(hidden_states) q = q.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2) k = k.view(batch_size, seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) v = v.view(batch_size, seq_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = k.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(v, seq_len=kv_seq_len) q, k = apply_rotary_pos_emb(q, k, cos, sin, position_ids) if past_key_value is not None: # reuse k, v, self_attention k = torch.cat([past_key_value[0], k], dim=2) v = torch.cat([past_key_value[1], v], dim=2) past_key_value = (k, v) if use_cache else None # cast to half precision input_dtype = q.dtype if input_dtype == torch.float32: logger.warning_once("The input hidden states seems to be silently casted in float32.") q = q.to(self.config.torch_dtype) k = k.to(self.config.torch_dtype) v = v.to(self.config.torch_dtype) if getattr(self, "num_key_value_groups", None): k = repeat_kv(k, self.num_key_value_groups) v = repeat_kv(v, self.num_key_value_groups) # [batch_size, n_heads, seq_len, 64] sk = self.w_k(hidden_states).view(batch_size, 1, seq_len, -1).repeat(1, self.num_heads, 1, 1) sv = self.w_v(hidden_states).view(batch_size, 1, seq_len, -1).repeat(1, self.num_heads, 1, 1) o = naive_attention(q, k, v, sk, sv) o = o.transpose(1, 2).reshape(batch_size, seq_len, self.hidden_size) o = self.o_proj(o) if not output_attentions: p = None return o, p, past_key_value if __name__ == '__main__': B, H, T, D, M = 2, 8, 128, 32, 16 dtype = torch.bfloat16 torch.manual_seed(42) # [batch_size, n_heads, seq_len, d_head] q = torch.randn((B, H, T, D), dtype=dtype, device='cuda').requires_grad_() k = torch.randn((B, H, T, D), dtype=dtype, device='cuda').requires_grad_() v = torch.randn((B, H, T, D), dtype=dtype, device='cuda').requires_grad_() # [batch_size, n_heads, seq_len, 64] sk = torch.randn((B, H, T, M), dtype=dtype, device='cuda').requires_grad_() sv = torch.randn((B, H, T, M), dtype=dtype, device='cuda').requires_grad_() do = torch.randn_like(q) ref = naive_attention(q, k, v, sk, sv) ref.backward(do) ref_dq, q.grad = q.grad.clone(), None ref_dk, k.grad = k.grad.clone(), None ref_dv, v.grad = v.grad.clone(), None ref_dsk, sk.grad = sk.grad.clone(), None ref_dsv, sv.grad = sv.grad.clone(), None ref1 = naive_attention1(q, k, v, sk, sv) ref1.backward(do) ref1_dq, q.grad = q.grad.clone(), None ref1_dk, k.grad = k.grad.clone(), None ref1_dv, v.grad = v.grad.clone(), None ref1_dsk, sk.grad = sk.grad.clone(), None ref1_dsv, sv.grad = sv.grad.clone(), None #assert ref.allclose(ref1, 0, 1e-2) #import pdb #pdb.set_trace() #assert ref_dq.allclose(ref1_dq, 0, 1e-2) #assert ref_dk.allclose(ref1_dk, 0, 1e-2) #assert ref_dv.allclose(ref1_dv, 0, 1e-2) #assert ref_dsk.allclose(ref1_dsk, 0, 1e-2) #assert 
ref_dsv.allclose(ref1_dsv, 0, 1e-2) # triton implementation tri = abc_attention(q, k, v, sk, sv) # tri.backward(do) # tri_dv, v.grad = v.grad.clone(), None # tri_dk, k.grad = k.grad.clone(), None # tri_dq, q.grad = q.grad.clone(), None # assert ref.allclose(tri, 0, 1e-2) # assert torch.allclose(ref_dv, tri_dv, 0, 1e-2) # assert torch.allclose(ref_dk, tri_dk, 0, 1e-2) print('Done!') @triton.testing.perf_report( triton.testing.Benchmark( # argument names to use as an x-axis for the plot x_names=['seq_len'], # different possible values for `x_name` x_vals=[128 * 2 ** i for i in range(0, 10)], # argument name whose value corresponds to a different line in the plot line_arg='provider', # possible values for `line_arg`` line_vals=['torch', 'triton', 'torch_bwd', 'triton_bwd'], # label name for the lines line_names=['torch', 'triton', 'torch_bwd', 'triton_bwd'], # line styles styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':')], ylabel="Execution Time (ms)", # label name for the y-axis # name for the plot. Used also as a file name for saving the plot. plot_name="Performance", args={}, ) ) def benchmark(seq_len, provider): device = 'cuda' requires_grad = 'bwd' in provider batch_size, n_heads, d_head, n_mem = 2, 8, 64, 64 q = torch.randn(batch_size, n_heads, seq_len, d_head, device=device, requires_grad=requires_grad) k = torch.randn(batch_size, n_heads, seq_len, d_head, device=device, requires_grad=requires_grad) v = torch.randn(batch_size, n_heads, seq_len, d_head, device=device, requires_grad=requires_grad) sk = torch.randn(batch_size, n_heads, seq_len, n_mem, device=device, requires_grad=requires_grad) sv = torch.randn(batch_size, n_heads, seq_len, n_mem, device=device, requires_grad=requires_grad) do = torch.ones_like(q) quantiles = [0.5, 0.2, 0.8] if provider == 'torch': if seq_len > 40000: return 0, 0, 0 results = triton.testing.do_bench(lambda: naive_attention(q, k, v, sk, sv), quantiles=quantiles) elif provider == 'triton': results = triton.testing.do_bench(lambda: abc_attention(q, k, v, sk, sv), quantiles=quantiles) elif provider == 'torch_bwd': if seq_len > 20000: return 0, 0, 0 results = triton.testing.do_bench(lambda: naive_attention(q, k, v, sk, sv).backward(do), quantiles=quantiles) elif provider == 'triton_bwd': if seq_len > 20000: return 0, 0, 0 results = triton.testing.do_bench(lambda: naive_attention1(q, k, v, sk, sv).backward(do), quantiles=quantiles) return results benchmark.run(show_plots=True, print_data=True, save_path='.')
ssslakter/cuda-pmpp
profiling/compiled.py
https://github.com/ssslakter/cuda-pmpp/blob/a00c31be5cc3549b1faf9251099457ad3b27feff/profiling/compiled.py
from ctypes import c_void_p, c_long
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align

from torch import device, empty, empty_strided
from torch._inductor.codecache import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels

aten = torch.ops.aten
inductor_ops = torch.ops.inductor
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
alloc_from_pool = torch.ops.inductor._alloc_from_pool
reinterpret_tensor = torch.ops.inductor._reinterpret_tensor
async_compile = AsyncCompile()


# kernel path: /tmp/torchinductor_slakter/xx/cxxb6tj3m2w7s4eqi3qpcmvk4yjm2zvslalchu4m2mzrc5uq5hvn.py
# Source Nodes: [softmax], Original ATen: [aten._softmax]
# softmax => amax, div, exp, sub, sum_1
triton_per_fused__softmax_0 = async_compile.triton('triton_', '''
import triton
import triton.language as tl
from torch._inductor.ir import ReductionHint
from torch._inductor.ir import TileHint
from torch._inductor.triton_heuristics import AutotuneHint, persistent_reduction
from torch._inductor.utils import instance_descriptor
from torch._inductor import triton_helpers

@persistent_reduction(
    size_hints=[1, 16],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': 0, 'device_type': 'cuda', 'constants': {}, 'configs': [instance_descriptor(divisible_by_16=(0, 1), equal_to_1=(), ids_of_folded_args=(), divisible_by_8=())]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_0', 'mutated_arg_names': []}
)
@triton.jit
def triton_(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 10
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), rmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(rmask, tmp1, float("-inf"))
    tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
    tmp5 = tmp0 - tmp4
    tmp6 = tl.exp(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.where(rmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tmp11 = tmp6 / tmp10
    tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp11, rmask)
''')

import triton
import triton.language as tl
from torch._inductor.triton_heuristics import grid, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_cuda_stream


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (10, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)  # no-op to ensure context
        buf2 = empty((10, ), device='cuda', dtype=torch.float32)
        # Source Nodes: [softmax], Original ATen: [aten._softmax]
        stream0 = get_cuda_stream(0)
        triton_per_fused__softmax_0.run(arg0_1, buf2, 1, 10, grid=grid(1), stream=stream0)
        del arg0_1
    return (buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
@triton.jit
def triton_(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
    xnumel = 1
    rnumel = 10
    RBLOCK: tl.constexpr = 16
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[None, :]
    rmask = rindex < rnumel
    r0 = rindex
    tmp0 = tl.load(in_ptr0 + (r0), rmask, other=0.0)
    tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
    tmp3 = tl.where(rmask, tmp1, float("-inf"))
    tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
    tmp5 = tmp0 - tmp4
    tmp6 = tl.exp(tmp5)
    tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
    tmp9 = tl.where(rmask, tmp7, 0)
    tmp10 = tl.sum(tmp9, 1)[:, None]
    tmp11 = tmp6 / tmp10
    tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp11, rmask)
''')


import triton
import triton.language as tl
from torch._inductor.triton_heuristics import grid, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_cuda_stream


async_compile.wait(globals())
del async_compile


def call(args):
    arg0_1, = args
    args.clear()
    assert_size_stride(arg0_1, (10, ), (1, ))
    with torch.cuda._DeviceGuard(0):
        torch.cuda.set_device(0)  # no-op to ensure context
        buf2 = empty((10, ), device='cuda', dtype=torch.float32)
        # Source Nodes: [softmax], Original ATen: [aten._softmax]
        stream0 = get_cuda_stream(0)
        triton_per_fused__softmax_0.run(arg0_1, buf2, 1, 10, grid=grid(1), stream=stream0)
        del arg0_1
    return (buf2, )


def benchmark_compiled_module(times=10, repeat=10):
    from torch._dynamo.testing import rand_strided
    from torch._inductor.utils import print_performance
    arg0_1 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
    fn = lambda: call([arg0_1])
    return print_performance(fn, times=times, repeat=repeat)


if __name__ == "__main__":
    from torch._inductor.wrapper_benchmark import compiled_module_main
    compiled_module_main('None', benchmark_compiled_module)
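For context, a wrapper module like the one above is what TorchInductor emits when a plain softmax is compiled. The sketch below is an assumption about how such a dump can be reproduced, not code from the repository; the exact dump location and file names depend on the PyTorch version.

# Hedged sketch: compiling a 1-D softmax so Inductor generates a persistent-reduction
# kernel comparable to triton_per_fused__softmax_0 above. Running the script with
# TORCH_COMPILE_DEBUG=1 makes Inductor dump the generated wrapper code to disk.
import torch

def softmax_fn(x):
    return torch.softmax(x, dim=-1)

compiled_softmax = torch.compile(softmax_fn)

x = torch.randn(10, device="cuda")
out = compiled_softmax(x)                      # first call triggers codegen + compilation
torch.testing.assert_close(out, softmax_fn(x))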
SwekeR-463/100kernels
day42/tlfsoftmax.py
https://github.com/SwekeR-463/100kernels/blob/80b7cea5a2b66f428380b2cb723147379f849f88/day42/tlfsoftmax.py
import torch import triton import triton.language as tl DEVICE = torch.device(f'cuda:{torch.cuda.current_device()}') @triton.jit def _softmax_kernel( input_ptr, output_ptr, input_row_stride, output_row_stride, n_rows, n_cols, BLOCK_SIZE: tl.constexpr, # lowest power-of-2 greater than n_cols num_stages: tl.constexpr, num_wraps: tl.constexpr, ): row_start = tl.program_id(0) row_step = tl.num_programs(0) for row_idx in tl.range(row_start, n_rows, row_step, num_stages=num_stages): # tl.range acts as an iterator row_start_ptr = input_ptr + row_idx * input_row_stride # load the row into the SRAM col_offsets = tl.arange(0, BLOCK_SIZE) # tl.arange provides an array of values input_ptrs = row_start_ptr + col_offsets mask = col_offsets < n_cols row = tl.load(input_ptrs, mask=mask, other=float('-inf')) # subtract max for numerical stability row_minus_max = row - tl.max(row, axis=0) numerator = tl.exp(row_minus_max) denominator = tl.sum(numerator, axis=0) softmax_output = numerator / denominator # write the output back to DRAM output_row_start_ptr = output_ptr + row_idx * output_row_stride tl.store(output_row_start_ptr + col_offsets, softmax_output, mask = mask) # fetching a dictionary full of the GPU's specifications properties = triton.runtime.driver.active.utils.get_device_properties(DEVICE.index) NUM_SM = properties["multiprocessor_count"] NUM_REGS = properties["max_num_regs"] TOTAL_SRAM_PER_SM = properties["max_shared_mem"] WARP_SIZE = properties["warpSize"] # 32 # wrapper function def softmax(x): assert x.ndim == 2 n_rows, n_cols = x.shape BLOCK_SIZE = triton.next_power_of_2(n_cols) num_wraps = 4 if BLOCK_SIZE >= 2048: num_wraps = 8 if BLOCK_SIZE >= 4096: num_wraps = 16 num_stages = 4 if TOTAL_SRAM_PER_SM > 200_000 else 2 y = torch.empty_like(x) kernel = _softmax_kernel.warmup(x, y, x.stride(0), y.stride(0), n_rows, n_cols, BLOCK_SIZE=BLOCK_SIZE, num_stages=num_stages, num_wraps=num_wraps, grid=(1,)) # .warmup pre compiles kernel & tells us how many registers and how much shared memory it needs # info from the warmup process gave kernel._init_handles() n_regs = kernel.n_regs sram_needed_per_program = kernel.metadata.shared # reg based occupancy reg_occupancy = NUM_REGS // (n_regs * WARP_SIZE * num_wraps) # shared memory-based occupancy sram_occupancy = TOTAL_SRAM_PER_SM // sram_needed_per_program # determines how many programs can run per SM based on register usage and shared memory usage programs_per_sm = min(reg_occupancy, sram_occupancy) # how many programs to run in total num_programs = min(NUM_SM * programs_per_sm, n_rows) # grid config grid = (num_programs, 1, 1) # launch the kernelll kernel[grid]( x, y, x.stride(0), y.stride(0), n_rows, n_cols, ) return y def test_softmax_kernel(size: tuple, atol = 1e-3, rtol = 1e-3, device=DEVICE): # creata input data torch.manual_seed(0) assert type(size) is tuple and len(size) == 2 x = torch.randn(size[0], size[1], device=DEVICE) z_tri = softmax(x) z_ref = torch.softmax(x, axis=1) torch.testing.assert_close(z_tri, z_ref, atol=atol, rtol=rtol) print(f'The maximum difference between torch and triton is {torch.max(torch.abs(z_ref - z_tri))}') print("PASSEDDDDDDDDD") @triton.testing.perf_report( triton.testing.Benchmark( x_names=['N'], x_vals=[128 * i for i in range(2, 100)], line_arg='provider', line_vals=['triton', 'torch'], line_names=["Triton", "Torch"], styles=[('blue', '-'), ('green', '-')], ylabel="GB/s", plot_name="softmax-performance", args={'M': 4096} # values for function arguments not in x_names ) ) def benchmark(M, N, provider): # making the 
input data x = torch.randn(M, N, device=DEVICE, dtype=torch.float32) # these two lines ensure more accurate benchmarks; i usually forget to use them but it's not a big deal stream = getattr(torch, DEVICE.type).Stream() getattr(torch, DEVICE.type).set_stream(stream) if provider == 'torch': ms = triton.testing.do_bench(lambda: torch.softmax(x, axis=-1)) if provider == 'triton': ms = triton.testing.do_bench(lambda: softmax(x)) gbps = lambda ms: 2 * x.numel() * x.element_size() * 1e-9 / (ms * 1e-3) # 2 = number of memory operations (1 read + 1 write) if __name__ == "__main__": test_softmax_kernel(size=(1823, 781)) import sys if len(sys.argv) > 1 and sys.argv[1] == "--benchmark": benchmark.run(save_path='/home/sweker/work/cuda/day42', print_data=False)
@triton.jit
def _softmax_kernel(
    input_ptr,
    output_ptr,
    input_row_stride,
    output_row_stride,
    n_rows,
    n_cols,
    BLOCK_SIZE: tl.constexpr,  # lowest power-of-2 greater than n_cols
    num_stages: tl.constexpr,
    num_wraps: tl.constexpr,
):
    row_start = tl.program_id(0)
    row_step = tl.num_programs(0)

    for row_idx in tl.range(row_start, n_rows, row_step, num_stages=num_stages):  # tl.range acts as an iterator
        row_start_ptr = input_ptr + row_idx * input_row_stride

        # load the row into the SRAM
        col_offsets = tl.arange(0, BLOCK_SIZE)  # tl.arange provides an array of values
        input_ptrs = row_start_ptr + col_offsets
        mask = col_offsets < n_cols
        row = tl.load(input_ptrs, mask=mask, other=float('-inf'))

        # subtract max for numerical stability
        row_minus_max = row - tl.max(row, axis=0)
        numerator = tl.exp(row_minus_max)
        denominator = tl.sum(numerator, axis=0)
        softmax_output = numerator / denominator

        # write the output back to DRAM
        output_row_start_ptr = output_ptr + row_idx * output_row_stride
        tl.store(output_row_start_ptr + col_offsets, softmax_output, mask = mask)


# fetching a dictionary full of the GPU's specifications
properties = triton.runtime.driver.active.utils.get_device_properties(DEVICE.index)
NUM_SM = properties["multiprocessor_count"]
NUM_REGS = properties["max_num_regs"]
TOTAL_SRAM_PER_SM = properties["max_shared_mem"]
WARP_SIZE = properties["warpSize"]  # 32


# wrapper function
def softmax(x):
    assert x.ndim == 2
    n_rows, n_cols = x.shape

    BLOCK_SIZE = triton.next_power_of_2(n_cols)

    num_wraps = 4
    if BLOCK_SIZE >= 2048:
        num_wraps = 8
    if BLOCK_SIZE >= 4096:
        num_wraps = 16

    num_stages = 4 if TOTAL_SRAM_PER_SM > 200_000 else 2

    y = torch.empty_like(x)

    # .warmup pre compiles kernel & tells us how many registers and how much shared memory it needs
    kernel = _softmax_kernel.warmup(x, y, x.stride(0), y.stride(0), n_rows, n_cols,
                                    BLOCK_SIZE=BLOCK_SIZE, num_stages=num_stages,
                                    num_wraps=num_wraps, grid=(1,))

    # info from the warmup process gave
    kernel._init_handles()
    n_regs = kernel.n_regs
    sram_needed_per_program = kernel.metadata.shared

    # reg based occupancy
    reg_occupancy = NUM_REGS // (n_regs * WARP_SIZE * num_wraps)
    # shared memory-based occupancy
    sram_occupancy = TOTAL_SRAM_PER_SM // sram_needed_per_program
    # determines how many programs can run per SM based on register usage and shared memory usage
    programs_per_sm = min(reg_occupancy, sram_occupancy)
    # how many programs to run in total
    num_programs = min(NUM_SM * programs_per_sm, n_rows)

    # grid config
    grid = (num_programs, 1, 1)

    # launch the kernelll
    kernel[grid](
        x, y,
        x.stride(0), y.stride(0),
        n_rows, n_cols,
    )
    return y


def test_softmax_kernel(size: tuple, atol = 1e-3, rtol = 1e-3, device=DEVICE):
    # creata input data
    torch.manual_seed(0)
    assert type(size) is tuple and len(size) == 2
    x = torch.randn(size[0], size[1], device=DEVICE)
    z_tri = softmax(x)
    z_ref = torch.softmax(x, axis=1)
    torch.testing.assert_close(z_tri, z_ref, atol=atol, rtol=rtol)
    print(f'The maximum difference between torch and triton is {torch.max(torch.abs(z_ref - z_tri))}')
    print("PASSEDDDDDDDDD")


@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names=['N'],
        x_vals=[128 * i for i in range(2, 100)],
        line_arg='provider',
        line_vals=['triton', 'torch'],
        line_names=["Triton", "Torch"],
        styles=[('blue', '-'), ('green', '-')],
        ylabel="GB/s",
        plot_name="softmax-performance",
        args={'M': 4096}  # values for function arguments not in x_names
    )
)
def benchmark(M, N, provider):
    # making the input data
    x = torch.randn(M, N, device=DEVICE, dtype=torch.float32)

    # these two lines ensure more accurate benchmarks; i usually forget to use them but it's not a big deal
    stream = getattr(torch, DEVICE.type).Stream()
    getattr(torch, DEVICE.type).set_stream(stream)

    if provider == 'torch':
        ms = triton.testing.do_bench(lambda: torch.softmax(x, axis=-1))
    if provider == 'triton':
        ms = triton.testing.do_bench(lambda: softmax(x))
    gbps = lambda ms: 2 * x.numel() * x.element_size() * 1e-9 / (ms * 1e-3)  # 2 = number of memory operations (1 read + 1 write)


if __name__ == "__main__":
    test_softmax_kernel(size=(1823, 781))

    import sys
    if len(sys.argv) > 1 and sys.argv[1] == "--benchmark":
        benchmark.run(save_path='/home/sweker/work/cuda/day42', print_data=False)
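The occupancy heuristic inside softmax() is easiest to follow with concrete numbers. The values below are hypothetical, chosen only to illustrate how reg_occupancy, sram_occupancy and num_programs combine; they are not read from the file above or from any particular GPU.

# Hypothetical worked example of the occupancy arithmetic used in softmax() above.
NUM_REGS = 65536                 # registers per SM (assumed)
WARP_SIZE = 32
TOTAL_SRAM_PER_SM = 101376       # bytes of shared memory per SM (assumed)
NUM_SM = 108                     # streaming multiprocessors (assumed)

n_regs = 32                      # registers per thread, as kernel.n_regs would report (assumed)
num_warps = 4
sram_needed_per_program = 8192   # bytes, as kernel.metadata.shared would report (assumed)
n_rows = 1823

reg_occupancy = NUM_REGS // (n_regs * WARP_SIZE * num_warps)    # 65536 // 4096 = 16
sram_occupancy = TOTAL_SRAM_PER_SM // sram_needed_per_program   # 101376 // 8192 = 12
programs_per_sm = min(reg_occupancy, sram_occupancy)            # 12
num_programs = min(NUM_SM * programs_per_sm, n_rows)            # min(1296, 1823) = 1296

print(num_programs)  # 1296 persistent programs, each looping over rows with stride num_programs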
indri-voice/vit.triton
vit/kernels/softmax.py
https://github.com/indri-voice/vit.triton/blob/55c80ab2985be2b31e7204f69ab2e804829fa2bb/vit/kernels/softmax.py
import torch import triton import triton.language as tl dtype = torch.float32 device = 'cuda:0' @triton.jit def softmax_kernel( input_ptr, input_batch_stride, input_row_stride, output_ptr, num_rows, num_cols, BLOCK_SIZE: tl.constexpr ): batch_id = tl.program_id(axis=0) row_id = tl.program_id(axis=1) batch_offset = batch_id * input_batch_stride row_offset = row_id * input_row_stride + tl.arange(0, BLOCK_SIZE) mask = tl.arange(0, BLOCK_SIZE) < num_cols data = tl.load(input_ptr + batch_offset + row_offset, mask, other=-float('inf')) data = data - tl.max(data, axis=0) row_wise_exp = tl.exp(data) row_wise_sum = tl.sum(row_wise_exp, axis=0) output = row_wise_exp/row_wise_sum tl.store(output_ptr + batch_offset + row_offset, output, mask=mask) def softmax_triton(A: torch.Tensor) -> torch.Tensor: """ Performs softmax on input. This function always performs softmax on the last axis of the input Args: A (torch.Tensor): Input matrix of size (B * N * D) Returns: {torch.Tensor}: Ouput tensor is of the same shape (B * N * D) """ assert A.is_cuda, "Input is not on GPU" assert len(A.shape) == 3, f"Input needs to be 3 dimensional, provided: {A.shape}" batch, rows, cols = A.shape output = torch.empty_like(A) BLOCK_SIZE = triton.next_power_of_2(cols) grid = (batch, rows, ) num_warps = 4 if BLOCK_SIZE >= 2048: num_warps = 8 if BLOCK_SIZE >= 4096: num_warps = 16 softmax_kernel[grid]( input_ptr=A, input_batch_stride=A.stride(0), input_row_stride=A.stride(1), output_ptr=output, num_rows=rows, num_cols=cols, BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps ) return output if __name__ == '__main__': A = torch.randint(0, 10, size=(1, 1823, 781), device=device, dtype=dtype) y_torch = torch.softmax(A, dim=-1) y_triton = softmax_triton(A) assert torch.allclose(y_triton, y_torch), "Data is not the same" print("Data is same") @triton.testing.perf_report( triton.testing.Benchmark( x_names=['D'], x_vals=[128 * i for i in range(2, 100)], line_arg='provider', line_vals=[ 'triton', 'torch', ], line_names=[ "Triton", "Torch (native)", ], styles=[('blue', '-'), ('green', '-')], ylabel="GB/s", plot_name="Performance", args={'B': 4, 'N': 4096}, # values for function arguments not in `x_names` and `y_name` )) def benchmark(B, N, D, provider): x = torch.randn(B, N, D, device=device, dtype=dtype) quantiles = [0.5, 0.2, 0.8] if provider == 'triton': ms, min_ms, max_ms = triton.testing.do_bench(lambda: softmax_triton(x), quantiles=quantiles) if provider == 'torch': ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.softmax(x, dim=-1), quantiles=quantiles) def gbps(ms): return 2 * x.nelement() * \ x.element_size() * 1e-9 / (ms * 1e-3) return gbps(ms), gbps(max_ms), gbps(min_ms) benchmark.run( #show_plots=True, print_data=True, save_path='./benchmarks/softmax/' )
@triton.jit
def softmax_kernel(
    input_ptr,
    input_batch_stride,
    input_row_stride,
    output_ptr,
    num_rows,
    num_cols,
    BLOCK_SIZE: tl.constexpr
):
    batch_id = tl.program_id(axis=0)
    row_id = tl.program_id(axis=1)

    batch_offset = batch_id * input_batch_stride
    row_offset = row_id * input_row_stride + tl.arange(0, BLOCK_SIZE)
    mask = tl.arange(0, BLOCK_SIZE) < num_cols

    data = tl.load(input_ptr + batch_offset + row_offset, mask, other=-float('inf'))
    data = data - tl.max(data, axis=0)
    row_wise_exp = tl.exp(data)
    row_wise_sum = tl.sum(row_wise_exp, axis=0)
    output = row_wise_exp/row_wise_sum

    tl.store(output_ptr + batch_offset + row_offset, output, mask=mask)


def softmax_triton(A: torch.Tensor) -> torch.Tensor:
    """
    Performs softmax on input. This function always performs softmax on the last axis of the input

    Args:
        A (torch.Tensor): Input matrix of size (B * N * D)

    Returns:
        {torch.Tensor}: Ouput tensor is of the same shape (B * N * D)
    """
    assert A.is_cuda, "Input is not on GPU"
    assert len(A.shape) == 3, f"Input needs to be 3 dimensional, provided: {A.shape}"

    batch, rows, cols = A.shape
    output = torch.empty_like(A)
    BLOCK_SIZE = triton.next_power_of_2(cols)
    grid = (batch, rows, )

    num_warps = 4
    if BLOCK_SIZE >= 2048:
        num_warps = 8
    if BLOCK_SIZE >= 4096:
        num_warps = 16

    softmax_kernel[grid](
        input_ptr=A,
        input_batch_stride=A.stride(0),
        input_row_stride=A.stride(1),
        output_ptr=output,
        num_rows=rows,
        num_cols=cols,
        BLOCK_SIZE=BLOCK_SIZE,
        num_warps=num_warps
    )

    return output


if __name__ == '__main__':
    A = torch.randint(0, 10, size=(1, 1823, 781), device=device, dtype=dtype)
    y_torch = torch.softmax(A, dim=-1)
    y_triton = softmax_triton(A)

    assert torch.allclose(y_triton, y_torch), "Data is not the same"
    print("Data is same")

    @triton.testing.perf_report(
        triton.testing.Benchmark(
            x_names=['D'],
            x_vals=[128 * i for i in range(2, 100)],
            line_arg='provider',
            line_vals=[
                'triton',
                'torch',
            ],
            line_names=[
                "Triton",
                "Torch (native)",
            ],
            styles=[('blue', '-'), ('green', '-')],
            ylabel="GB/s",
            plot_name="Performance",
            args={'B': 4, 'N': 4096},  # values for function arguments not in `x_names` and `y_name`
        ))
    def benchmark(B, N, D, provider):
        x = torch.randn(B, N, D, device=device, dtype=dtype)
        quantiles = [0.5, 0.2, 0.8]

        if provider == 'triton':
            ms, min_ms, max_ms = triton.testing.do_bench(lambda: softmax_triton(x), quantiles=quantiles)
        if provider == 'torch':
            ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.softmax(x, dim=-1), quantiles=quantiles)

        def gbps(ms):
            return 2 * x.nelement() * \
                x.element_size() * 1e-9 / (ms * 1e-3)

        return gbps(ms), gbps(max_ms), gbps(min_ms)

    benchmark.run(
        # show_plots=True,
        print_data=True,
        save_path='./benchmarks/softmax/'
    )
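softmax_triton above only accepts 3-D input, so higher-rank tensors have to be flattened first. The short usage sketch below assumes softmax_triton from the snippet above is in scope; the (batch, heads, seq, seq) attention-score shape is made up for illustration.

# Hedged usage sketch for softmax_triton above.
import torch

scores = torch.randn(2, 8, 196, 196, device='cuda', dtype=torch.float32)
b, h, n, d = scores.shape

# Flatten (batch, heads) into the single batch dimension the kernel expects,
# apply the row-wise softmax, then restore the original shape.
probs = softmax_triton(scores.reshape(b * h, n, d)).reshape(b, h, n, d)

torch.testing.assert_close(probs, torch.softmax(scores, dim=-1), atol=1e-3, rtol=1e-3)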
JohannesBertram/Cramming_NanoGPT
lion_pytorch/triton.py
https://github.com/JohannesBertram/Cramming_NanoGPT/blob/1f8a9af896a3b9793ac43089b7f087db76edb3d9/lion_pytorch/triton.py
import torch try: import triton import triton.language as tl except ImportError as e: print('triton is not installed, please install by running `pip install triton>=2.2.0`') exit() # triton cuda kernel @triton.autotune(configs = [ triton.Config({'BLOCK_SIZE': 128}, num_warps = 4), triton.Config({'BLOCK_SIZE': 1024}, num_warps = 8), ], key = ['n_elements'], restore_value=['p_ptr', 'exp_avg_ptr']) @triton.jit def update_fn_kernel( p_ptr, grad_ptr, exp_avg_ptr, lr, wd, beta1, beta2, n_elements, BLOCK_SIZE: tl.constexpr, ): pid = tl.program_id(axis = 0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements # offsetted pointers offset_p_ptr = p_ptr + offsets offset_grad_ptr = grad_ptr + offsets offset_exp_avg_ptr = exp_avg_ptr + offsets # load p = tl.load(offset_p_ptr, mask = mask) grad = tl.load(offset_grad_ptr, mask = mask) exp_avg = tl.load(offset_exp_avg_ptr, mask = mask) # stepweight decay p = p * (1 - lr * wd) # diff between momentum running average and grad diff = exp_avg - grad # weight update update = diff * beta1 + grad # torch.sign can_update = update != 0 update_sign = tl.where(update > 0, -lr, lr) p = p + update_sign * can_update # decay the momentum running average coefficient exp_avg = diff * beta2 + grad # store new params and momentum running average coefficient tl.store(offset_p_ptr, p, mask = mask) tl.store(offset_exp_avg_ptr, exp_avg, mask = mask) def update_fn( p: torch.Tensor, grad: torch.Tensor, exp_avg: torch.Tensor, lr: float, wd: float, beta1: float, beta2: float ): assert all([t.is_cuda for t in (p, grad, exp_avg)]) n_elements = p.numel() grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) update_fn_kernel[grid]( p, grad, exp_avg, lr, wd, beta1, beta2, n_elements )
@triton.jit
def update_fn_kernel(
    p_ptr,
    grad_ptr,
    exp_avg_ptr,
    lr,
    wd,
    beta1,
    beta2,
    n_elements,
    BLOCK_SIZE: tl.constexpr,
):
    pid = tl.program_id(axis = 0)

    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)

    mask = offsets < n_elements

    # offsetted pointers
    offset_p_ptr = p_ptr + offsets
    offset_grad_ptr = grad_ptr + offsets
    offset_exp_avg_ptr = exp_avg_ptr + offsets

    # load
    p = tl.load(offset_p_ptr, mask = mask)
    grad = tl.load(offset_grad_ptr, mask = mask)
    exp_avg = tl.load(offset_exp_avg_ptr, mask = mask)

    # stepweight decay
    p = p * (1 - lr * wd)

    # diff between momentum running average and grad
    diff = exp_avg - grad

    # weight update
    update = diff * beta1 + grad

    # torch.sign
    can_update = update != 0
    update_sign = tl.where(update > 0, -lr, lr)

    p = p + update_sign * can_update

    # decay the momentum running average coefficient
    exp_avg = diff * beta2 + grad

    # store new params and momentum running average coefficient
    tl.store(offset_p_ptr, p, mask = mask)
    tl.store(offset_exp_avg_ptr, exp_avg, mask = mask)


def update_fn(
    p: torch.Tensor,
    grad: torch.Tensor,
    exp_avg: torch.Tensor,
    lr: float,
    wd: float,
    beta1: float,
    beta2: float
):
    assert all([t.is_cuda for t in (p, grad, exp_avg)])
    n_elements = p.numel()

    grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)

    update_fn_kernel[grid](
        p,
        grad,
        exp_avg,
        lr,
        wd,
        beta1,
        beta2,
        n_elements
    )
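For checking the fused kernel, a dense PyTorch version of the same Lion update is handy. The function below is a reference sketch written from the kernel's arithmetic above, not code taken from the repository.

# Reference sketch of the Lion update performed by update_fn_kernel above,
# written in plain PyTorch for verification (assumed equivalent, not from the repo).
import torch

def lion_update_reference(p, grad, exp_avg, lr, wd, beta1, beta2):
    p = p * (1 - lr * wd)                            # stepweight (decoupled) decay
    update = exp_avg * beta1 + grad * (1 - beta1)    # interpolate momentum and gradient
    p = p - lr * torch.sign(update)                  # sign-based update (sign(0) == 0)
    exp_avg = exp_avg * beta2 + grad * (1 - beta2)   # decay the momentum running average
    return p, exp_avg

# Example check against the Triton path (assumes update_fn from above is in scope):
# p, g, m = (torch.randn(4096, device='cuda') for _ in range(3))
# p_ref, m_ref = lion_update_reference(p.clone(), g, m.clone(), 1e-4, 1e-2, 0.9, 0.99)
# update_fn(p, g, m, 1e-4, 1e-2, 0.9, 0.99)
# torch.testing.assert_close(p, p_ref); torch.testing.assert_close(m, m_ref)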
internationalwe/MedicalDataAnalysis2024
modules/triton.py
https://github.com/internationalwe/MedicalDataAnalysis2024/blob/36a7c4630fdd3f705d264a330cd19c6aa2c2f578/modules/triton.py
import torch try: import triton import triton.language as tl except ImportError as e: print('triton is not installed, please install by running `pip install triton>=2.2.0`') exit() # triton cuda kernel @triton.autotune(configs = [ triton.Config({'BLOCK_SIZE': 128}, num_warps = 4), triton.Config({'BLOCK_SIZE': 1024}, num_warps = 8), ], key = ['n_elements'], restore_value=['p_ptr', 'exp_avg_ptr']) @triton.jit def update_fn_kernel( p_ptr, grad_ptr, exp_avg_ptr, lr, wd, beta1, beta2, n_elements, BLOCK_SIZE: tl.constexpr, ): pid = tl.program_id(axis = 0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements # offsetted pointers offset_p_ptr = p_ptr + offsets offset_grad_ptr = grad_ptr + offsets offset_exp_avg_ptr = exp_avg_ptr + offsets # load p = tl.load(offset_p_ptr, mask = mask) grad = tl.load(offset_grad_ptr, mask = mask) exp_avg = tl.load(offset_exp_avg_ptr, mask = mask) # stepweight decay p = p * (1 - lr * wd) # diff between momentum running average and grad diff = exp_avg - grad # weight update update = diff * beta1 + grad # torch.sign can_update = update != 0 update_sign = tl.where(update > 0, -lr, lr) p = p + update_sign * can_update # decay the momentum running average coefficient exp_avg = diff * beta2 + grad # store new params and momentum running average coefficient tl.store(offset_p_ptr, p, mask = mask) tl.store(offset_exp_avg_ptr, exp_avg, mask = mask) def update_fn( p: torch.Tensor, grad: torch.Tensor, exp_avg: torch.Tensor, lr: float, wd: float, beta1: float, beta2: float ): assert all([t.is_cuda for t in (p, grad, exp_avg)]) n_elements = p.numel() grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) update_fn_kernel[grid]( p, grad, exp_avg, lr, wd, beta1, beta2, n_elements )
@triton.jit
def update_fn_kernel(
    p_ptr,
    grad_ptr,
    exp_avg_ptr,
    lr,
    wd,
    beta1,
    beta2,
    n_elements,
    BLOCK_SIZE: tl.constexpr,
):
    pid = tl.program_id(axis = 0)

    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)

    mask = offsets < n_elements

    # offsetted pointers
    offset_p_ptr = p_ptr + offsets
    offset_grad_ptr = grad_ptr + offsets
    offset_exp_avg_ptr = exp_avg_ptr + offsets

    # load
    p = tl.load(offset_p_ptr, mask = mask)
    grad = tl.load(offset_grad_ptr, mask = mask)
    exp_avg = tl.load(offset_exp_avg_ptr, mask = mask)

    # stepweight decay
    p = p * (1 - lr * wd)

    # diff between momentum running average and grad
    diff = exp_avg - grad

    # weight update
    update = diff * beta1 + grad

    # torch.sign
    can_update = update != 0
    update_sign = tl.where(update > 0, -lr, lr)

    p = p + update_sign * can_update

    # decay the momentum running average coefficient
    exp_avg = diff * beta2 + grad

    # store new params and momentum running average coefficient
    tl.store(offset_p_ptr, p, mask = mask)
    tl.store(offset_exp_avg_ptr, exp_avg, mask = mask)


def update_fn(
    p: torch.Tensor,
    grad: torch.Tensor,
    exp_avg: torch.Tensor,
    lr: float,
    wd: float,
    beta1: float,
    beta2: float
):
    assert all([t.is_cuda for t in (p, grad, exp_avg)])
    n_elements = p.numel()

    grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)

    update_fn_kernel[grid](
        p,
        grad,
        exp_avg,
        lr,
        wd,
        beta1,
        beta2,
        n_elements
    )
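update_fn is meant to be called once per parameter tensor per optimizer step. The loop below is a hypothetical driver showing where it would sit in a training step; the parameter shapes, loss and hyperparameters are invented for illustration, and update_fn from the snippet above is assumed to be in scope.

# Hypothetical driver loop for update_fn above.
import torch

params = [torch.randn(1024, 1024, device='cuda', requires_grad=True),
          torch.randn(1024, device='cuda', requires_grad=True)]
exp_avgs = [torch.zeros_like(p) for p in params]   # one momentum buffer per parameter

loss = sum((p ** 2).sum() for p in params)         # stand-in for a real model's loss
loss.backward()

with torch.no_grad():
    for p, m in zip(params, exp_avgs):
        update_fn(p, p.grad, m, lr=1e-4, wd=1e-2, beta1=0.9, beta2=0.99)
        p.grad = None                              # clear gradients for the next step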
ROCm/jax-triton
examples/add.py
https://github.com/ROCm/jax-triton/blob/a99df7998dab61241962a52e9350220e1a307e03/examples/add.py
# Copyright 2024 The jax_triton Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Addition example.""" import jax import jax.numpy as jnp import jax_triton as jt import triton import triton.language as tl @triton.jit def add_kernel( x_ptr, y_ptr, output_ptr, block_size: tl.constexpr, ): """Adds two vectors.""" pid = tl.program_id(axis=0) block_start = pid * block_size offsets = block_start + tl.arange(0, block_size) mask = offsets < 8 x = tl.load(x_ptr + offsets, mask=mask) y = tl.load(y_ptr + offsets, mask=mask) output = x + y tl.store(output_ptr + offsets, output, mask=mask) def add(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray: out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype) block_size = 8 grid = (triton.cdiv(x.size, block_size),) return jt.triton_call( x, y, kernel=add_kernel, out_shape=out_shape, grid=grid, block_size=block_size) def main(unused_argv): x_val = jnp.arange(8) y_val = jnp.arange(8, 16) print(add(x_val, y_val)) print(jax.jit(add)(x_val, y_val)) if __name__ == "__main__": from absl import app app.run(main)
@triton.jit
def add_kernel(
    x_ptr,
    y_ptr,
    output_ptr,
    block_size: tl.constexpr,
):
    """Adds two vectors."""
    pid = tl.program_id(axis=0)
    block_start = pid * block_size
    offsets = block_start + tl.arange(0, block_size)
    mask = offsets < 8
    x = tl.load(x_ptr + offsets, mask=mask)
    y = tl.load(y_ptr + offsets, mask=mask)
    output = x + y
    tl.store(output_ptr + offsets, output, mask=mask)


def add(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:
    out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
    block_size = 8
    grid = (triton.cdiv(x.size, block_size),)
    return jt.triton_call(
        x,
        y,
        kernel=add_kernel,
        out_shape=out_shape,
        grid=grid,
        block_size=block_size)


def main(unused_argv):
    x_val = jnp.arange(8)
    y_val = jnp.arange(8, 16)
    print(add(x_val, y_val))
    print(jax.jit(add)(x_val, y_val))


if __name__ == "__main__":
    from absl import app
    app.run(main)
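The example kernel hard-codes mask = offsets < 8, so it only handles length-8 inputs. The variant below is a sketch that generalizes it by passing the element count as an extra constexpr, mirroring how block_size is passed above; the _general names are invented here and are not part of the repository.

# Hedged generalization of add_kernel above: the element count becomes a constexpr
# metaparameter instead of the hard-coded 8.
import jax
import jax.numpy as jnp
import jax_triton as jt
import triton
import triton.language as tl


@triton.jit
def add_kernel_general(
    x_ptr,
    y_ptr,
    output_ptr,
    block_size: tl.constexpr,
    n_elements: tl.constexpr,
):
    pid = tl.program_id(axis=0)
    offsets = pid * block_size + tl.arange(0, block_size)
    mask = offsets < n_elements            # no longer assumes exactly 8 elements
    x = tl.load(x_ptr + offsets, mask=mask)
    y = tl.load(y_ptr + offsets, mask=mask)
    tl.store(output_ptr + offsets, x + y, mask=mask)


def add_general(x: jnp.ndarray, y: jnp.ndarray, block_size: int = 128) -> jnp.ndarray:
    out_shape = jax.ShapeDtypeStruct(shape=x.shape, dtype=x.dtype)
    grid = (triton.cdiv(x.size, block_size),)
    return jt.triton_call(
        x,
        y,
        kernel=add_kernel_general,
        out_shape=out_shape,
        grid=grid,
        block_size=block_size,
        n_elements=x.size)

Whether the count is better left as a regular kernel argument (to avoid recompiling per size) depends on usage; it is kept constexpr here only to follow the calling pattern of the original example.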
hustvl/mmMamba
fla/ops/hgrn/chunk.py
https://github.com/hustvl/mmMamba/blob/a710f4d8f7685214c5929b4beef8d9b7c5444735/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent: # # Performance: # seq_len chunk recurrent chunk_bwd recurrent_bwd # 0 128.0 0.039360 0.061056 0.312160 0.205008 # 1 256.0 0.045824 0.123712 0.308784 0.297696 # 2 512.0 0.058688 0.241952 0.310720 0.626528 # 3 1024.0 0.088288 0.476992 0.313184 1.333152 # 4 2048.0 0.169472 0.943264 0.452464 2.724864 # 5 4096.0 0.329920 1.886144 0.881600 5.551520 # 6 8192.0 0.647872 3.755040 1.740496 11.117184 # 7 16384.0 1.272064 7.520576 3.446608 22.362528 from typing import Tuple import torch import triton import triton.language as tl from fla.utils import contiguous @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_fwd_kernel_h( x, g, gc, o, h0, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D p_x = x + i_bh * T * D + i_t * BT * D + o_d p_g = g + i_bh * T * D + i_t * BT * D + o_d p_gc = gc + i_bh * T * D + i_t * BT * D + o_d p_o = o + i_bh * T * D + i_t * BT * D + o_d b_h = tl.zeros([BD], dtype=tl.float32) b_gc = tl.zeros([BD], dtype=tl.float32) if USE_INITIAL_STATE: if i_t == 0: b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32) for i in range(0, BT): mask_t = mask & ((i_t * BT + i) < T) b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32) b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32) b_h = tl.exp(b_g) * b_h + b_x b_gc = b_gc + b_g tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t) tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t) p_x += D p_g += D p_gc += D p_o += D @triton.jit def chunk_hgrn_fwd_kernel_o( gc, o, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(1, tl.cdiv(T, BT)): p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32) # [BT, BD] b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_o = b_o + tl.exp(b_gc) * b_h0[None, :] tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, 
num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_bwd_kernel_h( g, gc, dx, do, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D BC = min(BT, T - i_t * BT) NT = tl.num_programs(1) p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d if i_t == NT - 1: b_gc = tl.zeros([BD], dtype=tl.float32) else: b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32) b_dh = tl.zeros([BD], dtype=tl.float32) for _ in range(BC - 1, -1, -1): tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask) b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32) b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32) b_gc = b_gc + b_g b_dh = b_dh + b_do b_dx = b_dh b_dh = b_dh * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask) p_g -= D p_gc -= D p_dx -= D p_do -= D @triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32) # [BT, BD] b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1)) class ChunkHGRNFunction(torch.autograd.Function): @staticmethod @contiguous def forward(ctx, x, g, initial_state=None, output_final_state=False): B, H, T, D = x.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) o = torch.empty_like(x, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_fwd_kernel_h[grid]( x, g, gc, o, initial_state, T, D, BT=BT, USE_INITIAL_STATE=initial_state is not None ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_fwd_kernel_o[grid]( gc, o, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, 
num_warps=num_warps ) final_state = None if output_final_state: final_state = o[:, :, -1].clone() o = o.to(x.dtype) ctx.save_for_backward(g, o, initial_state) return o, final_state @staticmethod @contiguous def backward(ctx, do, dht=None): g, o, initial_state = ctx.saved_tensors B, H, T, D = do.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) dx = torch.empty_like(o) dg = torch.empty_like(g) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_bwd_kernel_h[grid]( g, gc, dx, do, T, D, BT=BT ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_bwd_kernel_o[grid]( g, gc, o, dx, dg, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, num_warps=num_warps ) if initial_state is not None: dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp() return dx, dg, None, None def chunk_hgrn( x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None, output_final_state: bool = False ) -> Tuple[torch.Tensor, torch.Tensor]: if initial_state is not None: initial_state = initial_state.detach() o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state) return o, final_state if __name__ == '__main__': import torch.nn.functional as F from fla.ops.hgrn.naive import naive_recurrent_hgrn from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn B, H, T, D = 8, 4, 512, 128 dtype = torch.bfloat16 torch.manual_seed(42) # [batch_size, n_heads, seq_len, d_head] x = torch.randn((B, H, T, D), dtype=dtype, device='cuda') g = torch.randn((B, H, T, D), dtype=dtype, device='cuda') x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g) print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}') print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}') x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) print(f"DTYPE:\t{x.dtype}") do = torch.randn_like(x) h0 = torch.randn_like(x[:, :, 0]) ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True) ref.backward(do) ref_dx, x.grad = x.grad.clone(), None ref_dg, g.grad = g.grad.clone(), None tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True) tri.backward(do) tri_dx, x.grad = x.grad.clone(), None tri_dg, g.grad = g.grad.clone(), None print(" \t DIFF\t MAX") print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}") print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}") print('Done!') @triton.testing.perf_report( triton.testing.Benchmark( # argument names to use as an x-axis for the plot x_names=['seq_len'], # different possible values for `x_name` x_vals=[128 * 2 ** i for i in range(0, 8)], # argument name whose value corresponds to a different line in the plot line_arg='provider', # possible values for `line_arg`` line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # label name for the lines line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # line styles styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')], ylabel="Execution Time (ms)", # label name for the y-axis # name for the plot. Used also as a file name for saving the plot. 
plot_name="Performance", args={}, ) ) def benchmark(seq_len, provider): dtype = torch.bfloat16 B, H, D = 16, 4, 128 x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda') g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid() x = (1 - g) * x x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) do = torch.randn_like(x, dtype=dtype) quantiles = [0.5, 0.2, 0.8] results = 0, 0, 0 if provider == 'chunk': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles) if provider == 'recurrent': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles) if provider == 'chunk_bwd': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles) if provider == 'recurrent_bwd': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles) return results benchmark.run(print_data=True)
@triton.jit
def chunk_hgrn_fwd_kernel_h(
    x,
    g,
    gc,
    o,
    h0,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr,
    USE_INITIAL_STATE: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D

    p_x = x + i_bh * T * D + i_t * BT * D + o_d
    p_g = g + i_bh * T * D + i_t * BT * D + o_d
    p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
    p_o = o + i_bh * T * D + i_t * BT * D + o_d

    b_h = tl.zeros([BD], dtype=tl.float32)
    b_gc = tl.zeros([BD], dtype=tl.float32)
    if USE_INITIAL_STATE:
        if i_t == 0:
            b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
    for i in range(0, BT):
        mask_t = mask & ((i_t * BT + i) < T)
        b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
        b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
        b_h = tl.exp(b_g) * b_h + b_x
        b_gc = b_gc + b_g
        tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
        tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)

        p_x += D
        p_g += D
        p_gc += D
        p_o += D
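The recurrence the chunked kernels implement is h_t = exp(g_t) * h_{t-1} + x_t, applied element-wise per batch, head and channel. The reference below is a sketch written from that formula for sanity-checking; it is not the repository's naive_recurrent_hgrn.

# Reference sketch of the element-wise gated recurrence computed above:
#   h_t = exp(g_t) * h_{t-1} + x_t
import torch

def hgrn_reference(x, g, h0=None):
    B, H, T, D = x.shape
    h = torch.zeros(B, H, D, dtype=torch.float32, device=x.device) if h0 is None else h0.float()
    o = torch.empty(B, H, T, D, dtype=torch.float32, device=x.device)
    for t in range(T):
        h = g[:, :, t].float().exp() * h + x[:, :, t].float()  # gated accumulation
        o[:, :, t] = h
    return o.to(x.dtype), h.to(x.dtype)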
hustvl/mmMamba
fla/ops/hgrn/chunk.py
https://github.com/hustvl/mmMamba/blob/a710f4d8f7685214c5929b4beef8d9b7c5444735/fla/ops/hgrn/chunk.py
@triton.jit
def chunk_hgrn_fwd_kernel_o(
    gc,
    o,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D

    for i_t in range(1, tl.cdiv(T, BT)):
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))

        # [BD,]
        b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
        # [BT, BD]
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
        tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))


@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
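chunk_hgrn_fwd_kernel_o stitches the per-chunk results together: each position within a chunk adds the carry-in state from the end of the previous chunk, scaled by the cumulative gate gc accumulated inside the current chunk. The dense PyTorch sketch below mirrors that recombination under an assumed [B*H, NT, BT, D] layout; it is illustrative only and not code from the repository.

# Dense sketch of the inter-chunk recombination done by chunk_hgrn_fwd_kernel_o above.
# Layout assumption (made here for illustration): o_local and gc are [B*H, NT, BT, D],
# where o_local holds within-chunk states and gc holds within-chunk cumulative gates.
import torch

def recombine_chunks(o_local, gc):
    o = o_local.clone()
    num_chunks = o.shape[1]
    for i_t in range(1, num_chunks):
        h_prev = o[:, i_t - 1, -1]                                   # carry-in: last state of previous chunk
        o[:, i_t] = o[:, i_t] + gc[:, i_t].exp() * h_prev[:, None]   # scale carry-in by cumulative gate
    return o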
hustvl/mmMamba
fla/ops/hgrn/chunk.py
https://github.com/hustvl/mmMamba/blob/a710f4d8f7685214c5929b4beef8d9b7c5444735/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*-
# Copyright (c) 2024, Yu Zhang, Songlin Yang

# this function implements the chunkwise form of HGRN, inspired by
# [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html)
# also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan

# from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent:
#
# Performance:
#    seq_len     chunk  recurrent  chunk_bwd  recurrent_bwd
# 0    128.0  0.039360   0.061056   0.312160       0.205008
# 1    256.0  0.045824   0.123712   0.308784       0.297696
# 2    512.0  0.058688   0.241952   0.310720       0.626528
# 3   1024.0  0.088288   0.476992   0.313184       1.333152
# 4   2048.0  0.169472   0.943264   0.452464       2.724864
# 5   4096.0  0.329920   1.886144   0.881600       5.551520
# 6   8192.0  0.647872   3.755040   1.740496      11.117184
# 7  16384.0  1.272064   7.520576   3.446608      22.362528

from typing import Tuple

import torch
import triton
import triton.language as tl

from fla.utils import contiguous


@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
@triton.jit
def chunk_hgrn_fwd_kernel_h(
    x,
    g,
    gc,
    o,
    h0,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr,
    USE_INITIAL_STATE: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D

    p_x = x + i_bh * T * D + i_t * BT * D + o_d
    p_g = g + i_bh * T * D + i_t * BT * D + o_d
    p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
    p_o = o + i_bh * T * D + i_t * BT * D + o_d

    b_h = tl.zeros([BD], dtype=tl.float32)
    b_gc = tl.zeros([BD], dtype=tl.float32)
    if USE_INITIAL_STATE:
        if i_t == 0:
            b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
    for i in range(0, BT):
        mask_t = mask & ((i_t * BT + i) < T)
        b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
        b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
        b_h = tl.exp(b_g) * b_h + b_x
        b_gc = b_gc + b_g
        tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
        tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)

        p_x += D
        p_g += D
        p_gc += D
        p_o += D


@triton.jit
def chunk_hgrn_fwd_kernel_o(
    gc,
    o,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D

    for i_t in range(1, tl.cdiv(T, BT)):
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))

        # [BD,]
        b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
        # [BT, BD]
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
        tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))


@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
@triton.jit
def chunk_hgrn_bwd_kernel_h(
    g,
    gc,
    dx,
    do,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    BC = min(BT, T - i_t * BT)
    NT = tl.num_programs(1)
    p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d

    if i_t == NT - 1:
        b_gc = tl.zeros([BD], dtype=tl.float32)
    else:
        b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
    b_dh = tl.zeros([BD], dtype=tl.float32)
    for _ in range(BC - 1, -1, -1):
        tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)

        b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
        b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)

        b_gc = b_gc + b_g
        b_dh = b_dh + b_do
        b_dx = b_dh
        b_dh = b_dh * tl.exp(b_g)

        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)

        p_g -= D
        p_gc -= D
        p_dx -= D
        p_do -= D


@triton.jit
def chunk_hgrn_bwd_kernel_o(
    g,
    gc,
    o,
    dx,
    dg,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D

    for i_t in range(tl.cdiv(T, BT) - 1, -1, -1):
        p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0))
        p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))

        # [BD,]
        mask_t = mask & ((i_t + 1) * BT < T)
        b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32)
        # [BT, BD]
        b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32)
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32)
        b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32)
        b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :]
        b_dg = b_o * b_dx * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1))
        tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))


class ChunkHGRNFunction(torch.autograd.Function):

    @staticmethod
    @contiguous
    def forward(ctx, x, g, initial_state=None, output_final_state=False):
        B, H, T, D = x.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4

        gc = torch.empty_like(g, dtype=torch.float)
        o = torch.empty_like(x, dtype=torch.float)
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_fwd_kernel_h[grid](
            x, g, gc, o, initial_state,
            T, D,
            BT=BT,
            USE_INITIAL_STATE=initial_state is not None
        )
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_fwd_kernel_o[grid](
            gc, o,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        final_state = None
        if output_final_state:
            final_state = o[:, :, -1].clone()
        o = o.to(x.dtype)
        ctx.save_for_backward(g, o, initial_state)
        return o, final_state

    @staticmethod
    @contiguous
    def backward(ctx, do, dht=None):
        g, o, initial_state = ctx.saved_tensors
        B, H, T, D = do.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4

        gc = torch.empty_like(g, dtype=torch.float)
        dx = torch.empty_like(o)
        dg = torch.empty_like(g)
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_bwd_kernel_h[grid](
            g, gc, dx, do,
            T, D,
            BT=BT
        )
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_bwd_kernel_o[grid](
            g, gc, o, dx, dg,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        if initial_state is not None:
            dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp()

        return dx, dg, None, None


def chunk_hgrn(
    x: torch.Tensor,
    g: torch.Tensor,
    initial_state: torch.Tensor = None,
    output_final_state: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
    if initial_state is not None:
        initial_state = initial_state.detach()
    o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state)
    return o, final_state


if __name__ == '__main__':
    import torch.nn.functional as F

    from fla.ops.hgrn.naive import naive_recurrent_hgrn
    from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn

    B, H, T, D = 8, 4, 512, 128
    dtype = torch.bfloat16
    torch.manual_seed(42)
    # [batch_size, n_heads, seq_len, d_head]
    x = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    g = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g)
    print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}')
    print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}')
    x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
    print(f"DTYPE:\t{x.dtype}")
    do = torch.randn_like(x)
    h0 = torch.randn_like(x[:, :, 0])

    ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True)
    ref.backward(do)
    ref_dx, x.grad = x.grad.clone(), None
    ref_dg, g.grad = g.grad.clone(), None

    tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True)
    tri.backward(do)
    tri_dx, x.grad = x.grad.clone(), None
    tri_dg, g.grad = g.grad.clone(), None

    print(" \t DIFF\t MAX")
    print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}")
    print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}")
    print('Done!')

    @triton.testing.perf_report(
        triton.testing.Benchmark(
            # argument names to use as an x-axis for the plot
            x_names=['seq_len'],
            # different possible values for `x_name`
            x_vals=[128 * 2 ** i for i in range(0, 8)],
            # argument name whose value corresponds to a different line in the plot
            line_arg='provider',
            # possible values for `line_arg``
            line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # label name for the lines
            line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # line styles
            styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')],
            ylabel="Execution Time (ms)",  # label name for the y-axis
            # name for the plot. Used also as a file name for saving the plot.
            plot_name="Performance",
            args={},
        )
    )
    def benchmark(seq_len, provider):
        dtype = torch.bfloat16
        B, H, D = 16, 4, 128

        x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda')
        g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid()
        x = (1 - g) * x
        x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
        do = torch.randn_like(x, dtype=dtype)
        quantiles = [0.5, 0.2, 0.8]
        results = 0, 0, 0
        if provider == 'chunk':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles)
        if provider == 'recurrent':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles)
        if provider == 'chunk_bwd':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        if provider == 'recurrent_bwd':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        return results

    benchmark.run(print_data=True)
@triton.jit
def chunk_hgrn_bwd_kernel_h(
    g,
    gc,
    dx,
    do,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    BC = min(BT, T - i_t * BT)
    NT = tl.num_programs(1)
    p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d

    if i_t == NT - 1:
        b_gc = tl.zeros([BD], dtype=tl.float32)
    else:
        b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
    b_dh = tl.zeros([BD], dtype=tl.float32)
    for _ in range(BC - 1, -1, -1):
        tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)

        b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
        b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)

        b_gc = b_gc + b_g
        b_dh = b_dh + b_do
        b_dx = b_dh
        b_dh = b_dh * tl.exp(b_g)

        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)

        p_g -= D
        p_gc -= D
        p_dx -= D
        p_do -= D
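The chunk above is the intra-chunk backward kernel: walking a block in reverse, it accumulates dh_t = do_t + exp(g_{t+1}) * dh_{t+1} and writes dx_t = dh_t, while gc collects the gates needed to splice blocks together; the cross-chunk terms and dg_t = h_{t-1} * dx_t * exp(g_t) are added later by chunk_hgrn_bwd_kernel_o. A full-sequence PyTorch sketch of that backward rule, written here only for illustration (the helper name and layout are mine, not the library's):

import torch


def naive_hgrn_backward(g, o, do, initial_state=None):
    # g: log-gates, o: forward outputs h_t, do: gradient w.r.t. o; all [B, H, T, D].
    B, H, T, D = do.shape
    carry = torch.zeros(B, H, D, dtype=torch.float, device=do.device)  # exp(g_{t+1}) * dh_{t+1}
    dx = torch.empty(B, H, T, D, dtype=torch.float, device=do.device)
    for t in range(T - 1, -1, -1):
        dh = do[:, :, t].float() + carry       # dh_t
        dx[:, :, t] = dh                       # dL/dx_t
        carry = dh * g[:, :, t].float().exp()  # carried to step t - 1
    h_prev = torch.empty_like(dx)
    h_prev[:, :, 1:] = o[:, :, :-1].float()
    h_prev[:, :, 0] = initial_state.float() if initial_state is not None else 0.0
    dg = h_prev * dx * g.float().exp()         # dL/dg_t = h_{t-1} * dh_t * exp(g_t)
    return dx.to(o.dtype), dg.to(g.dtype)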
hustvl/mmMamba
fla/ops/hgrn/chunk.py
https://github.com/hustvl/mmMamba/blob/a710f4d8f7685214c5929b4beef8d9b7c5444735/fla/ops/hgrn/chunk.py
AGENDD/RWKV-ASR
fla/ops/hgrn/chunk.py
https://github.com/AGENDD/RWKV-ASR/blob/1f3c0d90db76c426820112476f125e988fe16130/fla/ops/hgrn/chunk.py
@triton.jit
def chunk_hgrn_fwd_kernel_h(
    x,
    g,
    gc,
    o,
    h0,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr,
    USE_INITIAL_STATE: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D

    p_x = x + i_bh * T * D + i_t * BT * D + o_d
    p_g = g + i_bh * T * D + i_t * BT * D + o_d
    p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
    p_o = o + i_bh * T * D + i_t * BT * D + o_d

    b_h = tl.zeros([BD], dtype=tl.float32)
    b_gc = tl.zeros([BD], dtype=tl.float32)
    if USE_INITIAL_STATE:
        if i_t == 0:
            b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
    for i in range(0, BT):
        mask_t = mask & ((i_t * BT + i) < T)
        b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
        b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
        b_h = tl.exp(b_g) * b_h + b_x
        b_gc = b_gc + b_g
        tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
        tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)

        p_x += D
        p_g += D
        p_gc += D
        p_o += D
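This chunk is the intra-chunk half of the forward pass: each block of BT steps is scanned locally from a zero state (block 0 may start from h0), and gc keeps the running sum of the log-gates inside the block for the later inter-chunk correction. A rough PyTorch equivalent, written only to illustrate what gets stored (the function name and looping are mine, not part of the library):

import torch


def intra_chunk_forward(x, g, BT, h0=None):
    # x, g: [B, H, T, D]; returns per-block partial outputs o and cumulative gates gc.
    B, H, T, D = x.shape
    o = torch.empty(B, H, T, D, dtype=torch.float, device=x.device)
    gc = torch.empty_like(o)
    for start in range(0, T, BT):
        h = torch.zeros(B, H, D, dtype=torch.float, device=x.device)
        if start == 0 and h0 is not None:
            h = h + h0.float()
        acc_g = torch.zeros_like(h)
        for t in range(start, min(start + BT, T)):
            h = g[:, :, t].float().exp() * h + x[:, :, t].float()
            acc_g = acc_g + g[:, :, t].float()
            o[:, :, t] = h        # partial state, still missing the carry from earlier blocks
            gc[:, :, t] = acc_g   # sum of g over [start, t], used by the inter-chunk pass
    return o, gc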
AGENDD/RWKV-ASR
fla/ops/hgrn/chunk.py
https://github.com/AGENDD/RWKV-ASR/blob/1f3c0d90db76c426820112476f125e988fe16130/fla/ops/hgrn/chunk.py
@triton.jit
def chunk_hgrn_fwd_kernel_o(
    gc,
    o,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D

    for i_t in range(1, tl.cdiv(T, BT)):
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))

        # [BD,]
        b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
        # [BT, BD]
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
        tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))


@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
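This chunk is the inter-chunk half of the forward pass: once the local scans are done, the carry sitting at the last position of the previous block is pushed into the current block, scaled by exp(gc). Because blocks are visited in order within one program, each correction already sees the fully corrected previous block. An illustrative PyTorch sketch of the same correction (not library code):

import torch


def inter_chunk_forward(o, gc, BT):
    # o, gc: [B, H, T, D] as produced by the intra-chunk pass; o is corrected in place.
    T = o.shape[2]
    for start in range(BT, T, BT):
        end = min(start + BT, T)
        h_prev = o[:, :, start - 1]  # carry out of the (already corrected) previous block
        o[:, :, start:end] += gc[:, :, start:end].exp() * h_prev.unsqueeze(2)
    return o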
AGENDD/RWKV-ASR
fla/ops/hgrn/chunk.py
https://github.com/AGENDD/RWKV-ASR/blob/1f3c0d90db76c426820112476f125e988fe16130/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent: # # Performance: # seq_len chunk recurrent chunk_bwd recurrent_bwd # 0 128.0 0.039360 0.061056 0.312160 0.205008 # 1 256.0 0.045824 0.123712 0.308784 0.297696 # 2 512.0 0.058688 0.241952 0.310720 0.626528 # 3 1024.0 0.088288 0.476992 0.313184 1.333152 # 4 2048.0 0.169472 0.943264 0.452464 2.724864 # 5 4096.0 0.329920 1.886144 0.881600 5.551520 # 6 8192.0 0.647872 3.755040 1.740496 11.117184 # 7 16384.0 1.272064 7.520576 3.446608 22.362528 from typing import Tuple import torch import triton import triton.language as tl from fla.utils import contiguous @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_fwd_kernel_h( x, g, gc, o, h0, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D p_x = x + i_bh * T * D + i_t * BT * D + o_d p_g = g + i_bh * T * D + i_t * BT * D + o_d p_gc = gc + i_bh * T * D + i_t * BT * D + o_d p_o = o + i_bh * T * D + i_t * BT * D + o_d b_h = tl.zeros([BD], dtype=tl.float32) b_gc = tl.zeros([BD], dtype=tl.float32) if USE_INITIAL_STATE: if i_t == 0: b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32) for i in range(0, BT): mask_t = mask & ((i_t * BT + i) < T) b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32) b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32) b_h = tl.exp(b_g) * b_h + b_x b_gc = b_gc + b_g tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t) tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t) p_x += D p_g += D p_gc += D p_o += D @triton.jit def chunk_hgrn_fwd_kernel_o( gc, o, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(1, tl.cdiv(T, BT)): p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32) # [BT, BD] b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_o = b_o + tl.exp(b_gc) * b_h0[None, :] tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, 
num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_bwd_kernel_h( g, gc, dx, do, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D BC = min(BT, T - i_t * BT) NT = tl.num_programs(1) p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d if i_t == NT - 1: b_gc = tl.zeros([BD], dtype=tl.float32) else: b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32) b_dh = tl.zeros([BD], dtype=tl.float32) for _ in range(BC - 1, -1, -1): tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask) b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32) b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32) b_gc = b_gc + b_g b_dh = b_dh + b_do b_dx = b_dh b_dh = b_dh * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask) p_g -= D p_gc -= D p_dx -= D p_do -= D @triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32) # [BT, BD] b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1)) class ChunkHGRNFunction(torch.autograd.Function): @staticmethod @contiguous def forward(ctx, x, g, initial_state=None, output_final_state=False): B, H, T, D = x.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) o = torch.empty_like(x, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_fwd_kernel_h[grid]( x, g, gc, o, initial_state, T, D, BT=BT, USE_INITIAL_STATE=initial_state is not None ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_fwd_kernel_o[grid]( gc, o, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, 
num_warps=num_warps ) final_state = None if output_final_state: final_state = o[:, :, -1].clone() o = o.to(x.dtype) ctx.save_for_backward(g, o, initial_state) return o, final_state @staticmethod @contiguous def backward(ctx, do, dht=None): g, o, initial_state = ctx.saved_tensors B, H, T, D = do.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) dx = torch.empty_like(o) dg = torch.empty_like(g) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_bwd_kernel_h[grid]( g, gc, dx, do, T, D, BT=BT ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_bwd_kernel_o[grid]( g, gc, o, dx, dg, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, num_warps=num_warps ) if initial_state is not None: dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp() return dx, dg, None, None def chunk_hgrn( x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None, output_final_state: bool = False ) -> Tuple[torch.Tensor, torch.Tensor]: if initial_state is not None: initial_state = initial_state.detach() o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state) return o, final_state if __name__ == '__main__': import torch.nn.functional as F from fla.ops.hgrn.naive import naive_recurrent_hgrn from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn B, H, T, D = 8, 4, 512, 128 dtype = torch.bfloat16 torch.manual_seed(42) # [batch_size, n_heads, seq_len, d_head] x = torch.randn((B, H, T, D), dtype=dtype, device='cuda') g = torch.randn((B, H, T, D), dtype=dtype, device='cuda') x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g) print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}') print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}') x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) print(f"DTYPE:\t{x.dtype}") do = torch.randn_like(x) h0 = torch.randn_like(x[:, :, 0]) ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True) ref.backward(do) ref_dx, x.grad = x.grad.clone(), None ref_dg, g.grad = g.grad.clone(), None tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True) tri.backward(do) tri_dx, x.grad = x.grad.clone(), None tri_dg, g.grad = g.grad.clone(), None print(" \t DIFF\t MAX") print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}") print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}") print('Done!') @triton.testing.perf_report( triton.testing.Benchmark( # argument names to use as an x-axis for the plot x_names=['seq_len'], # different possible values for `x_name` x_vals=[128 * 2 ** i for i in range(0, 8)], # argument name whose value corresponds to a different line in the plot line_arg='provider', # possible values for `line_arg`` line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # label name for the lines line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # line styles styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')], ylabel="Execution Time (ms)", # label name for the y-axis # name for the plot. Used also as a file name for saving the plot. 
plot_name="Performance", args={}, ) ) def benchmark(seq_len, provider): dtype = torch.bfloat16 B, H, D = 16, 4, 128 x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda') g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid() x = (1 - g) * x x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) do = torch.randn_like(x, dtype=dtype) quantiles = [0.5, 0.2, 0.8] results = 0, 0, 0 if provider == 'chunk': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles) if provider == 'recurrent': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles) if provider == 'chunk_bwd': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles) if provider == 'recurrent_bwd': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles) return results benchmark.run(print_data=True)
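A note on the gate parameterization used in the self-test of the file above: it builds its gates as g = F.logsigmoid(z) and scales the input by (1 - z.sigmoid()), so every decay factor exp(g_t) = sigmoid(z_t) lies in (0, 1) and the scan behaves like a forget-gated interpolation. A tiny illustration of that parameterization (tensor names are ours, not part of the library):

import torch
import torch.nn.functional as F

z = torch.randn(8, 64)              # raw gate pre-activations
v = torch.randn(8, 64)              # raw inputs
g = F.logsigmoid(z)                 # log-gate: exp(g) = sigmoid(z) in (0, 1)
x = (1 - torch.sigmoid(z)) * v      # input scaled by the complementary gate
h_prev = torch.zeros(8, 64)
# one HGRN step: h = sigmoid(z) * h_prev + (1 - sigmoid(z)) * v
h = g.exp() * h_prev + x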
@triton.jit
def chunk_hgrn_bwd_kernel_h(
    g,
    gc,
    dx,
    do,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    BC = min(BT, T - i_t * BT)
    NT = tl.num_programs(1)
    p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d

    if i_t == NT - 1:
        b_gc = tl.zeros([BD], dtype=tl.float32)
    else:
        b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
    b_dh = tl.zeros([BD], dtype=tl.float32)
    for _ in range(BC - 1, -1, -1):
        tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)

        b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
        b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)

        b_gc = b_gc + b_g
        b_dh = b_dh + b_do
        b_dx = b_dh
        b_dh = b_dh * tl.exp(b_g)

        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)

        p_g -= D
        p_gc -= D
        p_dx -= D
        p_do -= D
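For readers tracing the backward kernel above, here is a plain sequential PyTorch sketch of the gradients it computes together with chunk_hgrn_bwd_kernel_o (shown in the full file), for the recurrence h_t = exp(g_t) * h_{t-1} + x_t. The function name, and the assumption that tensors are laid out as [B, H, T, D] with o being the saved forward output, are ours; this is a reference for understanding, not the library's API:

import torch

def hgrn_backward_reference(do, g, o, h0=None):
    """Sequential reference for the HGRN backward pass (sketch only).

    Forward: h_t = exp(g_t) * h_{t-1} + x_t, with o_t = h_t. Hence
        dx_t = dh_t = do_t + exp(g_{t+1}) * dh_{t+1}
        dg_t = exp(g_t) * o_{t-1} * dx_t   (o_{-1} is the initial state, or 0)
    """
    B, H, T, D = do.shape
    do, g, o = do.float(), g.float(), o.float()
    dx = torch.empty_like(do)
    dh = do.new_zeros(B, H, D)
    for t in range(T - 1, -1, -1):
        dh = do[:, :, t] + dh          # dh_t = do_t + exp(g_{t+1}) * dh_{t+1}
        dx[:, :, t] = dh
        dh = dh * g[:, :, t].exp()     # carry exp(g_t) * dh_t back to step t-1
    h_prev = do.new_zeros(B, H, 1, D) if h0 is None else h0.float()[:, :, None]
    o_prev = torch.cat([h_prev, o[:, :, :-1]], dim=2)
    dg = g.exp() * o_prev * dx
    return dx, dg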
AGENDD/RWKV-ASR
fla/ops/hgrn/chunk.py
https://github.com/AGENDD/RWKV-ASR/blob/1f3c0d90db76c426820112476f125e988fe16130/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent: # # Performance: # seq_len chunk recurrent chunk_bwd recurrent_bwd # 0 128.0 0.039360 0.061056 0.312160 0.205008 # 1 256.0 0.045824 0.123712 0.308784 0.297696 # 2 512.0 0.058688 0.241952 0.310720 0.626528 # 3 1024.0 0.088288 0.476992 0.313184 1.333152 # 4 2048.0 0.169472 0.943264 0.452464 2.724864 # 5 4096.0 0.329920 1.886144 0.881600 5.551520 # 6 8192.0 0.647872 3.755040 1.740496 11.117184 # 7 16384.0 1.272064 7.520576 3.446608 22.362528 from typing import Tuple import torch import triton import triton.language as tl from fla.utils import contiguous @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_fwd_kernel_h( x, g, gc, o, h0, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D p_x = x + i_bh * T * D + i_t * BT * D + o_d p_g = g + i_bh * T * D + i_t * BT * D + o_d p_gc = gc + i_bh * T * D + i_t * BT * D + o_d p_o = o + i_bh * T * D + i_t * BT * D + o_d b_h = tl.zeros([BD], dtype=tl.float32) b_gc = tl.zeros([BD], dtype=tl.float32) if USE_INITIAL_STATE: if i_t == 0: b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32) for i in range(0, BT): mask_t = mask & ((i_t * BT + i) < T) b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32) b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32) b_h = tl.exp(b_g) * b_h + b_x b_gc = b_gc + b_g tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t) tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t) p_x += D p_g += D p_gc += D p_o += D @triton.jit def chunk_hgrn_fwd_kernel_o( gc, o, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(1, tl.cdiv(T, BT)): p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32) # [BT, BD] b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_o = b_o + tl.exp(b_gc) * b_h0[None, :] tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, 
num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_bwd_kernel_h( g, gc, dx, do, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D BC = min(BT, T - i_t * BT) NT = tl.num_programs(1) p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d if i_t == NT - 1: b_gc = tl.zeros([BD], dtype=tl.float32) else: b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32) b_dh = tl.zeros([BD], dtype=tl.float32) for _ in range(BC - 1, -1, -1): tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask) b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32) b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32) b_gc = b_gc + b_g b_dh = b_dh + b_do b_dx = b_dh b_dh = b_dh * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask) p_g -= D p_gc -= D p_dx -= D p_do -= D @triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32) # [BT, BD] b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1)) class ChunkHGRNFunction(torch.autograd.Function): @staticmethod @contiguous def forward(ctx, x, g, initial_state=None, output_final_state=False): B, H, T, D = x.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) o = torch.empty_like(x, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_fwd_kernel_h[grid]( x, g, gc, o, initial_state, T, D, BT=BT, USE_INITIAL_STATE=initial_state is not None ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_fwd_kernel_o[grid]( gc, o, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, 
num_warps=num_warps ) final_state = None if output_final_state: final_state = o[:, :, -1].clone() o = o.to(x.dtype) ctx.save_for_backward(g, o, initial_state) return o, final_state @staticmethod @contiguous def backward(ctx, do, dht=None): g, o, initial_state = ctx.saved_tensors B, H, T, D = do.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) dx = torch.empty_like(o) dg = torch.empty_like(g) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_bwd_kernel_h[grid]( g, gc, dx, do, T, D, BT=BT ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_bwd_kernel_o[grid]( g, gc, o, dx, dg, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, num_warps=num_warps ) if initial_state is not None: dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp() return dx, dg, None, None def chunk_hgrn( x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None, output_final_state: bool = False ) -> Tuple[torch.Tensor, torch.Tensor]: if initial_state is not None: initial_state = initial_state.detach() o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state) return o, final_state if __name__ == '__main__': import torch.nn.functional as F from fla.ops.hgrn.naive import naive_recurrent_hgrn from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn B, H, T, D = 8, 4, 512, 128 dtype = torch.bfloat16 torch.manual_seed(42) # [batch_size, n_heads, seq_len, d_head] x = torch.randn((B, H, T, D), dtype=dtype, device='cuda') g = torch.randn((B, H, T, D), dtype=dtype, device='cuda') x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g) print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}') print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}') x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) print(f"DTYPE:\t{x.dtype}") do = torch.randn_like(x) h0 = torch.randn_like(x[:, :, 0]) ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True) ref.backward(do) ref_dx, x.grad = x.grad.clone(), None ref_dg, g.grad = g.grad.clone(), None tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True) tri.backward(do) tri_dx, x.grad = x.grad.clone(), None tri_dg, g.grad = g.grad.clone(), None print(" \t DIFF\t MAX") print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}") print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}") print('Done!') @triton.testing.perf_report( triton.testing.Benchmark( # argument names to use as an x-axis for the plot x_names=['seq_len'], # different possible values for `x_name` x_vals=[128 * 2 ** i for i in range(0, 8)], # argument name whose value corresponds to a different line in the plot line_arg='provider', # possible values for `line_arg`` line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # label name for the lines line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # line styles styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')], ylabel="Execution Time (ms)", # label name for the y-axis # name for the plot. Used also as a file name for saving the plot. 
plot_name="Performance", args={}, ) ) def benchmark(seq_len, provider): dtype = torch.bfloat16 B, H, D = 16, 4, 128 x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda') g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid() x = (1 - g) * x x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) do = torch.randn_like(x, dtype=dtype) quantiles = [0.5, 0.2, 0.8] results = 0, 0, 0 if provider == 'chunk': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles) if provider == 'recurrent': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles) if provider == 'chunk_bwd': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles) if provider == 'recurrent_bwd': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles) return results benchmark.run(print_data=True)
@triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32) # [BT, BD] b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1)) class ChunkHGRNFunction(torch.autograd.Function): @staticmethod @contiguous def forward(ctx, x, g, initial_state=None, output_final_state=False): B, H, T, D = x.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) o = torch.empty_like(x, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_fwd_kernel_h[grid]( x, g, gc, o, initial_state, T, D, BT=BT, USE_INITIAL_STATE=initial_state is not None ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_fwd_kernel_o[grid]( gc, o, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, num_warps=num_warps ) final_state = None if output_final_state: final_state = o[:, :, -1].clone() o = o.to(x.dtype) ctx.save_for_backward(g, o, initial_state) return o, final_state @staticmethod @contiguous def backward(ctx, do, dht=None): g, o, initial_state = ctx.saved_tensors B, H, T, D = do.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) dx = torch.empty_like(o) dg = torch.empty_like(g) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_bwd_kernel_h[grid]( g, gc, dx, do, T, D, BT=BT ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_bwd_kernel_o[grid]( g, gc, o, dx, dg, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, num_warps=num_warps ) if initial_state is not None: dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp() return dx, dg, None, None def chunk_hgrn( x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None, output_final_state: bool = False ) -> Tuple[torch.Tensor, torch.Tensor]: if initial_state is not None: initial_state = initial_state.detach() o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state) return o, final_state if __name__ == '__main__': import torch.nn.functional as F from fla.ops.hgrn.naive import naive_recurrent_hgrn from 
fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn B, H, T, D = 8, 4, 512, 128 dtype = torch.bfloat16 torch.manual_seed(42) # [batch_size, n_heads, seq_len, d_head] x = torch.randn((B, H, T, D), dtype=dtype, device='cuda') g = torch.randn((B, H, T, D), dtype=dtype, device='cuda') x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g) print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}') print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}') x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) print(f"DTYPE:\t{x.dtype}") do = torch.randn_like(x) h0 = torch.randn_like(x[:, :, 0]) ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True) ref.backward(do) ref_dx, x.grad = x.grad.clone(), None ref_dg, g.grad = g.grad.clone(), None tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True) tri.backward(do) tri_dx, x.grad = x.grad.clone(), None tri_dg, g.grad = g.grad.clone(), None print(" \t DIFF\t MAX") print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}") print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}") print('Done!') @triton.testing.perf_report( triton.testing.Benchmark( # argument names to use as an x-axis for the plot x_names=['seq_len'], # different possible values for `x_name` x_vals=[128 * 2 ** i for i in range(0, 8)], # argument name whose value corresponds to a different line in the plot line_arg='provider', # possible values for `line_arg`` line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # label name for the lines line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # line styles styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')], ylabel="Execution Time (ms)", # label name for the y-axis # name for the plot. Used also as a file name for saving the plot. plot_name="Performance", args={}, ) ) def benchmark(seq_len, provider): dtype = torch.bfloat16 B, H, D = 16, 4, 128 x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda') g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid() x = (1 - g) * x x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) do = torch.randn_like(x, dtype=dtype) quantiles = [0.5, 0.2, 0.8] results = 0, 0, 0 if provider == 'chunk': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles) if provider == 'recurrent': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles) if provider == 'chunk_bwd': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles) if provider == 'recurrent_bwd': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles) return results benchmark.run(print_data=True)
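As a quick way to exercise this file end to end, the snippet below compares chunk_hgrn against the naive recurrence the repository also ships, mirroring the self-test in __main__ but with the chunked kernel in place of the fused recurrent one. It assumes a CUDA device and that the fla package (including fla.ops.hgrn.naive) is importable; the shapes and dtypes are choices of ours:

import torch
import torch.nn.functional as F
from fla.ops.hgrn.chunk import chunk_hgrn
from fla.ops.hgrn.naive import naive_recurrent_hgrn

B, H, T, D = 2, 4, 512, 128
x = torch.randn(B, H, T, D, device='cuda', dtype=torch.bfloat16)
g = F.logsigmoid(torch.randn_like(x))
h0 = torch.zeros(B, H, D, device='cuda', dtype=torch.bfloat16)

ref, _ = naive_recurrent_hgrn(x, g, h0, output_final_state=True)
tri, _ = chunk_hgrn(x, g, h0, output_final_state=True)
print(f"max |ref - tri| = {float((ref.float() - tri.float()).abs().max()):.6f}")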
yynil/RWKV_LM_EXT
fla/ops/hgrn/chunk.py
https://github.com/yynil/RWKV_LM_EXT/blob/ac0d4724ce0aae0469676569e8deb78f0de390a1/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent: # # Performance: # seq_len chunk recurrent chunk_bwd recurrent_bwd # 0 128.0 0.039360 0.061056 0.312160 0.205008 # 1 256.0 0.045824 0.123712 0.308784 0.297696 # 2 512.0 0.058688 0.241952 0.310720 0.626528 # 3 1024.0 0.088288 0.476992 0.313184 1.333152 # 4 2048.0 0.169472 0.943264 0.452464 2.724864 # 5 4096.0 0.329920 1.886144 0.881600 5.551520 # 6 8192.0 0.647872 3.755040 1.740496 11.117184 # 7 16384.0 1.272064 7.520576 3.446608 22.362528 from typing import Tuple import torch import triton import triton.language as tl from fla.utils import contiguous @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_fwd_kernel_h( x, g, gc, o, h0, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D p_x = x + i_bh * T * D + i_t * BT * D + o_d p_g = g + i_bh * T * D + i_t * BT * D + o_d p_gc = gc + i_bh * T * D + i_t * BT * D + o_d p_o = o + i_bh * T * D + i_t * BT * D + o_d b_h = tl.zeros([BD], dtype=tl.float32) b_gc = tl.zeros([BD], dtype=tl.float32) if USE_INITIAL_STATE: if i_t == 0: b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32) for i in range(0, BT): mask_t = mask & ((i_t * BT + i) < T) b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32) b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32) b_h = tl.exp(b_g) * b_h + b_x b_gc = b_gc + b_g tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t) tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t) p_x += D p_g += D p_gc += D p_o += D @triton.jit def chunk_hgrn_fwd_kernel_o( gc, o, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(1, tl.cdiv(T, BT)): p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32) # [BT, BD] b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_o = b_o + tl.exp(b_gc) * b_h0[None, :] tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, 
num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_bwd_kernel_h( g, gc, dx, do, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D BC = min(BT, T - i_t * BT) NT = tl.num_programs(1) p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d if i_t == NT - 1: b_gc = tl.zeros([BD], dtype=tl.float32) else: b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32) b_dh = tl.zeros([BD], dtype=tl.float32) for _ in range(BC - 1, -1, -1): tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask) b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32) b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32) b_gc = b_gc + b_g b_dh = b_dh + b_do b_dx = b_dh b_dh = b_dh * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask) p_g -= D p_gc -= D p_dx -= D p_do -= D @triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32) # [BT, BD] b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1)) class ChunkHGRNFunction(torch.autograd.Function): @staticmethod @contiguous def forward(ctx, x, g, initial_state=None, output_final_state=False): B, H, T, D = x.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) o = torch.empty_like(x, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_fwd_kernel_h[grid]( x, g, gc, o, initial_state, T, D, BT=BT, USE_INITIAL_STATE=initial_state is not None ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_fwd_kernel_o[grid]( gc, o, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, 
num_warps=num_warps ) final_state = None if output_final_state: final_state = o[:, :, -1].clone() o = o.to(x.dtype) ctx.save_for_backward(g, o, initial_state) return o, final_state @staticmethod @contiguous def backward(ctx, do, dht=None): g, o, initial_state = ctx.saved_tensors B, H, T, D = do.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) dx = torch.empty_like(o) dg = torch.empty_like(g) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_bwd_kernel_h[grid]( g, gc, dx, do, T, D, BT=BT ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_bwd_kernel_o[grid]( g, gc, o, dx, dg, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, num_warps=num_warps ) if initial_state is not None: dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp() return dx, dg, None, None def chunk_hgrn( x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None, output_final_state: bool = False ) -> Tuple[torch.Tensor, torch.Tensor]: if initial_state is not None: initial_state = initial_state.detach() o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state) return o, final_state if __name__ == '__main__': import torch.nn.functional as F from fla.ops.hgrn.naive import naive_recurrent_hgrn from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn B, H, T, D = 8, 4, 512, 128 dtype = torch.bfloat16 torch.manual_seed(42) # [batch_size, n_heads, seq_len, d_head] x = torch.randn((B, H, T, D), dtype=dtype, device='cuda') g = torch.randn((B, H, T, D), dtype=dtype, device='cuda') x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g) print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}') print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}') x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) print(f"DTYPE:\t{x.dtype}") do = torch.randn_like(x) h0 = torch.randn_like(x[:, :, 0]) ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True) ref.backward(do) ref_dx, x.grad = x.grad.clone(), None ref_dg, g.grad = g.grad.clone(), None tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True) tri.backward(do) tri_dx, x.grad = x.grad.clone(), None tri_dg, g.grad = g.grad.clone(), None print(" \t DIFF\t MAX") print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}") print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}") print('Done!') @triton.testing.perf_report( triton.testing.Benchmark( # argument names to use as an x-axis for the plot x_names=['seq_len'], # different possible values for `x_name` x_vals=[128 * 2 ** i for i in range(0, 8)], # argument name whose value corresponds to a different line in the plot line_arg='provider', # possible values for `line_arg`` line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # label name for the lines line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # line styles styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')], ylabel="Execution Time (ms)", # label name for the y-axis # name for the plot. Used also as a file name for saving the plot. 
plot_name="Performance", args={}, ) ) def benchmark(seq_len, provider): dtype = torch.bfloat16 B, H, D = 16, 4, 128 x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda') g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid() x = (1 - g) * x x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) do = torch.randn_like(x, dtype=dtype) quantiles = [0.5, 0.2, 0.8] results = 0, 0, 0 if provider == 'chunk': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles) if provider == 'recurrent': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles) if provider == 'chunk_bwd': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles) if provider == 'recurrent_bwd': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles) return results benchmark.run(print_data=True)
@triton.jit
def chunk_hgrn_fwd_kernel_h(
    x,
    g,
    gc,
    o,
    h0,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr,
    USE_INITIAL_STATE: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D

    p_x = x + i_bh * T * D + i_t * BT * D + o_d
    p_g = g + i_bh * T * D + i_t * BT * D + o_d
    p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
    p_o = o + i_bh * T * D + i_t * BT * D + o_d

    b_h = tl.zeros([BD], dtype=tl.float32)
    b_gc = tl.zeros([BD], dtype=tl.float32)
    if USE_INITIAL_STATE:
        if i_t == 0:
            b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
    for i in range(0, BT):
        mask_t = mask & ((i_t * BT + i) < T)
        b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
        b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
        b_h = tl.exp(b_g) * b_h + b_x
        b_gc = b_gc + b_g
        tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
        tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)

        p_x += D
        p_g += D
        p_gc += D
        p_o += D
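chunk_hgrn_fwd_kernel_h above runs this scan inside each chunk of BT steps, starting from a zero state (plus the optional h0 for the very first chunk) and accumulating the log-gates into gc along the way. For reference, the full sequential recurrence it chunks is simply the loop below; the helper name and the float32 accumulator are our choices:

import torch

def hgrn_forward_reference(x, g, h0=None):
    """Sequential reference: h_t = exp(g_t) * h_{t-1} + x_t, elementwise over D."""
    B, H, T, D = x.shape
    h = x.new_zeros(B, H, D, dtype=torch.float) if h0 is None else h0.float()
    o = torch.empty(B, H, T, D, dtype=torch.float, device=x.device)
    for t in range(T):
        h = g[:, :, t].float().exp() * h + x[:, :, t].float()
        o[:, :, t] = h
    return o.to(x.dtype), h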
yynil/RWKV_LM_EXT
fla/ops/hgrn/chunk.py
https://github.com/yynil/RWKV_LM_EXT/blob/ac0d4724ce0aae0469676569e8deb78f0de390a1/fla/ops/hgrn/chunk.py
@triton.jit
def chunk_hgrn_fwd_kernel_o(
    gc,
    o,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D

    for i_t in range(1, tl.cdiv(T, BT)):
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))

        # [BD,]
        b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
        # [BT, BD]
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
        tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))


@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
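chunk_hgrn_fwd_kernel_o above is the second, inter-chunk phase: each chunk's locally scanned output is corrected by exp(gc) times the last (already corrected) state of the previous chunk. A chunk-level PyTorch sketch of that two-phase scheme, ignoring the optional initial state for brevity (the function name and the chunking loop are ours):

import torch

def chunkwise_hgrn_reference(x, g, chunk_size=128):
    """Chunked reference: local scan per chunk, then an inter-chunk correction."""
    B, H, T, D = x.shape
    x, g = x.float(), g.float()
    o = torch.empty_like(x)
    h = x.new_zeros(B, H, D)                      # state carried across chunks
    for s in range(0, T, chunk_size):
        e = min(s + chunk_size, T)
        # phase 1: intra-chunk scan with a zero incoming state (as in fwd_kernel_h)
        h_loc = x.new_zeros(B, H, D)
        for t in range(s, e):
            h_loc = g[:, :, t].exp() * h_loc + x[:, :, t]
            o[:, :, t] = h_loc
        # phase 2: add the carried state scaled by cumulative gates (as in fwd_kernel_o)
        gc = g[:, :, s:e].cumsum(dim=2)
        o[:, :, s:e] += gc.exp() * h[:, :, None]
        h = o[:, :, e - 1]
    return o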
yynil/RWKV_LM_EXT
fla/ops/hgrn/chunk.py
https://github.com/yynil/RWKV_LM_EXT/blob/ac0d4724ce0aae0469676569e8deb78f0de390a1/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent: # # Performance: # seq_len chunk recurrent chunk_bwd recurrent_bwd # 0 128.0 0.039360 0.061056 0.312160 0.205008 # 1 256.0 0.045824 0.123712 0.308784 0.297696 # 2 512.0 0.058688 0.241952 0.310720 0.626528 # 3 1024.0 0.088288 0.476992 0.313184 1.333152 # 4 2048.0 0.169472 0.943264 0.452464 2.724864 # 5 4096.0 0.329920 1.886144 0.881600 5.551520 # 6 8192.0 0.647872 3.755040 1.740496 11.117184 # 7 16384.0 1.272064 7.520576 3.446608 22.362528 from typing import Tuple import torch import triton import triton.language as tl from fla.utils import contiguous @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_fwd_kernel_h( x, g, gc, o, h0, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D p_x = x + i_bh * T * D + i_t * BT * D + o_d p_g = g + i_bh * T * D + i_t * BT * D + o_d p_gc = gc + i_bh * T * D + i_t * BT * D + o_d p_o = o + i_bh * T * D + i_t * BT * D + o_d b_h = tl.zeros([BD], dtype=tl.float32) b_gc = tl.zeros([BD], dtype=tl.float32) if USE_INITIAL_STATE: if i_t == 0: b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32) for i in range(0, BT): mask_t = mask & ((i_t * BT + i) < T) b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32) b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32) b_h = tl.exp(b_g) * b_h + b_x b_gc = b_gc + b_g tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t) tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t) p_x += D p_g += D p_gc += D p_o += D @triton.jit def chunk_hgrn_fwd_kernel_o( gc, o, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(1, tl.cdiv(T, BT)): p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32) # [BT, BD] b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_o = b_o + tl.exp(b_gc) * b_h0[None, :] tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, 
num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_bwd_kernel_h( g, gc, dx, do, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D BC = min(BT, T - i_t * BT) NT = tl.num_programs(1) p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d if i_t == NT - 1: b_gc = tl.zeros([BD], dtype=tl.float32) else: b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32) b_dh = tl.zeros([BD], dtype=tl.float32) for _ in range(BC - 1, -1, -1): tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask) b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32) b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32) b_gc = b_gc + b_g b_dh = b_dh + b_do b_dx = b_dh b_dh = b_dh * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask) p_g -= D p_gc -= D p_dx -= D p_do -= D @triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32) # [BT, BD] b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1)) class ChunkHGRNFunction(torch.autograd.Function): @staticmethod @contiguous def forward(ctx, x, g, initial_state=None, output_final_state=False): B, H, T, D = x.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) o = torch.empty_like(x, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_fwd_kernel_h[grid]( x, g, gc, o, initial_state, T, D, BT=BT, USE_INITIAL_STATE=initial_state is not None ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_fwd_kernel_o[grid]( gc, o, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, 
num_warps=num_warps ) final_state = None if output_final_state: final_state = o[:, :, -1].clone() o = o.to(x.dtype) ctx.save_for_backward(g, o, initial_state) return o, final_state @staticmethod @contiguous def backward(ctx, do, dht=None): g, o, initial_state = ctx.saved_tensors B, H, T, D = do.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) dx = torch.empty_like(o) dg = torch.empty_like(g) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_bwd_kernel_h[grid]( g, gc, dx, do, T, D, BT=BT ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_bwd_kernel_o[grid]( g, gc, o, dx, dg, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, num_warps=num_warps ) if initial_state is not None: dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp() return dx, dg, None, None def chunk_hgrn( x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None, output_final_state: bool = False ) -> Tuple[torch.Tensor, torch.Tensor]: if initial_state is not None: initial_state = initial_state.detach() o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state) return o, final_state if __name__ == '__main__': import torch.nn.functional as F from fla.ops.hgrn.naive import naive_recurrent_hgrn from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn B, H, T, D = 8, 4, 512, 128 dtype = torch.bfloat16 torch.manual_seed(42) # [batch_size, n_heads, seq_len, d_head] x = torch.randn((B, H, T, D), dtype=dtype, device='cuda') g = torch.randn((B, H, T, D), dtype=dtype, device='cuda') x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g) print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}') print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}') x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) print(f"DTYPE:\t{x.dtype}") do = torch.randn_like(x) h0 = torch.randn_like(x[:, :, 0]) ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True) ref.backward(do) ref_dx, x.grad = x.grad.clone(), None ref_dg, g.grad = g.grad.clone(), None tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True) tri.backward(do) tri_dx, x.grad = x.grad.clone(), None tri_dg, g.grad = g.grad.clone(), None print(" \t DIFF\t MAX") print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}") print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}") print('Done!') @triton.testing.perf_report( triton.testing.Benchmark( # argument names to use as an x-axis for the plot x_names=['seq_len'], # different possible values for `x_name` x_vals=[128 * 2 ** i for i in range(0, 8)], # argument name whose value corresponds to a different line in the plot line_arg='provider', # possible values for `line_arg`` line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # label name for the lines line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # line styles styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')], ylabel="Execution Time (ms)", # label name for the y-axis # name for the plot. Used also as a file name for saving the plot. 
plot_name="Performance", args={}, ) ) def benchmark(seq_len, provider): dtype = torch.bfloat16 B, H, D = 16, 4, 128 x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda') g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid() x = (1 - g) * x x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) do = torch.randn_like(x, dtype=dtype) quantiles = [0.5, 0.2, 0.8] results = 0, 0, 0 if provider == 'chunk': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles) if provider == 'recurrent': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles) if provider == 'chunk_bwd': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles) if provider == 'recurrent_bwd': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles) return results benchmark.run(print_data=True)
@triton.jit
def chunk_hgrn_bwd_kernel_h(
    g,
    gc,
    dx,
    do,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    BC = min(BT, T - i_t * BT)
    NT = tl.num_programs(1)
    p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d

    if i_t == NT - 1:
        b_gc = tl.zeros([BD], dtype=tl.float32)
    else:
        b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
    b_dh = tl.zeros([BD], dtype=tl.float32)
    for _ in range(BC - 1, -1, -1):
        tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)
        b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
        b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)
        b_gc = b_gc + b_g
        b_dh = b_dh + b_do
        b_dx = b_dh
        b_dh = b_dh * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)

        p_g -= D
        p_gc -= D
        p_dx -= D
        p_do -= D
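All of the kernels in this file implement, in one form or another, the element-wise gated recurrence o_t = exp(g_t) * o_{t-1} + x_t. As a plain-PyTorch anchor for reading the chunked kernels above and below, here is a minimal, unoptimized sketch of that recurrence (the function name is illustrative; the library's own reference lives in fla.ops.hgrn.naive):

import torch

def hgrn_scan_reference(x: torch.Tensor, g: torch.Tensor, h0: torch.Tensor = None):
    # x, g: [B, H, T, D]; g holds log-space gates, so exp(g) is the forget factor.
    # h0: optional initial state of shape [B, H, D].
    B, H, T, D = x.shape
    h = torch.zeros(B, H, D, dtype=torch.float, device=x.device) if h0 is None else h0.float()
    o = torch.empty(B, H, T, D, dtype=torch.float, device=x.device)
    for t in range(T):
        h = g[:, :, t].float().exp() * h + x[:, :, t].float()
        o[:, :, t] = h
    return o.to(x.dtype), h  # per-step outputs and final state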
yynil/RWKV_LM_EXT
fla/ops/hgrn/chunk.py
https://github.com/yynil/RWKV_LM_EXT/blob/ac0d4724ce0aae0469676569e8deb78f0de390a1/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent: # # Performance: # seq_len chunk recurrent chunk_bwd recurrent_bwd # 0 128.0 0.039360 0.061056 0.312160 0.205008 # 1 256.0 0.045824 0.123712 0.308784 0.297696 # 2 512.0 0.058688 0.241952 0.310720 0.626528 # 3 1024.0 0.088288 0.476992 0.313184 1.333152 # 4 2048.0 0.169472 0.943264 0.452464 2.724864 # 5 4096.0 0.329920 1.886144 0.881600 5.551520 # 6 8192.0 0.647872 3.755040 1.740496 11.117184 # 7 16384.0 1.272064 7.520576 3.446608 22.362528 from typing import Tuple import torch import triton import triton.language as tl from fla.utils import contiguous @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_fwd_kernel_h( x, g, gc, o, h0, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D p_x = x + i_bh * T * D + i_t * BT * D + o_d p_g = g + i_bh * T * D + i_t * BT * D + o_d p_gc = gc + i_bh * T * D + i_t * BT * D + o_d p_o = o + i_bh * T * D + i_t * BT * D + o_d b_h = tl.zeros([BD], dtype=tl.float32) b_gc = tl.zeros([BD], dtype=tl.float32) if USE_INITIAL_STATE: if i_t == 0: b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32) for i in range(0, BT): mask_t = mask & ((i_t * BT + i) < T) b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32) b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32) b_h = tl.exp(b_g) * b_h + b_x b_gc = b_gc + b_g tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t) tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t) p_x += D p_g += D p_gc += D p_o += D @triton.jit def chunk_hgrn_fwd_kernel_o( gc, o, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(1, tl.cdiv(T, BT)): p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32) # [BT, BD] b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_o = b_o + tl.exp(b_gc) * b_h0[None, :] tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, 
num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_bwd_kernel_h( g, gc, dx, do, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D BC = min(BT, T - i_t * BT) NT = tl.num_programs(1) p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d if i_t == NT - 1: b_gc = tl.zeros([BD], dtype=tl.float32) else: b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32) b_dh = tl.zeros([BD], dtype=tl.float32) for _ in range(BC - 1, -1, -1): tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask) b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32) b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32) b_gc = b_gc + b_g b_dh = b_dh + b_do b_dx = b_dh b_dh = b_dh * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask) p_g -= D p_gc -= D p_dx -= D p_do -= D @triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32) # [BT, BD] b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1)) class ChunkHGRNFunction(torch.autograd.Function): @staticmethod @contiguous def forward(ctx, x, g, initial_state=None, output_final_state=False): B, H, T, D = x.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) o = torch.empty_like(x, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_fwd_kernel_h[grid]( x, g, gc, o, initial_state, T, D, BT=BT, USE_INITIAL_STATE=initial_state is not None ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_fwd_kernel_o[grid]( gc, o, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, 
num_warps=num_warps ) final_state = None if output_final_state: final_state = o[:, :, -1].clone() o = o.to(x.dtype) ctx.save_for_backward(g, o, initial_state) return o, final_state @staticmethod @contiguous def backward(ctx, do, dht=None): g, o, initial_state = ctx.saved_tensors B, H, T, D = do.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) dx = torch.empty_like(o) dg = torch.empty_like(g) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_bwd_kernel_h[grid]( g, gc, dx, do, T, D, BT=BT ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_bwd_kernel_o[grid]( g, gc, o, dx, dg, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, num_warps=num_warps ) if initial_state is not None: dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp() return dx, dg, None, None def chunk_hgrn( x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None, output_final_state: bool = False ) -> Tuple[torch.Tensor, torch.Tensor]: if initial_state is not None: initial_state = initial_state.detach() o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state) return o, final_state if __name__ == '__main__': import torch.nn.functional as F from fla.ops.hgrn.naive import naive_recurrent_hgrn from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn B, H, T, D = 8, 4, 512, 128 dtype = torch.bfloat16 torch.manual_seed(42) # [batch_size, n_heads, seq_len, d_head] x = torch.randn((B, H, T, D), dtype=dtype, device='cuda') g = torch.randn((B, H, T, D), dtype=dtype, device='cuda') x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g) print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}') print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}') x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) print(f"DTYPE:\t{x.dtype}") do = torch.randn_like(x) h0 = torch.randn_like(x[:, :, 0]) ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True) ref.backward(do) ref_dx, x.grad = x.grad.clone(), None ref_dg, g.grad = g.grad.clone(), None tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True) tri.backward(do) tri_dx, x.grad = x.grad.clone(), None tri_dg, g.grad = g.grad.clone(), None print(" \t DIFF\t MAX") print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}") print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}") print('Done!') @triton.testing.perf_report( triton.testing.Benchmark( # argument names to use as an x-axis for the plot x_names=['seq_len'], # different possible values for `x_name` x_vals=[128 * 2 ** i for i in range(0, 8)], # argument name whose value corresponds to a different line in the plot line_arg='provider', # possible values for `line_arg`` line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # label name for the lines line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # line styles styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')], ylabel="Execution Time (ms)", # label name for the y-axis # name for the plot. Used also as a file name for saving the plot. 
plot_name="Performance", args={}, ) ) def benchmark(seq_len, provider): dtype = torch.bfloat16 B, H, D = 16, 4, 128 x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda') g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid() x = (1 - g) * x x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) do = torch.randn_like(x, dtype=dtype) quantiles = [0.5, 0.2, 0.8] results = 0, 0, 0 if provider == 'chunk': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles) if provider == 'recurrent': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles) if provider == 'chunk_bwd': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles) if provider == 'recurrent_bwd': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles) return results benchmark.run(print_data=True)
@triton.jit
def chunk_hgrn_bwd_kernel_o(
    g,
    gc,
    o,
    dx,
    dg,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D

    for i_t in range(tl.cdiv(T, BT) - 1, -1, -1):
        p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0))
        p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        # [BD,]
        mask_t = mask & ((i_t + 1) * BT < T)
        b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32)
        # [BT, BD]
        b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32)
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32)
        b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32)
        b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :]
        b_dg = b_o * b_dx * tl.exp(b_g)
        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1))
        tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1))


class ChunkHGRNFunction(torch.autograd.Function):

    @staticmethod
    @contiguous
    def forward(ctx, x, g, initial_state=None, output_final_state=False):
        B, H, T, D = x.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4

        gc = torch.empty_like(g, dtype=torch.float)
        o = torch.empty_like(x, dtype=torch.float)
        # intra-chunk scan, which also materializes the cumulative gates gc
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_fwd_kernel_h[grid](
            x, g, gc, o, initial_state,
            T, D,
            BT=BT,
            USE_INITIAL_STATE=initial_state is not None
        )
        # cross-chunk correction of the per-chunk outputs
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_fwd_kernel_o[grid](
            gc, o,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        final_state = None
        if output_final_state:
            final_state = o[:, :, -1].clone()
        o = o.to(x.dtype)
        ctx.save_for_backward(g, o, initial_state)
        return o, final_state

    @staticmethod
    @contiguous
    def backward(ctx, do, dht=None):
        g, o, initial_state = ctx.saved_tensors
        B, H, T, D = do.shape
        BT, BD = 128, min(64, triton.next_power_of_2(D))
        num_warps = 8 if BD == 64 else 4

        gc = torch.empty_like(g, dtype=torch.float)
        dx = torch.empty_like(o)
        dg = torch.empty_like(g)
        # reversed intra-chunk scan for dx
        def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H)
        chunk_hgrn_bwd_kernel_h[grid](
            g, gc, dx, do,
            T, D,
            BT=BT
        )
        # cross-chunk correction of dx and computation of dg
        def grid(meta): return (triton.cdiv(D, meta['BD']), B * H)
        chunk_hgrn_bwd_kernel_o[grid](
            g, gc, o, dx, dg,
            o.stride(1), o.stride(2), o.stride(3),
            T, D,
            BT=BT, BD=BD,
            num_warps=num_warps
        )
        if initial_state is not None:
            dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp()

        return dx, dg, None, None


def chunk_hgrn(
    x: torch.Tensor,
    g: torch.Tensor,
    initial_state: torch.Tensor = None,
    output_final_state: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
    if initial_state is not None:
        initial_state = initial_state.detach()
    o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state)
    return o, final_state


if __name__ == '__main__':
    import torch.nn.functional as F

    from fla.ops.hgrn.naive import naive_recurrent_hgrn
    from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn

    B, H, T, D = 8, 4, 512, 128
    dtype = torch.bfloat16
    torch.manual_seed(42)
    # [batch_size, n_heads, seq_len, d_head]
    x = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    g = torch.randn((B, H, T, D), dtype=dtype, device='cuda')
    x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g)
    print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}')
    print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}')
    x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
    print(f"DTYPE:\t{x.dtype}")
    do = torch.randn_like(x)
    h0 = torch.randn_like(x[:, :, 0])

    ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True)
    ref.backward(do)
    ref_dx, x.grad = x.grad.clone(), None
    ref_dg, g.grad = g.grad.clone(), None

    tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True)
    tri.backward(do)
    tri_dx, x.grad = x.grad.clone(), None
    tri_dg, g.grad = g.grad.clone(), None

    print(" \t DIFF\t MAX")
    print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}")
    print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}")
    print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}")
    print('Done!')

    @triton.testing.perf_report(
        triton.testing.Benchmark(
            # argument names to use as an x-axis for the plot
            x_names=['seq_len'],
            # different possible values for `x_name`
            x_vals=[128 * 2 ** i for i in range(0, 8)],
            # argument name whose value corresponds to a different line in the plot
            line_arg='provider',
            # possible values for `line_arg`
            line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # label name for the lines
            line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'],
            # line styles
            styles=[('green', '-'), ('blue', '--'), ('red', '-.'),
                    ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')],
            ylabel="Execution Time (ms)",  # label name for the y-axis
            # name for the plot. Used also as a file name for saving the plot.
            plot_name="Performance",
            args={},
        )
    )
    def benchmark(seq_len, provider):
        dtype = torch.bfloat16
        B, H, D = 16, 4, 128

        x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda')
        g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid()
        x = (1 - g) * x
        x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g))
        do = torch.randn_like(x, dtype=dtype)
        quantiles = [0.5, 0.2, 0.8]
        results = 0, 0, 0
        if provider == 'chunk':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles)
        if provider == 'recurrent':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles)
        if provider == 'chunk_bwd':
            results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        if provider == 'recurrent_bwd':
            results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles)
        return results

    benchmark.run(print_data=True)
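Putting the pieces together, a minimal usage sketch of the chunk_hgrn entry point defined above (shapes and the logsigmoid gate preprocessing mirror the __main__ harness; a CUDA device and an importable fla package are assumed):

import torch
import torch.nn.functional as F

from fla.ops.hgrn.chunk import chunk_hgrn

B, H, T, D = 2, 4, 1024, 128
x = torch.randn(B, H, T, D, dtype=torch.bfloat16, device='cuda', requires_grad=True)
# Gates are passed in log space (g <= 0, so exp(g) lies in (0, 1]), matching the
# logsigmoid preprocessing used in the test harness above.
g = F.logsigmoid(torch.randn(B, H, T, D, device='cuda')).to(torch.bfloat16).requires_grad_()
h0 = torch.zeros(B, H, D, dtype=torch.bfloat16, device='cuda')

o, ht = chunk_hgrn(x, g, initial_state=h0, output_final_state=True)
o.sum().backward()  # fills x.grad and g.grad via ChunkHGRNFunction.backward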
Quixon-w/WechatRobot
模型关键代码/RWKV-Runner/finetune/lora/v6/fla/ops/hgrn/chunk.py
https://github.com/Quixon-w/WechatRobot/blob/149012c7eb3095afe63ba0876bd42b2c482176e3/%E6%A8%A1%E5%9E%8B%E5%85%B3%E9%94%AE%E4%BB%A3%E7%A0%81/RWKV-Runner/finetune/lora/v6/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent: # # Performance: # seq_len chunk recurrent chunk_bwd recurrent_bwd # 0 128.0 0.039360 0.061056 0.312160 0.205008 # 1 256.0 0.045824 0.123712 0.308784 0.297696 # 2 512.0 0.058688 0.241952 0.310720 0.626528 # 3 1024.0 0.088288 0.476992 0.313184 1.333152 # 4 2048.0 0.169472 0.943264 0.452464 2.724864 # 5 4096.0 0.329920 1.886144 0.881600 5.551520 # 6 8192.0 0.647872 3.755040 1.740496 11.117184 # 7 16384.0 1.272064 7.520576 3.446608 22.362528 from typing import Tuple import torch import triton import triton.language as tl from fla.utils import contiguous @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_fwd_kernel_h( x, g, gc, o, h0, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D p_x = x + i_bh * T * D + i_t * BT * D + o_d p_g = g + i_bh * T * D + i_t * BT * D + o_d p_gc = gc + i_bh * T * D + i_t * BT * D + o_d p_o = o + i_bh * T * D + i_t * BT * D + o_d b_h = tl.zeros([BD], dtype=tl.float32) b_gc = tl.zeros([BD], dtype=tl.float32) if USE_INITIAL_STATE: if i_t == 0: b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32) for i in range(0, BT): mask_t = mask & ((i_t * BT + i) < T) b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32) b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32) b_h = tl.exp(b_g) * b_h + b_x b_gc = b_gc + b_g tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t) tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t) p_x += D p_g += D p_gc += D p_o += D @triton.jit def chunk_hgrn_fwd_kernel_o( gc, o, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(1, tl.cdiv(T, BT)): p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32) # [BT, BD] b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_o = b_o + tl.exp(b_gc) * b_h0[None, :] tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, 
num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_bwd_kernel_h( g, gc, dx, do, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D BC = min(BT, T - i_t * BT) NT = tl.num_programs(1) p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d if i_t == NT - 1: b_gc = tl.zeros([BD], dtype=tl.float32) else: b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32) b_dh = tl.zeros([BD], dtype=tl.float32) for _ in range(BC - 1, -1, -1): tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask) b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32) b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32) b_gc = b_gc + b_g b_dh = b_dh + b_do b_dx = b_dh b_dh = b_dh * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask) p_g -= D p_gc -= D p_dx -= D p_do -= D @triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32) # [BT, BD] b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1)) class ChunkHGRNFunction(torch.autograd.Function): @staticmethod @contiguous def forward(ctx, x, g, initial_state=None, output_final_state=False): B, H, T, D = x.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) o = torch.empty_like(x, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_fwd_kernel_h[grid]( x, g, gc, o, initial_state, T, D, BT=BT, USE_INITIAL_STATE=initial_state is not None ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_fwd_kernel_o[grid]( gc, o, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, 
num_warps=num_warps ) final_state = None if output_final_state: final_state = o[:, :, -1].clone() o = o.to(x.dtype) ctx.save_for_backward(g, o, initial_state) return o, final_state @staticmethod @contiguous def backward(ctx, do, dht=None): g, o, initial_state = ctx.saved_tensors B, H, T, D = do.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) dx = torch.empty_like(o) dg = torch.empty_like(g) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_bwd_kernel_h[grid]( g, gc, dx, do, T, D, BT=BT ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_bwd_kernel_o[grid]( g, gc, o, dx, dg, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, num_warps=num_warps ) if initial_state is not None: dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp() return dx, dg, None, None def chunk_hgrn( x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None, output_final_state: bool = False ) -> Tuple[torch.Tensor, torch.Tensor]: if initial_state is not None: initial_state = initial_state.detach() o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state) return o, final_state if __name__ == '__main__': import torch.nn.functional as F from fla.ops.hgrn.naive import naive_recurrent_hgrn from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn B, H, T, D = 8, 4, 512, 128 dtype = torch.bfloat16 torch.manual_seed(42) # [batch_size, n_heads, seq_len, d_head] x = torch.randn((B, H, T, D), dtype=dtype, device='cuda') g = torch.randn((B, H, T, D), dtype=dtype, device='cuda') x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g) print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}') print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}') x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) print(f"DTYPE:\t{x.dtype}") do = torch.randn_like(x) h0 = torch.randn_like(x[:, :, 0]) ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True) ref.backward(do) ref_dx, x.grad = x.grad.clone(), None ref_dg, g.grad = g.grad.clone(), None tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True) tri.backward(do) tri_dx, x.grad = x.grad.clone(), None tri_dg, g.grad = g.grad.clone(), None print(" \t DIFF\t MAX") print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}") print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}") print('Done!') @triton.testing.perf_report( triton.testing.Benchmark( # argument names to use as an x-axis for the plot x_names=['seq_len'], # different possible values for `x_name` x_vals=[128 * 2 ** i for i in range(0, 8)], # argument name whose value corresponds to a different line in the plot line_arg='provider', # possible values for `line_arg`` line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # label name for the lines line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # line styles styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')], ylabel="Execution Time (ms)", # label name for the y-axis # name for the plot. Used also as a file name for saving the plot. 
plot_name="Performance", args={}, ) ) def benchmark(seq_len, provider): dtype = torch.bfloat16 B, H, D = 16, 4, 128 x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda') g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid() x = (1 - g) * x x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) do = torch.randn_like(x, dtype=dtype) quantiles = [0.5, 0.2, 0.8] results = 0, 0, 0 if provider == 'chunk': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles) if provider == 'recurrent': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles) if provider == 'chunk_bwd': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles) if provider == 'recurrent_bwd': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles) return results benchmark.run(print_data=True)
@triton.jit
def chunk_hgrn_fwd_kernel_h(
    x,
    g,
    gc,
    o,
    h0,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr,
    USE_INITIAL_STATE: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D

    p_x = x + i_bh * T * D + i_t * BT * D + o_d
    p_g = g + i_bh * T * D + i_t * BT * D + o_d
    p_gc = gc + i_bh * T * D + i_t * BT * D + o_d
    p_o = o + i_bh * T * D + i_t * BT * D + o_d

    b_h = tl.zeros([BD], dtype=tl.float32)
    b_gc = tl.zeros([BD], dtype=tl.float32)
    if USE_INITIAL_STATE:
        if i_t == 0:
            b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32)
    for i in range(0, BT):
        mask_t = mask & ((i_t * BT + i) < T)
        b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32)
        b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32)
        b_h = tl.exp(b_g) * b_h + b_x
        b_gc = b_gc + b_g
        tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t)
        tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t)

        p_x += D
        p_g += D
        p_gc += D
        p_o += D
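chunk_hgrn_fwd_kernel_h above performs only the intra-chunk part of the scan: every length-BT chunk is scanned from a zero state (the initial state h0 enters only the very first chunk), while gc accumulates the gates so the second kernel can later propagate state across chunk boundaries. A rough plain-PyTorch restatement of that behaviour, illustrative only and not part of the library's API:

import torch

def intra_chunk_scan_reference(x, g, BT=128, h0=None):
    # Mirrors chunk_hgrn_fwd_kernel_h: per-chunk scan from zero plus cumulative gates.
    B, H, T, D = x.shape
    o = torch.empty(B, H, T, D, dtype=torch.float, device=x.device)
    gc = torch.empty_like(o)
    for start in range(0, T, BT):
        h = h0.float() if (start == 0 and h0 is not None) else torch.zeros(B, H, D, device=x.device)
        acc = torch.zeros(B, H, D, device=x.device)
        for t in range(start, min(start + BT, T)):
            h = g[:, :, t].float().exp() * h + x[:, :, t].float()
            acc = acc + g[:, :, t].float()
            o[:, :, t], gc[:, :, t] = h, acc
    return o, gc  # both are only partial results until the fix-up kernel runs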
Quixon-w/WechatRobot
模型关键代码/RWKV-Runner/finetune/lora/v6/fla/ops/hgrn/chunk.py
https://github.com/Quixon-w/WechatRobot/blob/149012c7eb3095afe63ba0876bd42b2c482176e3/%E6%A8%A1%E5%9E%8B%E5%85%B3%E9%94%AE%E4%BB%A3%E7%A0%81/RWKV-Runner/finetune/lora/v6/fla/ops/hgrn/chunk.py
@triton.jit
def chunk_hgrn_fwd_kernel_o(
    gc,
    o,
    s_h,
    s_t,
    s_d,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_bh = tl.program_id(0), tl.program_id(1)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D

    for i_t in range(1, tl.cdiv(T, BT)):
        p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0))
        # [BD,]
        b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32)
        # [BT, BD]
        b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32)
        b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32)
        b_o = b_o + tl.exp(b_gc) * b_h0[None, :]
        tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1))


@triton.autotune(
    configs=[
        triton.Config({'BD': 32}, num_warps=1),
        triton.Config({'BD': 32}, num_warps=2),
        triton.Config({'BD': 32}, num_warps=4),
        triton.Config({'BD': 32}, num_warps=8),
        triton.Config({'BD': 64}, num_warps=1),
        triton.Config({'BD': 64}, num_warps=2),
        triton.Config({'BD': 64}, num_warps=4),
        triton.Config({'BD': 64}, num_warps=8),
        triton.Config({'BD': 128}, num_warps=1),
        triton.Config({'BD': 128}, num_warps=2),
        triton.Config({'BD': 128}, num_warps=4),
        triton.Config({'BD': 128}, num_warps=8),
    ],
    key=['D']
)
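The second forward kernel, chunk_hgrn_fwd_kernel_o, then stitches the chunks together: for every chunk after the first, the already corrected last state of the previous chunk is injected, scaled by the cumulative gates gc. In plain PyTorch this amounts to the sketch below (o and gc are the float32 buffers produced by the intra-chunk kernel; the function name is illustrative):

import torch

def inter_chunk_fixup_reference(o: torch.Tensor, gc: torch.Tensor, BT: int = 128) -> torch.Tensor:
    # Chunks must be visited left to right, because each correction reads the
    # already-corrected state o[:, :, start - 1] of the previous chunk.
    T = o.shape[2]
    for start in range(BT, T, BT):
        h_prev = o[:, :, start - 1]                      # [B, H, D]
        end = min(start + BT, T)
        o[:, :, start:end] += gc[:, :, start:end].exp() * h_prev.unsqueeze(2)
    return o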
Quixon-w/WechatRobot
模型关键代码/RWKV-Runner/finetune/lora/v6/fla/ops/hgrn/chunk.py
https://github.com/Quixon-w/WechatRobot/blob/149012c7eb3095afe63ba0876bd42b2c482176e3/%E6%A8%A1%E5%9E%8B%E5%85%B3%E9%94%AE%E4%BB%A3%E7%A0%81/RWKV-Runner/finetune/lora/v6/fla/ops/hgrn/chunk.py
@triton.jit
def chunk_hgrn_bwd_kernel_h(
    g,
    gc,
    dx,
    do,
    T: tl.constexpr,
    D: tl.constexpr,
    BT: tl.constexpr,
    BD: tl.constexpr
):
    i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2)
    o_d = i_d * BD + tl.arange(0, BD)
    mask = o_d < D
    BC = min(BT, T - i_t * BT)
    NT = tl.num_programs(1)
    p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d
    if i_t == NT - 1:
        b_gc = tl.zeros([BD], dtype=tl.float32)
    else:
        b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32)
    b_dh = tl.zeros([BD], dtype=tl.float32)
    for _ in range(BC - 1, -1, -1):
        tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask)
        b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32)
        b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32)

        b_gc = b_gc + b_g
        b_dh = b_dh + b_do
        b_dx = b_dh
        b_dh = b_dh * tl.exp(b_g)

        tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask)

        p_g -= D
        p_gc -= D
        p_dx -= D
        p_do -= D
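A minimal PyTorch sketch of the full-sequence reverse recurrence that the chunked backward kernels jointly realize, useful for grad-checking; naive_hgrn_bwd is a hypothetical helper name, not part of the fla package. It assumes the forward pass was h_t = exp(g_t) * h_{t-1} + x_t with the hidden states saved in o.

import torch

def naive_hgrn_bwd(g, o, do, h0=None):
    # dL/dh_t = do_t + exp(g_{t+1}) * dL/dh_{t+1};  dx_t = dL/dh_t;  dg_t = h_{t-1} * dx_t * exp(g_t)
    B, H, T, D = do.shape
    dh = torch.zeros(B, H, D, dtype=torch.float, device=do.device)
    dx = torch.empty(B, H, T, D, dtype=torch.float, device=do.device)
    dg = torch.empty(B, H, T, D, dtype=torch.float, device=do.device)
    for t in range(T - 1, -1, -1):
        dh = dh + do[:, :, t].float()
        dx[:, :, t] = dh
        if t > 0:
            h_prev = o[:, :, t - 1].float()
        else:
            h_prev = h0.float() if h0 is not None else torch.zeros_like(dh)
        dg[:, :, t] = h_prev * dh * g[:, :, t].float().exp()
        dh = dh * g[:, :, t].float().exp()
    return dx.to(do.dtype), dg.to(g.dtype)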
Quixon-w/WechatRobot
模型关键代码/RWKV-Runner/finetune/lora/v6/fla/ops/hgrn/chunk.py
https://github.com/Quixon-w/WechatRobot/blob/149012c7eb3095afe63ba0876bd42b2c482176e3/%E6%A8%A1%E5%9E%8B%E5%85%B3%E9%94%AE%E4%BB%A3%E7%A0%81/RWKV-Runner/finetune/lora/v6/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent: # # Performance: # seq_len chunk recurrent chunk_bwd recurrent_bwd # 0 128.0 0.039360 0.061056 0.312160 0.205008 # 1 256.0 0.045824 0.123712 0.308784 0.297696 # 2 512.0 0.058688 0.241952 0.310720 0.626528 # 3 1024.0 0.088288 0.476992 0.313184 1.333152 # 4 2048.0 0.169472 0.943264 0.452464 2.724864 # 5 4096.0 0.329920 1.886144 0.881600 5.551520 # 6 8192.0 0.647872 3.755040 1.740496 11.117184 # 7 16384.0 1.272064 7.520576 3.446608 22.362528 from typing import Tuple import torch import triton import triton.language as tl from fla.utils import contiguous @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_fwd_kernel_h( x, g, gc, o, h0, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D p_x = x + i_bh * T * D + i_t * BT * D + o_d p_g = g + i_bh * T * D + i_t * BT * D + o_d p_gc = gc + i_bh * T * D + i_t * BT * D + o_d p_o = o + i_bh * T * D + i_t * BT * D + o_d b_h = tl.zeros([BD], dtype=tl.float32) b_gc = tl.zeros([BD], dtype=tl.float32) if USE_INITIAL_STATE: if i_t == 0: b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32) for i in range(0, BT): mask_t = mask & ((i_t * BT + i) < T) b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32) b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32) b_h = tl.exp(b_g) * b_h + b_x b_gc = b_gc + b_g tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t) tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t) p_x += D p_g += D p_gc += D p_o += D @triton.jit def chunk_hgrn_fwd_kernel_o( gc, o, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(1, tl.cdiv(T, BT)): p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32) # [BT, BD] b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_o = b_o + tl.exp(b_gc) * b_h0[None, :] tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, 
num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_bwd_kernel_h( g, gc, dx, do, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D BC = min(BT, T - i_t * BT) NT = tl.num_programs(1) p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d if i_t == NT - 1: b_gc = tl.zeros([BD], dtype=tl.float32) else: b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32) b_dh = tl.zeros([BD], dtype=tl.float32) for _ in range(BC - 1, -1, -1): tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask) b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32) b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32) b_gc = b_gc + b_g b_dh = b_dh + b_do b_dx = b_dh b_dh = b_dh * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask) p_g -= D p_gc -= D p_dx -= D p_do -= D @triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32) # [BT, BD] b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1)) class ChunkHGRNFunction(torch.autograd.Function): @staticmethod @contiguous def forward(ctx, x, g, initial_state=None, output_final_state=False): B, H, T, D = x.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) o = torch.empty_like(x, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_fwd_kernel_h[grid]( x, g, gc, o, initial_state, T, D, BT=BT, USE_INITIAL_STATE=initial_state is not None ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_fwd_kernel_o[grid]( gc, o, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, 
num_warps=num_warps ) final_state = None if output_final_state: final_state = o[:, :, -1].clone() o = o.to(x.dtype) ctx.save_for_backward(g, o, initial_state) return o, final_state @staticmethod @contiguous def backward(ctx, do, dht=None): g, o, initial_state = ctx.saved_tensors B, H, T, D = do.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) dx = torch.empty_like(o) dg = torch.empty_like(g) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_bwd_kernel_h[grid]( g, gc, dx, do, T, D, BT=BT ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_bwd_kernel_o[grid]( g, gc, o, dx, dg, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, num_warps=num_warps ) if initial_state is not None: dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp() return dx, dg, None, None def chunk_hgrn( x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None, output_final_state: bool = False ) -> Tuple[torch.Tensor, torch.Tensor]: if initial_state is not None: initial_state = initial_state.detach() o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state) return o, final_state if __name__ == '__main__': import torch.nn.functional as F from fla.ops.hgrn.naive import naive_recurrent_hgrn from fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn B, H, T, D = 8, 4, 512, 128 dtype = torch.bfloat16 torch.manual_seed(42) # [batch_size, n_heads, seq_len, d_head] x = torch.randn((B, H, T, D), dtype=dtype, device='cuda') g = torch.randn((B, H, T, D), dtype=dtype, device='cuda') x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g) print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}') print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}') x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) print(f"DTYPE:\t{x.dtype}") do = torch.randn_like(x) h0 = torch.randn_like(x[:, :, 0]) ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True) ref.backward(do) ref_dx, x.grad = x.grad.clone(), None ref_dg, g.grad = g.grad.clone(), None tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True) tri.backward(do) tri_dx, x.grad = x.grad.clone(), None tri_dg, g.grad = g.grad.clone(), None print(" \t DIFF\t MAX") print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}") print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}") print('Done!') @triton.testing.perf_report( triton.testing.Benchmark( # argument names to use as an x-axis for the plot x_names=['seq_len'], # different possible values for `x_name` x_vals=[128 * 2 ** i for i in range(0, 8)], # argument name whose value corresponds to a different line in the plot line_arg='provider', # possible values for `line_arg`` line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # label name for the lines line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # line styles styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')], ylabel="Execution Time (ms)", # label name for the y-axis # name for the plot. Used also as a file name for saving the plot. 
plot_name="Performance", args={}, ) ) def benchmark(seq_len, provider): dtype = torch.bfloat16 B, H, D = 16, 4, 128 x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda') g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid() x = (1 - g) * x x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) do = torch.randn_like(x, dtype=dtype) quantiles = [0.5, 0.2, 0.8] results = 0, 0, 0 if provider == 'chunk': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles) if provider == 'recurrent': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles) if provider == 'chunk_bwd': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles) if provider == 'recurrent_bwd': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles) return results benchmark.run(print_data=True)
@triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32) # [BT, BD] b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dg = tl.load(p_dg, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1)) class ChunkHGRNFunction(torch.autograd.Function): @staticmethod @contiguous def forward(ctx, x, g, initial_state=None, output_final_state=False): B, H, T, D = x.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) o = torch.empty_like(x, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_fwd_kernel_h[grid]( x, g, gc, o, initial_state, T, D, BT=BT, USE_INITIAL_STATE=initial_state is not None ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_fwd_kernel_o[grid]( gc, o, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, num_warps=num_warps ) final_state = None if output_final_state: final_state = o[:, :, -1].clone() o = o.to(x.dtype) ctx.save_for_backward(g, o, initial_state) return o, final_state @staticmethod @contiguous def backward(ctx, do, dht=None): g, o, initial_state = ctx.saved_tensors B, H, T, D = do.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) dx = torch.empty_like(o) dg = torch.empty_like(g) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_bwd_kernel_h[grid]( g, gc, dx, do, T, D, BT=BT ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_bwd_kernel_o[grid]( g, gc, o, dx, dg, o.stride(1), o.stride(2), o.stride(3), T, D, BT=BT, BD=BD, num_warps=num_warps ) if initial_state is not None: dg[:, :, 0] = initial_state * dx[:, :, 0] * g[:, :, 0].exp() return dx, dg, None, None def chunk_hgrn( x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None, output_final_state: bool = False ) -> Tuple[torch.Tensor, torch.Tensor]: if initial_state is not None: initial_state = initial_state.detach() o, final_state = ChunkHGRNFunction.apply(x, g, initial_state, output_final_state) return o, final_state if __name__ == '__main__': import torch.nn.functional as F from fla.ops.hgrn.naive import naive_recurrent_hgrn from 
fla.ops.hgrn.recurrent_fuse import fused_recurrent_hgrn B, H, T, D = 8, 4, 512, 128 dtype = torch.bfloat16 torch.manual_seed(42) # [batch_size, n_heads, seq_len, d_head] x = torch.randn((B, H, T, D), dtype=dtype, device='cuda') g = torch.randn((B, H, T, D), dtype=dtype, device='cuda') x, g = (1 - g.sigmoid()) * x, F.logsigmoid(g) print(f'x:\t{float(x.min()):>10.6f}\t{float(x.max()):>10.6f}') print(f'g:\t{float(g.min()):>10.6f}\t{float(g.max()):>10.6f}') x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) print(f"DTYPE:\t{x.dtype}") do = torch.randn_like(x) h0 = torch.randn_like(x[:, :, 0]) ref, ref_ht = naive_recurrent_hgrn(x, g, h0, output_final_state=True) ref.backward(do) ref_dx, x.grad = x.grad.clone(), None ref_dg, g.grad = g.grad.clone(), None tri, tri_ht = fused_recurrent_hgrn(x, g, h0, output_final_state=True) tri.backward(do) tri_dx, x.grad = x.grad.clone(), None tri_dg, g.grad = g.grad.clone(), None print(" \t DIFF\t MAX") print(' o\t', f"{float((ref - tri).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('ht\t', f"{float((ref_ht[0] - tri_ht[0]).abs().max()):>10.6f}\t{float(ref.max()):>10.6f}") print('dx\t', f"{float((ref_dx - tri_dx).abs().max()):>10.6f}\t{float(ref_dx.max()):>10.6f}") print('dg\t', f"{float((ref_dg - tri_dg).abs().max()):>10.6f}\t{float(ref_dg.max()):>10.6f}") print('Done!') @triton.testing.perf_report( triton.testing.Benchmark( # argument names to use as an x-axis for the plot x_names=['seq_len'], # different possible values for `x_name` x_vals=[128 * 2 ** i for i in range(0, 8)], # argument name whose value corresponds to a different line in the plot line_arg='provider', # possible values for `line_arg`` line_vals=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # label name for the lines line_names=['chunk', 'recurrent', 'chunk_bwd', 'recurrent_bwd'], # line styles styles=[('green', '-'), ('blue', '--'), ('red', '-.'), ('cyan', ':'), ('yellow', 'dotted'), ('black', 'dashed')], ylabel="Execution Time (ms)", # label name for the y-axis # name for the plot. Used also as a file name for saving the plot. plot_name="Performance", args={}, ) ) def benchmark(seq_len, provider): dtype = torch.bfloat16 B, H, D = 16, 4, 128 x = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda') g = torch.randn((B, H, seq_len, D), dtype=dtype, device='cuda').sigmoid() x = (1 - g) * x x, g = (i.detach().clone().to(dtype).requires_grad_() for i in (x, g)) do = torch.randn_like(x, dtype=dtype) quantiles = [0.5, 0.2, 0.8] results = 0, 0, 0 if provider == 'chunk': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g), quantiles=quantiles) if provider == 'recurrent': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g), quantiles=quantiles) if provider == 'chunk_bwd': results = triton.testing.do_bench(lambda: chunk_hgrn(x, g)[0].backward(do), quantiles=quantiles) if provider == 'recurrent_bwd': results = triton.testing.do_bench(lambda: fused_recurrent_hgrn(x, g)[0].backward(do), quantiles=quantiles) return results benchmark.run(print_data=True)
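For orientation, a hedged PyTorch sketch of the element-wise gated recurrence that chunk_hgrn evaluates chunkwise; g is assumed to hold log-gates, as in the __main__ test above where g = F.logsigmoid(g). hgrn_reference is an illustrative name, not an fla API.

import torch

def hgrn_reference(x, g, h0=None):
    # h_t = exp(g_t) * h_{t-1} + x_t, scanned over the time dimension in float32
    B, H, T, D = x.shape
    h = torch.zeros(B, H, D, dtype=torch.float, device=x.device) if h0 is None else h0.float()
    o = torch.empty(B, H, T, D, dtype=torch.float, device=x.device)
    for t in range(T):
        h = g[:, :, t].float().exp() * h + x[:, :, t].float()
        o[:, :, t] = h
    return o.to(x.dtype), h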
automl/unlocking_state_tracking
flash-linear-attention_mod/fla/modules/l2norm.py
https://github.com/automl/unlocking_state_tracking/blob/68f8e5fc6d25043d4e6a1fc169b58281c9e01d12/flash-linear-attention_mod/fla/modules/l2norm.py
# -*- coding: utf-8 -*- import torch import triton import triton.language as tl @triton.autotune( configs=[ triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8), triton.Config({}, num_warps=16), triton.Config({}, num_warps=32), ], key=["N"], ) # @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) # @triton.heuristics({"HAS_RESIDUAL": lambda args: args["RESIDUAL"] is not None}) @triton.jit def _l2_norm_fwd_1pass_kernel( X, # pointer to the input Y, # pointer to the output stride_x_row, # how much to increase the pointer when moving by 1 row N, # number of columns in X eps, # epsilon to avoid division by zero BLOCK_N: tl.constexpr, ): # Map the program id to the row of X and Y it should compute. row = tl.program_id(0) X += row * stride_x_row Y += row * stride_x_row # Compute mean and variance cols = tl.arange(0, BLOCK_N) x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) xbar = tl.where(cols < N, x, 0.0) var = tl.sum(xbar * xbar, axis=0) rstd = 1 / tl.sqrt(var + eps) # tl.store(Rstd + row, rstd) # Normalize and apply linear transformation mask = cols < N y = x * rstd # Write output tl.store(Y + cols, y, mask=mask) @triton.autotune( configs=[ triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8), triton.Config({}, num_warps=16), triton.Config({}, num_warps=32), ], key=["N"], ) # @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) # @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None}) # @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not None}) # @triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None}) @triton.jit def _l2_norm_bwd_kernel( X, # pointer to the input # Y, # pointer to the output to be recomputed DY, # pointer to the output gradient DX, # pointer to the input gradient stride_x_row, # how much to increase the pointer when moving by 1 row N, # number of columns in X eps, # epsilon to avoid division by zero BLOCK_N: tl.constexpr, ): # Map the program id to the elements of X, DX, and DY it should compute. # Map the program id to the row of X and Y it should compute. 
row = tl.program_id(0) X += row * stride_x_row DX += row * stride_x_row DY += row * stride_x_row # Y += row * stride_y_row cols = tl.arange(0, BLOCK_N) x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) x = tl.where(cols < N, x, 0.0) var = tl.sum(x * x) rstd = 1 / tl.sqrt(var + eps) # tl.store(Rstd + row, rstd) # Normalize and apply linear transformation mask = cols < N # y = x * rstd dy = tl.load(DY + cols, mask=cols < N, other=0.0).to(tl.float32) dy = tl.where(cols < N, dy, 0.0) # dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x tl.store(DX + cols, dx, mask=mask) def _l2_norm_fwd( x, eps=1e-6 ): x_shape_og = x.shape x = x.reshape(-1, x.shape[-1]) if x.stride(-1) != 1: x = x.contiguous() M, N = x.shape assert x.stride(-1) == 1 # allocate output y = torch.empty_like(x) assert y.stride(-1) == 1 N = x.shape[-1] M = x.shape[0] # rstd = torch.empty((M,), dtype=torch.float32, device="cuda") # Less than 64KB per feature: enqueue fused kernel MAX_FUSED_SIZE = 65536 // x.element_size() BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) if N > BLOCK_N: raise RuntimeError( "This layer norm doesn't support feature dim >= 64KB.") # heuristics for number of warps with torch.cuda.device(x.device.index): _l2_norm_fwd_1pass_kernel[(M,)]( x, y, x.stride(0), N, eps, # is_rms_norm, BLOCK_N, # residual is not None, # residual_out is not None, # bias is not None, ) return y.reshape(x_shape_og) def _l2_norm_bwd( x, dy, eps=1e-5, ): x_shape_og = x.shape x = x.reshape(-1, dy.shape[-1]) dy = dy.reshape(-1, dy.shape[-1]) if dy.stride(-1) != 1: dy = dy.contiguous() assert dy.shape == x.shape # allocate output dx = torch.empty_like(x) N = x.shape[-1] M = x.shape[0] assert x.stride(-1) == 1 assert dy.stride(-1) == 1 # rstd = torch.empty((M,), dtype=torch.float32, device="cuda") # Less than 64KB per feature: enqueue fused kernel MAX_FUSED_SIZE = 65536 // x.element_size() BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) if N > BLOCK_N: raise RuntimeError( "This layer norm doesn't support feature dim >= 64KB.") # heuristics for number of warps with torch.cuda.device(x.device.index): _l2_norm_bwd_kernel[(M,)]( x, dy, dx, x.stride(0), N, eps, BLOCK_N, ) return dx.reshape(x_shape_og) class L2NormFN(torch.autograd.Function): @staticmethod def forward( ctx, x, eps=1e-6, ): # reshape input data into 2D tensor y = _l2_norm_fwd(x, eps) ctx.eps = eps ctx.x_dtype = x.dtype ctx.save_for_backward(x) return y @staticmethod def backward(ctx, dy, *args): x, = ctx.saved_tensors dx = _l2_norm_bwd( x, dy, ctx.eps, ) return ( dx, None ) l2_norm_fn = L2NormFN.apply
@triton.jit
def _l2_norm_fwd_1pass_kernel(
    X,  # pointer to the input
    Y,  # pointer to the output
    stride_x_row,  # how much to increase the pointer when moving by 1 row
    N,  # number of columns in X
    eps,  # epsilon to avoid division by zero
    BLOCK_N: tl.constexpr,
):
    # Map the program id to the row of X and Y it should compute.
    row = tl.program_id(0)
    X += row * stride_x_row
    Y += row * stride_x_row
    # Compute the squared L2 norm of the row (out-of-bounds lanes contribute 0)
    cols = tl.arange(0, BLOCK_N)
    x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
    xbar = tl.where(cols < N, x, 0.0)
    var = tl.sum(xbar * xbar, axis=0)
    rstd = 1 / tl.sqrt(var + eps)
    # tl.store(Rstd + row, rstd)
    # Normalize the row
    mask = cols < N
    y = x * rstd
    # Write output
    tl.store(Y + cols, y, mask=mask)


@triton.autotune(
    configs=[
        triton.Config({}, num_warps=1),
        triton.Config({}, num_warps=2),
        triton.Config({}, num_warps=4),
        triton.Config({}, num_warps=8),
        triton.Config({}, num_warps=16),
        triton.Config({}, num_warps=32),
    ],
    key=["N"],
)
# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None})
# @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None})
# @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not None})
# @triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None})
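As a sanity check, the row-wise math of _l2_norm_fwd_1pass_kernel fits in a few lines of PyTorch; this is only an illustrative reference and l2_norm_fwd_ref is a made-up name.

import torch

def l2_norm_fwd_ref(x, eps=1e-6):
    # y = x / sqrt(sum_j x_j^2 + eps), computed per row in float32
    x32 = x.float()
    rstd = torch.rsqrt(x32.pow(2).sum(dim=-1, keepdim=True) + eps)
    return (x32 * rstd).to(x.dtype)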
automl/unlocking_state_tracking
flash-linear-attention_mod/fla/modules/l2norm.py
https://github.com/automl/unlocking_state_tracking/blob/68f8e5fc6d25043d4e6a1fc169b58281c9e01d12/flash-linear-attention_mod/fla/modules/l2norm.py
# -*- coding: utf-8 -*- import torch import triton import triton.language as tl @triton.autotune( configs=[ triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8), triton.Config({}, num_warps=16), triton.Config({}, num_warps=32), ], key=["N"], ) # @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) # @triton.heuristics({"HAS_RESIDUAL": lambda args: args["RESIDUAL"] is not None}) @triton.jit def _l2_norm_fwd_1pass_kernel( X, # pointer to the input Y, # pointer to the output stride_x_row, # how much to increase the pointer when moving by 1 row N, # number of columns in X eps, # epsilon to avoid division by zero BLOCK_N: tl.constexpr, ): # Map the program id to the row of X and Y it should compute. row = tl.program_id(0) X += row * stride_x_row Y += row * stride_x_row # Compute mean and variance cols = tl.arange(0, BLOCK_N) x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) xbar = tl.where(cols < N, x, 0.0) var = tl.sum(xbar * xbar, axis=0) rstd = 1 / tl.sqrt(var + eps) # tl.store(Rstd + row, rstd) # Normalize and apply linear transformation mask = cols < N y = x * rstd # Write output tl.store(Y + cols, y, mask=mask) @triton.autotune( configs=[ triton.Config({}, num_warps=1), triton.Config({}, num_warps=2), triton.Config({}, num_warps=4), triton.Config({}, num_warps=8), triton.Config({}, num_warps=16), triton.Config({}, num_warps=32), ], key=["N"], ) # @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) # @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None}) # @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not None}) # @triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None}) @triton.jit def _l2_norm_bwd_kernel( X, # pointer to the input # Y, # pointer to the output to be recomputed DY, # pointer to the output gradient DX, # pointer to the input gradient stride_x_row, # how much to increase the pointer when moving by 1 row N, # number of columns in X eps, # epsilon to avoid division by zero BLOCK_N: tl.constexpr, ): # Map the program id to the elements of X, DX, and DY it should compute. # Map the program id to the row of X and Y it should compute. 
row = tl.program_id(0) X += row * stride_x_row DX += row * stride_x_row DY += row * stride_x_row # Y += row * stride_y_row cols = tl.arange(0, BLOCK_N) x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) x = tl.where(cols < N, x, 0.0) var = tl.sum(x * x) rstd = 1 / tl.sqrt(var + eps) # tl.store(Rstd + row, rstd) # Normalize and apply linear transformation mask = cols < N # y = x * rstd dy = tl.load(DY + cols, mask=cols < N, other=0.0).to(tl.float32) dy = tl.where(cols < N, dy, 0.0) # dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x tl.store(DX + cols, dx, mask=mask) def _l2_norm_fwd( x, eps=1e-6 ): x_shape_og = x.shape x = x.reshape(-1, x.shape[-1]) if x.stride(-1) != 1: x = x.contiguous() M, N = x.shape assert x.stride(-1) == 1 # allocate output y = torch.empty_like(x) assert y.stride(-1) == 1 N = x.shape[-1] M = x.shape[0] # rstd = torch.empty((M,), dtype=torch.float32, device="cuda") # Less than 64KB per feature: enqueue fused kernel MAX_FUSED_SIZE = 65536 // x.element_size() BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) if N > BLOCK_N: raise RuntimeError( "This layer norm doesn't support feature dim >= 64KB.") # heuristics for number of warps with torch.cuda.device(x.device.index): _l2_norm_fwd_1pass_kernel[(M,)]( x, y, x.stride(0), N, eps, # is_rms_norm, BLOCK_N, # residual is not None, # residual_out is not None, # bias is not None, ) return y.reshape(x_shape_og) def _l2_norm_bwd( x, dy, eps=1e-5, ): x_shape_og = x.shape x = x.reshape(-1, dy.shape[-1]) dy = dy.reshape(-1, dy.shape[-1]) if dy.stride(-1) != 1: dy = dy.contiguous() assert dy.shape == x.shape # allocate output dx = torch.empty_like(x) N = x.shape[-1] M = x.shape[0] assert x.stride(-1) == 1 assert dy.stride(-1) == 1 # rstd = torch.empty((M,), dtype=torch.float32, device="cuda") # Less than 64KB per feature: enqueue fused kernel MAX_FUSED_SIZE = 65536 // x.element_size() BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) if N > BLOCK_N: raise RuntimeError( "This layer norm doesn't support feature dim >= 64KB.") # heuristics for number of warps with torch.cuda.device(x.device.index): _l2_norm_bwd_kernel[(M,)]( x, dy, dx, x.stride(0), N, eps, BLOCK_N, ) return dx.reshape(x_shape_og) class L2NormFN(torch.autograd.Function): @staticmethod def forward( ctx, x, eps=1e-6, ): # reshape input data into 2D tensor y = _l2_norm_fwd(x, eps) ctx.eps = eps ctx.x_dtype = x.dtype ctx.save_for_backward(x) return y @staticmethod def backward(ctx, dy, *args): x, = ctx.saved_tensors dx = _l2_norm_bwd( x, dy, ctx.eps, ) return ( dx, None ) l2_norm_fn = L2NormFN.apply
@triton.jit def _l2_norm_bwd_kernel( X, # pointer to the input # Y, # pointer to the output to be recomputed DY, # pointer to the output gradient DX, # pointer to the input gradient stride_x_row, # how much to increase the pointer when moving by 1 row N, # number of columns in X eps, # epsilon to avoid division by zero BLOCK_N: tl.constexpr, ): # Map the program id to the elements of X, DX, and DY it should compute. # Map the program id to the row of X and Y it should compute. row = tl.program_id(0) X += row * stride_x_row DX += row * stride_x_row DY += row * stride_x_row # Y += row * stride_y_row cols = tl.arange(0, BLOCK_N) x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) x = tl.where(cols < N, x, 0.0) var = tl.sum(x * x) rstd = 1 / tl.sqrt(var + eps) # tl.store(Rstd + row, rstd) # Normalize and apply linear transformation mask = cols < N # y = x * rstd dy = tl.load(DY + cols, mask=cols < N, other=0.0).to(tl.float32) dy = tl.where(cols < N, dy, 0.0) # dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x dx = dy * rstd - tl.sum(dy * x) * (1 / (var+eps)) * rstd * x tl.store(DX + cols, dx, mask=mask) def _l2_norm_fwd( x, eps=1e-6 ): x_shape_og = x.shape x = x.reshape(-1, x.shape[-1]) if x.stride(-1) != 1: x = x.contiguous() M, N = x.shape assert x.stride(-1) == 1 # allocate output y = torch.empty_like(x) assert y.stride(-1) == 1 N = x.shape[-1] M = x.shape[0] # rstd = torch.empty((M,), dtype=torch.float32, device="cuda") # Less than 64KB per feature: enqueue fused kernel MAX_FUSED_SIZE = 65536 // x.element_size() BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) if N > BLOCK_N: raise RuntimeError( "This layer norm doesn't support feature dim >= 64KB.") # heuristics for number of warps with torch.cuda.device(x.device.index): _l2_norm_fwd_1pass_kernel[(M,)]( x, y, x.stride(0), N, eps, # is_rms_norm, BLOCK_N, # residual is not None, # residual_out is not None, # bias is not None, ) return y.reshape(x_shape_og) def _l2_norm_bwd( x, dy, eps=1e-5, ): x_shape_og = x.shape x = x.reshape(-1, dy.shape[-1]) dy = dy.reshape(-1, dy.shape[-1]) if dy.stride(-1) != 1: dy = dy.contiguous() assert dy.shape == x.shape # allocate output dx = torch.empty_like(x) N = x.shape[-1] M = x.shape[0] assert x.stride(-1) == 1 assert dy.stride(-1) == 1 # rstd = torch.empty((M,), dtype=torch.float32, device="cuda") # Less than 64KB per feature: enqueue fused kernel MAX_FUSED_SIZE = 65536 // x.element_size() BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) if N > BLOCK_N: raise RuntimeError( "This layer norm doesn't support feature dim >= 64KB.") # heuristics for number of warps with torch.cuda.device(x.device.index): _l2_norm_bwd_kernel[(M,)]( x, dy, dx, x.stride(0), N, eps, BLOCK_N, ) return dx.reshape(x_shape_og) class L2NormFN(torch.autograd.Function): @staticmethod def forward( ctx, x, eps=1e-6, ): # reshape input data into 2D tensor y = _l2_norm_fwd(x, eps) ctx.eps = eps ctx.x_dtype = x.dtype ctx.save_for_backward(x) return y @staticmethod def backward(ctx, dy, *args): x, = ctx.saved_tensors dx = _l2_norm_bwd( x, dy, ctx.eps, ) return ( dx, None ) l2_norm_fn = L2NormFN.apply
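The backward kernel applies the Jacobian-vector product of that normalization; below is a hedged PyTorch mirror of the same formula (illustrative name only), which can be compared against torch.autograd.gradcheck on small float64 inputs.

import torch

def l2_norm_bwd_ref(x, dy, eps=1e-6):
    # dx = dy * rstd - (dy . x) * x * rstd^3, with rstd = 1 / sqrt(sum_j x_j^2 + eps)
    x32, dy32 = x.float(), dy.float()
    var = x32.pow(2).sum(dim=-1, keepdim=True)
    rstd = torch.rsqrt(var + eps)
    dx = dy32 * rstd - (dy32 * x32).sum(dim=-1, keepdim=True) * x32 * rstd.pow(3)
    return dx.to(x.dtype)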
SwekeR-463/GK
kernels/activations/swiglu.py
https://github.com/SwekeR-463/GK/blob/84e1b9e20210add99b36145d3462c7e1f443473b/kernels/activations/swiglu.py
import triton import triton.language as tl import torch @triton.jit def swiglu_forward_kernel( x_ptr, y_ptr, n_elements: tl.constexpr, BLOCK_SIZE: tl.constexpr, feature_dim: tl.constexpr # feature dimension (size of x_W and x_V) ): pid = tl.program_id(axis=0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements # load input tensor (split into x_W and x_V) x_W = tl.load(x_ptr + offsets, mask=mask, other=0.0) x_V = tl.load(x_ptr + offsets + feature_dim, mask=mask, other=0.0) # SwiGLU activation: SiLU(x_W) * x_V swiglu = (x_W * tl.sigmoid(x_W)) * x_V tl.store(y_ptr + offsets, swiglu, mask=mask) @triton.jit def swiglu_backward_kernel( dy_ptr, # pointer to gradient of loss w.r.t. output x_ptr, # pointer to input tensor dx_ptr, # pointer to gradient of loss w.r.t. input n_elements: tl.constexpr, BLOCK_SIZE: tl.constexpr, feature_dim: tl.constexpr # feature dimension (size of x_W and x_V) ): pid = tl.program_id(axis=0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements # load inputs (split into x_W and x_V) x_W = tl.load(x_ptr + offsets, mask=mask, other=0.0) x_V = tl.load(x_ptr + offsets + feature_dim, mask=mask, other=0.0) dy = tl.load(dy_ptr + offsets, mask=mask, other=0.0) # compute SiLU and its derivative for x_W sig = tl.sigmoid(x_W) sig_derivative = sig * (1 - sig) silu_grad = sig + x_W * sig_derivative # compute gradients for x_W and x_V dx_W = dy * x_V * silu_grad dx_V = dy * (x_W * sig) tl.store(dx_ptr + offsets, dx_W, mask=mask) tl.store(dx_ptr + offsets + feature_dim, dx_V, mask=mask) def swiglu_forward(x): n_elements = x.numel() // 2 # output has half the feature size feature_dim = x.shape[-1] // 2 # split input into two halves y = torch.empty_like(x[..., :feature_dim]) grid = (triton.cdiv(n_elements, 1024),) swiglu_forward_kernel[grid](x, y, n_elements, BLOCK_SIZE=1024, feature_dim=feature_dim, num_warps=4) return y def swiglu_backward(dy, x): n_elements = x.numel() // 2 # output has half the feature size feature_dim = x.shape[-1] // 2 # split input into two halves dx = torch.empty_like(x) grid = (triton.cdiv(n_elements, 1024),) swiglu_backward_kernel[grid](dy, x, dx, n_elements, BLOCK_SIZE=1024, feature_dim=feature_dim, num_warps=4) return dx class SwiGLUTriton(torch.autograd.Function): @staticmethod def forward(ctx, x): y = swiglu_forward(x) ctx.save_for_backward(x) # save input for backward pass return y @staticmethod def backward(ctx, dy): x, = ctx.saved_tensors dx = swiglu_backward(dy, x) return dx class TritonSwiGLULayer(torch.nn.Module): def forward(self, x): return SwiGLUTriton.apply(x) # test the implementation x = torch.randn(4096, device='cuda', requires_grad=True, dtype=torch.float64) y = SwiGLUTriton.apply(x) # backward test dy = torch.ones_like(y) # assume dL/dy = 1 dx = torch.autograd.grad(y, x, grad_outputs=dy)[0] print(y) # forward pass output print(dx) # backward pass gradients # gradient check test = torch.autograd.gradcheck(SwiGLUTriton.apply, (x,), eps=1e-6, atol=1e-5, nondet_tol=1e-5) print("Gradient check passed:", test) # True
@triton.jit
def swiglu_forward_kernel(
    x_ptr,
    y_ptr,
    n_elements: tl.constexpr,
    BLOCK_SIZE: tl.constexpr,
    feature_dim: tl.constexpr  # feature dimension (size of x_W and x_V)
):
    pid = tl.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    # load input tensor (split into x_W and x_V)
    x_W = tl.load(x_ptr + offsets, mask=mask, other=0.0)
    x_V = tl.load(x_ptr + offsets + feature_dim, mask=mask, other=0.0)

    # SwiGLU activation: SiLU(x_W) * x_V
    swiglu = (x_W * tl.sigmoid(x_W)) * x_V

    tl.store(y_ptr + offsets, swiglu, mask=mask)
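Functionally, the kernel gates the second half of the feature dimension with SiLU of the first half; a hedged PyTorch reference follows (swiglu_ref is an illustrative name). Note that the flat offsets above line up with the 1-D test tensor used later in this file; batched 2-D inputs would additionally need a row stride.

import torch
import torch.nn.functional as F

def swiglu_ref(x):
    # split the last dimension in half and gate: SiLU(x_W) * x_V
    x_W, x_V = x.chunk(2, dim=-1)
    return F.silu(x_W) * x_V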
SwekeR-463/GK
kernels/activations/swiglu.py
https://github.com/SwekeR-463/GK/blob/84e1b9e20210add99b36145d3462c7e1f443473b/kernels/activations/swiglu.py
import triton import triton.language as tl import torch @triton.jit def swiglu_forward_kernel( x_ptr, y_ptr, n_elements: tl.constexpr, BLOCK_SIZE: tl.constexpr, feature_dim: tl.constexpr # feature dimension (size of x_W and x_V) ): pid = tl.program_id(axis=0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements # load input tensor (split into x_W and x_V) x_W = tl.load(x_ptr + offsets, mask=mask, other=0.0) x_V = tl.load(x_ptr + offsets + feature_dim, mask=mask, other=0.0) # SwiGLU activation: SiLU(x_W) * x_V swiglu = (x_W * tl.sigmoid(x_W)) * x_V tl.store(y_ptr + offsets, swiglu, mask=mask) @triton.jit def swiglu_backward_kernel( dy_ptr, # pointer to gradient of loss w.r.t. output x_ptr, # pointer to input tensor dx_ptr, # pointer to gradient of loss w.r.t. input n_elements: tl.constexpr, BLOCK_SIZE: tl.constexpr, feature_dim: tl.constexpr # feature dimension (size of x_W and x_V) ): pid = tl.program_id(axis=0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements # load inputs (split into x_W and x_V) x_W = tl.load(x_ptr + offsets, mask=mask, other=0.0) x_V = tl.load(x_ptr + offsets + feature_dim, mask=mask, other=0.0) dy = tl.load(dy_ptr + offsets, mask=mask, other=0.0) # compute SiLU and its derivative for x_W sig = tl.sigmoid(x_W) sig_derivative = sig * (1 - sig) silu_grad = sig + x_W * sig_derivative # compute gradients for x_W and x_V dx_W = dy * x_V * silu_grad dx_V = dy * (x_W * sig) tl.store(dx_ptr + offsets, dx_W, mask=mask) tl.store(dx_ptr + offsets + feature_dim, dx_V, mask=mask) def swiglu_forward(x): n_elements = x.numel() // 2 # output has half the feature size feature_dim = x.shape[-1] // 2 # split input into two halves y = torch.empty_like(x[..., :feature_dim]) grid = (triton.cdiv(n_elements, 1024),) swiglu_forward_kernel[grid](x, y, n_elements, BLOCK_SIZE=1024, feature_dim=feature_dim, num_warps=4) return y def swiglu_backward(dy, x): n_elements = x.numel() // 2 # output has half the feature size feature_dim = x.shape[-1] // 2 # split input into two halves dx = torch.empty_like(x) grid = (triton.cdiv(n_elements, 1024),) swiglu_backward_kernel[grid](dy, x, dx, n_elements, BLOCK_SIZE=1024, feature_dim=feature_dim, num_warps=4) return dx class SwiGLUTriton(torch.autograd.Function): @staticmethod def forward(ctx, x): y = swiglu_forward(x) ctx.save_for_backward(x) # save input for backward pass return y @staticmethod def backward(ctx, dy): x, = ctx.saved_tensors dx = swiglu_backward(dy, x) return dx class TritonSwiGLULayer(torch.nn.Module): def forward(self, x): return SwiGLUTriton.apply(x) # test the implementation x = torch.randn(4096, device='cuda', requires_grad=True, dtype=torch.float64) y = SwiGLUTriton.apply(x) # backward test dy = torch.ones_like(y) # assume dL/dy = 1 dx = torch.autograd.grad(y, x, grad_outputs=dy)[0] print(y) # forward pass output print(dx) # backward pass gradients # gradient check test = torch.autograd.gradcheck(SwiGLUTriton.apply, (x,), eps=1e-6, atol=1e-5, nondet_tol=1e-5) print("Gradient check passed:", test) # True
@triton.jit def swiglu_backward_kernel( dy_ptr, # pointer to gradient of loss w.r.t. output x_ptr, # pointer to input tensor dx_ptr, # pointer to gradient of loss w.r.t. input n_elements: tl.constexpr, BLOCK_SIZE: tl.constexpr, feature_dim: tl.constexpr # feature dimension (size of x_W and x_V) ): pid = tl.program_id(axis=0) block_start = pid * BLOCK_SIZE offsets = block_start + tl.arange(0, BLOCK_SIZE) mask = offsets < n_elements # load inputs (split into x_W and x_V) x_W = tl.load(x_ptr + offsets, mask=mask, other=0.0) x_V = tl.load(x_ptr + offsets + feature_dim, mask=mask, other=0.0) dy = tl.load(dy_ptr + offsets, mask=mask, other=0.0) # compute SiLU and its derivative for x_W sig = tl.sigmoid(x_W) sig_derivative = sig * (1 - sig) silu_grad = sig + x_W * sig_derivative # compute gradients for x_W and x_V dx_W = dy * x_V * silu_grad dx_V = dy * (x_W * sig) tl.store(dx_ptr + offsets, dx_W, mask=mask) tl.store(dx_ptr + offsets + feature_dim, dx_V, mask=mask) def swiglu_forward(x): n_elements = x.numel() // 2 # output has half the feature size feature_dim = x.shape[-1] // 2 # split input into two halves y = torch.empty_like(x[..., :feature_dim]) grid = (triton.cdiv(n_elements, 1024),) swiglu_forward_kernel[grid](x, y, n_elements, BLOCK_SIZE=1024, feature_dim=feature_dim, num_warps=4) return y def swiglu_backward(dy, x): n_elements = x.numel() // 2 # output has half the feature size feature_dim = x.shape[-1] // 2 # split input into two halves dx = torch.empty_like(x) grid = (triton.cdiv(n_elements, 1024),) swiglu_backward_kernel[grid](dy, x, dx, n_elements, BLOCK_SIZE=1024, feature_dim=feature_dim, num_warps=4) return dx class SwiGLUTriton(torch.autograd.Function): @staticmethod def forward(ctx, x): y = swiglu_forward(x) ctx.save_for_backward(x) # save input for backward pass return y @staticmethod def backward(ctx, dy): x, = ctx.saved_tensors dx = swiglu_backward(dy, x) return dx class TritonSwiGLULayer(torch.nn.Module): def forward(self, x): return SwiGLUTriton.apply(x) # test the implementation x = torch.randn(4096, device='cuda', requires_grad=True, dtype=torch.float64) y = SwiGLUTriton.apply(x) # backward test dy = torch.ones_like(y) # assume dL/dy = 1 dx = torch.autograd.grad(y, x, grad_outputs=dy)[0] print(y) # forward pass output print(dx) # backward pass gradients # gradient check test = torch.autograd.gradcheck(SwiGLUTriton.apply, (x,), eps=1e-6, atol=1e-5, nondet_tol=1e-5) print("Gradient check passed:", test) # True
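For the backward pass, the stored gradients follow from the product rule with d SiLU(w)/dw = sigma(w) + w * sigma(w) * (1 - sigma(w)); below is a hedged PyTorch mirror of that math (illustrative name only), which the gradcheck at the end of the file exercises end to end.

import torch

def swiglu_bwd_ref(dy, x):
    x_W, x_V = x.chunk(2, dim=-1)
    sig = torch.sigmoid(x_W)
    silu_grad = sig + x_W * sig * (1 - sig)  # derivative of SiLU at x_W
    dx_W = dy * x_V * silu_grad
    dx_V = dy * x_W * sig                    # dy * SiLU(x_W)
    return torch.cat([dx_W, dx_V], dim=-1)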
WangZiyu012/STUM
modeling/mamba2/ops/triton/softplus.py
https://github.com/WangZiyu012/STUM/blob/ea2863fbf5aecfccc1379f334875e82e311ba3d2/modeling/mamba2/ops/triton/softplus.py
import triton
import triton.language as tl
from packaging import version

TRITON3 = version.parse(triton.__version__) >= version.parse("3.0.0")

if TRITON3:

    @triton.jit
    def softplus(dt):
        dt = tl.where(dt <= 20.0, tl.math.log(tl.math.exp(dt) + 1), dt)
        return dt

else:

    @triton.jit
    def softplus(dt):
        dt = tl.where(dt <= 20.0, tl.math.log1p(tl.exp(dt)), dt)
        return dt
@triton.jit
def softplus(dt):
    dt = tl.where(dt <= 20.0, tl.math.log(tl.math.exp(dt) + 1), dt)
    return dt
else:
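The 20.0 cutoff is the usual softplus threshold trick: for large inputs exp(dt) would overflow while log(1 + exp(dt)) is already essentially dt, so the function falls back to the identity. PyTorch's built-in softplus exposes the same threshold, which makes a quick CPU cross-check possible (illustrative snippet):

import torch
import torch.nn.functional as F

dt = torch.tensor([-5.0, 0.0, 5.0, 25.0])
ref = torch.where(dt <= 20.0, torch.log1p(dt.exp()), dt)
print(torch.allclose(F.softplus(dt, beta=1.0, threshold=20.0), ref))  # True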
WangZiyu012/STUM
modeling/mamba2/ops/triton/softplus.py
https://github.com/WangZiyu012/STUM/blob/ea2863fbf5aecfccc1379f334875e82e311ba3d2/modeling/mamba2/ops/triton/softplus.py
import triton
import triton.language as tl
from packaging import version

TRITON3 = version.parse(triton.__version__) >= version.parse("3.0.0")

if TRITON3:

    @triton.jit
    def softplus(dt):
        dt = tl.where(dt <= 20.0, tl.math.log(tl.math.exp(dt) + 1), dt)
        return dt

else:

    @triton.jit
    def softplus(dt):
        dt = tl.where(dt <= 20.0, tl.math.log1p(tl.exp(dt)), dt)
        return dt
@triton.jit
def softplus(dt):
    dt = tl.where(dt <= 20.0, tl.math.log1p(tl.exp(dt)), dt)
    return dt
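The two branches differ only in spelling log1p(exp(dt)) versus log(exp(dt) + 1); the version guard above suggests the two tl.math spellings are not both available across Triton releases. log1p is the numerically safer form for very negative inputs, as a plain-Python illustration shows:

import math

dt = -40.0
print(math.log(math.exp(dt) + 1))  # prints 0.0: exp(-40) is lost when added to 1.0 in double precision
print(math.log1p(math.exp(dt)))    # prints ~4.248e-18: log1p keeps the tiny value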
hpcaitech/ColossalAI
colossalai/kernel/triton/flash_decoding.py
https://github.com/hpcaitech/ColossalAI/blob/44d4053fec005fe0b06b6bc755fdc962463145df/colossalai/kernel/triton/flash_decoding.py
# Applying Flash-Decoding as descibed in # https://pytorch.org/blog/flash-decoding/ # by Tri Dao, 2023 import torch import triton import triton.language as tl # Triton 2.1.0 @triton.jit def _flash_decoding_fwd_kernel( Q, # [batch_size * q_len, head_num, head_dim] KCache, # [num_blocks, num_kv_heads, block_size, head_dim] VCache, # [num_blocks, num_kv_heads, block_size, head_dim], # or [num_blocks, num_kv_heads, head_dim//x, block_size, x], depends on strides provided block_tables, # [batch_size, max_blocks_per_sequence] mid_o, # [batch_size * q_len, head_num, kv_split_num, head_dim] mid_o_lse, # [batch_size * q_len, head_num, kv_split_num] kv_seq_len, # [batch_size] q_len, batch_size, kv_group_num, x, sm_scale, stride_qt, stride_qh, stride_qd, stride_kcb, stride_kch, stride_kcsplit_x, stride_kcs, stride_kcd, stride_vcb, stride_vch, stride_vcs, stride_vcd, stride_bts, stride_btb, stride_mid_ot, stride_mid_oh, stride_mid_ob, stride_mid_od, stride_mid_o_lset, stride_mid_o_lseh, stride_mid_o_lseb, BLOCK_KV: tl.constexpr, BLOCK_SIZE: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // q_len if cur_seq_idx >= batch_size: return cur_token_off = (cur_token_idx % q_len) - q_len + 1 cur_head_idx = tl.program_id(1) block_start_kv = tl.program_id(2) # for splitting k/v # NOTE It requires BLOCK_KV and BLOCK_SIZE to be the same # TODO might want to replace with BLOCK_KV % BLOCK_SIZE == 0 (optimize BLOCK_KV as multiple of BLOCK_SIZE) # and then support calculating multiple kv cache blocks on an instance tl.static_assert(BLOCK_KV == BLOCK_SIZE) # get the current (kv) sequence length # cur_token_off is used as a "mask" here for spec-dec during verification process cur_kv_seq_len = tl.load(kv_seq_len + cur_seq_idx) + cur_token_off if block_start_kv * BLOCK_KV >= cur_kv_seq_len: return offsets_dmodel = tl.arange(0, HEAD_DIM) offsets_block = tl.arange(0, BLOCK_SIZE) # block table for the current sequence block_table_ptr = block_tables + cur_seq_idx * stride_bts # cur_bt_start_idx = block_start_kv * (BLOCK_KV // BLOCK_SIZE) # cur_block_id = tl.load(block_table_ptr + cur_bt_start_idx * stride_btb) cur_block_id = tl.load(block_table_ptr + block_start_kv * stride_btb) cur_occupied_size = tl.where( (block_start_kv + 1) * BLOCK_SIZE <= cur_kv_seq_len, BLOCK_SIZE, cur_kv_seq_len - block_start_kv * BLOCK_SIZE ) tl.device_assert(cur_occupied_size >= 0) offsets_q = cur_token_idx * stride_qt + cur_head_idx * stride_qh + offsets_dmodel * stride_qd q = tl.load(Q + offsets_q) cur_kv_head_idx = cur_head_idx // kv_group_num offset_kvcache = cur_block_id * stride_kcb + cur_kv_head_idx * stride_kch offsets_k = ( offset_kvcache + (offsets_dmodel[None, :] // x) * stride_kcsplit_x + (offsets_dmodel[None, :] % x) * stride_kcd + offsets_block[:, None] * stride_kcs ) k_cur_block = tl.load(KCache + offsets_k) V_block_ptr = tl.make_block_ptr( base=VCache + offset_kvcache, shape=(cur_occupied_size, HEAD_DIM), strides=(stride_vcs, stride_vcd), offsets=(0, 0), block_shape=(BLOCK_SIZE, HEAD_DIM), order=(0, 1), ) v_cur_block = tl.load(V_block_ptr) acc = tl.zeros([HEAD_DIM], dtype=tl.float32) # use block size of the paged/blocked kv cache S_ij = tl.zeros([BLOCK_SIZE], dtype=tl.float32) # NOTE a trick to come across triton's requirement that values in both first and second input shapes must be >= 16, # Multiplying two tensors with shapes [1, d] * [d, block_size] will fail. 
# Refer to https://github.com/openai/triton/discussions/895 S_ij += tl.sum(q[None, :] * k_cur_block, 1) S_ij *= sm_scale S_ij += tl.where(block_start_kv * BLOCK_KV + offsets_block < cur_kv_seq_len, 0, float("-inf")) m = tl.max(S_ij, 0) S_ij -= m p_ij_hat = tl.exp(S_ij) l_i = tl.sum(p_ij_hat, 0) p_ij_hat = p_ij_hat.to(v_cur_block.type.element_ty) acc += tl.sum(v_cur_block * p_ij_hat[:, None], 0) acc = acc / l_i offsets_mid_o = ( cur_token_idx * stride_mid_ot + cur_head_idx * stride_mid_oh + block_start_kv * stride_mid_ob + offsets_dmodel * stride_mid_od ) tl.store(mid_o + offsets_mid_o, acc) offsets_mid_o_lse = ( cur_token_idx * stride_mid_o_lset + cur_head_idx * stride_mid_o_lseh + block_start_kv * stride_mid_o_lseb ) # logsumexp l_i^(j) = m^(j) + log(l_i^(j)) tl.store(mid_o_lse + offsets_mid_o_lse, m + tl.log(l_i)) # Triton 2.1.0 @triton.jit def _alibi_flash_decoding_fwd_kernel( Q, # [batch_size * q_len, head_num, head_dim] KCache, # [num_blocks, num_kv_heads, block_size, head_dim] VCache, # [num_blocks, num_kv_heads, block_size, head_dim] block_tables, # [batch_size, max_blocks_per_sequence] mid_o, # [batch_size * q_len, head_num, kv_split_num, head_dim] mid_o_lse, # [batch_size * q_len, head_num, kv_split_num] kv_seq_len, # [batch_size] q_len, batch_size, alibi_slopes, stride_qt, stride_qh, stride_qd, stride_cacheb, stride_cacheh, stride_cachebs, stride_cached, stride_bts, stride_btb, stride_mid_ot, stride_mid_oh, stride_mid_ob, stride_mid_od, stride_mid_o_lset, stride_mid_o_lseh, stride_mid_o_lseb, sm_scale, KV_GROUPS: tl.constexpr, BLOCK_KV: tl.constexpr, BLOCK_SIZE: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // q_len if cur_seq_idx >= batch_size: return cur_token_off = (cur_token_idx % q_len) - q_len + 1 cur_head_idx = tl.program_id(1) block_start_kv = tl.program_id(2) # for splitting k/v # NOTE It requires BLOCK_KV and BLOCK_SIZE to be the same # TODO might want to replace with BLOCK_KV % BLOCK_SIZE == 0 (optimize BLOCK_KV as multiple of BLOCK_SIZE) # and then support calculating multiple kv cache blocks on an instance tl.static_assert(BLOCK_KV == BLOCK_SIZE) # get the current (kv) sequence length # cur_token_off is used as a "mask" here for spec-dec during verification process cur_kv_seq_len = tl.load(kv_seq_len + cur_seq_idx) + cur_token_off if block_start_kv * BLOCK_KV >= cur_kv_seq_len: return offsets_dmodel = tl.arange(0, HEAD_DIM) offsets_q = cur_token_idx * stride_qt + cur_head_idx * stride_qh + offsets_dmodel * stride_qd q = tl.load(Q + offsets_q) # block table for the current sequence block_table_ptr = block_tables + cur_seq_idx * stride_bts # cur_bt_start_idx = block_start_kv * (BLOCK_KV // BLOCK_SIZE) # cur_block_id = tl.load(block_table_ptr + cur_bt_start_idx * stride_btb) cur_block_id = tl.load(block_table_ptr + block_start_kv * stride_btb) cur_occupied_size = tl.where( (block_start_kv + 1) * BLOCK_SIZE <= cur_kv_seq_len, BLOCK_SIZE, cur_kv_seq_len - block_start_kv * BLOCK_SIZE ) tl.device_assert(cur_occupied_size >= 0) cur_kv_head_idx = cur_head_idx // KV_GROUPS offset_kvcache = cur_block_id * stride_cacheb + cur_kv_head_idx * stride_cacheh K_block_ptr = tl.make_block_ptr( base=KCache + offset_kvcache, shape=(cur_occupied_size, HEAD_DIM), strides=(stride_cachebs, stride_cached), offsets=(0, 0), block_shape=(BLOCK_SIZE, HEAD_DIM), order=(0, 1), ) V_block_ptr = tl.make_block_ptr( base=VCache + offset_kvcache, shape=(cur_occupied_size, HEAD_DIM), strides=(stride_cachebs, stride_cached), offsets=(0, 0), 
block_shape=(BLOCK_SIZE, HEAD_DIM), order=(0, 1), ) k_cur_block = tl.load(K_block_ptr) v_cur_block = tl.load(V_block_ptr) acc = tl.zeros([HEAD_DIM], dtype=tl.float32) # use block size of the paged/blocked kv cache S_ij = tl.zeros([BLOCK_SIZE], dtype=tl.float32) alibi_slope = tl.load(alibi_slopes + cur_head_idx) position_k_offset = block_start_kv * BLOCK_KV + tl.arange(0, BLOCK_SIZE) # NOTE a trick to come across triton's requirement that values in both first and second input shapes must be >= 16, # Multiplying two tensors with shapes [1, d] * [d, block_size] will fail. # Refer to https://github.com/openai/triton/discussions/895 S_ij += tl.sum(q[None, :] * k_cur_block, 1) S_ij *= sm_scale S_ij -= alibi_slope * (cur_kv_seq_len - 1 - position_k_offset) S_ij = tl.where(cur_kv_seq_len > position_k_offset, S_ij, float("-inf")) m = tl.max(S_ij, 0) S_ij -= m p_ij_hat = tl.exp(S_ij) l_i = tl.sum(p_ij_hat, 0) p_ij_hat = p_ij_hat.to(v_cur_block.type.element_ty) acc += tl.sum(v_cur_block * p_ij_hat[:, None], 0) acc = acc / l_i offsets_mid_o = ( cur_token_idx * stride_mid_ot + cur_head_idx * stride_mid_oh + block_start_kv * stride_mid_ob + offsets_dmodel * stride_mid_od ) tl.store(mid_o + offsets_mid_o, acc) offsets_mid_o_lse = ( cur_token_idx * stride_mid_o_lset + cur_head_idx * stride_mid_o_lseh + block_start_kv * stride_mid_o_lseb ) # logsumexp l_i^(j) = m^(j) + log(l_i^(j)) tl.store(mid_o_lse + offsets_mid_o_lse, m + tl.log(l_i)) # Triton 2.1.0 @triton.jit def _flash_decoding_fwd_reduce_kernel( mid_o, # [batch_size, head_num, kv_split_num, head_dim] mid_o_lse, # [batch_size, head_num, kv_split_num] O, # [batch_size, num_heads, head_dim] or [batch_size, 1, num_heads, head_dim] kv_seq_len, q_len, batch_size, stride_mid_ot, stride_mid_oh, stride_mid_ob, stride_mid_od, stride_o_lset, stride_o_lseh, stride_o_lseb, stride_ot, stride_oh, stride_od, BLOCK_KV: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // q_len if cur_seq_idx >= batch_size: return cur_head_idx = tl.program_id(1) # cur_token_off is used as a "mask" here for spec-dec during verification process cur_token_off = (cur_token_idx % q_len) - q_len + 1 cur_kv_seq_len = tl.load(kv_seq_len + cur_seq_idx) + cur_token_off offsets_dmodel = tl.arange(0, HEAD_DIM) # NOTE currently the block size BLOCK_KV splitting kv is relatively small as we have # BLOCK_KV == BLOCK_SIZE for now. We might want to decrease the number of blocks of kv splitted. 
kv_split_num = (cur_kv_seq_len + BLOCK_KV - 1) // BLOCK_KV m_i = float("-inf") # max logic l_i = 0.0 # sum exp acc = tl.zeros([HEAD_DIM], dtype=tl.float32) offsets_mid_o = cur_token_idx * stride_mid_ot + cur_head_idx * stride_mid_oh + offsets_dmodel offset_mid_lse = cur_token_idx * stride_o_lset + cur_head_idx * stride_o_lseh for block_i in range(0, kv_split_num, 1): mid_o_block = tl.load(mid_o + offsets_mid_o + block_i * stride_mid_ob) lse = tl.load(mid_o_lse + offset_mid_lse + block_i * stride_o_lseb) m_ij = tl.maximum(m_i, lse) scale = tl.exp(m_i - m_ij) acc = acc * scale lse -= m_ij exp_logic = tl.exp(lse) acc += exp_logic * mid_o_block l_i = scale * l_i + exp_logic m_i = m_ij acc = acc / l_i offsets_O = cur_token_idx * stride_ot + cur_head_idx * stride_oh + offsets_dmodel tl.store(O + offsets_O, acc.to(O.type.element_ty)) return # Decoding Stage # Used with blocked KV Cache (PagedAttention) def flash_decoding_attention( q: torch.Tensor, k_cache: torch.Tensor, v_cache: torch.Tensor, kv_seq_len: torch.Tensor, block_tables: torch.Tensor, block_size: int, max_seq_len_in_batch: int = None, output: torch.Tensor = None, mid_output: torch.Tensor = None, mid_output_lse: torch.Tensor = None, alibi_slopes: torch.Tensor = None, sm_scale: int = None, kv_group_num: int = 1, q_len: int = 1, # NOTE alibi flash decoding does not support q_len > 1 at this moment. use_new_kcache_layout: bool = False, ): """ Flash decoding implemented with a blocked KV Cache (PagedAttention) during decoding stage. Args: q (torch.Tensor): [bsz * q_len, num_heads, head_dim] q_len > 1 only for verification process in speculative-decoding. k_cache (torch.Tensor): [num_blocks, num_kv_heads, block_size, head_dim] v_cache (torch.Tensor): [num_blocks, num_kv_heads, block_size, head_dim] kv_seq_len (torch.Tensor): [batch_size] records the (kv) sequence lengths incorporating past kv sequence lengths. block_tables (torch.Tensor): [batch_size, max_blocks_per_sequence] max_seq_len_in_batch (int): Maximum sequence length in the batch. output (torch.Tensor): [bsz, num_heads * head_dim] mid_output (torch.Tensor): [max_bsz * q_len, num_heads, kv_max_split_num, head_dim] Intermediate output tensor. `max_bsz` should be greater than or equal to `bsz`. q_len > 1 only for verification process in speculative-decoding. mid_output_lse (torch.Tensor): [max_bsz * q_len, num_heads, kv_max_split_num] Log-sum-exp of intermediate output. `max_bsz` should be greater than or equal to `bsz`. q_len > 1 only for verification process in speculative-decoding. alibi_slopes (torch.Tensor): [num_heads] alibi slopes used for alibi flash decoding. block_size (int): Size of each block in the blocked key/value cache. num_kv_group (int, optional): Number of key/value groups. Defaults to 1. q_length (int): Query length. Use for speculative decoding when `q_length` > 1 (i.e. the last n tokens). Defaults to 1. use_new_kcache_layout (bool): Whether to use the new kcache layout. Defaults to False. 
Returns: Output tensor with shape [bsz * q_len, num_heads * head_dim] """ q = q.squeeze() if q.dim() == 4 else q assert q.dim() == 3, f"Incompatible q dim: {q.dim()}" n_tokens, num_heads, head_dim = q.shape assert n_tokens % q_len == 0, "Invalid q_len" bsz = n_tokens // q_len assert head_dim in {32, 64, 128, 256} assert kv_seq_len.shape[0] == block_tables.shape[0] == bsz, ( f"Got incompatible batch size (number of seqs):\n" f" KV seq lengths bsz {kv_seq_len.size(0)}, Block tables bsz {block_tables.size(0)}, " f"batch size {bsz}" ) assert k_cache.size(-2) == v_cache.size(-2) == block_size, ( f"Got incompatible block size on kv caches:\n" f" assigned block_size {block_size}, k_cache block_size {k_cache.size(-2)}, " f"v_cache block_size {v_cache.size(-2)}" ) # NOTE BLOCK_KV could be considered as block splitting the sequence on k/v # For now, BLOCK_KV is supposed to be equivalent with the size of physical cache block (i.e.`block_size`) assert block_size in {16, 32, 64, 128} BLOCK_KV = block_size sm_scale = 1.0 / (head_dim**0.5) if sm_scale is None else sm_scale max_seq_len_in_batch = kv_seq_len.max().item() if max_seq_len_in_batch is None else max_seq_len_in_batch # For compatibility (TODO revise modeling in future) kv_max_split_num = (max_seq_len_in_batch + BLOCK_KV - 1) // BLOCK_KV if mid_output is None: mid_output = torch.empty( (bsz * q_len, num_heads, kv_max_split_num, head_dim), dtype=torch.float32, device=q.device ) if mid_output_lse is None: mid_output_lse = torch.empty((bsz * q_len, num_heads, kv_max_split_num), dtype=torch.float32, device=q.device) if output is None: # A hack to prevent `view` operation in modeling output = torch.empty((bsz * q_len, num_heads * head_dim), dtype=q.dtype, device=q.device) assert ( mid_output.size(2) == mid_output_lse.size(2) >= kv_max_split_num ), "Incompatible kv split number of intermediate output tensors" assert ( mid_output.size(0) == mid_output_lse.size(0) >= output.size(0) == n_tokens ), f"Incompatible first dimension of output tensors" # NOTE use `triton.next_power_of_2` here to utilize the cache mechanism of triton # To optimize, revise batching/scheduling to batch 2^n sequences in a batch (preferred) grid = lambda META: ( triton.next_power_of_2(bsz * q_len), num_heads, triton.cdiv(triton.next_power_of_2(max_seq_len_in_batch), META["BLOCK_KV"]), ) if alibi_slopes is not None: # TODO(yuanheng-zhao): Since the alibi kernel is pretty similar to the original one, # the code (alibi kernel) will be refactored later to avoid code duplication, when # the whole triton flow with new k cache layout has been supported and tested. 
assert ( not use_new_kcache_layout ), "Alibi Slopes will be supported with new kcache layout later when the whole triton flow is ready" _alibi_flash_decoding_fwd_kernel[grid]( q, k_cache, v_cache, block_tables, mid_output, mid_output_lse, kv_seq_len, q_len, bsz, alibi_slopes, q.stride(0), q.stride(1), q.stride(2), k_cache.stride(0), k_cache.stride(1), k_cache.stride(2), k_cache.stride(3), block_tables.stride(0), block_tables.stride(1), mid_output.stride(0), mid_output.stride(1), mid_output.stride(2), mid_output.stride(3), mid_output_lse.stride(0), mid_output_lse.stride(1), mid_output_lse.stride(2), sm_scale, KV_GROUPS=kv_group_num, BLOCK_KV=block_size, BLOCK_SIZE=block_size, HEAD_DIM=head_dim, ) else: # For KCache and VCache with the same layout x = head_dim kcsplit_x_stride, kcs_stride, kcd_stride = 0, k_cache.stride(2), k_cache.stride(3) # For KCache layout [num_blocks, num_kv_heads, head_dim//x, block_size, x] if use_new_kcache_layout: assert ( k_cache.dim() == 5 and k_cache.shape[1] == v_cache.shape[1] and k_cache.shape[2] * k_cache.shape[4] == v_cache.shape[3] ), f"Invalid KCache shape {k_cache.shape} and VCache shape {v_cache.shape}" x = k_cache.size(-1) kcsplit_x_stride, kcs_stride, kcd_stride = k_cache.stride()[-3:] _flash_decoding_fwd_kernel[grid]( q, k_cache, v_cache, block_tables, mid_output, mid_output_lse, kv_seq_len, q_len, bsz, kv_group_num, x, sm_scale, q.stride(0), q.stride(1), q.stride(2), k_cache.stride(0), k_cache.stride(1), kcsplit_x_stride, kcs_stride, kcd_stride, v_cache.stride(0), v_cache.stride(1), v_cache.stride(2), v_cache.stride(3), block_tables.stride(0), block_tables.stride(1), mid_output.stride(0), mid_output.stride(1), mid_output.stride(2), mid_output.stride(3), mid_output_lse.stride(0), mid_output_lse.stride(1), mid_output_lse.stride(2), BLOCK_KV=block_size, BLOCK_SIZE=block_size, HEAD_DIM=head_dim, ) grid = (triton.next_power_of_2(bsz * q_len), num_heads) _flash_decoding_fwd_reduce_kernel[grid]( mid_output, mid_output_lse, output, kv_seq_len, q_len, bsz, mid_output.stride(0), mid_output.stride(1), mid_output.stride(2), mid_output.stride(3), mid_output_lse.stride(0), mid_output_lse.stride(1), mid_output_lse.stride(2), output.stride(0), head_dim, 1, BLOCK_KV=block_size, HEAD_DIM=head_dim, ) return output
@triton.jit def _flash_decoding_fwd_kernel( Q, # [batch_size * q_len, head_num, head_dim] KCache, # [num_blocks, num_kv_heads, block_size, head_dim] VCache, # [num_blocks, num_kv_heads, block_size, head_dim], # or [num_blocks, num_kv_heads, head_dim//x, block_size, x], depends on strides provided block_tables, # [batch_size, max_blocks_per_sequence] mid_o, # [batch_size * q_len, head_num, kv_split_num, head_dim] mid_o_lse, # [batch_size * q_len, head_num, kv_split_num] kv_seq_len, # [batch_size] q_len, batch_size, kv_group_num, x, sm_scale, stride_qt, stride_qh, stride_qd, stride_kcb, stride_kch, stride_kcsplit_x, stride_kcs, stride_kcd, stride_vcb, stride_vch, stride_vcs, stride_vcd, stride_bts, stride_btb, stride_mid_ot, stride_mid_oh, stride_mid_ob, stride_mid_od, stride_mid_o_lset, stride_mid_o_lseh, stride_mid_o_lseb, BLOCK_KV: tl.constexpr, BLOCK_SIZE: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // q_len if cur_seq_idx >= batch_size: return cur_token_off = (cur_token_idx % q_len) - q_len + 1 cur_head_idx = tl.program_id(1) block_start_kv = tl.program_id(2) # for splitting k/v # NOTE It requires BLOCK_KV and BLOCK_SIZE to be the same # TODO might want to replace with BLOCK_KV % BLOCK_SIZE == 0 (optimize BLOCK_KV as multiple of BLOCK_SIZE) # and then support calculating multiple kv cache blocks on an instance tl.static_assert(BLOCK_KV == BLOCK_SIZE) # get the current (kv) sequence length # cur_token_off is used as a "mask" here for spec-dec during verification process cur_kv_seq_len = tl.load(kv_seq_len + cur_seq_idx) + cur_token_off if block_start_kv * BLOCK_KV >= cur_kv_seq_len: return offsets_dmodel = tl.arange(0, HEAD_DIM) offsets_block = tl.arange(0, BLOCK_SIZE) # block table for the current sequence block_table_ptr = block_tables + cur_seq_idx * stride_bts # cur_bt_start_idx = block_start_kv * (BLOCK_KV // BLOCK_SIZE) # cur_block_id = tl.load(block_table_ptr + cur_bt_start_idx * stride_btb) cur_block_id = tl.load(block_table_ptr + block_start_kv * stride_btb) cur_occupied_size = tl.where( (block_start_kv + 1) * BLOCK_SIZE <= cur_kv_seq_len, BLOCK_SIZE, cur_kv_seq_len - block_start_kv * BLOCK_SIZE ) tl.device_assert(cur_occupied_size >= 0) offsets_q = cur_token_idx * stride_qt + cur_head_idx * stride_qh + offsets_dmodel * stride_qd q = tl.load(Q + offsets_q) cur_kv_head_idx = cur_head_idx // kv_group_num offset_kvcache = cur_block_id * stride_kcb + cur_kv_head_idx * stride_kch offsets_k = ( offset_kvcache + (offsets_dmodel[None, :] // x) * stride_kcsplit_x + (offsets_dmodel[None, :] % x) * stride_kcd + offsets_block[:, None] * stride_kcs ) k_cur_block = tl.load(KCache + offsets_k) V_block_ptr = tl.make_block_ptr( base=VCache + offset_kvcache, shape=(cur_occupied_size, HEAD_DIM), strides=(stride_vcs, stride_vcd), offsets=(0, 0), block_shape=(BLOCK_SIZE, HEAD_DIM), order=(0, 1), ) v_cur_block = tl.load(V_block_ptr) acc = tl.zeros([HEAD_DIM], dtype=tl.float32) # use block size of the paged/blocked kv cache S_ij = tl.zeros([BLOCK_SIZE], dtype=tl.float32) # NOTE a trick to come across triton's requirement that values in both first and second input shapes must be >= 16, # Multiplying two tensors with shapes [1, d] * [d, block_size] will fail. 
# Refer to https://github.com/openai/triton/discussions/895 S_ij += tl.sum(q[None, :] * k_cur_block, 1) S_ij *= sm_scale S_ij += tl.where(block_start_kv * BLOCK_KV + offsets_block < cur_kv_seq_len, 0, float("-inf")) m = tl.max(S_ij, 0) S_ij -= m p_ij_hat = tl.exp(S_ij) l_i = tl.sum(p_ij_hat, 0) p_ij_hat = p_ij_hat.to(v_cur_block.type.element_ty) acc += tl.sum(v_cur_block * p_ij_hat[:, None], 0) acc = acc / l_i offsets_mid_o = ( cur_token_idx * stride_mid_ot + cur_head_idx * stride_mid_oh + block_start_kv * stride_mid_ob + offsets_dmodel * stride_mid_od ) tl.store(mid_o + offsets_mid_o, acc) offsets_mid_o_lse = ( cur_token_idx * stride_mid_o_lset + cur_head_idx * stride_mid_o_lseh + block_start_kv * stride_mid_o_lseb ) # logsumexp l_i^(j) = m^(j) + log(l_i^(j)) tl.store(mid_o_lse + offsets_mid_o_lse, m + tl.log(l_i)) # Triton 2.1.0
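The chunk above ends by storing, for each KV split, a locally normalized partial output acc and its log-sum-exp m + tl.log(l_i). The short PyTorch check below (an illustrative sketch with made-up sizes, not code from the repository) shows why that pair is enough: rescaling each partial output by exp(lse - running_max) and renormalizing, exactly as _flash_decoding_fwd_reduce_kernel later does, reproduces full softmax attention.

import torch

torch.manual_seed(0)
head_dim, kv_len, block = 64, 96, 32                 # made-up sizes
q = torch.randn(head_dim)
k = torch.randn(kv_len, head_dim)
v = torch.randn(kv_len, head_dim)
scale = head_dim ** -0.5

# Reference: plain softmax attention for a single decode token.
s = (k @ q) * scale
ref = torch.softmax(s, dim=0) @ v

# Per-split partial results, mirroring what mid_o / mid_o_lse hold.
mid_o, mid_lse = [], []
for start in range(0, kv_len, block):
    s_blk = s[start:start + block]
    m = s_blk.max()
    p = torch.exp(s_blk - m)
    l = p.sum()
    mid_o.append((p @ v[start:start + block]) / l)   # acc / l_i
    mid_lse.append(m + torch.log(l))                 # m + log(l_i)

# Streaming combine, as in _flash_decoding_fwd_reduce_kernel.
m_i, l_i = float("-inf"), 0.0
acc = torch.zeros(head_dim)
for o_blk, lse in zip(mid_o, mid_lse):
    m_new = max(m_i, lse.item())
    rescale = torch.exp(torch.tensor(m_i - m_new))   # rescale the old accumulator
    weight = torch.exp(lse - m_new)                  # exp_logic in the kernel
    acc = acc * rescale + weight * o_blk
    l_i = l_i * rescale.item() + weight.item()
    m_i = m_new

print(torch.allclose(acc / l_i, ref, atol=1e-5))     # expected: True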
hpcaitech/ColossalAI
colossalai/kernel/triton/flash_decoding.py
https://github.com/hpcaitech/ColossalAI/blob/44d4053fec005fe0b06b6bc755fdc962463145df/colossalai/kernel/triton/flash_decoding.py
# Applying Flash-Decoding as descibed in # https://pytorch.org/blog/flash-decoding/ # by Tri Dao, 2023 import torch import triton import triton.language as tl # Triton 2.1.0 @triton.jit def _flash_decoding_fwd_kernel( Q, # [batch_size * q_len, head_num, head_dim] KCache, # [num_blocks, num_kv_heads, block_size, head_dim] VCache, # [num_blocks, num_kv_heads, block_size, head_dim], # or [num_blocks, num_kv_heads, head_dim//x, block_size, x], depends on strides provided block_tables, # [batch_size, max_blocks_per_sequence] mid_o, # [batch_size * q_len, head_num, kv_split_num, head_dim] mid_o_lse, # [batch_size * q_len, head_num, kv_split_num] kv_seq_len, # [batch_size] q_len, batch_size, kv_group_num, x, sm_scale, stride_qt, stride_qh, stride_qd, stride_kcb, stride_kch, stride_kcsplit_x, stride_kcs, stride_kcd, stride_vcb, stride_vch, stride_vcs, stride_vcd, stride_bts, stride_btb, stride_mid_ot, stride_mid_oh, stride_mid_ob, stride_mid_od, stride_mid_o_lset, stride_mid_o_lseh, stride_mid_o_lseb, BLOCK_KV: tl.constexpr, BLOCK_SIZE: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // q_len if cur_seq_idx >= batch_size: return cur_token_off = (cur_token_idx % q_len) - q_len + 1 cur_head_idx = tl.program_id(1) block_start_kv = tl.program_id(2) # for splitting k/v # NOTE It requires BLOCK_KV and BLOCK_SIZE to be the same # TODO might want to replace with BLOCK_KV % BLOCK_SIZE == 0 (optimize BLOCK_KV as multiple of BLOCK_SIZE) # and then support calculating multiple kv cache blocks on an instance tl.static_assert(BLOCK_KV == BLOCK_SIZE) # get the current (kv) sequence length # cur_token_off is used as a "mask" here for spec-dec during verification process cur_kv_seq_len = tl.load(kv_seq_len + cur_seq_idx) + cur_token_off if block_start_kv * BLOCK_KV >= cur_kv_seq_len: return offsets_dmodel = tl.arange(0, HEAD_DIM) offsets_block = tl.arange(0, BLOCK_SIZE) # block table for the current sequence block_table_ptr = block_tables + cur_seq_idx * stride_bts # cur_bt_start_idx = block_start_kv * (BLOCK_KV // BLOCK_SIZE) # cur_block_id = tl.load(block_table_ptr + cur_bt_start_idx * stride_btb) cur_block_id = tl.load(block_table_ptr + block_start_kv * stride_btb) cur_occupied_size = tl.where( (block_start_kv + 1) * BLOCK_SIZE <= cur_kv_seq_len, BLOCK_SIZE, cur_kv_seq_len - block_start_kv * BLOCK_SIZE ) tl.device_assert(cur_occupied_size >= 0) offsets_q = cur_token_idx * stride_qt + cur_head_idx * stride_qh + offsets_dmodel * stride_qd q = tl.load(Q + offsets_q) cur_kv_head_idx = cur_head_idx // kv_group_num offset_kvcache = cur_block_id * stride_kcb + cur_kv_head_idx * stride_kch offsets_k = ( offset_kvcache + (offsets_dmodel[None, :] // x) * stride_kcsplit_x + (offsets_dmodel[None, :] % x) * stride_kcd + offsets_block[:, None] * stride_kcs ) k_cur_block = tl.load(KCache + offsets_k) V_block_ptr = tl.make_block_ptr( base=VCache + offset_kvcache, shape=(cur_occupied_size, HEAD_DIM), strides=(stride_vcs, stride_vcd), offsets=(0, 0), block_shape=(BLOCK_SIZE, HEAD_DIM), order=(0, 1), ) v_cur_block = tl.load(V_block_ptr) acc = tl.zeros([HEAD_DIM], dtype=tl.float32) # use block size of the paged/blocked kv cache S_ij = tl.zeros([BLOCK_SIZE], dtype=tl.float32) # NOTE a trick to come across triton's requirement that values in both first and second input shapes must be >= 16, # Multiplying two tensors with shapes [1, d] * [d, block_size] will fail. 
# Refer to https://github.com/openai/triton/discussions/895 S_ij += tl.sum(q[None, :] * k_cur_block, 1) S_ij *= sm_scale S_ij += tl.where(block_start_kv * BLOCK_KV + offsets_block < cur_kv_seq_len, 0, float("-inf")) m = tl.max(S_ij, 0) S_ij -= m p_ij_hat = tl.exp(S_ij) l_i = tl.sum(p_ij_hat, 0) p_ij_hat = p_ij_hat.to(v_cur_block.type.element_ty) acc += tl.sum(v_cur_block * p_ij_hat[:, None], 0) acc = acc / l_i offsets_mid_o = ( cur_token_idx * stride_mid_ot + cur_head_idx * stride_mid_oh + block_start_kv * stride_mid_ob + offsets_dmodel * stride_mid_od ) tl.store(mid_o + offsets_mid_o, acc) offsets_mid_o_lse = ( cur_token_idx * stride_mid_o_lset + cur_head_idx * stride_mid_o_lseh + block_start_kv * stride_mid_o_lseb ) # logsumexp l_i^(j) = m^(j) + log(l_i^(j)) tl.store(mid_o_lse + offsets_mid_o_lse, m + tl.log(l_i)) # Triton 2.1.0 @triton.jit def _alibi_flash_decoding_fwd_kernel( Q, # [batch_size * q_len, head_num, head_dim] KCache, # [num_blocks, num_kv_heads, block_size, head_dim] VCache, # [num_blocks, num_kv_heads, block_size, head_dim] block_tables, # [batch_size, max_blocks_per_sequence] mid_o, # [batch_size * q_len, head_num, kv_split_num, head_dim] mid_o_lse, # [batch_size * q_len, head_num, kv_split_num] kv_seq_len, # [batch_size] q_len, batch_size, alibi_slopes, stride_qt, stride_qh, stride_qd, stride_cacheb, stride_cacheh, stride_cachebs, stride_cached, stride_bts, stride_btb, stride_mid_ot, stride_mid_oh, stride_mid_ob, stride_mid_od, stride_mid_o_lset, stride_mid_o_lseh, stride_mid_o_lseb, sm_scale, KV_GROUPS: tl.constexpr, BLOCK_KV: tl.constexpr, BLOCK_SIZE: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // q_len if cur_seq_idx >= batch_size: return cur_token_off = (cur_token_idx % q_len) - q_len + 1 cur_head_idx = tl.program_id(1) block_start_kv = tl.program_id(2) # for splitting k/v # NOTE It requires BLOCK_KV and BLOCK_SIZE to be the same # TODO might want to replace with BLOCK_KV % BLOCK_SIZE == 0 (optimize BLOCK_KV as multiple of BLOCK_SIZE) # and then support calculating multiple kv cache blocks on an instance tl.static_assert(BLOCK_KV == BLOCK_SIZE) # get the current (kv) sequence length # cur_token_off is used as a "mask" here for spec-dec during verification process cur_kv_seq_len = tl.load(kv_seq_len + cur_seq_idx) + cur_token_off if block_start_kv * BLOCK_KV >= cur_kv_seq_len: return offsets_dmodel = tl.arange(0, HEAD_DIM) offsets_q = cur_token_idx * stride_qt + cur_head_idx * stride_qh + offsets_dmodel * stride_qd q = tl.load(Q + offsets_q) # block table for the current sequence block_table_ptr = block_tables + cur_seq_idx * stride_bts # cur_bt_start_idx = block_start_kv * (BLOCK_KV // BLOCK_SIZE) # cur_block_id = tl.load(block_table_ptr + cur_bt_start_idx * stride_btb) cur_block_id = tl.load(block_table_ptr + block_start_kv * stride_btb) cur_occupied_size = tl.where( (block_start_kv + 1) * BLOCK_SIZE <= cur_kv_seq_len, BLOCK_SIZE, cur_kv_seq_len - block_start_kv * BLOCK_SIZE ) tl.device_assert(cur_occupied_size >= 0) cur_kv_head_idx = cur_head_idx // KV_GROUPS offset_kvcache = cur_block_id * stride_cacheb + cur_kv_head_idx * stride_cacheh K_block_ptr = tl.make_block_ptr( base=KCache + offset_kvcache, shape=(cur_occupied_size, HEAD_DIM), strides=(stride_cachebs, stride_cached), offsets=(0, 0), block_shape=(BLOCK_SIZE, HEAD_DIM), order=(0, 1), ) V_block_ptr = tl.make_block_ptr( base=VCache + offset_kvcache, shape=(cur_occupied_size, HEAD_DIM), strides=(stride_cachebs, stride_cached), offsets=(0, 0), 
block_shape=(BLOCK_SIZE, HEAD_DIM), order=(0, 1), ) k_cur_block = tl.load(K_block_ptr) v_cur_block = tl.load(V_block_ptr) acc = tl.zeros([HEAD_DIM], dtype=tl.float32) # use block size of the paged/blocked kv cache S_ij = tl.zeros([BLOCK_SIZE], dtype=tl.float32) alibi_slope = tl.load(alibi_slopes + cur_head_idx) position_k_offset = block_start_kv * BLOCK_KV + tl.arange(0, BLOCK_SIZE) # NOTE a trick to come across triton's requirement that values in both first and second input shapes must be >= 16, # Multiplying two tensors with shapes [1, d] * [d, block_size] will fail. # Refer to https://github.com/openai/triton/discussions/895 S_ij += tl.sum(q[None, :] * k_cur_block, 1) S_ij *= sm_scale S_ij -= alibi_slope * (cur_kv_seq_len - 1 - position_k_offset) S_ij = tl.where(cur_kv_seq_len > position_k_offset, S_ij, float("-inf")) m = tl.max(S_ij, 0) S_ij -= m p_ij_hat = tl.exp(S_ij) l_i = tl.sum(p_ij_hat, 0) p_ij_hat = p_ij_hat.to(v_cur_block.type.element_ty) acc += tl.sum(v_cur_block * p_ij_hat[:, None], 0) acc = acc / l_i offsets_mid_o = ( cur_token_idx * stride_mid_ot + cur_head_idx * stride_mid_oh + block_start_kv * stride_mid_ob + offsets_dmodel * stride_mid_od ) tl.store(mid_o + offsets_mid_o, acc) offsets_mid_o_lse = ( cur_token_idx * stride_mid_o_lset + cur_head_idx * stride_mid_o_lseh + block_start_kv * stride_mid_o_lseb ) # logsumexp l_i^(j) = m^(j) + log(l_i^(j)) tl.store(mid_o_lse + offsets_mid_o_lse, m + tl.log(l_i)) # Triton 2.1.0 @triton.jit def _flash_decoding_fwd_reduce_kernel( mid_o, # [batch_size, head_num, kv_split_num, head_dim] mid_o_lse, # [batch_size, head_num, kv_split_num] O, # [batch_size, num_heads, head_dim] or [batch_size, 1, num_heads, head_dim] kv_seq_len, q_len, batch_size, stride_mid_ot, stride_mid_oh, stride_mid_ob, stride_mid_od, stride_o_lset, stride_o_lseh, stride_o_lseb, stride_ot, stride_oh, stride_od, BLOCK_KV: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // q_len if cur_seq_idx >= batch_size: return cur_head_idx = tl.program_id(1) # cur_token_off is used as a "mask" here for spec-dec during verification process cur_token_off = (cur_token_idx % q_len) - q_len + 1 cur_kv_seq_len = tl.load(kv_seq_len + cur_seq_idx) + cur_token_off offsets_dmodel = tl.arange(0, HEAD_DIM) # NOTE currently the block size BLOCK_KV splitting kv is relatively small as we have # BLOCK_KV == BLOCK_SIZE for now. We might want to decrease the number of blocks of kv splitted. 
kv_split_num = (cur_kv_seq_len + BLOCK_KV - 1) // BLOCK_KV m_i = float("-inf") # max logic l_i = 0.0 # sum exp acc = tl.zeros([HEAD_DIM], dtype=tl.float32) offsets_mid_o = cur_token_idx * stride_mid_ot + cur_head_idx * stride_mid_oh + offsets_dmodel offset_mid_lse = cur_token_idx * stride_o_lset + cur_head_idx * stride_o_lseh for block_i in range(0, kv_split_num, 1): mid_o_block = tl.load(mid_o + offsets_mid_o + block_i * stride_mid_ob) lse = tl.load(mid_o_lse + offset_mid_lse + block_i * stride_o_lseb) m_ij = tl.maximum(m_i, lse) scale = tl.exp(m_i - m_ij) acc = acc * scale lse -= m_ij exp_logic = tl.exp(lse) acc += exp_logic * mid_o_block l_i = scale * l_i + exp_logic m_i = m_ij acc = acc / l_i offsets_O = cur_token_idx * stride_ot + cur_head_idx * stride_oh + offsets_dmodel tl.store(O + offsets_O, acc.to(O.type.element_ty)) return # Decoding Stage # Used with blocked KV Cache (PagedAttention) def flash_decoding_attention( q: torch.Tensor, k_cache: torch.Tensor, v_cache: torch.Tensor, kv_seq_len: torch.Tensor, block_tables: torch.Tensor, block_size: int, max_seq_len_in_batch: int = None, output: torch.Tensor = None, mid_output: torch.Tensor = None, mid_output_lse: torch.Tensor = None, alibi_slopes: torch.Tensor = None, sm_scale: int = None, kv_group_num: int = 1, q_len: int = 1, # NOTE alibi flash decoding does not support q_len > 1 at this moment. use_new_kcache_layout: bool = False, ): """ Flash decoding implemented with a blocked KV Cache (PagedAttention) during decoding stage. Args: q (torch.Tensor): [bsz * q_len, num_heads, head_dim] q_len > 1 only for verification process in speculative-decoding. k_cache (torch.Tensor): [num_blocks, num_kv_heads, block_size, head_dim] v_cache (torch.Tensor): [num_blocks, num_kv_heads, block_size, head_dim] kv_seq_len (torch.Tensor): [batch_size] records the (kv) sequence lengths incorporating past kv sequence lengths. block_tables (torch.Tensor): [batch_size, max_blocks_per_sequence] max_seq_len_in_batch (int): Maximum sequence length in the batch. output (torch.Tensor): [bsz, num_heads * head_dim] mid_output (torch.Tensor): [max_bsz * q_len, num_heads, kv_max_split_num, head_dim] Intermediate output tensor. `max_bsz` should be greater than or equal to `bsz`. q_len > 1 only for verification process in speculative-decoding. mid_output_lse (torch.Tensor): [max_bsz * q_len, num_heads, kv_max_split_num] Log-sum-exp of intermediate output. `max_bsz` should be greater than or equal to `bsz`. q_len > 1 only for verification process in speculative-decoding. alibi_slopes (torch.Tensor): [num_heads] alibi slopes used for alibi flash decoding. block_size (int): Size of each block in the blocked key/value cache. num_kv_group (int, optional): Number of key/value groups. Defaults to 1. q_length (int): Query length. Use for speculative decoding when `q_length` > 1 (i.e. the last n tokens). Defaults to 1. use_new_kcache_layout (bool): Whether to use the new kcache layout. Defaults to False. 
Returns: Output tensor with shape [bsz * q_len, num_heads * head_dim] """ q = q.squeeze() if q.dim() == 4 else q assert q.dim() == 3, f"Incompatible q dim: {q.dim()}" n_tokens, num_heads, head_dim = q.shape assert n_tokens % q_len == 0, "Invalid q_len" bsz = n_tokens // q_len assert head_dim in {32, 64, 128, 256} assert kv_seq_len.shape[0] == block_tables.shape[0] == bsz, ( f"Got incompatible batch size (number of seqs):\n" f" KV seq lengths bsz {kv_seq_len.size(0)}, Block tables bsz {block_tables.size(0)}, " f"batch size {bsz}" ) assert k_cache.size(-2) == v_cache.size(-2) == block_size, ( f"Got incompatible block size on kv caches:\n" f" assigned block_size {block_size}, k_cache block_size {k_cache.size(-2)}, " f"v_cache block_size {v_cache.size(-2)}" ) # NOTE BLOCK_KV could be considered as block splitting the sequence on k/v # For now, BLOCK_KV is supposed to be equivalent with the size of physical cache block (i.e.`block_size`) assert block_size in {16, 32, 64, 128} BLOCK_KV = block_size sm_scale = 1.0 / (head_dim**0.5) if sm_scale is None else sm_scale max_seq_len_in_batch = kv_seq_len.max().item() if max_seq_len_in_batch is None else max_seq_len_in_batch # For compatibility (TODO revise modeling in future) kv_max_split_num = (max_seq_len_in_batch + BLOCK_KV - 1) // BLOCK_KV if mid_output is None: mid_output = torch.empty( (bsz * q_len, num_heads, kv_max_split_num, head_dim), dtype=torch.float32, device=q.device ) if mid_output_lse is None: mid_output_lse = torch.empty((bsz * q_len, num_heads, kv_max_split_num), dtype=torch.float32, device=q.device) if output is None: # A hack to prevent `view` operation in modeling output = torch.empty((bsz * q_len, num_heads * head_dim), dtype=q.dtype, device=q.device) assert ( mid_output.size(2) == mid_output_lse.size(2) >= kv_max_split_num ), "Incompatible kv split number of intermediate output tensors" assert ( mid_output.size(0) == mid_output_lse.size(0) >= output.size(0) == n_tokens ), f"Incompatible first dimension of output tensors" # NOTE use `triton.next_power_of_2` here to utilize the cache mechanism of triton # To optimize, revise batching/scheduling to batch 2^n sequences in a batch (preferred) grid = lambda META: ( triton.next_power_of_2(bsz * q_len), num_heads, triton.cdiv(triton.next_power_of_2(max_seq_len_in_batch), META["BLOCK_KV"]), ) if alibi_slopes is not None: # TODO(yuanheng-zhao): Since the alibi kernel is pretty similar to the original one, # the code (alibi kernel) will be refactored later to avoid code duplication, when # the whole triton flow with new k cache layout has been supported and tested. 
assert ( not use_new_kcache_layout ), "Alibi Slopes will be supported with new kcache layout later when the whole triton flow is ready" _alibi_flash_decoding_fwd_kernel[grid]( q, k_cache, v_cache, block_tables, mid_output, mid_output_lse, kv_seq_len, q_len, bsz, alibi_slopes, q.stride(0), q.stride(1), q.stride(2), k_cache.stride(0), k_cache.stride(1), k_cache.stride(2), k_cache.stride(3), block_tables.stride(0), block_tables.stride(1), mid_output.stride(0), mid_output.stride(1), mid_output.stride(2), mid_output.stride(3), mid_output_lse.stride(0), mid_output_lse.stride(1), mid_output_lse.stride(2), sm_scale, KV_GROUPS=kv_group_num, BLOCK_KV=block_size, BLOCK_SIZE=block_size, HEAD_DIM=head_dim, ) else: # For KCache and VCache with the same layout x = head_dim kcsplit_x_stride, kcs_stride, kcd_stride = 0, k_cache.stride(2), k_cache.stride(3) # For KCache layout [num_blocks, num_kv_heads, head_dim//x, block_size, x] if use_new_kcache_layout: assert ( k_cache.dim() == 5 and k_cache.shape[1] == v_cache.shape[1] and k_cache.shape[2] * k_cache.shape[4] == v_cache.shape[3] ), f"Invalid KCache shape {k_cache.shape} and VCache shape {v_cache.shape}" x = k_cache.size(-1) kcsplit_x_stride, kcs_stride, kcd_stride = k_cache.stride()[-3:] _flash_decoding_fwd_kernel[grid]( q, k_cache, v_cache, block_tables, mid_output, mid_output_lse, kv_seq_len, q_len, bsz, kv_group_num, x, sm_scale, q.stride(0), q.stride(1), q.stride(2), k_cache.stride(0), k_cache.stride(1), kcsplit_x_stride, kcs_stride, kcd_stride, v_cache.stride(0), v_cache.stride(1), v_cache.stride(2), v_cache.stride(3), block_tables.stride(0), block_tables.stride(1), mid_output.stride(0), mid_output.stride(1), mid_output.stride(2), mid_output.stride(3), mid_output_lse.stride(0), mid_output_lse.stride(1), mid_output_lse.stride(2), BLOCK_KV=block_size, BLOCK_SIZE=block_size, HEAD_DIM=head_dim, ) grid = (triton.next_power_of_2(bsz * q_len), num_heads) _flash_decoding_fwd_reduce_kernel[grid]( mid_output, mid_output_lse, output, kv_seq_len, q_len, bsz, mid_output.stride(0), mid_output.stride(1), mid_output.stride(2), mid_output.stride(3), mid_output_lse.stride(0), mid_output_lse.stride(1), mid_output_lse.stride(2), output.stride(0), head_dim, 1, BLOCK_KV=block_size, HEAD_DIM=head_dim, ) return output
@triton.jit def _alibi_flash_decoding_fwd_kernel( Q, # [batch_size * q_len, head_num, head_dim] KCache, # [num_blocks, num_kv_heads, block_size, head_dim] VCache, # [num_blocks, num_kv_heads, block_size, head_dim] block_tables, # [batch_size, max_blocks_per_sequence] mid_o, # [batch_size * q_len, head_num, kv_split_num, head_dim] mid_o_lse, # [batch_size * q_len, head_num, kv_split_num] kv_seq_len, # [batch_size] q_len, batch_size, alibi_slopes, stride_qt, stride_qh, stride_qd, stride_cacheb, stride_cacheh, stride_cachebs, stride_cached, stride_bts, stride_btb, stride_mid_ot, stride_mid_oh, stride_mid_ob, stride_mid_od, stride_mid_o_lset, stride_mid_o_lseh, stride_mid_o_lseb, sm_scale, KV_GROUPS: tl.constexpr, BLOCK_KV: tl.constexpr, BLOCK_SIZE: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // q_len if cur_seq_idx >= batch_size: return cur_token_off = (cur_token_idx % q_len) - q_len + 1 cur_head_idx = tl.program_id(1) block_start_kv = tl.program_id(2) # for splitting k/v # NOTE It requires BLOCK_KV and BLOCK_SIZE to be the same # TODO might want to replace with BLOCK_KV % BLOCK_SIZE == 0 (optimize BLOCK_KV as multiple of BLOCK_SIZE) # and then support calculating multiple kv cache blocks on an instance tl.static_assert(BLOCK_KV == BLOCK_SIZE) # get the current (kv) sequence length # cur_token_off is used as a "mask" here for spec-dec during verification process cur_kv_seq_len = tl.load(kv_seq_len + cur_seq_idx) + cur_token_off if block_start_kv * BLOCK_KV >= cur_kv_seq_len: return offsets_dmodel = tl.arange(0, HEAD_DIM) offsets_q = cur_token_idx * stride_qt + cur_head_idx * stride_qh + offsets_dmodel * stride_qd q = tl.load(Q + offsets_q) # block table for the current sequence block_table_ptr = block_tables + cur_seq_idx * stride_bts # cur_bt_start_idx = block_start_kv * (BLOCK_KV // BLOCK_SIZE) # cur_block_id = tl.load(block_table_ptr + cur_bt_start_idx * stride_btb) cur_block_id = tl.load(block_table_ptr + block_start_kv * stride_btb) cur_occupied_size = tl.where( (block_start_kv + 1) * BLOCK_SIZE <= cur_kv_seq_len, BLOCK_SIZE, cur_kv_seq_len - block_start_kv * BLOCK_SIZE ) tl.device_assert(cur_occupied_size >= 0) cur_kv_head_idx = cur_head_idx // KV_GROUPS offset_kvcache = cur_block_id * stride_cacheb + cur_kv_head_idx * stride_cacheh K_block_ptr = tl.make_block_ptr( base=KCache + offset_kvcache, shape=(cur_occupied_size, HEAD_DIM), strides=(stride_cachebs, stride_cached), offsets=(0, 0), block_shape=(BLOCK_SIZE, HEAD_DIM), order=(0, 1), ) V_block_ptr = tl.make_block_ptr( base=VCache + offset_kvcache, shape=(cur_occupied_size, HEAD_DIM), strides=(stride_cachebs, stride_cached), offsets=(0, 0), block_shape=(BLOCK_SIZE, HEAD_DIM), order=(0, 1), ) k_cur_block = tl.load(K_block_ptr) v_cur_block = tl.load(V_block_ptr) acc = tl.zeros([HEAD_DIM], dtype=tl.float32) # use block size of the paged/blocked kv cache S_ij = tl.zeros([BLOCK_SIZE], dtype=tl.float32) alibi_slope = tl.load(alibi_slopes + cur_head_idx) position_k_offset = block_start_kv * BLOCK_KV + tl.arange(0, BLOCK_SIZE) # NOTE a trick to come across triton's requirement that values in both first and second input shapes must be >= 16, # Multiplying two tensors with shapes [1, d] * [d, block_size] will fail. 
# Refer to https://github.com/openai/triton/discussions/895 S_ij += tl.sum(q[None, :] * k_cur_block, 1) S_ij *= sm_scale S_ij -= alibi_slope * (cur_kv_seq_len - 1 - position_k_offset) S_ij = tl.where(cur_kv_seq_len > position_k_offset, S_ij, float("-inf")) m = tl.max(S_ij, 0) S_ij -= m p_ij_hat = tl.exp(S_ij) l_i = tl.sum(p_ij_hat, 0) p_ij_hat = p_ij_hat.to(v_cur_block.type.element_ty) acc += tl.sum(v_cur_block * p_ij_hat[:, None], 0) acc = acc / l_i offsets_mid_o = ( cur_token_idx * stride_mid_ot + cur_head_idx * stride_mid_oh + block_start_kv * stride_mid_ob + offsets_dmodel * stride_mid_od ) tl.store(mid_o + offsets_mid_o, acc) offsets_mid_o_lse = ( cur_token_idx * stride_mid_o_lset + cur_head_idx * stride_mid_o_lseh + block_start_kv * stride_mid_o_lseb ) # logsumexp l_i^(j) = m^(j) + log(l_i^(j)) tl.store(mid_o_lse + offsets_mid_o_lse, m + tl.log(l_i)) # Triton 2.1.0
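Relative to the non-ALiBi kernel, this chunk only adds the position mask and the bias term -slope * (cur_kv_seq_len - 1 - position_k_offset) subtracted from the scaled scores. The sketch below builds that bias for one KV block; the slope values follow a common geometric convention and are an assumption, since the kernel simply reads whatever alibi_slopes tensor it is given.

import torch

num_heads, kv_len, block_size = 4, 10, 16            # made-up sizes
# Assumed slope convention (2**(-8*(h+1)/num_heads)); not taken from this file.
slopes = torch.tensor([2.0 ** (-8.0 * (h + 1) / num_heads) for h in range(num_heads)])

position_k = torch.arange(block_size)                # one KV block starting at offset 0
bias = -slopes[:, None] * (kv_len - 1 - position_k)[None, :]
bias = torch.where(position_k[None, :] < kv_len, bias, torch.tensor(float("-inf")))

print(bias.shape)                                    # torch.Size([4, 16]); columns 10..15 are -inf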
hpcaitech/ColossalAI
colossalai/kernel/triton/flash_decoding.py
https://github.com/hpcaitech/ColossalAI/blob/44d4053fec005fe0b06b6bc755fdc962463145df/colossalai/kernel/triton/flash_decoding.py
@triton.jit def _flash_decoding_fwd_reduce_kernel( mid_o, # [batch_size, head_num, kv_split_num, head_dim] mid_o_lse, # [batch_size, head_num, kv_split_num] O, # [batch_size, num_heads, head_dim] or [batch_size, 1, num_heads, head_dim] kv_seq_len, q_len, batch_size, stride_mid_ot, stride_mid_oh, stride_mid_ob, stride_mid_od, stride_o_lset, stride_o_lseh, stride_o_lseb, stride_ot, stride_oh, stride_od, BLOCK_KV: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // q_len if cur_seq_idx >= batch_size: return cur_head_idx = tl.program_id(1) # cur_token_off is used as a "mask" here for spec-dec during verification process cur_token_off = (cur_token_idx % q_len) - q_len + 1 cur_kv_seq_len = tl.load(kv_seq_len + cur_seq_idx) + cur_token_off offsets_dmodel = tl.arange(0, HEAD_DIM) # NOTE currently the block size BLOCK_KV splitting kv is relatively small as we have # BLOCK_KV == BLOCK_SIZE for now. We might want to decrease the number of blocks of kv splitted. kv_split_num = (cur_kv_seq_len + BLOCK_KV - 1) // BLOCK_KV m_i = float("-inf") # max logic l_i = 0.0 # sum exp acc = tl.zeros([HEAD_DIM], dtype=tl.float32) offsets_mid_o = cur_token_idx * stride_mid_ot + cur_head_idx * stride_mid_oh + offsets_dmodel offset_mid_lse = cur_token_idx * stride_o_lset + cur_head_idx * stride_o_lseh for block_i in range(0, kv_split_num, 1): mid_o_block = tl.load(mid_o + offsets_mid_o + block_i * stride_mid_ob) lse = tl.load(mid_o_lse + offset_mid_lse + block_i * stride_o_lseb) m_ij = tl.maximum(m_i, lse) scale = tl.exp(m_i - m_ij) acc = acc * scale lse -= m_ij exp_logic = tl.exp(lse) acc += exp_logic * mid_o_block l_i = scale * l_i + exp_logic m_i = m_ij acc = acc / l_i offsets_O = cur_token_idx * stride_ot + cur_head_idx * stride_oh + offsets_dmodel tl.store(O + offsets_O, acc.to(O.type.element_ty)) return # Decoding Stage # Used with blocked KV Cache (PagedAttention) def flash_decoding_attention( q: torch.Tensor, k_cache: torch.Tensor, v_cache: torch.Tensor, kv_seq_len: torch.Tensor, block_tables: torch.Tensor, block_size: int, max_seq_len_in_batch: int = None, output: torch.Tensor = None, mid_output: torch.Tensor = None, mid_output_lse: torch.Tensor = None, alibi_slopes: torch.Tensor = None, sm_scale: int = None, kv_group_num: int = 1, q_len: int = 1, # NOTE alibi flash decoding does not support q_len > 1 at this moment. use_new_kcache_layout: bool = False, ): """ Flash decoding implemented with a blocked KV Cache (PagedAttention) during decoding stage. Args: q (torch.Tensor): [bsz * q_len, num_heads, head_dim] q_len > 1 only for verification process in speculative-decoding. k_cache (torch.Tensor): [num_blocks, num_kv_heads, block_size, head_dim] v_cache (torch.Tensor): [num_blocks, num_kv_heads, block_size, head_dim] kv_seq_len (torch.Tensor): [batch_size] records the (kv) sequence lengths incorporating past kv sequence lengths. block_tables (torch.Tensor): [batch_size, max_blocks_per_sequence] max_seq_len_in_batch (int): Maximum sequence length in the batch. output (torch.Tensor): [bsz, num_heads * head_dim] mid_output (torch.Tensor): [max_bsz * q_len, num_heads, kv_max_split_num, head_dim] Intermediate output tensor. `max_bsz` should be greater than or equal to `bsz`. q_len > 1 only for verification process in speculative-decoding. mid_output_lse (torch.Tensor): [max_bsz * q_len, num_heads, kv_max_split_num] Log-sum-exp of intermediate output. `max_bsz` should be greater than or equal to `bsz`. q_len > 1 only for verification process in speculative-decoding. 
alibi_slopes (torch.Tensor): [num_heads] alibi slopes used for alibi flash decoding. block_size (int): Size of each block in the blocked key/value cache. num_kv_group (int, optional): Number of key/value groups. Defaults to 1. q_length (int): Query length. Use for speculative decoding when `q_length` > 1 (i.e. the last n tokens). Defaults to 1. use_new_kcache_layout (bool): Whether to use the new kcache layout. Defaults to False. Returns: Output tensor with shape [bsz * q_len, num_heads * head_dim] """ q = q.squeeze() if q.dim() == 4 else q assert q.dim() == 3, f"Incompatible q dim: {q.dim()}" n_tokens, num_heads, head_dim = q.shape assert n_tokens % q_len == 0, "Invalid q_len" bsz = n_tokens // q_len assert head_dim in {32, 64, 128, 256} assert kv_seq_len.shape[0] == block_tables.shape[0] == bsz, ( f"Got incompatible batch size (number of seqs):\n" f" KV seq lengths bsz {kv_seq_len.size(0)}, Block tables bsz {block_tables.size(0)}, " f"batch size {bsz}" ) assert k_cache.size(-2) == v_cache.size(-2) == block_size, ( f"Got incompatible block size on kv caches:\n" f" assigned block_size {block_size}, k_cache block_size {k_cache.size(-2)}, " f"v_cache block_size {v_cache.size(-2)}" ) # NOTE BLOCK_KV could be considered as block splitting the sequence on k/v # For now, BLOCK_KV is supposed to be equivalent with the size of physical cache block (i.e.`block_size`) assert block_size in {16, 32, 64, 128} BLOCK_KV = block_size sm_scale = 1.0 / (head_dim**0.5) if sm_scale is None else sm_scale max_seq_len_in_batch = kv_seq_len.max().item() if max_seq_len_in_batch is None else max_seq_len_in_batch # For compatibility (TODO revise modeling in future) kv_max_split_num = (max_seq_len_in_batch + BLOCK_KV - 1) // BLOCK_KV if mid_output is None: mid_output = torch.empty( (bsz * q_len, num_heads, kv_max_split_num, head_dim), dtype=torch.float32, device=q.device ) if mid_output_lse is None: mid_output_lse = torch.empty((bsz * q_len, num_heads, kv_max_split_num), dtype=torch.float32, device=q.device) if output is None: # A hack to prevent `view` operation in modeling output = torch.empty((bsz * q_len, num_heads * head_dim), dtype=q.dtype, device=q.device) assert ( mid_output.size(2) == mid_output_lse.size(2) >= kv_max_split_num ), "Incompatible kv split number of intermediate output tensors" assert ( mid_output.size(0) == mid_output_lse.size(0) >= output.size(0) == n_tokens ), f"Incompatible first dimension of output tensors" # NOTE use `triton.next_power_of_2` here to utilize the cache mechanism of triton # To optimize, revise batching/scheduling to batch 2^n sequences in a batch (preferred) grid = lambda META: ( triton.next_power_of_2(bsz * q_len), num_heads, triton.cdiv(triton.next_power_of_2(max_seq_len_in_batch), META["BLOCK_KV"]), ) if alibi_slopes is not None: # TODO(yuanheng-zhao): Since the alibi kernel is pretty similar to the original one, # the code (alibi kernel) will be refactored later to avoid code duplication, when # the whole triton flow with new k cache layout has been supported and tested. 
assert ( not use_new_kcache_layout ), "Alibi Slopes will be supported with new kcache layout later when the whole triton flow is ready" _alibi_flash_decoding_fwd_kernel[grid]( q, k_cache, v_cache, block_tables, mid_output, mid_output_lse, kv_seq_len, q_len, bsz, alibi_slopes, q.stride(0), q.stride(1), q.stride(2), k_cache.stride(0), k_cache.stride(1), k_cache.stride(2), k_cache.stride(3), block_tables.stride(0), block_tables.stride(1), mid_output.stride(0), mid_output.stride(1), mid_output.stride(2), mid_output.stride(3), mid_output_lse.stride(0), mid_output_lse.stride(1), mid_output_lse.stride(2), sm_scale, KV_GROUPS=kv_group_num, BLOCK_KV=block_size, BLOCK_SIZE=block_size, HEAD_DIM=head_dim, ) else: # For KCache and VCache with the same layout x = head_dim kcsplit_x_stride, kcs_stride, kcd_stride = 0, k_cache.stride(2), k_cache.stride(3) # For KCache layout [num_blocks, num_kv_heads, head_dim//x, block_size, x] if use_new_kcache_layout: assert ( k_cache.dim() == 5 and k_cache.shape[1] == v_cache.shape[1] and k_cache.shape[2] * k_cache.shape[4] == v_cache.shape[3] ), f"Invalid KCache shape {k_cache.shape} and VCache shape {v_cache.shape}" x = k_cache.size(-1) kcsplit_x_stride, kcs_stride, kcd_stride = k_cache.stride()[-3:] _flash_decoding_fwd_kernel[grid]( q, k_cache, v_cache, block_tables, mid_output, mid_output_lse, kv_seq_len, q_len, bsz, kv_group_num, x, sm_scale, q.stride(0), q.stride(1), q.stride(2), k_cache.stride(0), k_cache.stride(1), kcsplit_x_stride, kcs_stride, kcd_stride, v_cache.stride(0), v_cache.stride(1), v_cache.stride(2), v_cache.stride(3), block_tables.stride(0), block_tables.stride(1), mid_output.stride(0), mid_output.stride(1), mid_output.stride(2), mid_output.stride(3), mid_output_lse.stride(0), mid_output_lse.stride(1), mid_output_lse.stride(2), BLOCK_KV=block_size, BLOCK_SIZE=block_size, HEAD_DIM=head_dim, ) grid = (triton.next_power_of_2(bsz * q_len), num_heads) _flash_decoding_fwd_reduce_kernel[grid]( mid_output, mid_output_lse, output, kv_seq_len, q_len, bsz, mid_output.stride(0), mid_output.stride(1), mid_output.stride(2), mid_output.stride(3), mid_output_lse.stride(0), mid_output_lse.stride(1), mid_output_lse.stride(2), output.stride(0), head_dim, 1, BLOCK_KV=block_size, HEAD_DIM=head_dim, ) return output
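For readers tracing the wrapper in this chunk, the following is a deliberately slow PyTorch reference (an illustrative sketch, not ColossalAI's API) of what the paged decode attention computes for q_len = 1 and kv_group_num = 1: block_tables maps each sequence to its cache blocks, the gathered keys/values are trimmed to kv_seq_len, and standard scaled softmax attention is applied per head.

import torch

def paged_decode_attention_ref(q, k_cache, v_cache, kv_seq_len, block_tables, block_size):
    # q:          [bsz, num_heads, head_dim]
    # k/v_cache:  [num_blocks, num_kv_heads(=num_heads), block_size, head_dim]
    # kv_seq_len: [bsz], block_tables: [bsz, max_blocks_per_sequence]
    bsz, num_heads, head_dim = q.shape
    scale = head_dim ** -0.5
    out = torch.empty(bsz, num_heads, head_dim, dtype=q.dtype)
    for b in range(bsz):
        seq_len = int(kv_seq_len[b])
        n_blocks = (seq_len + block_size - 1) // block_size
        ids = block_tables[b, :n_blocks].long()
        # Gather this sequence's cache blocks and trim padding in the last block.
        k = k_cache[ids].permute(1, 0, 2, 3).reshape(num_heads, -1, head_dim)[:, :seq_len]
        v = v_cache[ids].permute(1, 0, 2, 3).reshape(num_heads, -1, head_dim)[:, :seq_len]
        s = torch.einsum("hd,hkd->hk", q[b].float(), k.float()) * scale
        p = torch.softmax(s, dim=-1)
        out[b] = torch.einsum("hk,hkd->hd", p, v.float()).to(q.dtype)
    return out.reshape(bsz, num_heads * head_dim)

# Tiny toy run with made-up shapes.
bsz, heads, dim, blk, n_blocks = 2, 4, 64, 16, 8
kc = torch.randn(n_blocks, heads, blk, dim)
vc = torch.randn(n_blocks, heads, blk, dim)
q = torch.randn(bsz, heads, dim)
kv_len = torch.tensor([20, 33])
tables = torch.arange(bsz * 4).reshape(bsz, 4)       # 4 table slots per sequence suffice here
print(paged_decode_attention_ref(q, kc, vc, kv_len, tables, blk).shape)  # torch.Size([2, 256])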
FlagOpen/FlagGems
src/flag_gems/ops/ones.py
https://github.com/FlagOpen/FlagGems/blob/2437f4ffa2d644e38c26aacbf1249263a2016bb2/src/flag_gems/ops/ones.py
import logging

import torch
import triton
import triton.language as tl

from ..runtime import device, torch_device_fn
from ..utils import libentry
from ..utils import triton_lang_extension as tle
from ..utils.shape_utils import volume

device_ = device


@libentry()
@triton.jit
def ones_kernel(
    output_ptr,
    n_elements,
    BLOCK_SIZE: tl.constexpr,
):
    pid = tle.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    tl.store(output_ptr + offsets, 1.0, mask=mask)


def ones(size, *, dtype=None, layout=None, device=None, pin_memory=None):
    logging.debug("GEMS ONES")
    if dtype is None:
        dtype = torch.get_default_dtype()
    if device is None:
        device = torch.device(device_.name)
    out = torch.empty(size, device=device, dtype=dtype)
    N = volume(size)
    BLOCK_SIZE = 1024
    grid = (triton.cdiv(N, BLOCK_SIZE),)
    with torch_device_fn.device(device):
        ones_kernel[grid](out, N, BLOCK_SIZE)
    return out
@triton.jit
def ones_kernel(
    output_ptr,
    n_elements,
    BLOCK_SIZE: tl.constexpr,
):
    pid = tle.program_id(axis=0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    tl.store(output_ptr + offsets, 1.0, mask=mask)


def ones(size, *, dtype=None, layout=None, device=None, pin_memory=None):
    logging.debug("GEMS ONES")
    if dtype is None:
        dtype = torch.get_default_dtype()
    if device is None:
        device = torch.device(device_.name)
    out = torch.empty(size, device=device, dtype=dtype)
    N = volume(size)
    BLOCK_SIZE = 1024
    grid = (triton.cdiv(N, BLOCK_SIZE),)
    with torch_device_fn.device(device):
        ones_kernel[grid](out, N, BLOCK_SIZE)
    return out
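A minimal usage sketch for the `ones` wrapper above; calling it directly like this is an illustration only, since the package may normally expose it through its aten-override mechanism rather than as a plain function, and it assumes a CUDA-capable default device.

out = ones((1024, 1024), dtype=torch.float16)
ref = torch.ones((1024, 1024), dtype=torch.float16, device=out.device)
assert torch.equal(out, ref)  # every element written as 1.0 by ones_kernel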
zinccat/TritonTrace
original/level3/6_GoogleNetInceptionModule/triton_poi_fused_0.py
https://github.com/zinccat/TritonTrace/blob/a24eba759122f6f75bf349cd44ef329d9cd3e476/original/level3/6_GoogleNetInceptionModule/triton_poi_fused_0.py
# From: 6_GoogleNetInceptionModule

import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor

from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties
triton_helpers.set_driver_to_gpu()

@triton_heuristics.pointwise(
    size_hints={'y': 8192, 'x': 65536}, tile_hint=TileHint.SQUARE,
    filename=__file__,
    triton_meta={'signature': {'in_ptr0': '*fp32', 'out_ptr0': '*fp32', 'ynumel': 'i32', 'xnumel': 'i32'}, 'device': DeviceProperties(type='cuda', index=0, multi_processor_count=82, cc=86, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=1536, warp_size=32), 'constants': {}, 'configs': [AttrsDescriptor.from_dict({'arg_properties': {'tt.divisibility': (0, 1, 2, 3), 'tt.equal_to': ()}, 'cls': 'AttrsDescriptor'})]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'optimize_mem': False, 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': '5A06A9183D03767BDAB0FC92F89F8279B36CCC7C4B95A264F6D3CCE126D2D3A0', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
    min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 4800
    xnumel = 50176
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = (yindex % 480)
    y1 = yindex // 480
    tmp0 = tl.load(in_ptr0 + (x2 + 50176*y3), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 480*x2 + 24084480*y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
    ynumel = 4800
    xnumel = 50176
    yoffset = tl.program_id(1) * YBLOCK
    yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
    ymask = yindex < ynumel
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    x2 = xindex
    y3 = yindex
    y0 = (yindex % 480)
    y1 = yindex // 480
    tmp0 = tl.load(in_ptr0 + (x2 + 50176*y3), xmask & ymask, eviction_policy='evict_last')
    tl.store(out_ptr0 + (y0 + 480*x2 + 24084480*y1), tmp0, xmask & ymask)
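A hedged reading of the index arithmetic above: with ynumel = 4800 = 10 * 480 and xnumel = 50176 = 224 * 224, the kernel copies a contiguous [10, 480, 50176] input into a [10, 50176, 480] output, i.e. an NCHW-to-NHWC-style relayout. The following reference sketch shows the same data movement in plain PyTorch; the shapes are inferred, not stated in the source.

import torch

x = torch.randn(10, 480, 224 * 224, device="cuda")
ref = x.permute(0, 2, 1).contiguous()
# Matches the store pattern: out[y0 + 480*x2 + 24084480*y1] = in[x2 + 50176*(480*y1 + y0)]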
thuml/learn_torch.compile
timm/swin_base_patch4_window7_224_training_cuda/__compiled_fn_3 kernel 59.py
https://github.com/thuml/learn_torch.compile/blob/b1a5e6dfa7a14996962dc22bff78b41cffac430d/timm/swin_base_patch4_window7_224_training_cuda/__compiled_fn_3%20kernel%2059.py
import triton
import triton.language as tl
from torch._inductor.ir import ReductionHint
from torch._inductor.ir import TileHint
from torch._inductor.triton_heuristics import AutotuneHint, persistent_reduction
from torch._inductor.utils import instance_descriptor
from torch._inductor import triton_helpers

@persistent_reduction(
    size_hints=[2048, 512],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': 0, 'device_type': 'cuda', 'constants': {}, 'configs': [instance_descriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=(), ids_of_folded_args=(), divisible_by_8=(9, 10))]},
    inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_native_layer_norm_native_layer_norm_backward_58', 'mutated_arg_names': ['in_out_ptr0']}
)
@triton.jit
def triton_(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr2, out_ptr3, xnumel, rnumel):
    xnumel = 1568
    XBLOCK: tl.constexpr = 1
    rnumel = 512
    RBLOCK: tl.constexpr = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[:]
    rmask = rindex < rnumel
    r2 = rindex
    x3 = xindex
    x0 = xindex % 196
    x1 = (xindex // 196)
    tmp0 = tl.load(in_ptr0 + (r2 + (512*x3)), rmask & xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r2 + (512*((x0 % 14) % 7)) + (3584*((x0 // 14) % 7)) + (25088*((x0 % 14) // 7)) + (50176*(x0 // 98)) + (100352*x1)), rmask & xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_last', other=0.0)
    tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_out_ptr0 + (r2 + (512*x3)), rmask & xmask, other=0.0)
    tmp10 = tl.load(in_ptr4 + (r2), rmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = 0.947826087474823
    tmp6 = tmp4 / tmp5
    tmp7 = tmp3 * tmp6
    tmp8 = tmp0 + tmp7
    tmp11 = tmp9 + tmp10
    tmp13 = tmp12 / tmp5
    tmp14 = tmp11 * tmp13
    tmp15 = tmp8 + tmp14
    tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
    tmp18 = tl.where(rmask & xmask, tmp16, 0)
    tmp19 = tl.broadcast_to(tmp16, [RBLOCK])
    tmp21 = tl.where(rmask & xmask, tmp19, 0)
    tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
    tmp23 = tl.full([1], 512, tl.int32)
    tmp24 = tmp23.to(tl.float32)
    tmp25 = tmp22 / tmp24
    tmp26 = tmp16 - tmp25
    tmp27 = tmp26 * tmp26
    tmp28 = tl.broadcast_to(tmp27, [RBLOCK])
    tmp30 = tl.where(rmask & xmask, tmp28, 0)
    tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0))
    tmp32 = tmp15 - tmp25
    tmp33 = 512.0
    tmp34 = tmp31 / tmp33
    tmp35 = 1e-05
    tmp36 = tmp34 + tmp35
    tmp37 = tl.math.rsqrt(tmp36)
    tmp38 = tmp32 * tmp37
    tmp39 = tmp37 / tmp33
    tl.store(in_out_ptr0 + (r2 + (512*x3)), tmp15, rmask & xmask)
    tl.store(out_ptr2 + (r2 + (512*x3)), tmp38, rmask & xmask)
    tl.store(out_ptr3 + (x3), tmp39, xmask)
@triton.jit
def triton_(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr2, out_ptr3, xnumel, rnumel):
    xnumel = 1568
    XBLOCK: tl.constexpr = 1
    rnumel = 512
    RBLOCK: tl.constexpr = 512
    xoffset = tl.program_id(0) * XBLOCK
    xindex = tl.full([1], xoffset, tl.int32)
    xmask = xindex < xnumel
    rindex = tl.arange(0, RBLOCK)[:]
    rmask = rindex < rnumel
    r2 = rindex
    x3 = xindex
    x0 = xindex % 196
    x1 = (xindex // 196)
    tmp0 = tl.load(in_ptr0 + (r2 + (512*x3)), rmask & xmask, other=0.0)
    tmp1 = tl.load(in_ptr1 + (r2 + (512*((x0 % 14) % 7)) + (3584*((x0 // 14) % 7)) + (25088*((x0 % 14) // 7)) + (50176*(x0 // 98)) + (100352*x1)), rmask & xmask, other=0.0)
    tmp2 = tl.load(in_ptr2 + (r2), rmask, eviction_policy='evict_last', other=0.0)
    tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
    tmp9 = tl.load(in_out_ptr0 + (r2 + (512*x3)), rmask & xmask, other=0.0)
    tmp10 = tl.load(in_ptr4 + (r2), rmask, eviction_policy='evict_last', other=0.0)
    tmp12 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
    tmp3 = tmp1 + tmp2
    tmp5 = 0.947826087474823
    tmp6 = tmp4 / tmp5
    tmp7 = tmp3 * tmp6
    tmp8 = tmp0 + tmp7
    tmp11 = tmp9 + tmp10
    tmp13 = tmp12 / tmp5
    tmp14 = tmp11 * tmp13
    tmp15 = tmp8 + tmp14
    tmp16 = tl.broadcast_to(tmp15, [RBLOCK])
    tmp18 = tl.where(rmask & xmask, tmp16, 0)
    tmp19 = tl.broadcast_to(tmp16, [RBLOCK])
    tmp21 = tl.where(rmask & xmask, tmp19, 0)
    tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
    tmp23 = tl.full([1], 512, tl.int32)
    tmp24 = tmp23.to(tl.float32)
    tmp25 = tmp22 / tmp24
    tmp26 = tmp16 - tmp25
    tmp27 = tmp26 * tmp26
    tmp28 = tl.broadcast_to(tmp27, [RBLOCK])
    tmp30 = tl.where(rmask & xmask, tmp28, 0)
    tmp31 = triton_helpers.promote_to_tensor(tl.sum(tmp30, 0))
    tmp32 = tmp15 - tmp25
    tmp33 = 512.0
    tmp34 = tmp31 / tmp33
    tmp35 = 1e-05
    tmp36 = tmp34 + tmp35
    tmp37 = tl.math.rsqrt(tmp36)
    tmp38 = tmp32 * tmp37
    tmp39 = tmp37 / tmp33
    tl.store(in_out_ptr0 + (r2 + (512*x3)), tmp15, rmask & xmask)
    tl.store(out_ptr2 + (r2 + (512*x3)), tmp38, rmask & xmask)
    tl.store(out_ptr3 + (x3), tmp39, xmask)
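A hedged PyTorch reading of what this fused kernel appears to compute: two residual branches scaled by per-sample DropPath factors (keep probability 0.947826...), summed into the residual stream, followed by LayerNorm statistics. The tensor names and the interpretation of in_ptr3/in_ptr5 as per-sample keep masks are assumptions, not taken from the source model.

import torch
import torch.nn.functional as F

B, L, C, keep_prob = 8, 196, 512, 0.947826087474823
x = torch.randn(B * L, C)        # in_ptr0: incoming residual stream
branch1 = torch.randn(B * L, C)  # in_ptr1 + in_ptr2 bias, already window-reversed here for simplicity
branch2 = torch.randn(B * L, C)  # in_out_ptr0 + in_ptr4 bias
keep1 = torch.ones(B, 1)         # in_ptr3: per-sample DropPath mask (assumed)
keep2 = torch.ones(B, 1)         # in_ptr5: per-sample DropPath mask (assumed)

scale1 = (keep1 / keep_prob).repeat_interleave(L, dim=0)
scale2 = (keep2 / keep_prob).repeat_interleave(L, dim=0)
resid = x + scale1 * branch1 + scale2 * branch2   # written back to in_out_ptr0 (tmp15)
normed = F.layer_norm(resid, (C,))                # out_ptr2 (tmp38); affine applied elsewhere
var = resid.var(dim=-1, unbiased=False, keepdim=True)
rstd_over_c = torch.rsqrt(var + 1e-05) / C        # out_ptr3 (tmp39), kept for the backward pass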
thu-ml/Jetfire-INT8Training
Jetfire/Nonlinear/dropout_fwd.py
https://github.com/thu-ml/Jetfire-INT8Training/blob/55563553682643aef58fe2acbfacca64fd4c8d3c/Jetfire/Nonlinear/dropout_fwd.py
import torch # 4 block import triton import triton.language as tl CONST_BLOCK=32 # here is B for quant group size ? # The kernel with 1 load operation and 4 store operation def get_configs_io_block(): configs = [] for num_stages in [2, 3, 4, 5, 6]: block_m, block_n = 64, 64 num_warps = 4 if block_n <= 64 else 8 configs.append(triton.Config({'BLOCK_M': block_m, 'BLOCK_N': block_n}, num_stages=num_stages, num_warps=num_warps,)) return configs @triton.autotune( configs=[] + get_configs_io_block(), key=['M', 'N',], ) @triton.heuristics({ 'BLOCK_SM': lambda args: args["BLOCK_M"] // args["QB"], 'BLOCK_SN': lambda args: args["BLOCK_N"] // args["QB"], }) @triton.jit def int8_dropout_kernel_forward( output_ptr, output_scale_ptr, input_ptr, input_scale_ptr, mask_ptr, p_ptr, M, N, SM, SN, input_stride_b, input_stride_0, input_stride_1, s_input_stride_b, s_input_stride_0, s_input_stride_1, output_stride_b, output_stride_0, output_stride_1, s_output_stride_b, s_output_stride_0, s_output_stride_1, mask_stride_b, mask_stride_0, mask_stride_1, QB: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_SM: tl.constexpr, BLOCK_SN: tl.constexpr,): # Block PID pid_b = tl.program_id(0) pid = tl.program_id(1) NUM_BLOCK_M = tl.cdiv(M, BLOCK_M) NUM_BLOCK_N = tl.cdiv(N, BLOCK_N) pid_dim0 = pid // NUM_BLOCK_N pid_dim1 = pid % NUM_BLOCK_N # pointers input_block_ptr = tl.make_block_ptr( base=input_ptr + pid_b * input_stride_b, shape=(M, N), strides=(input_stride_0, input_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0) ) # input ptr scale_input_ptr = tl.make_block_ptr( base=input_scale_ptr + pid_b * s_input_stride_b, shape=(SM, SN), strides=(s_input_stride_0, s_input_stride_1), offsets=(pid_dim0 * BLOCK_SM, pid_dim1 * BLOCK_SN), block_shape=(BLOCK_SM, BLOCK_SN), order=(1, 0), ) mask_block_ptr = tl.make_block_ptr( base=mask_ptr + pid_b * mask_stride_b, shape=(M, N), strides=(mask_stride_0, mask_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0) ) input = tl.load(input_block_ptr) scale_input = tl.load(scale_input_ptr) mask = tl.load(mask_block_ptr) p = tl.load(p_ptr) # Dequantize and dropout calculation # scale_input = tl.reshape(scale_input, (BLOCK_SM, 1, BLOCK_SN, 1)) # input = tl.reshape(input, (BLOCK_SM, BLOCK_M//BLOCK_SM, BLOCK_SN, BLOCK_N//BLOCK_SN)) dropout_output = input * mask scale_output = scale_input / (1-p) # scale_output = tl.reshape(scale_output, (4, 1, 4, 1)) # scale_output = tl.reshape(scale_output, (BLOCK_SM, 1, BLOCK_SN, 1)) dropout_output = dropout_output.to(tl.int8) scale_output = scale_output.to(tl.float16) # debug # dropout_output = input # scale_output = scale_input # pointers output_block_ptr = tl.make_block_ptr( base=output_ptr + pid_b * output_stride_b, shape=(M, N), strides=(output_stride_0, output_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0) ) scale_output_ptr = tl.make_block_ptr( base=output_scale_ptr + pid_b * s_output_stride_b, shape=(SM, SN), strides=(s_output_stride_0, s_output_stride_1), offsets=(pid_dim0 * BLOCK_SM, pid_dim1 * BLOCK_SN), block_shape=(BLOCK_SM, BLOCK_SN), order=(1, 0), ) tl.store(output_block_ptr, dropout_output) tl.store(scale_output_ptr, scale_output) def int8_dropout_forward(x, s_x, mask, p, QB): # defining the input and output tensor BS, M, N = x.shape _, SM, SN = s_x.shape y = torch.empty_like(x, dtype=torch.int8) s_y = torch.empty_like(s_x, dtype=torch.float16) grid = lambda META: 
( BS, triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]), ) int8_dropout_kernel_forward[grid]( y, s_y, x, s_x, mask, p, M, N, SM, SN, x.stride(0), x.stride(1), x.stride(2), s_x.stride(0), s_x.stride(1), s_x.stride(2), y.stride(0), y.stride(1), y.stride(2), s_y.stride(0), s_y.stride(1), s_y.stride(2), mask.stride(0), mask.stride(1), mask.stride(2), QB ) return y, s_y # I change the dtype of both the input tensor and the output tensor. I use torch.float32, torch.float16, and torch.int8 configs = [] for BS in [1, 2, 4, 8]: for SL in [2048, 4096, ]: configs.append( triton.testing.Benchmark( # test different matrix size influence x_names=['CDIM'], x_vals=[1024, 2048, 4096, 6144, 8192], line_arg='provider', line_vals=["triton", "torch"], line_names=['triton', 'torch'], styles=[('blue', '-'), ('green', '-')], ylabel='time-cost', plot_name=f'INT8dropout<BLSZ={CONST_BLOCK}><BS={BS}><SL={SL}>', args={'BS': BS, 'SL': SL, 'QB': CONST_BLOCK, 'dtype': torch.float16, 'mode': 'time-consuming'} ) ) @triton.testing.perf_report( configs ) def bench_load_store(BS, SL, CDIM, QB, provider, dtype, mode='forward'): # I only use triton as the provider, and mode when benchmarking # create data x = torch.randn(BS, SL, CDIM, dtype=dtype).cuda() sx = x.abs().max(dim=1)[0] / 127 bias = None # triton result _qx = x.reshape(BS, SL // QB, QB, CDIM // QB, QB).permute(0, 1, 3, 2, 4) sx = _qx.abs().amax(dim=(3, 4)) / 127 _qx = ((_qx / sx.unsqueeze(3).unsqueeze(4)).round()).to(torch.int8) qx = _qx.permute(0, 1, 3, 2, 4).reshape(BS, SL, CDIM) p = torch.tensor([0.2], dtype=dtype, ).cuda() mask = torch.ones_like(x).bernoulli_(1 - p).to(torch.bool) quantiles = [0.5, 0.2, 0.8] # utility functions if provider == 'triton': def y_fwd(): int8_dropout_forward(qx, sx, mask, p, QB) if provider == 'torch': torch_dropout = torch.nn.Dropout() def y_fwd(): return torch_dropout(x) # forward pass if mode == 'time-consuming': convert_func = lambda ms: ms ms, min_ms, max_ms = triton.testing.do_bench(y_fwd, quantiles=quantiles, rep=100) # backward pass if mode == 'gbps': convert_func = lambda ms: 2 * x.numel() * x.element_size() / ms * 1e-6 ms, min_ms, max_ms = triton.testing.do_bench(y_fwd, quantiles=quantiles, rep=100) return convert_func(ms), convert_func(max_ms), convert_func(min_ms) def validity_check(BS=2, SL=64, CDIM=64, QB=CONST_BLOCK, dtype=torch.float16): # x = torch.randn(BS, SL, CDIM, dtype=dtype).cuda() x = torch.ones(BS, SL, CDIM, dtype=dtype, ).cuda() p = torch.tensor([0.2], dtype=dtype, ).cuda() mask = torch.ones_like(x).bernoulli_(1 - p).to(torch.bool) sx = x.abs().max(dim=1)[0] / 127 bias = None # triton result _qx = x.reshape(BS, SL // QB, QB, CDIM // QB, QB).permute(0, 1, 3, 2, 4) sx = _qx.abs().amax(dim=(3, 4)) / 127 _qx = ((_qx / sx.unsqueeze(3).unsqueeze(4)).round()).to(torch.int8) qx = _qx.permute(0, 1, 3, 2, 4).reshape(BS, SL, CDIM) print(qx.shape, sx.shape) # exit() x_triton, s_triton = int8_dropout_forward(qx, sx, mask, p, QB) print(x_triton[0]) _x_triton = x_triton.reshape(BS, SL // QB, QB, CDIM // QB, QB).permute(0, 1, 3, 2, 4) s_triton = s_triton.unsqueeze(3).unsqueeze(4) output_triton = (_x_triton * s_triton).permute(0, 1, 3, 2, 4).reshape(BS, SL, CDIM) # print(qx) # print(sx) # import IPython # IPython.embed() print(output_triton) if __name__ == "__main__": torch.manual_seed(0) torch.set_printoptions(precision=8, linewidth=1600, sci_mode=False, edgeitems=3) validity_check(BS=2, SL=128, CDIM=64, QB=CONST_BLOCK, dtype=torch.float16) 
bench_load_store.run(save_path=f'result/time/multi_quantize_block_dropout/BLSZ=64', print_data=True)
@triton.jit def int8_dropout_kernel_forward( output_ptr, output_scale_ptr, input_ptr, input_scale_ptr, mask_ptr, p_ptr, M, N, SM, SN, input_stride_b, input_stride_0, input_stride_1, s_input_stride_b, s_input_stride_0, s_input_stride_1, output_stride_b, output_stride_0, output_stride_1, s_output_stride_b, s_output_stride_0, s_output_stride_1, mask_stride_b, mask_stride_0, mask_stride_1, QB: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_SM: tl.constexpr, BLOCK_SN: tl.constexpr,): # Block PID pid_b = tl.program_id(0) pid = tl.program_id(1) NUM_BLOCK_M = tl.cdiv(M, BLOCK_M) NUM_BLOCK_N = tl.cdiv(N, BLOCK_N) pid_dim0 = pid // NUM_BLOCK_N pid_dim1 = pid % NUM_BLOCK_N # pointers input_block_ptr = tl.make_block_ptr( base=input_ptr + pid_b * input_stride_b, shape=(M, N), strides=(input_stride_0, input_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0) ) # input ptr scale_input_ptr = tl.make_block_ptr( base=input_scale_ptr + pid_b * s_input_stride_b, shape=(SM, SN), strides=(s_input_stride_0, s_input_stride_1), offsets=(pid_dim0 * BLOCK_SM, pid_dim1 * BLOCK_SN), block_shape=(BLOCK_SM, BLOCK_SN), order=(1, 0), ) mask_block_ptr = tl.make_block_ptr( base=mask_ptr + pid_b * mask_stride_b, shape=(M, N), strides=(mask_stride_0, mask_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0) ) input = tl.load(input_block_ptr) scale_input = tl.load(scale_input_ptr) mask = tl.load(mask_block_ptr) p = tl.load(p_ptr) # Dequantize and dropout calculation # scale_input = tl.reshape(scale_input, (BLOCK_SM, 1, BLOCK_SN, 1)) # input = tl.reshape(input, (BLOCK_SM, BLOCK_M//BLOCK_SM, BLOCK_SN, BLOCK_N//BLOCK_SN)) dropout_output = input * mask scale_output = scale_input / (1-p) # scale_output = tl.reshape(scale_output, (4, 1, 4, 1)) # scale_output = tl.reshape(scale_output, (BLOCK_SM, 1, BLOCK_SN, 1)) dropout_output = dropout_output.to(tl.int8) scale_output = scale_output.to(tl.float16) # debug # dropout_output = input # scale_output = scale_input # pointers output_block_ptr = tl.make_block_ptr( base=output_ptr + pid_b * output_stride_b, shape=(M, N), strides=(output_stride_0, output_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0) ) scale_output_ptr = tl.make_block_ptr( base=output_scale_ptr + pid_b * s_output_stride_b, shape=(SM, SN), strides=(s_output_stride_0, s_output_stride_1), offsets=(pid_dim0 * BLOCK_SM, pid_dim1 * BLOCK_SN), block_shape=(BLOCK_SM, BLOCK_SN), order=(1, 0), ) tl.store(output_block_ptr, dropout_output) tl.store(scale_output_ptr, scale_output) def int8_dropout_forward(x, s_x, mask, p, QB): # defining the input and output tensor BS, M, N = x.shape _, SM, SN = s_x.shape y = torch.empty_like(x, dtype=torch.int8) s_y = torch.empty_like(s_x, dtype=torch.float16) grid = lambda META: ( BS, triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]), ) int8_dropout_kernel_forward[grid]( y, s_y, x, s_x, mask, p, M, N, SM, SN, x.stride(0), x.stride(1), x.stride(2), s_x.stride(0), s_x.stride(1), s_x.stride(2), y.stride(0), y.stride(1), y.stride(2), s_y.stride(0), s_y.stride(1), s_y.stride(2), mask.stride(0), mask.stride(1), mask.stride(2), QB ) return y, s_y # I change the dtype of both the input tensor and the output tensor. 
I use torch.float32, torch.float16, and torch.int8 configs = [] for BS in [1, 2, 4, 8]: for SL in [2048, 4096, ]: configs.append( triton.testing.Benchmark( # test different matrix size influence x_names=['CDIM'], x_vals=[1024, 2048, 4096, 6144, 8192], line_arg='provider', line_vals=["triton", "torch"], line_names=['triton', 'torch'], styles=[('blue', '-'), ('green', '-')], ylabel='time-cost', plot_name=f'INT8dropout<BLSZ={CONST_BLOCK}><BS={BS}><SL={SL}>', args={'BS': BS, 'SL': SL, 'QB': CONST_BLOCK, 'dtype': torch.float16, 'mode': 'time-consuming'} ) ) @triton.testing.perf_report( configs ) def bench_load_store(BS, SL, CDIM, QB, provider, dtype, mode='forward'): # I only use triton as the provider, and mode when benchmarking # create data x = torch.randn(BS, SL, CDIM, dtype=dtype).cuda() sx = x.abs().max(dim=1)[0] / 127 bias = None # triton result _qx = x.reshape(BS, SL // QB, QB, CDIM // QB, QB).permute(0, 1, 3, 2, 4) sx = _qx.abs().amax(dim=(3, 4)) / 127 _qx = ((_qx / sx.unsqueeze(3).unsqueeze(4)).round()).to(torch.int8) qx = _qx.permute(0, 1, 3, 2, 4).reshape(BS, SL, CDIM) p = torch.tensor([0.2], dtype=dtype, ).cuda() mask = torch.ones_like(x).bernoulli_(1 - p).to(torch.bool) quantiles = [0.5, 0.2, 0.8] # utility functions if provider == 'triton': def y_fwd(): int8_dropout_forward(qx, sx, mask, p, QB) if provider == 'torch': torch_dropout = torch.nn.Dropout() def y_fwd(): return torch_dropout(x) # forward pass if mode == 'time-consuming': convert_func = lambda ms: ms ms, min_ms, max_ms = triton.testing.do_bench(y_fwd, quantiles=quantiles, rep=100) # backward pass if mode == 'gbps': convert_func = lambda ms: 2 * x.numel() * x.element_size() / ms * 1e-6 ms, min_ms, max_ms = triton.testing.do_bench(y_fwd, quantiles=quantiles, rep=100) return convert_func(ms), convert_func(max_ms), convert_func(min_ms) def validity_check(BS=2, SL=64, CDIM=64, QB=CONST_BLOCK, dtype=torch.float16): # x = torch.randn(BS, SL, CDIM, dtype=dtype).cuda() x = torch.ones(BS, SL, CDIM, dtype=dtype, ).cuda() p = torch.tensor([0.2], dtype=dtype, ).cuda() mask = torch.ones_like(x).bernoulli_(1 - p).to(torch.bool) sx = x.abs().max(dim=1)[0] / 127 bias = None # triton result _qx = x.reshape(BS, SL // QB, QB, CDIM // QB, QB).permute(0, 1, 3, 2, 4) sx = _qx.abs().amax(dim=(3, 4)) / 127 _qx = ((_qx / sx.unsqueeze(3).unsqueeze(4)).round()).to(torch.int8) qx = _qx.permute(0, 1, 3, 2, 4).reshape(BS, SL, CDIM) print(qx.shape, sx.shape) # exit() x_triton, s_triton = int8_dropout_forward(qx, sx, mask, p, QB) print(x_triton[0]) _x_triton = x_triton.reshape(BS, SL // QB, QB, CDIM // QB, QB).permute(0, 1, 3, 2, 4) s_triton = s_triton.unsqueeze(3).unsqueeze(4) output_triton = (_x_triton * s_triton).permute(0, 1, 3, 2, 4).reshape(BS, SL, CDIM) # print(qx) # print(sx) # import IPython # IPython.embed() print(output_triton) if __name__ == "__main__": torch.manual_seed(0) torch.set_printoptions(precision=8, linewidth=1600, sci_mode=False, edgeitems=3) validity_check(BS=2, SL=128, CDIM=64, QB=CONST_BLOCK, dtype=torch.float16) bench_load_store.run(save_path=f'result/time/multi_quantize_block_dropout/BLSZ=64', print_data=True)
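A minimal usage sketch for the fused INT8 dropout above, mirroring the quantization steps in validity_check; the sizes are arbitrary examples and a CUDA device is assumed.

import torch

BS, SL, CDIM, QB = 2, 128, 64, CONST_BLOCK
x = torch.randn(BS, SL, CDIM, dtype=torch.float16, device="cuda")
p = torch.tensor([0.2], dtype=torch.float16, device="cuda")
mask = torch.ones_like(x).bernoulli_(1 - p).to(torch.bool)

# Per-(QB x QB)-block symmetric INT8 quantization, as in validity_check above.
_qx = x.reshape(BS, SL // QB, QB, CDIM // QB, QB).permute(0, 1, 3, 2, 4)
sx = _qx.abs().amax(dim=(3, 4)) / 127
qx = (_qx / sx.unsqueeze(3).unsqueeze(4)).round().to(torch.int8).permute(0, 1, 3, 2, 4).reshape(BS, SL, CDIM)

qy, sy = int8_dropout_forward(qx, sx, mask, p, QB)  # INT8 values plus rescaled fp16 block scales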
mdy666/mdy_triton
others/lighting_attention/lighting_attention.py
https://github.com/mdy666/mdy_triton/blob/fb82dd5ee036edfcde1f15623adcb728155bc03e/others/lighting_attention/lighting_attention.py
import torch import triton import triton.language as tl import os from copy import deepcopy import math @triton.jit def _lighting_attention_encode_kernel(QKV, KV, Y, SLOPE_RATE, NUM_PADDDINGS, qkv_sb, qkv_sn, qkv_sh, qkv_sd, kv_sb, kv_sh, kv_sd, kv_se, y_sb, y_sh, y_sn, y_sd, B, N, H, D:tl.constexpr, FP32:tl.constexpr, BLOCK_SIZE_N: tl.constexpr, ): off_b = tl.program_id(0) off_h = tl.program_id(1) QKV += off_b * qkv_sb + off_h * qkv_sh KV += off_b * kv_sb + off_h * kv_sh Y += off_b * y_sb + off_h * y_sh SLOPE_RATE += off_h NUM_PADDDINGS += off_b dd = tl.arange(0, D) nn = tl.arange(0, BLOCK_SIZE_N) num_paddings = tl.load(NUM_PADDDINGS) q_ptrs = QKV + (nn[:, None] + num_paddings) * qkv_sn + dd[None, :] k_ptrs = QKV + (nn[None, :] + num_paddings) * qkv_sn + dd[:, None] + D kv_ptrs = KV + dd[:, None] * kv_sd + dd[None, :] y_pts = Y + (nn[:, None] + num_paddings) * y_sn + dd[None, :] slope_rate = tl.load(SLOPE_RATE).to(tl.float32) array = (nn + 1) q_decay = tl.exp(-1. * slope_rate * array)[:, None] k_decay = tl.exp(-1. * slope_rate * (BLOCK_SIZE_N - array))[None, :] index = array[:, None] - array[None, :] s_index = slope_rate * index s_index = tl.where(index >= 0, -s_index, float('-inf')) diag_decay = tl.exp(s_index) if FP32: dtype = tl.float32 else: dtype = tl.bfloat16 kv = tl.zeros((D, D), dtype=tl.float32) for start_n in tl.range(num_paddings, N, BLOCK_SIZE_N): mask_nn = (nn + start_n) < N m = tl.minimum(N-start_n, BLOCK_SIZE_N) if m < BLOCK_SIZE_N: k_decay = tl.exp(-1. * slope_rate * (m - array))[None, :] q = tl.load(q_ptrs, mask=mask_nn[:, None], other=0.).to(dtype) k = tl.load(k_ptrs, mask=mask_nn[None, :], other=0.).to(dtype) v = tl.load(q_ptrs + 2*D, mask=mask_nn[:, None], other=0.).to(dtype) qkv_none_diag = tl.dot((q * q_decay).to(dtype), kv.to(dtype)) qk = tl.dot(q, k) * diag_decay qkv_diag = tl.dot(qk.to(dtype), v) y = qkv_diag + qkv_none_diag block_decay = tl.exp(-1. * slope_rate * m) kv = kv * block_decay + tl.dot((k * k_decay).to(v.dtype), v) # kv = tl.dot(tl.permute(k, (1,0)), k) tl.store(y_pts, y, mask=mask_nn[:, None]) q_ptrs += BLOCK_SIZE_N * qkv_sn y_pts += BLOCK_SIZE_N * y_sn k_ptrs += BLOCK_SIZE_N * qkv_sn tl.store(kv_ptrs, kv) @triton.jit def _lighting_attention_decode_kernel(QKV, KV, Y, SLOPE_RATE, qkv_sb, qkv_sn, qkv_sh, qkv_sd, kv_sb, kv_sh, kv_sd, kv_se, y_sb, y_sh, y_sn, y_sd, B, N, H, D:tl.constexpr, FP32:tl.constexpr, ): off_b = tl.program_id(0) off_h = tl.program_id(1) QKV += off_b * qkv_sb + off_h * qkv_sh KV += off_b * kv_sb + off_h * kv_sh Y += off_b * y_sb + off_h * y_sh SLOPE_RATE += off_h dd = tl.arange(0, D) q_ptrs = QKV + dd kv_ptrs = KV + dd[:, None] * kv_sd + dd[None, :] y_ptrs = Y + dd slope_rate = tl.load(SLOPE_RATE).to(tl.float32) ratio = tl.exp(-1. 
* slope_rate) if FP32: dtype = tl.float32 else: dtype = tl.bfloat16 kv = tl.load(kv_ptrs).to(dtype) q = tl.load(q_ptrs).to(dtype) k = tl.load(q_ptrs+D) .to(dtype) v = tl.load(q_ptrs + 2*D).to(dtype) kv = ratio * kv + k[:, None] * v[None, :] y = tl.sum(q[:, None] * kv, axis=0) tl.store(kv_ptrs, kv) tl.store(y_ptrs, y) def lighting_attention_encode(qkv, slope_rate, attention_mask=None, fp32=False): # b, n, h, d b, n, h, d3 = qkv.shape d = d3 // 3 assert math.log2(d).is_integer(), 'd must be power of 2' slope_rate = slope_rate.squeeze() kv = torch.empty(b, h, d, d).to(torch.float32).to(qkv.device) y = torch.empty(b, h, n, d, device=qkv.device, dtype=qkv.dtype) if attention_mask is not None: assert attention_mask[-1, :].min().values != 0, 'please use left_padding' num_paddings = n - attention_mask.sum(-1) else: num_paddings = torch.full((b,), 0, device=qkv.device, dtype=torch.int32) grids = (b, h) _lighting_attention_encode_kernel[grids](qkv, kv, y, slope_rate, num_paddings, *qkv.stride(), *kv.stride(), *y.stride(), b, n, h, d, fp32, BLOCK_SIZE_N=32, num_warps=8, num_stages=4 if fp32 else 1, ) return y, kv def lighting_attention_decode(qkv, slope_rate, kv, fp32=False): # b, n, h, d b, n, h, d3 = qkv.shape assert n == 1, 'decoing phase need n=1' d = d3 // 3 slope_rate = slope_rate.squeeze() y = torch.empty(b, h, n, d, device=qkv.device, dtype=qkv.dtype) grids = (b, h) _lighting_attention_decode_kernel[grids](qkv, kv, y, slope_rate, *qkv.stride(), *kv.stride(), *y.stride(), b, n, h, d, fp32, num_warps=8, num_stages=1, ) return y, kv def triton_lighting_attention(qkv, slope_rate, past_key_value=None, attention_mask=None, fp32=False): if past_key_value is None: y, kv = lighting_attention_encode(qkv, slope_rate, attention_mask, fp32) else: y, kv = lighting_attention_decode(qkv, slope_rate, past_key_value, fp32) return y, kv BLOCK = 256 def torch_lighting_attention(qkv, slope_rate, past_key_value=None, attention_mask=None): n = qkv.size(1) q, k, v = torch.split(qkv, [qkv.size(-1)//3] * 3, dim=3) # [b, h, l, d] q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) if past_key_value is None: offset = q.shape[-2] else: offset = 1 # for align with metaseq ratio = torch.exp(-slope_rate) # only use for the first time if past_key_value is None: slope_rate = slope_rate.to(torch.float32) if attention_mask is not None: v = v.masked_fill((1 - attention_mask).unsqueeze(1).unsqueeze(-1).to(torch.bool), 0) # print(v[0, 0, :32]) NUM_BLOCK = (n + BLOCK - 1) // BLOCK b, h, n, d = q.shape e = v.shape[-1] # other array = torch.arange(BLOCK).to(q) + 1 q_decay = torch.exp(-slope_rate * array.reshape(-1, 1)) # h, bn, 1 k_decay = torch.exp(-slope_rate * (BLOCK - array.reshape(-1, 1))) index = array[:, None] - array[None, :] s_index = slope_rate * index[ None, None, ] s_index = torch.where(index >= 0, -s_index, float("-inf")) diag_decay = torch.exp(s_index) kv = torch.zeros(b, h, d, e).to(torch.float32).to(q.device) output = torch.empty((b, h, n, e), dtype=q.dtype, device=q.device) for i in range(NUM_BLOCK): si = i * BLOCK ei = min(si + BLOCK, n) m = ei - si qi = q[:, :, si:ei].contiguous() ki = k[:, :, si:ei].contiguous() vi = v[:, :, si:ei].contiguous() qkv_none_diag = torch.matmul(qi * q_decay[:, :m], kv).to(torch.float32) # diag qk = torch.matmul(qi, ki.transpose(-1, -2)).to(torch.float32) * diag_decay[:, :, :m, :m] qkv_diag = torch.matmul(qk, vi.to(torch.float32)) block_decay = torch.exp(-slope_rate * m) output[:, :, si:ei] = qkv_none_diag + qkv_diag kv = block_decay * kv + torch.matmul((ki * k_decay[:, 
-m:]).transpose(-1, -2).to(vi.dtype), vi) else: kv = past_key_value output = [] for i in range(n): kv = ratio * kv + torch.einsum( "... n d, ... n e -> ... d e", k[:, :, i:i + 1], v[:, :, i:i + 1], ) qkv = torch.einsum("... n e, ... e d -> ... n d", q[:, :, i:i + 1], kv.to(q.dtype)) output.append(qkv) output = torch.concat(output, dim=-2) return output, kv
@triton.jit def _lighting_attention_encode_kernel(QKV, KV, Y, SLOPE_RATE, NUM_PADDDINGS, qkv_sb, qkv_sn, qkv_sh, qkv_sd, kv_sb, kv_sh, kv_sd, kv_se, y_sb, y_sh, y_sn, y_sd, B, N, H, D:tl.constexpr, FP32:tl.constexpr, BLOCK_SIZE_N: tl.constexpr, ): off_b = tl.program_id(0) off_h = tl.program_id(1) QKV += off_b * qkv_sb + off_h * qkv_sh KV += off_b * kv_sb + off_h * kv_sh Y += off_b * y_sb + off_h * y_sh SLOPE_RATE += off_h NUM_PADDDINGS += off_b dd = tl.arange(0, D) nn = tl.arange(0, BLOCK_SIZE_N) num_paddings = tl.load(NUM_PADDDINGS) q_ptrs = QKV + (nn[:, None] + num_paddings) * qkv_sn + dd[None, :] k_ptrs = QKV + (nn[None, :] + num_paddings) * qkv_sn + dd[:, None] + D kv_ptrs = KV + dd[:, None] * kv_sd + dd[None, :] y_pts = Y + (nn[:, None] + num_paddings) * y_sn + dd[None, :] slope_rate = tl.load(SLOPE_RATE).to(tl.float32) array = (nn + 1) q_decay = tl.exp(-1. * slope_rate * array)[:, None] k_decay = tl.exp(-1. * slope_rate * (BLOCK_SIZE_N - array))[None, :] index = array[:, None] - array[None, :] s_index = slope_rate * index s_index = tl.where(index >= 0, -s_index, float('-inf')) diag_decay = tl.exp(s_index) if FP32: dtype = tl.float32 else: dtype = tl.bfloat16 kv = tl.zeros((D, D), dtype=tl.float32) for start_n in tl.range(num_paddings, N, BLOCK_SIZE_N): mask_nn = (nn + start_n) < N m = tl.minimum(N-start_n, BLOCK_SIZE_N) if m < BLOCK_SIZE_N: k_decay = tl.exp(-1. * slope_rate * (m - array))[None, :] q = tl.load(q_ptrs, mask=mask_nn[:, None], other=0.).to(dtype) k = tl.load(k_ptrs, mask=mask_nn[None, :], other=0.).to(dtype) v = tl.load(q_ptrs + 2*D, mask=mask_nn[:, None], other=0.).to(dtype) qkv_none_diag = tl.dot((q * q_decay).to(dtype), kv.to(dtype)) qk = tl.dot(q, k) * diag_decay qkv_diag = tl.dot(qk.to(dtype), v) y = qkv_diag + qkv_none_diag block_decay = tl.exp(-1. * slope_rate * m) kv = kv * block_decay + tl.dot((k * k_decay).to(v.dtype), v) # kv = tl.dot(tl.permute(k, (1,0)), k) tl.store(y_pts, y, mask=mask_nn[:, None]) q_ptrs += BLOCK_SIZE_N * qkv_sn y_pts += BLOCK_SIZE_N * y_sn k_ptrs += BLOCK_SIZE_N * qkv_sn tl.store(kv_ptrs, kv)
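A minimal prefill-plus-decode sketch for the wrapper functions in this file; the batch, sequence, head, and dtype choices are illustrative assumptions.

import torch

b, n, h, d = 2, 1024, 8, 64                      # d must be a power of two
qkv = torch.randn(b, n, h, 3 * d, dtype=torch.bfloat16, device="cuda")
slope_rate = torch.rand(h, 1, 1, device="cuda")  # one decay rate per head

y, kv = triton_lighting_attention(qkv, slope_rate)   # prefill path (encode kernel)
step_qkv = torch.randn(b, 1, h, 3 * d, dtype=torch.bfloat16, device="cuda")
y_step, kv = triton_lighting_attention(step_qkv, slope_rate, past_key_value=kv)  # decode path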
mdy666/mdy_triton
others/lighting_attention/lighting_attention.py
https://github.com/mdy666/mdy_triton/blob/fb82dd5ee036edfcde1f15623adcb728155bc03e/others/lighting_attention/lighting_attention.py
import torch import triton import triton.language as tl import os from copy import deepcopy import math @triton.jit def _lighting_attention_encode_kernel(QKV, KV, Y, SLOPE_RATE, NUM_PADDDINGS, qkv_sb, qkv_sn, qkv_sh, qkv_sd, kv_sb, kv_sh, kv_sd, kv_se, y_sb, y_sh, y_sn, y_sd, B, N, H, D:tl.constexpr, FP32:tl.constexpr, BLOCK_SIZE_N: tl.constexpr, ): off_b = tl.program_id(0) off_h = tl.program_id(1) QKV += off_b * qkv_sb + off_h * qkv_sh KV += off_b * kv_sb + off_h * kv_sh Y += off_b * y_sb + off_h * y_sh SLOPE_RATE += off_h NUM_PADDDINGS += off_b dd = tl.arange(0, D) nn = tl.arange(0, BLOCK_SIZE_N) num_paddings = tl.load(NUM_PADDDINGS) q_ptrs = QKV + (nn[:, None] + num_paddings) * qkv_sn + dd[None, :] k_ptrs = QKV + (nn[None, :] + num_paddings) * qkv_sn + dd[:, None] + D kv_ptrs = KV + dd[:, None] * kv_sd + dd[None, :] y_pts = Y + (nn[:, None] + num_paddings) * y_sn + dd[None, :] slope_rate = tl.load(SLOPE_RATE).to(tl.float32) array = (nn + 1) q_decay = tl.exp(-1. * slope_rate * array)[:, None] k_decay = tl.exp(-1. * slope_rate * (BLOCK_SIZE_N - array))[None, :] index = array[:, None] - array[None, :] s_index = slope_rate * index s_index = tl.where(index >= 0, -s_index, float('-inf')) diag_decay = tl.exp(s_index) if FP32: dtype = tl.float32 else: dtype = tl.bfloat16 kv = tl.zeros((D, D), dtype=tl.float32) for start_n in tl.range(num_paddings, N, BLOCK_SIZE_N): mask_nn = (nn + start_n) < N m = tl.minimum(N-start_n, BLOCK_SIZE_N) if m < BLOCK_SIZE_N: k_decay = tl.exp(-1. * slope_rate * (m - array))[None, :] q = tl.load(q_ptrs, mask=mask_nn[:, None], other=0.).to(dtype) k = tl.load(k_ptrs, mask=mask_nn[None, :], other=0.).to(dtype) v = tl.load(q_ptrs + 2*D, mask=mask_nn[:, None], other=0.).to(dtype) qkv_none_diag = tl.dot((q * q_decay).to(dtype), kv.to(dtype)) qk = tl.dot(q, k) * diag_decay qkv_diag = tl.dot(qk.to(dtype), v) y = qkv_diag + qkv_none_diag block_decay = tl.exp(-1. * slope_rate * m) kv = kv * block_decay + tl.dot((k * k_decay).to(v.dtype), v) # kv = tl.dot(tl.permute(k, (1,0)), k) tl.store(y_pts, y, mask=mask_nn[:, None]) q_ptrs += BLOCK_SIZE_N * qkv_sn y_pts += BLOCK_SIZE_N * y_sn k_ptrs += BLOCK_SIZE_N * qkv_sn tl.store(kv_ptrs, kv) @triton.jit def _lighting_attention_decode_kernel(QKV, KV, Y, SLOPE_RATE, qkv_sb, qkv_sn, qkv_sh, qkv_sd, kv_sb, kv_sh, kv_sd, kv_se, y_sb, y_sh, y_sn, y_sd, B, N, H, D:tl.constexpr, FP32:tl.constexpr, ): off_b = tl.program_id(0) off_h = tl.program_id(1) QKV += off_b * qkv_sb + off_h * qkv_sh KV += off_b * kv_sb + off_h * kv_sh Y += off_b * y_sb + off_h * y_sh SLOPE_RATE += off_h dd = tl.arange(0, D) q_ptrs = QKV + dd kv_ptrs = KV + dd[:, None] * kv_sd + dd[None, :] y_ptrs = Y + dd slope_rate = tl.load(SLOPE_RATE).to(tl.float32) ratio = tl.exp(-1. 
* slope_rate) if FP32: dtype = tl.float32 else: dtype = tl.bfloat16 kv = tl.load(kv_ptrs).to(dtype) q = tl.load(q_ptrs).to(dtype) k = tl.load(q_ptrs+D) .to(dtype) v = tl.load(q_ptrs + 2*D).to(dtype) kv = ratio * kv + k[:, None] * v[None, :] y = tl.sum(q[:, None] * kv, axis=0) tl.store(kv_ptrs, kv) tl.store(y_ptrs, y) def lighting_attention_encode(qkv, slope_rate, attention_mask=None, fp32=False): # b, n, h, d b, n, h, d3 = qkv.shape d = d3 // 3 assert math.log2(d).is_integer(), 'd must be power of 2' slope_rate = slope_rate.squeeze() kv = torch.empty(b, h, d, d).to(torch.float32).to(qkv.device) y = torch.empty(b, h, n, d, device=qkv.device, dtype=qkv.dtype) if attention_mask is not None: assert attention_mask[-1, :].min().values != 0, 'please use left_padding' num_paddings = n - attention_mask.sum(-1) else: num_paddings = torch.full((b,), 0, device=qkv.device, dtype=torch.int32) grids = (b, h) _lighting_attention_encode_kernel[grids](qkv, kv, y, slope_rate, num_paddings, *qkv.stride(), *kv.stride(), *y.stride(), b, n, h, d, fp32, BLOCK_SIZE_N=32, num_warps=8, num_stages=4 if fp32 else 1, ) return y, kv def lighting_attention_decode(qkv, slope_rate, kv, fp32=False): # b, n, h, d b, n, h, d3 = qkv.shape assert n == 1, 'decoing phase need n=1' d = d3 // 3 slope_rate = slope_rate.squeeze() y = torch.empty(b, h, n, d, device=qkv.device, dtype=qkv.dtype) grids = (b, h) _lighting_attention_decode_kernel[grids](qkv, kv, y, slope_rate, *qkv.stride(), *kv.stride(), *y.stride(), b, n, h, d, fp32, num_warps=8, num_stages=1, ) return y, kv def triton_lighting_attention(qkv, slope_rate, past_key_value=None, attention_mask=None, fp32=False): if past_key_value is None: y, kv = lighting_attention_encode(qkv, slope_rate, attention_mask, fp32) else: y, kv = lighting_attention_decode(qkv, slope_rate, past_key_value, fp32) return y, kv BLOCK = 256 def torch_lighting_attention(qkv, slope_rate, past_key_value=None, attention_mask=None): n = qkv.size(1) q, k, v = torch.split(qkv, [qkv.size(-1)//3] * 3, dim=3) # [b, h, l, d] q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) if past_key_value is None: offset = q.shape[-2] else: offset = 1 # for align with metaseq ratio = torch.exp(-slope_rate) # only use for the first time if past_key_value is None: slope_rate = slope_rate.to(torch.float32) if attention_mask is not None: v = v.masked_fill((1 - attention_mask).unsqueeze(1).unsqueeze(-1).to(torch.bool), 0) # print(v[0, 0, :32]) NUM_BLOCK = (n + BLOCK - 1) // BLOCK b, h, n, d = q.shape e = v.shape[-1] # other array = torch.arange(BLOCK).to(q) + 1 q_decay = torch.exp(-slope_rate * array.reshape(-1, 1)) # h, bn, 1 k_decay = torch.exp(-slope_rate * (BLOCK - array.reshape(-1, 1))) index = array[:, None] - array[None, :] s_index = slope_rate * index[ None, None, ] s_index = torch.where(index >= 0, -s_index, float("-inf")) diag_decay = torch.exp(s_index) kv = torch.zeros(b, h, d, e).to(torch.float32).to(q.device) output = torch.empty((b, h, n, e), dtype=q.dtype, device=q.device) for i in range(NUM_BLOCK): si = i * BLOCK ei = min(si + BLOCK, n) m = ei - si qi = q[:, :, si:ei].contiguous() ki = k[:, :, si:ei].contiguous() vi = v[:, :, si:ei].contiguous() qkv_none_diag = torch.matmul(qi * q_decay[:, :m], kv).to(torch.float32) # diag qk = torch.matmul(qi, ki.transpose(-1, -2)).to(torch.float32) * diag_decay[:, :, :m, :m] qkv_diag = torch.matmul(qk, vi.to(torch.float32)) block_decay = torch.exp(-slope_rate * m) output[:, :, si:ei] = qkv_none_diag + qkv_diag kv = block_decay * kv + torch.matmul((ki * k_decay[:, 
-m:]).transpose(-1, -2).to(vi.dtype), vi) else: kv = past_key_value output = [] for i in range(n): kv = ratio * kv + torch.einsum( "... n d, ... n e -> ... d e", k[:, :, i:i + 1], v[:, :, i:i + 1], ) qkv = torch.einsum("... n e, ... e d -> ... n d", q[:, :, i:i + 1], kv.to(q.dtype)) output.append(qkv) output = torch.concat(output, dim=-2) return output, kv
@triton.jit def _lighting_attention_decode_kernel(QKV, KV, Y, SLOPE_RATE, qkv_sb, qkv_sn, qkv_sh, qkv_sd, kv_sb, kv_sh, kv_sd, kv_se, y_sb, y_sh, y_sn, y_sd, B, N, H, D:tl.constexpr, FP32:tl.constexpr, ): off_b = tl.program_id(0) off_h = tl.program_id(1) QKV += off_b * qkv_sb + off_h * qkv_sh KV += off_b * kv_sb + off_h * kv_sh Y += off_b * y_sb + off_h * y_sh SLOPE_RATE += off_h dd = tl.arange(0, D) q_ptrs = QKV + dd kv_ptrs = KV + dd[:, None] * kv_sd + dd[None, :] y_ptrs = Y + dd slope_rate = tl.load(SLOPE_RATE).to(tl.float32) ratio = tl.exp(-1. * slope_rate) if FP32: dtype = tl.float32 else: dtype = tl.bfloat16 kv = tl.load(kv_ptrs).to(dtype) q = tl.load(q_ptrs).to(dtype) k = tl.load(q_ptrs+D) .to(dtype) v = tl.load(q_ptrs + 2*D).to(dtype) kv = ratio * kv + k[:, None] * v[None, :] y = tl.sum(q[:, None] * kv, axis=0) tl.store(kv_ptrs, kv) tl.store(y_ptrs, y) def lighting_attention_encode(qkv, slope_rate, attention_mask=None, fp32=False): # b, n, h, d b, n, h, d3 = qkv.shape d = d3 // 3 assert math.log2(d).is_integer(), 'd must be power of 2' slope_rate = slope_rate.squeeze() kv = torch.empty(b, h, d, d).to(torch.float32).to(qkv.device) y = torch.empty(b, h, n, d, device=qkv.device, dtype=qkv.dtype) if attention_mask is not None: assert attention_mask[-1, :].min().values != 0, 'please use left_padding' num_paddings = n - attention_mask.sum(-1) else: num_paddings = torch.full((b,), 0, device=qkv.device, dtype=torch.int32) grids = (b, h) _lighting_attention_encode_kernel[grids](qkv, kv, y, slope_rate, num_paddings, *qkv.stride(), *kv.stride(), *y.stride(), b, n, h, d, fp32, BLOCK_SIZE_N=32, num_warps=8, num_stages=4 if fp32 else 1, ) return y, kv def lighting_attention_decode(qkv, slope_rate, kv, fp32=False): # b, n, h, d b, n, h, d3 = qkv.shape assert n == 1, 'decoing phase need n=1' d = d3 // 3 slope_rate = slope_rate.squeeze() y = torch.empty(b, h, n, d, device=qkv.device, dtype=qkv.dtype) grids = (b, h) _lighting_attention_decode_kernel[grids](qkv, kv, y, slope_rate, *qkv.stride(), *kv.stride(), *y.stride(), b, n, h, d, fp32, num_warps=8, num_stages=1, ) return y, kv def triton_lighting_attention(qkv, slope_rate, past_key_value=None, attention_mask=None, fp32=False): if past_key_value is None: y, kv = lighting_attention_encode(qkv, slope_rate, attention_mask, fp32) else: y, kv = lighting_attention_decode(qkv, slope_rate, past_key_value, fp32) return y, kv BLOCK = 256 def torch_lighting_attention(qkv, slope_rate, past_key_value=None, attention_mask=None): n = qkv.size(1) q, k, v = torch.split(qkv, [qkv.size(-1)//3] * 3, dim=3) # [b, h, l, d] q = q.transpose(1, 2) k = k.transpose(1, 2) v = v.transpose(1, 2) if past_key_value is None: offset = q.shape[-2] else: offset = 1 # for align with metaseq ratio = torch.exp(-slope_rate) # only use for the first time if past_key_value is None: slope_rate = slope_rate.to(torch.float32) if attention_mask is not None: v = v.masked_fill((1 - attention_mask).unsqueeze(1).unsqueeze(-1).to(torch.bool), 0) # print(v[0, 0, :32]) NUM_BLOCK = (n + BLOCK - 1) // BLOCK b, h, n, d = q.shape e = v.shape[-1] # other array = torch.arange(BLOCK).to(q) + 1 q_decay = torch.exp(-slope_rate * array.reshape(-1, 1)) # h, bn, 1 k_decay = torch.exp(-slope_rate * (BLOCK - array.reshape(-1, 1))) index = array[:, None] - array[None, :] s_index = slope_rate * index[ None, None, ] s_index = torch.where(index >= 0, -s_index, float("-inf")) diag_decay = torch.exp(s_index) kv = torch.zeros(b, h, d, e).to(torch.float32).to(q.device) output = torch.empty((b, h, n, e), dtype=q.dtype, 
device=q.device) for i in range(NUM_BLOCK): si = i * BLOCK ei = min(si + BLOCK, n) m = ei - si qi = q[:, :, si:ei].contiguous() ki = k[:, :, si:ei].contiguous() vi = v[:, :, si:ei].contiguous() qkv_none_diag = torch.matmul(qi * q_decay[:, :m], kv).to(torch.float32) # diag qk = torch.matmul(qi, ki.transpose(-1, -2)).to(torch.float32) * diag_decay[:, :, :m, :m] qkv_diag = torch.matmul(qk, vi.to(torch.float32)) block_decay = torch.exp(-slope_rate * m) output[:, :, si:ei] = qkv_none_diag + qkv_diag kv = block_decay * kv + torch.matmul((ki * k_decay[:, -m:]).transpose(-1, -2).to(vi.dtype), vi) else: kv = past_key_value output = [] for i in range(n): kv = ratio * kv + torch.einsum( "... n d, ... n e -> ... d e", k[:, :, i:i + 1], v[:, :, i:i + 1], ) qkv = torch.einsum("... n e, ... e d -> ... n d", q[:, :, i:i + 1], kv.to(q.dtype)) output.append(qkv) output = torch.concat(output, dim=-2) return output, kv
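A hedged correctness-check sketch comparing the Triton path against the torch_lighting_attention reference above; the sizes and tolerances are assumptions, not values from the repository.

import torch

b, n, h, d = 2, 256, 8, 64
qkv = torch.randn(b, n, h, 3 * d, dtype=torch.float32, device="cuda")
slope_rate = torch.rand(h, 1, 1, device="cuda")

y_triton, kv_triton = triton_lighting_attention(qkv, slope_rate, fp32=True)
y_ref, kv_ref = torch_lighting_attention(qkv, slope_rate)
torch.testing.assert_close(y_triton, y_ref.to(y_triton.dtype), rtol=1e-3, atol=1e-3)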
alexanderb14/pytorch
torch/_inductor/codegen/triton.py
https://github.com/alexanderb14/pytorch/blob/8da4224042665686de22f8e351a0b42bfa42cab8/torch/_inductor/codegen/triton.py
# mypy: allow-untyped-defs from __future__ import annotations import collections import contextlib import dataclasses import functools import itertools import logging import os import re import textwrap from functools import lru_cache from typing import ( Any, Callable, cast, Dict, Iterable, List, Optional, Sequence, Tuple, Type, TYPE_CHECKING, Union, ) import sympy from sympy.printing.precedence import PRECEDENCE import torch import torch._logging from torch._dynamo.utils import identity, preserve_rng_state from torch._prims_common import is_integer_dtype from torch.utils._ordered_set import OrderedSet from torch.utils._sympy.functions import CeilDiv, FloorDiv, ModularIndexing from torch.utils._triton import has_triton_package from ...utils._sympy.symbol import free_symbol_is_type, prefix_str, symbol_is_type, SymT from ...utils._sympy.value_ranges import ValueRanges from .. import config, ir, metrics from ..codecache import code_hash, get_path, PyCodeCache from ..runtime.benchmarking import benchmarker from ..runtime.hints import ( AutotuneHint, DeviceProperties, TRITON_MAX_BLOCK, TRITON_MAX_RSPLIT, ) from ..runtime.runtime_utils import get_max_y_grid, next_power_of_2 from ..runtime.triton_heuristics import ( cooperative_reduction_grid, grid as default_grid_fn, ) from ..scheduler import BaseSchedulerNode, FusedSchedulerNode, Scheduler, SchedulerNode from ..utils import ( DelayReplaceLine, get_bounds_index_expr, get_fused_kernel_name, get_kernel_metadata, is_welford_reduction, Placeholder, sympy_subs, upcast_compute_type, ) from ..virtualized import _ops as ops, OpsHandler, ReductionType, StoreMode, V from ..wrapper_benchmark import get_kernel_category_by_source_code from .block_analysis import BlockPatternMatcher from .common import ( BackendFeature, CSE, CSEVariable, DeferredLine, IndentedBuffer, OpOverrides, PythonPrinter, SizeArg, TensorArg, WorkspaceArg, WorkspaceZeroMode, ) from .simd import ( constant_repr, IterationRanges, IterationRangesEntry, IterationRangesRoot, pexpr, prefix_is_reduction, SIMDKernel, SIMDScheduling, ) from .triton_utils import ( config_of, should_unwrap_unspec_arg, signature_of, signature_to_meta, ) if TYPE_CHECKING: from ..ir import IRNode log = logging.getLogger(__name__) perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints") schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") fusion_log = torch._logging.getArtifactLogger(__name__, "fusion") @lru_cache(None) def gen_attr_descriptor_import(): """ import AttrsDescriptor if the triton version is new enough to have this class defined. """ if not has_triton_package(): return "" import triton.compiler.compiler # Note: this works because triton.compiler.compiler imports AttrsDescriptor from triton.backends.compiler # When support for the legacy AttrsDescriptor is removed then this import path should be changed. 
if hasattr(triton.compiler.compiler, "AttrsDescriptor"): return "from triton.compiler.compiler import AttrsDescriptor" else: return "" @lru_cache(None) def gen_common_triton_imports(): imports = IndentedBuffer() imports.splice( """ import triton import triton.language as tl """ ) if attr_desc := gen_attr_descriptor_import(): imports.writeline(attr_desc) imports.splice( """ from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties """ ) return imports.getvalue() class TritonSymbols: """ Stores sympy.Symbol instances and constants associated with triton codegen. """ block_offsets = { symt: sympy.Symbol(f"{prefix_str[symt]}offset", integer=True, nonnegative=True) for symt in [SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK, SymT.RINDEX] } block_sizes = { symt: sympy.Symbol( f"{prefix_str[symt].upper()}BLOCK", integer=True, positive=True ) for symt in [SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK, SymT.RINDEX] } @classmethod def get_block_size(cls, tree: IterationRanges) -> sympy.Symbol: return cls.block_sizes[tree.symt] @classmethod def get_block_offset(cls, tree: IterationRanges) -> sympy.Symbol: return cls.block_offsets[tree.symt] @dataclasses.dataclass class IndexingOptions: index_str: str mask_vars: OrderedSet[str] mask_str: str expand_str: Optional[str] _has_rindex: bool index: sympy.Expr def has_mask(self): return bool(self.mask_vars) def has_indirect(self): return free_symbol_is_type(self.index, SymT.TMP) def has_rindex(self): return self._has_rindex def has_tmpmask(self): return "tmp" in self.mask_str def has_rmask(self): return "rmask" in self.mask_str @dataclasses.dataclass class BlockPtrOptions: params: BlockParameters constant_offset: sympy.Expr order: List[int] mask_vars: OrderedSet[str] broadcast_shape: Sequence[sympy.Expr] broadcasting_dims: List[bool] final_shape: Sequence[sympy.Expr] _boundary_check: Optional[List[int]] = None @property def shape(self) -> List[sympy.Expr]: return self.params.shape @property def block_shape(self) -> List[sympy.Expr]: return self.params.block_shape @property def strides(self) -> List[sympy.Expr]: return self.params.strides @property def offsets(self) -> List[sympy.Expr]: return self.params.offsets def codegen_broadcast_and_reshape( self, value: str, initial_shape: Sequence[sympy.Expr], final_shape: Sequence[sympy.Expr], allow_implicit: bool, ) -> str: """ Generate a broadcast and a reshape for the block pointer. This restores stride-0 dimensions which were removed from the block pointer. """ # Reshape to add singletons. pre_broadcast_shape = [ sympy.S.One if is_broadcasting else dim for dim, is_broadcasting in zip( self.broadcast_shape, self.broadcasting_dims ) ] value = triton_reshape(value, initial_shape, pre_broadcast_shape) # Broadcast singletons. # For loads, we can often implicitly broadcast singleton dimensions. # We need an explicit broadcast for stores, or if the final reshape does more # than add singletons. sizevars = V.graph.sizevars require_broadcast = any(self.broadcasting_dims) and ( len(pre_broadcast_shape) != len(final_shape) or any( not ( sizevars.statically_known_equals(pre_dim, 1) or sizevars.statically_known_equals(pre_dim, post_dim) ) for pre_dim, post_dim in zip(pre_broadcast_shape, final_shape) ) ) if not allow_implicit or require_broadcast: value = f"tl.broadcast_to({value}, {V.kernel.index_to_str(self.broadcast_shape)})" # Reshape to the final shape. 
value = triton_reshape(value, self.broadcast_shape, final_shape) return value @staticmethod def create( *, params: BlockParameters, constant_offset: sympy.Expr, range_trees: List[IterationRangesEntry], mask_vars: OrderedSet[str], get_max_block: Callable[[str], int], ) -> BlockPtrOptions: """Helper to create a BlockPtrOptions instance""" sizevars = V.graph.sizevars def lookup_size(exprs: Iterable[sympy.Expr]) -> List[sympy.Expr]: return [sizevars.lookup_precomputed_size(expr) for expr in exprs] # Look up precomputed sizes params.shape = lookup_size(params.shape) params.strides = lookup_size(params.strides) # Strip out dimensions of stride 0. # These will be restored with tl.broadcast_to. broadcasting_dims = [ sizevars.statically_known_equals(stride, 0) for stride in params.strides ] # Strip out dimensions of size 1. # These will be restored by tl.reshape. singleton_dims = [ sizevars.statically_known_equals(dim, 1) for dim in params.block_shape ] if all(singleton_dims): # Handle a pure singletons, e.g. [1, 1] singleton_dims[-1] = False # Record the post-broadcast shape before broadcasting dims are removed. # The pre-broadcast shape is identical to this, except broadcasting dims are # replaced with 1. broadcast_shape = [ dim for dim, is_singleton in zip(params.block_shape, singleton_dims) if not is_singleton ] # Combine all removable dims. removable_dims = [any(dims) for dims in zip(singleton_dims, broadcasting_dims)] def remove_dims(it): """Removes any broadcasting or singleton dims from a given sequence""" return [ item for item, is_removable in zip(it, removable_dims) if not is_removable ] # Drop removable dimensions from the input. params = BlockParameters( **{key: remove_dims(val) for key, val in dataclasses.asdict(params).items()} ) # Compute the final shape, adjusting for special kernel types. final_shape = [TritonSymbols.get_block_size(tree) for tree in range_trees] if V.kernel.no_x_dim: assert range_trees[0].prefix == "x" final_shape.pop(0) if ( not V.kernel.inside_reduction and len(params.strides) == len(V.kernel.numels) - 1 and V.kernel.numels["r"] != 1 ): # Need to expand rank by 1 to match rank when self.inside_reduction=True final_shape.append(sympy.S.One) result = BlockPtrOptions( params=params, constant_offset=V.graph.sizevars.lookup_precomputed_size(constant_offset), order=list(reversed(range(len(params.shape)))), mask_vars=mask_vars, final_shape=final_shape, broadcast_shape=broadcast_shape, broadcasting_dims=broadcasting_dims, ) result.compute_boundary_check(get_max_block) return result def replace_roffset(self, expr: sympy.Expr, replacement: sympy.Expr) -> sympy.Expr: """ Replaces instances of roffset with the new expression. 
""" roffset = TritonSymbols.block_offsets[SymT.RINDEX] return sympy_subs(expr, {roffset: replacement}) def format(self, name: str, roffset=True) -> str: """ Codegen a call to tl.make_block_ptr() Args: name: variable name for pointer roffset: should roffset be included in offsets=..., for use with tl.advance() Returns: "tl.make_block_ptr(...)" """ f = V.kernel.index_to_str offsets = [*self.offsets] if not roffset: offsets = [self.replace_roffset(offset, sympy.S.Zero) for offset in offsets] args = [ ( f"{name} + ({f(self.constant_offset)})" if self.constant_offset != 0 else name ), f"shape={f(self.shape)}", f"strides={f(self.strides)}", f"block_shape={f(self.block_shape)}", f"order={f(self.order)}", f"offsets={f(offsets)}", ] return f"tl.make_block_ptr({', '.join(args)})" def compute_boundary_check(self, get_max_block: Callable[[str], int]) -> None: """List of indices to pass to tl.load(boundary_check=...)""" sizevars = V.graph.sizevars # Substitute maximum block sizes in shape expressions. # This works in multiple_of checks because block sizes are powers of 2. block_to_max: Dict[sympy.Expr, Any] = { block_size: get_max_block(prefix_str[symt]) for symt, block_size in TritonSymbols.block_sizes.items() } self._boundary_check = [ idx for idx in range(len(self.shape)) if ( not sizevars.statically_known_equals(self.strides[idx], sympy.S.Zero) and not sizevars.statically_known_multiple_of( self.shape[idx], self.block_shape[idx] ) and not sizevars.statically_known_multiple_of( self.shape[idx], sympy_subs(self.block_shape[idx], block_to_max) ) and not ( V.kernel.no_x_dim and self.block_shape[idx] == TritonSymbols.block_sizes[SymT.XBLOCK] ) ) ] def boundary_check(self): assert self._boundary_check is not None return self._boundary_check def advance_roffset(self): """ Codegen string to pass to tl.advance(name, ...). Advance is the difference between offsets in each loop iteration. To compute it, we replace roffset with multiples of RBLOCK. Since we expect roffset to vary in range(0, rnumel, RBLOCK), the first iteration has roffset=0, while the second has roffset=RBLOCK. 
""" rblock = TritonSymbols.block_sizes[SymT.RINDEX] advance = [ ( self.replace_roffset(offset, rblock) - self.replace_roffset(offset, sympy.S.Zero) ) for offset in self.offsets ] return V.kernel.index_to_str(advance) def has_indirect(self): return False # block_ptr can't do indirect indexing def has_rindex(self) -> bool: return any(free_symbol_is_type(expr, SymT.RINDEX) for expr in self.block_shape) def has_rmask(self): return self.has_rindex() def has_tmpmask(self): return False # block_ptr can't do indirect indexing def has_mask(self): return bool(self.boundary_check()) def triton_reshape( value: str, old_shape: Sequence[sympy.Expr], new_shape: Sequence[sympy.Expr] ): """Workaround https://github.com/openai/triton/issues/2836""" assert isinstance(old_shape, list) and isinstance(new_shape, list) old_shape_str = [V.kernel.index_to_str(shape) for shape in old_shape] new_shape_str = [V.kernel.index_to_str(shape) for shape in new_shape] if old_shape_str == new_shape_str: return value if [s for s in new_shape_str if s != "1"] != old_shape_str: return f"tl.reshape({value}, [{', '.join(new_shape_str)}])" # rewrite to [:, None] syntax, which is less buggy idx = 0 expand = [] for size in new_shape_str: if idx < len(old_shape_str) and size == old_shape_str[idx]: expand.append(":") idx += 1 else: assert size == "1" expand.append("None") assert idx == len(old_shape_str) return f"{value}[{', '.join(expand)}]" # NB: Inheriting from PythonPrinter is somewhat dangerous, because there are a # number of operators which Triton "implements", but in a way that is # inconsistent with Python semantics (and consistent with C semantics). We # must override all of these, or it is potential silent correctness problem class TritonPrinter(PythonPrinter): def _print_TruncToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.trunc({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_Float(self, expr): if config.is_fbcode() and torch.version.hip: ret = f"{expr}" else: ret = f"tl.full([], {expr}, tl.float64)" return ret def _print_ToFloat(self, expr): assert len(expr.args) == 1 s = self.parenthesize(expr.args[0], PRECEDENCE["Atom"] - 0.5) return f"{s}.to(tl.float64)" def _print_PythonMod(self, expr): quot, div = expr.args if quot.is_nonnegative and div.is_nonnegative: return self.stringify(expr.args, " % ", PRECEDENCE["Atom"] - 0.5) quot_s = self._print(quot) div_s = self._print(div) return f"triton_helpers.remainder_integer({quot_s}, {div_s})" def _print_FloorDiv(self, expr): assert expr.is_integer quot, div = expr.args if quot.is_nonnegative and div.is_nonnegative: return self.stringify(expr.args, " // ", PRECEDENCE["Atom"] - 0.5) quot_s = self._print(quot) div_s = self._print(div) return f"triton_helpers.div_floor_integer({quot_s}, {div_s})" # TODO: This is wrong, when lhs, rhs > 2**53, Python does a higher # precision algorithm, which we would need to replicate here def _print_IntTrueDiv(self, expr): return self.stringify(expr.args, " / ", PRECEDENCE["Atom"] - 0.5) # NB: sympy.floor/ceiling produce integers, so we have to do the # conversion to index dtype def _print_floor(self, expr): assert len(expr.args) == 1 return ( f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_FloorToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_ceiling(self, expr): assert len(expr.args) == 1 return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})" def 
_print_CeilToInt(self, expr): assert len(expr.args) == 1 return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})" def _helper_sqrt(self, expr): return f"libdevice.sqrt({self._print(expr)}.to(tl.float32))" def _print_FloatPow(self, expr): return ( f"libdevice.pow({self._print(expr.args[0])}, {self._print(expr.args[1])})" ) _print_PowByNatural = _print_FloatPow def _print_Where(self, expr): c = self.doprint(expr.args[0]) p = self.doprint(expr.args[1]) q = self.doprint(expr.args[2]) return f"tl.where({c}, {p}, {q})" def _print_min_max_helper(self, expr: sympy.Expr, cmp: str) -> str: """ Helper for max/min code genereration. cmp: > or < """ nargs = len(expr.args) if len(expr.args) == 1: return self._print(expr.args[0]) mid = len(expr.args) // 2 cls = type(expr) a = self._print(cls(*expr.args[:mid])) b = self._print(cls(*expr.args[mid:])) # Use a macro so we can propagate constexprs. # https://github.com/triton-lang/triton/issues/3815 a, b = tuple(f"({x})" for x in (a, b)) assert cmp in (">", "<"), f"Unexpected comparator: '{cmp}'" return f"({a} * ({a} {cmp}= {b}) + {b} * ({b} {cmp} {a}))" def _print_Min(self, expr): return self._print_min_max_helper(expr, "<") def _print_Max(self, expr): return self._print_min_max_helper(expr, ">") def _print_Abs(self, expr): assert len(expr.args) == 1 return f"tl_math.abs({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_cos(self, expr): assert len(expr.args) == 1 return f"libdevice.cos(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_cosh(self, expr): assert len(expr.args) == 1 return f"libdevice.cosh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_acos(self, expr): assert len(expr.args) == 1 return f"libdevice.acos(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_sin(self, expr): assert len(expr.args) == 1 return f"libdevice.sin(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_sinh(self, expr): assert len(expr.args) == 1 return f"libdevice.sinh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_asin(self, expr): assert len(expr.args) == 1 return f"libdevice.asin(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_tan(self, expr): assert len(expr.args) == 1 return f"libdevice.tan(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_tanh(self, expr): assert len(expr.args) == 1 return f"libdevice.tanh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_atan(self, expr): assert len(expr.args) == 1 return f"libdevice.atan(({self._print(expr.args[0])}).to(tl.float32))" def _print_RoundToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.llrint({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_RoundDecimal(self, expr): assert len(expr.args) == 2 number, ndigits = expr.args if number.is_integer: # ndigits < 0 should have been filtered by the sympy function assert ndigits < 0 raise ValueError( f"For integer inputs, only non-negative ndigits are currently supported, but got {ndigits}." 
) number_str = self.parenthesize(number, PRECEDENCE["Mul"]) return f"libdevice.nearbyint(1e{ndigits} * {number_str}) * 1e{-ndigits}" texpr = TritonPrinter().doprint # correct cases where Triton types names don't match PyTorch _triton_type_mapping = { "tl.bool": "tl.int1", "tl.float8_e4m3fn": "tl.float8e4nv", "tl.float8_e5m2": "tl.float8e5", "tl.float8_e4m3fnuz": "tl.float8e4b8", "tl.float8_e5m2fnuz": "tl.float8e5b16", } _triton_type_re = re.compile(r"^.*[.]") def triton_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type""" triton_type_name = _triton_type_re.sub("tl.", str(dtype)) return _triton_type_mapping.get(triton_type_name, triton_type_name) def triton_compute_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type and upcast [b]float16 to float32""" return triton_type(upcast_compute_type(dtype)) def _get_primitive_bitwidth(dtype: torch.dtype) -> int: """Number of bits of triton_compute_type()""" dtype = upcast_compute_type(dtype) itemsize = getattr(dtype, "itemsize", None) if itemsize: return itemsize * 8 else: return -1 def triton_store_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type, with fix for storing tl.bool""" if dtype == torch.bool: dtype = torch.int8 return triton_type(dtype) def upcast_acc_dtype(dtype: torch.dtype) -> torch.dtype: """Implicit upcasts used for Triton reduction types""" if is_integer_dtype(dtype) and dtype.is_signed and dtype.itemsize <= 4: return torch.int32 return upcast_compute_type(dtype) def triton_acc_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type, with reduction upcasts""" return triton_compute_type(upcast_acc_dtype(dtype)) class TritonCSEVariable(CSEVariable): def __init__(self, name, bounds: ValueRanges[Any], dtype: torch.dtype) -> None: super().__init__(name, bounds, dtype) # We'll use this to track which masks the variable needs when used for indirect indexing self.mask_vars: OrderedSet[str] = OrderedSet() assert dtype is not None, "TritonCSEVariable must have dtype" def update_on_args(self, name, args, kwargs): for arg in args: if isinstance(arg, TritonCSEVariable): self.mask_vars.update(arg.mask_vars) elif isinstance(arg, sympy.Symbol) and arg.name[0] in "xyr": # most of the time index vars don't need masks associated with them # however, when index vars are used to compute indices for indirect reads # those reads should subsequently be masked, self.mask_vars.update({f"{arg.name[0]}mask"}) class TritonOverrides(OpOverrides): """Map element-wise ops to Triton""" @staticmethod def to_dtype( x, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None, use_compute_types=True, ): def _get_min_elements_per_thread( src_dtype: torch.dtype, dst_dtype: torch.dtype ) -> int: if src_dtype == dst_dtype: # No data type conversion is needed. No requirements on min_elem_per_thread. return 0 # fp8 data type conversions has min_elem_per_thread requirements. # Refer to Triton implementations here: # https://github.com/openai/triton/blob/10f59d8ce04052521c1bc0cb3a3f8b98918fc7e3/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp#L10. fp8_dtypes = ( torch.float8_e4m3fn, torch.float8_e5m2, ) # Triton doesn't support type conversions between fp8_e4m3 and fp8_e5m2. assert not ( src_dtype in fp8_dtypes and dst_dtype in fp8_dtypes and src_dtype != dst_dtype ), "Conversions between float8_e5m2 and float8_e4m3fn is not supported!" 
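            # Summary of the thresholds returned below (derived from the
            # referenced Triton conversion kernels): any conversion involving
            # float8_e5m2 needs at least 4 elements per thread, float8_e4m3fn
            # needs at least 2, and all other dtype pairs have no minimum.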
if src_dtype == torch.float8_e5m2 or dst_dtype == torch.float8_e5m2: return 4 if src_dtype == torch.float8_e4m3fn or dst_dtype == torch.float8_e4m3fn: return 2 # No requirements on min_elem_per_thread. return 0 if src_dtype is not None: # Both dtype and src_dtype are set. This is used by torch to(dtype=dtype). # It takes the maximum min_elem_per_thread if there are multiple fp8 conversions # in the same kernel. V.kernel.min_elem_per_thread = max( _get_min_elements_per_thread(src_dtype, dtype), V.kernel.min_elem_per_thread, ) if dtype == torch.bool: return f"({x} != 0)" elif dtype == torch.uint8: # to work around llvm uint conversion semantics # that produces 0's for negative values return f"{x}.to(tl.int8).to(tl.uint8)" if use_compute_types: out_dtype = triton_compute_type(dtype) else: out_dtype = triton_store_type(dtype) return f"{x}.to({out_dtype})" @staticmethod def to_dtype_bitcast(x, dtype: torch.dtype, src_dtype: torch.dtype): triton_dtype = triton_compute_type(dtype) # We may promote float16 or bfloat16 to float32 and cause the # bitwidth of dtype to be different from the input tensor (i.e. float32). # In such as case, we will have to convert the input tensor to # its src_type, perform bitcast, and then convert the bit-casted # tensor back to float to ensure we use values with the right precision. if ( src_dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32 ): triton_src_dtype = str(src_dtype).split(".")[-1] cast_x = f"{x}.to(tl.{triton_src_dtype})" if dtype in (torch.float16, torch.bfloat16): triton_type_name = str(dtype).split(".")[-1] triton_dtype = f"tl.{triton_type_name}" cast_x = f"{cast_x}.to({triton_dtype}, bitcast=True)" if dtype in (torch.float16, torch.bfloat16): return f"{cast_x}.to(tl.float32)" return cast_x else: src_dtype_bitwidth = _get_primitive_bitwidth(src_dtype) target_dtype_bitwidth = _get_primitive_bitwidth(dtype) bitcast = "True" if src_dtype_bitwidth == target_dtype_bitwidth else "False" return f"{x}.to({triton_dtype}, bitcast={bitcast})" @staticmethod def _shaped_constant(value, dtype, shape): type_ = torch._prims_common.dtype_to_type(dtype) triton_val = constant_repr(type_(value)) triton_type = triton_compute_type(dtype) if triton_type == "tl.float32": # Float constants are always f32 in triton return triton_val # NOTE: We use a tensor here in order to get the expected type. # Otherwise, e.g. float64 constants would be trunctated to float32. return f"tl.full({shape}, {triton_val}, {triton_type})" @classmethod def constant(cls, value, dtype): return cls._shaped_constant(value, dtype, shape=[]) @staticmethod def abs(x): return f"tl_math.abs({x})" @staticmethod def libdevice_abs(x): return f"libdevice.abs({x})" @staticmethod def exp(x): return f"tl_math.exp({x})" @staticmethod def libdevice_exp(x): return f"libdevice.exp({x})" @staticmethod def exp2(x): return f"libdevice.exp2({x})" @staticmethod def expm1(x): return f"libdevice.expm1({x})" @staticmethod def sqrt(x): if config.triton.codegen_upcast_to_fp32: return f"libdevice.sqrt({x})" else: needs_upcast = x.dtype in (torch.float16, torch.bfloat16) orig_dtype = triton_type(x.dtype) upcast_string = ".to(tl.float32)" if needs_upcast else "" downcast_string = f".to({orig_dtype})" if needs_upcast else "" return f"libdevice.sqrt({x}{upcast_string}){downcast_string}" @staticmethod def libdevice_sqrt(x): return f"libdevice.sqrt({x})" @staticmethod def relu(x): bug = config.triton.inject_relu_bug_TESTING_ONLY if bug == "compile_error": return "compile error!" 
elif bug == "runtime_error": # NB: this only triggers runtime error as long as input # is not all zero return f'triton_helpers.device_assert_then({x} == 0, "injected assert fail", {x})' elif bug == "accuracy": return f"{x} + 1" elif bug is None: return ops.maximum(ops.constant(0, torch.int32), x) else: raise AssertionError( f"unrecognized config triton.inject_relu_bug_TESTING_ONLY = {bug!r}" ) @staticmethod def minimum(a, b): return f"triton_helpers.minimum({a}, {b})" @staticmethod def maximum(a, b): return f"triton_helpers.maximum({a}, {b})" @staticmethod def where(a, b, c): return f"tl.where({a}, {b}, {c})" @staticmethod def inline_asm_elementwise( *inputs, asm, constraints=None, dtype=torch.float32, is_pure=True, pack=1 ): triton_type = triton_compute_type(dtype) input_refs = ", ".join([str(i) for i in inputs]) if constraints is None: constraints = ", ".join(["=r"] + ["r" for _ in inputs]) return f"tl.inline_asm_elementwise('{asm}', '{constraints}', [{input_refs}], dtype={triton_type}, is_pure={is_pure}, pack={pack})" # noqa: B950 @staticmethod def cos(x): return f"tl_math.cos({x})" @staticmethod def libdevice_cos(x): return f"libdevice.cos({x})" @staticmethod def sin(x): return f"tl_math.sin({x})" @staticmethod def libdevice_sin(x): return f"libdevice.sin({x})" @classmethod def index_expr(cls, expr, dtype): raise NotImplementedError("ops.index_expr not implemented outside a kernel") @staticmethod def masked(mask, body, other): raise NotImplementedError("ops.masked not implemented outside a kernel") @staticmethod def lgamma(x): return f"libdevice.lgamma({x})" @staticmethod def erf(x): return f"libdevice.erf({x})" @staticmethod def cosh(x): return f"libdevice.cosh({x})" @staticmethod def sinh(x): return f"libdevice.sinh({x})" @staticmethod def acos(x): return f"libdevice.acos({x})" @staticmethod def acosh(x): return f"libdevice.acosh({x})" @staticmethod def asin(x): return f"libdevice.asin({x})" @staticmethod def asinh(x): return f"libdevice.asinh({x})" @staticmethod def atan2(x, y): return f"libdevice.atan2({x}, {y})" @staticmethod def atan(x): return f"libdevice.atan({x})" @staticmethod def atanh(x): return f"libdevice.atanh({x})" @staticmethod def copysign(x, y): return f"libdevice.copysign({x}, {y})" @staticmethod def erfc(x): return f"libdevice.erfc({x})" @staticmethod def erfinv(x): return f"libdevice.erfinv({x})" @staticmethod def hypot(x, y): return f"libdevice.hypot({x}, {y})" @staticmethod def log10(x): return f"libdevice.log10({x})" @staticmethod def log2(x): return f"libdevice.log2({x})" @staticmethod def nextafter(x, y): return f"libdevice.nextafter({x}, {y})" @staticmethod def logical_and(a, b): return f"{a} & {b}" @staticmethod def logical_not(a): return f"{a} == 0" @staticmethod def logical_or(a, b): return f"{a} | {b}" @staticmethod def logical_xor(a, b): return f"({a} ^ {b})" @staticmethod def bitwise_and(a, b): return f"{a} & {b}" @staticmethod def bitwise_not(a): return f"~{a}" @staticmethod def bitwise_or(a, b): return f"{a} | {b}" @staticmethod def bitwise_xor(a, b): return f"{a} ^ {b}" @staticmethod def bitwise_left_shift(a, b): return f"{a} << {b}" @staticmethod def bitwise_right_shift(a, b): return f"{a} >> {b}" @staticmethod def rand(seed, offset): offset = f"({offset}).to(tl.uint32)" return f"tl.rand({seed}, {offset})" @staticmethod def randn(seed, offset): offset = f"({offset}).to(tl.uint32)" return f"tl.randn({seed}, {offset})" @staticmethod def randint64(seed, offset, low, high): offset = f"({offset}).to(tl.uint32)" return f"triton_helpers.randint64({seed}, 
{offset}, {low}, {high})" @staticmethod def load_seed(name, offset): raise NotImplementedError("ops.load_seed not implemented outside a kernel") @staticmethod def rsqrt(x): return f"libdevice.rsqrt({x})" @staticmethod def log1p(x): return f"libdevice.log1p({x})" @staticmethod def tan(x): return f"libdevice.tan({x})" @staticmethod def tanh(x): return f"libdevice.tanh({x})" @staticmethod def sigmoid(x): return f"tl.sigmoid({x})" @staticmethod def signbit(x): # XX: This is wrong for the value -0.0 in floating point return ( f"(libdevice.signbit({x}) != 0) if ({x}).dtype is tl.float32 else {x} < 0" ) @staticmethod def fmod(a, b): return f"libdevice.fmod({a}, {b})" @staticmethod def pow(a, b): return f"libdevice.pow({a}, {b})" @staticmethod def log(x): return f"tl_math.log({x})" @staticmethod def libdevice_log(x): return f"libdevice.log({x})" @staticmethod def isinf(x): return f"libdevice.isinf({x}).to(tl.int1)" @staticmethod def isnan(x): return f"libdevice.isnan({x}).to(tl.int1)" @staticmethod def round(x): return f"libdevice.nearbyint({x})" @staticmethod def floor(x): return f"libdevice.floor({x})" @staticmethod def floordiv(a, b): # See the comment in lowering.div_mode. a and b are integer type. # Similar to div_floor_kernel_cuda in pytorch core. # Notice that // in triton behaves as truncdiv instead of floordiv quot = f"{a} // {b}" rem = f"{a} % {b}" return f"tl.where(({a} < 0) != ({b} < 0), tl.where({rem} != 0, {quot} - 1, {quot}), {quot})" @staticmethod def sign(x): z = ops.constant(0, torch.int32) left = ops.to_dtype((ops.lt(z, x)), torch.int8) right = ops.to_dtype((ops.lt(x, z)), torch.int8) sub = ops.sub(left, right) return f"{sub}.to({x}.dtype)" @staticmethod def trunc(x): return f"libdevice.trunc({x})" @staticmethod def truncdiv(a, b): # See the comment in lowering.div_mode. a and b are integer type. # Notice that // in triton behaves as truncdiv instead of floordiv return f"{a} // {b}" @staticmethod def ceil(x): return f"libdevice.ceil({x})" TritonOverrides._initialize_pointwise_overrides("triton") # Use mypy to check protocol implemented correctly def _typecheck_TritonOverrides(h: TritonOverrides) -> OpsHandler[str]: return h class TritonKernelOverrides(TritonOverrides): """Map element-wise ops to Triton within a TritonKernel Unlike TritonOverrides, these assume the code is going to be inserted into the body of the main triton kernel and so it may use indexing and mask variables which are assumed to already be defined in the current scope. """ @classmethod def constant(cls, value, dtype): # NOTE: Cannot use shape=[] as it's not supported by triton-rocm # We could use shape=[1] instead but starting with the correct # ndim avoids extra `tt.expand_dim` ops appearing in the triton IR. ndim = V.kernel.triton_tensor_ndim() shape = [1] * ndim return cls._shaped_constant(value, dtype, shape=shape) @classmethod def index_expr(cls, expr, dtype): indexing = V.kernel.indexing(expr, block_ptr=False) assert isinstance(indexing, IndexingOptions) # Our sympy expr printing casts to the current kernel index dtype. 
# we only respect non int32-int64 dtypes and otherwise use current kernel indexing dtype index_dtype = torch.int32 if V.kernel.index_dtype == "tl.int32" else torch.int64 dtype = dtype if dtype not in (torch.int32, torch.int64) else index_dtype var = V.kernel.cse.generate( V.kernel.compute, indexing.index_str, bounds=get_bounds_index_expr(expr), dtype=dtype, ) if dtype not in (torch.int32, torch.int64): var = V.kernel.cse.generate( V.kernel.compute, cls.to_dtype(var, dtype), dtype=upcast_compute_type(dtype), ) else: # TODO: we are not always consistent in enforcing that the output of the index expr printing # results in the indexing dtype. So if we detect that we have an input which might type promote # to a dtype other than indexing dtype, add a cast. # Trying to avoid dtype = index_dtype for index_var in expr.free_symbols: if symbol_is_type(index_var, SymT.TMP): dtype = torch.promote_types( dtype, V.kernel.cse.varname_map[index_var.name].dtype ) if dtype != index_dtype: var = V.kernel.cse.generate( V.kernel.compute, cls.to_dtype(var, index_dtype), dtype=index_dtype, ) var.mask_vars = indexing.mask_vars return var @staticmethod def masked(mask, body, other): if mask is not None and torch.version.hip is not None: mask = V.kernel.cse.generate( V.kernel.compute, f"{mask}.to(tl.int1)", dtype=torch.bool, ) nodes = body.graph.find_nodes(op="output") assert nodes, "graph for body does not contain an output" need_where = False for node in nodes: for arg in node.args: if arg.target != "load" or should_unwrap_unspec_arg(arg.args[0]): need_where = True value = None if need_where else other with V.kernel.mask_loads(mask, value=value) as new_mask: result = body() if need_where: # Remove once CSEVariables track the dtype if result.bounds.is_bool: other = bool(other) # Take dtype from result to prevent accidental promotion other = V.kernel.cse.generate( V.kernel.compute, f"tl.full({result}.shape, {constant_repr(other)}, {result}.dtype)", bounds=ValueRanges.wrap(other), dtype=result.dtype, ) ret = ops.where(new_mask, result, other) else: ret = result ret.mask_vars.discard(new_mask) return ret @staticmethod def load_seed(name, offset): var = V.kernel.args.input(name) return ( f"tl.load({var} + {V.kernel.args.seed_offset('load_seed_offset', offset)})" ) @staticmethod def frexp(x): cache_key = f"frexp({x})" if cse_val := V.kernel.cse.try_get(cache_key): return cse_val mantissa = V.kernel.cse.newvar(dtype=x.dtype) exponent = V.kernel.cse.newvar(dtype=torch.int32) V.kernel.compute.writeline( f"{mantissa}, {exponent} = triton_helpers.frexp({x})" ) V.kernel.cse.put(cache_key, (mantissa, exponent)) return (mantissa, exponent) # Use mypy to check protocol implemented correctly def _typecheck_TritonKernelOverrides(h: TritonKernelOverrides) -> OpsHandler[str]: return h class HelperFunctions: """An ordered set of helper functions.""" _templates_seen: Dict[str, str] # Template code to function name finalized_helpers: List[str] def __init__(self) -> None: self._templates_seen = {} self.finalized_helpers = [] def add(self, template_code: str, *, base_name="_triton_helper_fn") -> str: """This accepts a function definition with the function name left as a format specifier e.g. @triton.jit def {name}(arg0, arg1): return arg0 + arg1 We add the templated code to the function set and return the name assigned to that function. 
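
        Hypothetical usage (illustrative only; template_code as above):

            helpers = HelperFunctions()
            name = helpers.add(template_code)   # e.g. "_triton_helper_fn0"
            same = helpers.add(template_code)   # deduplicated: same == name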
""" existing_name = self._templates_seen.get(template_code) if existing_name is not None: # Don't duplicate existing helpers return existing_name name = f"{base_name}{len(self.finalized_helpers)}" self._templates_seen[template_code] = name self.finalized_helpers.append(template_code.format(name=name)) return name def __iter__(self): return iter(self.finalized_helpers) def __getitem__(self, idx): return self.finalized_helpers[idx] @dataclasses.dataclass class BlockParameters: """ Class representing ND block dimensions, for block pointer analysis. """ shape: List[sympy.Expr] = dataclasses.field(default_factory=list) block_shape: List[sympy.Expr] = dataclasses.field(default_factory=list) strides: List[sympy.Expr] = dataclasses.field(default_factory=list) offsets: List[sympy.Expr] = dataclasses.field(default_factory=list) def __add__(self, other: BlockParameters) -> BlockParameters: """ Concatenates block parameters. """ cls = type(self) a, b = tuple(dataclasses.asdict(x) for x in (self, other)) return cls(**{key: a[key] + b[key] for key in a}) class CooperativeReductionWorkspaceCache: """ The scratch space used for cooperative reductions can be reused after two reduction loops. This keeps track of what can be reused. """ def __init__(self, args): self.args = args self.current_loop = [] self.prior_loop = [] self.ready_for_reuse = collections.defaultdict(collections.deque) self.loop_count = 0 self.store_count = 0 def allocate(self, nbytes: sympy.Expr): cached = self.ready_for_reuse.get(nbytes) if cached: return cached.popleft() ws_name, ws_offset = self.args.workspace(nbytes, False) self.current_loop.append((nbytes, ws_name, ws_offset)) return (ws_name, ws_offset) def on_loop_end(self): # Buffers can be reused after 2 loop ends for nbytes, ws_name, ws_offset in self.prior_loop: self.ready_for_reuse[nbytes].append((ws_name, ws_offset)) self.prior_loop = self.current_loop self.current_loop = [] self.loop_count += 1 def increment_store_count(self): prior = self.store_count self.store_count += 1 return prior @dataclasses.dataclass class FixedTritonConfig: config: Dict[str, int] def __getitem__(self, item): return self.config[item] class TritonCSE(CSE): """ Subclasses CSE to apply the current load mask to the cache key to avoid CSEing variables across separate masked blocks. 
""" def augment_key(self, cache_key: object) -> object: if mask := V.kernel._load_mask: return (cache_key, mask.name) else: return cache_key class TritonKernel(SIMDKernel): overrides = TritonKernelOverrides # type: ignore[assignment] helper_functions: HelperFunctions kexpr: Callable[[sympy.Expr], str] = texpr allow_block_ptr = True def __init__( self, tiling: Dict[str, sympy.Expr], min_elem_per_thread=0, optimize_mask=True, fixed_config: Optional[FixedTritonConfig] = None, **kwargs, ) -> None: self.optimize_mask: bool = optimize_mask self.fixed_config = fixed_config super().__init__(tiling, **kwargs) self.cse = TritonCSE(self.newvar_prefix, self.suffix) self.post_loop_combine: IndentedBuffer = IndentedBuffer() self.post_loop_store: IndentedBuffer = IndentedBuffer() self.outside_loop_vars: OrderedSet[Any] = OrderedSet() self.min_elem_per_thread = min_elem_per_thread self.block_ptr_id = itertools.count() self.helper_functions = HelperFunctions() self._load_counts: collections.Counter[str] = collections.Counter() # A set of autotuning hints to pass as part of triton_meta self.autotune_hints: OrderedSet[AutotuneHint] = OrderedSet() self.triton_meta: Optional[Dict[str, object]] = None if self.cooperative_reduction: self.init_cooperative_reduction() self.codegen_range_tree() def dtype_to_str(self, dtype: torch.dtype) -> str: return triton_type(dtype) def should_use_cooperative_reduction(self) -> bool: return self.inside_reduction and V.choices.should_use_cooperative_reduction( self.features ) def init_cooperative_reduction(self): """One time setup code for cooperative reductions.""" assert self.cooperative_reduction # shift all the grids over since tl.program_id(0) is for rsplit for tree in self.range_trees: if tree.grid_dim is not None: tree.grid_dim += 1 sem_count = self.numels["x"] if self.fixed_config: sem_count = CeilDiv(sem_count, self.fixed_config["XBLOCK"]) self.semaphores_name = self.args.semaphores(sem_count) self.cooperative_reduction_workspace_cache = CooperativeReductionWorkspaceCache( self.args ) self.body.splice( """ rsplit_id = tl.program_id(0) num_rblocks = (rnumel + RBLOCK - 1) // RBLOCK rsplit_chunk = (num_rblocks + RSPLIT - 1) // RSPLIT * RBLOCK rsplit_start = rsplit_chunk * rsplit_id rsplit_end = rsplit_chunk * (rsplit_id + 1) """, strip=True, ) if not self._has_constant_mask(self.range_trees[-1]): self.body.writeline( "rsplit_end = tl.where(rsplit_end < rnumel, rsplit_end, rnumel)" ) def codegen_range_tree(self): for tree in self.range_trees: # reduction indexing goes inside a loop if not tree.is_loop: self.iteration_ranges_codegen_header(tree, self.body) if self.inside_reduction and self.range_trees[-1].is_loop: # workaround for this issue: # https://gist.github.com/jansel/6527126f781559095c5531f98a4235a7 self.body.writeline( f"rbase = {self.iteration_ranges_ranges_code(self.range_trees[-1])}" ) def need_numel_args(self): r""" Indicate whether we need provide numel as arguments for the generated kernel calls in the benchmark. Should be true for pointwise/reduction kernels but false for triton matmul kernels. 
""" return True def should_use_persistent_reduction(self) -> bool: return self.inside_reduction and V.choices.should_use_persistent_reduction( self.features, self.cooperative_reduction ) def want_no_x_dim(self): if self.persistent_reduction and len(self.numels) == 2: if self.fixed_config: return self.fixed_config["XBLOCK"] == 1 return V.choices.want_no_x_dim(self.features) return False @property def assert_function(self) -> str: return "tl.device_assert" def indexing( self, index: sympy.Expr, *, copy_shape=None, dense_indexing=False, override_mask=None, block_ptr=False, ): """ Compute the index and mask to pass to tl.load() or tl.store() """ index = self.prepare_indexing(index) index_vars = index.free_symbols has_rindex = False mask_vars: OrderedSet[str] = OrderedSet() for var in index_vars: assert isinstance(var, sympy.Symbol) has_rindex = has_rindex or symbol_is_type(var, SymT.RINDEX) if override_mask: pass elif symbol_is_type(var, SymT.TMP): # indirect indexing cse_var = self.cse.varname_map[var.name] mask_vars.update(cse_var.mask_vars) elif symbol_is_type( var, ( SymT.UNBACKED_INT, SymT.SIZE, SymT.PRECOMPUTED_SIZE, SymT.INDEX, SymT.FLOAT, SymT.UNBACKED_FLOAT, ), ): pass else: # var is one of xN, yN or rN assert symbol_is_type( var, (SymT.RINDEX, SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK) ), var.name mask_vars.add(f"{var.name[0]}mask") need_dense = ( config.triton.dense_indexing or dense_indexing or self._load_mask is not None ) and index != 0 have_dense = True have_loop_vars = False dense_mask_vars: OrderedSet[str] = OrderedSet() for tree in self.active_range_trees(): if index_vars.intersection(tree.var_list): have_loop_vars = True else: have_dense = False dense_mask_vars.add(f"{tree.prefix}mask") if ( block_ptr and self.allow_block_ptr and config.triton.use_block_ptr and not override_mask and not self._load_mask and len(mask_vars - dense_mask_vars) == 0 and not self.is_indirect_indexing(index) and have_loop_vars # workaround https://github.com/openai/triton/issues/2821 and self.index_dtype == "tl.int32" ): def match_strided_block( index: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Matches expressions of the form: idx = s * xindex This implies stride (s,), and shape (XBLOCK,). """ symbol = range_tree.symbol() stride = sympy.Wild("stride", exclude=[symbol]) m = index.match(symbol * stride) if m is None: return None return BlockParameters( shape=[range_tree.numel], block_shape=[TritonSymbols.get_block_size(range_tree)], strides=[m[stride]], offsets=[TritonSymbols.get_block_offset(range_tree)], ) def match_mod_div_block( index: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Matches higher-dimensional blocks coming from FloorDiv and ModularIndexing. Example expression to match: sN * ((rindex//(d1 * ... * d(N-1)))) + s1 * ModularIndexing(rindex, 1, d1) + ... + s(N-1) * ModularIndexing(rindex, d1 * ... * d(N-2), d(N-1)) This iterates over a block of shape (dN, ..., d1) and stride (sN, ..., s1). (d1,...,d(N-1)) and (s1,...,sN) are wildcards that we match. Note that dN does not appear in the expression, but we solve for it using range tree numels and the other dims. """ # Bound the possible number of dims. We use the following heuristics: # - At least one dim for each range tree node. # - At least one dim for every FloorDiv or ModularIndexing op. # - At least 2 dims to pattern match. 
num_dims = max( 2, len(self.range_tree_nodes), (index.count(FloorDiv) + index.count(ModularIndexing)), ) # Pattern match to find the strides and offset. index_var = range_tree.symbol() match_result = BlockPatternMatcher.match_mod_div_block_expr( index, index_var, range_tree.numel, num_dims ) if match_result is None: return None ( dims, strides, block_index_exprs, ) = match_result slice_numels = BlockPatternMatcher.get_slice_numels(dims) # Check for applicable iteration range sizes. # When mapping a 1D block into an ND one, we need to know that # the number of elements is not changed. This means the slice numels of # the ND iteration range must evenly divide the length of the 1D block. # There are two cases where we can guarantee this: # 1. Numels are powers of 2. If numel == 2 ** n, and we know XBLOCK == 2 ** m, # with n and m integers, then either numel is a multiple of XBLOCK, or numel # is less than XBLOCK. (If numel is less than XBLOCK, we round up to 1 below.) # 2. Numels are multiples of the maximum possible block size. sizevars = V.graph.sizevars max_block = self.max_block(range_tree.prefix) if any( not sizevars.statically_known_multiple_of(numel, max_block) and not sizevars.statically_known_power_of_2(numel) for numel in slice_numels ): return None # Compute the ND block shape from the linear block size. # Use CielDiv to round leading dimensions up to 1. # Non-leading dimensions are clamped to the size of the iteration range, # while the leading dimension can exceed this to accomodate a larger # block size. linear_block_size = TritonSymbols.get_block_size(range_tree) block_shape: List[sympy.Expr] = [ CeilDiv(linear_block_size, slice_numels[0]) ] + [ sympy.Min(CeilDiv(linear_block_size, numel), dim) for numel, dim in zip(slice_numels[1:], dims[1:]) ] # Compute block offsets from {xyzr}offset and the matched expressions. block_offsets: List[sympy.Expr] = [ sympy_subs( expr, {index_var: TritonSymbols.get_block_offset(range_tree)} ) for expr in block_index_exprs ] return BlockParameters( shape=dims, block_shape=block_shape, strides=strides, offsets=block_offsets, ) def match_block_pointer_subexpr( expr: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Match a block indexing subexpression involving a single range tree. """ for match_func in ( match_strided_block, match_mod_div_block, ): match = match_func(expr, range_tree) if match is not None: return match return None def match_block_pointer() -> Optional[BlockPtrOptions]: index_relative_to_xyr_index = sympy_subs( index, {v: t.expr for v, t in self.range_tree_nodes.items()} ) range_trees = self.active_range_trees(reorder=True) # Partition the index into subexpressions pertaining to each range tree. # For example xindex * 5 + rindex * 3 is partitioned to # (xindex * 5, rindex * 3). index_subexprs = [ BlockPatternMatcher.get_subexpr_involving_symbol( index_relative_to_xyr_index, tree.symbol() ) for tree in range_trees ] # Match each range tree's subexpression separately. range_symbols = {tree.symbol() for tree in range_trees} block_params = BlockParameters() for tree, subexpr in zip(range_trees, index_subexprs): # Reject mixed terms, e.g. xindex * rindex. # NB: the zero expression is allowed, for broadcasting. if len(range_symbols.intersection(subexpr.free_symbols)) > 1: return None # Match the subexpression for this range tree. params = match_block_pointer_subexpr(subexpr, tree) if params is None: return None block_params += params # Collect leftover terms as a constant offset. 
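            # e.g. (hypothetical) for index = 5*xindex + 3*rindex + 7, the
            # per-tree subexpressions are (5*xindex, 3*rindex) and the
            # leftover constant offset computed below is 7.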
offset = index_relative_to_xyr_index - sum(index_subexprs) # Form the block pointer. self.filter_masks(mask_vars) return BlockPtrOptions.create( params=block_params, constant_offset=offset, range_trees=range_trees, mask_vars=mask_vars, get_max_block=self.max_block, ) # Return a block pointer, if indexing matches the pattern. options = match_block_pointer() if options is not None: return options expand_str = None index_str = self.index_to_str(index) if isinstance(index, sympy.Integer): expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() index_str = f"tl.full({expand_str}, {index_str}, tl.int32)" return IndexingOptions( index_str, OrderedSet(), "None", expand_str, has_rindex, index ) if need_dense and not have_dense: expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() index_str = f"tl.broadcast_to({index_str}, {expand_str})" mask_vars = dense_mask_vars elif not have_loop_vars and copy_shape: index_str = f"tl.broadcast_to({index_str}, {copy_shape}.shape)" mask_vars = dense_mask_vars if override_mask: mask_vars = OrderedSet([override_mask]) if self._load_mask: mask_vars.add(self._load_mask) self.filter_masks(mask_vars) mask_str = " & ".join(sorted(map(str, mask_vars))) if mask_vars else "None" return IndexingOptions(index_str, mask_vars, mask_str, expand_str, has_rindex, index) # type: ignore[arg-type] def codegen_block_ptr( self, name: str, var: str, indexing: BlockPtrOptions, other="" ) -> Tuple[str, Optional[DeferredLine], str]: advance_block_ptr = None check = indexing.boundary_check() if not check: # workaround https://github.com/openai/triton/issues/2813 other = "" elif other: assert other == ", other=0.0" other = f", boundary_check={check!r}, padding_option='zero'" else: other = f", boundary_check={check!r}" if ( self.inside_reduction and self.range_trees[-1].is_loop and indexing.has_rindex() ): block_ptr = f"block_ptr{next(self.block_ptr_id)}" self.body.writeline( DeferredLine( name, f"{block_ptr} = {indexing.format(var, roffset=False)}" ) ) advance_block_ptr = DeferredLine( name, f"{block_ptr} = tl.advance({block_ptr}, {indexing.advance_roffset()})", ) else: block_ptr = indexing.format(var) return block_ptr, advance_block_ptr, other def codegen_block_ptr_store_line(self, name, indexing, block_ptr, value, other=""): # Stores require an explicit broadcast. 
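        # A block-pointer tl.store expects the value to already have the block
        # shape, so convert from the kernel's dense shape here instead of
        # relying on implicit broadcasting.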
value = indexing.codegen_broadcast_and_reshape( value, indexing.final_shape, indexing.block_shape, False ) # workaround https://github.com/openai/triton/issues/2814 value = f"{value}.to({triton_store_type(V.graph.get_dtype(name))})" return f"tl.store({block_ptr}, {value}{other})" def check_bounds( self, expr: sympy.Expr, size: sympy.Expr, lower: bool, upper: bool, ): if not (lower or upper): return assert isinstance(expr, sympy.Expr) indexing = self.indexing(expr, block_ptr=False) assert isinstance(indexing, IndexingOptions) index_str = indexing.index_str mask_str = indexing.mask_str if indexing.has_mask() else None size_str = texpr(self.rename_indexing(size)) if upper else None # expr is already wrapped line = self.indirect_assert( index_str, "0" if lower else None, size_str, mask_str ) buffer = self.get_load_buffer(indexing) self.cse.generate(buffer, line, assignment=False, dtype=torch.int32) def get_load_buffer(self, indexing): if indexing.has_indirect() or indexing.has_tmpmask(): # Masked loads must come after the mask is computed return self.compute elif ( self.inside_reduction and self.range_trees[-1].is_loop and not indexing.has_rindex() ): # can lift a common load outside of reduction loop # One exception is when this is an indirect_load. return self.body else: return self.loads def load(self, name: str, index: sympy.Expr): var = self.args.input(name) load_counts = self._load_counts load_counts[name] += 1 make_line: Callable[[str], Union[str, DelayReplaceLine]] = identity indirect_indexing = self.is_indirect_indexing(index) original_index = index indexing = self.indexing(index, block_ptr=True) has_rindex = indexing.has_rindex() has_tmpmask = indexing.has_tmpmask() # Keep the variable in cache if were going to reuse it. Equiv., if any of the following hold # 1) We are doing broadcasting # 2) It is a non-coalesced load. The intuition is that if it's # non-coalesced, we will likely load each element multiple times in # practice. # 3) It will be used later and it won't be CSE'd. 
Equiv., if all the following hold # 3.1) We are in a reduction loop # 3.2) Its not its last use # 3.3) This load will not be lifted to the body # is_coalesced = any( i == 1 for i in self.get_strides_of_load(original_index).values() ) if self.is_broadcasted(original_index): ep = ", eviction_policy='evict_last'" elif not is_coalesced: ep = ", eviction_policy='evict_last'" elif self.inside_reduction and self.range_trees[-1].is_loop: def decide_later(): if load_counts[name] > expected_count and ( has_rindex or indirect_indexing ): return "evict_last" return "evict_first" expected_count = load_counts[name] ep = ", eviction_policy='<EP>'" make_line = functools.partial(DelayReplaceLine, "<EP>", decide_later) else: ep = "" if (has_tmpmask or has_rindex) and indexing.has_mask(): if self._load_other: other = f", other={constant_repr(self._load_other)}" else: other = ", other=0.0" else: other = "" advance_block_ptr = None append_broadcast = None dtype = V.graph.get_dtype(name) if should_unwrap_unspec_arg(name): line = var else: if isinstance(indexing, BlockPtrOptions): block_ptr, advance_block_ptr, other = self.codegen_block_ptr( name, var, indexing, other ) line = f"tl.load({block_ptr}{other}{ep})" line = indexing.codegen_broadcast_and_reshape( line, indexing.block_shape, indexing.final_shape, True ) elif isinstance(original_index, sympy.Integer): line = f"tl.load({var} + ({original_index}))" append_broadcast = indexing.expand_str else: line = f"tl.load({var} + ({indexing.index_str}), {indexing.mask_str}{ep}{other})" if ( dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32 ): line += ".to(tl.float32)" dtype = torch.float32 if dtype == torch.bool and torch.version.hip is None: # Workaround for https://github.com/openai/triton/issues/2151 # tl.load returns int8 when loading from pointer to int1 # NOTE: Currently causes hangs on bool UTs for ROCm line += ".to(tl.int1)" dtype = torch.bool load_buffer = self.get_load_buffer(indexing) result_var = self.cse.generate(load_buffer, make_line(line), dtype=dtype) if result_var.use_count > 1: load_counts[name] -= 1 # don't double count cache hit assert isinstance(result_var, TritonCSEVariable) result_var.mask_vars = indexing.mask_vars # type: ignore[assignment] if append_broadcast: line = f"tl.broadcast_to({result_var}, {append_broadcast})" result_var = self.cse.generate(load_buffer, line, dtype=dtype) if advance_block_ptr: load_buffer.writeline(advance_block_ptr) if not self.inside_reduction or (not indexing.has_rmask() and not has_rindex): self.outside_loop_vars.add(result_var) return result_var def store( self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None ) -> None: var = self.args.output(name) original_index = index indexing = self.indexing(index, dense_indexing=True, block_ptr=mode is None) # Guard against write-after-read corruption in triton. # See # https://github.com/openai/triton/issues/1615 # This triton bug means that a load which is broadcasted over multiple # warps may see the result of a store that happens later in the triton # program. The workaround is to add a barrier before storing, which # enforces that all warps have already read the data. 
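        # The barrier below is only emitted when both conditions hold: the
        # buffer is written in place and the store index is broadcasted.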
is_inplace = name in self.args.inplace_buffers is_broadcasted = self.is_broadcasted(original_index) if is_inplace and is_broadcasted: self.stores.writeline(DeferredLine(name, "tl.debug_barrier()")) advance_block_ptr = None if isinstance(indexing, BlockPtrOptions): block_ptr, advance_block_ptr, other = self.codegen_block_ptr( name, var, indexing ) # block_ptr stores don't do implicit casting line = self.codegen_block_ptr_store_line( name, indexing, block_ptr, value, other ) elif mode is None: line = f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})" elif mode == "atomic_add": line = f"tl.atomic_add({var} + ({indexing.index_str}), {value}, {indexing.mask_str}, sem='relaxed')" else: raise NotImplementedError(f"store mode={mode}") exit_stack = contextlib.ExitStack() if not self.inside_reduction and self.cooperative_reduction: exit_stack.enter_context(self.guard_cooperative_store(name, self.stores)) self.stores.writeline(DeferredLine(name, line)) if advance_block_ptr: self.stores.writeline(advance_block_ptr) if not self.inside_reduction: self.outside_loop_vars.add(value) exit_stack.close() def guard_cooperative_store(self, name, buffer): """ For cooperative reductions only one thread block should write out the result. We rotate which thread block does each write for better parallelism """ idx = self.cooperative_reduction_workspace_cache.increment_store_count() buffer.writeline(DeferredLine(name, f"if rsplit_id == ({idx} % RSPLIT):")) return buffer.indent() def bucketize( self, values: CSEVariable, boundaries: Tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: CSEVariable, indexing_dtype: torch.dtype, right: bool, sorter: Optional[Tuple[str, sympy.Expr]] = None, sorter_indices: Optional[CSEVariable] = None, ) -> CSEVariable: """ See [Note: Inductor bucketize op] """ # Triton performance for bucketize_binary_search is much better when the number # of threads equals the number of elements. # If we're trying to use a bucketize kernel, we should make sure that an # autotuning config with num_elements_per_warp=(warp_size) exists. 
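        # The hint below is picked up later when autotuning configs are
        # generated, so that such a config is actually included.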
self.autotune_hints.add(AutotuneHint.ONE_ELEMENT_PER_THREAD) boundaries_ptr = self.args.input(boundaries[0]) boundary_size = self.index_to_str(boundaries[1]) boundaries_underlying_numel = self.index_to_str(boundaries[2]) boundary_stride = self.index_to_str(boundaries[3]) sorter_ptr = self.args.input(sorter[0]) if sorter else "None" sorter_stride = self.index_to_str(sorter[1]) if sorter else "None" block_size = self.dense_size_str() if indexing_dtype == torch.int32: triton_dtype = "tl.int32" elif indexing_dtype == torch.int64: triton_dtype = "tl.int64" else: raise NotImplementedError( "Bucketize only supports indexing with int32 and int64" ) result = self.cse.generate( self.compute, f"triton_helpers.bucketize_binary_search({values}, " f"{boundaries_ptr}, {boundary_size}, {boundaries_underlying_numel}, {boundary_stride}, " f"{boundary_indices}, " f"{triton_dtype}, " f"{right}, " f"{sorter_ptr}, {sorter_stride}, " f"{sorter_indices}, " f"{block_size}, " ")", dtype=indexing_dtype, # type: ignore[attr-defined] ) return result def reduction_resize(self, value): ndims = self.triton_tensor_ndim() if ndims == 1: return f"triton_helpers.promote_to_tensor({value})" sizes = [":"] * ndims sizes[-1] = "None" return f"{value}[{', '.join(sizes)}]" def reduction( self, dtype: torch.dtype, src_dtype: torch.dtype, reduction_type: ReductionType, value: Union[CSEVariable, Tuple[CSEVariable, ...]], ) -> Union[CSEVariable, Tuple[CSEVariable, ...]]: assert self.inside_reduction masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) if self._load_mask: masks.append(self._load_mask) reduction_range_prefix = self.range_trees[-1].prefix # Say we have # tmp0 = ops.constant(1, torch.int64) # tmp1 = ops.reduction(torch.int64, torch.int64, "sum", tmp0) # tmp0 in the triton code is either a scalar, or single-element tensor # so if we emit tl.sum directly, it will only give 1 instead of RBLOCK * 1 # To avoid this, we broadcast to the expected shape first. 
dense_size_str = self.dense_size_str() value = self._map_tuple_or_scalar( lambda v: self.cse.generate( self.compute, f"tl.broadcast_to({v}, {dense_size_str})", dtype=v.dtype, ), value, ) dim: int root_op: str def final_reduction(value): use_helper = reduction_type in {"any", "max", "min", "prod"} module = "triton_helpers" if use_helper else "tl" if reduction_type in {"max", "min"}: return self.reduction_resize( f"{module}.{reduction_type}2({value}, {dim})" ) return self.reduction_resize(f"{module}.{reduction_type}({value}, {dim})") def final_argreduce(buffer, result_var, value, index): buffer.splice( f"""\ {result_var}_val, {result_var}_idx = triton_helpers.{root_op}_with_index({value}, {index}, {dim}) {result_var} = {self.reduction_resize(f'{result_var}_idx')} """ ) cache_key = (src_dtype, reduction_type, value) if cache_key in self.cse.reduction_cache: return self.cse.reduction_cache[cache_key] dim = self.triton_tensor_ndim() - 1 acc_type = triton_acc_type(src_dtype) torch_acc_type = upcast_acc_dtype(src_dtype) result_var: Any = self.cse.newvar(dtype=torch_acc_type) result_var.mask_vars = OrderedSet( var for var in masks if not prefix_is_reduction(var[0]) ) cond = " & ".join(masks) def where_cond(tval, fval): if not cond: return tval return TritonKernelOverrides.where(cond, tval, fval) if self.persistent_reduction: default = ir.Reduction.default_value(reduction_type, src_dtype) default = self._map_tuple_or_scalar(constant_repr, default) def _mask_value(value, default): return self.cse.generate( self.compute, where_cond(value, default), dtype=value.dtype ) if isinstance(value, tuple): masked_value = [_mask_value(v, d) for v, d in zip(value, default)] else: masked_value = _mask_value(value, default) if reduction_type in {"argmax", "argmin"}: accumulator_index = str( self.cse.generate( self.compute, f"tl.broadcast_to({reduction_range_prefix}index, {masked_value}.shape)", dtype=torch.int64, ) ) root_op = {"argmax": "max", "argmin": "min"}[reduction_type] final_argreduce( self.compute, result_var, masked_value, accumulator_index ) elif reduction_type == "welford_reduce": if self.cooperative_reduction: # cooperative reductions require full welford for correctness result_var = self.welford_reduce( result_var, reduction_type, value, where_cond, acc_type, dtype ) else: # For persistent reductions, don't bother with # welford's algorithm since it uses more registers, and # taking two reductions doesn't increase memory usage. 
result_var = self.welford_reduce_fallback(dtype, value) elif reduction_type == "welford_combine": mean, m2, weight = masked_value welford = f"triton_helpers.welford({mean}, {m2}, {weight}, {dim})" mean, m2, weight = (self.cse.newvar(dtype=dtype) for _ in range(3)) self.compute.writeline(f"{mean}, {m2}, {weight} = {welford}") result_var = tuple( self.cse.generate( self.compute, self.reduction_resize(var_name), dtype=dtype ) for var_name in (mean, m2, weight) ) else: result_var = self.cse.generate( self.compute, final_reduction(masked_value), dtype=dtype ) else: accumulator = self.cse.namedvar(f"_{result_var}", dtype=torch_acc_type) default = ir.Reduction.default_accumulator(reduction_type, src_dtype) default = self._map_tuple_or_scalar(constant_repr, default) if not isinstance(default, tuple): self.body.writeline( f"{accumulator} = tl.full({self.dense_size_str()}, {default}, {acc_type})" ) if reduction_type in {"argmax", "argmin"}: accumulator_index = f"_{result_var}_index" long_max = torch.iinfo(torch.int64).max self.body.writeline( f"{accumulator_index} = tl.full({self.dense_size_str()}, {long_max}, tl.int64)" ) root_op = {"argmax": "max", "argmin": "min"}[reduction_type] self.compute.splice( f"""\ {accumulator}_next, {accumulator_index}_next = triton_helpers.{root_op}imum_with_index( {accumulator}, {accumulator_index}, {value}, {reduction_range_prefix}index ) {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} {accumulator_index} = {where_cond(f'{accumulator_index}_next', accumulator_index)} """ ) final_argreduce( self.post_loop_combine, result_var, accumulator, accumulator_index ) elif is_welford_reduction(reduction_type): result_var = self.welford_reduce( result_var, reduction_type, value, where_cond, acc_type, dtype ) else: combine_fn = ir.get_reduction_combine_fn(reduction_type, src_dtype) updated = combine_fn(accumulator, value) self.compute.writeline( f"{accumulator} = {where_cond(updated, accumulator)}" ) if src_dtype == torch.bool: # This is only really used for aten.any. 
It changes the # final reduction of a non-persistent reduction from # tmp5 = triton_helpers.max(_tmp5, 1)[:, None] # to # tmp5 = triton_helpers.max(_tmp5.to(tl.int8), 1)[:, None].to(tl.int1) # which is needed because tl.reduce doesn't support tl.int1 accumulator_casted_str = f"{accumulator}.to(tl.int8)" result_type = triton_compute_type(dtype) self.post_loop_combine.writeline( f"{result_var} = {final_reduction(accumulator_casted_str)}.to({result_type})" ) else: self.post_loop_combine.writeline( f"{result_var} = {final_reduction(accumulator)}" ) if self.cooperative_reduction: exit_stack = contextlib.ExitStack() for buf in (self.post_loop_combine, self.post_loop_store): # only do cooperative reduction combines if we have more than one thread block buf.writeline("if RSPLIT > 1:") exit_stack.enter_context(buf.indent()) if reduction_type in {"argmax", "argmin"}: self.post_loop_combine.writeline( f"{result_var}_bval = {self.reduction_resize(f'{result_var}_val')}" ) peer_val = self.codegen_cooperative_reduction_peer_combine( f"{result_var}_bval", src_dtype ) peer_idx = self.codegen_cooperative_reduction_peer_combine( result_var, dtype ) final_argreduce(self.post_loop_store, result_var, peer_val, peer_idx) elif is_welford_reduction(reduction_type): assert reduction_type == "welford_reduce" result_mean, result_m2, result_weight = result_var peer_mean = self.codegen_cooperative_reduction_peer_combine( result_mean, upcast_acc_dtype(src_dtype) ) peer_m2 = self.codegen_cooperative_reduction_peer_combine( result_m2, upcast_acc_dtype(src_dtype) ) peer_weight = self.codegen_cooperative_reduction_peer_combine( result_weight, upcast_acc_dtype(src_dtype) ) self.welford_reduce_final_reduction( self.post_loop_store, result_mean, result_m2, result_weight, peer_mean, peer_m2, peer_weight, dim, ) else: peers = self.codegen_cooperative_reduction_peer_combine( result_var, upcast_acc_dtype(src_dtype) ) self.post_loop_store.writeline( f"{result_var} = {final_reduction(peers)}" ) exit_stack.close() self.cse.reduction_cache[cache_key] = result_var if isinstance(result_var, tuple): assert all(isinstance(x, TritonCSEVariable) for x in result_var) self.outside_loop_vars |= OrderedSet(result_var) else: assert isinstance(result_var, TritonCSEVariable) self.outside_loop_vars.add(result_var) return result_var def welford_reduce( self, result_var, reduction_type, value, where_cond, acc_type, dtype ): """Helper to codegen a welford reduction""" dim = self.triton_tensor_ndim() - 1 accumulator = f"{result_var}_mean" accumulator_m2 = f"{result_var}_m2" accumulator_weight = f"{result_var}_weight" self.body.writeline( f"{accumulator} = tl.zeros({self.dense_size_str()}, {acc_type})" ) self.body.writeline( f"{accumulator_m2} = tl.zeros({self.dense_size_str()}, {acc_type})" ) self.body.writeline( f"{accumulator_weight} = tl.zeros({self.dense_size_str()}, {acc_type})" ) if reduction_type == "welford_combine": mean, m2, weight = value self.compute.splice( f"""\ {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_combine( {accumulator}, {accumulator_m2}, {accumulator_weight}, {mean}, {m2}, {weight} ) """ ) else: assert reduction_type == "welford_reduce" self.compute.splice( f"""\ {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_reduce( {value}, {accumulator}, {accumulator_m2}, {accumulator_weight}, roffset == 0 ) """ ) self.compute.splice( f"""\ {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} {accumulator_m2} = 
{where_cond(f'{accumulator_m2}_next', accumulator_m2)} {accumulator_weight} = {where_cond(f'{accumulator_weight}_next', accumulator_weight)} """ ) result_mean = result_var result_m2 = self.cse.newvar(dtype=dtype) result_weight = self.cse.newvar(dtype=dtype) return self.welford_reduce_final_reduction( self.post_loop_combine, result_mean, result_m2, result_weight, accumulator, accumulator_m2, accumulator_weight, dim, ) def welford_reduce_final_reduction( self, buf, result_mean, result_m2, result_weight, accumulator, accumulator_m2, accumulator_weight, dim, ): """Helper to codegen call to triton_helpers.welford""" buf.splice( f"""\ {result_mean}_tmp, {result_m2}_tmp, {result_weight}_tmp = triton_helpers.welford( {accumulator}, {accumulator_m2}, {accumulator_weight}, {dim} ) {result_mean} = {self.reduction_resize(f'{result_mean}_tmp')} {result_m2} = {self.reduction_resize(f'{result_m2}_tmp')} {result_weight} = {self.reduction_resize(f'{result_weight}_tmp')} """ ) return result_mean, result_m2, result_weight def max_rsplit(self): if self.fixed_config: return self.fixed_config["RSPLIT"] return TRITON_MAX_RSPLIT def codegen_cooperative_reduction_peer_combine(self, result_var, dtype): """ Generate code to save a [XBLOCK, RSPLIT] temporary workspace, where each thread block writes a different column. After the barrier, every thread block loads the completed value so that it can compute the final value independently. """ xnumel = self.numels["x"] mask = "xindex < xnumel" if xnumel != 1 and not self.no_x_dim else None expand = "" if self.no_x_dim else "[None,:]" nbytes = xnumel * dtype.itemsize * self.max_rsplit() ws_name, ws_offset = self.cooperative_reduction_workspace_cache.allocate(nbytes) self.post_loop_combine.splice( f""" {result_var}_ws = ({ws_name} + {self.index_to_str(ws_offset)}).to(tl.pointer_type({triton_type(dtype)})) tl.store({result_var}_ws + (xindex * RSPLIT + rsplit_id), {result_var}, {mask}) """, strip=True, ) self.post_loop_store.writeline( f"{result_var}_peers = tl.load({result_var}_ws + (xindex * RSPLIT + tl.arange(0, RSPLIT){expand}), " f"{mask}, eviction_policy='evict_first')" ) return f"{result_var}_peers" def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable): assert self.inside_reduction self.inside_reduction = False indexing = self.indexing(index, block_ptr=True) self.inside_reduction = True var = self.args.output(name) exit_stack = contextlib.ExitStack() if self.cooperative_reduction: exit_stack.enter_context( self.guard_cooperative_store(name, self.post_loop_store) ) if isinstance(indexing, BlockPtrOptions): self.post_loop_store.writeline( DeferredLine( name, self.codegen_block_ptr_store_line( name, indexing, indexing.format(var), value, f", boundary_check={indexing.boundary_check()!r}", ), ) ) else: assert isinstance(indexing, IndexingOptions) self.post_loop_store.writeline( DeferredLine( name, f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})", ) ) exit_stack.close() def _lift_helper(self, fn, num_args) -> str: # Lift IR function for scan operations into a triton function # in the global namespace helper = IndentedBuffer() helper.writeline("@triton.jit") args = [tuple(f"arg{i}_{n}" for n in range(num_args)) for i in range(2)] signature = ", ".join(itertools.chain.from_iterable(args)) helper.writeline(f"def {{name}}({signature}):") cse = CSE(prefix="", suffix="") overrides = TritonOverrides(V.MockHandler()) # Build a name that changes depending on fn to workaround a triton bug # where the combine_fn to reduce and scan is not 
hashed, and so different # scan ops may collide in the triton cache. # This is fixed with the latest triton pin, but not the triton-rocm pin. helper_name = "_triton_helper_fn" class CSEProxy: def __getattr__(self, name: str) -> Callable[..., CSEVariable]: def inner(*args, **kwargs): nonlocal helper_name helper_name += f"_{name}" return cse.generate( helper, getattr(overrides, name)(*args, **kwargs), dtype=torch.float32, ) return inner with helper.indent(), V.set_ops_handler(CSEProxy()): outputs = fn(*args) outputs = ", ".join(str(output) for output in outputs) helper.writeline(f"return {outputs}") return self.helper_functions.add(helper.getvalue(), base_name=helper_name) def scan( self, dtypes: Tuple[torch.dtype, ...], combine_fn: Callable[ [Tuple[CSEVariable, ...], Tuple[CSEVariable, ...]], Tuple[CSEVariable, ...] ], values: Tuple[CSEVariable, ...], ) -> Tuple[CSEVariable, ...]: assert self.inside_reduction assert not self.cooperative_reduction, "TODO" masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) assert not self._load_mask, "ops.scan not supported inside ops.masked" broadcasted_values = [] accumulators = [] cse_compute = functools.partial(self.cse.generate, self.compute) combine_helper_fn = self._lift_helper(combine_fn, len(values)) dim = self.triton_tensor_ndim() - 1 for value, dtype in zip(values, dtypes): value_dtype = self.cse.generate( self.compute, f"{value}.to({triton_compute_type(dtype)})", dtype=upcast_compute_type(dtype), ) value = self.cse.generate( self.compute, f"tl.broadcast_to({value_dtype}, {self.dense_size_str()})", dtype=upcast_compute_type(dtype), ) broadcasted_values.append(value) acc_type = triton_acc_type(dtype) if not self.persistent_reduction: accumulator = self.cse.newvar(dtype=upcast_compute_type(dtype)) reduced_size = self.dense_size_list() reduced_size[-1] = "1" reduced_size = f"[{', '.join(reduced_size)}]" default = "float('nan')" if dtype.is_floating_point else "-1" self.body.writeline( f"{accumulator} = tl.full({reduced_size}, {default}, {acc_type})" ) accumulators.append(accumulator) def csv(values): return " ".join(f"{value}," for value in values) def cse_multiple(line, values, masks, dtypes): n = len(values) cache_keys = [f"{line}, {i}, {masks}" for i in range(n)] if all(self.cse.contains(cache_key) for cache_key in cache_keys): return [self.cse.get(cache_key) for cache_key in cache_keys] result_vars = [self.cse.newvar(dtype=_dtype) for _dtype in dtypes] self.compute.writeline( f"{csv(result_vars)} = {line}", ) for result_var, cache_key in zip(result_vars, cache_keys): if masks: result_var.mask_vars = masks # type: ignore[attr-defined] self.cse.put(cache_key, result_var) return tuple(result_vars) partial_scan_vars = cse_multiple( f"tl.associative_scan(({csv(broadcasted_values)}), {dim}, {combine_helper_fn})", values, masks, (upcast_compute_type(dtype) for dtype in dtypes), ) if not self.persistent_reduction: # tl.reduce doesn't work for non-commutative operators, so instead # of repeating the scan op as a reduction, we use sum to select the # last scan value partial_reduce_vars = [ cse_compute( f"triton_helpers.select_one(({partial_scan_var}), rbase == (RBLOCK - 1), dim=-1, keep_dims=True)", dtype=upcast_compute_type(partial_scan_var.dtype), ) for partial_scan_var in partial_scan_vars ] accs_next = combine_fn(tuple(accumulators), tuple(partial_reduce_vars)) full_scan_vars = combine_fn(tuple(accumulators), partial_scan_vars) result_vars = [ cse_compute( f"tl.where(roffset > 0, 
{full_scan}, {partial_scan})", dtype=partial_scan.dtype, ) for full_scan, partial_scan in zip(full_scan_vars, partial_scan_vars) ] for acc_next, accumulator, partial_reduce in zip( accs_next, accumulators, partial_reduce_vars ): self.compute.writeline( f"{accumulator} = tl.where(roffset > 0, {acc_next}, {partial_reduce})" ) else: result_vars = partial_scan_vars for result_var in result_vars: result_var.mask_vars = masks # type: ignore[attr-defined] return tuple(result_vars) def sort( self, dtypes: Tuple[torch.dtype, ...], values: Tuple[CSEVariable, ...], stable: bool, descending: bool, ) -> Tuple[CSEVariable, ...]: assert self.inside_reduction assert not self.cooperative_reduction, "TODO" masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) assert not self._load_mask, "ops.sort not supported inside ops.masked" assert ( self.persistent_reduction ), "ops.sort is only supported in persistent reductions" reduction_range_prefix = self.range_trees[-1].prefix cse_compute = functools.partial(self.cse.generate, self.compute) dim = self.triton_tensor_ndim() - 1 assert len(dtypes) == len(values) broadcasted_values = [ cse_compute( f"tl.broadcast_to({value}, {self.dense_size_str()})", dtype=dtypes[i] ) for i, value in enumerate(values) ] def csv(values): return " ".join(f"{value}," for value in values) def cse_multiple(line, n, masks, dtypes): cache_keys = [f"{line}, {i}, {masks}" for i in range(n)] if all(self.cse.contains(cache_key) for cache_key in cache_keys): return [self.cse.get(cache_key) for cache_key in cache_keys] result_vars = [self.cse.newvar(dtype=dtypes[i]) for i in range(n)] # type: ignore[attr-defined] self.compute.writeline( f"{csv(result_vars)} = {line}", ) for result_var, cache_key in zip(result_vars, cache_keys): if masks: result_var.mask_vars = masks # type: ignore[attr-defined] self.cse.put(cache_key, result_var) return tuple(result_vars) assert self.range_trees[-1].is_reduction rnumel = "None" if self._has_constant_mask(self.range_trees[-1]) else "rnumel" if len(values) == 2: line = ( f"triton_helpers.sort_with_index({broadcasted_values[0]}, {broadcasted_values[1]}," f" {rnumel}, {dim}, stable={stable}, descending={descending})" ) result_vars = cse_multiple(line, len(values), masks, dtypes) else: raise AssertionError("Unhandled sort") for result_var, input_var in zip(result_vars, values): result_var.mask_vars = masks # type: ignore[attr-defined] result_var.bounds = input_var.bounds return tuple(result_vars) def codegen_body(self): """ Concat output code from index_code, loads, compute, stores, suffix into self.body. For pointwise kernels, this is called just once at the end. For reduction kernels, this generates a loop over the reduction axis. 
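        As a rough, illustrative sketch (the exact lines are spliced from the
        buffers referenced below, not fixed here), a looped reduction body
        comes out as:

            for roffset in range(0, rnumel, RBLOCK):
                <indexing_code>
                <loads>
                <compute>
                <stores>
            <post_loop_combine>
            <post_loop_store>

        with the loop bounds switching to (rsplit_start, rsplit_end) for
        cooperative reductions.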
""" if not ( self.indexing_code or self.loads or self.stores or self.compute or self.post_loop_combine or self.post_loop_store ): return if self.inside_reduction and self.range_trees[-1].is_loop: if self.cooperative_reduction: self.body.writeline( "for roffset in range(rsplit_start, rsplit_end, RBLOCK):" ) else: self.body.writeline("for roffset in range(0, rnumel, RBLOCK):") with self.body.indent(): # last range tree is always reduction self.iteration_ranges_codegen_header(self.range_trees[-1], self.body) self.body.splice(self.indexing_code) self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) # invalidate any caches that came from inside the reduction loop self.cse.invalidate(self.outside_loop_vars) self.range_trees[-1].cache_clear() else: self.body.splice(self.indexing_code) self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) self.body.splice(self.post_loop_combine) if self.cooperative_reduction and ( self.post_loop_combine or self.post_loop_store ): sem_ptr = f"{self.semaphores_name} + tl.program_id(1)" self.body.splice( f""" if RSPLIT > 1: triton_helpers.x_grid_barrier({sem_ptr}) """, strip=True, ) self.cooperative_reduction_workspace_cache.on_loop_end() self.body.splice(self.post_loop_store) self.indexing_code.clear() self.loads.clear() self.compute.clear() self.stores.clear() self.post_loop_combine.clear() self.post_loop_store.clear() def codegen_kernel_benchmark(self, num_gb, grid=None): result = IndentedBuffer() argdefs, call_args, signature, _ = self.args.python_argdefs() result.writelines(["", "", "def get_args():"]) with result.indent(): name_cnt = itertools.count() var_names = [] for arg_name, arg_sig in zip(call_args, signature): var_name = f"arg_{next(name_cnt)}" buf = V.graph.try_get_buffer(arg_name) if buf: result.writeline( f"{var_name} = rand_strided({V.graph.sizevars.size_hints(buf.get_size())}, {V.graph.sizevars.size_hints(buf.get_stride())}, device='{buf.get_device()}', dtype={buf.get_dtype()})" # noqa: B950 line too long ) elif arg_name in V.graph.constants: # note that random seed is put in V.graph.constants const_tensor = V.graph.constants[arg_name] result.writeline( f"{var_name} = rand_strided({V.graph.sizevars.size_hints(const_tensor.size())}, {V.graph.sizevars.size_hints(const_tensor.stride())}, device='{const_tensor.device}', dtype={const_tensor.dtype})" # type: ignore[arg-type] # noqa: B950 line too long ) elif isinstance(arg_sig, SizeArg): symval_hint = V.graph.sizevars.size_hint(arg_sig.expr) # Force the seed_offset to be 0 so calls to the same kernel # using different seed offset will have the same benchmark harness. # We can dedup kernel definitions in this case. 
if "seed_offset" in arg_sig.name: symval_hint = 0 result.writeline(f"{var_name} = {symval_hint}") elif isinstance(arg_sig, WorkspaceArg): device = V.graph.get_current_device_or_throw() count = V.graph.sizevars.size_hint(arg_sig.count) result.writeline( f"{var_name} = torch.zeros({count}, device='{device}', dtype={arg_sig.dtype})" ) else: raise KeyError( f"Don't find the buffer or const tensor for {arg_name}" ) var_names.append(var_name) result.writeline(f"return {', '.join(var_names)},") result.writelines(["\n", "\n", "def call(args):"]) if grid is None: grid = [] extra_args = [] extra_args_str = None for tree in self.active_range_trees(): expr = pexpr(V.graph.sizevars.size_hint(tree.numel)) extra_args.append(expr) if not tree.is_reduction: grid.append(expr) if self.need_numel_args(): extra_args_str = ", ".join(map(str, extra_args)) + ", " else: extra_args_str = "" grid_arg = f"{extra_args_str}grid=grid({', '.join(grid)})" else: grid_arg = f"grid={grid}" current_device = V.graph.get_current_device_or_throw() index = current_device.index with result.indent(): result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") with result.indent(): result.writeline( V.graph.device_ops.set_device(index) ) # no-op to ensure context stream_name = f"stream{index}" result.writeline(f"{stream_name} = get_raw_stream({index})") result.writeline( f"{str(Placeholder.KERNEL_NAME)}.run(*args, {grid_arg}, stream={stream_name})" ) # benchmark all configs result.writelines(["\n", "\n", "def benchmark_all_configs(args):"]) with result.indent(): result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") with result.indent(): result.writeline( V.graph.device_ops.set_device(index) ) # no-op to ensure context result.writeline( f"return {str(Placeholder.KERNEL_NAME)}.benchmark_all_configs(*args, {grid_arg})" ) result.writelines(["\n", "\n", "if __name__ == '__main__':"]) with result.indent(): result.writeline( "from torch._inductor.runtime.benchmarking import benchmarker" ) result.writeline("") result.writeline("args = get_args()") result.writeline( "ms = benchmarker.benchmark_gpu(lambda: call(args), rep=40)" ) result.writeline(f"num_gb = {num_gb}") result.writeline("gb_per_s = num_gb / (ms / 1e3)") result.writeline( 'print(f"{ms:.3f}ms {num_gb:.3f}GB {gb_per_s:.2f}GB/s")' ) return result def imports_for_benchmark_kernel(self): return textwrap.dedent( """ from torch._dynamo.testing import rand_strided {} import torch from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid """.format( V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") ) ) def _get_heuristic(self): if self.fixed_config: return "fixed_config" elif self.cooperative_reduction: return "cooperative_reduction" elif self.persistent_reduction: assert self.inside_reduction return "persistent_reduction" elif self.inside_reduction: return "reduction" return "pointwise" @staticmethod def inductor_meta_common(): inductor_meta = { "backend_hash": torch.utils._triton.triton_hash_with_backend(), "are_deterministic_algorithms_enabled": torch.are_deterministic_algorithms_enabled(), "assert_indirect_indexing": config.assert_indirect_indexing, "autotune_local_cache": config.autotune_local_cache, "autotune_pointwise": config.triton.autotune_pointwise, "autotune_remote_cache": config.autotune_remote_cache, "force_disable_caches": config.force_disable_caches, "dynamic_scale_rblock": config.dynamic_scale_rblock, "max_autotune": config.max_autotune, "max_autotune_pointwise": config.max_autotune_pointwise, "min_split_scan_rblock": 
config.triton.min_split_scan_rblock, "spill_threshold": config.triton.spill_threshold, "store_cubin": config.triton.store_cubin, } if torch.version.hip is not None: inductor_meta["is_hip"] = True if config.is_fbcode(): inductor_meta["is_fbcode"] = True if config.profile_bandwidth: inductor_meta["profile_bandwidth"] = config.profile_bandwidth inductor_meta["profile_bandwidth_regex"] = config.profile_bandwidth_regex inductor_meta["profile_bandwidth_output"] = config.profile_bandwidth_output inductor_meta[ "profile_bandwidth_with_do_bench_using_profiling" ] = config.profile_bandwidth_with_do_bench_using_profiling if config.coordinate_descent_tuning: inductor_meta[ "coordinate_descent_tuning" ] = config.coordinate_descent_tuning inductor_meta[ "coordinate_descent_search_radius" ] = config.coordinate_descent_search_radius inductor_meta[ "coordinate_descent_check_all_directions" ] = config.coordinate_descent_check_all_directions return inductor_meta def codegen_kernel(self, name=None): code = IndentedBuffer() size_hints = [] for numel in self.numels.values(): numel_hint = V.graph.sizevars.symbolic_hint(numel) if not isinstance(numel_hint, (int, sympy.Integer)): # This default heuristic hint was picked carefully: it is # large, to ensure that we don't shrink the block size (since # if you don't have many elements, it'd be wasteful to pick a # large block size). Since we don't know how many elements we # might have, we should be OK with some inefficiency to make # sure we handle the large case well. 8192 is the largest # block size we support, so we pick that. # # If we have a better hint for unbacked SymInts (e.g., because # a user told us, or we are tracking upper bounds) we could # use that here. size_hint = 8192 else: size_hint = next_power_of_2(int(numel_hint)) size_hints.append(size_hint) if not self.inside_reduction: size_hints.pop() if name is None: code.splice(gen_common_triton_imports()) device_type = V.graph.get_current_device_or_throw().type if device_type == "cpu": code.splice("triton_helpers.set_driver_to_cpu()") else: code.splice("triton_helpers.set_driver_to_gpu()") if config.benchmark_kernel: code.splice(self.imports_for_benchmark_kernel()) argdefs, _, signature, _ = self.args.python_argdefs() # maps actual expression to SizeArg if it is in sizevars replacements for i, arg in enumerate(signature): if isinstance(arg, SizeArg): # mypy is unhappy about the sympy.Expr # type for the key of the dict below symbol = cast(sympy.Symbol, arg.expr) if symbol in V.graph.sizevars.inv_precomputed_replacements: signature[i] = SizeArg( arg.name, V.graph.sizevars.inv_precomputed_replacements[symbol] ) mutated_args: OrderedSet[str] = OrderedSet() for mutation in self.mutations: if mutation in self.args.input_buffers: mutated_args.add(self.args.input_buffers[mutation]) if ( mutation in self.args.inplace_buffers and mutation not in V.graph.removed_buffers and mutation not in self.removed_buffers ): mutated_args.add(self.args.inplace_buffers[mutation].inner_name) if mutation in self.args.output_buffers: mutated_args.add(self.args.output_buffers[mutation]) # Note: [Workspace Mutation] # workspace arguments are mutated, but are not marked as mutations in self.mutations # because their buffers are added during codegen, and aren't tracked during # lowering/scheduling. So we add them as mutated_args explicitly below. 
# # In the logic below, we only mark the workspaces a mutated if they are marked with # zero_fill: that's because, if we don't expect the buffer to be pre-filled with # zeros, then, although we still mutate the data, we don't care about those # mutations because we don't make any assumptions about the contents of the # workspace buffer. Similarly, ZERO_PER_GRAPH requires the kernel to return # the buffer back to its original state. for argname, arg in zip(argdefs, signature): if ( isinstance(arg, WorkspaceArg) and arg.zero_mode == WorkspaceZeroMode.ZERO_ON_CALL ): mutated_args.add(argname) mutated_args = sorted(mutated_args) triton_meta_signature = signature_to_meta( signature, size_dtype=self.index_dtype, argdefs=argdefs ) triton_meta = { "signature": triton_meta_signature, "device": DeviceProperties.create(V.graph.get_current_device_or_throw()), "constants": {}, } # Skip memory optimization for forward of the training loop where we expect # every new node will increase the peak memory and our greedy approach would # introduce a lot of unnecessary cpu copies. optimize_mem = V.graph.is_inference or V.graph.is_backward inductor_meta = { "autotune_hints": set(self.autotune_hints), "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), "mutated_arg_names": mutated_args, "optimize_mem": optimize_mem, "no_x_dim": self.no_x_dim, "num_load": self.num_load, "num_reduction": self.num_reduction, **self.inductor_meta_common(), } if self.cooperative_reduction: inductor_meta["persistent_reduction"] = self.persistent_reduction num_gb = None if config.benchmark_kernel or config.profile_bandwidth: num_gb = self.estimate_kernel_num_bytes() / 1e9 inductor_meta["kernel_num_gb"] = num_gb for tree in self.active_range_trees(): sizearg = SizeArg(f"{tree.prefix}numel", tree.numel) signature.append(sizearg) triton_meta_signature[sizearg.name] = signature_of( sizearg, size_dtype=self.index_dtype ) argdefs.append(f"{tree.prefix}numel") # constexpr version causes issues, see # https://github.com/pytorch/torchdynamo/pull/1362 # triton_meta["constants"][len(argdefs)] = V.graph.sizevars.size_hint( # tree.numel # ) # argdefs.append(f"{tree.prefix}numel: tl.constexpr") triton_meta["configs"] = [config_of(signature)] # Triton compiler includes equal_to_1 args into constants even # when they are not constexpr. otherwise there may be a segfault # during launching the Inductor-compiled Triton kernel. 
# https://github.com/pytorch/pytorch/issues/120478#issuecomment-1962822307 # https://github.com/openai/triton/blob/231efe9ed2d200be0f69a07c298e4342b08efe3d/python/triton/runtime/jit.py#L384 for arg_num in triton_meta["configs"][0].equal_to_1: # type: ignore[index] triton_meta["constants"][signature[arg_num].name] = 1 # type: ignore[index] self.triton_meta = triton_meta for tree in self.range_trees: if tree.is_reduction and self.persistent_reduction: # RBLOCK for persistent_reduction is defined in codegen_static_numels continue if tree.tensor_dim is None: continue argdefs.append(f"{tree.prefix.upper()}BLOCK : tl.constexpr") if self.cooperative_reduction: argdefs.append("RSPLIT : tl.constexpr") self.codegen_body() for helper in self.helper_functions: code.writeline("") code.splice(helper) if self.fixed_config: heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( config={self.fixed_config.config!r}, filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r} ) @triton.jit """ elif self.inside_reduction: reduction_hint = self.features.get_reduction_hint() heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( size_hints={size_hints!r}, reduction_hint={reduction_hint}, filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r} ) @triton.jit """ else: tile_hint = "" if len(size_hints) == 2: if len(signature) == 4: # input, output and 2 args tile_hint = "tile_hint=TileHint.SQUARE," else: tile_hint = "tile_hint=TileHint.DEFAULT," heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( size_hints={size_hints!r}, {tile_hint} filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r}, min_elem_per_thread={self.min_elem_per_thread} ) @triton.jit """ code.splice(heuristics_line) code.writeline( f"def {name or str(Placeholder.KERNEL_NAME)}({', '.join(argdefs)}):" ) with code.indent(): self.codegen_static_numels(code) for old, new in self.args.aliases(): code.writeline(f"{old} = {new}") code.splice(self.body) if config.benchmark_kernel: code.splice(self.codegen_kernel_benchmark(num_gb)) return code.getvalue() @staticmethod def _get_persistent_RBLOCK(rnumel): rnumel = V.graph.sizevars.simplify(rnumel) if isinstance(rnumel, (sympy.Integer, int)): val = int(rnumel) val = next_power_of_2(val) else: val = 128 while not V.graph.sizevars.statically_known_leq(rnumel, val): if val > 16 * 1024: raise ValueError(f"Failed to find static RBLOCK for {rnumel}") val *= 2 return val @staticmethod def has_persistent_RBLOCK(rnumel): try: TritonKernel._get_persistent_RBLOCK(rnumel) return True except ValueError: return False def codegen_static_numels(self, code): """ We get a small speedup from hard coding numels if they are static. This code stomps on the passed-in values by writing an constant to the top of the kernel. In a kernel like: def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): We would add xnumel = 4096 rnumel = 768 After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream knows that its a static numel, as that you just plop a constant into the kernel. 
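        Persistent reductions are handled the same way further down: RBLOCK is
        pinned as a constexpr (with "// RSPLIT" appended under cooperative
        reductions), and kernels with no_x_dim emit XBLOCK: tl.constexpr = 1.
        For example (sizes illustrative only):

            RBLOCK: tl.constexpr = 1024
            XBLOCK: tl.constexpr = 1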
""" for tree in self.range_trees: if not tree.is_reduction or self.inside_reduction: simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) if isinstance(simplified_tree_numel, (sympy.Integer, int)): code.writeline(f"{tree.prefix}numel = {int(simplified_tree_numel)}") if tree.is_reduction and self.persistent_reduction: val = self._get_persistent_RBLOCK(tree.numel) if self.cooperative_reduction: val = f"{val} // RSPLIT" code.writeline(f"RBLOCK: tl.constexpr = {val}") if tree.prefix == "x" and self.no_x_dim: code.writeline("XBLOCK: tl.constexpr = 1") def _get_grid_fn_str(self): return self._get_grid_fn().__name__ def _get_grid_fn(self): if self.cooperative_reduction: return cooperative_reduction_grid return default_grid_fn def add_numel_to_call_args_and_grid(self, name, call_args, arg_types, grid): # TODO(jansel): if there are constants, we shouldn't bother passing them as args for tree in self.range_trees: if isinstance(tree.numel, (sympy.Integer, sympy.Symbol)): expr = tree.numel else: expr = V.graph.wrapper_code.generate_numel_expr(name, tree) if not tree.is_reduction or self.inside_reduction: call_args.append(expr) arg_types.append(type(expr)) if tree.grid_dim is not None: grid.append(expr) def call_kernel(self, name: str, node: Optional[IRNode] = None): wrapper = V.graph.wrapper_code wrapper.write_triton_header_once() _, call_args, _, arg_types = self.args.python_argdefs() grid: List[Any] = [] self.add_numel_to_call_args_and_grid(name, call_args, arg_types, grid) current_device = V.graph.get_current_device_or_throw() for ws in self.args.workspace_args: wrapper.generate_workspace_allocation(ws) grid = wrapper.generate_default_grid( name, grid, grid_callable=self._get_grid_fn() ) wrapper.generate_kernel_call( name, call_args, grid, current_device.index, gpu=current_device.type != "cpu", triton=True, arg_types=arg_types, grid_fn=self._get_grid_fn_str(), triton_meta=self.triton_meta, ) for ws in reversed(self.args.workspace_args): wrapper.generate_workspace_deallocation(ws) def codegen_nan_check(self): wrapper = V.graph.wrapper_code _, call_args, arg_signatures, _ = self.args.python_argdefs() for arg, arg_signature in zip(call_args, arg_signatures): if isinstance(arg_signature, TensorArg): if V.graph.cpp_wrapper: wrapper.writeline( f'AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_check_inf_and_nan("{arg}", {arg}));' ) else: line = f"assert not {arg}.isnan().any().item()" wrapper.writeline(line) line = f"assert not {arg}.isinf().any().item()" wrapper.writeline(line) def create_cse_var(self, *args, **kwargs): return TritonCSEVariable(*args, **kwargs) def codegen_iteration_ranges_entry(self, entry: IterationRangesEntry): line = f"{entry.name} = {self.kexpr(self.rename_indexing(entry.expr))}" if entry.root.is_loop: self.indexing_code.writeline(line) else: # lift non-reduction stores outside loop self.body.writeline(line) def iteration_ranges_ranges_code(self, entry): assert entry.tensor_dim is not None size = self.indexing_size_str(entry.tensor_dim) index_dtype = self.index_dtype suffix = f".to({index_dtype})" if index_dtype != "tl.int32" else "" if ( self.cooperative_reduction and self.persistent_reduction and entry.is_reduction ): suffix = f"{suffix} + rsplit_start" return f"tl.arange(0, {entry.prefix.upper()}BLOCK){size}{suffix}" def iteration_ranges_scalar_code(self, entry, value): index_dtype = self.index_dtype ndim = self.triton_tensor_ndim() size = [1] * ndim return f"tl.full({size}, {value}, {index_dtype})" def iteration_ranges_get_pid(self, entry): assert entry.grid_dim is not None key 
= f"tl.program_id({entry.grid_dim})" # y_grid has a limit, so express it in terms of y and z in case of overflow. # z grid is only exercised when max_tiles == 3 (off by default). if ( entry.grid_dim == 1 and not entry.has_zdim and not self.cooperative_reduction and not V.graph.sizevars.statically_known_leq(entry.numel, get_max_y_grid()) ): # For ynumel larger than max_ygrid, we need to use zdim. # For each z dimension, there are tl.num_programs(1) yblocks which is passed by grad(x,y,z). # So, we need to add tl.program_id(z) * tl.num_programs(y) *YBLOCK to get the correct yoffset. key = f"({key} + tl.program_id({entry.grid_dim + 1}) * tl.num_programs({entry.grid_dim}))" pid = entry.pid_cache.get(key, key) if self.index_dtype != "tl.int32": return f"{pid}.to({self.index_dtype})" return pid def max_block(self, prefix): if self.fixed_config: return self.fixed_config[f"{prefix.upper()}BLOCK"] return TRITON_MAX_BLOCK[prefix.upper()] def _has_constant_mask(self, tree: IterationRangesRoot): if not self.optimize_mask: return False if V.graph.sizevars.statically_known_equals(tree.numel, 1): # type: ignore[arg-type] return True # Masks are superfluous if numel is a multiple of BLOCK # (We use the fact that BLOCK is required by triton to be a power of 2) if tree.is_reduction and self.persistent_reduction: max_block = self._get_persistent_RBLOCK(tree.numel) elif tree.prefix == "x" and self.no_x_dim: max_block = 1 else: max_block = self.max_block(tree.prefix) if tree.is_reduction and self.cooperative_reduction: max_block = max_block * self.max_rsplit() # Optional optimization: if block divides numel exactly, we will # never need to do a masked load to handle stragglers at the end. # If this tree is for the y dimension, we should only use a constant # mask if it can be guaranteed that: # 1. (ynumel / YBLOCK) < max_ygrid or # 2. (ynumel / YBLOCK) % max_ygrid == 0 # Because YBLOCK is not constant, use a conservative heuristic: # only use a constant mask if ynumel < max_ygrid. # It's faster to avoid masking at all. But it is sound to always # mask. 
if V.graph.sizevars.statically_known_multiple_of(tree.numel, max_block): return ( tree.grid_dim != 1 or tree.has_zdim or V.graph.sizevars.statically_known_leq(tree.numel, get_max_y_grid()) ) return False def filter_masks(self, mask_vars): for tree in self.range_trees: if self._has_constant_mask(tree): mask_vars.discard(f"{tree.prefix}mask") def iteration_ranges_codegen_header(self, entry, code): x = entry.prefix if entry.is_loop: code.writeline(f"{entry.name} = {x}offset + {x}base") elif entry.grid_dim is None: # no need to "{x}offset = " code.writeline(f"{entry.name} = {self.iteration_ranges_ranges_code(entry)}") code.writeline(f"{x}offset = 0") else: if entry.tensor_dim is not None: line = f"{x}offset + {self.iteration_ranges_ranges_code(entry)}" else: line = self.iteration_ranges_scalar_code(entry, f"{x}offset") code.writelines( [ f"{x}offset = {self.iteration_ranges_get_pid(entry)} * {x.upper()}BLOCK", f"{entry.name} = {line}", ] ) if self._has_constant_mask(entry): sizes = self.dense_size_str() code.writeline(f"{x}mask = tl.full({sizes}, True, tl.int1)") else: code.writeline(f"{x}mask = {entry.name} < {x}numel") class TritonScheduling(SIMDScheduling): kernel_type: Type[Any] = TritonKernel backend_features = dict.fromkeys( # dict for deterministic order [ BackendFeature.FOREACH, BackendFeature.BUCKETIZE, BackendFeature.INPLACE_BUFFERS, BackendFeature.MASKED_SCATTER_WITH_INDEX, BackendFeature.SCAN, BackendFeature.TRITON_TEMPLATES, ] ) if torch.version.hip is None: backend_features.update( dict.fromkeys( [ # TODO: Move this above when ROCm triton adds support for multiple inputs BackendFeature.TUPLE_REDUCTION, BackendFeature.SORT, ] ) ) def __init__(self, scheduler: Scheduler) -> None: super().__init__(scheduler) if scheduler is None or not hasattr(scheduler, "nodes"): return for node in scheduler.nodes: if isinstance(node, (SchedulerNode, FusedSchedulerNode)): node.debug_device_str = debug_triton_code @classmethod def get_backend_features(cls, device: torch.device): if ( config.triton.cooperative_reductions or config.triton.force_cooperative_reductions ): return { **cls.backend_features, BackendFeature.REDUCE_TO_SINGLE_ELEMENT: None, } return cls.backend_features def codegen_comment(self, node_schedule): wrapper = V.graph.wrapper_code origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) if origins: wrapper.writeline(origins) if config.debug_fusion: from torch._inductor.scheduler import ( BaseSchedulerNode, ForeachKernelSchedulerNode, ) if not any( isinstance(n, ForeachKernelSchedulerNode) for n in node_schedule ): # We probably should look what are the nodes inside a foreach # schedule node node_names = [ n.get_name() for n in node_schedule if isinstance(n, BaseSchedulerNode) ] wrapper.writeline( f"{wrapper.comment} Fused node name list: {', '.join(node_names)}" ) def define_kernel(self, src_code, node_schedule, kernel): wrapper = V.graph.wrapper_code if src_code in wrapper.src_to_kernel: kernel_name = wrapper.src_to_kernel[src_code] else: fused_name = ( get_fused_kernel_name(node_schedule, config.triton.descriptive_names) if config.triton.descriptive_names else "" ) kernel_category = get_kernel_category_by_source_code(src_code)[:3] kernel_name = "_".join( ["triton", kernel_category, fused_name, wrapper.next_kernel_suffix()] ) # use the original src_code as the key wrapper.src_to_kernel[src_code] = kernel_name subs_name = kernel_name if config.triton.unique_kernel_names else "triton_" # DESCRIPTIVE_NAME is used for profiling purposes; it shows the full kernel name # 
even when unique_kernel_names is turned off. Meanwhile, KERNEL_NAME is sometimes set # to "triton_" to maximize caching opportunities (when unique_kernel_names = False). src_code = src_code.replace(str(Placeholder.DESCRIPTIVE_NAME), kernel_name) src_code = src_code.replace(str(Placeholder.KERNEL_NAME), subs_name) # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. src_code = src_code.replace("#pragma CMT", "#") basename, _, kernel_path = get_path(code_hash(src_code.strip()), "py") compile_wrapper = IndentedBuffer() compile_wrapper.writeline(f"async_compile.triton({subs_name!r}, '''") compile_wrapper.splice(src_code, strip=True) current_device = V.graph.get_current_device_or_throw() compile_wrapper.writeline(f"''', device_str='{current_device.type}')") metadata_comment = f"# kernel path: {kernel_path}" origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) metadata_comment += "\n" + origins + "\n" + detailed_origins wrapper.define_kernel( kernel_name, compile_wrapper.getvalue(), metadata_comment ) # log kernel metadata for offline analysis. # E.g. one can find all unaligned inner reduction and check if # padding helps with the perf kernel by kernel. if metrics.is_metric_table_enabled("kernel_metadata"): metrics.log_kernel_metadata(kernel_name, kernel_path, src_code) return kernel_name def benchmark_fused_nodes(self, nodes): with preserve_rng_state(), torch.cuda.device( V.graph.get_current_device_or_throw() ): src_code = self.generate_kernel_code_from_nodes( nodes, benchmark_kernel=True ) mod = PyCodeCache.load(src_code) def cache_file_path(): assert mod.__file__ is not None return os.path.splitext(mod.__file__)[0] + ".kernel_perf" def load_cache(): path = cache_file_path() if os.path.exists(path): with open(path) as fd: return float(fd.read()) return None def store_cache(): path = cache_file_path() with open(path, "w") as fd: fd.write(str(ms)) log.debug( "kernel src code for %s written to: %s", {n.get_name() for n in nodes}, mod.__file__, ) ms = load_cache() if ms is not None: return ms, mod.__file__ args = mod.get_args() call = mod.call wrapped_jit_function = mod.triton_ # call once to trigger the compilation try: call(wrapped_jit_function.clone_args(*args)[0]) except Exception as e: log.debug( "Exception (%s) in compiling fused nodes %s", e, {n.get_name() for n in nodes}, ) ms = float("inf") store_cache() return ms, mod.__file__ launchers = wrapped_jit_function.launchers assert len(launchers) == 1 if launchers[0].n_spills > 0: # skip benchmarking the kernel if there are register spills ms = float("inf") else: # We have to clone the inplace updated arguments to avoid earlier calls # generating out of range indices for later calls. 
ms = benchmarker.benchmark_gpu( lambda: call(wrapped_jit_function.clone_args(*args)[0]) ) # overhead of cloning args gives bias for fusing the kernel # in the case of mutating/in-placeable second fusion # TODO - would be better as a hook in triton do_bench that reset # the input values between benchmarking if len(wrapped_jit_function.mutated_arg_names) > 0: ms = ms - benchmarker.benchmark_gpu( lambda: wrapped_jit_function.clone_args(*args) ) log.debug( "The fused kernel for %s took %.3f ms to run", {n.get_name() for n in nodes}, ms, ) store_cache() return ms, mod.__file__ def create_kernel_choices( self, kernel_features, kernel_args, kernel_kwargs ) -> List[SIMDKernel]: is_scan = kernel_features.contains_op("scan") is_split_scan = is_scan and any( node.is_split_scan() for node in kernel_features.scheduler_nodes() ) kernel_type: Type[TritonKernel] = self.kernel_type if is_split_scan: from .triton_split_scan import TritonSplitScanKernel kernel_type = TritonSplitScanKernel if is_scan: # TODO(jansel): scan does not yet work with cooperative reductions kernel_kwargs["override_cooperative_reduction"] = False # ops.sort only works with persistent reduction, and is not bandwidth bound anyway # so taking the hit of non-coalesced loads is okay if kernel_features.contains_op("sort"): kernel_kwargs["override_persistent_reduction"] = True kernel_kwargs["override_cooperative_reduction"] = False if not TritonKernel.has_persistent_RBLOCK(kernel_features.reduction_numel): # Cannot use persistent reduction with unknown dynamic rnumel assert not kernel_kwargs.get("override_persistent_reduction") kernel_kwargs["override_persistent_reduction"] = False kernel_kwargs = V.choices.triton_kernel_kwargs( kernel_type, kernel_features, kernel_args, kernel_kwargs ) kernel = kernel_type(*kernel_args, **kernel_kwargs) return self.add_multi_kernel_choices(kernel, kernel_args, kernel_kwargs) def add_multi_kernel_choices( self, kernel: SIMDKernel, kernel_args: List[Any], kernel_kwargs: Dict[str, Any], ) -> List[SIMDKernel]: kernels: List[SIMDKernel] = [kernel] if not config.triton.multi_kernel: return kernels optional_persistent = kernel.persistent_reduction and not kernel_kwargs.get( "override_persistent_reduction" ) optional_cooperative = kernel.cooperative_reduction and not kernel_kwargs.get( "override_cooperative_reduction" ) if optional_persistent: kernels.append( self.kernel_type( *kernel_args, **kernel_kwargs, override_persistent_reduction=False, ) ) if optional_cooperative: rnumel = kernel.numels["r"] # for larger sizes non-cooperative gets very slow if V.graph.sizevars.statically_known_leq(rnumel, 65536): kernels.append( other := self.kernel_type( *kernel_args, **kernel_kwargs, override_cooperative_reduction=False, ) ) if optional_persistent and other.persistent_reduction: kernels.append( self.kernel_type( *kernel_args, **kernel_kwargs, override_cooperative_reduction=False, override_persistent_reduction=False, ) ) if len(kernels) > 1: for kernel2 in kernels[1:]: # Keep buffers needed by the non-persistent reduction so both kernels have the same arguments kernel2.must_keep_buffers = kernel.must_keep_buffers # persistent kernels must be generated last so must_keep_buffers works right kernels.sort(key=lambda k: k.persistent_reduction) return kernels def benchmark_combo_kernel(self, node_list): def cache_file_path(): assert mod.__file__ is not None return os.path.splitext(mod.__file__)[0] + ".kernel_perf" def load_cache(): path = cache_file_path() if os.path.exists(path): with open(path) as fd: return tuple(float(e) 
for e in fd.read().split()) return (None, None) def store_cache(): path = cache_file_path() with open(path, "w") as fd: fd.write(str(ms) + " " + str(ms_clone)) total_ms, file_list = 0, [] total_clone_ms = 0 removed_buffers_orig = V.graph.removed_buffers V.graph.removed_buffers = OrderedSet(removed_buffers_orig) inplaced_to_remove_orig = V.graph.inplaced_to_remove V.graph.inplaced_to_remove = OrderedSet(inplaced_to_remove_orig) enable_autotune = config.combo_kernels_autotune > 0 mixed_sizes = config.combo_kernel_allow_mixed_sizes > 0 kernel_code_list = self.generate_combo_kernel_code( subkernel_nodes=node_list, custom_part_algorithm=True, enable_autotune=enable_autotune, mixed_sizes=mixed_sizes, only_gen_src_code=True, ) for src_code, _, node_group in kernel_code_list: fused_node_lists = [node.get_nodes() for node in node_group] names = [n.get_name() for nodes in fused_node_lists for n in nodes] src_code = src_code.replace(str(Placeholder.KERNEL_NAME), "triton_") mod = PyCodeCache.load(src_code) log.debug( "kernel src code for %s written to: %s", names, mod.__file__, ) ms, ms_clone = load_cache() if ms is not None: total_ms += ms total_clone_ms += ms_clone file_list.append(mod.__file__) continue args = mod.get_args() call = mod.call wrapped_jit_function = mod.triton_ # call once to trigger the compilation call(wrapped_jit_function.clone_args(*args)[0]) launchers = wrapped_jit_function.launchers assert len(launchers) == 1 if launchers[0].n_spills > 0: # skip benchmarking the kernel if there are register spills ms = ms_clone = float("inf") else: # We have to clone the inplace updated arguments to avoid earlier calls # generating out of range indices for later calls. ms = benchmarker.benchmark_gpu( lambda: call(wrapped_jit_function.clone_args(*args)[0]) ) ms_clone = benchmarker.benchmark_gpu( lambda: wrapped_jit_function.clone_args(*args)[0] ) log.debug( "The fused kernel for %s took %.3f ms to run, %.3f ms to clone inputs", {n.get_name() for n in node_group}, ms, ms_clone, ) store_cache() total_ms += ms total_clone_ms += ms_clone file_list.append(mod.__file__) V.graph.removed_buffers = removed_buffers_orig V.graph.inplaced_to_remove = inplaced_to_remove_orig return total_ms, total_clone_ms, file_list def debug_triton_code(node: BaseSchedulerNode) -> List[str]: lines = [] multi_template = node.get_template_node() assert multi_template is None or isinstance(multi_template, ir.MultiTemplateBuffer) if multi_template and multi_template.make_kernel_render is None: lines.append(f"{node.get_name()} Unfinalized multi template buffer") else: from torch._inductor.codegen.cuda_combined_scheduling import ( CUDACombinedScheduling, ) device = node.get_device() assert device is not None backend = node.scheduler.get_backend(device) assert isinstance( backend, (SIMDScheduling, CUDACombinedScheduling) ), f"Scheduling backend should be SIMD or CUDACombined when generating debug Triton strings, got: {type(backend)}" with V.graph.set_current_device(device): # Don't increment kernel count when generating debug string. # This will confuse some unit tests that check the number of # generated kernels. old_generated_kernel_count = metrics.generated_kernel_count triton_code = backend.generate_kernel_code_from_nodes( node.get_nodes() ).strip() metrics.generated_kernel_count = old_generated_kernel_count lines.append(f"{node.get_name()} Triton code:") lines.append(textwrap.indent(triton_code, " ")) return lines
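# Illustrative sketch, not part of the Inductor source above or below: both
# benchmark_fused_nodes and benchmark_combo_kernel cache timings next to the
# generated module in a "<module>.kernel_perf" file -- one float for fused
# nodes, two space-separated floats (run ms, clone ms) for combo kernels.
# `read_kernel_perf` below is a hypothetical standalone reader for that
# convention, written only to make the file format explicit.
import os
from typing import Optional, Tuple


def read_kernel_perf(module_file: str) -> Optional[Tuple[float, ...]]:
    # Timings live alongside the generated kernel module, e.g. foo.py ->
    # foo.kernel_perf; a missing file means nothing has been benchmarked yet.
    path = os.path.splitext(module_file)[0] + ".kernel_perf"
    if not os.path.exists(path):
        return None
    with open(path) as fd:
        # One or two whitespace-separated floats, depending on the producer.
        return tuple(float(token) for token in fd.read().split())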
@triton.jit def {name}(arg0, arg1): return arg0 + arg1 We add the templated code to the function set and return the name assigned to that function. """ existing_name = self._templates_seen.get(template_code) if existing_name is not None: # Don't duplicate existing helpers return existing_name name = f"{base_name}{len(self.finalized_helpers)}" self._templates_seen[template_code] = name self.finalized_helpers.append(template_code.format(name=name)) return name def __iter__(self): return iter(self.finalized_helpers) def __getitem__(self, idx): return self.finalized_helpers[idx] @dataclasses.dataclass class BlockParameters: """ Class representing ND block dimensions, for block pointer analysis. """ shape: List[sympy.Expr] = dataclasses.field(default_factory=list) block_shape: List[sympy.Expr] = dataclasses.field(default_factory=list) strides: List[sympy.Expr] = dataclasses.field(default_factory=list) offsets: List[sympy.Expr] = dataclasses.field(default_factory=list) def __add__(self, other: BlockParameters) -> BlockParameters: """ Concatenates block parameters. """ cls = type(self) a, b = tuple(dataclasses.asdict(x) for x in (self, other)) return cls(**{key: a[key] + b[key] for key in a}) class CooperativeReductionWorkspaceCache: """ The scratch space used for cooperative reductions can be reused after two reduction loops. This keeps track of what can be reused. """ def __init__(self, args): self.args = args self.current_loop = [] self.prior_loop = [] self.ready_for_reuse = collections.defaultdict(collections.deque) self.loop_count = 0 self.store_count = 0 def allocate(self, nbytes: sympy.Expr): cached = self.ready_for_reuse.get(nbytes) if cached: return cached.popleft() ws_name, ws_offset = self.args.workspace(nbytes, False) self.current_loop.append((nbytes, ws_name, ws_offset)) return (ws_name, ws_offset) def on_loop_end(self): # Buffers can be reused after 2 loop ends for nbytes, ws_name, ws_offset in self.prior_loop: self.ready_for_reuse[nbytes].append((ws_name, ws_offset)) self.prior_loop = self.current_loop self.current_loop = [] self.loop_count += 1 def increment_store_count(self): prior = self.store_count self.store_count += 1 return prior @dataclasses.dataclass class FixedTritonConfig: config: Dict[str, int] def __getitem__(self, item): return self.config[item] class TritonCSE(CSE): """ Subclasses CSE to apply the current load mask to the cache key to avoid CSEing variables across separate masked blocks. 
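    For example (variable names assumed for illustration): while codegen runs
    under a load mask named tmp3, a cache key `line` is effectively stored as
    (`line`, "tmp3"), so the same expression generated outside that masked
    block gets a fresh CSE variable rather than reusing the masked one.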
""" def augment_key(self, cache_key: object) -> object: if mask := V.kernel._load_mask: return (cache_key, mask.name) else: return cache_key class TritonKernel(SIMDKernel): overrides = TritonKernelOverrides # type: ignore[assignment] helper_functions: HelperFunctions kexpr: Callable[[sympy.Expr], str] = texpr allow_block_ptr = True def __init__( self, tiling: Dict[str, sympy.Expr], min_elem_per_thread=0, optimize_mask=True, fixed_config: Optional[FixedTritonConfig] = None, **kwargs, ) -> None: self.optimize_mask: bool = optimize_mask self.fixed_config = fixed_config super().__init__(tiling, **kwargs) self.cse = TritonCSE(self.newvar_prefix, self.suffix) self.post_loop_combine: IndentedBuffer = IndentedBuffer() self.post_loop_store: IndentedBuffer = IndentedBuffer() self.outside_loop_vars: OrderedSet[Any] = OrderedSet() self.min_elem_per_thread = min_elem_per_thread self.block_ptr_id = itertools.count() self.helper_functions = HelperFunctions() self._load_counts: collections.Counter[str] = collections.Counter() # A set of autotuning hints to pass as part of triton_meta self.autotune_hints: OrderedSet[AutotuneHint] = OrderedSet() self.triton_meta: Optional[Dict[str, object]] = None if self.cooperative_reduction: self.init_cooperative_reduction() self.codegen_range_tree() def dtype_to_str(self, dtype: torch.dtype) -> str: return triton_type(dtype) def should_use_cooperative_reduction(self) -> bool: return self.inside_reduction and V.choices.should_use_cooperative_reduction( self.features ) def init_cooperative_reduction(self): """One time setup code for cooperative reductions.""" assert self.cooperative_reduction # shift all the grids over since tl.program_id(0) is for rsplit for tree in self.range_trees: if tree.grid_dim is not None: tree.grid_dim += 1 sem_count = self.numels["x"] if self.fixed_config: sem_count = CeilDiv(sem_count, self.fixed_config["XBLOCK"]) self.semaphores_name = self.args.semaphores(sem_count) self.cooperative_reduction_workspace_cache = CooperativeReductionWorkspaceCache( self.args ) self.body.splice( """ rsplit_id = tl.program_id(0) num_rblocks = (rnumel + RBLOCK - 1) // RBLOCK rsplit_chunk = (num_rblocks + RSPLIT - 1) // RSPLIT * RBLOCK rsplit_start = rsplit_chunk * rsplit_id rsplit_end = rsplit_chunk * (rsplit_id + 1) """, strip=True, ) if not self._has_constant_mask(self.range_trees[-1]): self.body.writeline( "rsplit_end = tl.where(rsplit_end < rnumel, rsplit_end, rnumel)" ) def codegen_range_tree(self): for tree in self.range_trees: # reduction indexing goes inside a loop if not tree.is_loop: self.iteration_ranges_codegen_header(tree, self.body) if self.inside_reduction and self.range_trees[-1].is_loop: # workaround for this issue: # https://gist.github.com/jansel/6527126f781559095c5531f98a4235a7 self.body.writeline( f"rbase = {self.iteration_ranges_ranges_code(self.range_trees[-1])}" ) def need_numel_args(self): r""" Indicate whether we need provide numel as arguments for the generated kernel calls in the benchmark. Should be true for pointwise/reduction kernels but false for triton matmul kernels. 
""" return True def should_use_persistent_reduction(self) -> bool: return self.inside_reduction and V.choices.should_use_persistent_reduction( self.features, self.cooperative_reduction ) def want_no_x_dim(self): if self.persistent_reduction and len(self.numels) == 2: if self.fixed_config: return self.fixed_config["XBLOCK"] == 1 return V.choices.want_no_x_dim(self.features) return False @property def assert_function(self) -> str: return "tl.device_assert" def indexing( self, index: sympy.Expr, *, copy_shape=None, dense_indexing=False, override_mask=None, block_ptr=False, ): """ Compute the index and mask to pass to tl.load() or tl.store() """ index = self.prepare_indexing(index) index_vars = index.free_symbols has_rindex = False mask_vars: OrderedSet[str] = OrderedSet() for var in index_vars: assert isinstance(var, sympy.Symbol) has_rindex = has_rindex or symbol_is_type(var, SymT.RINDEX) if override_mask: pass elif symbol_is_type(var, SymT.TMP): # indirect indexing cse_var = self.cse.varname_map[var.name] mask_vars.update(cse_var.mask_vars) elif symbol_is_type( var, ( SymT.UNBACKED_INT, SymT.SIZE, SymT.PRECOMPUTED_SIZE, SymT.INDEX, SymT.FLOAT, SymT.UNBACKED_FLOAT, ), ): pass else: # var is one of xN, yN or rN assert symbol_is_type( var, (SymT.RINDEX, SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK) ), var.name mask_vars.add(f"{var.name[0]}mask") need_dense = ( config.triton.dense_indexing or dense_indexing or self._load_mask is not None ) and index != 0 have_dense = True have_loop_vars = False dense_mask_vars: OrderedSet[str] = OrderedSet() for tree in self.active_range_trees(): if index_vars.intersection(tree.var_list): have_loop_vars = True else: have_dense = False dense_mask_vars.add(f"{tree.prefix}mask") if ( block_ptr and self.allow_block_ptr and config.triton.use_block_ptr and not override_mask and not self._load_mask and len(mask_vars - dense_mask_vars) == 0 and not self.is_indirect_indexing(index) and have_loop_vars # workaround https://github.com/openai/triton/issues/2821 and self.index_dtype == "tl.int32" ): def match_strided_block( index: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Matches expressions of the form: idx = s * xindex This implies stride (s,), and shape (XBLOCK,). """ symbol = range_tree.symbol() stride = sympy.Wild("stride", exclude=[symbol]) m = index.match(symbol * stride) if m is None: return None return BlockParameters( shape=[range_tree.numel], block_shape=[TritonSymbols.get_block_size(range_tree)], strides=[m[stride]], offsets=[TritonSymbols.get_block_offset(range_tree)], ) def match_mod_div_block( index: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Matches higher-dimensional blocks coming from FloorDiv and ModularIndexing. Example expression to match: sN * ((rindex//(d1 * ... * d(N-1)))) + s1 * ModularIndexing(rindex, 1, d1) + ... + s(N-1) * ModularIndexing(rindex, d1 * ... * d(N-2), d(N-1)) This iterates over a block of shape (dN, ..., d1) and stride (sN, ..., s1). (d1,...,d(N-1)) and (s1,...,sN) are wildcards that we match. Note that dN does not appear in the expression, but we solve for it using range tree numels and the other dims. """ # Bound the possible number of dims. We use the following heuristics: # - At least one dim for each range tree node. # - At least one dim for every FloorDiv or ModularIndexing op. # - At least 2 dims to pattern match. 
num_dims = max( 2, len(self.range_tree_nodes), (index.count(FloorDiv) + index.count(ModularIndexing)), ) # Pattern match to find the strides and offset. index_var = range_tree.symbol() match_result = BlockPatternMatcher.match_mod_div_block_expr( index, index_var, range_tree.numel, num_dims ) if match_result is None: return None ( dims, strides, block_index_exprs, ) = match_result slice_numels = BlockPatternMatcher.get_slice_numels(dims) # Check for applicable iteration range sizes. # When mapping a 1D block into an ND one, we need to know that # the number of elements is not changed. This means the slice numels of # the ND iteration range must evenly divide the length of the 1D block. # There are two cases where we can guarantee this: # 1. Numels are powers of 2. If numel == 2 ** n, and we know XBLOCK == 2 ** m, # with n and m integers, then either numel is a multiple of XBLOCK, or numel # is less than XBLOCK. (If numel is less than XBLOCK, we round up to 1 below.) # 2. Numels are multiples of the maximum possible block size. sizevars = V.graph.sizevars max_block = self.max_block(range_tree.prefix) if any( not sizevars.statically_known_multiple_of(numel, max_block) and not sizevars.statically_known_power_of_2(numel) for numel in slice_numels ): return None # Compute the ND block shape from the linear block size. # Use CielDiv to round leading dimensions up to 1. # Non-leading dimensions are clamped to the size of the iteration range, # while the leading dimension can exceed this to accomodate a larger # block size. linear_block_size = TritonSymbols.get_block_size(range_tree) block_shape: List[sympy.Expr] = [ CeilDiv(linear_block_size, slice_numels[0]) ] + [ sympy.Min(CeilDiv(linear_block_size, numel), dim) for numel, dim in zip(slice_numels[1:], dims[1:]) ] # Compute block offsets from {xyzr}offset and the matched expressions. block_offsets: List[sympy.Expr] = [ sympy_subs( expr, {index_var: TritonSymbols.get_block_offset(range_tree)} ) for expr in block_index_exprs ] return BlockParameters( shape=dims, block_shape=block_shape, strides=strides, offsets=block_offsets, ) def match_block_pointer_subexpr( expr: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Match a block indexing subexpression involving a single range tree. """ for match_func in ( match_strided_block, match_mod_div_block, ): match = match_func(expr, range_tree) if match is not None: return match return None def match_block_pointer() -> Optional[BlockPtrOptions]: index_relative_to_xyr_index = sympy_subs( index, {v: t.expr for v, t in self.range_tree_nodes.items()} ) range_trees = self.active_range_trees(reorder=True) # Partition the index into subexpressions pertaining to each range tree. # For example xindex * 5 + rindex * 3 is partitioned to # (xindex * 5, rindex * 3). index_subexprs = [ BlockPatternMatcher.get_subexpr_involving_symbol( index_relative_to_xyr_index, tree.symbol() ) for tree in range_trees ] # Match each range tree's subexpression separately. range_symbols = {tree.symbol() for tree in range_trees} block_params = BlockParameters() for tree, subexpr in zip(range_trees, index_subexprs): # Reject mixed terms, e.g. xindex * rindex. # NB: the zero expression is allowed, for broadcasting. if len(range_symbols.intersection(subexpr.free_symbols)) > 1: return None # Match the subexpression for this range tree. params = match_block_pointer_subexpr(subexpr, tree) if params is None: return None block_params += params # Collect leftover terms as a constant offset. 
offset = index_relative_to_xyr_index - sum(index_subexprs) # Form the block pointer. self.filter_masks(mask_vars) return BlockPtrOptions.create( params=block_params, constant_offset=offset, range_trees=range_trees, mask_vars=mask_vars, get_max_block=self.max_block, ) # Return a block pointer, if indexing matches the pattern. options = match_block_pointer() if options is not None: return options expand_str = None index_str = self.index_to_str(index) if isinstance(index, sympy.Integer): expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() index_str = f"tl.full({expand_str}, {index_str}, tl.int32)" return IndexingOptions( index_str, OrderedSet(), "None", expand_str, has_rindex, index ) if need_dense and not have_dense: expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() index_str = f"tl.broadcast_to({index_str}, {expand_str})" mask_vars = dense_mask_vars elif not have_loop_vars and copy_shape: index_str = f"tl.broadcast_to({index_str}, {copy_shape}.shape)" mask_vars = dense_mask_vars if override_mask: mask_vars = OrderedSet([override_mask]) if self._load_mask: mask_vars.add(self._load_mask) self.filter_masks(mask_vars) mask_str = " & ".join(sorted(map(str, mask_vars))) if mask_vars else "None" return IndexingOptions(index_str, mask_vars, mask_str, expand_str, has_rindex, index) # type: ignore[arg-type] def codegen_block_ptr( self, name: str, var: str, indexing: BlockPtrOptions, other="" ) -> Tuple[str, Optional[DeferredLine], str]: advance_block_ptr = None check = indexing.boundary_check() if not check: # workaround https://github.com/openai/triton/issues/2813 other = "" elif other: assert other == ", other=0.0" other = f", boundary_check={check!r}, padding_option='zero'" else: other = f", boundary_check={check!r}" if ( self.inside_reduction and self.range_trees[-1].is_loop and indexing.has_rindex() ): block_ptr = f"block_ptr{next(self.block_ptr_id)}" self.body.writeline( DeferredLine( name, f"{block_ptr} = {indexing.format(var, roffset=False)}" ) ) advance_block_ptr = DeferredLine( name, f"{block_ptr} = tl.advance({block_ptr}, {indexing.advance_roffset()})", ) else: block_ptr = indexing.format(var) return block_ptr, advance_block_ptr, other def codegen_block_ptr_store_line(self, name, indexing, block_ptr, value, other=""): # Stores require an explicit broadcast. 
value = indexing.codegen_broadcast_and_reshape( value, indexing.final_shape, indexing.block_shape, False ) # workaround https://github.com/openai/triton/issues/2814 value = f"{value}.to({triton_store_type(V.graph.get_dtype(name))})" return f"tl.store({block_ptr}, {value}{other})" def check_bounds( self, expr: sympy.Expr, size: sympy.Expr, lower: bool, upper: bool, ): if not (lower or upper): return assert isinstance(expr, sympy.Expr) indexing = self.indexing(expr, block_ptr=False) assert isinstance(indexing, IndexingOptions) index_str = indexing.index_str mask_str = indexing.mask_str if indexing.has_mask() else None size_str = texpr(self.rename_indexing(size)) if upper else None # expr is already wrapped line = self.indirect_assert( index_str, "0" if lower else None, size_str, mask_str ) buffer = self.get_load_buffer(indexing) self.cse.generate(buffer, line, assignment=False, dtype=torch.int32) def get_load_buffer(self, indexing): if indexing.has_indirect() or indexing.has_tmpmask(): # Masked loads must come after the mask is computed return self.compute elif ( self.inside_reduction and self.range_trees[-1].is_loop and not indexing.has_rindex() ): # can lift a common load outside of reduction loop # One exception is when this is an indirect_load. return self.body else: return self.loads def load(self, name: str, index: sympy.Expr): var = self.args.input(name) load_counts = self._load_counts load_counts[name] += 1 make_line: Callable[[str], Union[str, DelayReplaceLine]] = identity indirect_indexing = self.is_indirect_indexing(index) original_index = index indexing = self.indexing(index, block_ptr=True) has_rindex = indexing.has_rindex() has_tmpmask = indexing.has_tmpmask() # Keep the variable in cache if were going to reuse it. Equiv., if any of the following hold # 1) We are doing broadcasting # 2) It is a non-coalesced load. The intuition is that if it's # non-coalesced, we will likely load each element multiple times in # practice. # 3) It will be used later and it won't be CSE'd. 
Equiv., if all the following hold # 3.1) We are in a reduction loop # 3.2) Its not its last use # 3.3) This load will not be lifted to the body # is_coalesced = any( i == 1 for i in self.get_strides_of_load(original_index).values() ) if self.is_broadcasted(original_index): ep = ", eviction_policy='evict_last'" elif not is_coalesced: ep = ", eviction_policy='evict_last'" elif self.inside_reduction and self.range_trees[-1].is_loop: def decide_later(): if load_counts[name] > expected_count and ( has_rindex or indirect_indexing ): return "evict_last" return "evict_first" expected_count = load_counts[name] ep = ", eviction_policy='<EP>'" make_line = functools.partial(DelayReplaceLine, "<EP>", decide_later) else: ep = "" if (has_tmpmask or has_rindex) and indexing.has_mask(): if self._load_other: other = f", other={constant_repr(self._load_other)}" else: other = ", other=0.0" else: other = "" advance_block_ptr = None append_broadcast = None dtype = V.graph.get_dtype(name) if should_unwrap_unspec_arg(name): line = var else: if isinstance(indexing, BlockPtrOptions): block_ptr, advance_block_ptr, other = self.codegen_block_ptr( name, var, indexing, other ) line = f"tl.load({block_ptr}{other}{ep})" line = indexing.codegen_broadcast_and_reshape( line, indexing.block_shape, indexing.final_shape, True ) elif isinstance(original_index, sympy.Integer): line = f"tl.load({var} + ({original_index}))" append_broadcast = indexing.expand_str else: line = f"tl.load({var} + ({indexing.index_str}), {indexing.mask_str}{ep}{other})" if ( dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32 ): line += ".to(tl.float32)" dtype = torch.float32 if dtype == torch.bool and torch.version.hip is None: # Workaround for https://github.com/openai/triton/issues/2151 # tl.load returns int8 when loading from pointer to int1 # NOTE: Currently causes hangs on bool UTs for ROCm line += ".to(tl.int1)" dtype = torch.bool load_buffer = self.get_load_buffer(indexing) result_var = self.cse.generate(load_buffer, make_line(line), dtype=dtype) if result_var.use_count > 1: load_counts[name] -= 1 # don't double count cache hit assert isinstance(result_var, TritonCSEVariable) result_var.mask_vars = indexing.mask_vars # type: ignore[assignment] if append_broadcast: line = f"tl.broadcast_to({result_var}, {append_broadcast})" result_var = self.cse.generate(load_buffer, line, dtype=dtype) if advance_block_ptr: load_buffer.writeline(advance_block_ptr) if not self.inside_reduction or (not indexing.has_rmask() and not has_rindex): self.outside_loop_vars.add(result_var) return result_var def store( self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None ) -> None: var = self.args.output(name) original_index = index indexing = self.indexing(index, dense_indexing=True, block_ptr=mode is None) # Guard against write-after-read corruption in triton. # See # https://github.com/openai/triton/issues/1615 # This triton bug means that a load which is broadcasted over multiple # warps may see the result of a store that happens later in the triton # program. The workaround is to add a barrier before storing, which # enforces that all warps have already read the data. 
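        # In the generated source this workaround appears as a bare
        #     tl.debug_barrier()
        # line right before the corresponding tl.store(...), emitted by the
        # inplace/broadcast check just below.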
is_inplace = name in self.args.inplace_buffers is_broadcasted = self.is_broadcasted(original_index) if is_inplace and is_broadcasted: self.stores.writeline(DeferredLine(name, "tl.debug_barrier()")) advance_block_ptr = None if isinstance(indexing, BlockPtrOptions): block_ptr, advance_block_ptr, other = self.codegen_block_ptr( name, var, indexing ) # block_ptr stores don't do implicit casting line = self.codegen_block_ptr_store_line( name, indexing, block_ptr, value, other ) elif mode is None: line = f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})" elif mode == "atomic_add": line = f"tl.atomic_add({var} + ({indexing.index_str}), {value}, {indexing.mask_str}, sem='relaxed')" else: raise NotImplementedError(f"store mode={mode}") exit_stack = contextlib.ExitStack() if not self.inside_reduction and self.cooperative_reduction: exit_stack.enter_context(self.guard_cooperative_store(name, self.stores)) self.stores.writeline(DeferredLine(name, line)) if advance_block_ptr: self.stores.writeline(advance_block_ptr) if not self.inside_reduction: self.outside_loop_vars.add(value) exit_stack.close() def guard_cooperative_store(self, name, buffer): """ For cooperative reductions only one thread block should write out the result. We rotate which thread block does each write for better parallelism """ idx = self.cooperative_reduction_workspace_cache.increment_store_count() buffer.writeline(DeferredLine(name, f"if rsplit_id == ({idx} % RSPLIT):")) return buffer.indent() def bucketize( self, values: CSEVariable, boundaries: Tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: CSEVariable, indexing_dtype: torch.dtype, right: bool, sorter: Optional[Tuple[str, sympy.Expr]] = None, sorter_indices: Optional[CSEVariable] = None, ) -> CSEVariable: """ See [Note: Inductor bucketize op] """ # Triton performance for bucketize_binary_search is much better when the number # of threads equals the number of elements. # If we're trying to use a bucketize kernel, we should make sure that an # autotuning config with num_elements_per_warp=(warp_size) exists. 
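# Reference model for the bucketize lowering above, in plain Python. This is an
# illustrative stand-in for triton_helpers.bucketize_binary_search, assuming the
# `right` flag follows torch.bucketize semantics: right=False behaves like
# bisect_left over the sorted boundaries, right=True like bisect_right.
import bisect


def bucketize_reference(values, boundaries, right=False):
    find = bisect.bisect_right if right else bisect.bisect_left
    return [find(boundaries, v) for v in values]


# Buckets for boundaries [1, 3, 9]: (-inf, 1], (1, 3], (3, 9], (9, inf)
assert bucketize_reference([0, 1, 2, 5, 10], [1, 3, 9]) == [0, 0, 1, 2, 3]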
self.autotune_hints.add(AutotuneHint.ONE_ELEMENT_PER_THREAD) boundaries_ptr = self.args.input(boundaries[0]) boundary_size = self.index_to_str(boundaries[1]) boundaries_underlying_numel = self.index_to_str(boundaries[2]) boundary_stride = self.index_to_str(boundaries[3]) sorter_ptr = self.args.input(sorter[0]) if sorter else "None" sorter_stride = self.index_to_str(sorter[1]) if sorter else "None" block_size = self.dense_size_str() if indexing_dtype == torch.int32: triton_dtype = "tl.int32" elif indexing_dtype == torch.int64: triton_dtype = "tl.int64" else: raise NotImplementedError( "Bucketize only supports indexing with int32 and int64" ) result = self.cse.generate( self.compute, f"triton_helpers.bucketize_binary_search({values}, " f"{boundaries_ptr}, {boundary_size}, {boundaries_underlying_numel}, {boundary_stride}, " f"{boundary_indices}, " f"{triton_dtype}, " f"{right}, " f"{sorter_ptr}, {sorter_stride}, " f"{sorter_indices}, " f"{block_size}, " ")", dtype=indexing_dtype, # type: ignore[attr-defined] ) return result def reduction_resize(self, value): ndims = self.triton_tensor_ndim() if ndims == 1: return f"triton_helpers.promote_to_tensor({value})" sizes = [":"] * ndims sizes[-1] = "None" return f"{value}[{', '.join(sizes)}]" def reduction( self, dtype: torch.dtype, src_dtype: torch.dtype, reduction_type: ReductionType, value: Union[CSEVariable, Tuple[CSEVariable, ...]], ) -> Union[CSEVariable, Tuple[CSEVariable, ...]]: assert self.inside_reduction masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) if self._load_mask: masks.append(self._load_mask) reduction_range_prefix = self.range_trees[-1].prefix # Say we have # tmp0 = ops.constant(1, torch.int64) # tmp1 = ops.reduction(torch.int64, torch.int64, "sum", tmp0) # tmp0 in the triton code is either a scalar, or single-element tensor # so if we emit tl.sum directly, it will only give 1 instead of RBLOCK * 1 # To avoid this, we broadcast to the expected shape first. 
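# Plain-Python analogy for the broadcast noted above (illustrative only): summing
# the scalar constant directly yields 1, while summing its RBLOCK-shaped broadcast
# yields RBLOCK, which is what the emitted tl.sum must produce.
RBLOCK = 8
scalar = 1
broadcasted = [scalar] * RBLOCK   # stands in for tl.broadcast_to(tmp0, [RBLOCK])
assert sum([scalar]) == 1
assert sum(broadcasted) == RBLOCK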
dense_size_str = self.dense_size_str() value = self._map_tuple_or_scalar( lambda v: self.cse.generate( self.compute, f"tl.broadcast_to({v}, {dense_size_str})", dtype=v.dtype, ), value, ) dim: int root_op: str def final_reduction(value): use_helper = reduction_type in {"any", "max", "min", "prod"} module = "triton_helpers" if use_helper else "tl" if reduction_type in {"max", "min"}: return self.reduction_resize( f"{module}.{reduction_type}2({value}, {dim})" ) return self.reduction_resize(f"{module}.{reduction_type}({value}, {dim})") def final_argreduce(buffer, result_var, value, index): buffer.splice( f"""\ {result_var}_val, {result_var}_idx = triton_helpers.{root_op}_with_index({value}, {index}, {dim}) {result_var} = {self.reduction_resize(f'{result_var}_idx')} """ ) cache_key = (src_dtype, reduction_type, value) if cache_key in self.cse.reduction_cache: return self.cse.reduction_cache[cache_key] dim = self.triton_tensor_ndim() - 1 acc_type = triton_acc_type(src_dtype) torch_acc_type = upcast_acc_dtype(src_dtype) result_var: Any = self.cse.newvar(dtype=torch_acc_type) result_var.mask_vars = OrderedSet( var for var in masks if not prefix_is_reduction(var[0]) ) cond = " & ".join(masks) def where_cond(tval, fval): if not cond: return tval return TritonKernelOverrides.where(cond, tval, fval) if self.persistent_reduction: default = ir.Reduction.default_value(reduction_type, src_dtype) default = self._map_tuple_or_scalar(constant_repr, default) def _mask_value(value, default): return self.cse.generate( self.compute, where_cond(value, default), dtype=value.dtype ) if isinstance(value, tuple): masked_value = [_mask_value(v, d) for v, d in zip(value, default)] else: masked_value = _mask_value(value, default) if reduction_type in {"argmax", "argmin"}: accumulator_index = str( self.cse.generate( self.compute, f"tl.broadcast_to({reduction_range_prefix}index, {masked_value}.shape)", dtype=torch.int64, ) ) root_op = {"argmax": "max", "argmin": "min"}[reduction_type] final_argreduce( self.compute, result_var, masked_value, accumulator_index ) elif reduction_type == "welford_reduce": if self.cooperative_reduction: # cooperative reductions require full welford for correctness result_var = self.welford_reduce( result_var, reduction_type, value, where_cond, acc_type, dtype ) else: # For persistent reductions, don't bother with # welford's algorithm since it uses more registers, and # taking two reductions doesn't increase memory usage. 
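# A minimal model of the trade-off mentioned above (illustrative, not the Inductor
# helpers): the persistent fallback computes mean/variance with two ordinary
# reductions, whereas Welford's combine merges partial (mean, M2, n) triples
# without revisiting the data.
def two_pass_mean_m2(xs):
    n = len(xs)
    mean = sum(xs) / n
    m2 = sum((x - mean) ** 2 for x in xs)
    return mean, m2, n


def welford_combine(a, b):
    mean_a, m2_a, n_a = a
    mean_b, m2_b, n_b = b
    n = n_a + n_b
    delta = mean_b - mean_a
    mean = mean_a + delta * n_b / n
    m2 = m2_a + m2_b + delta * delta * n_a * n_b / n
    return mean, m2, n


xs = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
assert welford_combine(two_pass_mean_m2(xs[:3]), two_pass_mean_m2(xs[3:])) == two_pass_mean_m2(xs)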
result_var = self.welford_reduce_fallback(dtype, value) elif reduction_type == "welford_combine": mean, m2, weight = masked_value welford = f"triton_helpers.welford({mean}, {m2}, {weight}, {dim})" mean, m2, weight = (self.cse.newvar(dtype=dtype) for _ in range(3)) self.compute.writeline(f"{mean}, {m2}, {weight} = {welford}") result_var = tuple( self.cse.generate( self.compute, self.reduction_resize(var_name), dtype=dtype ) for var_name in (mean, m2, weight) ) else: result_var = self.cse.generate( self.compute, final_reduction(masked_value), dtype=dtype ) else: accumulator = self.cse.namedvar(f"_{result_var}", dtype=torch_acc_type) default = ir.Reduction.default_accumulator(reduction_type, src_dtype) default = self._map_tuple_or_scalar(constant_repr, default) if not isinstance(default, tuple): self.body.writeline( f"{accumulator} = tl.full({self.dense_size_str()}, {default}, {acc_type})" ) if reduction_type in {"argmax", "argmin"}: accumulator_index = f"_{result_var}_index" long_max = torch.iinfo(torch.int64).max self.body.writeline( f"{accumulator_index} = tl.full({self.dense_size_str()}, {long_max}, tl.int64)" ) root_op = {"argmax": "max", "argmin": "min"}[reduction_type] self.compute.splice( f"""\ {accumulator}_next, {accumulator_index}_next = triton_helpers.{root_op}imum_with_index( {accumulator}, {accumulator_index}, {value}, {reduction_range_prefix}index ) {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} {accumulator_index} = {where_cond(f'{accumulator_index}_next', accumulator_index)} """ ) final_argreduce( self.post_loop_combine, result_var, accumulator, accumulator_index ) elif is_welford_reduction(reduction_type): result_var = self.welford_reduce( result_var, reduction_type, value, where_cond, acc_type, dtype ) else: combine_fn = ir.get_reduction_combine_fn(reduction_type, src_dtype) updated = combine_fn(accumulator, value) self.compute.writeline( f"{accumulator} = {where_cond(updated, accumulator)}" ) if src_dtype == torch.bool: # This is only really used for aten.any. 
It changes the # final reduction of a non-persistent reduction from # tmp5 = triton_helpers.max(_tmp5, 1)[:, None] # to # tmp5 = triton_helpers.max(_tmp5.to(tl.int8), 1)[:, None].to(tl.int1) # which is needed because tl.reduce doesn't support tl.int1 accumulator_casted_str = f"{accumulator}.to(tl.int8)" result_type = triton_compute_type(dtype) self.post_loop_combine.writeline( f"{result_var} = {final_reduction(accumulator_casted_str)}.to({result_type})" ) else: self.post_loop_combine.writeline( f"{result_var} = {final_reduction(accumulator)}" ) if self.cooperative_reduction: exit_stack = contextlib.ExitStack() for buf in (self.post_loop_combine, self.post_loop_store): # only do cooperative reduction combines if we have more than one thread block buf.writeline("if RSPLIT > 1:") exit_stack.enter_context(buf.indent()) if reduction_type in {"argmax", "argmin"}: self.post_loop_combine.writeline( f"{result_var}_bval = {self.reduction_resize(f'{result_var}_val')}" ) peer_val = self.codegen_cooperative_reduction_peer_combine( f"{result_var}_bval", src_dtype ) peer_idx = self.codegen_cooperative_reduction_peer_combine( result_var, dtype ) final_argreduce(self.post_loop_store, result_var, peer_val, peer_idx) elif is_welford_reduction(reduction_type): assert reduction_type == "welford_reduce" result_mean, result_m2, result_weight = result_var peer_mean = self.codegen_cooperative_reduction_peer_combine( result_mean, upcast_acc_dtype(src_dtype) ) peer_m2 = self.codegen_cooperative_reduction_peer_combine( result_m2, upcast_acc_dtype(src_dtype) ) peer_weight = self.codegen_cooperative_reduction_peer_combine( result_weight, upcast_acc_dtype(src_dtype) ) self.welford_reduce_final_reduction( self.post_loop_store, result_mean, result_m2, result_weight, peer_mean, peer_m2, peer_weight, dim, ) else: peers = self.codegen_cooperative_reduction_peer_combine( result_var, upcast_acc_dtype(src_dtype) ) self.post_loop_store.writeline( f"{result_var} = {final_reduction(peers)}" ) exit_stack.close() self.cse.reduction_cache[cache_key] = result_var if isinstance(result_var, tuple): assert all(isinstance(x, TritonCSEVariable) for x in result_var) self.outside_loop_vars |= OrderedSet(result_var) else: assert isinstance(result_var, TritonCSEVariable) self.outside_loop_vars.add(result_var) return result_var def welford_reduce( self, result_var, reduction_type, value, where_cond, acc_type, dtype ): """Helper to codegen a welford reduction""" dim = self.triton_tensor_ndim() - 1 accumulator = f"{result_var}_mean" accumulator_m2 = f"{result_var}_m2" accumulator_weight = f"{result_var}_weight" self.body.writeline( f"{accumulator} = tl.zeros({self.dense_size_str()}, {acc_type})" ) self.body.writeline( f"{accumulator_m2} = tl.zeros({self.dense_size_str()}, {acc_type})" ) self.body.writeline( f"{accumulator_weight} = tl.zeros({self.dense_size_str()}, {acc_type})" ) if reduction_type == "welford_combine": mean, m2, weight = value self.compute.splice( f"""\ {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_combine( {accumulator}, {accumulator_m2}, {accumulator_weight}, {mean}, {m2}, {weight} ) """ ) else: assert reduction_type == "welford_reduce" self.compute.splice( f"""\ {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_reduce( {value}, {accumulator}, {accumulator_m2}, {accumulator_weight}, roffset == 0 ) """ ) self.compute.splice( f"""\ {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} {accumulator_m2} = 
{where_cond(f'{accumulator_m2}_next', accumulator_m2)} {accumulator_weight} = {where_cond(f'{accumulator_weight}_next', accumulator_weight)} """ ) result_mean = result_var result_m2 = self.cse.newvar(dtype=dtype) result_weight = self.cse.newvar(dtype=dtype) return self.welford_reduce_final_reduction( self.post_loop_combine, result_mean, result_m2, result_weight, accumulator, accumulator_m2, accumulator_weight, dim, ) def welford_reduce_final_reduction( self, buf, result_mean, result_m2, result_weight, accumulator, accumulator_m2, accumulator_weight, dim, ): """Helper to codegen call to triton_helpers.welford""" buf.splice( f"""\ {result_mean}_tmp, {result_m2}_tmp, {result_weight}_tmp = triton_helpers.welford( {accumulator}, {accumulator_m2}, {accumulator_weight}, {dim} ) {result_mean} = {self.reduction_resize(f'{result_mean}_tmp')} {result_m2} = {self.reduction_resize(f'{result_m2}_tmp')} {result_weight} = {self.reduction_resize(f'{result_weight}_tmp')} """ ) return result_mean, result_m2, result_weight def max_rsplit(self): if self.fixed_config: return self.fixed_config["RSPLIT"] return TRITON_MAX_RSPLIT def codegen_cooperative_reduction_peer_combine(self, result_var, dtype): """ Generate code to save a [XBLOCK, RSPLIT] temporary workspace, where each thread block writes a different column. After the barrier, every thread block loads the completed value so that it can compute the final value independently. """ xnumel = self.numels["x"] mask = "xindex < xnumel" if xnumel != 1 and not self.no_x_dim else None expand = "" if self.no_x_dim else "[None,:]" nbytes = xnumel * dtype.itemsize * self.max_rsplit() ws_name, ws_offset = self.cooperative_reduction_workspace_cache.allocate(nbytes) self.post_loop_combine.splice( f""" {result_var}_ws = ({ws_name} + {self.index_to_str(ws_offset)}).to(tl.pointer_type({triton_type(dtype)})) tl.store({result_var}_ws + (xindex * RSPLIT + rsplit_id), {result_var}, {mask}) """, strip=True, ) self.post_loop_store.writeline( f"{result_var}_peers = tl.load({result_var}_ws + (xindex * RSPLIT + tl.arange(0, RSPLIT){expand}), " f"{mask}, eviction_policy='evict_first')" ) return f"{result_var}_peers" def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable): assert self.inside_reduction self.inside_reduction = False indexing = self.indexing(index, block_ptr=True) self.inside_reduction = True var = self.args.output(name) exit_stack = contextlib.ExitStack() if self.cooperative_reduction: exit_stack.enter_context( self.guard_cooperative_store(name, self.post_loop_store) ) if isinstance(indexing, BlockPtrOptions): self.post_loop_store.writeline( DeferredLine( name, self.codegen_block_ptr_store_line( name, indexing, indexing.format(var), value, f", boundary_check={indexing.boundary_check()!r}", ), ) ) else: assert isinstance(indexing, IndexingOptions) self.post_loop_store.writeline( DeferredLine( name, f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})", ) ) exit_stack.close() def _lift_helper(self, fn, num_args) -> str: # Lift IR function for scan operations into a triton function # in the global namespace helper = IndentedBuffer() helper.writeline("
alexanderb14/pytorch
torch/_inductor/codegen/triton.py
https://github.com/alexanderb14/pytorch/blob/8da4224042665686de22f8e351a0b42bfa42cab8/torch/_inductor/codegen/triton.py
# mypy: allow-untyped-defs from __future__ import annotations import collections import contextlib import dataclasses import functools import itertools import logging import os import re import textwrap from functools import lru_cache from typing import ( Any, Callable, cast, Dict, Iterable, List, Optional, Sequence, Tuple, Type, TYPE_CHECKING, Union, ) import sympy from sympy.printing.precedence import PRECEDENCE import torch import torch._logging from torch._dynamo.utils import identity, preserve_rng_state from torch._prims_common import is_integer_dtype from torch.utils._ordered_set import OrderedSet from torch.utils._sympy.functions import CeilDiv, FloorDiv, ModularIndexing from torch.utils._triton import has_triton_package from ...utils._sympy.symbol import free_symbol_is_type, prefix_str, symbol_is_type, SymT from ...utils._sympy.value_ranges import ValueRanges from .. import config, ir, metrics from ..codecache import code_hash, get_path, PyCodeCache from ..runtime.benchmarking import benchmarker from ..runtime.hints import ( AutotuneHint, DeviceProperties, TRITON_MAX_BLOCK, TRITON_MAX_RSPLIT, ) from ..runtime.runtime_utils import get_max_y_grid, next_power_of_2 from ..runtime.triton_heuristics import ( cooperative_reduction_grid, grid as default_grid_fn, ) from ..scheduler import BaseSchedulerNode, FusedSchedulerNode, Scheduler, SchedulerNode from ..utils import ( DelayReplaceLine, get_bounds_index_expr, get_fused_kernel_name, get_kernel_metadata, is_welford_reduction, Placeholder, sympy_subs, upcast_compute_type, ) from ..virtualized import _ops as ops, OpsHandler, ReductionType, StoreMode, V from ..wrapper_benchmark import get_kernel_category_by_source_code from .block_analysis import BlockPatternMatcher from .common import ( BackendFeature, CSE, CSEVariable, DeferredLine, IndentedBuffer, OpOverrides, PythonPrinter, SizeArg, TensorArg, WorkspaceArg, WorkspaceZeroMode, ) from .simd import ( constant_repr, IterationRanges, IterationRangesEntry, IterationRangesRoot, pexpr, prefix_is_reduction, SIMDKernel, SIMDScheduling, ) from .triton_utils import ( config_of, should_unwrap_unspec_arg, signature_of, signature_to_meta, ) if TYPE_CHECKING: from ..ir import IRNode log = logging.getLogger(__name__) perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints") schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") fusion_log = torch._logging.getArtifactLogger(__name__, "fusion") @lru_cache(None) def gen_attr_descriptor_import(): """ import AttrsDescriptor if the triton version is new enough to have this class defined. """ if not has_triton_package(): return "" import triton.compiler.compiler # Note: this works because triton.compiler.compiler imports AttrsDescriptor from triton.backends.compiler # When support for the legacy AttrsDescriptor is removed then this import path should be changed. 
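# The same version-guard idea in isolation (an illustrative generic helper, not
# part of Inductor): probe the imported module for the attribute and fall back to
# an empty import line when the installed Triton does not expose it.
import importlib


def optional_import_line(module_name: str, attr: str) -> str:
    try:
        module = importlib.import_module(module_name)
    except ImportError:
        return ""
    if hasattr(module, attr):
        return f"from {module_name} import {attr}"
    return ""


# e.g. optional_import_line("triton.compiler.compiler", "AttrsDescriptor")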
if hasattr(triton.compiler.compiler, "AttrsDescriptor"): return "from triton.compiler.compiler import AttrsDescriptor" else: return "" @lru_cache(None) def gen_common_triton_imports(): imports = IndentedBuffer() imports.splice( """ import triton import triton.language as tl """ ) if attr_desc := gen_attr_descriptor_import(): imports.writeline(attr_desc) imports.splice( """ from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties """ ) return imports.getvalue() class TritonSymbols: """ Stores sympy.Symbol instances and constants associated with triton codegen. """ block_offsets = { symt: sympy.Symbol(f"{prefix_str[symt]}offset", integer=True, nonnegative=True) for symt in [SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK, SymT.RINDEX] } block_sizes = { symt: sympy.Symbol( f"{prefix_str[symt].upper()}BLOCK", integer=True, positive=True ) for symt in [SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK, SymT.RINDEX] } @classmethod def get_block_size(cls, tree: IterationRanges) -> sympy.Symbol: return cls.block_sizes[tree.symt] @classmethod def get_block_offset(cls, tree: IterationRanges) -> sympy.Symbol: return cls.block_offsets[tree.symt] @dataclasses.dataclass class IndexingOptions: index_str: str mask_vars: OrderedSet[str] mask_str: str expand_str: Optional[str] _has_rindex: bool index: sympy.Expr def has_mask(self): return bool(self.mask_vars) def has_indirect(self): return free_symbol_is_type(self.index, SymT.TMP) def has_rindex(self): return self._has_rindex def has_tmpmask(self): return "tmp" in self.mask_str def has_rmask(self): return "rmask" in self.mask_str @dataclasses.dataclass class BlockPtrOptions: params: BlockParameters constant_offset: sympy.Expr order: List[int] mask_vars: OrderedSet[str] broadcast_shape: Sequence[sympy.Expr] broadcasting_dims: List[bool] final_shape: Sequence[sympy.Expr] _boundary_check: Optional[List[int]] = None @property def shape(self) -> List[sympy.Expr]: return self.params.shape @property def block_shape(self) -> List[sympy.Expr]: return self.params.block_shape @property def strides(self) -> List[sympy.Expr]: return self.params.strides @property def offsets(self) -> List[sympy.Expr]: return self.params.offsets def codegen_broadcast_and_reshape( self, value: str, initial_shape: Sequence[sympy.Expr], final_shape: Sequence[sympy.Expr], allow_implicit: bool, ) -> str: """ Generate a broadcast and a reshape for the block pointer. This restores stride-0 dimensions which were removed from the block pointer. """ # Reshape to add singletons. pre_broadcast_shape = [ sympy.S.One if is_broadcasting else dim for dim, is_broadcasting in zip( self.broadcast_shape, self.broadcasting_dims ) ] value = triton_reshape(value, initial_shape, pre_broadcast_shape) # Broadcast singletons. # For loads, we can often implicitly broadcast singleton dimensions. # We need an explicit broadcast for stores, or if the final reshape does more # than add singletons. sizevars = V.graph.sizevars require_broadcast = any(self.broadcasting_dims) and ( len(pre_broadcast_shape) != len(final_shape) or any( not ( sizevars.statically_known_equals(pre_dim, 1) or sizevars.statically_known_equals(pre_dim, post_dim) ) for pre_dim, post_dim in zip(pre_broadcast_shape, final_shape) ) ) if not allow_implicit or require_broadcast: value = f"tl.broadcast_to({value}, {V.kernel.index_to_str(self.broadcast_shape)})" # Reshape to the final shape. 
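# Shape bookkeeping for the broadcast-and-reshape above, modeled with NumPy
# (illustrative; XBLOCK/RBLOCK stand in for the Triton block sizes): a stride-0
# dimension dropped from the block pointer is restored by reshaping to add a
# singleton and then broadcasting to the final block shape.
import numpy as np

XBLOCK, RBLOCK = 4, 8
loaded = np.arange(XBLOCK)                                   # block pointer yields shape [XBLOCK]
pre_broadcast = loaded.reshape(XBLOCK, 1)                    # tl.reshape adds the singleton back
restored = np.broadcast_to(pre_broadcast, (XBLOCK, RBLOCK))  # tl.broadcast_to fills the stride-0 dim
assert restored.shape == (XBLOCK, RBLOCK)
assert (restored[:, 0] == loaded).all()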
value = triton_reshape(value, self.broadcast_shape, final_shape) return value @staticmethod def create( *, params: BlockParameters, constant_offset: sympy.Expr, range_trees: List[IterationRangesEntry], mask_vars: OrderedSet[str], get_max_block: Callable[[str], int], ) -> BlockPtrOptions: """Helper to create a BlockPtrOptions instance""" sizevars = V.graph.sizevars def lookup_size(exprs: Iterable[sympy.Expr]) -> List[sympy.Expr]: return [sizevars.lookup_precomputed_size(expr) for expr in exprs] # Look up precomputed sizes params.shape = lookup_size(params.shape) params.strides = lookup_size(params.strides) # Strip out dimensions of stride 0. # These will be restored with tl.broadcast_to. broadcasting_dims = [ sizevars.statically_known_equals(stride, 0) for stride in params.strides ] # Strip out dimensions of size 1. # These will be restored by tl.reshape. singleton_dims = [ sizevars.statically_known_equals(dim, 1) for dim in params.block_shape ] if all(singleton_dims): # Handle a pure singletons, e.g. [1, 1] singleton_dims[-1] = False # Record the post-broadcast shape before broadcasting dims are removed. # The pre-broadcast shape is identical to this, except broadcasting dims are # replaced with 1. broadcast_shape = [ dim for dim, is_singleton in zip(params.block_shape, singleton_dims) if not is_singleton ] # Combine all removable dims. removable_dims = [any(dims) for dims in zip(singleton_dims, broadcasting_dims)] def remove_dims(it): """Removes any broadcasting or singleton dims from a given sequence""" return [ item for item, is_removable in zip(it, removable_dims) if not is_removable ] # Drop removable dimensions from the input. params = BlockParameters( **{key: remove_dims(val) for key, val in dataclasses.asdict(params).items()} ) # Compute the final shape, adjusting for special kernel types. final_shape = [TritonSymbols.get_block_size(tree) for tree in range_trees] if V.kernel.no_x_dim: assert range_trees[0].prefix == "x" final_shape.pop(0) if ( not V.kernel.inside_reduction and len(params.strides) == len(V.kernel.numels) - 1 and V.kernel.numels["r"] != 1 ): # Need to expand rank by 1 to match rank when self.inside_reduction=True final_shape.append(sympy.S.One) result = BlockPtrOptions( params=params, constant_offset=V.graph.sizevars.lookup_precomputed_size(constant_offset), order=list(reversed(range(len(params.shape)))), mask_vars=mask_vars, final_shape=final_shape, broadcast_shape=broadcast_shape, broadcasting_dims=broadcasting_dims, ) result.compute_boundary_check(get_max_block) return result def replace_roffset(self, expr: sympy.Expr, replacement: sympy.Expr) -> sympy.Expr: """ Replaces instances of roffset with the new expression. 
""" roffset = TritonSymbols.block_offsets[SymT.RINDEX] return sympy_subs(expr, {roffset: replacement}) def format(self, name: str, roffset=True) -> str: """ Codegen a call to tl.make_block_ptr() Args: name: variable name for pointer roffset: should roffset be included in offsets=..., for use with tl.advance() Returns: "tl.make_block_ptr(...)" """ f = V.kernel.index_to_str offsets = [*self.offsets] if not roffset: offsets = [self.replace_roffset(offset, sympy.S.Zero) for offset in offsets] args = [ ( f"{name} + ({f(self.constant_offset)})" if self.constant_offset != 0 else name ), f"shape={f(self.shape)}", f"strides={f(self.strides)}", f"block_shape={f(self.block_shape)}", f"order={f(self.order)}", f"offsets={f(offsets)}", ] return f"tl.make_block_ptr({', '.join(args)})" def compute_boundary_check(self, get_max_block: Callable[[str], int]) -> None: """List of indices to pass to tl.load(boundary_check=...)""" sizevars = V.graph.sizevars # Substitute maximum block sizes in shape expressions. # This works in multiple_of checks because block sizes are powers of 2. block_to_max: Dict[sympy.Expr, Any] = { block_size: get_max_block(prefix_str[symt]) for symt, block_size in TritonSymbols.block_sizes.items() } self._boundary_check = [ idx for idx in range(len(self.shape)) if ( not sizevars.statically_known_equals(self.strides[idx], sympy.S.Zero) and not sizevars.statically_known_multiple_of( self.shape[idx], self.block_shape[idx] ) and not sizevars.statically_known_multiple_of( self.shape[idx], sympy_subs(self.block_shape[idx], block_to_max) ) and not ( V.kernel.no_x_dim and self.block_shape[idx] == TritonSymbols.block_sizes[SymT.XBLOCK] ) ) ] def boundary_check(self): assert self._boundary_check is not None return self._boundary_check def advance_roffset(self): """ Codegen string to pass to tl.advance(name, ...). Advance is the difference between offsets in each loop iteration. To compute it, we replace roffset with multiples of RBLOCK. Since we expect roffset to vary in range(0, rnumel, RBLOCK), the first iteration has roffset=0, while the second has roffset=RBLOCK. 
""" rblock = TritonSymbols.block_sizes[SymT.RINDEX] advance = [ ( self.replace_roffset(offset, rblock) - self.replace_roffset(offset, sympy.S.Zero) ) for offset in self.offsets ] return V.kernel.index_to_str(advance) def has_indirect(self): return False # block_ptr can't do indirect indexing def has_rindex(self) -> bool: return any(free_symbol_is_type(expr, SymT.RINDEX) for expr in self.block_shape) def has_rmask(self): return self.has_rindex() def has_tmpmask(self): return False # block_ptr can't do indirect indexing def has_mask(self): return bool(self.boundary_check()) def triton_reshape( value: str, old_shape: Sequence[sympy.Expr], new_shape: Sequence[sympy.Expr] ): """Workaround https://github.com/openai/triton/issues/2836""" assert isinstance(old_shape, list) and isinstance(new_shape, list) old_shape_str = [V.kernel.index_to_str(shape) for shape in old_shape] new_shape_str = [V.kernel.index_to_str(shape) for shape in new_shape] if old_shape_str == new_shape_str: return value if [s for s in new_shape_str if s != "1"] != old_shape_str: return f"tl.reshape({value}, [{', '.join(new_shape_str)}])" # rewrite to [:, None] syntax, which is less buggy idx = 0 expand = [] for size in new_shape_str: if idx < len(old_shape_str) and size == old_shape_str[idx]: expand.append(":") idx += 1 else: assert size == "1" expand.append("None") assert idx == len(old_shape_str) return f"{value}[{', '.join(expand)}]" # NB: Inheriting from PythonPrinter is somewhat dangerous, because there are a # number of operators which Triton "implements", but in a way that is # inconsistent with Python semantics (and consistent with C semantics). We # must override all of these, or it is potential silent correctness problem class TritonPrinter(PythonPrinter): def _print_TruncToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.trunc({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_Float(self, expr): if config.is_fbcode() and torch.version.hip: ret = f"{expr}" else: ret = f"tl.full([], {expr}, tl.float64)" return ret def _print_ToFloat(self, expr): assert len(expr.args) == 1 s = self.parenthesize(expr.args[0], PRECEDENCE["Atom"] - 0.5) return f"{s}.to(tl.float64)" def _print_PythonMod(self, expr): quot, div = expr.args if quot.is_nonnegative and div.is_nonnegative: return self.stringify(expr.args, " % ", PRECEDENCE["Atom"] - 0.5) quot_s = self._print(quot) div_s = self._print(div) return f"triton_helpers.remainder_integer({quot_s}, {div_s})" def _print_FloorDiv(self, expr): assert expr.is_integer quot, div = expr.args if quot.is_nonnegative and div.is_nonnegative: return self.stringify(expr.args, " // ", PRECEDENCE["Atom"] - 0.5) quot_s = self._print(quot) div_s = self._print(div) return f"triton_helpers.div_floor_integer({quot_s}, {div_s})" # TODO: This is wrong, when lhs, rhs > 2**53, Python does a higher # precision algorithm, which we would need to replicate here def _print_IntTrueDiv(self, expr): return self.stringify(expr.args, " / ", PRECEDENCE["Atom"] - 0.5) # NB: sympy.floor/ceiling produce integers, so we have to do the # conversion to index dtype def _print_floor(self, expr): assert len(expr.args) == 1 return ( f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_FloorToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_ceiling(self, expr): assert len(expr.args) == 1 return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})" def 
_print_CeilToInt(self, expr): assert len(expr.args) == 1 return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})" def _helper_sqrt(self, expr): return f"libdevice.sqrt({self._print(expr)}.to(tl.float32))" def _print_FloatPow(self, expr): return ( f"libdevice.pow({self._print(expr.args[0])}, {self._print(expr.args[1])})" ) _print_PowByNatural = _print_FloatPow def _print_Where(self, expr): c = self.doprint(expr.args[0]) p = self.doprint(expr.args[1]) q = self.doprint(expr.args[2]) return f"tl.where({c}, {p}, {q})" def _print_min_max_helper(self, expr: sympy.Expr, cmp: str) -> str: """ Helper for max/min code genereration. cmp: > or < """ nargs = len(expr.args) if len(expr.args) == 1: return self._print(expr.args[0]) mid = len(expr.args) // 2 cls = type(expr) a = self._print(cls(*expr.args[:mid])) b = self._print(cls(*expr.args[mid:])) # Use a macro so we can propagate constexprs. # https://github.com/triton-lang/triton/issues/3815 a, b = tuple(f"({x})" for x in (a, b)) assert cmp in (">", "<"), f"Unexpected comparator: '{cmp}'" return f"({a} * ({a} {cmp}= {b}) + {b} * ({b} {cmp} {a}))" def _print_Min(self, expr): return self._print_min_max_helper(expr, "<") def _print_Max(self, expr): return self._print_min_max_helper(expr, ">") def _print_Abs(self, expr): assert len(expr.args) == 1 return f"tl_math.abs({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_cos(self, expr): assert len(expr.args) == 1 return f"libdevice.cos(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_cosh(self, expr): assert len(expr.args) == 1 return f"libdevice.cosh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_acos(self, expr): assert len(expr.args) == 1 return f"libdevice.acos(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_sin(self, expr): assert len(expr.args) == 1 return f"libdevice.sin(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_sinh(self, expr): assert len(expr.args) == 1 return f"libdevice.sinh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_asin(self, expr): assert len(expr.args) == 1 return f"libdevice.asin(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_tan(self, expr): assert len(expr.args) == 1 return f"libdevice.tan(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_tanh(self, expr): assert len(expr.args) == 1 return f"libdevice.tanh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_atan(self, expr): assert len(expr.args) == 1 return f"libdevice.atan(({self._print(expr.args[0])}).to(tl.float32))" def _print_RoundToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.llrint({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_RoundDecimal(self, expr): assert len(expr.args) == 2 number, ndigits = expr.args if number.is_integer: # ndigits < 0 should have been filtered by the sympy function assert ndigits < 0 raise ValueError( f"For integer inputs, only non-negative ndigits are currently supported, but got {ndigits}." 
) number_str = self.parenthesize(number, PRECEDENCE["Mul"]) return f"libdevice.nearbyint(1e{ndigits} * {number_str}) * 1e{-ndigits}" texpr = TritonPrinter().doprint # correct cases where Triton types names don't match PyTorch _triton_type_mapping = { "tl.bool": "tl.int1", "tl.float8_e4m3fn": "tl.float8e4nv", "tl.float8_e5m2": "tl.float8e5", "tl.float8_e4m3fnuz": "tl.float8e4b8", "tl.float8_e5m2fnuz": "tl.float8e5b16", } _triton_type_re = re.compile(r"^.*[.]") def triton_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type""" triton_type_name = _triton_type_re.sub("tl.", str(dtype)) return _triton_type_mapping.get(triton_type_name, triton_type_name) def triton_compute_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type and upcast [b]float16 to float32""" return triton_type(upcast_compute_type(dtype)) def _get_primitive_bitwidth(dtype: torch.dtype) -> int: """Number of bits of triton_compute_type()""" dtype = upcast_compute_type(dtype) itemsize = getattr(dtype, "itemsize", None) if itemsize: return itemsize * 8 else: return -1 def triton_store_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type, with fix for storing tl.bool""" if dtype == torch.bool: dtype = torch.int8 return triton_type(dtype) def upcast_acc_dtype(dtype: torch.dtype) -> torch.dtype: """Implicit upcasts used for Triton reduction types""" if is_integer_dtype(dtype) and dtype.is_signed and dtype.itemsize <= 4: return torch.int32 return upcast_compute_type(dtype) def triton_acc_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type, with reduction upcasts""" return triton_compute_type(upcast_acc_dtype(dtype)) class TritonCSEVariable(CSEVariable): def __init__(self, name, bounds: ValueRanges[Any], dtype: torch.dtype) -> None: super().__init__(name, bounds, dtype) # We'll use this to track which masks the variable needs when used for indirect indexing self.mask_vars: OrderedSet[str] = OrderedSet() assert dtype is not None, "TritonCSEVariable must have dtype" def update_on_args(self, name, args, kwargs): for arg in args: if isinstance(arg, TritonCSEVariable): self.mask_vars.update(arg.mask_vars) elif isinstance(arg, sympy.Symbol) and arg.name[0] in "xyr": # most of the time index vars don't need masks associated with them # however, when index vars are used to compute indices for indirect reads # those reads should subsequently be masked, self.mask_vars.update({f"{arg.name[0]}mask"}) class TritonOverrides(OpOverrides): """Map element-wise ops to Triton""" @staticmethod def to_dtype( x, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None, use_compute_types=True, ): def _get_min_elements_per_thread( src_dtype: torch.dtype, dst_dtype: torch.dtype ) -> int: if src_dtype == dst_dtype: # No data type conversion is needed. No requirements on min_elem_per_thread. return 0 # fp8 data type conversions has min_elem_per_thread requirements. # Refer to Triton implementations here: # https://github.com/openai/triton/blob/10f59d8ce04052521c1bc0cb3a3f8b98918fc7e3/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp#L10. fp8_dtypes = ( torch.float8_e4m3fn, torch.float8_e5m2, ) # Triton doesn't support type conversions between fp8_e4m3 and fp8_e5m2. assert not ( src_dtype in fp8_dtypes and dst_dtype in fp8_dtypes and src_dtype != dst_dtype ), "Conversions between float8_e5m2 and float8_e4m3fn is not supported!" 
if src_dtype == torch.float8_e5m2 or dst_dtype == torch.float8_e5m2: return 4 if src_dtype == torch.float8_e4m3fn or dst_dtype == torch.float8_e4m3fn: return 2 # No requirements on min_elem_per_thread. return 0 if src_dtype is not None: # Both dtype and src_dtype are set. This is used by torch to(dtype=dtype). # It takes the maximum min_elem_per_thread if there are multiple fp8 conversions # in the same kernel. V.kernel.min_elem_per_thread = max( _get_min_elements_per_thread(src_dtype, dtype), V.kernel.min_elem_per_thread, ) if dtype == torch.bool: return f"({x} != 0)" elif dtype == torch.uint8: # to work around llvm uint conversion semantics # that produces 0's for negative values return f"{x}.to(tl.int8).to(tl.uint8)" if use_compute_types: out_dtype = triton_compute_type(dtype) else: out_dtype = triton_store_type(dtype) return f"{x}.to({out_dtype})" @staticmethod def to_dtype_bitcast(x, dtype: torch.dtype, src_dtype: torch.dtype): triton_dtype = triton_compute_type(dtype) # We may promote float16 or bfloat16 to float32 and cause the # bitwidth of dtype to be different from the input tensor (i.e. float32). # In such as case, we will have to convert the input tensor to # its src_type, perform bitcast, and then convert the bit-casted # tensor back to float to ensure we use values with the right precision. if ( src_dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32 ): triton_src_dtype = str(src_dtype).split(".")[-1] cast_x = f"{x}.to(tl.{triton_src_dtype})" if dtype in (torch.float16, torch.bfloat16): triton_type_name = str(dtype).split(".")[-1] triton_dtype = f"tl.{triton_type_name}" cast_x = f"{cast_x}.to({triton_dtype}, bitcast=True)" if dtype in (torch.float16, torch.bfloat16): return f"{cast_x}.to(tl.float32)" return cast_x else: src_dtype_bitwidth = _get_primitive_bitwidth(src_dtype) target_dtype_bitwidth = _get_primitive_bitwidth(dtype) bitcast = "True" if src_dtype_bitwidth == target_dtype_bitwidth else "False" return f"{x}.to({triton_dtype}, bitcast={bitcast})" @staticmethod def _shaped_constant(value, dtype, shape): type_ = torch._prims_common.dtype_to_type(dtype) triton_val = constant_repr(type_(value)) triton_type = triton_compute_type(dtype) if triton_type == "tl.float32": # Float constants are always f32 in triton return triton_val # NOTE: We use a tensor here in order to get the expected type. # Otherwise, e.g. float64 constants would be trunctated to float32. return f"tl.full({shape}, {triton_val}, {triton_type})" @classmethod def constant(cls, value, dtype): return cls._shaped_constant(value, dtype, shape=[]) @staticmethod def abs(x): return f"tl_math.abs({x})" @staticmethod def libdevice_abs(x): return f"libdevice.abs({x})" @staticmethod def exp(x): return f"tl_math.exp({x})" @staticmethod def libdevice_exp(x): return f"libdevice.exp({x})" @staticmethod def exp2(x): return f"libdevice.exp2({x})" @staticmethod def expm1(x): return f"libdevice.expm1({x})" @staticmethod def sqrt(x): if config.triton.codegen_upcast_to_fp32: return f"libdevice.sqrt({x})" else: needs_upcast = x.dtype in (torch.float16, torch.bfloat16) orig_dtype = triton_type(x.dtype) upcast_string = ".to(tl.float32)" if needs_upcast else "" downcast_string = f".to({orig_dtype})" if needs_upcast else "" return f"libdevice.sqrt({x}{upcast_string}){downcast_string}" @staticmethod def libdevice_sqrt(x): return f"libdevice.sqrt({x})" @staticmethod def relu(x): bug = config.triton.inject_relu_bug_TESTING_ONLY if bug == "compile_error": return "compile error!" 
elif bug == "runtime_error": # NB: this only triggers runtime error as long as input # is not all zero return f'triton_helpers.device_assert_then({x} == 0, "injected assert fail", {x})' elif bug == "accuracy": return f"{x} + 1" elif bug is None: return ops.maximum(ops.constant(0, torch.int32), x) else: raise AssertionError( f"unrecognized config triton.inject_relu_bug_TESTING_ONLY = {bug!r}" ) @staticmethod def minimum(a, b): return f"triton_helpers.minimum({a}, {b})" @staticmethod def maximum(a, b): return f"triton_helpers.maximum({a}, {b})" @staticmethod def where(a, b, c): return f"tl.where({a}, {b}, {c})" @staticmethod def inline_asm_elementwise( *inputs, asm, constraints=None, dtype=torch.float32, is_pure=True, pack=1 ): triton_type = triton_compute_type(dtype) input_refs = ", ".join([str(i) for i in inputs]) if constraints is None: constraints = ", ".join(["=r"] + ["r" for _ in inputs]) return f"tl.inline_asm_elementwise('{asm}', '{constraints}', [{input_refs}], dtype={triton_type}, is_pure={is_pure}, pack={pack})" # noqa: B950 @staticmethod def cos(x): return f"tl_math.cos({x})" @staticmethod def libdevice_cos(x): return f"libdevice.cos({x})" @staticmethod def sin(x): return f"tl_math.sin({x})" @staticmethod def libdevice_sin(x): return f"libdevice.sin({x})" @classmethod def index_expr(cls, expr, dtype): raise NotImplementedError("ops.index_expr not implemented outside a kernel") @staticmethod def masked(mask, body, other): raise NotImplementedError("ops.masked not implemented outside a kernel") @staticmethod def lgamma(x): return f"libdevice.lgamma({x})" @staticmethod def erf(x): return f"libdevice.erf({x})" @staticmethod def cosh(x): return f"libdevice.cosh({x})" @staticmethod def sinh(x): return f"libdevice.sinh({x})" @staticmethod def acos(x): return f"libdevice.acos({x})" @staticmethod def acosh(x): return f"libdevice.acosh({x})" @staticmethod def asin(x): return f"libdevice.asin({x})" @staticmethod def asinh(x): return f"libdevice.asinh({x})" @staticmethod def atan2(x, y): return f"libdevice.atan2({x}, {y})" @staticmethod def atan(x): return f"libdevice.atan({x})" @staticmethod def atanh(x): return f"libdevice.atanh({x})" @staticmethod def copysign(x, y): return f"libdevice.copysign({x}, {y})" @staticmethod def erfc(x): return f"libdevice.erfc({x})" @staticmethod def erfinv(x): return f"libdevice.erfinv({x})" @staticmethod def hypot(x, y): return f"libdevice.hypot({x}, {y})" @staticmethod def log10(x): return f"libdevice.log10({x})" @staticmethod def log2(x): return f"libdevice.log2({x})" @staticmethod def nextafter(x, y): return f"libdevice.nextafter({x}, {y})" @staticmethod def logical_and(a, b): return f"{a} & {b}" @staticmethod def logical_not(a): return f"{a} == 0" @staticmethod def logical_or(a, b): return f"{a} | {b}" @staticmethod def logical_xor(a, b): return f"({a} ^ {b})" @staticmethod def bitwise_and(a, b): return f"{a} & {b}" @staticmethod def bitwise_not(a): return f"~{a}" @staticmethod def bitwise_or(a, b): return f"{a} | {b}" @staticmethod def bitwise_xor(a, b): return f"{a} ^ {b}" @staticmethod def bitwise_left_shift(a, b): return f"{a} << {b}" @staticmethod def bitwise_right_shift(a, b): return f"{a} >> {b}" @staticmethod def rand(seed, offset): offset = f"({offset}).to(tl.uint32)" return f"tl.rand({seed}, {offset})" @staticmethod def randn(seed, offset): offset = f"({offset}).to(tl.uint32)" return f"tl.randn({seed}, {offset})" @staticmethod def randint64(seed, offset, low, high): offset = f"({offset}).to(tl.uint32)" return f"triton_helpers.randint64({seed}, 
{offset}, {low}, {high})" @staticmethod def load_seed(name, offset): raise NotImplementedError("ops.load_seed not implemented outside a kernel") @staticmethod def rsqrt(x): return f"libdevice.rsqrt({x})" @staticmethod def log1p(x): return f"libdevice.log1p({x})" @staticmethod def tan(x): return f"libdevice.tan({x})" @staticmethod def tanh(x): return f"libdevice.tanh({x})" @staticmethod def sigmoid(x): return f"tl.sigmoid({x})" @staticmethod def signbit(x): # XX: This is wrong for the value -0.0 in floating point return ( f"(libdevice.signbit({x}) != 0) if ({x}).dtype is tl.float32 else {x} < 0" ) @staticmethod def fmod(a, b): return f"libdevice.fmod({a}, {b})" @staticmethod def pow(a, b): return f"libdevice.pow({a}, {b})" @staticmethod def log(x): return f"tl_math.log({x})" @staticmethod def libdevice_log(x): return f"libdevice.log({x})" @staticmethod def isinf(x): return f"libdevice.isinf({x}).to(tl.int1)" @staticmethod def isnan(x): return f"libdevice.isnan({x}).to(tl.int1)" @staticmethod def round(x): return f"libdevice.nearbyint({x})" @staticmethod def floor(x): return f"libdevice.floor({x})" @staticmethod def floordiv(a, b): # See the comment in lowering.div_mode. a and b are integer type. # Similar to div_floor_kernel_cuda in pytorch core. # Notice that // in triton behaves as truncdiv instead of floordiv quot = f"{a} // {b}" rem = f"{a} % {b}" return f"tl.where(({a} < 0) != ({b} < 0), tl.where({rem} != 0, {quot} - 1, {quot}), {quot})" @staticmethod def sign(x): z = ops.constant(0, torch.int32) left = ops.to_dtype((ops.lt(z, x)), torch.int8) right = ops.to_dtype((ops.lt(x, z)), torch.int8) sub = ops.sub(left, right) return f"{sub}.to({x}.dtype)" @staticmethod def trunc(x): return f"libdevice.trunc({x})" @staticmethod def truncdiv(a, b): # See the comment in lowering.div_mode. a and b are integer type. # Notice that // in triton behaves as truncdiv instead of floordiv return f"{a} // {b}" @staticmethod def ceil(x): return f"libdevice.ceil({x})" TritonOverrides._initialize_pointwise_overrides("triton") # Use mypy to check protocol implemented correctly def _typecheck_TritonOverrides(h: TritonOverrides) -> OpsHandler[str]: return h class TritonKernelOverrides(TritonOverrides): """Map element-wise ops to Triton within a TritonKernel Unlike TritonOverrides, these assume the code is going to be inserted into the body of the main triton kernel and so it may use indexing and mask variables which are assumed to already be defined in the current scope. """ @classmethod def constant(cls, value, dtype): # NOTE: Cannot use shape=[] as it's not supported by triton-rocm # We could use shape=[1] instead but starting with the correct # ndim avoids extra `tt.expand_dim` ops appearing in the triton IR. ndim = V.kernel.triton_tensor_ndim() shape = [1] * ndim return cls._shaped_constant(value, dtype, shape=shape) @classmethod def index_expr(cls, expr, dtype): indexing = V.kernel.indexing(expr, block_ptr=False) assert isinstance(indexing, IndexingOptions) # Our sympy expr printing casts to the current kernel index dtype. 
# we only respect non int32-int64 dtypes and otherwise use current kernel indexing dtype index_dtype = torch.int32 if V.kernel.index_dtype == "tl.int32" else torch.int64 dtype = dtype if dtype not in (torch.int32, torch.int64) else index_dtype var = V.kernel.cse.generate( V.kernel.compute, indexing.index_str, bounds=get_bounds_index_expr(expr), dtype=dtype, ) if dtype not in (torch.int32, torch.int64): var = V.kernel.cse.generate( V.kernel.compute, cls.to_dtype(var, dtype), dtype=upcast_compute_type(dtype), ) else: # TODO: we are not always consistent in enforcing that the output of the index expr printing # results in the indexing dtype. So if we detect that we have an input which might type promote # to a dtype other than indexing dtype, add a cast. # Trying to avoid dtype = index_dtype for index_var in expr.free_symbols: if symbol_is_type(index_var, SymT.TMP): dtype = torch.promote_types( dtype, V.kernel.cse.varname_map[index_var.name].dtype ) if dtype != index_dtype: var = V.kernel.cse.generate( V.kernel.compute, cls.to_dtype(var, index_dtype), dtype=index_dtype, ) var.mask_vars = indexing.mask_vars return var @staticmethod def masked(mask, body, other): if mask is not None and torch.version.hip is not None: mask = V.kernel.cse.generate( V.kernel.compute, f"{mask}.to(tl.int1)", dtype=torch.bool, ) nodes = body.graph.find_nodes(op="output") assert nodes, "graph for body does not contain an output" need_where = False for node in nodes: for arg in node.args: if arg.target != "load" or should_unwrap_unspec_arg(arg.args[0]): need_where = True value = None if need_where else other with V.kernel.mask_loads(mask, value=value) as new_mask: result = body() if need_where: # Remove once CSEVariables track the dtype if result.bounds.is_bool: other = bool(other) # Take dtype from result to prevent accidental promotion other = V.kernel.cse.generate( V.kernel.compute, f"tl.full({result}.shape, {constant_repr(other)}, {result}.dtype)", bounds=ValueRanges.wrap(other), dtype=result.dtype, ) ret = ops.where(new_mask, result, other) else: ret = result ret.mask_vars.discard(new_mask) return ret @staticmethod def load_seed(name, offset): var = V.kernel.args.input(name) return ( f"tl.load({var} + {V.kernel.args.seed_offset('load_seed_offset', offset)})" ) @staticmethod def frexp(x): cache_key = f"frexp({x})" if cse_val := V.kernel.cse.try_get(cache_key): return cse_val mantissa = V.kernel.cse.newvar(dtype=x.dtype) exponent = V.kernel.cse.newvar(dtype=torch.int32) V.kernel.compute.writeline( f"{mantissa}, {exponent} = triton_helpers.frexp({x})" ) V.kernel.cse.put(cache_key, (mantissa, exponent)) return (mantissa, exponent) # Use mypy to check protocol implemented correctly def _typecheck_TritonKernelOverrides(h: TritonKernelOverrides) -> OpsHandler[str]: return h class HelperFunctions: """An ordered set of helper functions.""" _templates_seen: Dict[str, str] # Template code to function name finalized_helpers: List[str] def __init__(self) -> None: self._templates_seen = {} self.finalized_helpers = [] def add(self, template_code: str, *, base_name="_triton_helper_fn") -> str: """This accepts a function definition with the function name left as a format specifier e.g. @triton.jit def {name}(arg0, arg1): return arg0 + arg1 We add the templated code to the function set and return the name assigned to that function. 
""" existing_name = self._templates_seen.get(template_code) if existing_name is not None: # Don't duplicate existing helpers return existing_name name = f"{base_name}{len(self.finalized_helpers)}" self._templates_seen[template_code] = name self.finalized_helpers.append(template_code.format(name=name)) return name def __iter__(self): return iter(self.finalized_helpers) def __getitem__(self, idx): return self.finalized_helpers[idx] @dataclasses.dataclass class BlockParameters: """ Class representing ND block dimensions, for block pointer analysis. """ shape: List[sympy.Expr] = dataclasses.field(default_factory=list) block_shape: List[sympy.Expr] = dataclasses.field(default_factory=list) strides: List[sympy.Expr] = dataclasses.field(default_factory=list) offsets: List[sympy.Expr] = dataclasses.field(default_factory=list) def __add__(self, other: BlockParameters) -> BlockParameters: """ Concatenates block parameters. """ cls = type(self) a, b = tuple(dataclasses.asdict(x) for x in (self, other)) return cls(**{key: a[key] + b[key] for key in a}) class CooperativeReductionWorkspaceCache: """ The scratch space used for cooperative reductions can be reused after two reduction loops. This keeps track of what can be reused. """ def __init__(self, args): self.args = args self.current_loop = [] self.prior_loop = [] self.ready_for_reuse = collections.defaultdict(collections.deque) self.loop_count = 0 self.store_count = 0 def allocate(self, nbytes: sympy.Expr): cached = self.ready_for_reuse.get(nbytes) if cached: return cached.popleft() ws_name, ws_offset = self.args.workspace(nbytes, False) self.current_loop.append((nbytes, ws_name, ws_offset)) return (ws_name, ws_offset) def on_loop_end(self): # Buffers can be reused after 2 loop ends for nbytes, ws_name, ws_offset in self.prior_loop: self.ready_for_reuse[nbytes].append((ws_name, ws_offset)) self.prior_loop = self.current_loop self.current_loop = [] self.loop_count += 1 def increment_store_count(self): prior = self.store_count self.store_count += 1 return prior @dataclasses.dataclass class FixedTritonConfig: config: Dict[str, int] def __getitem__(self, item): return self.config[item] class TritonCSE(CSE): """ Subclasses CSE to apply the current load mask to the cache key to avoid CSEing variables across separate masked blocks. 
""" def augment_key(self, cache_key: object) -> object: if mask := V.kernel._load_mask: return (cache_key, mask.name) else: return cache_key class TritonKernel(SIMDKernel): overrides = TritonKernelOverrides # type: ignore[assignment] helper_functions: HelperFunctions kexpr: Callable[[sympy.Expr], str] = texpr allow_block_ptr = True def __init__( self, tiling: Dict[str, sympy.Expr], min_elem_per_thread=0, optimize_mask=True, fixed_config: Optional[FixedTritonConfig] = None, **kwargs, ) -> None: self.optimize_mask: bool = optimize_mask self.fixed_config = fixed_config super().__init__(tiling, **kwargs) self.cse = TritonCSE(self.newvar_prefix, self.suffix) self.post_loop_combine: IndentedBuffer = IndentedBuffer() self.post_loop_store: IndentedBuffer = IndentedBuffer() self.outside_loop_vars: OrderedSet[Any] = OrderedSet() self.min_elem_per_thread = min_elem_per_thread self.block_ptr_id = itertools.count() self.helper_functions = HelperFunctions() self._load_counts: collections.Counter[str] = collections.Counter() # A set of autotuning hints to pass as part of triton_meta self.autotune_hints: OrderedSet[AutotuneHint] = OrderedSet() self.triton_meta: Optional[Dict[str, object]] = None if self.cooperative_reduction: self.init_cooperative_reduction() self.codegen_range_tree() def dtype_to_str(self, dtype: torch.dtype) -> str: return triton_type(dtype) def should_use_cooperative_reduction(self) -> bool: return self.inside_reduction and V.choices.should_use_cooperative_reduction( self.features ) def init_cooperative_reduction(self): """One time setup code for cooperative reductions.""" assert self.cooperative_reduction # shift all the grids over since tl.program_id(0) is for rsplit for tree in self.range_trees: if tree.grid_dim is not None: tree.grid_dim += 1 sem_count = self.numels["x"] if self.fixed_config: sem_count = CeilDiv(sem_count, self.fixed_config["XBLOCK"]) self.semaphores_name = self.args.semaphores(sem_count) self.cooperative_reduction_workspace_cache = CooperativeReductionWorkspaceCache( self.args ) self.body.splice( """ rsplit_id = tl.program_id(0) num_rblocks = (rnumel + RBLOCK - 1) // RBLOCK rsplit_chunk = (num_rblocks + RSPLIT - 1) // RSPLIT * RBLOCK rsplit_start = rsplit_chunk * rsplit_id rsplit_end = rsplit_chunk * (rsplit_id + 1) """, strip=True, ) if not self._has_constant_mask(self.range_trees[-1]): self.body.writeline( "rsplit_end = tl.where(rsplit_end < rnumel, rsplit_end, rnumel)" ) def codegen_range_tree(self): for tree in self.range_trees: # reduction indexing goes inside a loop if not tree.is_loop: self.iteration_ranges_codegen_header(tree, self.body) if self.inside_reduction and self.range_trees[-1].is_loop: # workaround for this issue: # https://gist.github.com/jansel/6527126f781559095c5531f98a4235a7 self.body.writeline( f"rbase = {self.iteration_ranges_ranges_code(self.range_trees[-1])}" ) def need_numel_args(self): r""" Indicate whether we need provide numel as arguments for the generated kernel calls in the benchmark. Should be true for pointwise/reduction kernels but false for triton matmul kernels. 
""" return True def should_use_persistent_reduction(self) -> bool: return self.inside_reduction and V.choices.should_use_persistent_reduction( self.features, self.cooperative_reduction ) def want_no_x_dim(self): if self.persistent_reduction and len(self.numels) == 2: if self.fixed_config: return self.fixed_config["XBLOCK"] == 1 return V.choices.want_no_x_dim(self.features) return False @property def assert_function(self) -> str: return "tl.device_assert" def indexing( self, index: sympy.Expr, *, copy_shape=None, dense_indexing=False, override_mask=None, block_ptr=False, ): """ Compute the index and mask to pass to tl.load() or tl.store() """ index = self.prepare_indexing(index) index_vars = index.free_symbols has_rindex = False mask_vars: OrderedSet[str] = OrderedSet() for var in index_vars: assert isinstance(var, sympy.Symbol) has_rindex = has_rindex or symbol_is_type(var, SymT.RINDEX) if override_mask: pass elif symbol_is_type(var, SymT.TMP): # indirect indexing cse_var = self.cse.varname_map[var.name] mask_vars.update(cse_var.mask_vars) elif symbol_is_type( var, ( SymT.UNBACKED_INT, SymT.SIZE, SymT.PRECOMPUTED_SIZE, SymT.INDEX, SymT.FLOAT, SymT.UNBACKED_FLOAT, ), ): pass else: # var is one of xN, yN or rN assert symbol_is_type( var, (SymT.RINDEX, SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK) ), var.name mask_vars.add(f"{var.name[0]}mask") need_dense = ( config.triton.dense_indexing or dense_indexing or self._load_mask is not None ) and index != 0 have_dense = True have_loop_vars = False dense_mask_vars: OrderedSet[str] = OrderedSet() for tree in self.active_range_trees(): if index_vars.intersection(tree.var_list): have_loop_vars = True else: have_dense = False dense_mask_vars.add(f"{tree.prefix}mask") if ( block_ptr and self.allow_block_ptr and config.triton.use_block_ptr and not override_mask and not self._load_mask and len(mask_vars - dense_mask_vars) == 0 and not self.is_indirect_indexing(index) and have_loop_vars # workaround https://github.com/openai/triton/issues/2821 and self.index_dtype == "tl.int32" ): def match_strided_block( index: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Matches expressions of the form: idx = s * xindex This implies stride (s,), and shape (XBLOCK,). """ symbol = range_tree.symbol() stride = sympy.Wild("stride", exclude=[symbol]) m = index.match(symbol * stride) if m is None: return None return BlockParameters( shape=[range_tree.numel], block_shape=[TritonSymbols.get_block_size(range_tree)], strides=[m[stride]], offsets=[TritonSymbols.get_block_offset(range_tree)], ) def match_mod_div_block( index: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Matches higher-dimensional blocks coming from FloorDiv and ModularIndexing. Example expression to match: sN * ((rindex//(d1 * ... * d(N-1)))) + s1 * ModularIndexing(rindex, 1, d1) + ... + s(N-1) * ModularIndexing(rindex, d1 * ... * d(N-2), d(N-1)) This iterates over a block of shape (dN, ..., d1) and stride (sN, ..., s1). (d1,...,d(N-1)) and (s1,...,sN) are wildcards that we match. Note that dN does not appear in the expression, but we solve for it using range tree numels and the other dims. """ # Bound the possible number of dims. We use the following heuristics: # - At least one dim for each range tree node. # - At least one dim for every FloorDiv or ModularIndexing op. # - At least 2 dims to pattern match. 
num_dims = max( 2, len(self.range_tree_nodes), (index.count(FloorDiv) + index.count(ModularIndexing)), ) # Pattern match to find the strides and offset. index_var = range_tree.symbol() match_result = BlockPatternMatcher.match_mod_div_block_expr( index, index_var, range_tree.numel, num_dims ) if match_result is None: return None ( dims, strides, block_index_exprs, ) = match_result slice_numels = BlockPatternMatcher.get_slice_numels(dims) # Check for applicable iteration range sizes. # When mapping a 1D block into an ND one, we need to know that # the number of elements is not changed. This means the slice numels of # the ND iteration range must evenly divide the length of the 1D block. # There are two cases where we can guarantee this: # 1. Numels are powers of 2. If numel == 2 ** n, and we know XBLOCK == 2 ** m, # with n and m integers, then either numel is a multiple of XBLOCK, or numel # is less than XBLOCK. (If numel is less than XBLOCK, we round up to 1 below.) # 2. Numels are multiples of the maximum possible block size. sizevars = V.graph.sizevars max_block = self.max_block(range_tree.prefix) if any( not sizevars.statically_known_multiple_of(numel, max_block) and not sizevars.statically_known_power_of_2(numel) for numel in slice_numels ): return None # Compute the ND block shape from the linear block size. # Use CielDiv to round leading dimensions up to 1. # Non-leading dimensions are clamped to the size of the iteration range, # while the leading dimension can exceed this to accomodate a larger # block size. linear_block_size = TritonSymbols.get_block_size(range_tree) block_shape: List[sympy.Expr] = [ CeilDiv(linear_block_size, slice_numels[0]) ] + [ sympy.Min(CeilDiv(linear_block_size, numel), dim) for numel, dim in zip(slice_numels[1:], dims[1:]) ] # Compute block offsets from {xyzr}offset and the matched expressions. block_offsets: List[sympy.Expr] = [ sympy_subs( expr, {index_var: TritonSymbols.get_block_offset(range_tree)} ) for expr in block_index_exprs ] return BlockParameters( shape=dims, block_shape=block_shape, strides=strides, offsets=block_offsets, ) def match_block_pointer_subexpr( expr: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Match a block indexing subexpression involving a single range tree. """ for match_func in ( match_strided_block, match_mod_div_block, ): match = match_func(expr, range_tree) if match is not None: return match return None def match_block_pointer() -> Optional[BlockPtrOptions]: index_relative_to_xyr_index = sympy_subs( index, {v: t.expr for v, t in self.range_tree_nodes.items()} ) range_trees = self.active_range_trees(reorder=True) # Partition the index into subexpressions pertaining to each range tree. # For example xindex * 5 + rindex * 3 is partitioned to # (xindex * 5, rindex * 3). index_subexprs = [ BlockPatternMatcher.get_subexpr_involving_symbol( index_relative_to_xyr_index, tree.symbol() ) for tree in range_trees ] # Match each range tree's subexpression separately. range_symbols = {tree.symbol() for tree in range_trees} block_params = BlockParameters() for tree, subexpr in zip(range_trees, index_subexprs): # Reject mixed terms, e.g. xindex * rindex. # NB: the zero expression is allowed, for broadcasting. if len(range_symbols.intersection(subexpr.free_symbols)) > 1: return None # Match the subexpression for this range tree. params = match_block_pointer_subexpr(subexpr, tree) if params is None: return None block_params += params # Collect leftover terms as a constant offset. 
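            # For example (hypothetical index), if
            #   index_relative_to_xyr_index = 5 + 2 * xindex + 16 * rindex
            # then the per-tree subexpressions (2 * xindex, 16 * rindex) were
            # matched above and the leftover constant 5 becomes the block
            # pointer's constant_offset.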
offset = index_relative_to_xyr_index - sum(index_subexprs) # Form the block pointer. self.filter_masks(mask_vars) return BlockPtrOptions.create( params=block_params, constant_offset=offset, range_trees=range_trees, mask_vars=mask_vars, get_max_block=self.max_block, ) # Return a block pointer, if indexing matches the pattern. options = match_block_pointer() if options is not None: return options expand_str = None index_str = self.index_to_str(index) if isinstance(index, sympy.Integer): expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() index_str = f"tl.full({expand_str}, {index_str}, tl.int32)" return IndexingOptions( index_str, OrderedSet(), "None", expand_str, has_rindex, index ) if need_dense and not have_dense: expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() index_str = f"tl.broadcast_to({index_str}, {expand_str})" mask_vars = dense_mask_vars elif not have_loop_vars and copy_shape: index_str = f"tl.broadcast_to({index_str}, {copy_shape}.shape)" mask_vars = dense_mask_vars if override_mask: mask_vars = OrderedSet([override_mask]) if self._load_mask: mask_vars.add(self._load_mask) self.filter_masks(mask_vars) mask_str = " & ".join(sorted(map(str, mask_vars))) if mask_vars else "None" return IndexingOptions(index_str, mask_vars, mask_str, expand_str, has_rindex, index) # type: ignore[arg-type] def codegen_block_ptr( self, name: str, var: str, indexing: BlockPtrOptions, other="" ) -> Tuple[str, Optional[DeferredLine], str]: advance_block_ptr = None check = indexing.boundary_check() if not check: # workaround https://github.com/openai/triton/issues/2813 other = "" elif other: assert other == ", other=0.0" other = f", boundary_check={check!r}, padding_option='zero'" else: other = f", boundary_check={check!r}" if ( self.inside_reduction and self.range_trees[-1].is_loop and indexing.has_rindex() ): block_ptr = f"block_ptr{next(self.block_ptr_id)}" self.body.writeline( DeferredLine( name, f"{block_ptr} = {indexing.format(var, roffset=False)}" ) ) advance_block_ptr = DeferredLine( name, f"{block_ptr} = tl.advance({block_ptr}, {indexing.advance_roffset()})", ) else: block_ptr = indexing.format(var) return block_ptr, advance_block_ptr, other def codegen_block_ptr_store_line(self, name, indexing, block_ptr, value, other=""): # Stores require an explicit broadcast. 
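        # Illustrative shape of the emitted store (variable names are
        # hypothetical):
        #   tl.store(block_ptr0, tl.broadcast_to(tmp4, [XBLOCK, RBLOCK]).to(tl.float32))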
value = indexing.codegen_broadcast_and_reshape( value, indexing.final_shape, indexing.block_shape, False ) # workaround https://github.com/openai/triton/issues/2814 value = f"{value}.to({triton_store_type(V.graph.get_dtype(name))})" return f"tl.store({block_ptr}, {value}{other})" def check_bounds( self, expr: sympy.Expr, size: sympy.Expr, lower: bool, upper: bool, ): if not (lower or upper): return assert isinstance(expr, sympy.Expr) indexing = self.indexing(expr, block_ptr=False) assert isinstance(indexing, IndexingOptions) index_str = indexing.index_str mask_str = indexing.mask_str if indexing.has_mask() else None size_str = texpr(self.rename_indexing(size)) if upper else None # expr is already wrapped line = self.indirect_assert( index_str, "0" if lower else None, size_str, mask_str ) buffer = self.get_load_buffer(indexing) self.cse.generate(buffer, line, assignment=False, dtype=torch.int32) def get_load_buffer(self, indexing): if indexing.has_indirect() or indexing.has_tmpmask(): # Masked loads must come after the mask is computed return self.compute elif ( self.inside_reduction and self.range_trees[-1].is_loop and not indexing.has_rindex() ): # can lift a common load outside of reduction loop # One exception is when this is an indirect_load. return self.body else: return self.loads def load(self, name: str, index: sympy.Expr): var = self.args.input(name) load_counts = self._load_counts load_counts[name] += 1 make_line: Callable[[str], Union[str, DelayReplaceLine]] = identity indirect_indexing = self.is_indirect_indexing(index) original_index = index indexing = self.indexing(index, block_ptr=True) has_rindex = indexing.has_rindex() has_tmpmask = indexing.has_tmpmask() # Keep the variable in cache if were going to reuse it. Equiv., if any of the following hold # 1) We are doing broadcasting # 2) It is a non-coalesced load. The intuition is that if it's # non-coalesced, we will likely load each element multiple times in # practice. # 3) It will be used later and it won't be CSE'd. 
Equiv., if all the following hold # 3.1) We are in a reduction loop # 3.2) Its not its last use # 3.3) This load will not be lifted to the body # is_coalesced = any( i == 1 for i in self.get_strides_of_load(original_index).values() ) if self.is_broadcasted(original_index): ep = ", eviction_policy='evict_last'" elif not is_coalesced: ep = ", eviction_policy='evict_last'" elif self.inside_reduction and self.range_trees[-1].is_loop: def decide_later(): if load_counts[name] > expected_count and ( has_rindex or indirect_indexing ): return "evict_last" return "evict_first" expected_count = load_counts[name] ep = ", eviction_policy='<EP>'" make_line = functools.partial(DelayReplaceLine, "<EP>", decide_later) else: ep = "" if (has_tmpmask or has_rindex) and indexing.has_mask(): if self._load_other: other = f", other={constant_repr(self._load_other)}" else: other = ", other=0.0" else: other = "" advance_block_ptr = None append_broadcast = None dtype = V.graph.get_dtype(name) if should_unwrap_unspec_arg(name): line = var else: if isinstance(indexing, BlockPtrOptions): block_ptr, advance_block_ptr, other = self.codegen_block_ptr( name, var, indexing, other ) line = f"tl.load({block_ptr}{other}{ep})" line = indexing.codegen_broadcast_and_reshape( line, indexing.block_shape, indexing.final_shape, True ) elif isinstance(original_index, sympy.Integer): line = f"tl.load({var} + ({original_index}))" append_broadcast = indexing.expand_str else: line = f"tl.load({var} + ({indexing.index_str}), {indexing.mask_str}{ep}{other})" if ( dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32 ): line += ".to(tl.float32)" dtype = torch.float32 if dtype == torch.bool and torch.version.hip is None: # Workaround for https://github.com/openai/triton/issues/2151 # tl.load returns int8 when loading from pointer to int1 # NOTE: Currently causes hangs on bool UTs for ROCm line += ".to(tl.int1)" dtype = torch.bool load_buffer = self.get_load_buffer(indexing) result_var = self.cse.generate(load_buffer, make_line(line), dtype=dtype) if result_var.use_count > 1: load_counts[name] -= 1 # don't double count cache hit assert isinstance(result_var, TritonCSEVariable) result_var.mask_vars = indexing.mask_vars # type: ignore[assignment] if append_broadcast: line = f"tl.broadcast_to({result_var}, {append_broadcast})" result_var = self.cse.generate(load_buffer, line, dtype=dtype) if advance_block_ptr: load_buffer.writeline(advance_block_ptr) if not self.inside_reduction or (not indexing.has_rmask() and not has_rindex): self.outside_loop_vars.add(result_var) return result_var def store( self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None ) -> None: var = self.args.output(name) original_index = index indexing = self.indexing(index, dense_indexing=True, block_ptr=mode is None) # Guard against write-after-read corruption in triton. # See # https://github.com/openai/triton/issues/1615 # This triton bug means that a load which is broadcasted over multiple # warps may see the result of a store that happens later in the triton # program. The workaround is to add a barrier before storing, which # enforces that all warps have already read the data. 
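        # Illustrative emitted sequence for an in-place, broadcasted store
        # (buffer/variable names are hypothetical):
        #   tl.debug_barrier()
        #   tl.store(in_out_ptr0 + (x0), tmp3, xmask)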
is_inplace = name in self.args.inplace_buffers is_broadcasted = self.is_broadcasted(original_index) if is_inplace and is_broadcasted: self.stores.writeline(DeferredLine(name, "tl.debug_barrier()")) advance_block_ptr = None if isinstance(indexing, BlockPtrOptions): block_ptr, advance_block_ptr, other = self.codegen_block_ptr( name, var, indexing ) # block_ptr stores don't do implicit casting line = self.codegen_block_ptr_store_line( name, indexing, block_ptr, value, other ) elif mode is None: line = f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})" elif mode == "atomic_add": line = f"tl.atomic_add({var} + ({indexing.index_str}), {value}, {indexing.mask_str}, sem='relaxed')" else: raise NotImplementedError(f"store mode={mode}") exit_stack = contextlib.ExitStack() if not self.inside_reduction and self.cooperative_reduction: exit_stack.enter_context(self.guard_cooperative_store(name, self.stores)) self.stores.writeline(DeferredLine(name, line)) if advance_block_ptr: self.stores.writeline(advance_block_ptr) if not self.inside_reduction: self.outside_loop_vars.add(value) exit_stack.close() def guard_cooperative_store(self, name, buffer): """ For cooperative reductions only one thread block should write out the result. We rotate which thread block does each write for better parallelism """ idx = self.cooperative_reduction_workspace_cache.increment_store_count() buffer.writeline(DeferredLine(name, f"if rsplit_id == ({idx} % RSPLIT):")) return buffer.indent() def bucketize( self, values: CSEVariable, boundaries: Tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: CSEVariable, indexing_dtype: torch.dtype, right: bool, sorter: Optional[Tuple[str, sympy.Expr]] = None, sorter_indices: Optional[CSEVariable] = None, ) -> CSEVariable: """ See [Note: Inductor bucketize op] """ # Triton performance for bucketize_binary_search is much better when the number # of threads equals the number of elements. # If we're trying to use a bucketize kernel, we should make sure that an # autotuning config with num_elements_per_warp=(warp_size) exists. 
self.autotune_hints.add(AutotuneHint.ONE_ELEMENT_PER_THREAD) boundaries_ptr = self.args.input(boundaries[0]) boundary_size = self.index_to_str(boundaries[1]) boundaries_underlying_numel = self.index_to_str(boundaries[2]) boundary_stride = self.index_to_str(boundaries[3]) sorter_ptr = self.args.input(sorter[0]) if sorter else "None" sorter_stride = self.index_to_str(sorter[1]) if sorter else "None" block_size = self.dense_size_str() if indexing_dtype == torch.int32: triton_dtype = "tl.int32" elif indexing_dtype == torch.int64: triton_dtype = "tl.int64" else: raise NotImplementedError( "Bucketize only supports indexing with int32 and int64" ) result = self.cse.generate( self.compute, f"triton_helpers.bucketize_binary_search({values}, " f"{boundaries_ptr}, {boundary_size}, {boundaries_underlying_numel}, {boundary_stride}, " f"{boundary_indices}, " f"{triton_dtype}, " f"{right}, " f"{sorter_ptr}, {sorter_stride}, " f"{sorter_indices}, " f"{block_size}, " ")", dtype=indexing_dtype, # type: ignore[attr-defined] ) return result def reduction_resize(self, value): ndims = self.triton_tensor_ndim() if ndims == 1: return f"triton_helpers.promote_to_tensor({value})" sizes = [":"] * ndims sizes[-1] = "None" return f"{value}[{', '.join(sizes)}]" def reduction( self, dtype: torch.dtype, src_dtype: torch.dtype, reduction_type: ReductionType, value: Union[CSEVariable, Tuple[CSEVariable, ...]], ) -> Union[CSEVariable, Tuple[CSEVariable, ...]]: assert self.inside_reduction masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) if self._load_mask: masks.append(self._load_mask) reduction_range_prefix = self.range_trees[-1].prefix # Say we have # tmp0 = ops.constant(1, torch.int64) # tmp1 = ops.reduction(torch.int64, torch.int64, "sum", tmp0) # tmp0 in the triton code is either a scalar, or single-element tensor # so if we emit tl.sum directly, it will only give 1 instead of RBLOCK * 1 # To avoid this, we broadcast to the expected shape first. 
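        # Roughly, the generated code then looks like (names are illustrative):
        #   tmp2 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        #   tmp1 = tl.sum(tmp2, 1)[:, None]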
dense_size_str = self.dense_size_str() value = self._map_tuple_or_scalar( lambda v: self.cse.generate( self.compute, f"tl.broadcast_to({v}, {dense_size_str})", dtype=v.dtype, ), value, ) dim: int root_op: str def final_reduction(value): use_helper = reduction_type in {"any", "max", "min", "prod"} module = "triton_helpers" if use_helper else "tl" if reduction_type in {"max", "min"}: return self.reduction_resize( f"{module}.{reduction_type}2({value}, {dim})" ) return self.reduction_resize(f"{module}.{reduction_type}({value}, {dim})") def final_argreduce(buffer, result_var, value, index): buffer.splice( f"""\ {result_var}_val, {result_var}_idx = triton_helpers.{root_op}_with_index({value}, {index}, {dim}) {result_var} = {self.reduction_resize(f'{result_var}_idx')} """ ) cache_key = (src_dtype, reduction_type, value) if cache_key in self.cse.reduction_cache: return self.cse.reduction_cache[cache_key] dim = self.triton_tensor_ndim() - 1 acc_type = triton_acc_type(src_dtype) torch_acc_type = upcast_acc_dtype(src_dtype) result_var: Any = self.cse.newvar(dtype=torch_acc_type) result_var.mask_vars = OrderedSet( var for var in masks if not prefix_is_reduction(var[0]) ) cond = " & ".join(masks) def where_cond(tval, fval): if not cond: return tval return TritonKernelOverrides.where(cond, tval, fval) if self.persistent_reduction: default = ir.Reduction.default_value(reduction_type, src_dtype) default = self._map_tuple_or_scalar(constant_repr, default) def _mask_value(value, default): return self.cse.generate( self.compute, where_cond(value, default), dtype=value.dtype ) if isinstance(value, tuple): masked_value = [_mask_value(v, d) for v, d in zip(value, default)] else: masked_value = _mask_value(value, default) if reduction_type in {"argmax", "argmin"}: accumulator_index = str( self.cse.generate( self.compute, f"tl.broadcast_to({reduction_range_prefix}index, {masked_value}.shape)", dtype=torch.int64, ) ) root_op = {"argmax": "max", "argmin": "min"}[reduction_type] final_argreduce( self.compute, result_var, masked_value, accumulator_index ) elif reduction_type == "welford_reduce": if self.cooperative_reduction: # cooperative reductions require full welford for correctness result_var = self.welford_reduce( result_var, reduction_type, value, where_cond, acc_type, dtype ) else: # For persistent reductions, don't bother with # welford's algorithm since it uses more registers, and # taking two reductions doesn't increase memory usage. 
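                    # Sketch of the fallback's assumed semantics: compute the
                    # mean and the sum of squared deviations with two plain
                    # reductions, e.g.
                    #   mean = tl.sum(x, 1) / rnumel
                    #   m2 = tl.sum((x - mean[:, None]) * (x - mean[:, None]), 1)
                    # rather than maintaining Welford's running (mean, m2, weight).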
result_var = self.welford_reduce_fallback(dtype, value) elif reduction_type == "welford_combine": mean, m2, weight = masked_value welford = f"triton_helpers.welford({mean}, {m2}, {weight}, {dim})" mean, m2, weight = (self.cse.newvar(dtype=dtype) for _ in range(3)) self.compute.writeline(f"{mean}, {m2}, {weight} = {welford}") result_var = tuple( self.cse.generate( self.compute, self.reduction_resize(var_name), dtype=dtype ) for var_name in (mean, m2, weight) ) else: result_var = self.cse.generate( self.compute, final_reduction(masked_value), dtype=dtype ) else: accumulator = self.cse.namedvar(f"_{result_var}", dtype=torch_acc_type) default = ir.Reduction.default_accumulator(reduction_type, src_dtype) default = self._map_tuple_or_scalar(constant_repr, default) if not isinstance(default, tuple): self.body.writeline( f"{accumulator} = tl.full({self.dense_size_str()}, {default}, {acc_type})" ) if reduction_type in {"argmax", "argmin"}: accumulator_index = f"_{result_var}_index" long_max = torch.iinfo(torch.int64).max self.body.writeline( f"{accumulator_index} = tl.full({self.dense_size_str()}, {long_max}, tl.int64)" ) root_op = {"argmax": "max", "argmin": "min"}[reduction_type] self.compute.splice( f"""\ {accumulator}_next, {accumulator_index}_next = triton_helpers.{root_op}imum_with_index( {accumulator}, {accumulator_index}, {value}, {reduction_range_prefix}index ) {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} {accumulator_index} = {where_cond(f'{accumulator_index}_next', accumulator_index)} """ ) final_argreduce( self.post_loop_combine, result_var, accumulator, accumulator_index ) elif is_welford_reduction(reduction_type): result_var = self.welford_reduce( result_var, reduction_type, value, where_cond, acc_type, dtype ) else: combine_fn = ir.get_reduction_combine_fn(reduction_type, src_dtype) updated = combine_fn(accumulator, value) self.compute.writeline( f"{accumulator} = {where_cond(updated, accumulator)}" ) if src_dtype == torch.bool: # This is only really used for aten.any. 
It changes the # final reduction of a non-persistent reduction from # tmp5 = triton_helpers.max(_tmp5, 1)[:, None] # to # tmp5 = triton_helpers.max(_tmp5.to(tl.int8), 1)[:, None].to(tl.int1) # which is needed because tl.reduce doesn't support tl.int1 accumulator_casted_str = f"{accumulator}.to(tl.int8)" result_type = triton_compute_type(dtype) self.post_loop_combine.writeline( f"{result_var} = {final_reduction(accumulator_casted_str)}.to({result_type})" ) else: self.post_loop_combine.writeline( f"{result_var} = {final_reduction(accumulator)}" ) if self.cooperative_reduction: exit_stack = contextlib.ExitStack() for buf in (self.post_loop_combine, self.post_loop_store): # only do cooperative reduction combines if we have more than one thread block buf.writeline("if RSPLIT > 1:") exit_stack.enter_context(buf.indent()) if reduction_type in {"argmax", "argmin"}: self.post_loop_combine.writeline( f"{result_var}_bval = {self.reduction_resize(f'{result_var}_val')}" ) peer_val = self.codegen_cooperative_reduction_peer_combine( f"{result_var}_bval", src_dtype ) peer_idx = self.codegen_cooperative_reduction_peer_combine( result_var, dtype ) final_argreduce(self.post_loop_store, result_var, peer_val, peer_idx) elif is_welford_reduction(reduction_type): assert reduction_type == "welford_reduce" result_mean, result_m2, result_weight = result_var peer_mean = self.codegen_cooperative_reduction_peer_combine( result_mean, upcast_acc_dtype(src_dtype) ) peer_m2 = self.codegen_cooperative_reduction_peer_combine( result_m2, upcast_acc_dtype(src_dtype) ) peer_weight = self.codegen_cooperative_reduction_peer_combine( result_weight, upcast_acc_dtype(src_dtype) ) self.welford_reduce_final_reduction( self.post_loop_store, result_mean, result_m2, result_weight, peer_mean, peer_m2, peer_weight, dim, ) else: peers = self.codegen_cooperative_reduction_peer_combine( result_var, upcast_acc_dtype(src_dtype) ) self.post_loop_store.writeline( f"{result_var} = {final_reduction(peers)}" ) exit_stack.close() self.cse.reduction_cache[cache_key] = result_var if isinstance(result_var, tuple): assert all(isinstance(x, TritonCSEVariable) for x in result_var) self.outside_loop_vars |= OrderedSet(result_var) else: assert isinstance(result_var, TritonCSEVariable) self.outside_loop_vars.add(result_var) return result_var def welford_reduce( self, result_var, reduction_type, value, where_cond, acc_type, dtype ): """Helper to codegen a welford reduction""" dim = self.triton_tensor_ndim() - 1 accumulator = f"{result_var}_mean" accumulator_m2 = f"{result_var}_m2" accumulator_weight = f"{result_var}_weight" self.body.writeline( f"{accumulator} = tl.zeros({self.dense_size_str()}, {acc_type})" ) self.body.writeline( f"{accumulator_m2} = tl.zeros({self.dense_size_str()}, {acc_type})" ) self.body.writeline( f"{accumulator_weight} = tl.zeros({self.dense_size_str()}, {acc_type})" ) if reduction_type == "welford_combine": mean, m2, weight = value self.compute.splice( f"""\ {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_combine( {accumulator}, {accumulator_m2}, {accumulator_weight}, {mean}, {m2}, {weight} ) """ ) else: assert reduction_type == "welford_reduce" self.compute.splice( f"""\ {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_reduce( {value}, {accumulator}, {accumulator_m2}, {accumulator_weight}, roffset == 0 ) """ ) self.compute.splice( f"""\ {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} {accumulator_m2} = 
{where_cond(f'{accumulator_m2}_next', accumulator_m2)} {accumulator_weight} = {where_cond(f'{accumulator_weight}_next', accumulator_weight)} """ ) result_mean = result_var result_m2 = self.cse.newvar(dtype=dtype) result_weight = self.cse.newvar(dtype=dtype) return self.welford_reduce_final_reduction( self.post_loop_combine, result_mean, result_m2, result_weight, accumulator, accumulator_m2, accumulator_weight, dim, ) def welford_reduce_final_reduction( self, buf, result_mean, result_m2, result_weight, accumulator, accumulator_m2, accumulator_weight, dim, ): """Helper to codegen call to triton_helpers.welford""" buf.splice( f"""\ {result_mean}_tmp, {result_m2}_tmp, {result_weight}_tmp = triton_helpers.welford( {accumulator}, {accumulator_m2}, {accumulator_weight}, {dim} ) {result_mean} = {self.reduction_resize(f'{result_mean}_tmp')} {result_m2} = {self.reduction_resize(f'{result_m2}_tmp')} {result_weight} = {self.reduction_resize(f'{result_weight}_tmp')} """ ) return result_mean, result_m2, result_weight def max_rsplit(self): if self.fixed_config: return self.fixed_config["RSPLIT"] return TRITON_MAX_RSPLIT def codegen_cooperative_reduction_peer_combine(self, result_var, dtype): """ Generate code to save a [XBLOCK, RSPLIT] temporary workspace, where each thread block writes a different column. After the barrier, every thread block loads the completed value so that it can compute the final value independently. """ xnumel = self.numels["x"] mask = "xindex < xnumel" if xnumel != 1 and not self.no_x_dim else None expand = "" if self.no_x_dim else "[None,:]" nbytes = xnumel * dtype.itemsize * self.max_rsplit() ws_name, ws_offset = self.cooperative_reduction_workspace_cache.allocate(nbytes) self.post_loop_combine.splice( f""" {result_var}_ws = ({ws_name} + {self.index_to_str(ws_offset)}).to(tl.pointer_type({triton_type(dtype)})) tl.store({result_var}_ws + (xindex * RSPLIT + rsplit_id), {result_var}, {mask}) """, strip=True, ) self.post_loop_store.writeline( f"{result_var}_peers = tl.load({result_var}_ws + (xindex * RSPLIT + tl.arange(0, RSPLIT){expand}), " f"{mask}, eviction_policy='evict_first')" ) return f"{result_var}_peers" def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable): assert self.inside_reduction self.inside_reduction = False indexing = self.indexing(index, block_ptr=True) self.inside_reduction = True var = self.args.output(name) exit_stack = contextlib.ExitStack() if self.cooperative_reduction: exit_stack.enter_context( self.guard_cooperative_store(name, self.post_loop_store) ) if isinstance(indexing, BlockPtrOptions): self.post_loop_store.writeline( DeferredLine( name, self.codegen_block_ptr_store_line( name, indexing, indexing.format(var), value, f", boundary_check={indexing.boundary_check()!r}", ), ) ) else: assert isinstance(indexing, IndexingOptions) self.post_loop_store.writeline( DeferredLine( name, f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})", ) ) exit_stack.close() def _lift_helper(self, fn, num_args) -> str: # Lift IR function for scan operations into a triton function # in the global namespace helper = IndentedBuffer() helper.writeline("@triton.jit") args = [tuple(f"arg{i}_{n}" for n in range(num_args)) for i in range(2)] signature = ", ".join(itertools.chain.from_iterable(args)) helper.writeline(f"def {{name}}({signature}):") cse = CSE(prefix="", suffix="") overrides = TritonOverrides(V.MockHandler()) # Build a name that changes depending on fn to workaround a triton bug # where the combine_fn to reduce and scan is not 
hashed, and so different # scan ops may collide in the triton cache. # This is fixed with the latest triton pin, but not the triton-rocm pin. helper_name = "_triton_helper_fn" class CSEProxy: def __getattr__(self, name: str) -> Callable[..., CSEVariable]: def inner(*args, **kwargs): nonlocal helper_name helper_name += f"_{name}" return cse.generate( helper, getattr(overrides, name)(*args, **kwargs), dtype=torch.float32, ) return inner with helper.indent(), V.set_ops_handler(CSEProxy()): outputs = fn(*args) outputs = ", ".join(str(output) for output in outputs) helper.writeline(f"return {outputs}") return self.helper_functions.add(helper.getvalue(), base_name=helper_name) def scan( self, dtypes: Tuple[torch.dtype, ...], combine_fn: Callable[ [Tuple[CSEVariable, ...], Tuple[CSEVariable, ...]], Tuple[CSEVariable, ...] ], values: Tuple[CSEVariable, ...], ) -> Tuple[CSEVariable, ...]: assert self.inside_reduction assert not self.cooperative_reduction, "TODO" masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) assert not self._load_mask, "ops.scan not supported inside ops.masked" broadcasted_values = [] accumulators = [] cse_compute = functools.partial(self.cse.generate, self.compute) combine_helper_fn = self._lift_helper(combine_fn, len(values)) dim = self.triton_tensor_ndim() - 1 for value, dtype in zip(values, dtypes): value_dtype = self.cse.generate( self.compute, f"{value}.to({triton_compute_type(dtype)})", dtype=upcast_compute_type(dtype), ) value = self.cse.generate( self.compute, f"tl.broadcast_to({value_dtype}, {self.dense_size_str()})", dtype=upcast_compute_type(dtype), ) broadcasted_values.append(value) acc_type = triton_acc_type(dtype) if not self.persistent_reduction: accumulator = self.cse.newvar(dtype=upcast_compute_type(dtype)) reduced_size = self.dense_size_list() reduced_size[-1] = "1" reduced_size = f"[{', '.join(reduced_size)}]" default = "float('nan')" if dtype.is_floating_point else "-1" self.body.writeline( f"{accumulator} = tl.full({reduced_size}, {default}, {acc_type})" ) accumulators.append(accumulator) def csv(values): return " ".join(f"{value}," for value in values) def cse_multiple(line, values, masks, dtypes): n = len(values) cache_keys = [f"{line}, {i}, {masks}" for i in range(n)] if all(self.cse.contains(cache_key) for cache_key in cache_keys): return [self.cse.get(cache_key) for cache_key in cache_keys] result_vars = [self.cse.newvar(dtype=_dtype) for _dtype in dtypes] self.compute.writeline( f"{csv(result_vars)} = {line}", ) for result_var, cache_key in zip(result_vars, cache_keys): if masks: result_var.mask_vars = masks # type: ignore[attr-defined] self.cse.put(cache_key, result_var) return tuple(result_vars) partial_scan_vars = cse_multiple( f"tl.associative_scan(({csv(broadcasted_values)}), {dim}, {combine_helper_fn})", values, masks, (upcast_compute_type(dtype) for dtype in dtypes), ) if not self.persistent_reduction: # tl.reduce doesn't work for non-commutative operators, so instead # of repeating the scan op as a reduction, we use sum to select the # last scan value partial_reduce_vars = [ cse_compute( f"triton_helpers.select_one(({partial_scan_var}), rbase == (RBLOCK - 1), dim=-1, keep_dims=True)", dtype=upcast_compute_type(partial_scan_var.dtype), ) for partial_scan_var in partial_scan_vars ] accs_next = combine_fn(tuple(accumulators), tuple(partial_reduce_vars)) full_scan_vars = combine_fn(tuple(accumulators), partial_scan_vars) result_vars = [ cse_compute( f"tl.where(roffset > 0, 
{full_scan}, {partial_scan})", dtype=partial_scan.dtype, ) for full_scan, partial_scan in zip(full_scan_vars, partial_scan_vars) ] for acc_next, accumulator, partial_reduce in zip( accs_next, accumulators, partial_reduce_vars ): self.compute.writeline( f"{accumulator} = tl.where(roffset > 0, {acc_next}, {partial_reduce})" ) else: result_vars = partial_scan_vars for result_var in result_vars: result_var.mask_vars = masks # type: ignore[attr-defined] return tuple(result_vars) def sort( self, dtypes: Tuple[torch.dtype, ...], values: Tuple[CSEVariable, ...], stable: bool, descending: bool, ) -> Tuple[CSEVariable, ...]: assert self.inside_reduction assert not self.cooperative_reduction, "TODO" masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) assert not self._load_mask, "ops.sort not supported inside ops.masked" assert ( self.persistent_reduction ), "ops.sort is only supported in persistent reductions" reduction_range_prefix = self.range_trees[-1].prefix cse_compute = functools.partial(self.cse.generate, self.compute) dim = self.triton_tensor_ndim() - 1 assert len(dtypes) == len(values) broadcasted_values = [ cse_compute( f"tl.broadcast_to({value}, {self.dense_size_str()})", dtype=dtypes[i] ) for i, value in enumerate(values) ] def csv(values): return " ".join(f"{value}," for value in values) def cse_multiple(line, n, masks, dtypes): cache_keys = [f"{line}, {i}, {masks}" for i in range(n)] if all(self.cse.contains(cache_key) for cache_key in cache_keys): return [self.cse.get(cache_key) for cache_key in cache_keys] result_vars = [self.cse.newvar(dtype=dtypes[i]) for i in range(n)] # type: ignore[attr-defined] self.compute.writeline( f"{csv(result_vars)} = {line}", ) for result_var, cache_key in zip(result_vars, cache_keys): if masks: result_var.mask_vars = masks # type: ignore[attr-defined] self.cse.put(cache_key, result_var) return tuple(result_vars) assert self.range_trees[-1].is_reduction rnumel = "None" if self._has_constant_mask(self.range_trees[-1]) else "rnumel" if len(values) == 2: line = ( f"triton_helpers.sort_with_index({broadcasted_values[0]}, {broadcasted_values[1]}," f" {rnumel}, {dim}, stable={stable}, descending={descending})" ) result_vars = cse_multiple(line, len(values), masks, dtypes) else: raise AssertionError("Unhandled sort") for result_var, input_var in zip(result_vars, values): result_var.mask_vars = masks # type: ignore[attr-defined] result_var.bounds = input_var.bounds return tuple(result_vars) def codegen_body(self): """ Concat output code from index_code, loads, compute, stores, suffix into self.body. For pointwise kernels, this is called just once at the end. For reduction kernels, this generates a loop over the reduction axis. 
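
        Roughly, a non-persistent reduction body is wrapped as:

            for roffset in range(0, rnumel, RBLOCK):
                rindex = roffset + rbase
                ... loads / compute / stores ...

        with the range replaced by (rsplit_start, rsplit_end, RBLOCK) for
        cooperative reductions.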
""" if not ( self.indexing_code or self.loads or self.stores or self.compute or self.post_loop_combine or self.post_loop_store ): return if self.inside_reduction and self.range_trees[-1].is_loop: if self.cooperative_reduction: self.body.writeline( "for roffset in range(rsplit_start, rsplit_end, RBLOCK):" ) else: self.body.writeline("for roffset in range(0, rnumel, RBLOCK):") with self.body.indent(): # last range tree is always reduction self.iteration_ranges_codegen_header(self.range_trees[-1], self.body) self.body.splice(self.indexing_code) self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) # invalidate any caches that came from inside the reduction loop self.cse.invalidate(self.outside_loop_vars) self.range_trees[-1].cache_clear() else: self.body.splice(self.indexing_code) self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) self.body.splice(self.post_loop_combine) if self.cooperative_reduction and ( self.post_loop_combine or self.post_loop_store ): sem_ptr = f"{self.semaphores_name} + tl.program_id(1)" self.body.splice( f""" if RSPLIT > 1: triton_helpers.x_grid_barrier({sem_ptr}) """, strip=True, ) self.cooperative_reduction_workspace_cache.on_loop_end() self.body.splice(self.post_loop_store) self.indexing_code.clear() self.loads.clear() self.compute.clear() self.stores.clear() self.post_loop_combine.clear() self.post_loop_store.clear() def codegen_kernel_benchmark(self, num_gb, grid=None): result = IndentedBuffer() argdefs, call_args, signature, _ = self.args.python_argdefs() result.writelines(["", "", "def get_args():"]) with result.indent(): name_cnt = itertools.count() var_names = [] for arg_name, arg_sig in zip(call_args, signature): var_name = f"arg_{next(name_cnt)}" buf = V.graph.try_get_buffer(arg_name) if buf: result.writeline( f"{var_name} = rand_strided({V.graph.sizevars.size_hints(buf.get_size())}, {V.graph.sizevars.size_hints(buf.get_stride())}, device='{buf.get_device()}', dtype={buf.get_dtype()})" # noqa: B950 line too long ) elif arg_name in V.graph.constants: # note that random seed is put in V.graph.constants const_tensor = V.graph.constants[arg_name] result.writeline( f"{var_name} = rand_strided({V.graph.sizevars.size_hints(const_tensor.size())}, {V.graph.sizevars.size_hints(const_tensor.stride())}, device='{const_tensor.device}', dtype={const_tensor.dtype})" # type: ignore[arg-type] # noqa: B950 line too long ) elif isinstance(arg_sig, SizeArg): symval_hint = V.graph.sizevars.size_hint(arg_sig.expr) # Force the seed_offset to be 0 so calls to the same kernel # using different seed offset will have the same benchmark harness. # We can dedup kernel definitions in this case. 
if "seed_offset" in arg_sig.name: symval_hint = 0 result.writeline(f"{var_name} = {symval_hint}") elif isinstance(arg_sig, WorkspaceArg): device = V.graph.get_current_device_or_throw() count = V.graph.sizevars.size_hint(arg_sig.count) result.writeline( f"{var_name} = torch.zeros({count}, device='{device}', dtype={arg_sig.dtype})" ) else: raise KeyError( f"Don't find the buffer or const tensor for {arg_name}" ) var_names.append(var_name) result.writeline(f"return {', '.join(var_names)},") result.writelines(["\n", "\n", "def call(args):"]) if grid is None: grid = [] extra_args = [] extra_args_str = None for tree in self.active_range_trees(): expr = pexpr(V.graph.sizevars.size_hint(tree.numel)) extra_args.append(expr) if not tree.is_reduction: grid.append(expr) if self.need_numel_args(): extra_args_str = ", ".join(map(str, extra_args)) + ", " else: extra_args_str = "" grid_arg = f"{extra_args_str}grid=grid({', '.join(grid)})" else: grid_arg = f"grid={grid}" current_device = V.graph.get_current_device_or_throw() index = current_device.index with result.indent(): result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") with result.indent(): result.writeline( V.graph.device_ops.set_device(index) ) # no-op to ensure context stream_name = f"stream{index}" result.writeline(f"{stream_name} = get_raw_stream({index})") result.writeline( f"{str(Placeholder.KERNEL_NAME)}.run(*args, {grid_arg}, stream={stream_name})" ) # benchmark all configs result.writelines(["\n", "\n", "def benchmark_all_configs(args):"]) with result.indent(): result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") with result.indent(): result.writeline( V.graph.device_ops.set_device(index) ) # no-op to ensure context result.writeline( f"return {str(Placeholder.KERNEL_NAME)}.benchmark_all_configs(*args, {grid_arg})" ) result.writelines(["\n", "\n", "if __name__ == '__main__':"]) with result.indent(): result.writeline( "from torch._inductor.runtime.benchmarking import benchmarker" ) result.writeline("") result.writeline("args = get_args()") result.writeline( "ms = benchmarker.benchmark_gpu(lambda: call(args), rep=40)" ) result.writeline(f"num_gb = {num_gb}") result.writeline("gb_per_s = num_gb / (ms / 1e3)") result.writeline( 'print(f"{ms:.3f}ms {num_gb:.3f}GB {gb_per_s:.2f}GB/s")' ) return result def imports_for_benchmark_kernel(self): return textwrap.dedent( """ from torch._dynamo.testing import rand_strided {} import torch from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid """.format( V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") ) ) def _get_heuristic(self): if self.fixed_config: return "fixed_config" elif self.cooperative_reduction: return "cooperative_reduction" elif self.persistent_reduction: assert self.inside_reduction return "persistent_reduction" elif self.inside_reduction: return "reduction" return "pointwise" @staticmethod def inductor_meta_common(): inductor_meta = { "backend_hash": torch.utils._triton.triton_hash_with_backend(), "are_deterministic_algorithms_enabled": torch.are_deterministic_algorithms_enabled(), "assert_indirect_indexing": config.assert_indirect_indexing, "autotune_local_cache": config.autotune_local_cache, "autotune_pointwise": config.triton.autotune_pointwise, "autotune_remote_cache": config.autotune_remote_cache, "force_disable_caches": config.force_disable_caches, "dynamic_scale_rblock": config.dynamic_scale_rblock, "max_autotune": config.max_autotune, "max_autotune_pointwise": config.max_autotune_pointwise, "min_split_scan_rblock": 
config.triton.min_split_scan_rblock, "spill_threshold": config.triton.spill_threshold, "store_cubin": config.triton.store_cubin, } if torch.version.hip is not None: inductor_meta["is_hip"] = True if config.is_fbcode(): inductor_meta["is_fbcode"] = True if config.profile_bandwidth: inductor_meta["profile_bandwidth"] = config.profile_bandwidth inductor_meta["profile_bandwidth_regex"] = config.profile_bandwidth_regex inductor_meta["profile_bandwidth_output"] = config.profile_bandwidth_output inductor_meta[ "profile_bandwidth_with_do_bench_using_profiling" ] = config.profile_bandwidth_with_do_bench_using_profiling if config.coordinate_descent_tuning: inductor_meta[ "coordinate_descent_tuning" ] = config.coordinate_descent_tuning inductor_meta[ "coordinate_descent_search_radius" ] = config.coordinate_descent_search_radius inductor_meta[ "coordinate_descent_check_all_directions" ] = config.coordinate_descent_check_all_directions return inductor_meta def codegen_kernel(self, name=None): code = IndentedBuffer() size_hints = [] for numel in self.numels.values(): numel_hint = V.graph.sizevars.symbolic_hint(numel) if not isinstance(numel_hint, (int, sympy.Integer)): # This default heuristic hint was picked carefully: it is # large, to ensure that we don't shrink the block size (since # if you don't have many elements, it'd be wasteful to pick a # large block size). Since we don't know how many elements we # might have, we should be OK with some inefficiency to make # sure we handle the large case well. 8192 is the largest # block size we support, so we pick that. # # If we have a better hint for unbacked SymInts (e.g., because # a user told us, or we are tracking upper bounds) we could # use that here. size_hint = 8192 else: size_hint = next_power_of_2(int(numel_hint)) size_hints.append(size_hint) if not self.inside_reduction: size_hints.pop() if name is None: code.splice(gen_common_triton_imports()) device_type = V.graph.get_current_device_or_throw().type if device_type == "cpu": code.splice("triton_helpers.set_driver_to_cpu()") else: code.splice("triton_helpers.set_driver_to_gpu()") if config.benchmark_kernel: code.splice(self.imports_for_benchmark_kernel()) argdefs, _, signature, _ = self.args.python_argdefs() # maps actual expression to SizeArg if it is in sizevars replacements for i, arg in enumerate(signature): if isinstance(arg, SizeArg): # mypy is unhappy about the sympy.Expr # type for the key of the dict below symbol = cast(sympy.Symbol, arg.expr) if symbol in V.graph.sizevars.inv_precomputed_replacements: signature[i] = SizeArg( arg.name, V.graph.sizevars.inv_precomputed_replacements[symbol] ) mutated_args: OrderedSet[str] = OrderedSet() for mutation in self.mutations: if mutation in self.args.input_buffers: mutated_args.add(self.args.input_buffers[mutation]) if ( mutation in self.args.inplace_buffers and mutation not in V.graph.removed_buffers and mutation not in self.removed_buffers ): mutated_args.add(self.args.inplace_buffers[mutation].inner_name) if mutation in self.args.output_buffers: mutated_args.add(self.args.output_buffers[mutation]) # Note: [Workspace Mutation] # workspace arguments are mutated, but are not marked as mutations in self.mutations # because their buffers are added during codegen, and aren't tracked during # lowering/scheduling. So we add them as mutated_args explicitly below. 
        #
        # In the logic below, we only mark the workspaces as mutated if they are marked with
        # zero_fill: that's because, if we don't expect the buffer to be pre-filled with
        # zeros, then, although we still mutate the data, we don't care about those
        # mutations because we don't make any assumptions about the contents of the
        # workspace buffer. Similarly, ZERO_PER_GRAPH requires the kernel to return
        # the buffer back to its original state.
        for argname, arg in zip(argdefs, signature):
            if (
                isinstance(arg, WorkspaceArg)
                and arg.zero_mode == WorkspaceZeroMode.ZERO_ON_CALL
            ):
                mutated_args.add(argname)

        mutated_args = sorted(mutated_args)

        triton_meta_signature = signature_to_meta(
            signature, size_dtype=self.index_dtype, argdefs=argdefs
        )
        triton_meta = {
            "signature": triton_meta_signature,
            "device": DeviceProperties.create(V.graph.get_current_device_or_throw()),
            "constants": {},
        }

        # Skip memory optimization for the forward pass of the training loop, where we
        # expect every new node will increase the peak memory and our greedy approach
        # would introduce a lot of unnecessary cpu copies.
        optimize_mem = V.graph.is_inference or V.graph.is_backward

        inductor_meta = {
            "autotune_hints": set(self.autotune_hints),
            "kernel_name": str(Placeholder.DESCRIPTIVE_NAME),
            "mutated_arg_names": mutated_args,
            "optimize_mem": optimize_mem,
            "no_x_dim": self.no_x_dim,
            "num_load": self.num_load,
            "num_reduction": self.num_reduction,
            **self.inductor_meta_common(),
        }

        if self.cooperative_reduction:
            inductor_meta["persistent_reduction"] = self.persistent_reduction

        num_gb = None
        if config.benchmark_kernel or config.profile_bandwidth:
            num_gb = self.estimate_kernel_num_bytes() / 1e9
            inductor_meta["kernel_num_gb"] = num_gb

        for tree in self.active_range_trees():
            sizearg = SizeArg(f"{tree.prefix}numel", tree.numel)
            signature.append(sizearg)
            triton_meta_signature[sizearg.name] = signature_of(
                sizearg, size_dtype=self.index_dtype
            )
            argdefs.append(f"{tree.prefix}numel")
            # constexpr version causes issues, see
            # https://github.com/pytorch/torchdynamo/pull/1362
            # triton_meta["constants"][len(argdefs)] = V.graph.sizevars.size_hint(
            #     tree.numel
            # )
            # argdefs.append(f"{tree.prefix}numel: tl.constexpr")

        triton_meta["configs"] = [config_of(signature)]

        # The Triton compiler includes equal_to_1 args into constants even
        # when they are not constexpr. Otherwise there may be a segfault
        # during launching the Inductor-compiled Triton kernel.
# https://github.com/pytorch/pytorch/issues/120478#issuecomment-1962822307 # https://github.com/openai/triton/blob/231efe9ed2d200be0f69a07c298e4342b08efe3d/python/triton/runtime/jit.py#L384 for arg_num in triton_meta["configs"][0].equal_to_1: # type: ignore[index] triton_meta["constants"][signature[arg_num].name] = 1 # type: ignore[index] self.triton_meta = triton_meta for tree in self.range_trees: if tree.is_reduction and self.persistent_reduction: # RBLOCK for persistent_reduction is defined in codegen_static_numels continue if tree.tensor_dim is None: continue argdefs.append(f"{tree.prefix.upper()}BLOCK : tl.constexpr") if self.cooperative_reduction: argdefs.append("RSPLIT : tl.constexpr") self.codegen_body() for helper in self.helper_functions: code.writeline("") code.splice(helper) if self.fixed_config: heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( config={self.fixed_config.config!r}, filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r} ) @triton.jit """ elif self.inside_reduction: reduction_hint = self.features.get_reduction_hint() heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( size_hints={size_hints!r}, reduction_hint={reduction_hint}, filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r} ) @triton.jit """ else: tile_hint = "" if len(size_hints) == 2: if len(signature) == 4: # input, output and 2 args tile_hint = "tile_hint=TileHint.SQUARE," else: tile_hint = "tile_hint=TileHint.DEFAULT," heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( size_hints={size_hints!r}, {tile_hint} filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r}, min_elem_per_thread={self.min_elem_per_thread} ) @triton.jit """ code.splice(heuristics_line) code.writeline( f"def {name or str(Placeholder.KERNEL_NAME)}({', '.join(argdefs)}):" ) with code.indent(): self.codegen_static_numels(code) for old, new in self.args.aliases(): code.writeline(f"{old} = {new}") code.splice(self.body) if config.benchmark_kernel: code.splice(self.codegen_kernel_benchmark(num_gb)) return code.getvalue() @staticmethod def _get_persistent_RBLOCK(rnumel): rnumel = V.graph.sizevars.simplify(rnumel) if isinstance(rnumel, (sympy.Integer, int)): val = int(rnumel) val = next_power_of_2(val) else: val = 128 while not V.graph.sizevars.statically_known_leq(rnumel, val): if val > 16 * 1024: raise ValueError(f"Failed to find static RBLOCK for {rnumel}") val *= 2 return val @staticmethod def has_persistent_RBLOCK(rnumel): try: TritonKernel._get_persistent_RBLOCK(rnumel) return True except ValueError: return False def codegen_static_numels(self, code): """ We get a small speedup from hard coding numels if they are static. This code stomps on the passed-in values by writing an constant to the top of the kernel. In a kernel like: def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): We would add xnumel = 4096 rnumel = 768 After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream knows that its a static numel, as that you just plop a constant into the kernel. 
""" for tree in self.range_trees: if not tree.is_reduction or self.inside_reduction: simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) if isinstance(simplified_tree_numel, (sympy.Integer, int)): code.writeline(f"{tree.prefix}numel = {int(simplified_tree_numel)}") if tree.is_reduction and self.persistent_reduction: val = self._get_persistent_RBLOCK(tree.numel) if self.cooperative_reduction: val = f"{val} // RSPLIT" code.writeline(f"RBLOCK: tl.constexpr = {val}") if tree.prefix == "x" and self.no_x_dim: code.writeline("XBLOCK: tl.constexpr = 1") def _get_grid_fn_str(self): return self._get_grid_fn().__name__ def _get_grid_fn(self): if self.cooperative_reduction: return cooperative_reduction_grid return default_grid_fn def add_numel_to_call_args_and_grid(self, name, call_args, arg_types, grid): # TODO(jansel): if there are constants, we shouldn't bother passing them as args for tree in self.range_trees: if isinstance(tree.numel, (sympy.Integer, sympy.Symbol)): expr = tree.numel else: expr = V.graph.wrapper_code.generate_numel_expr(name, tree) if not tree.is_reduction or self.inside_reduction: call_args.append(expr) arg_types.append(type(expr)) if tree.grid_dim is not None: grid.append(expr) def call_kernel(self, name: str, node: Optional[IRNode] = None): wrapper = V.graph.wrapper_code wrapper.write_triton_header_once() _, call_args, _, arg_types = self.args.python_argdefs() grid: List[Any] = [] self.add_numel_to_call_args_and_grid(name, call_args, arg_types, grid) current_device = V.graph.get_current_device_or_throw() for ws in self.args.workspace_args: wrapper.generate_workspace_allocation(ws) grid = wrapper.generate_default_grid( name, grid, grid_callable=self._get_grid_fn() ) wrapper.generate_kernel_call( name, call_args, grid, current_device.index, gpu=current_device.type != "cpu", triton=True, arg_types=arg_types, grid_fn=self._get_grid_fn_str(), triton_meta=self.triton_meta, ) for ws in reversed(self.args.workspace_args): wrapper.generate_workspace_deallocation(ws) def codegen_nan_check(self): wrapper = V.graph.wrapper_code _, call_args, arg_signatures, _ = self.args.python_argdefs() for arg, arg_signature in zip(call_args, arg_signatures): if isinstance(arg_signature, TensorArg): if V.graph.cpp_wrapper: wrapper.writeline( f'AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_check_inf_and_nan("{arg}", {arg}));' ) else: line = f"assert not {arg}.isnan().any().item()" wrapper.writeline(line) line = f"assert not {arg}.isinf().any().item()" wrapper.writeline(line) def create_cse_var(self, *args, **kwargs): return TritonCSEVariable(*args, **kwargs) def codegen_iteration_ranges_entry(self, entry: IterationRangesEntry): line = f"{entry.name} = {self.kexpr(self.rename_indexing(entry.expr))}" if entry.root.is_loop: self.indexing_code.writeline(line) else: # lift non-reduction stores outside loop self.body.writeline(line) def iteration_ranges_ranges_code(self, entry): assert entry.tensor_dim is not None size = self.indexing_size_str(entry.tensor_dim) index_dtype = self.index_dtype suffix = f".to({index_dtype})" if index_dtype != "tl.int32" else "" if ( self.cooperative_reduction and self.persistent_reduction and entry.is_reduction ): suffix = f"{suffix} + rsplit_start" return f"tl.arange(0, {entry.prefix.upper()}BLOCK){size}{suffix}" def iteration_ranges_scalar_code(self, entry, value): index_dtype = self.index_dtype ndim = self.triton_tensor_ndim() size = [1] * ndim return f"tl.full({size}, {value}, {index_dtype})" def iteration_ranges_get_pid(self, entry): assert entry.grid_dim is not None key 
= f"tl.program_id({entry.grid_dim})" # y_grid has a limit, so express it in terms of y and z in case of overflow. # z grid is only exercised when max_tiles == 3 (off by default). if ( entry.grid_dim == 1 and not entry.has_zdim and not self.cooperative_reduction and not V.graph.sizevars.statically_known_leq(entry.numel, get_max_y_grid()) ): # For ynumel larger than max_ygrid, we need to use zdim. # For each z dimension, there are tl.num_programs(1) yblocks which is passed by grad(x,y,z). # So, we need to add tl.program_id(z) * tl.num_programs(y) *YBLOCK to get the correct yoffset. key = f"({key} + tl.program_id({entry.grid_dim + 1}) * tl.num_programs({entry.grid_dim}))" pid = entry.pid_cache.get(key, key) if self.index_dtype != "tl.int32": return f"{pid}.to({self.index_dtype})" return pid def max_block(self, prefix): if self.fixed_config: return self.fixed_config[f"{prefix.upper()}BLOCK"] return TRITON_MAX_BLOCK[prefix.upper()] def _has_constant_mask(self, tree: IterationRangesRoot): if not self.optimize_mask: return False if V.graph.sizevars.statically_known_equals(tree.numel, 1): # type: ignore[arg-type] return True # Masks are superfluous if numel is a multiple of BLOCK # (We use the fact that BLOCK is required by triton to be a power of 2) if tree.is_reduction and self.persistent_reduction: max_block = self._get_persistent_RBLOCK(tree.numel) elif tree.prefix == "x" and self.no_x_dim: max_block = 1 else: max_block = self.max_block(tree.prefix) if tree.is_reduction and self.cooperative_reduction: max_block = max_block * self.max_rsplit() # Optional optimization: if block divides numel exactly, we will # never need to do a masked load to handle stragglers at the end. # If this tree is for the y dimension, we should only use a constant # mask if it can be guaranteed that: # 1. (ynumel / YBLOCK) < max_ygrid or # 2. (ynumel / YBLOCK) % max_ygrid == 0 # Because YBLOCK is not constant, use a conservative heuristic: # only use a constant mask if ynumel < max_ygrid. # It's faster to avoid masking at all. But it is sound to always # mask. 
if V.graph.sizevars.statically_known_multiple_of(tree.numel, max_block): return ( tree.grid_dim != 1 or tree.has_zdim or V.graph.sizevars.statically_known_leq(tree.numel, get_max_y_grid()) ) return False def filter_masks(self, mask_vars): for tree in self.range_trees: if self._has_constant_mask(tree): mask_vars.discard(f"{tree.prefix}mask") def iteration_ranges_codegen_header(self, entry, code): x = entry.prefix if entry.is_loop: code.writeline(f"{entry.name} = {x}offset + {x}base") elif entry.grid_dim is None: # no need to "{x}offset = " code.writeline(f"{entry.name} = {self.iteration_ranges_ranges_code(entry)}") code.writeline(f"{x}offset = 0") else: if entry.tensor_dim is not None: line = f"{x}offset + {self.iteration_ranges_ranges_code(entry)}" else: line = self.iteration_ranges_scalar_code(entry, f"{x}offset") code.writelines( [ f"{x}offset = {self.iteration_ranges_get_pid(entry)} * {x.upper()}BLOCK", f"{entry.name} = {line}", ] ) if self._has_constant_mask(entry): sizes = self.dense_size_str() code.writeline(f"{x}mask = tl.full({sizes}, True, tl.int1)") else: code.writeline(f"{x}mask = {entry.name} < {x}numel") class TritonScheduling(SIMDScheduling): kernel_type: Type[Any] = TritonKernel backend_features = dict.fromkeys( # dict for deterministic order [ BackendFeature.FOREACH, BackendFeature.BUCKETIZE, BackendFeature.INPLACE_BUFFERS, BackendFeature.MASKED_SCATTER_WITH_INDEX, BackendFeature.SCAN, BackendFeature.TRITON_TEMPLATES, ] ) if torch.version.hip is None: backend_features.update( dict.fromkeys( [ # TODO: Move this above when ROCm triton adds support for multiple inputs BackendFeature.TUPLE_REDUCTION, BackendFeature.SORT, ] ) ) def __init__(self, scheduler: Scheduler) -> None: super().__init__(scheduler) if scheduler is None or not hasattr(scheduler, "nodes"): return for node in scheduler.nodes: if isinstance(node, (SchedulerNode, FusedSchedulerNode)): node.debug_device_str = debug_triton_code @classmethod def get_backend_features(cls, device: torch.device): if ( config.triton.cooperative_reductions or config.triton.force_cooperative_reductions ): return { **cls.backend_features, BackendFeature.REDUCE_TO_SINGLE_ELEMENT: None, } return cls.backend_features def codegen_comment(self, node_schedule): wrapper = V.graph.wrapper_code origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) if origins: wrapper.writeline(origins) if config.debug_fusion: from torch._inductor.scheduler import ( BaseSchedulerNode, ForeachKernelSchedulerNode, ) if not any( isinstance(n, ForeachKernelSchedulerNode) for n in node_schedule ): # We probably should look what are the nodes inside a foreach # schedule node node_names = [ n.get_name() for n in node_schedule if isinstance(n, BaseSchedulerNode) ] wrapper.writeline( f"{wrapper.comment} Fused node name list: {', '.join(node_names)}" ) def define_kernel(self, src_code, node_schedule, kernel): wrapper = V.graph.wrapper_code if src_code in wrapper.src_to_kernel: kernel_name = wrapper.src_to_kernel[src_code] else: fused_name = ( get_fused_kernel_name(node_schedule, config.triton.descriptive_names) if config.triton.descriptive_names else "" ) kernel_category = get_kernel_category_by_source_code(src_code)[:3] kernel_name = "_".join( ["triton", kernel_category, fused_name, wrapper.next_kernel_suffix()] ) # use the original src_code as the key wrapper.src_to_kernel[src_code] = kernel_name subs_name = kernel_name if config.triton.unique_kernel_names else "triton_" # DESCRIPTIVE_NAME is used for profiling purposes; it shows the full kernel name # 
even when unique_kernel_names is turned off. Meanwhile, KERNEL_NAME is sometimes set # to "triton_" to maximize caching opportunities (when unique_kernel_names = False). src_code = src_code.replace(str(Placeholder.DESCRIPTIVE_NAME), kernel_name) src_code = src_code.replace(str(Placeholder.KERNEL_NAME), subs_name) # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. src_code = src_code.replace("#pragma CMT", "#") basename, _, kernel_path = get_path(code_hash(src_code.strip()), "py") compile_wrapper = IndentedBuffer() compile_wrapper.writeline(f"async_compile.triton({subs_name!r}, '''") compile_wrapper.splice(src_code, strip=True) current_device = V.graph.get_current_device_or_throw() compile_wrapper.writeline(f"''', device_str='{current_device.type}')") metadata_comment = f"# kernel path: {kernel_path}" origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) metadata_comment += "\n" + origins + "\n" + detailed_origins wrapper.define_kernel( kernel_name, compile_wrapper.getvalue(), metadata_comment ) # log kernel metadata for offline analysis. # E.g. one can find all unaligned inner reduction and check if # padding helps with the perf kernel by kernel. if metrics.is_metric_table_enabled("kernel_metadata"): metrics.log_kernel_metadata(kernel_name, kernel_path, src_code) return kernel_name def benchmark_fused_nodes(self, nodes): with preserve_rng_state(), torch.cuda.device( V.graph.get_current_device_or_throw() ): src_code = self.generate_kernel_code_from_nodes( nodes, benchmark_kernel=True ) mod = PyCodeCache.load(src_code) def cache_file_path(): assert mod.__file__ is not None return os.path.splitext(mod.__file__)[0] + ".kernel_perf" def load_cache(): path = cache_file_path() if os.path.exists(path): with open(path) as fd: return float(fd.read()) return None def store_cache(): path = cache_file_path() with open(path, "w") as fd: fd.write(str(ms)) log.debug( "kernel src code for %s written to: %s", {n.get_name() for n in nodes}, mod.__file__, ) ms = load_cache() if ms is not None: return ms, mod.__file__ args = mod.get_args() call = mod.call wrapped_jit_function = mod.triton_ # call once to trigger the compilation try: call(wrapped_jit_function.clone_args(*args)[0]) except Exception as e: log.debug( "Exception (%s) in compiling fused nodes %s", e, {n.get_name() for n in nodes}, ) ms = float("inf") store_cache() return ms, mod.__file__ launchers = wrapped_jit_function.launchers assert len(launchers) == 1 if launchers[0].n_spills > 0: # skip benchmarking the kernel if there are register spills ms = float("inf") else: # We have to clone the inplace updated arguments to avoid earlier calls # generating out of range indices for later calls. 
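                # Rough arithmetic of the measurement below (illustrative numbers):
                # if clone_args + kernel takes 0.50 ms and clone_args alone takes
                # 0.12 ms, a kernel with mutated args ends up credited ~0.38 ms
                # after the subtraction that follows.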
ms = benchmarker.benchmark_gpu( lambda: call(wrapped_jit_function.clone_args(*args)[0]) ) # overhead of cloning args gives bias for fusing the kernel # in the case of mutating/in-placeable second fusion # TODO - would be better as a hook in triton do_bench that reset # the input values between benchmarking if len(wrapped_jit_function.mutated_arg_names) > 0: ms = ms - benchmarker.benchmark_gpu( lambda: wrapped_jit_function.clone_args(*args) ) log.debug( "The fused kernel for %s took %.3f ms to run", {n.get_name() for n in nodes}, ms, ) store_cache() return ms, mod.__file__ def create_kernel_choices( self, kernel_features, kernel_args, kernel_kwargs ) -> List[SIMDKernel]: is_scan = kernel_features.contains_op("scan") is_split_scan = is_scan and any( node.is_split_scan() for node in kernel_features.scheduler_nodes() ) kernel_type: Type[TritonKernel] = self.kernel_type if is_split_scan: from .triton_split_scan import TritonSplitScanKernel kernel_type = TritonSplitScanKernel if is_scan: # TODO(jansel): scan does not yet work with cooperative reductions kernel_kwargs["override_cooperative_reduction"] = False # ops.sort only works with persistent reduction, and is not bandwidth bound anyway # so taking the hit of non-coalesced loads is okay if kernel_features.contains_op("sort"): kernel_kwargs["override_persistent_reduction"] = True kernel_kwargs["override_cooperative_reduction"] = False if not TritonKernel.has_persistent_RBLOCK(kernel_features.reduction_numel): # Cannot use persistent reduction with unknown dynamic rnumel assert not kernel_kwargs.get("override_persistent_reduction") kernel_kwargs["override_persistent_reduction"] = False kernel_kwargs = V.choices.triton_kernel_kwargs( kernel_type, kernel_features, kernel_args, kernel_kwargs ) kernel = kernel_type(*kernel_args, **kernel_kwargs) return self.add_multi_kernel_choices(kernel, kernel_args, kernel_kwargs) def add_multi_kernel_choices( self, kernel: SIMDKernel, kernel_args: List[Any], kernel_kwargs: Dict[str, Any], ) -> List[SIMDKernel]: kernels: List[SIMDKernel] = [kernel] if not config.triton.multi_kernel: return kernels optional_persistent = kernel.persistent_reduction and not kernel_kwargs.get( "override_persistent_reduction" ) optional_cooperative = kernel.cooperative_reduction and not kernel_kwargs.get( "override_cooperative_reduction" ) if optional_persistent: kernels.append( self.kernel_type( *kernel_args, **kernel_kwargs, override_persistent_reduction=False, ) ) if optional_cooperative: rnumel = kernel.numels["r"] # for larger sizes non-cooperative gets very slow if V.graph.sizevars.statically_known_leq(rnumel, 65536): kernels.append( other := self.kernel_type( *kernel_args, **kernel_kwargs, override_cooperative_reduction=False, ) ) if optional_persistent and other.persistent_reduction: kernels.append( self.kernel_type( *kernel_args, **kernel_kwargs, override_cooperative_reduction=False, override_persistent_reduction=False, ) ) if len(kernels) > 1: for kernel2 in kernels[1:]: # Keep buffers needed by the non-persistent reduction so both kernels have the same arguments kernel2.must_keep_buffers = kernel.must_keep_buffers # persistent kernels must be generated last so must_keep_buffers works right kernels.sort(key=lambda k: k.persistent_reduction) return kernels def benchmark_combo_kernel(self, node_list): def cache_file_path(): assert mod.__file__ is not None return os.path.splitext(mod.__file__)[0] + ".kernel_perf" def load_cache(): path = cache_file_path() if os.path.exists(path): with open(path) as fd: return tuple(float(e) 
for e in fd.read().split()) return (None, None) def store_cache(): path = cache_file_path() with open(path, "w") as fd: fd.write(str(ms) + " " + str(ms_clone)) total_ms, file_list = 0, [] total_clone_ms = 0 removed_buffers_orig = V.graph.removed_buffers V.graph.removed_buffers = OrderedSet(removed_buffers_orig) inplaced_to_remove_orig = V.graph.inplaced_to_remove V.graph.inplaced_to_remove = OrderedSet(inplaced_to_remove_orig) enable_autotune = config.combo_kernels_autotune > 0 mixed_sizes = config.combo_kernel_allow_mixed_sizes > 0 kernel_code_list = self.generate_combo_kernel_code( subkernel_nodes=node_list, custom_part_algorithm=True, enable_autotune=enable_autotune, mixed_sizes=mixed_sizes, only_gen_src_code=True, ) for src_code, _, node_group in kernel_code_list: fused_node_lists = [node.get_nodes() for node in node_group] names = [n.get_name() for nodes in fused_node_lists for n in nodes] src_code = src_code.replace(str(Placeholder.KERNEL_NAME), "triton_") mod = PyCodeCache.load(src_code) log.debug( "kernel src code for %s written to: %s", names, mod.__file__, ) ms, ms_clone = load_cache() if ms is not None: total_ms += ms total_clone_ms += ms_clone file_list.append(mod.__file__) continue args = mod.get_args() call = mod.call wrapped_jit_function = mod.triton_ # call once to trigger the compilation call(wrapped_jit_function.clone_args(*args)[0]) launchers = wrapped_jit_function.launchers assert len(launchers) == 1 if launchers[0].n_spills > 0: # skip benchmarking the kernel if there are register spills ms = ms_clone = float("inf") else: # We have to clone the inplace updated arguments to avoid earlier calls # generating out of range indices for later calls. ms = benchmarker.benchmark_gpu( lambda: call(wrapped_jit_function.clone_args(*args)[0]) ) ms_clone = benchmarker.benchmark_gpu( lambda: wrapped_jit_function.clone_args(*args)[0] ) log.debug( "The fused kernel for %s took %.3f ms to run, %.3f ms to clone inputs", {n.get_name() for n in node_group}, ms, ms_clone, ) store_cache() total_ms += ms total_clone_ms += ms_clone file_list.append(mod.__file__) V.graph.removed_buffers = removed_buffers_orig V.graph.inplaced_to_remove = inplaced_to_remove_orig return total_ms, total_clone_ms, file_list def debug_triton_code(node: BaseSchedulerNode) -> List[str]: lines = [] multi_template = node.get_template_node() assert multi_template is None or isinstance(multi_template, ir.MultiTemplateBuffer) if multi_template and multi_template.make_kernel_render is None: lines.append(f"{node.get_name()} Unfinalized multi template buffer") else: from torch._inductor.codegen.cuda_combined_scheduling import ( CUDACombinedScheduling, ) device = node.get_device() assert device is not None backend = node.scheduler.get_backend(device) assert isinstance( backend, (SIMDScheduling, CUDACombinedScheduling) ), f"Scheduling backend should be SIMD or CUDACombined when generating debug Triton strings, got: {type(backend)}" with V.graph.set_current_device(device): # Don't increment kernel count when generating debug string. # This will confuse some unit tests that check the number of # generated kernels. old_generated_kernel_count = metrics.generated_kernel_count triton_code = backend.generate_kernel_code_from_nodes( node.get_nodes() ).strip() metrics.generated_kernel_count = old_generated_kernel_count lines.append(f"{node.get_name()} Triton code:") lines.append(textwrap.indent(triton_code, " ")) return lines
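

# A minimal, self-contained sketch (not Inductor's actual API; the helper names
# below are hypothetical) of the ".kernel_perf" sidecar-cache pattern that
# benchmark_fused_nodes and benchmark_combo_kernel above rely on: timings are
# written as plain text next to the generated module, so repeated autotuning
# passes can skip re-benchmarking a kernel that was already measured.
import os
from typing import Callable, Optional


def _cache_path(module_file: str) -> str:
    # e.g. /tmp/.../abc123.py -> /tmp/.../abc123.kernel_perf
    return os.path.splitext(module_file)[0] + ".kernel_perf"


def load_cached_ms(module_file: str) -> Optional[float]:
    path = _cache_path(module_file)
    if os.path.exists(path):
        with open(path) as fd:
            return float(fd.read())
    return None


def cached_benchmark(module_file: str, run_benchmark: Callable[[], float]) -> float:
    """Return the cached timing for module_file, benchmarking and caching on a miss."""
    ms = load_cached_ms(module_file)
    if ms is None:
        ms = run_benchmark()
        with open(_cache_path(module_file), "w") as fd:
            fd.write(str(ms))
    return ms
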
@triton.jit ") args = [tuple(f"arg{i}_{n}" for n in range(num_args)) for i in range(2)] signature = ", ".join(itertools.chain.from_iterable(args)) helper.writeline(f"def {{name}}({signature}):") cse = CSE(prefix="", suffix="") overrides = TritonOverrides(V.MockHandler()) # Build a name that changes depending on fn to workaround a triton bug # where the combine_fn to reduce and scan is not hashed, and so different # scan ops may collide in the triton cache. # This is fixed with the latest triton pin, but not the triton-rocm pin. helper_name = "_triton_helper_fn" class CSEProxy: def __getattr__(self, name: str) -> Callable[..., CSEVariable]: def inner(*args, **kwargs): nonlocal helper_name helper_name += f"_{name}" return cse.generate( helper, getattr(overrides, name)(*args, **kwargs), dtype=torch.float32, ) return inner with helper.indent(), V.set_ops_handler(CSEProxy()): outputs = fn(*args) outputs = ", ".join(str(output) for output in outputs) helper.writeline(f"return {outputs}") return self.helper_functions.add(helper.getvalue(), base_name=helper_name) def scan( self, dtypes: Tuple[torch.dtype, ...], combine_fn: Callable[ [Tuple[CSEVariable, ...], Tuple[CSEVariable, ...]], Tuple[CSEVariable, ...] ], values: Tuple[CSEVariable, ...], ) -> Tuple[CSEVariable, ...]: assert self.inside_reduction assert not self.cooperative_reduction, "TODO" masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) assert not self._load_mask, "ops.scan not supported inside ops.masked" broadcasted_values = [] accumulators = [] cse_compute = functools.partial(self.cse.generate, self.compute) combine_helper_fn = self._lift_helper(combine_fn, len(values)) dim = self.triton_tensor_ndim() - 1 for value, dtype in zip(values, dtypes): value_dtype = self.cse.generate( self.compute, f"{value}.to({triton_compute_type(dtype)})", dtype=upcast_compute_type(dtype), ) value = self.cse.generate( self.compute, f"tl.broadcast_to({value_dtype}, {self.dense_size_str()})", dtype=upcast_compute_type(dtype), ) broadcasted_values.append(value) acc_type = triton_acc_type(dtype) if not self.persistent_reduction: accumulator = self.cse.newvar(dtype=upcast_compute_type(dtype)) reduced_size = self.dense_size_list() reduced_size[-1] = "1" reduced_size = f"[{', '.join(reduced_size)}]" default = "float('nan')" if dtype.is_floating_point else "-1" self.body.writeline( f"{accumulator} = tl.full({reduced_size}, {default}, {acc_type})" ) accumulators.append(accumulator) def csv(values): return " ".join(f"{value}," for value in values) def cse_multiple(line, values, masks, dtypes): n = len(values) cache_keys = [f"{line}, {i}, {masks}" for i in range(n)] if all(self.cse.contains(cache_key) for cache_key in cache_keys): return [self.cse.get(cache_key) for cache_key in cache_keys] result_vars = [self.cse.newvar(dtype=_dtype) for _dtype in dtypes] self.compute.writeline( f"{csv(result_vars)} = {line}", ) for result_var, cache_key in zip(result_vars, cache_keys): if masks: result_var.mask_vars = masks # type: ignore[attr-defined] self.cse.put(cache_key, result_var) return tuple(result_vars) partial_scan_vars = cse_multiple( f"tl.associative_scan(({csv(broadcasted_values)}), {dim}, {combine_helper_fn})", values, masks, (upcast_compute_type(dtype) for dtype in dtypes), ) if not self.persistent_reduction: # tl.reduce doesn't work for non-commutative operators, so instead # of repeating the scan op as a reduction, we use sum to select the # last scan value partial_reduce_vars = [ cse_compute( 
f"triton_helpers.select_one(({partial_scan_var}), rbase == (RBLOCK - 1), dim=-1, keep_dims=True)", dtype=upcast_compute_type(partial_scan_var.dtype), ) for partial_scan_var in partial_scan_vars ] accs_next = combine_fn(tuple(accumulators), tuple(partial_reduce_vars)) full_scan_vars = combine_fn(tuple(accumulators), partial_scan_vars) result_vars = [ cse_compute( f"tl.where(roffset > 0, {full_scan}, {partial_scan})", dtype=partial_scan.dtype, ) for full_scan, partial_scan in zip(full_scan_vars, partial_scan_vars) ] for acc_next, accumulator, partial_reduce in zip( accs_next, accumulators, partial_reduce_vars ): self.compute.writeline( f"{accumulator} = tl.where(roffset > 0, {acc_next}, {partial_reduce})" ) else: result_vars = partial_scan_vars for result_var in result_vars: result_var.mask_vars = masks # type: ignore[attr-defined] return tuple(result_vars) def sort( self, dtypes: Tuple[torch.dtype, ...], values: Tuple[CSEVariable, ...], stable: bool, descending: bool, ) -> Tuple[CSEVariable, ...]: assert self.inside_reduction assert not self.cooperative_reduction, "TODO" masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) assert not self._load_mask, "ops.sort not supported inside ops.masked" assert ( self.persistent_reduction ), "ops.sort is only supported in persistent reductions" reduction_range_prefix = self.range_trees[-1].prefix cse_compute = functools.partial(self.cse.generate, self.compute) dim = self.triton_tensor_ndim() - 1 assert len(dtypes) == len(values) broadcasted_values = [ cse_compute( f"tl.broadcast_to({value}, {self.dense_size_str()})", dtype=dtypes[i] ) for i, value in enumerate(values) ] def csv(values): return " ".join(f"{value}," for value in values) def cse_multiple(line, n, masks, dtypes): cache_keys = [f"{line}, {i}, {masks}" for i in range(n)] if all(self.cse.contains(cache_key) for cache_key in cache_keys): return [self.cse.get(cache_key) for cache_key in cache_keys] result_vars = [self.cse.newvar(dtype=dtypes[i]) for i in range(n)] # type: ignore[attr-defined] self.compute.writeline( f"{csv(result_vars)} = {line}", ) for result_var, cache_key in zip(result_vars, cache_keys): if masks: result_var.mask_vars = masks # type: ignore[attr-defined] self.cse.put(cache_key, result_var) return tuple(result_vars) assert self.range_trees[-1].is_reduction rnumel = "None" if self._has_constant_mask(self.range_trees[-1]) else "rnumel" if len(values) == 2: line = ( f"triton_helpers.sort_with_index({broadcasted_values[0]}, {broadcasted_values[1]}," f" {rnumel}, {dim}, stable={stable}, descending={descending})" ) result_vars = cse_multiple(line, len(values), masks, dtypes) else: raise AssertionError("Unhandled sort") for result_var, input_var in zip(result_vars, values): result_var.mask_vars = masks # type: ignore[attr-defined] result_var.bounds = input_var.bounds return tuple(result_vars) def codegen_body(self): """ Concat output code from index_code, loads, compute, stores, suffix into self.body. For pointwise kernels, this is called just once at the end. For reduction kernels, this generates a loop over the reduction axis. 
""" if not ( self.indexing_code or self.loads or self.stores or self.compute or self.post_loop_combine or self.post_loop_store ): return if self.inside_reduction and self.range_trees[-1].is_loop: if self.cooperative_reduction: self.body.writeline( "for roffset in range(rsplit_start, rsplit_end, RBLOCK):" ) else: self.body.writeline("for roffset in range(0, rnumel, RBLOCK):") with self.body.indent(): # last range tree is always reduction self.iteration_ranges_codegen_header(self.range_trees[-1], self.body) self.body.splice(self.indexing_code) self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) # invalidate any caches that came from inside the reduction loop self.cse.invalidate(self.outside_loop_vars) self.range_trees[-1].cache_clear() else: self.body.splice(self.indexing_code) self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) self.body.splice(self.post_loop_combine) if self.cooperative_reduction and ( self.post_loop_combine or self.post_loop_store ): sem_ptr = f"{self.semaphores_name} + tl.program_id(1)" self.body.splice( f""" if RSPLIT > 1: triton_helpers.x_grid_barrier({sem_ptr}) """, strip=True, ) self.cooperative_reduction_workspace_cache.on_loop_end() self.body.splice(self.post_loop_store) self.indexing_code.clear() self.loads.clear() self.compute.clear() self.stores.clear() self.post_loop_combine.clear() self.post_loop_store.clear() def codegen_kernel_benchmark(self, num_gb, grid=None): result = IndentedBuffer() argdefs, call_args, signature, _ = self.args.python_argdefs() result.writelines(["", "", "def get_args():"]) with result.indent(): name_cnt = itertools.count() var_names = [] for arg_name, arg_sig in zip(call_args, signature): var_name = f"arg_{next(name_cnt)}" buf = V.graph.try_get_buffer(arg_name) if buf: result.writeline( f"{var_name} = rand_strided({V.graph.sizevars.size_hints(buf.get_size())}, {V.graph.sizevars.size_hints(buf.get_stride())}, device='{buf.get_device()}', dtype={buf.get_dtype()})" # noqa: B950 line too long ) elif arg_name in V.graph.constants: # note that random seed is put in V.graph.constants const_tensor = V.graph.constants[arg_name] result.writeline( f"{var_name} = rand_strided({V.graph.sizevars.size_hints(const_tensor.size())}, {V.graph.sizevars.size_hints(const_tensor.stride())}, device='{const_tensor.device}', dtype={const_tensor.dtype})" # type: ignore[arg-type] # noqa: B950 line too long ) elif isinstance(arg_sig, SizeArg): symval_hint = V.graph.sizevars.size_hint(arg_sig.expr) # Force the seed_offset to be 0 so calls to the same kernel # using different seed offset will have the same benchmark harness. # We can dedup kernel definitions in this case. 
if "seed_offset" in arg_sig.name: symval_hint = 0 result.writeline(f"{var_name} = {symval_hint}") elif isinstance(arg_sig, WorkspaceArg): device = V.graph.get_current_device_or_throw() count = V.graph.sizevars.size_hint(arg_sig.count) result.writeline( f"{var_name} = torch.zeros({count}, device='{device}', dtype={arg_sig.dtype})" ) else: raise KeyError( f"Don't find the buffer or const tensor for {arg_name}" ) var_names.append(var_name) result.writeline(f"return {', '.join(var_names)},") result.writelines(["\n", "\n", "def call(args):"]) if grid is None: grid = [] extra_args = [] extra_args_str = None for tree in self.active_range_trees(): expr = pexpr(V.graph.sizevars.size_hint(tree.numel)) extra_args.append(expr) if not tree.is_reduction: grid.append(expr) if self.need_numel_args(): extra_args_str = ", ".join(map(str, extra_args)) + ", " else: extra_args_str = "" grid_arg = f"{extra_args_str}grid=grid({', '.join(grid)})" else: grid_arg = f"grid={grid}" current_device = V.graph.get_current_device_or_throw() index = current_device.index with result.indent(): result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") with result.indent(): result.writeline( V.graph.device_ops.set_device(index) ) # no-op to ensure context stream_name = f"stream{index}" result.writeline(f"{stream_name} = get_raw_stream({index})") result.writeline( f"{str(Placeholder.KERNEL_NAME)}.run(*args, {grid_arg}, stream={stream_name})" ) # benchmark all configs result.writelines(["\n", "\n", "def benchmark_all_configs(args):"]) with result.indent(): result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") with result.indent(): result.writeline( V.graph.device_ops.set_device(index) ) # no-op to ensure context result.writeline( f"return {str(Placeholder.KERNEL_NAME)}.benchmark_all_configs(*args, {grid_arg})" ) result.writelines(["\n", "\n", "if __name__ == '__main__':"]) with result.indent(): result.writeline( "from torch._inductor.runtime.benchmarking import benchmarker" ) result.writeline("") result.writeline("args = get_args()") result.writeline( "ms = benchmarker.benchmark_gpu(lambda: call(args), rep=40)" ) result.writeline(f"num_gb = {num_gb}") result.writeline("gb_per_s = num_gb / (ms / 1e3)") result.writeline( 'print(f"{ms:.3f}ms {num_gb:.3f}GB {gb_per_s:.2f}GB/s")' ) return result def imports_for_benchmark_kernel(self): return textwrap.dedent( """ from torch._dynamo.testing import rand_strided {} import torch from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid """.format( V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") ) ) def _get_heuristic(self): if self.fixed_config: return "fixed_config" elif self.cooperative_reduction: return "cooperative_reduction" elif self.persistent_reduction: assert self.inside_reduction return "persistent_reduction" elif self.inside_reduction: return "reduction" return "pointwise" @staticmethod def inductor_meta_common(): inductor_meta = { "backend_hash": torch.utils._triton.triton_hash_with_backend(), "are_deterministic_algorithms_enabled": torch.are_deterministic_algorithms_enabled(), "assert_indirect_indexing": config.assert_indirect_indexing, "autotune_local_cache": config.autotune_local_cache, "autotune_pointwise": config.triton.autotune_pointwise, "autotune_remote_cache": config.autotune_remote_cache, "force_disable_caches": config.force_disable_caches, "dynamic_scale_rblock": config.dynamic_scale_rblock, "max_autotune": config.max_autotune, "max_autotune_pointwise": config.max_autotune_pointwise, "min_split_scan_rblock": 
config.triton.min_split_scan_rblock, "spill_threshold": config.triton.spill_threshold, "store_cubin": config.triton.store_cubin, } if torch.version.hip is not None: inductor_meta["is_hip"] = True if config.is_fbcode(): inductor_meta["is_fbcode"] = True if config.profile_bandwidth: inductor_meta["profile_bandwidth"] = config.profile_bandwidth inductor_meta["profile_bandwidth_regex"] = config.profile_bandwidth_regex inductor_meta["profile_bandwidth_output"] = config.profile_bandwidth_output inductor_meta[ "profile_bandwidth_with_do_bench_using_profiling" ] = config.profile_bandwidth_with_do_bench_using_profiling if config.coordinate_descent_tuning: inductor_meta[ "coordinate_descent_tuning" ] = config.coordinate_descent_tuning inductor_meta[ "coordinate_descent_search_radius" ] = config.coordinate_descent_search_radius inductor_meta[ "coordinate_descent_check_all_directions" ] = config.coordinate_descent_check_all_directions return inductor_meta def codegen_kernel(self, name=None): code = IndentedBuffer() size_hints = [] for numel in self.numels.values(): numel_hint = V.graph.sizevars.symbolic_hint(numel) if not isinstance(numel_hint, (int, sympy.Integer)): # This default heuristic hint was picked carefully: it is # large, to ensure that we don't shrink the block size (since # if you don't have many elements, it'd be wasteful to pick a # large block size). Since we don't know how many elements we # might have, we should be OK with some inefficiency to make # sure we handle the large case well. 8192 is the largest # block size we support, so we pick that. # # If we have a better hint for unbacked SymInts (e.g., because # a user told us, or we are tracking upper bounds) we could # use that here. size_hint = 8192 else: size_hint = next_power_of_2(int(numel_hint)) size_hints.append(size_hint) if not self.inside_reduction: size_hints.pop() if name is None: code.splice(gen_common_triton_imports()) device_type = V.graph.get_current_device_or_throw().type if device_type == "cpu": code.splice("triton_helpers.set_driver_to_cpu()") else: code.splice("triton_helpers.set_driver_to_gpu()") if config.benchmark_kernel: code.splice(self.imports_for_benchmark_kernel()) argdefs, _, signature, _ = self.args.python_argdefs() # maps actual expression to SizeArg if it is in sizevars replacements for i, arg in enumerate(signature): if isinstance(arg, SizeArg): # mypy is unhappy about the sympy.Expr # type for the key of the dict below symbol = cast(sympy.Symbol, arg.expr) if symbol in V.graph.sizevars.inv_precomputed_replacements: signature[i] = SizeArg( arg.name, V.graph.sizevars.inv_precomputed_replacements[symbol] ) mutated_args: OrderedSet[str] = OrderedSet() for mutation in self.mutations: if mutation in self.args.input_buffers: mutated_args.add(self.args.input_buffers[mutation]) if ( mutation in self.args.inplace_buffers and mutation not in V.graph.removed_buffers and mutation not in self.removed_buffers ): mutated_args.add(self.args.inplace_buffers[mutation].inner_name) if mutation in self.args.output_buffers: mutated_args.add(self.args.output_buffers[mutation]) # Note: [Workspace Mutation] # workspace arguments are mutated, but are not marked as mutations in self.mutations # because their buffers are added during codegen, and aren't tracked during # lowering/scheduling. So we add them as mutated_args explicitly below. 
# # In the logic below, we only mark the workspaces a mutated if they are marked with # zero_fill: that's because, if we don't expect the buffer to be pre-filled with # zeros, then, although we still mutate the data, we don't care about those # mutations because we don't make any assumptions about the contents of the # workspace buffer. Similarly, ZERO_PER_GRAPH requires the kernel to return # the buffer back to its original state. for argname, arg in zip(argdefs, signature): if ( isinstance(arg, WorkspaceArg) and arg.zero_mode == WorkspaceZeroMode.ZERO_ON_CALL ): mutated_args.add(argname) mutated_args = sorted(mutated_args) triton_meta_signature = signature_to_meta( signature, size_dtype=self.index_dtype, argdefs=argdefs ) triton_meta = { "signature": triton_meta_signature, "device": DeviceProperties.create(V.graph.get_current_device_or_throw()), "constants": {}, } # Skip memory optimization for forward of the training loop where we expect # every new node will increase the peak memory and our greedy approach would # introduce a lot of unnecessary cpu copies. optimize_mem = V.graph.is_inference or V.graph.is_backward inductor_meta = { "autotune_hints": set(self.autotune_hints), "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), "mutated_arg_names": mutated_args, "optimize_mem": optimize_mem, "no_x_dim": self.no_x_dim, "num_load": self.num_load, "num_reduction": self.num_reduction, **self.inductor_meta_common(), } if self.cooperative_reduction: inductor_meta["persistent_reduction"] = self.persistent_reduction num_gb = None if config.benchmark_kernel or config.profile_bandwidth: num_gb = self.estimate_kernel_num_bytes() / 1e9 inductor_meta["kernel_num_gb"] = num_gb for tree in self.active_range_trees(): sizearg = SizeArg(f"{tree.prefix}numel", tree.numel) signature.append(sizearg) triton_meta_signature[sizearg.name] = signature_of( sizearg, size_dtype=self.index_dtype ) argdefs.append(f"{tree.prefix}numel") # constexpr version causes issues, see # https://github.com/pytorch/torchdynamo/pull/1362 # triton_meta["constants"][len(argdefs)] = V.graph.sizevars.size_hint( # tree.numel # ) # argdefs.append(f"{tree.prefix}numel: tl.constexpr") triton_meta["configs"] = [config_of(signature)] # Triton compiler includes equal_to_1 args into constants even # when they are not constexpr. otherwise there may be a segfault # during launching the Inductor-compiled Triton kernel. # https://github.com/pytorch/pytorch/issues/120478#issuecomment-1962822307 # https://github.com/openai/triton/blob/231efe9ed2d200be0f69a07c298e4342b08efe3d/python/triton/runtime/jit.py#L384 for arg_num in triton_meta["configs"][0].equal_to_1: # type: ignore[index] triton_meta["constants"][signature[arg_num].name] = 1 # type: ignore[index] self.triton_meta = triton_meta for tree in self.range_trees: if tree.is_reduction and self.persistent_reduction: # RBLOCK for persistent_reduction is defined in codegen_static_numels continue if tree.tensor_dim is None: continue argdefs.append(f"{tree.prefix.upper()}BLOCK : tl.constexpr") if self.cooperative_reduction: argdefs.append("RSPLIT : tl.constexpr") self.codegen_body() for helper in self.helper_functions: code.writeline("") code.splice(helper) if self.fixed_config: heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( config={self.fixed_config.config!r}, filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r} )
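            # For orientation, the pieces assembled above typically render to a kernel
            # header roughly like the following (illustrative only; field contents and
            # the chosen heuristic vary by kernel and by PyTorch version):
            #
            #   @triton_heuristics.pointwise(
            #       size_hints=[1024],
            #       filename=__file__,
            #       triton_meta={'signature': {...}, 'device': ..., 'constants': {...}},
            #       inductor_meta={'kernel_name': 'triton_poi_fused_add_0', ...},
            #   )
            #   @triton.jit
            #   def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
            #       ...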
alexanderb14/pytorch
torch/_inductor/codegen/triton.py
https://github.com/alexanderb14/pytorch/blob/8da4224042665686de22f8e351a0b42bfa42cab8/torch/_inductor/codegen/triton.py
# mypy: allow-untyped-defs from __future__ import annotations import collections import contextlib import dataclasses import functools import itertools import logging import os import re import textwrap from functools import lru_cache from typing import ( Any, Callable, cast, Dict, Iterable, List, Optional, Sequence, Tuple, Type, TYPE_CHECKING, Union, ) import sympy from sympy.printing.precedence import PRECEDENCE import torch import torch._logging from torch._dynamo.utils import identity, preserve_rng_state from torch._prims_common import is_integer_dtype from torch.utils._ordered_set import OrderedSet from torch.utils._sympy.functions import CeilDiv, FloorDiv, ModularIndexing from torch.utils._triton import has_triton_package from ...utils._sympy.symbol import free_symbol_is_type, prefix_str, symbol_is_type, SymT from ...utils._sympy.value_ranges import ValueRanges from .. import config, ir, metrics from ..codecache import code_hash, get_path, PyCodeCache from ..runtime.benchmarking import benchmarker from ..runtime.hints import ( AutotuneHint, DeviceProperties, TRITON_MAX_BLOCK, TRITON_MAX_RSPLIT, ) from ..runtime.runtime_utils import get_max_y_grid, next_power_of_2 from ..runtime.triton_heuristics import ( cooperative_reduction_grid, grid as default_grid_fn, ) from ..scheduler import BaseSchedulerNode, FusedSchedulerNode, Scheduler, SchedulerNode from ..utils import ( DelayReplaceLine, get_bounds_index_expr, get_fused_kernel_name, get_kernel_metadata, is_welford_reduction, Placeholder, sympy_subs, upcast_compute_type, ) from ..virtualized import _ops as ops, OpsHandler, ReductionType, StoreMode, V from ..wrapper_benchmark import get_kernel_category_by_source_code from .block_analysis import BlockPatternMatcher from .common import ( BackendFeature, CSE, CSEVariable, DeferredLine, IndentedBuffer, OpOverrides, PythonPrinter, SizeArg, TensorArg, WorkspaceArg, WorkspaceZeroMode, ) from .simd import ( constant_repr, IterationRanges, IterationRangesEntry, IterationRangesRoot, pexpr, prefix_is_reduction, SIMDKernel, SIMDScheduling, ) from .triton_utils import ( config_of, should_unwrap_unspec_arg, signature_of, signature_to_meta, ) if TYPE_CHECKING: from ..ir import IRNode log = logging.getLogger(__name__) perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints") schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") fusion_log = torch._logging.getArtifactLogger(__name__, "fusion") @lru_cache(None) def gen_attr_descriptor_import(): """ import AttrsDescriptor if the triton version is new enough to have this class defined. """ if not has_triton_package(): return "" import triton.compiler.compiler # Note: this works because triton.compiler.compiler imports AttrsDescriptor from triton.backends.compiler # When support for the legacy AttrsDescriptor is removed then this import path should be changed. 
if hasattr(triton.compiler.compiler, "AttrsDescriptor"): return "from triton.compiler.compiler import AttrsDescriptor" else: return "" @lru_cache(None) def gen_common_triton_imports(): imports = IndentedBuffer() imports.splice( """ import triton import triton.language as tl """ ) if attr_desc := gen_attr_descriptor_import(): imports.writeline(attr_desc) imports.splice( """ from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties """ ) return imports.getvalue() class TritonSymbols: """ Stores sympy.Symbol instances and constants associated with triton codegen. """ block_offsets = { symt: sympy.Symbol(f"{prefix_str[symt]}offset", integer=True, nonnegative=True) for symt in [SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK, SymT.RINDEX] } block_sizes = { symt: sympy.Symbol( f"{prefix_str[symt].upper()}BLOCK", integer=True, positive=True ) for symt in [SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK, SymT.RINDEX] } @classmethod def get_block_size(cls, tree: IterationRanges) -> sympy.Symbol: return cls.block_sizes[tree.symt] @classmethod def get_block_offset(cls, tree: IterationRanges) -> sympy.Symbol: return cls.block_offsets[tree.symt] @dataclasses.dataclass class IndexingOptions: index_str: str mask_vars: OrderedSet[str] mask_str: str expand_str: Optional[str] _has_rindex: bool index: sympy.Expr def has_mask(self): return bool(self.mask_vars) def has_indirect(self): return free_symbol_is_type(self.index, SymT.TMP) def has_rindex(self): return self._has_rindex def has_tmpmask(self): return "tmp" in self.mask_str def has_rmask(self): return "rmask" in self.mask_str @dataclasses.dataclass class BlockPtrOptions: params: BlockParameters constant_offset: sympy.Expr order: List[int] mask_vars: OrderedSet[str] broadcast_shape: Sequence[sympy.Expr] broadcasting_dims: List[bool] final_shape: Sequence[sympy.Expr] _boundary_check: Optional[List[int]] = None @property def shape(self) -> List[sympy.Expr]: return self.params.shape @property def block_shape(self) -> List[sympy.Expr]: return self.params.block_shape @property def strides(self) -> List[sympy.Expr]: return self.params.strides @property def offsets(self) -> List[sympy.Expr]: return self.params.offsets def codegen_broadcast_and_reshape( self, value: str, initial_shape: Sequence[sympy.Expr], final_shape: Sequence[sympy.Expr], allow_implicit: bool, ) -> str: """ Generate a broadcast and a reshape for the block pointer. This restores stride-0 dimensions which were removed from the block pointer. """ # Reshape to add singletons. pre_broadcast_shape = [ sympy.S.One if is_broadcasting else dim for dim, is_broadcasting in zip( self.broadcast_shape, self.broadcasting_dims ) ] value = triton_reshape(value, initial_shape, pre_broadcast_shape) # Broadcast singletons. # For loads, we can often implicitly broadcast singleton dimensions. # We need an explicit broadcast for stores, or if the final reshape does more # than add singletons. sizevars = V.graph.sizevars require_broadcast = any(self.broadcasting_dims) and ( len(pre_broadcast_shape) != len(final_shape) or any( not ( sizevars.statically_known_equals(pre_dim, 1) or sizevars.statically_known_equals(pre_dim, post_dim) ) for pre_dim, post_dim in zip(pre_broadcast_shape, final_shape) ) ) if not allow_implicit or require_broadcast: value = f"tl.broadcast_to({value}, {V.kernel.index_to_str(self.broadcast_shape)})" # Reshape to the final shape. 
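        # Illustrative shapes (assuming a reduction kernel whose r-dimension had
        # stride 0): the block pointer loads [XBLOCK], the pre-broadcast reshape
        # above produces [XBLOCK, 1], the broadcast expands to [XBLOCK, RBLOCK],
        # and the reshape below matches the kernel's dense tile shape.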
value = triton_reshape(value, self.broadcast_shape, final_shape) return value @staticmethod def create( *, params: BlockParameters, constant_offset: sympy.Expr, range_trees: List[IterationRangesEntry], mask_vars: OrderedSet[str], get_max_block: Callable[[str], int], ) -> BlockPtrOptions: """Helper to create a BlockPtrOptions instance""" sizevars = V.graph.sizevars def lookup_size(exprs: Iterable[sympy.Expr]) -> List[sympy.Expr]: return [sizevars.lookup_precomputed_size(expr) for expr in exprs] # Look up precomputed sizes params.shape = lookup_size(params.shape) params.strides = lookup_size(params.strides) # Strip out dimensions of stride 0. # These will be restored with tl.broadcast_to. broadcasting_dims = [ sizevars.statically_known_equals(stride, 0) for stride in params.strides ] # Strip out dimensions of size 1. # These will be restored by tl.reshape. singleton_dims = [ sizevars.statically_known_equals(dim, 1) for dim in params.block_shape ] if all(singleton_dims): # Handle a pure singletons, e.g. [1, 1] singleton_dims[-1] = False # Record the post-broadcast shape before broadcasting dims are removed. # The pre-broadcast shape is identical to this, except broadcasting dims are # replaced with 1. broadcast_shape = [ dim for dim, is_singleton in zip(params.block_shape, singleton_dims) if not is_singleton ] # Combine all removable dims. removable_dims = [any(dims) for dims in zip(singleton_dims, broadcasting_dims)] def remove_dims(it): """Removes any broadcasting or singleton dims from a given sequence""" return [ item for item, is_removable in zip(it, removable_dims) if not is_removable ] # Drop removable dimensions from the input. params = BlockParameters( **{key: remove_dims(val) for key, val in dataclasses.asdict(params).items()} ) # Compute the final shape, adjusting for special kernel types. final_shape = [TritonSymbols.get_block_size(tree) for tree in range_trees] if V.kernel.no_x_dim: assert range_trees[0].prefix == "x" final_shape.pop(0) if ( not V.kernel.inside_reduction and len(params.strides) == len(V.kernel.numels) - 1 and V.kernel.numels["r"] != 1 ): # Need to expand rank by 1 to match rank when self.inside_reduction=True final_shape.append(sympy.S.One) result = BlockPtrOptions( params=params, constant_offset=V.graph.sizevars.lookup_precomputed_size(constant_offset), order=list(reversed(range(len(params.shape)))), mask_vars=mask_vars, final_shape=final_shape, broadcast_shape=broadcast_shape, broadcasting_dims=broadcasting_dims, ) result.compute_boundary_check(get_max_block) return result def replace_roffset(self, expr: sympy.Expr, replacement: sympy.Expr) -> sympy.Expr: """ Replaces instances of roffset with the new expression. 
""" roffset = TritonSymbols.block_offsets[SymT.RINDEX] return sympy_subs(expr, {roffset: replacement}) def format(self, name: str, roffset=True) -> str: """ Codegen a call to tl.make_block_ptr() Args: name: variable name for pointer roffset: should roffset be included in offsets=..., for use with tl.advance() Returns: "tl.make_block_ptr(...)" """ f = V.kernel.index_to_str offsets = [*self.offsets] if not roffset: offsets = [self.replace_roffset(offset, sympy.S.Zero) for offset in offsets] args = [ ( f"{name} + ({f(self.constant_offset)})" if self.constant_offset != 0 else name ), f"shape={f(self.shape)}", f"strides={f(self.strides)}", f"block_shape={f(self.block_shape)}", f"order={f(self.order)}", f"offsets={f(offsets)}", ] return f"tl.make_block_ptr({', '.join(args)})" def compute_boundary_check(self, get_max_block: Callable[[str], int]) -> None: """List of indices to pass to tl.load(boundary_check=...)""" sizevars = V.graph.sizevars # Substitute maximum block sizes in shape expressions. # This works in multiple_of checks because block sizes are powers of 2. block_to_max: Dict[sympy.Expr, Any] = { block_size: get_max_block(prefix_str[symt]) for symt, block_size in TritonSymbols.block_sizes.items() } self._boundary_check = [ idx for idx in range(len(self.shape)) if ( not sizevars.statically_known_equals(self.strides[idx], sympy.S.Zero) and not sizevars.statically_known_multiple_of( self.shape[idx], self.block_shape[idx] ) and not sizevars.statically_known_multiple_of( self.shape[idx], sympy_subs(self.block_shape[idx], block_to_max) ) and not ( V.kernel.no_x_dim and self.block_shape[idx] == TritonSymbols.block_sizes[SymT.XBLOCK] ) ) ] def boundary_check(self): assert self._boundary_check is not None return self._boundary_check def advance_roffset(self): """ Codegen string to pass to tl.advance(name, ...). Advance is the difference between offsets in each loop iteration. To compute it, we replace roffset with multiples of RBLOCK. Since we expect roffset to vary in range(0, rnumel, RBLOCK), the first iteration has roffset=0, while the second has roffset=RBLOCK. 
""" rblock = TritonSymbols.block_sizes[SymT.RINDEX] advance = [ ( self.replace_roffset(offset, rblock) - self.replace_roffset(offset, sympy.S.Zero) ) for offset in self.offsets ] return V.kernel.index_to_str(advance) def has_indirect(self): return False # block_ptr can't do indirect indexing def has_rindex(self) -> bool: return any(free_symbol_is_type(expr, SymT.RINDEX) for expr in self.block_shape) def has_rmask(self): return self.has_rindex() def has_tmpmask(self): return False # block_ptr can't do indirect indexing def has_mask(self): return bool(self.boundary_check()) def triton_reshape( value: str, old_shape: Sequence[sympy.Expr], new_shape: Sequence[sympy.Expr] ): """Workaround https://github.com/openai/triton/issues/2836""" assert isinstance(old_shape, list) and isinstance(new_shape, list) old_shape_str = [V.kernel.index_to_str(shape) for shape in old_shape] new_shape_str = [V.kernel.index_to_str(shape) for shape in new_shape] if old_shape_str == new_shape_str: return value if [s for s in new_shape_str if s != "1"] != old_shape_str: return f"tl.reshape({value}, [{', '.join(new_shape_str)}])" # rewrite to [:, None] syntax, which is less buggy idx = 0 expand = [] for size in new_shape_str: if idx < len(old_shape_str) and size == old_shape_str[idx]: expand.append(":") idx += 1 else: assert size == "1" expand.append("None") assert idx == len(old_shape_str) return f"{value}[{', '.join(expand)}]" # NB: Inheriting from PythonPrinter is somewhat dangerous, because there are a # number of operators which Triton "implements", but in a way that is # inconsistent with Python semantics (and consistent with C semantics). We # must override all of these, or it is potential silent correctness problem class TritonPrinter(PythonPrinter): def _print_TruncToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.trunc({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_Float(self, expr): if config.is_fbcode() and torch.version.hip: ret = f"{expr}" else: ret = f"tl.full([], {expr}, tl.float64)" return ret def _print_ToFloat(self, expr): assert len(expr.args) == 1 s = self.parenthesize(expr.args[0], PRECEDENCE["Atom"] - 0.5) return f"{s}.to(tl.float64)" def _print_PythonMod(self, expr): quot, div = expr.args if quot.is_nonnegative and div.is_nonnegative: return self.stringify(expr.args, " % ", PRECEDENCE["Atom"] - 0.5) quot_s = self._print(quot) div_s = self._print(div) return f"triton_helpers.remainder_integer({quot_s}, {div_s})" def _print_FloorDiv(self, expr): assert expr.is_integer quot, div = expr.args if quot.is_nonnegative and div.is_nonnegative: return self.stringify(expr.args, " // ", PRECEDENCE["Atom"] - 0.5) quot_s = self._print(quot) div_s = self._print(div) return f"triton_helpers.div_floor_integer({quot_s}, {div_s})" # TODO: This is wrong, when lhs, rhs > 2**53, Python does a higher # precision algorithm, which we would need to replicate here def _print_IntTrueDiv(self, expr): return self.stringify(expr.args, " / ", PRECEDENCE["Atom"] - 0.5) # NB: sympy.floor/ceiling produce integers, so we have to do the # conversion to index dtype def _print_floor(self, expr): assert len(expr.args) == 1 return ( f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_FloorToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_ceiling(self, expr): assert len(expr.args) == 1 return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})" def 
_print_CeilToInt(self, expr): assert len(expr.args) == 1 return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})" def _helper_sqrt(self, expr): return f"libdevice.sqrt({self._print(expr)}.to(tl.float32))" def _print_FloatPow(self, expr): return ( f"libdevice.pow({self._print(expr.args[0])}, {self._print(expr.args[1])})" ) _print_PowByNatural = _print_FloatPow def _print_Where(self, expr): c = self.doprint(expr.args[0]) p = self.doprint(expr.args[1]) q = self.doprint(expr.args[2]) return f"tl.where({c}, {p}, {q})" def _print_min_max_helper(self, expr: sympy.Expr, cmp: str) -> str: """ Helper for max/min code genereration. cmp: > or < """ nargs = len(expr.args) if len(expr.args) == 1: return self._print(expr.args[0]) mid = len(expr.args) // 2 cls = type(expr) a = self._print(cls(*expr.args[:mid])) b = self._print(cls(*expr.args[mid:])) # Use a macro so we can propagate constexprs. # https://github.com/triton-lang/triton/issues/3815 a, b = tuple(f"({x})" for x in (a, b)) assert cmp in (">", "<"), f"Unexpected comparator: '{cmp}'" return f"({a} * ({a} {cmp}= {b}) + {b} * ({b} {cmp} {a}))" def _print_Min(self, expr): return self._print_min_max_helper(expr, "<") def _print_Max(self, expr): return self._print_min_max_helper(expr, ">") def _print_Abs(self, expr): assert len(expr.args) == 1 return f"tl_math.abs({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_cos(self, expr): assert len(expr.args) == 1 return f"libdevice.cos(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_cosh(self, expr): assert len(expr.args) == 1 return f"libdevice.cosh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_acos(self, expr): assert len(expr.args) == 1 return f"libdevice.acos(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_sin(self, expr): assert len(expr.args) == 1 return f"libdevice.sin(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_sinh(self, expr): assert len(expr.args) == 1 return f"libdevice.sinh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_asin(self, expr): assert len(expr.args) == 1 return f"libdevice.asin(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_tan(self, expr): assert len(expr.args) == 1 return f"libdevice.tan(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_tanh(self, expr): assert len(expr.args) == 1 return f"libdevice.tanh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_atan(self, expr): assert len(expr.args) == 1 return f"libdevice.atan(({self._print(expr.args[0])}).to(tl.float32))" def _print_RoundToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.llrint({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_RoundDecimal(self, expr): assert len(expr.args) == 2 number, ndigits = expr.args if number.is_integer: # ndigits < 0 should have been filtered by the sympy function assert ndigits < 0 raise ValueError( f"For integer inputs, only non-negative ndigits are currently supported, but got {ndigits}." 
) number_str = self.parenthesize(number, PRECEDENCE["Mul"]) return f"libdevice.nearbyint(1e{ndigits} * {number_str}) * 1e{-ndigits}" texpr = TritonPrinter().doprint # correct cases where Triton types names don't match PyTorch _triton_type_mapping = { "tl.bool": "tl.int1", "tl.float8_e4m3fn": "tl.float8e4nv", "tl.float8_e5m2": "tl.float8e5", "tl.float8_e4m3fnuz": "tl.float8e4b8", "tl.float8_e5m2fnuz": "tl.float8e5b16", } _triton_type_re = re.compile(r"^.*[.]") def triton_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type""" triton_type_name = _triton_type_re.sub("tl.", str(dtype)) return _triton_type_mapping.get(triton_type_name, triton_type_name) def triton_compute_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type and upcast [b]float16 to float32""" return triton_type(upcast_compute_type(dtype)) def _get_primitive_bitwidth(dtype: torch.dtype) -> int: """Number of bits of triton_compute_type()""" dtype = upcast_compute_type(dtype) itemsize = getattr(dtype, "itemsize", None) if itemsize: return itemsize * 8 else: return -1 def triton_store_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type, with fix for storing tl.bool""" if dtype == torch.bool: dtype = torch.int8 return triton_type(dtype) def upcast_acc_dtype(dtype: torch.dtype) -> torch.dtype: """Implicit upcasts used for Triton reduction types""" if is_integer_dtype(dtype) and dtype.is_signed and dtype.itemsize <= 4: return torch.int32 return upcast_compute_type(dtype) def triton_acc_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type, with reduction upcasts""" return triton_compute_type(upcast_acc_dtype(dtype)) class TritonCSEVariable(CSEVariable): def __init__(self, name, bounds: ValueRanges[Any], dtype: torch.dtype) -> None: super().__init__(name, bounds, dtype) # We'll use this to track which masks the variable needs when used for indirect indexing self.mask_vars: OrderedSet[str] = OrderedSet() assert dtype is not None, "TritonCSEVariable must have dtype" def update_on_args(self, name, args, kwargs): for arg in args: if isinstance(arg, TritonCSEVariable): self.mask_vars.update(arg.mask_vars) elif isinstance(arg, sympy.Symbol) and arg.name[0] in "xyr": # most of the time index vars don't need masks associated with them # however, when index vars are used to compute indices for indirect reads # those reads should subsequently be masked, self.mask_vars.update({f"{arg.name[0]}mask"}) class TritonOverrides(OpOverrides): """Map element-wise ops to Triton""" @staticmethod def to_dtype( x, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None, use_compute_types=True, ): def _get_min_elements_per_thread( src_dtype: torch.dtype, dst_dtype: torch.dtype ) -> int: if src_dtype == dst_dtype: # No data type conversion is needed. No requirements on min_elem_per_thread. return 0 # fp8 data type conversions has min_elem_per_thread requirements. # Refer to Triton implementations here: # https://github.com/openai/triton/blob/10f59d8ce04052521c1bc0cb3a3f8b98918fc7e3/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp#L10. fp8_dtypes = ( torch.float8_e4m3fn, torch.float8_e5m2, ) # Triton doesn't support type conversions between fp8_e4m3 and fp8_e5m2. assert not ( src_dtype in fp8_dtypes and dst_dtype in fp8_dtypes and src_dtype != dst_dtype ), "Conversions between float8_e5m2 and float8_e4m3fn is not supported!" 
if src_dtype == torch.float8_e5m2 or dst_dtype == torch.float8_e5m2: return 4 if src_dtype == torch.float8_e4m3fn or dst_dtype == torch.float8_e4m3fn: return 2 # No requirements on min_elem_per_thread. return 0 if src_dtype is not None: # Both dtype and src_dtype are set. This is used by torch to(dtype=dtype). # It takes the maximum min_elem_per_thread if there are multiple fp8 conversions # in the same kernel. V.kernel.min_elem_per_thread = max( _get_min_elements_per_thread(src_dtype, dtype), V.kernel.min_elem_per_thread, ) if dtype == torch.bool: return f"({x} != 0)" elif dtype == torch.uint8: # to work around llvm uint conversion semantics # that produces 0's for negative values return f"{x}.to(tl.int8).to(tl.uint8)" if use_compute_types: out_dtype = triton_compute_type(dtype) else: out_dtype = triton_store_type(dtype) return f"{x}.to({out_dtype})" @staticmethod def to_dtype_bitcast(x, dtype: torch.dtype, src_dtype: torch.dtype): triton_dtype = triton_compute_type(dtype) # We may promote float16 or bfloat16 to float32 and cause the # bitwidth of dtype to be different from the input tensor (i.e. float32). # In such as case, we will have to convert the input tensor to # its src_type, perform bitcast, and then convert the bit-casted # tensor back to float to ensure we use values with the right precision. if ( src_dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32 ): triton_src_dtype = str(src_dtype).split(".")[-1] cast_x = f"{x}.to(tl.{triton_src_dtype})" if dtype in (torch.float16, torch.bfloat16): triton_type_name = str(dtype).split(".")[-1] triton_dtype = f"tl.{triton_type_name}" cast_x = f"{cast_x}.to({triton_dtype}, bitcast=True)" if dtype in (torch.float16, torch.bfloat16): return f"{cast_x}.to(tl.float32)" return cast_x else: src_dtype_bitwidth = _get_primitive_bitwidth(src_dtype) target_dtype_bitwidth = _get_primitive_bitwidth(dtype) bitcast = "True" if src_dtype_bitwidth == target_dtype_bitwidth else "False" return f"{x}.to({triton_dtype}, bitcast={bitcast})" @staticmethod def _shaped_constant(value, dtype, shape): type_ = torch._prims_common.dtype_to_type(dtype) triton_val = constant_repr(type_(value)) triton_type = triton_compute_type(dtype) if triton_type == "tl.float32": # Float constants are always f32 in triton return triton_val # NOTE: We use a tensor here in order to get the expected type. # Otherwise, e.g. float64 constants would be trunctated to float32. return f"tl.full({shape}, {triton_val}, {triton_type})" @classmethod def constant(cls, value, dtype): return cls._shaped_constant(value, dtype, shape=[]) @staticmethod def abs(x): return f"tl_math.abs({x})" @staticmethod def libdevice_abs(x): return f"libdevice.abs({x})" @staticmethod def exp(x): return f"tl_math.exp({x})" @staticmethod def libdevice_exp(x): return f"libdevice.exp({x})" @staticmethod def exp2(x): return f"libdevice.exp2({x})" @staticmethod def expm1(x): return f"libdevice.expm1({x})" @staticmethod def sqrt(x): if config.triton.codegen_upcast_to_fp32: return f"libdevice.sqrt({x})" else: needs_upcast = x.dtype in (torch.float16, torch.bfloat16) orig_dtype = triton_type(x.dtype) upcast_string = ".to(tl.float32)" if needs_upcast else "" downcast_string = f".to({orig_dtype})" if needs_upcast else "" return f"libdevice.sqrt({x}{upcast_string}){downcast_string}" @staticmethod def libdevice_sqrt(x): return f"libdevice.sqrt({x})" @staticmethod def relu(x): bug = config.triton.inject_relu_bug_TESTING_ONLY if bug == "compile_error": return "compile error!" 
elif bug == "runtime_error": # NB: this only triggers runtime error as long as input # is not all zero return f'triton_helpers.device_assert_then({x} == 0, "injected assert fail", {x})' elif bug == "accuracy": return f"{x} + 1" elif bug is None: return ops.maximum(ops.constant(0, torch.int32), x) else: raise AssertionError( f"unrecognized config triton.inject_relu_bug_TESTING_ONLY = {bug!r}" ) @staticmethod def minimum(a, b): return f"triton_helpers.minimum({a}, {b})" @staticmethod def maximum(a, b): return f"triton_helpers.maximum({a}, {b})" @staticmethod def where(a, b, c): return f"tl.where({a}, {b}, {c})" @staticmethod def inline_asm_elementwise( *inputs, asm, constraints=None, dtype=torch.float32, is_pure=True, pack=1 ): triton_type = triton_compute_type(dtype) input_refs = ", ".join([str(i) for i in inputs]) if constraints is None: constraints = ", ".join(["=r"] + ["r" for _ in inputs]) return f"tl.inline_asm_elementwise('{asm}', '{constraints}', [{input_refs}], dtype={triton_type}, is_pure={is_pure}, pack={pack})" # noqa: B950 @staticmethod def cos(x): return f"tl_math.cos({x})" @staticmethod def libdevice_cos(x): return f"libdevice.cos({x})" @staticmethod def sin(x): return f"tl_math.sin({x})" @staticmethod def libdevice_sin(x): return f"libdevice.sin({x})" @classmethod def index_expr(cls, expr, dtype): raise NotImplementedError("ops.index_expr not implemented outside a kernel") @staticmethod def masked(mask, body, other): raise NotImplementedError("ops.masked not implemented outside a kernel") @staticmethod def lgamma(x): return f"libdevice.lgamma({x})" @staticmethod def erf(x): return f"libdevice.erf({x})" @staticmethod def cosh(x): return f"libdevice.cosh({x})" @staticmethod def sinh(x): return f"libdevice.sinh({x})" @staticmethod def acos(x): return f"libdevice.acos({x})" @staticmethod def acosh(x): return f"libdevice.acosh({x})" @staticmethod def asin(x): return f"libdevice.asin({x})" @staticmethod def asinh(x): return f"libdevice.asinh({x})" @staticmethod def atan2(x, y): return f"libdevice.atan2({x}, {y})" @staticmethod def atan(x): return f"libdevice.atan({x})" @staticmethod def atanh(x): return f"libdevice.atanh({x})" @staticmethod def copysign(x, y): return f"libdevice.copysign({x}, {y})" @staticmethod def erfc(x): return f"libdevice.erfc({x})" @staticmethod def erfinv(x): return f"libdevice.erfinv({x})" @staticmethod def hypot(x, y): return f"libdevice.hypot({x}, {y})" @staticmethod def log10(x): return f"libdevice.log10({x})" @staticmethod def log2(x): return f"libdevice.log2({x})" @staticmethod def nextafter(x, y): return f"libdevice.nextafter({x}, {y})" @staticmethod def logical_and(a, b): return f"{a} & {b}" @staticmethod def logical_not(a): return f"{a} == 0" @staticmethod def logical_or(a, b): return f"{a} | {b}" @staticmethod def logical_xor(a, b): return f"({a} ^ {b})" @staticmethod def bitwise_and(a, b): return f"{a} & {b}" @staticmethod def bitwise_not(a): return f"~{a}" @staticmethod def bitwise_or(a, b): return f"{a} | {b}" @staticmethod def bitwise_xor(a, b): return f"{a} ^ {b}" @staticmethod def bitwise_left_shift(a, b): return f"{a} << {b}" @staticmethod def bitwise_right_shift(a, b): return f"{a} >> {b}" @staticmethod def rand(seed, offset): offset = f"({offset}).to(tl.uint32)" return f"tl.rand({seed}, {offset})" @staticmethod def randn(seed, offset): offset = f"({offset}).to(tl.uint32)" return f"tl.randn({seed}, {offset})" @staticmethod def randint64(seed, offset, low, high): offset = f"({offset}).to(tl.uint32)" return f"triton_helpers.randint64({seed}, 
{offset}, {low}, {high})" @staticmethod def load_seed(name, offset): raise NotImplementedError("ops.load_seed not implemented outside a kernel") @staticmethod def rsqrt(x): return f"libdevice.rsqrt({x})" @staticmethod def log1p(x): return f"libdevice.log1p({x})" @staticmethod def tan(x): return f"libdevice.tan({x})" @staticmethod def tanh(x): return f"libdevice.tanh({x})" @staticmethod def sigmoid(x): return f"tl.sigmoid({x})" @staticmethod def signbit(x): # XX: This is wrong for the value -0.0 in floating point return ( f"(libdevice.signbit({x}) != 0) if ({x}).dtype is tl.float32 else {x} < 0" ) @staticmethod def fmod(a, b): return f"libdevice.fmod({a}, {b})" @staticmethod def pow(a, b): return f"libdevice.pow({a}, {b})" @staticmethod def log(x): return f"tl_math.log({x})" @staticmethod def libdevice_log(x): return f"libdevice.log({x})" @staticmethod def isinf(x): return f"libdevice.isinf({x}).to(tl.int1)" @staticmethod def isnan(x): return f"libdevice.isnan({x}).to(tl.int1)" @staticmethod def round(x): return f"libdevice.nearbyint({x})" @staticmethod def floor(x): return f"libdevice.floor({x})" @staticmethod def floordiv(a, b): # See the comment in lowering.div_mode. a and b are integer type. # Similar to div_floor_kernel_cuda in pytorch core. # Notice that // in triton behaves as truncdiv instead of floordiv quot = f"{a} // {b}" rem = f"{a} % {b}" return f"tl.where(({a} < 0) != ({b} < 0), tl.where({rem} != 0, {quot} - 1, {quot}), {quot})" @staticmethod def sign(x): z = ops.constant(0, torch.int32) left = ops.to_dtype((ops.lt(z, x)), torch.int8) right = ops.to_dtype((ops.lt(x, z)), torch.int8) sub = ops.sub(left, right) return f"{sub}.to({x}.dtype)" @staticmethod def trunc(x): return f"libdevice.trunc({x})" @staticmethod def truncdiv(a, b): # See the comment in lowering.div_mode. a and b are integer type. # Notice that // in triton behaves as truncdiv instead of floordiv return f"{a} // {b}" @staticmethod def ceil(x): return f"libdevice.ceil({x})" TritonOverrides._initialize_pointwise_overrides("triton") # Use mypy to check protocol implemented correctly def _typecheck_TritonOverrides(h: TritonOverrides) -> OpsHandler[str]: return h class TritonKernelOverrides(TritonOverrides): """Map element-wise ops to Triton within a TritonKernel Unlike TritonOverrides, these assume the code is going to be inserted into the body of the main triton kernel and so it may use indexing and mask variables which are assumed to already be defined in the current scope. """ @classmethod def constant(cls, value, dtype): # NOTE: Cannot use shape=[] as it's not supported by triton-rocm # We could use shape=[1] instead but starting with the correct # ndim avoids extra `tt.expand_dim` ops appearing in the triton IR. ndim = V.kernel.triton_tensor_ndim() shape = [1] * ndim return cls._shaped_constant(value, dtype, shape=shape) @classmethod def index_expr(cls, expr, dtype): indexing = V.kernel.indexing(expr, block_ptr=False) assert isinstance(indexing, IndexingOptions) # Our sympy expr printing casts to the current kernel index dtype. 
# we only respect non int32-int64 dtypes and otherwise use current kernel indexing dtype index_dtype = torch.int32 if V.kernel.index_dtype == "tl.int32" else torch.int64 dtype = dtype if dtype not in (torch.int32, torch.int64) else index_dtype var = V.kernel.cse.generate( V.kernel.compute, indexing.index_str, bounds=get_bounds_index_expr(expr), dtype=dtype, ) if dtype not in (torch.int32, torch.int64): var = V.kernel.cse.generate( V.kernel.compute, cls.to_dtype(var, dtype), dtype=upcast_compute_type(dtype), ) else: # TODO: we are not always consistent in enforcing that the output of the index expr printing # results in the indexing dtype. So if we detect that we have an input which might type promote # to a dtype other than indexing dtype, add a cast. # Trying to avoid dtype = index_dtype for index_var in expr.free_symbols: if symbol_is_type(index_var, SymT.TMP): dtype = torch.promote_types( dtype, V.kernel.cse.varname_map[index_var.name].dtype ) if dtype != index_dtype: var = V.kernel.cse.generate( V.kernel.compute, cls.to_dtype(var, index_dtype), dtype=index_dtype, ) var.mask_vars = indexing.mask_vars return var @staticmethod def masked(mask, body, other): if mask is not None and torch.version.hip is not None: mask = V.kernel.cse.generate( V.kernel.compute, f"{mask}.to(tl.int1)", dtype=torch.bool, ) nodes = body.graph.find_nodes(op="output") assert nodes, "graph for body does not contain an output" need_where = False for node in nodes: for arg in node.args: if arg.target != "load" or should_unwrap_unspec_arg(arg.args[0]): need_where = True value = None if need_where else other with V.kernel.mask_loads(mask, value=value) as new_mask: result = body() if need_where: # Remove once CSEVariables track the dtype if result.bounds.is_bool: other = bool(other) # Take dtype from result to prevent accidental promotion other = V.kernel.cse.generate( V.kernel.compute, f"tl.full({result}.shape, {constant_repr(other)}, {result}.dtype)", bounds=ValueRanges.wrap(other), dtype=result.dtype, ) ret = ops.where(new_mask, result, other) else: ret = result ret.mask_vars.discard(new_mask) return ret @staticmethod def load_seed(name, offset): var = V.kernel.args.input(name) return ( f"tl.load({var} + {V.kernel.args.seed_offset('load_seed_offset', offset)})" ) @staticmethod def frexp(x): cache_key = f"frexp({x})" if cse_val := V.kernel.cse.try_get(cache_key): return cse_val mantissa = V.kernel.cse.newvar(dtype=x.dtype) exponent = V.kernel.cse.newvar(dtype=torch.int32) V.kernel.compute.writeline( f"{mantissa}, {exponent} = triton_helpers.frexp({x})" ) V.kernel.cse.put(cache_key, (mantissa, exponent)) return (mantissa, exponent) # Use mypy to check protocol implemented correctly def _typecheck_TritonKernelOverrides(h: TritonKernelOverrides) -> OpsHandler[str]: return h class HelperFunctions: """An ordered set of helper functions.""" _templates_seen: Dict[str, str] # Template code to function name finalized_helpers: List[str] def __init__(self) -> None: self._templates_seen = {} self.finalized_helpers = [] def add(self, template_code: str, *, base_name="_triton_helper_fn") -> str: """This accepts a function definition with the function name left as a format specifier e.g. @triton.jit def {name}(arg0, arg1): return arg0 + arg1 We add the templated code to the function set and return the name assigned to that function. 
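        Adding the same template a second time returns the previously assigned
        name (e.g. _triton_helper_fn0) rather than emitting a duplicate
        definition; each new template gets the next numeric suffix.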
""" existing_name = self._templates_seen.get(template_code) if existing_name is not None: # Don't duplicate existing helpers return existing_name name = f"{base_name}{len(self.finalized_helpers)}" self._templates_seen[template_code] = name self.finalized_helpers.append(template_code.format(name=name)) return name def __iter__(self): return iter(self.finalized_helpers) def __getitem__(self, idx): return self.finalized_helpers[idx] @dataclasses.dataclass class BlockParameters: """ Class representing ND block dimensions, for block pointer analysis. """ shape: List[sympy.Expr] = dataclasses.field(default_factory=list) block_shape: List[sympy.Expr] = dataclasses.field(default_factory=list) strides: List[sympy.Expr] = dataclasses.field(default_factory=list) offsets: List[sympy.Expr] = dataclasses.field(default_factory=list) def __add__(self, other: BlockParameters) -> BlockParameters: """ Concatenates block parameters. """ cls = type(self) a, b = tuple(dataclasses.asdict(x) for x in (self, other)) return cls(**{key: a[key] + b[key] for key in a}) class CooperativeReductionWorkspaceCache: """ The scratch space used for cooperative reductions can be reused after two reduction loops. This keeps track of what can be reused. """ def __init__(self, args): self.args = args self.current_loop = [] self.prior_loop = [] self.ready_for_reuse = collections.defaultdict(collections.deque) self.loop_count = 0 self.store_count = 0 def allocate(self, nbytes: sympy.Expr): cached = self.ready_for_reuse.get(nbytes) if cached: return cached.popleft() ws_name, ws_offset = self.args.workspace(nbytes, False) self.current_loop.append((nbytes, ws_name, ws_offset)) return (ws_name, ws_offset) def on_loop_end(self): # Buffers can be reused after 2 loop ends for nbytes, ws_name, ws_offset in self.prior_loop: self.ready_for_reuse[nbytes].append((ws_name, ws_offset)) self.prior_loop = self.current_loop self.current_loop = [] self.loop_count += 1 def increment_store_count(self): prior = self.store_count self.store_count += 1 return prior @dataclasses.dataclass class FixedTritonConfig: config: Dict[str, int] def __getitem__(self, item): return self.config[item] class TritonCSE(CSE): """ Subclasses CSE to apply the current load mask to the cache key to avoid CSEing variables across separate masked blocks. 
""" def augment_key(self, cache_key: object) -> object: if mask := V.kernel._load_mask: return (cache_key, mask.name) else: return cache_key class TritonKernel(SIMDKernel): overrides = TritonKernelOverrides # type: ignore[assignment] helper_functions: HelperFunctions kexpr: Callable[[sympy.Expr], str] = texpr allow_block_ptr = True def __init__( self, tiling: Dict[str, sympy.Expr], min_elem_per_thread=0, optimize_mask=True, fixed_config: Optional[FixedTritonConfig] = None, **kwargs, ) -> None: self.optimize_mask: bool = optimize_mask self.fixed_config = fixed_config super().__init__(tiling, **kwargs) self.cse = TritonCSE(self.newvar_prefix, self.suffix) self.post_loop_combine: IndentedBuffer = IndentedBuffer() self.post_loop_store: IndentedBuffer = IndentedBuffer() self.outside_loop_vars: OrderedSet[Any] = OrderedSet() self.min_elem_per_thread = min_elem_per_thread self.block_ptr_id = itertools.count() self.helper_functions = HelperFunctions() self._load_counts: collections.Counter[str] = collections.Counter() # A set of autotuning hints to pass as part of triton_meta self.autotune_hints: OrderedSet[AutotuneHint] = OrderedSet() self.triton_meta: Optional[Dict[str, object]] = None if self.cooperative_reduction: self.init_cooperative_reduction() self.codegen_range_tree() def dtype_to_str(self, dtype: torch.dtype) -> str: return triton_type(dtype) def should_use_cooperative_reduction(self) -> bool: return self.inside_reduction and V.choices.should_use_cooperative_reduction( self.features ) def init_cooperative_reduction(self): """One time setup code for cooperative reductions.""" assert self.cooperative_reduction # shift all the grids over since tl.program_id(0) is for rsplit for tree in self.range_trees: if tree.grid_dim is not None: tree.grid_dim += 1 sem_count = self.numels["x"] if self.fixed_config: sem_count = CeilDiv(sem_count, self.fixed_config["XBLOCK"]) self.semaphores_name = self.args.semaphores(sem_count) self.cooperative_reduction_workspace_cache = CooperativeReductionWorkspaceCache( self.args ) self.body.splice( """ rsplit_id = tl.program_id(0) num_rblocks = (rnumel + RBLOCK - 1) // RBLOCK rsplit_chunk = (num_rblocks + RSPLIT - 1) // RSPLIT * RBLOCK rsplit_start = rsplit_chunk * rsplit_id rsplit_end = rsplit_chunk * (rsplit_id + 1) """, strip=True, ) if not self._has_constant_mask(self.range_trees[-1]): self.body.writeline( "rsplit_end = tl.where(rsplit_end < rnumel, rsplit_end, rnumel)" ) def codegen_range_tree(self): for tree in self.range_trees: # reduction indexing goes inside a loop if not tree.is_loop: self.iteration_ranges_codegen_header(tree, self.body) if self.inside_reduction and self.range_trees[-1].is_loop: # workaround for this issue: # https://gist.github.com/jansel/6527126f781559095c5531f98a4235a7 self.body.writeline( f"rbase = {self.iteration_ranges_ranges_code(self.range_trees[-1])}" ) def need_numel_args(self): r""" Indicate whether we need provide numel as arguments for the generated kernel calls in the benchmark. Should be true for pointwise/reduction kernels but false for triton matmul kernels. 
""" return True def should_use_persistent_reduction(self) -> bool: return self.inside_reduction and V.choices.should_use_persistent_reduction( self.features, self.cooperative_reduction ) def want_no_x_dim(self): if self.persistent_reduction and len(self.numels) == 2: if self.fixed_config: return self.fixed_config["XBLOCK"] == 1 return V.choices.want_no_x_dim(self.features) return False @property def assert_function(self) -> str: return "tl.device_assert" def indexing( self, index: sympy.Expr, *, copy_shape=None, dense_indexing=False, override_mask=None, block_ptr=False, ): """ Compute the index and mask to pass to tl.load() or tl.store() """ index = self.prepare_indexing(index) index_vars = index.free_symbols has_rindex = False mask_vars: OrderedSet[str] = OrderedSet() for var in index_vars: assert isinstance(var, sympy.Symbol) has_rindex = has_rindex or symbol_is_type(var, SymT.RINDEX) if override_mask: pass elif symbol_is_type(var, SymT.TMP): # indirect indexing cse_var = self.cse.varname_map[var.name] mask_vars.update(cse_var.mask_vars) elif symbol_is_type( var, ( SymT.UNBACKED_INT, SymT.SIZE, SymT.PRECOMPUTED_SIZE, SymT.INDEX, SymT.FLOAT, SymT.UNBACKED_FLOAT, ), ): pass else: # var is one of xN, yN or rN assert symbol_is_type( var, (SymT.RINDEX, SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK) ), var.name mask_vars.add(f"{var.name[0]}mask") need_dense = ( config.triton.dense_indexing or dense_indexing or self._load_mask is not None ) and index != 0 have_dense = True have_loop_vars = False dense_mask_vars: OrderedSet[str] = OrderedSet() for tree in self.active_range_trees(): if index_vars.intersection(tree.var_list): have_loop_vars = True else: have_dense = False dense_mask_vars.add(f"{tree.prefix}mask") if ( block_ptr and self.allow_block_ptr and config.triton.use_block_ptr and not override_mask and not self._load_mask and len(mask_vars - dense_mask_vars) == 0 and not self.is_indirect_indexing(index) and have_loop_vars # workaround https://github.com/openai/triton/issues/2821 and self.index_dtype == "tl.int32" ): def match_strided_block( index: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Matches expressions of the form: idx = s * xindex This implies stride (s,), and shape (XBLOCK,). """ symbol = range_tree.symbol() stride = sympy.Wild("stride", exclude=[symbol]) m = index.match(symbol * stride) if m is None: return None return BlockParameters( shape=[range_tree.numel], block_shape=[TritonSymbols.get_block_size(range_tree)], strides=[m[stride]], offsets=[TritonSymbols.get_block_offset(range_tree)], ) def match_mod_div_block( index: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Matches higher-dimensional blocks coming from FloorDiv and ModularIndexing. Example expression to match: sN * ((rindex//(d1 * ... * d(N-1)))) + s1 * ModularIndexing(rindex, 1, d1) + ... + s(N-1) * ModularIndexing(rindex, d1 * ... * d(N-2), d(N-1)) This iterates over a block of shape (dN, ..., d1) and stride (sN, ..., s1). (d1,...,d(N-1)) and (s1,...,sN) are wildcards that we match. Note that dN does not appear in the expression, but we solve for it using range tree numels and the other dims. """ # Bound the possible number of dims. We use the following heuristics: # - At least one dim for each range tree node. # - At least one dim for every FloorDiv or ModularIndexing op. # - At least 2 dims to pattern match. 
num_dims = max( 2, len(self.range_tree_nodes), (index.count(FloorDiv) + index.count(ModularIndexing)), ) # Pattern match to find the strides and offset. index_var = range_tree.symbol() match_result = BlockPatternMatcher.match_mod_div_block_expr( index, index_var, range_tree.numel, num_dims ) if match_result is None: return None ( dims, strides, block_index_exprs, ) = match_result slice_numels = BlockPatternMatcher.get_slice_numels(dims) # Check for applicable iteration range sizes. # When mapping a 1D block into an ND one, we need to know that # the number of elements is not changed. This means the slice numels of # the ND iteration range must evenly divide the length of the 1D block. # There are two cases where we can guarantee this: # 1. Numels are powers of 2. If numel == 2 ** n, and we know XBLOCK == 2 ** m, # with n and m integers, then either numel is a multiple of XBLOCK, or numel # is less than XBLOCK. (If numel is less than XBLOCK, we round up to 1 below.) # 2. Numels are multiples of the maximum possible block size. sizevars = V.graph.sizevars max_block = self.max_block(range_tree.prefix) if any( not sizevars.statically_known_multiple_of(numel, max_block) and not sizevars.statically_known_power_of_2(numel) for numel in slice_numels ): return None # Compute the ND block shape from the linear block size. # Use CielDiv to round leading dimensions up to 1. # Non-leading dimensions are clamped to the size of the iteration range, # while the leading dimension can exceed this to accomodate a larger # block size. linear_block_size = TritonSymbols.get_block_size(range_tree) block_shape: List[sympy.Expr] = [ CeilDiv(linear_block_size, slice_numels[0]) ] + [ sympy.Min(CeilDiv(linear_block_size, numel), dim) for numel, dim in zip(slice_numels[1:], dims[1:]) ] # Compute block offsets from {xyzr}offset and the matched expressions. block_offsets: List[sympy.Expr] = [ sympy_subs( expr, {index_var: TritonSymbols.get_block_offset(range_tree)} ) for expr in block_index_exprs ] return BlockParameters( shape=dims, block_shape=block_shape, strides=strides, offsets=block_offsets, ) def match_block_pointer_subexpr( expr: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Match a block indexing subexpression involving a single range tree. """ for match_func in ( match_strided_block, match_mod_div_block, ): match = match_func(expr, range_tree) if match is not None: return match return None def match_block_pointer() -> Optional[BlockPtrOptions]: index_relative_to_xyr_index = sympy_subs( index, {v: t.expr for v, t in self.range_tree_nodes.items()} ) range_trees = self.active_range_trees(reorder=True) # Partition the index into subexpressions pertaining to each range tree. # For example xindex * 5 + rindex * 3 is partitioned to # (xindex * 5, rindex * 3). index_subexprs = [ BlockPatternMatcher.get_subexpr_involving_symbol( index_relative_to_xyr_index, tree.symbol() ) for tree in range_trees ] # Match each range tree's subexpression separately. range_symbols = {tree.symbol() for tree in range_trees} block_params = BlockParameters() for tree, subexpr in zip(range_trees, index_subexprs): # Reject mixed terms, e.g. xindex * rindex. # NB: the zero expression is allowed, for broadcasting. if len(range_symbols.intersection(subexpr.free_symbols)) > 1: return None # Match the subexpression for this range tree. params = match_block_pointer_subexpr(subexpr, tree) if params is None: return None block_params += params # Collect leftover terms as a constant offset. 
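            # e.g. for xindex * 5 + rindex * 3 + 7, the per-tree subexpressions are
            # (xindex * 5, rindex * 3) and the leftover constant 7 becomes the
            # block pointer's constant offset below.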
offset = index_relative_to_xyr_index - sum(index_subexprs) # Form the block pointer. self.filter_masks(mask_vars) return BlockPtrOptions.create( params=block_params, constant_offset=offset, range_trees=range_trees, mask_vars=mask_vars, get_max_block=self.max_block, ) # Return a block pointer, if indexing matches the pattern. options = match_block_pointer() if options is not None: return options expand_str = None index_str = self.index_to_str(index) if isinstance(index, sympy.Integer): expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() index_str = f"tl.full({expand_str}, {index_str}, tl.int32)" return IndexingOptions( index_str, OrderedSet(), "None", expand_str, has_rindex, index ) if need_dense and not have_dense: expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() index_str = f"tl.broadcast_to({index_str}, {expand_str})" mask_vars = dense_mask_vars elif not have_loop_vars and copy_shape: index_str = f"tl.broadcast_to({index_str}, {copy_shape}.shape)" mask_vars = dense_mask_vars if override_mask: mask_vars = OrderedSet([override_mask]) if self._load_mask: mask_vars.add(self._load_mask) self.filter_masks(mask_vars) mask_str = " & ".join(sorted(map(str, mask_vars))) if mask_vars else "None" return IndexingOptions(index_str, mask_vars, mask_str, expand_str, has_rindex, index) # type: ignore[arg-type] def codegen_block_ptr( self, name: str, var: str, indexing: BlockPtrOptions, other="" ) -> Tuple[str, Optional[DeferredLine], str]: advance_block_ptr = None check = indexing.boundary_check() if not check: # workaround https://github.com/openai/triton/issues/2813 other = "" elif other: assert other == ", other=0.0" other = f", boundary_check={check!r}, padding_option='zero'" else: other = f", boundary_check={check!r}" if ( self.inside_reduction and self.range_trees[-1].is_loop and indexing.has_rindex() ): block_ptr = f"block_ptr{next(self.block_ptr_id)}" self.body.writeline( DeferredLine( name, f"{block_ptr} = {indexing.format(var, roffset=False)}" ) ) advance_block_ptr = DeferredLine( name, f"{block_ptr} = tl.advance({block_ptr}, {indexing.advance_roffset()})", ) else: block_ptr = indexing.format(var) return block_ptr, advance_block_ptr, other def codegen_block_ptr_store_line(self, name, indexing, block_ptr, value, other=""): # Stores require an explicit broadcast. 
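        # Unlike a plain masked tl.store, a block-pointer store does not implicitly
        # broadcast the value, so it is reshaped to the block pointer's shape here.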
value = indexing.codegen_broadcast_and_reshape( value, indexing.final_shape, indexing.block_shape, False ) # workaround https://github.com/openai/triton/issues/2814 value = f"{value}.to({triton_store_type(V.graph.get_dtype(name))})" return f"tl.store({block_ptr}, {value}{other})" def check_bounds( self, expr: sympy.Expr, size: sympy.Expr, lower: bool, upper: bool, ): if not (lower or upper): return assert isinstance(expr, sympy.Expr) indexing = self.indexing(expr, block_ptr=False) assert isinstance(indexing, IndexingOptions) index_str = indexing.index_str mask_str = indexing.mask_str if indexing.has_mask() else None size_str = texpr(self.rename_indexing(size)) if upper else None # expr is already wrapped line = self.indirect_assert( index_str, "0" if lower else None, size_str, mask_str ) buffer = self.get_load_buffer(indexing) self.cse.generate(buffer, line, assignment=False, dtype=torch.int32) def get_load_buffer(self, indexing): if indexing.has_indirect() or indexing.has_tmpmask(): # Masked loads must come after the mask is computed return self.compute elif ( self.inside_reduction and self.range_trees[-1].is_loop and not indexing.has_rindex() ): # can lift a common load outside of reduction loop # One exception is when this is an indirect_load. return self.body else: return self.loads def load(self, name: str, index: sympy.Expr): var = self.args.input(name) load_counts = self._load_counts load_counts[name] += 1 make_line: Callable[[str], Union[str, DelayReplaceLine]] = identity indirect_indexing = self.is_indirect_indexing(index) original_index = index indexing = self.indexing(index, block_ptr=True) has_rindex = indexing.has_rindex() has_tmpmask = indexing.has_tmpmask() # Keep the variable in cache if were going to reuse it. Equiv., if any of the following hold # 1) We are doing broadcasting # 2) It is a non-coalesced load. The intuition is that if it's # non-coalesced, we will likely load each element multiple times in # practice. # 3) It will be used later and it won't be CSE'd. 
Equiv., if all the following hold # 3.1) We are in a reduction loop # 3.2) Its not its last use # 3.3) This load will not be lifted to the body # is_coalesced = any( i == 1 for i in self.get_strides_of_load(original_index).values() ) if self.is_broadcasted(original_index): ep = ", eviction_policy='evict_last'" elif not is_coalesced: ep = ", eviction_policy='evict_last'" elif self.inside_reduction and self.range_trees[-1].is_loop: def decide_later(): if load_counts[name] > expected_count and ( has_rindex or indirect_indexing ): return "evict_last" return "evict_first" expected_count = load_counts[name] ep = ", eviction_policy='<EP>'" make_line = functools.partial(DelayReplaceLine, "<EP>", decide_later) else: ep = "" if (has_tmpmask or has_rindex) and indexing.has_mask(): if self._load_other: other = f", other={constant_repr(self._load_other)}" else: other = ", other=0.0" else: other = "" advance_block_ptr = None append_broadcast = None dtype = V.graph.get_dtype(name) if should_unwrap_unspec_arg(name): line = var else: if isinstance(indexing, BlockPtrOptions): block_ptr, advance_block_ptr, other = self.codegen_block_ptr( name, var, indexing, other ) line = f"tl.load({block_ptr}{other}{ep})" line = indexing.codegen_broadcast_and_reshape( line, indexing.block_shape, indexing.final_shape, True ) elif isinstance(original_index, sympy.Integer): line = f"tl.load({var} + ({original_index}))" append_broadcast = indexing.expand_str else: line = f"tl.load({var} + ({indexing.index_str}), {indexing.mask_str}{ep}{other})" if ( dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32 ): line += ".to(tl.float32)" dtype = torch.float32 if dtype == torch.bool and torch.version.hip is None: # Workaround for https://github.com/openai/triton/issues/2151 # tl.load returns int8 when loading from pointer to int1 # NOTE: Currently causes hangs on bool UTs for ROCm line += ".to(tl.int1)" dtype = torch.bool load_buffer = self.get_load_buffer(indexing) result_var = self.cse.generate(load_buffer, make_line(line), dtype=dtype) if result_var.use_count > 1: load_counts[name] -= 1 # don't double count cache hit assert isinstance(result_var, TritonCSEVariable) result_var.mask_vars = indexing.mask_vars # type: ignore[assignment] if append_broadcast: line = f"tl.broadcast_to({result_var}, {append_broadcast})" result_var = self.cse.generate(load_buffer, line, dtype=dtype) if advance_block_ptr: load_buffer.writeline(advance_block_ptr) if not self.inside_reduction or (not indexing.has_rmask() and not has_rindex): self.outside_loop_vars.add(result_var) return result_var def store( self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None ) -> None: var = self.args.output(name) original_index = index indexing = self.indexing(index, dense_indexing=True, block_ptr=mode is None) # Guard against write-after-read corruption in triton. # See # https://github.com/openai/triton/issues/1615 # This triton bug means that a load which is broadcasted over multiple # warps may see the result of a store that happens later in the triton # program. The workaround is to add a barrier before storing, which # enforces that all warps have already read the data. 
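        # Concretely, an in-place broadcasted store emits a tl.debug_barrier() line
        # immediately before the corresponding tl.store(...), so no warp can still
        # be reading the old value when the store is performed.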
is_inplace = name in self.args.inplace_buffers is_broadcasted = self.is_broadcasted(original_index) if is_inplace and is_broadcasted: self.stores.writeline(DeferredLine(name, "tl.debug_barrier()")) advance_block_ptr = None if isinstance(indexing, BlockPtrOptions): block_ptr, advance_block_ptr, other = self.codegen_block_ptr( name, var, indexing ) # block_ptr stores don't do implicit casting line = self.codegen_block_ptr_store_line( name, indexing, block_ptr, value, other ) elif mode is None: line = f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})" elif mode == "atomic_add": line = f"tl.atomic_add({var} + ({indexing.index_str}), {value}, {indexing.mask_str}, sem='relaxed')" else: raise NotImplementedError(f"store mode={mode}") exit_stack = contextlib.ExitStack() if not self.inside_reduction and self.cooperative_reduction: exit_stack.enter_context(self.guard_cooperative_store(name, self.stores)) self.stores.writeline(DeferredLine(name, line)) if advance_block_ptr: self.stores.writeline(advance_block_ptr) if not self.inside_reduction: self.outside_loop_vars.add(value) exit_stack.close() def guard_cooperative_store(self, name, buffer): """ For cooperative reductions only one thread block should write out the result. We rotate which thread block does each write for better parallelism """ idx = self.cooperative_reduction_workspace_cache.increment_store_count() buffer.writeline(DeferredLine(name, f"if rsplit_id == ({idx} % RSPLIT):")) return buffer.indent() def bucketize( self, values: CSEVariable, boundaries: Tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: CSEVariable, indexing_dtype: torch.dtype, right: bool, sorter: Optional[Tuple[str, sympy.Expr]] = None, sorter_indices: Optional[CSEVariable] = None, ) -> CSEVariable: """ See [Note: Inductor bucketize op] """ # Triton performance for bucketize_binary_search is much better when the number # of threads equals the number of elements. # If we're trying to use a bucketize kernel, we should make sure that an # autotuning config with num_elements_per_warp=(warp_size) exists. 
self.autotune_hints.add(AutotuneHint.ONE_ELEMENT_PER_THREAD) boundaries_ptr = self.args.input(boundaries[0]) boundary_size = self.index_to_str(boundaries[1]) boundaries_underlying_numel = self.index_to_str(boundaries[2]) boundary_stride = self.index_to_str(boundaries[3]) sorter_ptr = self.args.input(sorter[0]) if sorter else "None" sorter_stride = self.index_to_str(sorter[1]) if sorter else "None" block_size = self.dense_size_str() if indexing_dtype == torch.int32: triton_dtype = "tl.int32" elif indexing_dtype == torch.int64: triton_dtype = "tl.int64" else: raise NotImplementedError( "Bucketize only supports indexing with int32 and int64" ) result = self.cse.generate( self.compute, f"triton_helpers.bucketize_binary_search({values}, " f"{boundaries_ptr}, {boundary_size}, {boundaries_underlying_numel}, {boundary_stride}, " f"{boundary_indices}, " f"{triton_dtype}, " f"{right}, " f"{sorter_ptr}, {sorter_stride}, " f"{sorter_indices}, " f"{block_size}, " ")", dtype=indexing_dtype, # type: ignore[attr-defined] ) return result def reduction_resize(self, value): ndims = self.triton_tensor_ndim() if ndims == 1: return f"triton_helpers.promote_to_tensor({value})" sizes = [":"] * ndims sizes[-1] = "None" return f"{value}[{', '.join(sizes)}]" def reduction( self, dtype: torch.dtype, src_dtype: torch.dtype, reduction_type: ReductionType, value: Union[CSEVariable, Tuple[CSEVariable, ...]], ) -> Union[CSEVariable, Tuple[CSEVariable, ...]]: assert self.inside_reduction masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) if self._load_mask: masks.append(self._load_mask) reduction_range_prefix = self.range_trees[-1].prefix # Say we have # tmp0 = ops.constant(1, torch.int64) # tmp1 = ops.reduction(torch.int64, torch.int64, "sum", tmp0) # tmp0 in the triton code is either a scalar, or single-element tensor # so if we emit tl.sum directly, it will only give 1 instead of RBLOCK * 1 # To avoid this, we broadcast to the expected shape first. 
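        # Roughly (names illustrative, assuming a 2D [XBLOCK, RBLOCK] kernel):
        #     tmp0_b = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
        #     tmp1 = tl.sum(tmp0_b, 1)[:, None]
        # rather than reducing the un-broadcast [1, 1] constant directly.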
dense_size_str = self.dense_size_str() value = self._map_tuple_or_scalar( lambda v: self.cse.generate( self.compute, f"tl.broadcast_to({v}, {dense_size_str})", dtype=v.dtype, ), value, ) dim: int root_op: str def final_reduction(value): use_helper = reduction_type in {"any", "max", "min", "prod"} module = "triton_helpers" if use_helper else "tl" if reduction_type in {"max", "min"}: return self.reduction_resize( f"{module}.{reduction_type}2({value}, {dim})" ) return self.reduction_resize(f"{module}.{reduction_type}({value}, {dim})") def final_argreduce(buffer, result_var, value, index): buffer.splice( f"""\ {result_var}_val, {result_var}_idx = triton_helpers.{root_op}_with_index({value}, {index}, {dim}) {result_var} = {self.reduction_resize(f'{result_var}_idx')} """ ) cache_key = (src_dtype, reduction_type, value) if cache_key in self.cse.reduction_cache: return self.cse.reduction_cache[cache_key] dim = self.triton_tensor_ndim() - 1 acc_type = triton_acc_type(src_dtype) torch_acc_type = upcast_acc_dtype(src_dtype) result_var: Any = self.cse.newvar(dtype=torch_acc_type) result_var.mask_vars = OrderedSet( var for var in masks if not prefix_is_reduction(var[0]) ) cond = " & ".join(masks) def where_cond(tval, fval): if not cond: return tval return TritonKernelOverrides.where(cond, tval, fval) if self.persistent_reduction: default = ir.Reduction.default_value(reduction_type, src_dtype) default = self._map_tuple_or_scalar(constant_repr, default) def _mask_value(value, default): return self.cse.generate( self.compute, where_cond(value, default), dtype=value.dtype ) if isinstance(value, tuple): masked_value = [_mask_value(v, d) for v, d in zip(value, default)] else: masked_value = _mask_value(value, default) if reduction_type in {"argmax", "argmin"}: accumulator_index = str( self.cse.generate( self.compute, f"tl.broadcast_to({reduction_range_prefix}index, {masked_value}.shape)", dtype=torch.int64, ) ) root_op = {"argmax": "max", "argmin": "min"}[reduction_type] final_argreduce( self.compute, result_var, masked_value, accumulator_index ) elif reduction_type == "welford_reduce": if self.cooperative_reduction: # cooperative reductions require full welford for correctness result_var = self.welford_reduce( result_var, reduction_type, value, where_cond, acc_type, dtype ) else: # For persistent reductions, don't bother with # welford's algorithm since it uses more registers, and # taking two reductions doesn't increase memory usage. 
result_var = self.welford_reduce_fallback(dtype, value) elif reduction_type == "welford_combine": mean, m2, weight = masked_value welford = f"triton_helpers.welford({mean}, {m2}, {weight}, {dim})" mean, m2, weight = (self.cse.newvar(dtype=dtype) for _ in range(3)) self.compute.writeline(f"{mean}, {m2}, {weight} = {welford}") result_var = tuple( self.cse.generate( self.compute, self.reduction_resize(var_name), dtype=dtype ) for var_name in (mean, m2, weight) ) else: result_var = self.cse.generate( self.compute, final_reduction(masked_value), dtype=dtype ) else: accumulator = self.cse.namedvar(f"_{result_var}", dtype=torch_acc_type) default = ir.Reduction.default_accumulator(reduction_type, src_dtype) default = self._map_tuple_or_scalar(constant_repr, default) if not isinstance(default, tuple): self.body.writeline( f"{accumulator} = tl.full({self.dense_size_str()}, {default}, {acc_type})" ) if reduction_type in {"argmax", "argmin"}: accumulator_index = f"_{result_var}_index" long_max = torch.iinfo(torch.int64).max self.body.writeline( f"{accumulator_index} = tl.full({self.dense_size_str()}, {long_max}, tl.int64)" ) root_op = {"argmax": "max", "argmin": "min"}[reduction_type] self.compute.splice( f"""\ {accumulator}_next, {accumulator_index}_next = triton_helpers.{root_op}imum_with_index( {accumulator}, {accumulator_index}, {value}, {reduction_range_prefix}index ) {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} {accumulator_index} = {where_cond(f'{accumulator_index}_next', accumulator_index)} """ ) final_argreduce( self.post_loop_combine, result_var, accumulator, accumulator_index ) elif is_welford_reduction(reduction_type): result_var = self.welford_reduce( result_var, reduction_type, value, where_cond, acc_type, dtype ) else: combine_fn = ir.get_reduction_combine_fn(reduction_type, src_dtype) updated = combine_fn(accumulator, value) self.compute.writeline( f"{accumulator} = {where_cond(updated, accumulator)}" ) if src_dtype == torch.bool: # This is only really used for aten.any. 
It changes the # final reduction of a non-persistent reduction from # tmp5 = triton_helpers.max(_tmp5, 1)[:, None] # to # tmp5 = triton_helpers.max(_tmp5.to(tl.int8), 1)[:, None].to(tl.int1) # which is needed because tl.reduce doesn't support tl.int1 accumulator_casted_str = f"{accumulator}.to(tl.int8)" result_type = triton_compute_type(dtype) self.post_loop_combine.writeline( f"{result_var} = {final_reduction(accumulator_casted_str)}.to({result_type})" ) else: self.post_loop_combine.writeline( f"{result_var} = {final_reduction(accumulator)}" ) if self.cooperative_reduction: exit_stack = contextlib.ExitStack() for buf in (self.post_loop_combine, self.post_loop_store): # only do cooperative reduction combines if we have more than one thread block buf.writeline("if RSPLIT > 1:") exit_stack.enter_context(buf.indent()) if reduction_type in {"argmax", "argmin"}: self.post_loop_combine.writeline( f"{result_var}_bval = {self.reduction_resize(f'{result_var}_val')}" ) peer_val = self.codegen_cooperative_reduction_peer_combine( f"{result_var}_bval", src_dtype ) peer_idx = self.codegen_cooperative_reduction_peer_combine( result_var, dtype ) final_argreduce(self.post_loop_store, result_var, peer_val, peer_idx) elif is_welford_reduction(reduction_type): assert reduction_type == "welford_reduce" result_mean, result_m2, result_weight = result_var peer_mean = self.codegen_cooperative_reduction_peer_combine( result_mean, upcast_acc_dtype(src_dtype) ) peer_m2 = self.codegen_cooperative_reduction_peer_combine( result_m2, upcast_acc_dtype(src_dtype) ) peer_weight = self.codegen_cooperative_reduction_peer_combine( result_weight, upcast_acc_dtype(src_dtype) ) self.welford_reduce_final_reduction( self.post_loop_store, result_mean, result_m2, result_weight, peer_mean, peer_m2, peer_weight, dim, ) else: peers = self.codegen_cooperative_reduction_peer_combine( result_var, upcast_acc_dtype(src_dtype) ) self.post_loop_store.writeline( f"{result_var} = {final_reduction(peers)}" ) exit_stack.close() self.cse.reduction_cache[cache_key] = result_var if isinstance(result_var, tuple): assert all(isinstance(x, TritonCSEVariable) for x in result_var) self.outside_loop_vars |= OrderedSet(result_var) else: assert isinstance(result_var, TritonCSEVariable) self.outside_loop_vars.add(result_var) return result_var def welford_reduce( self, result_var, reduction_type, value, where_cond, acc_type, dtype ): """Helper to codegen a welford reduction""" dim = self.triton_tensor_ndim() - 1 accumulator = f"{result_var}_mean" accumulator_m2 = f"{result_var}_m2" accumulator_weight = f"{result_var}_weight" self.body.writeline( f"{accumulator} = tl.zeros({self.dense_size_str()}, {acc_type})" ) self.body.writeline( f"{accumulator_m2} = tl.zeros({self.dense_size_str()}, {acc_type})" ) self.body.writeline( f"{accumulator_weight} = tl.zeros({self.dense_size_str()}, {acc_type})" ) if reduction_type == "welford_combine": mean, m2, weight = value self.compute.splice( f"""\ {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_combine( {accumulator}, {accumulator_m2}, {accumulator_weight}, {mean}, {m2}, {weight} ) """ ) else: assert reduction_type == "welford_reduce" self.compute.splice( f"""\ {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_reduce( {value}, {accumulator}, {accumulator_m2}, {accumulator_weight}, roffset == 0 ) """ ) self.compute.splice( f"""\ {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} {accumulator_m2} = 
{where_cond(f'{accumulator_m2}_next', accumulator_m2)} {accumulator_weight} = {where_cond(f'{accumulator_weight}_next', accumulator_weight)} """ ) result_mean = result_var result_m2 = self.cse.newvar(dtype=dtype) result_weight = self.cse.newvar(dtype=dtype) return self.welford_reduce_final_reduction( self.post_loop_combine, result_mean, result_m2, result_weight, accumulator, accumulator_m2, accumulator_weight, dim, ) def welford_reduce_final_reduction( self, buf, result_mean, result_m2, result_weight, accumulator, accumulator_m2, accumulator_weight, dim, ): """Helper to codegen call to triton_helpers.welford""" buf.splice( f"""\ {result_mean}_tmp, {result_m2}_tmp, {result_weight}_tmp = triton_helpers.welford( {accumulator}, {accumulator_m2}, {accumulator_weight}, {dim} ) {result_mean} = {self.reduction_resize(f'{result_mean}_tmp')} {result_m2} = {self.reduction_resize(f'{result_m2}_tmp')} {result_weight} = {self.reduction_resize(f'{result_weight}_tmp')} """ ) return result_mean, result_m2, result_weight def max_rsplit(self): if self.fixed_config: return self.fixed_config["RSPLIT"] return TRITON_MAX_RSPLIT def codegen_cooperative_reduction_peer_combine(self, result_var, dtype): """ Generate code to save a [XBLOCK, RSPLIT] temporary workspace, where each thread block writes a different column. After the barrier, every thread block loads the completed value so that it can compute the final value independently. """ xnumel = self.numels["x"] mask = "xindex < xnumel" if xnumel != 1 and not self.no_x_dim else None expand = "" if self.no_x_dim else "[None,:]" nbytes = xnumel * dtype.itemsize * self.max_rsplit() ws_name, ws_offset = self.cooperative_reduction_workspace_cache.allocate(nbytes) self.post_loop_combine.splice( f""" {result_var}_ws = ({ws_name} + {self.index_to_str(ws_offset)}).to(tl.pointer_type({triton_type(dtype)})) tl.store({result_var}_ws + (xindex * RSPLIT + rsplit_id), {result_var}, {mask}) """, strip=True, ) self.post_loop_store.writeline( f"{result_var}_peers = tl.load({result_var}_ws + (xindex * RSPLIT + tl.arange(0, RSPLIT){expand}), " f"{mask}, eviction_policy='evict_first')" ) return f"{result_var}_peers" def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable): assert self.inside_reduction self.inside_reduction = False indexing = self.indexing(index, block_ptr=True) self.inside_reduction = True var = self.args.output(name) exit_stack = contextlib.ExitStack() if self.cooperative_reduction: exit_stack.enter_context( self.guard_cooperative_store(name, self.post_loop_store) ) if isinstance(indexing, BlockPtrOptions): self.post_loop_store.writeline( DeferredLine( name, self.codegen_block_ptr_store_line( name, indexing, indexing.format(var), value, f", boundary_check={indexing.boundary_check()!r}", ), ) ) else: assert isinstance(indexing, IndexingOptions) self.post_loop_store.writeline( DeferredLine( name, f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})", ) ) exit_stack.close() def _lift_helper(self, fn, num_args) -> str: # Lift IR function for scan operations into a triton function # in the global namespace helper = IndentedBuffer() helper.writeline("@triton.jit") args = [tuple(f"arg{i}_{n}" for n in range(num_args)) for i in range(2)] signature = ", ".join(itertools.chain.from_iterable(args)) helper.writeline(f"def {{name}}({signature}):") cse = CSE(prefix="", suffix="") overrides = TritonOverrides(V.MockHandler()) # Build a name that changes depending on fn to workaround a triton bug # where the combine_fn to reduce and scan is not 
hashed, and so different # scan ops may collide in the triton cache. # This is fixed with the latest triton pin, but not the triton-rocm pin. helper_name = "_triton_helper_fn" class CSEProxy: def __getattr__(self, name: str) -> Callable[..., CSEVariable]: def inner(*args, **kwargs): nonlocal helper_name helper_name += f"_{name}" return cse.generate( helper, getattr(overrides, name)(*args, **kwargs), dtype=torch.float32, ) return inner with helper.indent(), V.set_ops_handler(CSEProxy()): outputs = fn(*args) outputs = ", ".join(str(output) for output in outputs) helper.writeline(f"return {outputs}") return self.helper_functions.add(helper.getvalue(), base_name=helper_name) def scan( self, dtypes: Tuple[torch.dtype, ...], combine_fn: Callable[ [Tuple[CSEVariable, ...], Tuple[CSEVariable, ...]], Tuple[CSEVariable, ...] ], values: Tuple[CSEVariable, ...], ) -> Tuple[CSEVariable, ...]: assert self.inside_reduction assert not self.cooperative_reduction, "TODO" masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) assert not self._load_mask, "ops.scan not supported inside ops.masked" broadcasted_values = [] accumulators = [] cse_compute = functools.partial(self.cse.generate, self.compute) combine_helper_fn = self._lift_helper(combine_fn, len(values)) dim = self.triton_tensor_ndim() - 1 for value, dtype in zip(values, dtypes): value_dtype = self.cse.generate( self.compute, f"{value}.to({triton_compute_type(dtype)})", dtype=upcast_compute_type(dtype), ) value = self.cse.generate( self.compute, f"tl.broadcast_to({value_dtype}, {self.dense_size_str()})", dtype=upcast_compute_type(dtype), ) broadcasted_values.append(value) acc_type = triton_acc_type(dtype) if not self.persistent_reduction: accumulator = self.cse.newvar(dtype=upcast_compute_type(dtype)) reduced_size = self.dense_size_list() reduced_size[-1] = "1" reduced_size = f"[{', '.join(reduced_size)}]" default = "float('nan')" if dtype.is_floating_point else "-1" self.body.writeline( f"{accumulator} = tl.full({reduced_size}, {default}, {acc_type})" ) accumulators.append(accumulator) def csv(values): return " ".join(f"{value}," for value in values) def cse_multiple(line, values, masks, dtypes): n = len(values) cache_keys = [f"{line}, {i}, {masks}" for i in range(n)] if all(self.cse.contains(cache_key) for cache_key in cache_keys): return [self.cse.get(cache_key) for cache_key in cache_keys] result_vars = [self.cse.newvar(dtype=_dtype) for _dtype in dtypes] self.compute.writeline( f"{csv(result_vars)} = {line}", ) for result_var, cache_key in zip(result_vars, cache_keys): if masks: result_var.mask_vars = masks # type: ignore[attr-defined] self.cse.put(cache_key, result_var) return tuple(result_vars) partial_scan_vars = cse_multiple( f"tl.associative_scan(({csv(broadcasted_values)}), {dim}, {combine_helper_fn})", values, masks, (upcast_compute_type(dtype) for dtype in dtypes), ) if not self.persistent_reduction: # tl.reduce doesn't work for non-commutative operators, so instead # of repeating the scan op as a reduction, we use sum to select the # last scan value partial_reduce_vars = [ cse_compute( f"triton_helpers.select_one(({partial_scan_var}), rbase == (RBLOCK - 1), dim=-1, keep_dims=True)", dtype=upcast_compute_type(partial_scan_var.dtype), ) for partial_scan_var in partial_scan_vars ] accs_next = combine_fn(tuple(accumulators), tuple(partial_reduce_vars)) full_scan_vars = combine_fn(tuple(accumulators), partial_scan_vars) result_vars = [ cse_compute( f"tl.where(roffset > 0, 
{full_scan}, {partial_scan})", dtype=partial_scan.dtype, ) for full_scan, partial_scan in zip(full_scan_vars, partial_scan_vars) ] for acc_next, accumulator, partial_reduce in zip( accs_next, accumulators, partial_reduce_vars ): self.compute.writeline( f"{accumulator} = tl.where(roffset > 0, {acc_next}, {partial_reduce})" ) else: result_vars = partial_scan_vars for result_var in result_vars: result_var.mask_vars = masks # type: ignore[attr-defined] return tuple(result_vars) def sort( self, dtypes: Tuple[torch.dtype, ...], values: Tuple[CSEVariable, ...], stable: bool, descending: bool, ) -> Tuple[CSEVariable, ...]: assert self.inside_reduction assert not self.cooperative_reduction, "TODO" masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) assert not self._load_mask, "ops.sort not supported inside ops.masked" assert ( self.persistent_reduction ), "ops.sort is only supported in persistent reductions" reduction_range_prefix = self.range_trees[-1].prefix cse_compute = functools.partial(self.cse.generate, self.compute) dim = self.triton_tensor_ndim() - 1 assert len(dtypes) == len(values) broadcasted_values = [ cse_compute( f"tl.broadcast_to({value}, {self.dense_size_str()})", dtype=dtypes[i] ) for i, value in enumerate(values) ] def csv(values): return " ".join(f"{value}," for value in values) def cse_multiple(line, n, masks, dtypes): cache_keys = [f"{line}, {i}, {masks}" for i in range(n)] if all(self.cse.contains(cache_key) for cache_key in cache_keys): return [self.cse.get(cache_key) for cache_key in cache_keys] result_vars = [self.cse.newvar(dtype=dtypes[i]) for i in range(n)] # type: ignore[attr-defined] self.compute.writeline( f"{csv(result_vars)} = {line}", ) for result_var, cache_key in zip(result_vars, cache_keys): if masks: result_var.mask_vars = masks # type: ignore[attr-defined] self.cse.put(cache_key, result_var) return tuple(result_vars) assert self.range_trees[-1].is_reduction rnumel = "None" if self._has_constant_mask(self.range_trees[-1]) else "rnumel" if len(values) == 2: line = ( f"triton_helpers.sort_with_index({broadcasted_values[0]}, {broadcasted_values[1]}," f" {rnumel}, {dim}, stable={stable}, descending={descending})" ) result_vars = cse_multiple(line, len(values), masks, dtypes) else: raise AssertionError("Unhandled sort") for result_var, input_var in zip(result_vars, values): result_var.mask_vars = masks # type: ignore[attr-defined] result_var.bounds = input_var.bounds return tuple(result_vars) def codegen_body(self): """ Concat output code from index_code, loads, compute, stores, suffix into self.body. For pointwise kernels, this is called just once at the end. For reduction kernels, this generates a loop over the reduction axis. 
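        Roughly, a non-persistent reduction produces a body of the form

            for roffset in range(0, rnumel, RBLOCK):
                rindex = roffset + rbase
                ... loads / compute / stores ...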
""" if not ( self.indexing_code or self.loads or self.stores or self.compute or self.post_loop_combine or self.post_loop_store ): return if self.inside_reduction and self.range_trees[-1].is_loop: if self.cooperative_reduction: self.body.writeline( "for roffset in range(rsplit_start, rsplit_end, RBLOCK):" ) else: self.body.writeline("for roffset in range(0, rnumel, RBLOCK):") with self.body.indent(): # last range tree is always reduction self.iteration_ranges_codegen_header(self.range_trees[-1], self.body) self.body.splice(self.indexing_code) self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) # invalidate any caches that came from inside the reduction loop self.cse.invalidate(self.outside_loop_vars) self.range_trees[-1].cache_clear() else: self.body.splice(self.indexing_code) self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) self.body.splice(self.post_loop_combine) if self.cooperative_reduction and ( self.post_loop_combine or self.post_loop_store ): sem_ptr = f"{self.semaphores_name} + tl.program_id(1)" self.body.splice( f""" if RSPLIT > 1: triton_helpers.x_grid_barrier({sem_ptr}) """, strip=True, ) self.cooperative_reduction_workspace_cache.on_loop_end() self.body.splice(self.post_loop_store) self.indexing_code.clear() self.loads.clear() self.compute.clear() self.stores.clear() self.post_loop_combine.clear() self.post_loop_store.clear() def codegen_kernel_benchmark(self, num_gb, grid=None): result = IndentedBuffer() argdefs, call_args, signature, _ = self.args.python_argdefs() result.writelines(["", "", "def get_args():"]) with result.indent(): name_cnt = itertools.count() var_names = [] for arg_name, arg_sig in zip(call_args, signature): var_name = f"arg_{next(name_cnt)}" buf = V.graph.try_get_buffer(arg_name) if buf: result.writeline( f"{var_name} = rand_strided({V.graph.sizevars.size_hints(buf.get_size())}, {V.graph.sizevars.size_hints(buf.get_stride())}, device='{buf.get_device()}', dtype={buf.get_dtype()})" # noqa: B950 line too long ) elif arg_name in V.graph.constants: # note that random seed is put in V.graph.constants const_tensor = V.graph.constants[arg_name] result.writeline( f"{var_name} = rand_strided({V.graph.sizevars.size_hints(const_tensor.size())}, {V.graph.sizevars.size_hints(const_tensor.stride())}, device='{const_tensor.device}', dtype={const_tensor.dtype})" # type: ignore[arg-type] # noqa: B950 line too long ) elif isinstance(arg_sig, SizeArg): symval_hint = V.graph.sizevars.size_hint(arg_sig.expr) # Force the seed_offset to be 0 so calls to the same kernel # using different seed offset will have the same benchmark harness. # We can dedup kernel definitions in this case. 
if "seed_offset" in arg_sig.name: symval_hint = 0 result.writeline(f"{var_name} = {symval_hint}") elif isinstance(arg_sig, WorkspaceArg): device = V.graph.get_current_device_or_throw() count = V.graph.sizevars.size_hint(arg_sig.count) result.writeline( f"{var_name} = torch.zeros({count}, device='{device}', dtype={arg_sig.dtype})" ) else: raise KeyError( f"Don't find the buffer or const tensor for {arg_name}" ) var_names.append(var_name) result.writeline(f"return {', '.join(var_names)},") result.writelines(["\n", "\n", "def call(args):"]) if grid is None: grid = [] extra_args = [] extra_args_str = None for tree in self.active_range_trees(): expr = pexpr(V.graph.sizevars.size_hint(tree.numel)) extra_args.append(expr) if not tree.is_reduction: grid.append(expr) if self.need_numel_args(): extra_args_str = ", ".join(map(str, extra_args)) + ", " else: extra_args_str = "" grid_arg = f"{extra_args_str}grid=grid({', '.join(grid)})" else: grid_arg = f"grid={grid}" current_device = V.graph.get_current_device_or_throw() index = current_device.index with result.indent(): result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") with result.indent(): result.writeline( V.graph.device_ops.set_device(index) ) # no-op to ensure context stream_name = f"stream{index}" result.writeline(f"{stream_name} = get_raw_stream({index})") result.writeline( f"{str(Placeholder.KERNEL_NAME)}.run(*args, {grid_arg}, stream={stream_name})" ) # benchmark all configs result.writelines(["\n", "\n", "def benchmark_all_configs(args):"]) with result.indent(): result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") with result.indent(): result.writeline( V.graph.device_ops.set_device(index) ) # no-op to ensure context result.writeline( f"return {str(Placeholder.KERNEL_NAME)}.benchmark_all_configs(*args, {grid_arg})" ) result.writelines(["\n", "\n", "if __name__ == '__main__':"]) with result.indent(): result.writeline( "from torch._inductor.runtime.benchmarking import benchmarker" ) result.writeline("") result.writeline("args = get_args()") result.writeline( "ms = benchmarker.benchmark_gpu(lambda: call(args), rep=40)" ) result.writeline(f"num_gb = {num_gb}") result.writeline("gb_per_s = num_gb / (ms / 1e3)") result.writeline( 'print(f"{ms:.3f}ms {num_gb:.3f}GB {gb_per_s:.2f}GB/s")' ) return result def imports_for_benchmark_kernel(self): return textwrap.dedent( """ from torch._dynamo.testing import rand_strided {} import torch from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid """.format( V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") ) ) def _get_heuristic(self): if self.fixed_config: return "fixed_config" elif self.cooperative_reduction: return "cooperative_reduction" elif self.persistent_reduction: assert self.inside_reduction return "persistent_reduction" elif self.inside_reduction: return "reduction" return "pointwise" @staticmethod def inductor_meta_common(): inductor_meta = { "backend_hash": torch.utils._triton.triton_hash_with_backend(), "are_deterministic_algorithms_enabled": torch.are_deterministic_algorithms_enabled(), "assert_indirect_indexing": config.assert_indirect_indexing, "autotune_local_cache": config.autotune_local_cache, "autotune_pointwise": config.triton.autotune_pointwise, "autotune_remote_cache": config.autotune_remote_cache, "force_disable_caches": config.force_disable_caches, "dynamic_scale_rblock": config.dynamic_scale_rblock, "max_autotune": config.max_autotune, "max_autotune_pointwise": config.max_autotune_pointwise, "min_split_scan_rblock": 
config.triton.min_split_scan_rblock, "spill_threshold": config.triton.spill_threshold, "store_cubin": config.triton.store_cubin, } if torch.version.hip is not None: inductor_meta["is_hip"] = True if config.is_fbcode(): inductor_meta["is_fbcode"] = True if config.profile_bandwidth: inductor_meta["profile_bandwidth"] = config.profile_bandwidth inductor_meta["profile_bandwidth_regex"] = config.profile_bandwidth_regex inductor_meta["profile_bandwidth_output"] = config.profile_bandwidth_output inductor_meta[ "profile_bandwidth_with_do_bench_using_profiling" ] = config.profile_bandwidth_with_do_bench_using_profiling if config.coordinate_descent_tuning: inductor_meta[ "coordinate_descent_tuning" ] = config.coordinate_descent_tuning inductor_meta[ "coordinate_descent_search_radius" ] = config.coordinate_descent_search_radius inductor_meta[ "coordinate_descent_check_all_directions" ] = config.coordinate_descent_check_all_directions return inductor_meta def codegen_kernel(self, name=None): code = IndentedBuffer() size_hints = [] for numel in self.numels.values(): numel_hint = V.graph.sizevars.symbolic_hint(numel) if not isinstance(numel_hint, (int, sympy.Integer)): # This default heuristic hint was picked carefully: it is # large, to ensure that we don't shrink the block size (since # if you don't have many elements, it'd be wasteful to pick a # large block size). Since we don't know how many elements we # might have, we should be OK with some inefficiency to make # sure we handle the large case well. 8192 is the largest # block size we support, so we pick that. # # If we have a better hint for unbacked SymInts (e.g., because # a user told us, or we are tracking upper bounds) we could # use that here. size_hint = 8192 else: size_hint = next_power_of_2(int(numel_hint)) size_hints.append(size_hint) if not self.inside_reduction: size_hints.pop() if name is None: code.splice(gen_common_triton_imports()) device_type = V.graph.get_current_device_or_throw().type if device_type == "cpu": code.splice("triton_helpers.set_driver_to_cpu()") else: code.splice("triton_helpers.set_driver_to_gpu()") if config.benchmark_kernel: code.splice(self.imports_for_benchmark_kernel()) argdefs, _, signature, _ = self.args.python_argdefs() # maps actual expression to SizeArg if it is in sizevars replacements for i, arg in enumerate(signature): if isinstance(arg, SizeArg): # mypy is unhappy about the sympy.Expr # type for the key of the dict below symbol = cast(sympy.Symbol, arg.expr) if symbol in V.graph.sizevars.inv_precomputed_replacements: signature[i] = SizeArg( arg.name, V.graph.sizevars.inv_precomputed_replacements[symbol] ) mutated_args: OrderedSet[str] = OrderedSet() for mutation in self.mutations: if mutation in self.args.input_buffers: mutated_args.add(self.args.input_buffers[mutation]) if ( mutation in self.args.inplace_buffers and mutation not in V.graph.removed_buffers and mutation not in self.removed_buffers ): mutated_args.add(self.args.inplace_buffers[mutation].inner_name) if mutation in self.args.output_buffers: mutated_args.add(self.args.output_buffers[mutation]) # Note: [Workspace Mutation] # workspace arguments are mutated, but are not marked as mutations in self.mutations # because their buffers are added during codegen, and aren't tracked during # lowering/scheduling. So we add them as mutated_args explicitly below. 
# # In the logic below, we only mark the workspaces a mutated if they are marked with # zero_fill: that's because, if we don't expect the buffer to be pre-filled with # zeros, then, although we still mutate the data, we don't care about those # mutations because we don't make any assumptions about the contents of the # workspace buffer. Similarly, ZERO_PER_GRAPH requires the kernel to return # the buffer back to its original state. for argname, arg in zip(argdefs, signature): if ( isinstance(arg, WorkspaceArg) and arg.zero_mode == WorkspaceZeroMode.ZERO_ON_CALL ): mutated_args.add(argname) mutated_args = sorted(mutated_args) triton_meta_signature = signature_to_meta( signature, size_dtype=self.index_dtype, argdefs=argdefs ) triton_meta = { "signature": triton_meta_signature, "device": DeviceProperties.create(V.graph.get_current_device_or_throw()), "constants": {}, } # Skip memory optimization for forward of the training loop where we expect # every new node will increase the peak memory and our greedy approach would # introduce a lot of unnecessary cpu copies. optimize_mem = V.graph.is_inference or V.graph.is_backward inductor_meta = { "autotune_hints": set(self.autotune_hints), "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), "mutated_arg_names": mutated_args, "optimize_mem": optimize_mem, "no_x_dim": self.no_x_dim, "num_load": self.num_load, "num_reduction": self.num_reduction, **self.inductor_meta_common(), } if self.cooperative_reduction: inductor_meta["persistent_reduction"] = self.persistent_reduction num_gb = None if config.benchmark_kernel or config.profile_bandwidth: num_gb = self.estimate_kernel_num_bytes() / 1e9 inductor_meta["kernel_num_gb"] = num_gb for tree in self.active_range_trees(): sizearg = SizeArg(f"{tree.prefix}numel", tree.numel) signature.append(sizearg) triton_meta_signature[sizearg.name] = signature_of( sizearg, size_dtype=self.index_dtype ) argdefs.append(f"{tree.prefix}numel") # constexpr version causes issues, see # https://github.com/pytorch/torchdynamo/pull/1362 # triton_meta["constants"][len(argdefs)] = V.graph.sizevars.size_hint( # tree.numel # ) # argdefs.append(f"{tree.prefix}numel: tl.constexpr") triton_meta["configs"] = [config_of(signature)] # Triton compiler includes equal_to_1 args into constants even # when they are not constexpr. otherwise there may be a segfault # during launching the Inductor-compiled Triton kernel. 
# https://github.com/pytorch/pytorch/issues/120478#issuecomment-1962822307 # https://github.com/openai/triton/blob/231efe9ed2d200be0f69a07c298e4342b08efe3d/python/triton/runtime/jit.py#L384 for arg_num in triton_meta["configs"][0].equal_to_1: # type: ignore[index] triton_meta["constants"][signature[arg_num].name] = 1 # type: ignore[index] self.triton_meta = triton_meta for tree in self.range_trees: if tree.is_reduction and self.persistent_reduction: # RBLOCK for persistent_reduction is defined in codegen_static_numels continue if tree.tensor_dim is None: continue argdefs.append(f"{tree.prefix.upper()}BLOCK : tl.constexpr") if self.cooperative_reduction: argdefs.append("RSPLIT : tl.constexpr") self.codegen_body() for helper in self.helper_functions: code.writeline("") code.splice(helper) if self.fixed_config: heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( config={self.fixed_config.config!r}, filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r} ) @triton.jit """ elif self.inside_reduction: reduction_hint = self.features.get_reduction_hint() heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( size_hints={size_hints!r}, reduction_hint={reduction_hint}, filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r} ) @triton.jit """ else: tile_hint = "" if len(size_hints) == 2: if len(signature) == 4: # input, output and 2 args tile_hint = "tile_hint=TileHint.SQUARE," else: tile_hint = "tile_hint=TileHint.DEFAULT," heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( size_hints={size_hints!r}, {tile_hint} filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r}, min_elem_per_thread={self.min_elem_per_thread} ) @triton.jit """ code.splice(heuristics_line) code.writeline( f"def {name or str(Placeholder.KERNEL_NAME)}({', '.join(argdefs)}):" ) with code.indent(): self.codegen_static_numels(code) for old, new in self.args.aliases(): code.writeline(f"{old} = {new}") code.splice(self.body) if config.benchmark_kernel: code.splice(self.codegen_kernel_benchmark(num_gb)) return code.getvalue() @staticmethod def _get_persistent_RBLOCK(rnumel): rnumel = V.graph.sizevars.simplify(rnumel) if isinstance(rnumel, (sympy.Integer, int)): val = int(rnumel) val = next_power_of_2(val) else: val = 128 while not V.graph.sizevars.statically_known_leq(rnumel, val): if val > 16 * 1024: raise ValueError(f"Failed to find static RBLOCK for {rnumel}") val *= 2 return val @staticmethod def has_persistent_RBLOCK(rnumel): try: TritonKernel._get_persistent_RBLOCK(rnumel) return True except ValueError: return False def codegen_static_numels(self, code): """ We get a small speedup from hard coding numels if they are static. This code stomps on the passed-in values by writing an constant to the top of the kernel. In a kernel like: def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): We would add xnumel = 4096 rnumel = 768 After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream knows that its a static numel, as that you just plop a constant into the kernel. 
""" for tree in self.range_trees: if not tree.is_reduction or self.inside_reduction: simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) if isinstance(simplified_tree_numel, (sympy.Integer, int)): code.writeline(f"{tree.prefix}numel = {int(simplified_tree_numel)}") if tree.is_reduction and self.persistent_reduction: val = self._get_persistent_RBLOCK(tree.numel) if self.cooperative_reduction: val = f"{val} // RSPLIT" code.writeline(f"RBLOCK: tl.constexpr = {val}") if tree.prefix == "x" and self.no_x_dim: code.writeline("XBLOCK: tl.constexpr = 1") def _get_grid_fn_str(self): return self._get_grid_fn().__name__ def _get_grid_fn(self): if self.cooperative_reduction: return cooperative_reduction_grid return default_grid_fn def add_numel_to_call_args_and_grid(self, name, call_args, arg_types, grid): # TODO(jansel): if there are constants, we shouldn't bother passing them as args for tree in self.range_trees: if isinstance(tree.numel, (sympy.Integer, sympy.Symbol)): expr = tree.numel else: expr = V.graph.wrapper_code.generate_numel_expr(name, tree) if not tree.is_reduction or self.inside_reduction: call_args.append(expr) arg_types.append(type(expr)) if tree.grid_dim is not None: grid.append(expr) def call_kernel(self, name: str, node: Optional[IRNode] = None): wrapper = V.graph.wrapper_code wrapper.write_triton_header_once() _, call_args, _, arg_types = self.args.python_argdefs() grid: List[Any] = [] self.add_numel_to_call_args_and_grid(name, call_args, arg_types, grid) current_device = V.graph.get_current_device_or_throw() for ws in self.args.workspace_args: wrapper.generate_workspace_allocation(ws) grid = wrapper.generate_default_grid( name, grid, grid_callable=self._get_grid_fn() ) wrapper.generate_kernel_call( name, call_args, grid, current_device.index, gpu=current_device.type != "cpu", triton=True, arg_types=arg_types, grid_fn=self._get_grid_fn_str(), triton_meta=self.triton_meta, ) for ws in reversed(self.args.workspace_args): wrapper.generate_workspace_deallocation(ws) def codegen_nan_check(self): wrapper = V.graph.wrapper_code _, call_args, arg_signatures, _ = self.args.python_argdefs() for arg, arg_signature in zip(call_args, arg_signatures): if isinstance(arg_signature, TensorArg): if V.graph.cpp_wrapper: wrapper.writeline( f'AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_check_inf_and_nan("{arg}", {arg}));' ) else: line = f"assert not {arg}.isnan().any().item()" wrapper.writeline(line) line = f"assert not {arg}.isinf().any().item()" wrapper.writeline(line) def create_cse_var(self, *args, **kwargs): return TritonCSEVariable(*args, **kwargs) def codegen_iteration_ranges_entry(self, entry: IterationRangesEntry): line = f"{entry.name} = {self.kexpr(self.rename_indexing(entry.expr))}" if entry.root.is_loop: self.indexing_code.writeline(line) else: # lift non-reduction stores outside loop self.body.writeline(line) def iteration_ranges_ranges_code(self, entry): assert entry.tensor_dim is not None size = self.indexing_size_str(entry.tensor_dim) index_dtype = self.index_dtype suffix = f".to({index_dtype})" if index_dtype != "tl.int32" else "" if ( self.cooperative_reduction and self.persistent_reduction and entry.is_reduction ): suffix = f"{suffix} + rsplit_start" return f"tl.arange(0, {entry.prefix.upper()}BLOCK){size}{suffix}" def iteration_ranges_scalar_code(self, entry, value): index_dtype = self.index_dtype ndim = self.triton_tensor_ndim() size = [1] * ndim return f"tl.full({size}, {value}, {index_dtype})" def iteration_ranges_get_pid(self, entry): assert entry.grid_dim is not None key 
= f"tl.program_id({entry.grid_dim})" # y_grid has a limit, so express it in terms of y and z in case of overflow. # z grid is only exercised when max_tiles == 3 (off by default). if ( entry.grid_dim == 1 and not entry.has_zdim and not self.cooperative_reduction and not V.graph.sizevars.statically_known_leq(entry.numel, get_max_y_grid()) ): # For ynumel larger than max_ygrid, we need to use zdim. # For each z dimension, there are tl.num_programs(1) yblocks which is passed by grad(x,y,z). # So, we need to add tl.program_id(z) * tl.num_programs(y) *YBLOCK to get the correct yoffset. key = f"({key} + tl.program_id({entry.grid_dim + 1}) * tl.num_programs({entry.grid_dim}))" pid = entry.pid_cache.get(key, key) if self.index_dtype != "tl.int32": return f"{pid}.to({self.index_dtype})" return pid def max_block(self, prefix): if self.fixed_config: return self.fixed_config[f"{prefix.upper()}BLOCK"] return TRITON_MAX_BLOCK[prefix.upper()] def _has_constant_mask(self, tree: IterationRangesRoot): if not self.optimize_mask: return False if V.graph.sizevars.statically_known_equals(tree.numel, 1): # type: ignore[arg-type] return True # Masks are superfluous if numel is a multiple of BLOCK # (We use the fact that BLOCK is required by triton to be a power of 2) if tree.is_reduction and self.persistent_reduction: max_block = self._get_persistent_RBLOCK(tree.numel) elif tree.prefix == "x" and self.no_x_dim: max_block = 1 else: max_block = self.max_block(tree.prefix) if tree.is_reduction and self.cooperative_reduction: max_block = max_block * self.max_rsplit() # Optional optimization: if block divides numel exactly, we will # never need to do a masked load to handle stragglers at the end. # If this tree is for the y dimension, we should only use a constant # mask if it can be guaranteed that: # 1. (ynumel / YBLOCK) < max_ygrid or # 2. (ynumel / YBLOCK) % max_ygrid == 0 # Because YBLOCK is not constant, use a conservative heuristic: # only use a constant mask if ynumel < max_ygrid. # It's faster to avoid masking at all. But it is sound to always # mask. 
if V.graph.sizevars.statically_known_multiple_of(tree.numel, max_block): return ( tree.grid_dim != 1 or tree.has_zdim or V.graph.sizevars.statically_known_leq(tree.numel, get_max_y_grid()) ) return False def filter_masks(self, mask_vars): for tree in self.range_trees: if self._has_constant_mask(tree): mask_vars.discard(f"{tree.prefix}mask") def iteration_ranges_codegen_header(self, entry, code): x = entry.prefix if entry.is_loop: code.writeline(f"{entry.name} = {x}offset + {x}base") elif entry.grid_dim is None: # no need to "{x}offset = " code.writeline(f"{entry.name} = {self.iteration_ranges_ranges_code(entry)}") code.writeline(f"{x}offset = 0") else: if entry.tensor_dim is not None: line = f"{x}offset + {self.iteration_ranges_ranges_code(entry)}" else: line = self.iteration_ranges_scalar_code(entry, f"{x}offset") code.writelines( [ f"{x}offset = {self.iteration_ranges_get_pid(entry)} * {x.upper()}BLOCK", f"{entry.name} = {line}", ] ) if self._has_constant_mask(entry): sizes = self.dense_size_str() code.writeline(f"{x}mask = tl.full({sizes}, True, tl.int1)") else: code.writeline(f"{x}mask = {entry.name} < {x}numel") class TritonScheduling(SIMDScheduling): kernel_type: Type[Any] = TritonKernel backend_features = dict.fromkeys( # dict for deterministic order [ BackendFeature.FOREACH, BackendFeature.BUCKETIZE, BackendFeature.INPLACE_BUFFERS, BackendFeature.MASKED_SCATTER_WITH_INDEX, BackendFeature.SCAN, BackendFeature.TRITON_TEMPLATES, ] ) if torch.version.hip is None: backend_features.update( dict.fromkeys( [ # TODO: Move this above when ROCm triton adds support for multiple inputs BackendFeature.TUPLE_REDUCTION, BackendFeature.SORT, ] ) ) def __init__(self, scheduler: Scheduler) -> None: super().__init__(scheduler) if scheduler is None or not hasattr(scheduler, "nodes"): return for node in scheduler.nodes: if isinstance(node, (SchedulerNode, FusedSchedulerNode)): node.debug_device_str = debug_triton_code @classmethod def get_backend_features(cls, device: torch.device): if ( config.triton.cooperative_reductions or config.triton.force_cooperative_reductions ): return { **cls.backend_features, BackendFeature.REDUCE_TO_SINGLE_ELEMENT: None, } return cls.backend_features def codegen_comment(self, node_schedule): wrapper = V.graph.wrapper_code origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) if origins: wrapper.writeline(origins) if config.debug_fusion: from torch._inductor.scheduler import ( BaseSchedulerNode, ForeachKernelSchedulerNode, ) if not any( isinstance(n, ForeachKernelSchedulerNode) for n in node_schedule ): # We probably should look what are the nodes inside a foreach # schedule node node_names = [ n.get_name() for n in node_schedule if isinstance(n, BaseSchedulerNode) ] wrapper.writeline( f"{wrapper.comment} Fused node name list: {', '.join(node_names)}" ) def define_kernel(self, src_code, node_schedule, kernel): wrapper = V.graph.wrapper_code if src_code in wrapper.src_to_kernel: kernel_name = wrapper.src_to_kernel[src_code] else: fused_name = ( get_fused_kernel_name(node_schedule, config.triton.descriptive_names) if config.triton.descriptive_names else "" ) kernel_category = get_kernel_category_by_source_code(src_code)[:3] kernel_name = "_".join( ["triton", kernel_category, fused_name, wrapper.next_kernel_suffix()] ) # use the original src_code as the key wrapper.src_to_kernel[src_code] = kernel_name subs_name = kernel_name if config.triton.unique_kernel_names else "triton_" # DESCRIPTIVE_NAME is used for profiling purposes; it shows the full kernel name # 
even when unique_kernel_names is turned off. Meanwhile, KERNEL_NAME is sometimes set # to "triton_" to maximize caching opportunities (when unique_kernel_names = False). src_code = src_code.replace(str(Placeholder.DESCRIPTIVE_NAME), kernel_name) src_code = src_code.replace(str(Placeholder.KERNEL_NAME), subs_name) # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. src_code = src_code.replace("#pragma CMT", "#") basename, _, kernel_path = get_path(code_hash(src_code.strip()), "py") compile_wrapper = IndentedBuffer() compile_wrapper.writeline(f"async_compile.triton({subs_name!r}, '''") compile_wrapper.splice(src_code, strip=True) current_device = V.graph.get_current_device_or_throw() compile_wrapper.writeline(f"''', device_str='{current_device.type}')") metadata_comment = f"# kernel path: {kernel_path}" origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) metadata_comment += "\n" + origins + "\n" + detailed_origins wrapper.define_kernel( kernel_name, compile_wrapper.getvalue(), metadata_comment ) # log kernel metadata for offline analysis. # E.g. one can find all unaligned inner reduction and check if # padding helps with the perf kernel by kernel. if metrics.is_metric_table_enabled("kernel_metadata"): metrics.log_kernel_metadata(kernel_name, kernel_path, src_code) return kernel_name def benchmark_fused_nodes(self, nodes): with preserve_rng_state(), torch.cuda.device( V.graph.get_current_device_or_throw() ): src_code = self.generate_kernel_code_from_nodes( nodes, benchmark_kernel=True ) mod = PyCodeCache.load(src_code) def cache_file_path(): assert mod.__file__ is not None return os.path.splitext(mod.__file__)[0] + ".kernel_perf" def load_cache(): path = cache_file_path() if os.path.exists(path): with open(path) as fd: return float(fd.read()) return None def store_cache(): path = cache_file_path() with open(path, "w") as fd: fd.write(str(ms)) log.debug( "kernel src code for %s written to: %s", {n.get_name() for n in nodes}, mod.__file__, ) ms = load_cache() if ms is not None: return ms, mod.__file__ args = mod.get_args() call = mod.call wrapped_jit_function = mod.triton_ # call once to trigger the compilation try: call(wrapped_jit_function.clone_args(*args)[0]) except Exception as e: log.debug( "Exception (%s) in compiling fused nodes %s", e, {n.get_name() for n in nodes}, ) ms = float("inf") store_cache() return ms, mod.__file__ launchers = wrapped_jit_function.launchers assert len(launchers) == 1 if launchers[0].n_spills > 0: # skip benchmarking the kernel if there are register spills ms = float("inf") else: # We have to clone the inplace updated arguments to avoid earlier calls # generating out of range indices for later calls. 
ms = benchmarker.benchmark_gpu( lambda: call(wrapped_jit_function.clone_args(*args)[0]) ) # overhead of cloning args gives bias for fusing the kernel # in the case of mutating/in-placeable second fusion # TODO - would be better as a hook in triton do_bench that reset # the input values between benchmarking if len(wrapped_jit_function.mutated_arg_names) > 0: ms = ms - benchmarker.benchmark_gpu( lambda: wrapped_jit_function.clone_args(*args) ) log.debug( "The fused kernel for %s took %.3f ms to run", {n.get_name() for n in nodes}, ms, ) store_cache() return ms, mod.__file__ def create_kernel_choices( self, kernel_features, kernel_args, kernel_kwargs ) -> List[SIMDKernel]: is_scan = kernel_features.contains_op("scan") is_split_scan = is_scan and any( node.is_split_scan() for node in kernel_features.scheduler_nodes() ) kernel_type: Type[TritonKernel] = self.kernel_type if is_split_scan: from .triton_split_scan import TritonSplitScanKernel kernel_type = TritonSplitScanKernel if is_scan: # TODO(jansel): scan does not yet work with cooperative reductions kernel_kwargs["override_cooperative_reduction"] = False # ops.sort only works with persistent reduction, and is not bandwidth bound anyway # so taking the hit of non-coalesced loads is okay if kernel_features.contains_op("sort"): kernel_kwargs["override_persistent_reduction"] = True kernel_kwargs["override_cooperative_reduction"] = False if not TritonKernel.has_persistent_RBLOCK(kernel_features.reduction_numel): # Cannot use persistent reduction with unknown dynamic rnumel assert not kernel_kwargs.get("override_persistent_reduction") kernel_kwargs["override_persistent_reduction"] = False kernel_kwargs = V.choices.triton_kernel_kwargs( kernel_type, kernel_features, kernel_args, kernel_kwargs ) kernel = kernel_type(*kernel_args, **kernel_kwargs) return self.add_multi_kernel_choices(kernel, kernel_args, kernel_kwargs) def add_multi_kernel_choices( self, kernel: SIMDKernel, kernel_args: List[Any], kernel_kwargs: Dict[str, Any], ) -> List[SIMDKernel]: kernels: List[SIMDKernel] = [kernel] if not config.triton.multi_kernel: return kernels optional_persistent = kernel.persistent_reduction and not kernel_kwargs.get( "override_persistent_reduction" ) optional_cooperative = kernel.cooperative_reduction and not kernel_kwargs.get( "override_cooperative_reduction" ) if optional_persistent: kernels.append( self.kernel_type( *kernel_args, **kernel_kwargs, override_persistent_reduction=False, ) ) if optional_cooperative: rnumel = kernel.numels["r"] # for larger sizes non-cooperative gets very slow if V.graph.sizevars.statically_known_leq(rnumel, 65536): kernels.append( other := self.kernel_type( *kernel_args, **kernel_kwargs, override_cooperative_reduction=False, ) ) if optional_persistent and other.persistent_reduction: kernels.append( self.kernel_type( *kernel_args, **kernel_kwargs, override_cooperative_reduction=False, override_persistent_reduction=False, ) ) if len(kernels) > 1: for kernel2 in kernels[1:]: # Keep buffers needed by the non-persistent reduction so both kernels have the same arguments kernel2.must_keep_buffers = kernel.must_keep_buffers # persistent kernels must be generated last so must_keep_buffers works right kernels.sort(key=lambda k: k.persistent_reduction) return kernels def benchmark_combo_kernel(self, node_list): def cache_file_path(): assert mod.__file__ is not None return os.path.splitext(mod.__file__)[0] + ".kernel_perf" def load_cache(): path = cache_file_path() if os.path.exists(path): with open(path) as fd: return tuple(float(e) 
for e in fd.read().split()) return (None, None) def store_cache(): path = cache_file_path() with open(path, "w") as fd: fd.write(str(ms) + " " + str(ms_clone)) total_ms, file_list = 0, [] total_clone_ms = 0 removed_buffers_orig = V.graph.removed_buffers V.graph.removed_buffers = OrderedSet(removed_buffers_orig) inplaced_to_remove_orig = V.graph.inplaced_to_remove V.graph.inplaced_to_remove = OrderedSet(inplaced_to_remove_orig) enable_autotune = config.combo_kernels_autotune > 0 mixed_sizes = config.combo_kernel_allow_mixed_sizes > 0 kernel_code_list = self.generate_combo_kernel_code( subkernel_nodes=node_list, custom_part_algorithm=True, enable_autotune=enable_autotune, mixed_sizes=mixed_sizes, only_gen_src_code=True, ) for src_code, _, node_group in kernel_code_list: fused_node_lists = [node.get_nodes() for node in node_group] names = [n.get_name() for nodes in fused_node_lists for n in nodes] src_code = src_code.replace(str(Placeholder.KERNEL_NAME), "triton_") mod = PyCodeCache.load(src_code) log.debug( "kernel src code for %s written to: %s", names, mod.__file__, ) ms, ms_clone = load_cache() if ms is not None: total_ms += ms total_clone_ms += ms_clone file_list.append(mod.__file__) continue args = mod.get_args() call = mod.call wrapped_jit_function = mod.triton_ # call once to trigger the compilation call(wrapped_jit_function.clone_args(*args)[0]) launchers = wrapped_jit_function.launchers assert len(launchers) == 1 if launchers[0].n_spills > 0: # skip benchmarking the kernel if there are register spills ms = ms_clone = float("inf") else: # We have to clone the inplace updated arguments to avoid earlier calls # generating out of range indices for later calls. ms = benchmarker.benchmark_gpu( lambda: call(wrapped_jit_function.clone_args(*args)[0]) ) ms_clone = benchmarker.benchmark_gpu( lambda: wrapped_jit_function.clone_args(*args)[0] ) log.debug( "The fused kernel for %s took %.3f ms to run, %.3f ms to clone inputs", {n.get_name() for n in node_group}, ms, ms_clone, ) store_cache() total_ms += ms total_clone_ms += ms_clone file_list.append(mod.__file__) V.graph.removed_buffers = removed_buffers_orig V.graph.inplaced_to_remove = inplaced_to_remove_orig return total_ms, total_clone_ms, file_list def debug_triton_code(node: BaseSchedulerNode) -> List[str]: lines = [] multi_template = node.get_template_node() assert multi_template is None or isinstance(multi_template, ir.MultiTemplateBuffer) if multi_template and multi_template.make_kernel_render is None: lines.append(f"{node.get_name()} Unfinalized multi template buffer") else: from torch._inductor.codegen.cuda_combined_scheduling import ( CUDACombinedScheduling, ) device = node.get_device() assert device is not None backend = node.scheduler.get_backend(device) assert isinstance( backend, (SIMDScheduling, CUDACombinedScheduling) ), f"Scheduling backend should be SIMD or CUDACombined when generating debug Triton strings, got: {type(backend)}" with V.graph.set_current_device(device): # Don't increment kernel count when generating debug string. # This will confuse some unit tests that check the number of # generated kernels. old_generated_kernel_count = metrics.generated_kernel_count triton_code = backend.generate_kernel_code_from_nodes( node.get_nodes() ).strip() metrics.generated_kernel_count = old_generated_kernel_count lines.append(f"{node.get_name()} Triton code:") lines.append(textwrap.indent(triton_code, " ")) return lines
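The benchmarking paths above (benchmark_fused_nodes and benchmark_combo_kernel) correct for the clone that protects in-place arguments: the kernel is timed together with clone_args, then the clone alone is timed and subtracted so mutating kernels are not penalized for overhead the benchmark itself introduces. Below is a minimal standalone sketch of that correction using CUDA events; the helper names bench and benchmark_inplace_kernel are invented for illustration and are not part of the Inductor API.

import torch


def bench(fn, reps: int = 40) -> float:
    # Hypothetical helper: average wall time of fn() in milliseconds on the GPU.
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    fn()  # warm up once (triggers any lazy compilation)
    torch.cuda.synchronize()
    start.record()
    for _ in range(reps):
        fn()
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end) / reps


def benchmark_inplace_kernel(call, clone_args, args) -> float:
    # Time the kernel together with the clone that shields its mutated inputs ...
    total_ms = bench(lambda: call(clone_args(*args)[0]))
    # ... then time the clone alone and subtract it, mirroring the bias
    # correction applied when mutated_arg_names is non-empty.
    clone_ms = bench(lambda: clone_args(*args))
    return total_ms - clone_ms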
@triton.jit """ elif self.inside_reduction: reduction_hint = self.features.get_reduction_hint() heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( size_hints={size_hints!r}, reduction_hint={reduction_hint}, filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r} )
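Once the template in the chunk above is rendered and define_kernel substitutes the name placeholders, the emitted kernel source begins with a heuristics decorator followed by the jitted definition. Roughly, and with every concrete name and value below invented purely for illustration (the real triton_meta / inductor_meta dictionaries carry many more entries), the output looks like:

@triton_heuristics.reduction(
    size_hints=[1024, 2048],
    reduction_hint=ReductionHint.INNER,
    filename=__file__,
    triton_meta={'signature': {...}, 'device': DeviceProperties(...), 'constants': {}},
    inductor_meta={'kernel_name': 'triton_red_fused_sum_0', 'mutated_arg_names': [], ...}
)
@triton.jit
def triton_red_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
    xnumel = 1000  # hard coded by codegen_static_numels when the size is static
    rnumel = 2048
    ...

The fixed_config and pointwise branches differ only in which keyword arguments the decorator receives: a fixed config in the first case, or size_hints plus an optional tile_hint and min_elem_per_thread in the second.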
alexanderb14/pytorch
torch/_inductor/codegen/triton.py
https://github.com/alexanderb14/pytorch/blob/8da4224042665686de22f8e351a0b42bfa42cab8/torch/_inductor/codegen/triton.py
# mypy: allow-untyped-defs from __future__ import annotations import collections import contextlib import dataclasses import functools import itertools import logging import os import re import textwrap from functools import lru_cache from typing import ( Any, Callable, cast, Dict, Iterable, List, Optional, Sequence, Tuple, Type, TYPE_CHECKING, Union, ) import sympy from sympy.printing.precedence import PRECEDENCE import torch import torch._logging from torch._dynamo.utils import identity, preserve_rng_state from torch._prims_common import is_integer_dtype from torch.utils._ordered_set import OrderedSet from torch.utils._sympy.functions import CeilDiv, FloorDiv, ModularIndexing from torch.utils._triton import has_triton_package from ...utils._sympy.symbol import free_symbol_is_type, prefix_str, symbol_is_type, SymT from ...utils._sympy.value_ranges import ValueRanges from .. import config, ir, metrics from ..codecache import code_hash, get_path, PyCodeCache from ..runtime.benchmarking import benchmarker from ..runtime.hints import ( AutotuneHint, DeviceProperties, TRITON_MAX_BLOCK, TRITON_MAX_RSPLIT, ) from ..runtime.runtime_utils import get_max_y_grid, next_power_of_2 from ..runtime.triton_heuristics import ( cooperative_reduction_grid, grid as default_grid_fn, ) from ..scheduler import BaseSchedulerNode, FusedSchedulerNode, Scheduler, SchedulerNode from ..utils import ( DelayReplaceLine, get_bounds_index_expr, get_fused_kernel_name, get_kernel_metadata, is_welford_reduction, Placeholder, sympy_subs, upcast_compute_type, ) from ..virtualized import _ops as ops, OpsHandler, ReductionType, StoreMode, V from ..wrapper_benchmark import get_kernel_category_by_source_code from .block_analysis import BlockPatternMatcher from .common import ( BackendFeature, CSE, CSEVariable, DeferredLine, IndentedBuffer, OpOverrides, PythonPrinter, SizeArg, TensorArg, WorkspaceArg, WorkspaceZeroMode, ) from .simd import ( constant_repr, IterationRanges, IterationRangesEntry, IterationRangesRoot, pexpr, prefix_is_reduction, SIMDKernel, SIMDScheduling, ) from .triton_utils import ( config_of, should_unwrap_unspec_arg, signature_of, signature_to_meta, ) if TYPE_CHECKING: from ..ir import IRNode log = logging.getLogger(__name__) perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints") schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") fusion_log = torch._logging.getArtifactLogger(__name__, "fusion") @lru_cache(None) def gen_attr_descriptor_import(): """ import AttrsDescriptor if the triton version is new enough to have this class defined. """ if not has_triton_package(): return "" import triton.compiler.compiler # Note: this works because triton.compiler.compiler imports AttrsDescriptor from triton.backends.compiler # When support for the legacy AttrsDescriptor is removed then this import path should be changed. 
if hasattr(triton.compiler.compiler, "AttrsDescriptor"): return "from triton.compiler.compiler import AttrsDescriptor" else: return "" @lru_cache(None) def gen_common_triton_imports(): imports = IndentedBuffer() imports.splice( """ import triton import triton.language as tl """ ) if attr_desc := gen_attr_descriptor_import(): imports.writeline(attr_desc) imports.splice( """ from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties """ ) return imports.getvalue() class TritonSymbols: """ Stores sympy.Symbol instances and constants associated with triton codegen. """ block_offsets = { symt: sympy.Symbol(f"{prefix_str[symt]}offset", integer=True, nonnegative=True) for symt in [SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK, SymT.RINDEX] } block_sizes = { symt: sympy.Symbol( f"{prefix_str[symt].upper()}BLOCK", integer=True, positive=True ) for symt in [SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK, SymT.RINDEX] } @classmethod def get_block_size(cls, tree: IterationRanges) -> sympy.Symbol: return cls.block_sizes[tree.symt] @classmethod def get_block_offset(cls, tree: IterationRanges) -> sympy.Symbol: return cls.block_offsets[tree.symt] @dataclasses.dataclass class IndexingOptions: index_str: str mask_vars: OrderedSet[str] mask_str: str expand_str: Optional[str] _has_rindex: bool index: sympy.Expr def has_mask(self): return bool(self.mask_vars) def has_indirect(self): return free_symbol_is_type(self.index, SymT.TMP) def has_rindex(self): return self._has_rindex def has_tmpmask(self): return "tmp" in self.mask_str def has_rmask(self): return "rmask" in self.mask_str @dataclasses.dataclass class BlockPtrOptions: params: BlockParameters constant_offset: sympy.Expr order: List[int] mask_vars: OrderedSet[str] broadcast_shape: Sequence[sympy.Expr] broadcasting_dims: List[bool] final_shape: Sequence[sympy.Expr] _boundary_check: Optional[List[int]] = None @property def shape(self) -> List[sympy.Expr]: return self.params.shape @property def block_shape(self) -> List[sympy.Expr]: return self.params.block_shape @property def strides(self) -> List[sympy.Expr]: return self.params.strides @property def offsets(self) -> List[sympy.Expr]: return self.params.offsets def codegen_broadcast_and_reshape( self, value: str, initial_shape: Sequence[sympy.Expr], final_shape: Sequence[sympy.Expr], allow_implicit: bool, ) -> str: """ Generate a broadcast and a reshape for the block pointer. This restores stride-0 dimensions which were removed from the block pointer. """ # Reshape to add singletons. pre_broadcast_shape = [ sympy.S.One if is_broadcasting else dim for dim, is_broadcasting in zip( self.broadcast_shape, self.broadcasting_dims ) ] value = triton_reshape(value, initial_shape, pre_broadcast_shape) # Broadcast singletons. # For loads, we can often implicitly broadcast singleton dimensions. # We need an explicit broadcast for stores, or if the final reshape does more # than add singletons. sizevars = V.graph.sizevars require_broadcast = any(self.broadcasting_dims) and ( len(pre_broadcast_shape) != len(final_shape) or any( not ( sizevars.statically_known_equals(pre_dim, 1) or sizevars.statically_known_equals(pre_dim, post_dim) ) for pre_dim, post_dim in zip(pre_broadcast_shape, final_shape) ) ) if not allow_implicit or require_broadcast: value = f"tl.broadcast_to({value}, {V.kernel.index_to_str(self.broadcast_shape)})" # Reshape to the final shape. 
value = triton_reshape(value, self.broadcast_shape, final_shape) return value @staticmethod def create( *, params: BlockParameters, constant_offset: sympy.Expr, range_trees: List[IterationRangesEntry], mask_vars: OrderedSet[str], get_max_block: Callable[[str], int], ) -> BlockPtrOptions: """Helper to create a BlockPtrOptions instance""" sizevars = V.graph.sizevars def lookup_size(exprs: Iterable[sympy.Expr]) -> List[sympy.Expr]: return [sizevars.lookup_precomputed_size(expr) for expr in exprs] # Look up precomputed sizes params.shape = lookup_size(params.shape) params.strides = lookup_size(params.strides) # Strip out dimensions of stride 0. # These will be restored with tl.broadcast_to. broadcasting_dims = [ sizevars.statically_known_equals(stride, 0) for stride in params.strides ] # Strip out dimensions of size 1. # These will be restored by tl.reshape. singleton_dims = [ sizevars.statically_known_equals(dim, 1) for dim in params.block_shape ] if all(singleton_dims): # Handle a pure singletons, e.g. [1, 1] singleton_dims[-1] = False # Record the post-broadcast shape before broadcasting dims are removed. # The pre-broadcast shape is identical to this, except broadcasting dims are # replaced with 1. broadcast_shape = [ dim for dim, is_singleton in zip(params.block_shape, singleton_dims) if not is_singleton ] # Combine all removable dims. removable_dims = [any(dims) for dims in zip(singleton_dims, broadcasting_dims)] def remove_dims(it): """Removes any broadcasting or singleton dims from a given sequence""" return [ item for item, is_removable in zip(it, removable_dims) if not is_removable ] # Drop removable dimensions from the input. params = BlockParameters( **{key: remove_dims(val) for key, val in dataclasses.asdict(params).items()} ) # Compute the final shape, adjusting for special kernel types. final_shape = [TritonSymbols.get_block_size(tree) for tree in range_trees] if V.kernel.no_x_dim: assert range_trees[0].prefix == "x" final_shape.pop(0) if ( not V.kernel.inside_reduction and len(params.strides) == len(V.kernel.numels) - 1 and V.kernel.numels["r"] != 1 ): # Need to expand rank by 1 to match rank when self.inside_reduction=True final_shape.append(sympy.S.One) result = BlockPtrOptions( params=params, constant_offset=V.graph.sizevars.lookup_precomputed_size(constant_offset), order=list(reversed(range(len(params.shape)))), mask_vars=mask_vars, final_shape=final_shape, broadcast_shape=broadcast_shape, broadcasting_dims=broadcasting_dims, ) result.compute_boundary_check(get_max_block) return result def replace_roffset(self, expr: sympy.Expr, replacement: sympy.Expr) -> sympy.Expr: """ Replaces instances of roffset with the new expression. 
""" roffset = TritonSymbols.block_offsets[SymT.RINDEX] return sympy_subs(expr, {roffset: replacement}) def format(self, name: str, roffset=True) -> str: """ Codegen a call to tl.make_block_ptr() Args: name: variable name for pointer roffset: should roffset be included in offsets=..., for use with tl.advance() Returns: "tl.make_block_ptr(...)" """ f = V.kernel.index_to_str offsets = [*self.offsets] if not roffset: offsets = [self.replace_roffset(offset, sympy.S.Zero) for offset in offsets] args = [ ( f"{name} + ({f(self.constant_offset)})" if self.constant_offset != 0 else name ), f"shape={f(self.shape)}", f"strides={f(self.strides)}", f"block_shape={f(self.block_shape)}", f"order={f(self.order)}", f"offsets={f(offsets)}", ] return f"tl.make_block_ptr({', '.join(args)})" def compute_boundary_check(self, get_max_block: Callable[[str], int]) -> None: """List of indices to pass to tl.load(boundary_check=...)""" sizevars = V.graph.sizevars # Substitute maximum block sizes in shape expressions. # This works in multiple_of checks because block sizes are powers of 2. block_to_max: Dict[sympy.Expr, Any] = { block_size: get_max_block(prefix_str[symt]) for symt, block_size in TritonSymbols.block_sizes.items() } self._boundary_check = [ idx for idx in range(len(self.shape)) if ( not sizevars.statically_known_equals(self.strides[idx], sympy.S.Zero) and not sizevars.statically_known_multiple_of( self.shape[idx], self.block_shape[idx] ) and not sizevars.statically_known_multiple_of( self.shape[idx], sympy_subs(self.block_shape[idx], block_to_max) ) and not ( V.kernel.no_x_dim and self.block_shape[idx] == TritonSymbols.block_sizes[SymT.XBLOCK] ) ) ] def boundary_check(self): assert self._boundary_check is not None return self._boundary_check def advance_roffset(self): """ Codegen string to pass to tl.advance(name, ...). Advance is the difference between offsets in each loop iteration. To compute it, we replace roffset with multiples of RBLOCK. Since we expect roffset to vary in range(0, rnumel, RBLOCK), the first iteration has roffset=0, while the second has roffset=RBLOCK. 
""" rblock = TritonSymbols.block_sizes[SymT.RINDEX] advance = [ ( self.replace_roffset(offset, rblock) - self.replace_roffset(offset, sympy.S.Zero) ) for offset in self.offsets ] return V.kernel.index_to_str(advance) def has_indirect(self): return False # block_ptr can't do indirect indexing def has_rindex(self) -> bool: return any(free_symbol_is_type(expr, SymT.RINDEX) for expr in self.block_shape) def has_rmask(self): return self.has_rindex() def has_tmpmask(self): return False # block_ptr can't do indirect indexing def has_mask(self): return bool(self.boundary_check()) def triton_reshape( value: str, old_shape: Sequence[sympy.Expr], new_shape: Sequence[sympy.Expr] ): """Workaround https://github.com/openai/triton/issues/2836""" assert isinstance(old_shape, list) and isinstance(new_shape, list) old_shape_str = [V.kernel.index_to_str(shape) for shape in old_shape] new_shape_str = [V.kernel.index_to_str(shape) for shape in new_shape] if old_shape_str == new_shape_str: return value if [s for s in new_shape_str if s != "1"] != old_shape_str: return f"tl.reshape({value}, [{', '.join(new_shape_str)}])" # rewrite to [:, None] syntax, which is less buggy idx = 0 expand = [] for size in new_shape_str: if idx < len(old_shape_str) and size == old_shape_str[idx]: expand.append(":") idx += 1 else: assert size == "1" expand.append("None") assert idx == len(old_shape_str) return f"{value}[{', '.join(expand)}]" # NB: Inheriting from PythonPrinter is somewhat dangerous, because there are a # number of operators which Triton "implements", but in a way that is # inconsistent with Python semantics (and consistent with C semantics). We # must override all of these, or it is potential silent correctness problem class TritonPrinter(PythonPrinter): def _print_TruncToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.trunc({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_Float(self, expr): if config.is_fbcode() and torch.version.hip: ret = f"{expr}" else: ret = f"tl.full([], {expr}, tl.float64)" return ret def _print_ToFloat(self, expr): assert len(expr.args) == 1 s = self.parenthesize(expr.args[0], PRECEDENCE["Atom"] - 0.5) return f"{s}.to(tl.float64)" def _print_PythonMod(self, expr): quot, div = expr.args if quot.is_nonnegative and div.is_nonnegative: return self.stringify(expr.args, " % ", PRECEDENCE["Atom"] - 0.5) quot_s = self._print(quot) div_s = self._print(div) return f"triton_helpers.remainder_integer({quot_s}, {div_s})" def _print_FloorDiv(self, expr): assert expr.is_integer quot, div = expr.args if quot.is_nonnegative and div.is_nonnegative: return self.stringify(expr.args, " // ", PRECEDENCE["Atom"] - 0.5) quot_s = self._print(quot) div_s = self._print(div) return f"triton_helpers.div_floor_integer({quot_s}, {div_s})" # TODO: This is wrong, when lhs, rhs > 2**53, Python does a higher # precision algorithm, which we would need to replicate here def _print_IntTrueDiv(self, expr): return self.stringify(expr.args, " / ", PRECEDENCE["Atom"] - 0.5) # NB: sympy.floor/ceiling produce integers, so we have to do the # conversion to index dtype def _print_floor(self, expr): assert len(expr.args) == 1 return ( f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_FloorToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_ceiling(self, expr): assert len(expr.args) == 1 return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})" def 
_print_CeilToInt(self, expr): assert len(expr.args) == 1 return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})" def _helper_sqrt(self, expr): return f"libdevice.sqrt({self._print(expr)}.to(tl.float32))" def _print_FloatPow(self, expr): return ( f"libdevice.pow({self._print(expr.args[0])}, {self._print(expr.args[1])})" ) _print_PowByNatural = _print_FloatPow def _print_Where(self, expr): c = self.doprint(expr.args[0]) p = self.doprint(expr.args[1]) q = self.doprint(expr.args[2]) return f"tl.where({c}, {p}, {q})" def _print_min_max_helper(self, expr: sympy.Expr, cmp: str) -> str: """ Helper for max/min code genereration. cmp: > or < """ nargs = len(expr.args) if len(expr.args) == 1: return self._print(expr.args[0]) mid = len(expr.args) // 2 cls = type(expr) a = self._print(cls(*expr.args[:mid])) b = self._print(cls(*expr.args[mid:])) # Use a macro so we can propagate constexprs. # https://github.com/triton-lang/triton/issues/3815 a, b = tuple(f"({x})" for x in (a, b)) assert cmp in (">", "<"), f"Unexpected comparator: '{cmp}'" return f"({a} * ({a} {cmp}= {b}) + {b} * ({b} {cmp} {a}))" def _print_Min(self, expr): return self._print_min_max_helper(expr, "<") def _print_Max(self, expr): return self._print_min_max_helper(expr, ">") def _print_Abs(self, expr): assert len(expr.args) == 1 return f"tl_math.abs({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_cos(self, expr): assert len(expr.args) == 1 return f"libdevice.cos(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_cosh(self, expr): assert len(expr.args) == 1 return f"libdevice.cosh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_acos(self, expr): assert len(expr.args) == 1 return f"libdevice.acos(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_sin(self, expr): assert len(expr.args) == 1 return f"libdevice.sin(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_sinh(self, expr): assert len(expr.args) == 1 return f"libdevice.sinh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_asin(self, expr): assert len(expr.args) == 1 return f"libdevice.asin(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_tan(self, expr): assert len(expr.args) == 1 return f"libdevice.tan(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_tanh(self, expr): assert len(expr.args) == 1 return f"libdevice.tanh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_atan(self, expr): assert len(expr.args) == 1 return f"libdevice.atan(({self._print(expr.args[0])}).to(tl.float32))" def _print_RoundToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.llrint({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_RoundDecimal(self, expr): assert len(expr.args) == 2 number, ndigits = expr.args if number.is_integer: # ndigits < 0 should have been filtered by the sympy function assert ndigits < 0 raise ValueError( f"For integer inputs, only non-negative ndigits are currently supported, but got {ndigits}." 
) number_str = self.parenthesize(number, PRECEDENCE["Mul"]) return f"libdevice.nearbyint(1e{ndigits} * {number_str}) * 1e{-ndigits}" texpr = TritonPrinter().doprint # correct cases where Triton types names don't match PyTorch _triton_type_mapping = { "tl.bool": "tl.int1", "tl.float8_e4m3fn": "tl.float8e4nv", "tl.float8_e5m2": "tl.float8e5", "tl.float8_e4m3fnuz": "tl.float8e4b8", "tl.float8_e5m2fnuz": "tl.float8e5b16", } _triton_type_re = re.compile(r"^.*[.]") def triton_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type""" triton_type_name = _triton_type_re.sub("tl.", str(dtype)) return _triton_type_mapping.get(triton_type_name, triton_type_name) def triton_compute_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type and upcast [b]float16 to float32""" return triton_type(upcast_compute_type(dtype)) def _get_primitive_bitwidth(dtype: torch.dtype) -> int: """Number of bits of triton_compute_type()""" dtype = upcast_compute_type(dtype) itemsize = getattr(dtype, "itemsize", None) if itemsize: return itemsize * 8 else: return -1 def triton_store_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type, with fix for storing tl.bool""" if dtype == torch.bool: dtype = torch.int8 return triton_type(dtype) def upcast_acc_dtype(dtype: torch.dtype) -> torch.dtype: """Implicit upcasts used for Triton reduction types""" if is_integer_dtype(dtype) and dtype.is_signed and dtype.itemsize <= 4: return torch.int32 return upcast_compute_type(dtype) def triton_acc_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type, with reduction upcasts""" return triton_compute_type(upcast_acc_dtype(dtype)) class TritonCSEVariable(CSEVariable): def __init__(self, name, bounds: ValueRanges[Any], dtype: torch.dtype) -> None: super().__init__(name, bounds, dtype) # We'll use this to track which masks the variable needs when used for indirect indexing self.mask_vars: OrderedSet[str] = OrderedSet() assert dtype is not None, "TritonCSEVariable must have dtype" def update_on_args(self, name, args, kwargs): for arg in args: if isinstance(arg, TritonCSEVariable): self.mask_vars.update(arg.mask_vars) elif isinstance(arg, sympy.Symbol) and arg.name[0] in "xyr": # most of the time index vars don't need masks associated with them # however, when index vars are used to compute indices for indirect reads # those reads should subsequently be masked, self.mask_vars.update({f"{arg.name[0]}mask"}) class TritonOverrides(OpOverrides): """Map element-wise ops to Triton""" @staticmethod def to_dtype( x, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None, use_compute_types=True, ): def _get_min_elements_per_thread( src_dtype: torch.dtype, dst_dtype: torch.dtype ) -> int: if src_dtype == dst_dtype: # No data type conversion is needed. No requirements on min_elem_per_thread. return 0 # fp8 data type conversions has min_elem_per_thread requirements. # Refer to Triton implementations here: # https://github.com/openai/triton/blob/10f59d8ce04052521c1bc0cb3a3f8b98918fc7e3/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp#L10. fp8_dtypes = ( torch.float8_e4m3fn, torch.float8_e5m2, ) # Triton doesn't support type conversions between fp8_e4m3 and fp8_e5m2. assert not ( src_dtype in fp8_dtypes and dst_dtype in fp8_dtypes and src_dtype != dst_dtype ), "Conversions between float8_e5m2 and float8_e4m3fn is not supported!" 
if src_dtype == torch.float8_e5m2 or dst_dtype == torch.float8_e5m2: return 4 if src_dtype == torch.float8_e4m3fn or dst_dtype == torch.float8_e4m3fn: return 2 # No requirements on min_elem_per_thread. return 0 if src_dtype is not None: # Both dtype and src_dtype are set. This is used by torch to(dtype=dtype). # It takes the maximum min_elem_per_thread if there are multiple fp8 conversions # in the same kernel. V.kernel.min_elem_per_thread = max( _get_min_elements_per_thread(src_dtype, dtype), V.kernel.min_elem_per_thread, ) if dtype == torch.bool: return f"({x} != 0)" elif dtype == torch.uint8: # to work around llvm uint conversion semantics # that produces 0's for negative values return f"{x}.to(tl.int8).to(tl.uint8)" if use_compute_types: out_dtype = triton_compute_type(dtype) else: out_dtype = triton_store_type(dtype) return f"{x}.to({out_dtype})" @staticmethod def to_dtype_bitcast(x, dtype: torch.dtype, src_dtype: torch.dtype): triton_dtype = triton_compute_type(dtype) # We may promote float16 or bfloat16 to float32 and cause the # bitwidth of dtype to be different from the input tensor (i.e. float32). # In such as case, we will have to convert the input tensor to # its src_type, perform bitcast, and then convert the bit-casted # tensor back to float to ensure we use values with the right precision. if ( src_dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32 ): triton_src_dtype = str(src_dtype).split(".")[-1] cast_x = f"{x}.to(tl.{triton_src_dtype})" if dtype in (torch.float16, torch.bfloat16): triton_type_name = str(dtype).split(".")[-1] triton_dtype = f"tl.{triton_type_name}" cast_x = f"{cast_x}.to({triton_dtype}, bitcast=True)" if dtype in (torch.float16, torch.bfloat16): return f"{cast_x}.to(tl.float32)" return cast_x else: src_dtype_bitwidth = _get_primitive_bitwidth(src_dtype) target_dtype_bitwidth = _get_primitive_bitwidth(dtype) bitcast = "True" if src_dtype_bitwidth == target_dtype_bitwidth else "False" return f"{x}.to({triton_dtype}, bitcast={bitcast})" @staticmethod def _shaped_constant(value, dtype, shape): type_ = torch._prims_common.dtype_to_type(dtype) triton_val = constant_repr(type_(value)) triton_type = triton_compute_type(dtype) if triton_type == "tl.float32": # Float constants are always f32 in triton return triton_val # NOTE: We use a tensor here in order to get the expected type. # Otherwise, e.g. float64 constants would be trunctated to float32. return f"tl.full({shape}, {triton_val}, {triton_type})" @classmethod def constant(cls, value, dtype): return cls._shaped_constant(value, dtype, shape=[]) @staticmethod def abs(x): return f"tl_math.abs({x})" @staticmethod def libdevice_abs(x): return f"libdevice.abs({x})" @staticmethod def exp(x): return f"tl_math.exp({x})" @staticmethod def libdevice_exp(x): return f"libdevice.exp({x})" @staticmethod def exp2(x): return f"libdevice.exp2({x})" @staticmethod def expm1(x): return f"libdevice.expm1({x})" @staticmethod def sqrt(x): if config.triton.codegen_upcast_to_fp32: return f"libdevice.sqrt({x})" else: needs_upcast = x.dtype in (torch.float16, torch.bfloat16) orig_dtype = triton_type(x.dtype) upcast_string = ".to(tl.float32)" if needs_upcast else "" downcast_string = f".to({orig_dtype})" if needs_upcast else "" return f"libdevice.sqrt({x}{upcast_string}){downcast_string}" @staticmethod def libdevice_sqrt(x): return f"libdevice.sqrt({x})" @staticmethod def relu(x): bug = config.triton.inject_relu_bug_TESTING_ONLY if bug == "compile_error": return "compile error!" 
elif bug == "runtime_error": # NB: this only triggers runtime error as long as input # is not all zero return f'triton_helpers.device_assert_then({x} == 0, "injected assert fail", {x})' elif bug == "accuracy": return f"{x} + 1" elif bug is None: return ops.maximum(ops.constant(0, torch.int32), x) else: raise AssertionError( f"unrecognized config triton.inject_relu_bug_TESTING_ONLY = {bug!r}" ) @staticmethod def minimum(a, b): return f"triton_helpers.minimum({a}, {b})" @staticmethod def maximum(a, b): return f"triton_helpers.maximum({a}, {b})" @staticmethod def where(a, b, c): return f"tl.where({a}, {b}, {c})" @staticmethod def inline_asm_elementwise( *inputs, asm, constraints=None, dtype=torch.float32, is_pure=True, pack=1 ): triton_type = triton_compute_type(dtype) input_refs = ", ".join([str(i) for i in inputs]) if constraints is None: constraints = ", ".join(["=r"] + ["r" for _ in inputs]) return f"tl.inline_asm_elementwise('{asm}', '{constraints}', [{input_refs}], dtype={triton_type}, is_pure={is_pure}, pack={pack})" # noqa: B950 @staticmethod def cos(x): return f"tl_math.cos({x})" @staticmethod def libdevice_cos(x): return f"libdevice.cos({x})" @staticmethod def sin(x): return f"tl_math.sin({x})" @staticmethod def libdevice_sin(x): return f"libdevice.sin({x})" @classmethod def index_expr(cls, expr, dtype): raise NotImplementedError("ops.index_expr not implemented outside a kernel") @staticmethod def masked(mask, body, other): raise NotImplementedError("ops.masked not implemented outside a kernel") @staticmethod def lgamma(x): return f"libdevice.lgamma({x})" @staticmethod def erf(x): return f"libdevice.erf({x})" @staticmethod def cosh(x): return f"libdevice.cosh({x})" @staticmethod def sinh(x): return f"libdevice.sinh({x})" @staticmethod def acos(x): return f"libdevice.acos({x})" @staticmethod def acosh(x): return f"libdevice.acosh({x})" @staticmethod def asin(x): return f"libdevice.asin({x})" @staticmethod def asinh(x): return f"libdevice.asinh({x})" @staticmethod def atan2(x, y): return f"libdevice.atan2({x}, {y})" @staticmethod def atan(x): return f"libdevice.atan({x})" @staticmethod def atanh(x): return f"libdevice.atanh({x})" @staticmethod def copysign(x, y): return f"libdevice.copysign({x}, {y})" @staticmethod def erfc(x): return f"libdevice.erfc({x})" @staticmethod def erfinv(x): return f"libdevice.erfinv({x})" @staticmethod def hypot(x, y): return f"libdevice.hypot({x}, {y})" @staticmethod def log10(x): return f"libdevice.log10({x})" @staticmethod def log2(x): return f"libdevice.log2({x})" @staticmethod def nextafter(x, y): return f"libdevice.nextafter({x}, {y})" @staticmethod def logical_and(a, b): return f"{a} & {b}" @staticmethod def logical_not(a): return f"{a} == 0" @staticmethod def logical_or(a, b): return f"{a} | {b}" @staticmethod def logical_xor(a, b): return f"({a} ^ {b})" @staticmethod def bitwise_and(a, b): return f"{a} & {b}" @staticmethod def bitwise_not(a): return f"~{a}" @staticmethod def bitwise_or(a, b): return f"{a} | {b}" @staticmethod def bitwise_xor(a, b): return f"{a} ^ {b}" @staticmethod def bitwise_left_shift(a, b): return f"{a} << {b}" @staticmethod def bitwise_right_shift(a, b): return f"{a} >> {b}" @staticmethod def rand(seed, offset): offset = f"({offset}).to(tl.uint32)" return f"tl.rand({seed}, {offset})" @staticmethod def randn(seed, offset): offset = f"({offset}).to(tl.uint32)" return f"tl.randn({seed}, {offset})" @staticmethod def randint64(seed, offset, low, high): offset = f"({offset}).to(tl.uint32)" return f"triton_helpers.randint64({seed}, 
{offset}, {low}, {high})" @staticmethod def load_seed(name, offset): raise NotImplementedError("ops.load_seed not implemented outside a kernel") @staticmethod def rsqrt(x): return f"libdevice.rsqrt({x})" @staticmethod def log1p(x): return f"libdevice.log1p({x})" @staticmethod def tan(x): return f"libdevice.tan({x})" @staticmethod def tanh(x): return f"libdevice.tanh({x})" @staticmethod def sigmoid(x): return f"tl.sigmoid({x})" @staticmethod def signbit(x): # XX: This is wrong for the value -0.0 in floating point return ( f"(libdevice.signbit({x}) != 0) if ({x}).dtype is tl.float32 else {x} < 0" ) @staticmethod def fmod(a, b): return f"libdevice.fmod({a}, {b})" @staticmethod def pow(a, b): return f"libdevice.pow({a}, {b})" @staticmethod def log(x): return f"tl_math.log({x})" @staticmethod def libdevice_log(x): return f"libdevice.log({x})" @staticmethod def isinf(x): return f"libdevice.isinf({x}).to(tl.int1)" @staticmethod def isnan(x): return f"libdevice.isnan({x}).to(tl.int1)" @staticmethod def round(x): return f"libdevice.nearbyint({x})" @staticmethod def floor(x): return f"libdevice.floor({x})" @staticmethod def floordiv(a, b): # See the comment in lowering.div_mode. a and b are integer type. # Similar to div_floor_kernel_cuda in pytorch core. # Notice that // in triton behaves as truncdiv instead of floordiv quot = f"{a} // {b}" rem = f"{a} % {b}" return f"tl.where(({a} < 0) != ({b} < 0), tl.where({rem} != 0, {quot} - 1, {quot}), {quot})" @staticmethod def sign(x): z = ops.constant(0, torch.int32) left = ops.to_dtype((ops.lt(z, x)), torch.int8) right = ops.to_dtype((ops.lt(x, z)), torch.int8) sub = ops.sub(left, right) return f"{sub}.to({x}.dtype)" @staticmethod def trunc(x): return f"libdevice.trunc({x})" @staticmethod def truncdiv(a, b): # See the comment in lowering.div_mode. a and b are integer type. # Notice that // in triton behaves as truncdiv instead of floordiv return f"{a} // {b}" @staticmethod def ceil(x): return f"libdevice.ceil({x})" TritonOverrides._initialize_pointwise_overrides("triton") # Use mypy to check protocol implemented correctly def _typecheck_TritonOverrides(h: TritonOverrides) -> OpsHandler[str]: return h class TritonKernelOverrides(TritonOverrides): """Map element-wise ops to Triton within a TritonKernel Unlike TritonOverrides, these assume the code is going to be inserted into the body of the main triton kernel and so it may use indexing and mask variables which are assumed to already be defined in the current scope. """ @classmethod def constant(cls, value, dtype): # NOTE: Cannot use shape=[] as it's not supported by triton-rocm # We could use shape=[1] instead but starting with the correct # ndim avoids extra `tt.expand_dim` ops appearing in the triton IR. ndim = V.kernel.triton_tensor_ndim() shape = [1] * ndim return cls._shaped_constant(value, dtype, shape=shape) @classmethod def index_expr(cls, expr, dtype): indexing = V.kernel.indexing(expr, block_ptr=False) assert isinstance(indexing, IndexingOptions) # Our sympy expr printing casts to the current kernel index dtype. 
# we only respect non int32-int64 dtypes and otherwise use current kernel indexing dtype index_dtype = torch.int32 if V.kernel.index_dtype == "tl.int32" else torch.int64 dtype = dtype if dtype not in (torch.int32, torch.int64) else index_dtype var = V.kernel.cse.generate( V.kernel.compute, indexing.index_str, bounds=get_bounds_index_expr(expr), dtype=dtype, ) if dtype not in (torch.int32, torch.int64): var = V.kernel.cse.generate( V.kernel.compute, cls.to_dtype(var, dtype), dtype=upcast_compute_type(dtype), ) else: # TODO: we are not always consistent in enforcing that the output of the index expr printing # results in the indexing dtype. So if we detect that we have an input which might type promote # to a dtype other than indexing dtype, add a cast. # Trying to avoid dtype = index_dtype for index_var in expr.free_symbols: if symbol_is_type(index_var, SymT.TMP): dtype = torch.promote_types( dtype, V.kernel.cse.varname_map[index_var.name].dtype ) if dtype != index_dtype: var = V.kernel.cse.generate( V.kernel.compute, cls.to_dtype(var, index_dtype), dtype=index_dtype, ) var.mask_vars = indexing.mask_vars return var @staticmethod def masked(mask, body, other): if mask is not None and torch.version.hip is not None: mask = V.kernel.cse.generate( V.kernel.compute, f"{mask}.to(tl.int1)", dtype=torch.bool, ) nodes = body.graph.find_nodes(op="output") assert nodes, "graph for body does not contain an output" need_where = False for node in nodes: for arg in node.args: if arg.target != "load" or should_unwrap_unspec_arg(arg.args[0]): need_where = True value = None if need_where else other with V.kernel.mask_loads(mask, value=value) as new_mask: result = body() if need_where: # Remove once CSEVariables track the dtype if result.bounds.is_bool: other = bool(other) # Take dtype from result to prevent accidental promotion other = V.kernel.cse.generate( V.kernel.compute, f"tl.full({result}.shape, {constant_repr(other)}, {result}.dtype)", bounds=ValueRanges.wrap(other), dtype=result.dtype, ) ret = ops.where(new_mask, result, other) else: ret = result ret.mask_vars.discard(new_mask) return ret @staticmethod def load_seed(name, offset): var = V.kernel.args.input(name) return ( f"tl.load({var} + {V.kernel.args.seed_offset('load_seed_offset', offset)})" ) @staticmethod def frexp(x): cache_key = f"frexp({x})" if cse_val := V.kernel.cse.try_get(cache_key): return cse_val mantissa = V.kernel.cse.newvar(dtype=x.dtype) exponent = V.kernel.cse.newvar(dtype=torch.int32) V.kernel.compute.writeline( f"{mantissa}, {exponent} = triton_helpers.frexp({x})" ) V.kernel.cse.put(cache_key, (mantissa, exponent)) return (mantissa, exponent) # Use mypy to check protocol implemented correctly def _typecheck_TritonKernelOverrides(h: TritonKernelOverrides) -> OpsHandler[str]: return h class HelperFunctions: """An ordered set of helper functions.""" _templates_seen: Dict[str, str] # Template code to function name finalized_helpers: List[str] def __init__(self) -> None: self._templates_seen = {} self.finalized_helpers = [] def add(self, template_code: str, *, base_name="_triton_helper_fn") -> str: """This accepts a function definition with the function name left as a format specifier e.g. @triton.jit def {name}(arg0, arg1): return arg0 + arg1 We add the templated code to the function set and return the name assigned to that function. 
""" existing_name = self._templates_seen.get(template_code) if existing_name is not None: # Don't duplicate existing helpers return existing_name name = f"{base_name}{len(self.finalized_helpers)}" self._templates_seen[template_code] = name self.finalized_helpers.append(template_code.format(name=name)) return name def __iter__(self): return iter(self.finalized_helpers) def __getitem__(self, idx): return self.finalized_helpers[idx] @dataclasses.dataclass class BlockParameters: """ Class representing ND block dimensions, for block pointer analysis. """ shape: List[sympy.Expr] = dataclasses.field(default_factory=list) block_shape: List[sympy.Expr] = dataclasses.field(default_factory=list) strides: List[sympy.Expr] = dataclasses.field(default_factory=list) offsets: List[sympy.Expr] = dataclasses.field(default_factory=list) def __add__(self, other: BlockParameters) -> BlockParameters: """ Concatenates block parameters. """ cls = type(self) a, b = tuple(dataclasses.asdict(x) for x in (self, other)) return cls(**{key: a[key] + b[key] for key in a}) class CooperativeReductionWorkspaceCache: """ The scratch space used for cooperative reductions can be reused after two reduction loops. This keeps track of what can be reused. """ def __init__(self, args): self.args = args self.current_loop = [] self.prior_loop = [] self.ready_for_reuse = collections.defaultdict(collections.deque) self.loop_count = 0 self.store_count = 0 def allocate(self, nbytes: sympy.Expr): cached = self.ready_for_reuse.get(nbytes) if cached: return cached.popleft() ws_name, ws_offset = self.args.workspace(nbytes, False) self.current_loop.append((nbytes, ws_name, ws_offset)) return (ws_name, ws_offset) def on_loop_end(self): # Buffers can be reused after 2 loop ends for nbytes, ws_name, ws_offset in self.prior_loop: self.ready_for_reuse[nbytes].append((ws_name, ws_offset)) self.prior_loop = self.current_loop self.current_loop = [] self.loop_count += 1 def increment_store_count(self): prior = self.store_count self.store_count += 1 return prior @dataclasses.dataclass class FixedTritonConfig: config: Dict[str, int] def __getitem__(self, item): return self.config[item] class TritonCSE(CSE): """ Subclasses CSE to apply the current load mask to the cache key to avoid CSEing variables across separate masked blocks. 
""" def augment_key(self, cache_key: object) -> object: if mask := V.kernel._load_mask: return (cache_key, mask.name) else: return cache_key class TritonKernel(SIMDKernel): overrides = TritonKernelOverrides # type: ignore[assignment] helper_functions: HelperFunctions kexpr: Callable[[sympy.Expr], str] = texpr allow_block_ptr = True def __init__( self, tiling: Dict[str, sympy.Expr], min_elem_per_thread=0, optimize_mask=True, fixed_config: Optional[FixedTritonConfig] = None, **kwargs, ) -> None: self.optimize_mask: bool = optimize_mask self.fixed_config = fixed_config super().__init__(tiling, **kwargs) self.cse = TritonCSE(self.newvar_prefix, self.suffix) self.post_loop_combine: IndentedBuffer = IndentedBuffer() self.post_loop_store: IndentedBuffer = IndentedBuffer() self.outside_loop_vars: OrderedSet[Any] = OrderedSet() self.min_elem_per_thread = min_elem_per_thread self.block_ptr_id = itertools.count() self.helper_functions = HelperFunctions() self._load_counts: collections.Counter[str] = collections.Counter() # A set of autotuning hints to pass as part of triton_meta self.autotune_hints: OrderedSet[AutotuneHint] = OrderedSet() self.triton_meta: Optional[Dict[str, object]] = None if self.cooperative_reduction: self.init_cooperative_reduction() self.codegen_range_tree() def dtype_to_str(self, dtype: torch.dtype) -> str: return triton_type(dtype) def should_use_cooperative_reduction(self) -> bool: return self.inside_reduction and V.choices.should_use_cooperative_reduction( self.features ) def init_cooperative_reduction(self): """One time setup code for cooperative reductions.""" assert self.cooperative_reduction # shift all the grids over since tl.program_id(0) is for rsplit for tree in self.range_trees: if tree.grid_dim is not None: tree.grid_dim += 1 sem_count = self.numels["x"] if self.fixed_config: sem_count = CeilDiv(sem_count, self.fixed_config["XBLOCK"]) self.semaphores_name = self.args.semaphores(sem_count) self.cooperative_reduction_workspace_cache = CooperativeReductionWorkspaceCache( self.args ) self.body.splice( """ rsplit_id = tl.program_id(0) num_rblocks = (rnumel + RBLOCK - 1) // RBLOCK rsplit_chunk = (num_rblocks + RSPLIT - 1) // RSPLIT * RBLOCK rsplit_start = rsplit_chunk * rsplit_id rsplit_end = rsplit_chunk * (rsplit_id + 1) """, strip=True, ) if not self._has_constant_mask(self.range_trees[-1]): self.body.writeline( "rsplit_end = tl.where(rsplit_end < rnumel, rsplit_end, rnumel)" ) def codegen_range_tree(self): for tree in self.range_trees: # reduction indexing goes inside a loop if not tree.is_loop: self.iteration_ranges_codegen_header(tree, self.body) if self.inside_reduction and self.range_trees[-1].is_loop: # workaround for this issue: # https://gist.github.com/jansel/6527126f781559095c5531f98a4235a7 self.body.writeline( f"rbase = {self.iteration_ranges_ranges_code(self.range_trees[-1])}" ) def need_numel_args(self): r""" Indicate whether we need provide numel as arguments for the generated kernel calls in the benchmark. Should be true for pointwise/reduction kernels but false for triton matmul kernels. 
""" return True def should_use_persistent_reduction(self) -> bool: return self.inside_reduction and V.choices.should_use_persistent_reduction( self.features, self.cooperative_reduction ) def want_no_x_dim(self): if self.persistent_reduction and len(self.numels) == 2: if self.fixed_config: return self.fixed_config["XBLOCK"] == 1 return V.choices.want_no_x_dim(self.features) return False @property def assert_function(self) -> str: return "tl.device_assert" def indexing( self, index: sympy.Expr, *, copy_shape=None, dense_indexing=False, override_mask=None, block_ptr=False, ): """ Compute the index and mask to pass to tl.load() or tl.store() """ index = self.prepare_indexing(index) index_vars = index.free_symbols has_rindex = False mask_vars: OrderedSet[str] = OrderedSet() for var in index_vars: assert isinstance(var, sympy.Symbol) has_rindex = has_rindex or symbol_is_type(var, SymT.RINDEX) if override_mask: pass elif symbol_is_type(var, SymT.TMP): # indirect indexing cse_var = self.cse.varname_map[var.name] mask_vars.update(cse_var.mask_vars) elif symbol_is_type( var, ( SymT.UNBACKED_INT, SymT.SIZE, SymT.PRECOMPUTED_SIZE, SymT.INDEX, SymT.FLOAT, SymT.UNBACKED_FLOAT, ), ): pass else: # var is one of xN, yN or rN assert symbol_is_type( var, (SymT.RINDEX, SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK) ), var.name mask_vars.add(f"{var.name[0]}mask") need_dense = ( config.triton.dense_indexing or dense_indexing or self._load_mask is not None ) and index != 0 have_dense = True have_loop_vars = False dense_mask_vars: OrderedSet[str] = OrderedSet() for tree in self.active_range_trees(): if index_vars.intersection(tree.var_list): have_loop_vars = True else: have_dense = False dense_mask_vars.add(f"{tree.prefix}mask") if ( block_ptr and self.allow_block_ptr and config.triton.use_block_ptr and not override_mask and not self._load_mask and len(mask_vars - dense_mask_vars) == 0 and not self.is_indirect_indexing(index) and have_loop_vars # workaround https://github.com/openai/triton/issues/2821 and self.index_dtype == "tl.int32" ): def match_strided_block( index: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Matches expressions of the form: idx = s * xindex This implies stride (s,), and shape (XBLOCK,). """ symbol = range_tree.symbol() stride = sympy.Wild("stride", exclude=[symbol]) m = index.match(symbol * stride) if m is None: return None return BlockParameters( shape=[range_tree.numel], block_shape=[TritonSymbols.get_block_size(range_tree)], strides=[m[stride]], offsets=[TritonSymbols.get_block_offset(range_tree)], ) def match_mod_div_block( index: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Matches higher-dimensional blocks coming from FloorDiv and ModularIndexing. Example expression to match: sN * ((rindex//(d1 * ... * d(N-1)))) + s1 * ModularIndexing(rindex, 1, d1) + ... + s(N-1) * ModularIndexing(rindex, d1 * ... * d(N-2), d(N-1)) This iterates over a block of shape (dN, ..., d1) and stride (sN, ..., s1). (d1,...,d(N-1)) and (s1,...,sN) are wildcards that we match. Note that dN does not appear in the expression, but we solve for it using range tree numels and the other dims. """ # Bound the possible number of dims. We use the following heuristics: # - At least one dim for each range tree node. # - At least one dim for every FloorDiv or ModularIndexing op. # - At least 2 dims to pattern match. 
num_dims = max( 2, len(self.range_tree_nodes), (index.count(FloorDiv) + index.count(ModularIndexing)), ) # Pattern match to find the strides and offset. index_var = range_tree.symbol() match_result = BlockPatternMatcher.match_mod_div_block_expr( index, index_var, range_tree.numel, num_dims ) if match_result is None: return None ( dims, strides, block_index_exprs, ) = match_result slice_numels = BlockPatternMatcher.get_slice_numels(dims) # Check for applicable iteration range sizes. # When mapping a 1D block into an ND one, we need to know that # the number of elements is not changed. This means the slice numels of # the ND iteration range must evenly divide the length of the 1D block. # There are two cases where we can guarantee this: # 1. Numels are powers of 2. If numel == 2 ** n, and we know XBLOCK == 2 ** m, # with n and m integers, then either numel is a multiple of XBLOCK, or numel # is less than XBLOCK. (If numel is less than XBLOCK, we round up to 1 below.) # 2. Numels are multiples of the maximum possible block size. sizevars = V.graph.sizevars max_block = self.max_block(range_tree.prefix) if any( not sizevars.statically_known_multiple_of(numel, max_block) and not sizevars.statically_known_power_of_2(numel) for numel in slice_numels ): return None # Compute the ND block shape from the linear block size. # Use CielDiv to round leading dimensions up to 1. # Non-leading dimensions are clamped to the size of the iteration range, # while the leading dimension can exceed this to accomodate a larger # block size. linear_block_size = TritonSymbols.get_block_size(range_tree) block_shape: List[sympy.Expr] = [ CeilDiv(linear_block_size, slice_numels[0]) ] + [ sympy.Min(CeilDiv(linear_block_size, numel), dim) for numel, dim in zip(slice_numels[1:], dims[1:]) ] # Compute block offsets from {xyzr}offset and the matched expressions. block_offsets: List[sympy.Expr] = [ sympy_subs( expr, {index_var: TritonSymbols.get_block_offset(range_tree)} ) for expr in block_index_exprs ] return BlockParameters( shape=dims, block_shape=block_shape, strides=strides, offsets=block_offsets, ) def match_block_pointer_subexpr( expr: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Match a block indexing subexpression involving a single range tree. """ for match_func in ( match_strided_block, match_mod_div_block, ): match = match_func(expr, range_tree) if match is not None: return match return None def match_block_pointer() -> Optional[BlockPtrOptions]: index_relative_to_xyr_index = sympy_subs( index, {v: t.expr for v, t in self.range_tree_nodes.items()} ) range_trees = self.active_range_trees(reorder=True) # Partition the index into subexpressions pertaining to each range tree. # For example xindex * 5 + rindex * 3 is partitioned to # (xindex * 5, rindex * 3). index_subexprs = [ BlockPatternMatcher.get_subexpr_involving_symbol( index_relative_to_xyr_index, tree.symbol() ) for tree in range_trees ] # Match each range tree's subexpression separately. range_symbols = {tree.symbol() for tree in range_trees} block_params = BlockParameters() for tree, subexpr in zip(range_trees, index_subexprs): # Reject mixed terms, e.g. xindex * rindex. # NB: the zero expression is allowed, for broadcasting. if len(range_symbols.intersection(subexpr.free_symbols)) > 1: return None # Match the subexpression for this range tree. params = match_block_pointer_subexpr(subexpr, tree) if params is None: return None block_params += params # Collect leftover terms as a constant offset. 
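# Illustrative aside (standalone sketch, not from this file): how an index is
# partitioned into one subexpression per range tree, with whatever is left
# over becoming the constant offset passed to BlockPtrOptions.create().
# `xindex`/`rindex` stand in for the range-tree symbols; the coefficients are
# arbitrary example values.
import sympy

xindex, rindex = sympy.symbols("xindex rindex")
index = 5 * xindex + 3 * rindex + 7

# One subexpression per range tree: every additive term using that tree's symbol.
x_part = sum(t for t in sympy.Add.make_args(index) if xindex in t.free_symbols)
r_part = sum(t for t in sympy.Add.make_args(index) if rindex in t.free_symbols)

# The leftover after subtracting the matched parts is the constant offset.
offset = sympy.simplify(index - (x_part + r_part))
assert offset == 7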
offset = index_relative_to_xyr_index - sum(index_subexprs) # Form the block pointer. self.filter_masks(mask_vars) return BlockPtrOptions.create( params=block_params, constant_offset=offset, range_trees=range_trees, mask_vars=mask_vars, get_max_block=self.max_block, ) # Return a block pointer, if indexing matches the pattern. options = match_block_pointer() if options is not None: return options expand_str = None index_str = self.index_to_str(index) if isinstance(index, sympy.Integer): expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() index_str = f"tl.full({expand_str}, {index_str}, tl.int32)" return IndexingOptions( index_str, OrderedSet(), "None", expand_str, has_rindex, index ) if need_dense and not have_dense: expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() index_str = f"tl.broadcast_to({index_str}, {expand_str})" mask_vars = dense_mask_vars elif not have_loop_vars and copy_shape: index_str = f"tl.broadcast_to({index_str}, {copy_shape}.shape)" mask_vars = dense_mask_vars if override_mask: mask_vars = OrderedSet([override_mask]) if self._load_mask: mask_vars.add(self._load_mask) self.filter_masks(mask_vars) mask_str = " & ".join(sorted(map(str, mask_vars))) if mask_vars else "None" return IndexingOptions(index_str, mask_vars, mask_str, expand_str, has_rindex, index) # type: ignore[arg-type] def codegen_block_ptr( self, name: str, var: str, indexing: BlockPtrOptions, other="" ) -> Tuple[str, Optional[DeferredLine], str]: advance_block_ptr = None check = indexing.boundary_check() if not check: # workaround https://github.com/openai/triton/issues/2813 other = "" elif other: assert other == ", other=0.0" other = f", boundary_check={check!r}, padding_option='zero'" else: other = f", boundary_check={check!r}" if ( self.inside_reduction and self.range_trees[-1].is_loop and indexing.has_rindex() ): block_ptr = f"block_ptr{next(self.block_ptr_id)}" self.body.writeline( DeferredLine( name, f"{block_ptr} = {indexing.format(var, roffset=False)}" ) ) advance_block_ptr = DeferredLine( name, f"{block_ptr} = tl.advance({block_ptr}, {indexing.advance_roffset()})", ) else: block_ptr = indexing.format(var) return block_ptr, advance_block_ptr, other def codegen_block_ptr_store_line(self, name, indexing, block_ptr, value, other=""): # Stores require an explicit broadcast. 
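# Illustrative aside (standalone sketch, not generated output): the flavor of
# Triton that codegen_block_ptr() and codegen_block_ptr_store_line() emit when
# an index matches the block-pointer pattern.  This hand-written 2D copy
# kernel only approximates the generated code; the kernel name, the fixed 2D
# layout, and the block sizes are assumptions made for the example.
import triton
import triton.language as tl

@triton.jit
def copy_2d_block_ptr(in_ptr0, out_ptr0, M, N, stride_m, stride_n,
                      XBLOCK: tl.constexpr, YBLOCK: tl.constexpr):
    xoffset = tl.program_id(0) * XBLOCK
    yoffset = tl.program_id(1) * YBLOCK
    in_block = tl.make_block_ptr(in_ptr0, shape=(M, N), strides=(stride_m, stride_n),
                                 offsets=(xoffset, yoffset),
                                 block_shape=(XBLOCK, YBLOCK), order=(1, 0))
    out_block = tl.make_block_ptr(out_ptr0, shape=(M, N), strides=(stride_m, stride_n),
                                  offsets=(xoffset, yoffset),
                                  block_shape=(XBLOCK, YBLOCK), order=(1, 0))
    # boundary_check / padding_option='zero' mirror the `other` handling above;
    # the generated store likewise broadcasts the value to the block shape first.
    val = tl.load(in_block, boundary_check=(0, 1), padding_option="zero")
    tl.store(out_block, val, boundary_check=(0, 1))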
value = indexing.codegen_broadcast_and_reshape( value, indexing.final_shape, indexing.block_shape, False ) # workaround https://github.com/openai/triton/issues/2814 value = f"{value}.to({triton_store_type(V.graph.get_dtype(name))})" return f"tl.store({block_ptr}, {value}{other})" def check_bounds( self, expr: sympy.Expr, size: sympy.Expr, lower: bool, upper: bool, ): if not (lower or upper): return assert isinstance(expr, sympy.Expr) indexing = self.indexing(expr, block_ptr=False) assert isinstance(indexing, IndexingOptions) index_str = indexing.index_str mask_str = indexing.mask_str if indexing.has_mask() else None size_str = texpr(self.rename_indexing(size)) if upper else None # expr is already wrapped line = self.indirect_assert( index_str, "0" if lower else None, size_str, mask_str ) buffer = self.get_load_buffer(indexing) self.cse.generate(buffer, line, assignment=False, dtype=torch.int32) def get_load_buffer(self, indexing): if indexing.has_indirect() or indexing.has_tmpmask(): # Masked loads must come after the mask is computed return self.compute elif ( self.inside_reduction and self.range_trees[-1].is_loop and not indexing.has_rindex() ): # can lift a common load outside of reduction loop # One exception is when this is an indirect_load. return self.body else: return self.loads def load(self, name: str, index: sympy.Expr): var = self.args.input(name) load_counts = self._load_counts load_counts[name] += 1 make_line: Callable[[str], Union[str, DelayReplaceLine]] = identity indirect_indexing = self.is_indirect_indexing(index) original_index = index indexing = self.indexing(index, block_ptr=True) has_rindex = indexing.has_rindex() has_tmpmask = indexing.has_tmpmask() # Keep the variable in cache if were going to reuse it. Equiv., if any of the following hold # 1) We are doing broadcasting # 2) It is a non-coalesced load. The intuition is that if it's # non-coalesced, we will likely load each element multiple times in # practice. # 3) It will be used later and it won't be CSE'd. 
Equiv., if all the following hold # 3.1) We are in a reduction loop # 3.2) Its not its last use # 3.3) This load will not be lifted to the body # is_coalesced = any( i == 1 for i in self.get_strides_of_load(original_index).values() ) if self.is_broadcasted(original_index): ep = ", eviction_policy='evict_last'" elif not is_coalesced: ep = ", eviction_policy='evict_last'" elif self.inside_reduction and self.range_trees[-1].is_loop: def decide_later(): if load_counts[name] > expected_count and ( has_rindex or indirect_indexing ): return "evict_last" return "evict_first" expected_count = load_counts[name] ep = ", eviction_policy='<EP>'" make_line = functools.partial(DelayReplaceLine, "<EP>", decide_later) else: ep = "" if (has_tmpmask or has_rindex) and indexing.has_mask(): if self._load_other: other = f", other={constant_repr(self._load_other)}" else: other = ", other=0.0" else: other = "" advance_block_ptr = None append_broadcast = None dtype = V.graph.get_dtype(name) if should_unwrap_unspec_arg(name): line = var else: if isinstance(indexing, BlockPtrOptions): block_ptr, advance_block_ptr, other = self.codegen_block_ptr( name, var, indexing, other ) line = f"tl.load({block_ptr}{other}{ep})" line = indexing.codegen_broadcast_and_reshape( line, indexing.block_shape, indexing.final_shape, True ) elif isinstance(original_index, sympy.Integer): line = f"tl.load({var} + ({original_index}))" append_broadcast = indexing.expand_str else: line = f"tl.load({var} + ({indexing.index_str}), {indexing.mask_str}{ep}{other})" if ( dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32 ): line += ".to(tl.float32)" dtype = torch.float32 if dtype == torch.bool and torch.version.hip is None: # Workaround for https://github.com/openai/triton/issues/2151 # tl.load returns int8 when loading from pointer to int1 # NOTE: Currently causes hangs on bool UTs for ROCm line += ".to(tl.int1)" dtype = torch.bool load_buffer = self.get_load_buffer(indexing) result_var = self.cse.generate(load_buffer, make_line(line), dtype=dtype) if result_var.use_count > 1: load_counts[name] -= 1 # don't double count cache hit assert isinstance(result_var, TritonCSEVariable) result_var.mask_vars = indexing.mask_vars # type: ignore[assignment] if append_broadcast: line = f"tl.broadcast_to({result_var}, {append_broadcast})" result_var = self.cse.generate(load_buffer, line, dtype=dtype) if advance_block_ptr: load_buffer.writeline(advance_block_ptr) if not self.inside_reduction or (not indexing.has_rmask() and not has_rindex): self.outside_loop_vars.add(result_var) return result_var def store( self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None ) -> None: var = self.args.output(name) original_index = index indexing = self.indexing(index, dense_indexing=True, block_ptr=mode is None) # Guard against write-after-read corruption in triton. # See # https://github.com/openai/triton/issues/1615 # This triton bug means that a load which is broadcasted over multiple # warps may see the result of a store that happens later in the triton # program. The workaround is to add a barrier before storing, which # enforces that all warps have already read the data. 
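# Illustrative aside (standalone sketch, not generated output): where the
# tl.debug_barrier() guard described above ends up relative to the load/store
# pair.  This toy kernel only shows the placement -- it is not itself a
# broadcasted in-place case -- and all names are placeholders.
import triton
import triton.language as tl

@triton.jit
def inplace_add_one(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xindex = tl.program_id(0) * XBLOCK + tl.arange(0, XBLOCK)
    xmask = xindex < xnumel
    tmp0 = tl.load(in_out_ptr0 + xindex, xmask)
    tmp1 = tmp0 + 1
    tl.debug_barrier()  # every warp finishes reading before any warp overwrites
    tl.store(in_out_ptr0 + xindex, tmp1, xmask)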
is_inplace = name in self.args.inplace_buffers is_broadcasted = self.is_broadcasted(original_index) if is_inplace and is_broadcasted: self.stores.writeline(DeferredLine(name, "tl.debug_barrier()")) advance_block_ptr = None if isinstance(indexing, BlockPtrOptions): block_ptr, advance_block_ptr, other = self.codegen_block_ptr( name, var, indexing ) # block_ptr stores don't do implicit casting line = self.codegen_block_ptr_store_line( name, indexing, block_ptr, value, other ) elif mode is None: line = f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})" elif mode == "atomic_add": line = f"tl.atomic_add({var} + ({indexing.index_str}), {value}, {indexing.mask_str}, sem='relaxed')" else: raise NotImplementedError(f"store mode={mode}") exit_stack = contextlib.ExitStack() if not self.inside_reduction and self.cooperative_reduction: exit_stack.enter_context(self.guard_cooperative_store(name, self.stores)) self.stores.writeline(DeferredLine(name, line)) if advance_block_ptr: self.stores.writeline(advance_block_ptr) if not self.inside_reduction: self.outside_loop_vars.add(value) exit_stack.close() def guard_cooperative_store(self, name, buffer): """ For cooperative reductions only one thread block should write out the result. We rotate which thread block does each write for better parallelism """ idx = self.cooperative_reduction_workspace_cache.increment_store_count() buffer.writeline(DeferredLine(name, f"if rsplit_id == ({idx} % RSPLIT):")) return buffer.indent() def bucketize( self, values: CSEVariable, boundaries: Tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: CSEVariable, indexing_dtype: torch.dtype, right: bool, sorter: Optional[Tuple[str, sympy.Expr]] = None, sorter_indices: Optional[CSEVariable] = None, ) -> CSEVariable: """ See [Note: Inductor bucketize op] """ # Triton performance for bucketize_binary_search is much better when the number # of threads equals the number of elements. # If we're trying to use a bucketize kernel, we should make sure that an # autotuning config with num_elements_per_warp=(warp_size) exists. 
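# Illustrative aside (standalone sketch): the operation being code-generated
# here follows torch.bucketize / searchsorted semantics -- a binary search of
# each value against a sorted `boundaries` tensor, with `right` deciding how
# ties are placed.  The tensors below are arbitrary example data.
import torch

boundaries = torch.tensor([1.0, 3.0, 5.0, 7.0])
values = torch.tensor([0.5, 3.0, 6.0, 9.0])
print(torch.bucketize(values, boundaries, right=False))  # tensor([0, 1, 3, 4])
print(torch.bucketize(values, boundaries, right=True))   # tensor([0, 2, 3, 4])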
self.autotune_hints.add(AutotuneHint.ONE_ELEMENT_PER_THREAD) boundaries_ptr = self.args.input(boundaries[0]) boundary_size = self.index_to_str(boundaries[1]) boundaries_underlying_numel = self.index_to_str(boundaries[2]) boundary_stride = self.index_to_str(boundaries[3]) sorter_ptr = self.args.input(sorter[0]) if sorter else "None" sorter_stride = self.index_to_str(sorter[1]) if sorter else "None" block_size = self.dense_size_str() if indexing_dtype == torch.int32: triton_dtype = "tl.int32" elif indexing_dtype == torch.int64: triton_dtype = "tl.int64" else: raise NotImplementedError( "Bucketize only supports indexing with int32 and int64" ) result = self.cse.generate( self.compute, f"triton_helpers.bucketize_binary_search({values}, " f"{boundaries_ptr}, {boundary_size}, {boundaries_underlying_numel}, {boundary_stride}, " f"{boundary_indices}, " f"{triton_dtype}, " f"{right}, " f"{sorter_ptr}, {sorter_stride}, " f"{sorter_indices}, " f"{block_size}, " ")", dtype=indexing_dtype, # type: ignore[attr-defined] ) return result def reduction_resize(self, value): ndims = self.triton_tensor_ndim() if ndims == 1: return f"triton_helpers.promote_to_tensor({value})" sizes = [":"] * ndims sizes[-1] = "None" return f"{value}[{', '.join(sizes)}]" def reduction( self, dtype: torch.dtype, src_dtype: torch.dtype, reduction_type: ReductionType, value: Union[CSEVariable, Tuple[CSEVariable, ...]], ) -> Union[CSEVariable, Tuple[CSEVariable, ...]]: assert self.inside_reduction masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) if self._load_mask: masks.append(self._load_mask) reduction_range_prefix = self.range_trees[-1].prefix # Say we have # tmp0 = ops.constant(1, torch.int64) # tmp1 = ops.reduction(torch.int64, torch.int64, "sum", tmp0) # tmp0 in the triton code is either a scalar, or single-element tensor # so if we emit tl.sum directly, it will only give 1 instead of RBLOCK * 1 # To avoid this, we broadcast to the expected shape first. 
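# Illustrative aside (standalone sketch in eager PyTorch rather than Triton):
# why the value is broadcast to the dense block shape before reducing.
# Summing a scalar gives 1, while the intended result counts every element of
# the RBLOCK-sized block; RBLOCK=8 is an arbitrary example.
import torch

RBLOCK = 8
tmp0 = torch.tensor(1, dtype=torch.int64)        # ops.constant(1, torch.int64)
block = torch.broadcast_to(tmp0, (1, RBLOCK))    # tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
assert tmp0.sum().item() == 1                    # what reducing the scalar would give
assert block.sum(dim=-1).item() == RBLOCK        # what ops.reduction(..., "sum") expects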
dense_size_str = self.dense_size_str() value = self._map_tuple_or_scalar( lambda v: self.cse.generate( self.compute, f"tl.broadcast_to({v}, {dense_size_str})", dtype=v.dtype, ), value, ) dim: int root_op: str def final_reduction(value): use_helper = reduction_type in {"any", "max", "min", "prod"} module = "triton_helpers" if use_helper else "tl" if reduction_type in {"max", "min"}: return self.reduction_resize( f"{module}.{reduction_type}2({value}, {dim})" ) return self.reduction_resize(f"{module}.{reduction_type}({value}, {dim})") def final_argreduce(buffer, result_var, value, index): buffer.splice( f"""\ {result_var}_val, {result_var}_idx = triton_helpers.{root_op}_with_index({value}, {index}, {dim}) {result_var} = {self.reduction_resize(f'{result_var}_idx')} """ ) cache_key = (src_dtype, reduction_type, value) if cache_key in self.cse.reduction_cache: return self.cse.reduction_cache[cache_key] dim = self.triton_tensor_ndim() - 1 acc_type = triton_acc_type(src_dtype) torch_acc_type = upcast_acc_dtype(src_dtype) result_var: Any = self.cse.newvar(dtype=torch_acc_type) result_var.mask_vars = OrderedSet( var for var in masks if not prefix_is_reduction(var[0]) ) cond = " & ".join(masks) def where_cond(tval, fval): if not cond: return tval return TritonKernelOverrides.where(cond, tval, fval) if self.persistent_reduction: default = ir.Reduction.default_value(reduction_type, src_dtype) default = self._map_tuple_or_scalar(constant_repr, default) def _mask_value(value, default): return self.cse.generate( self.compute, where_cond(value, default), dtype=value.dtype ) if isinstance(value, tuple): masked_value = [_mask_value(v, d) for v, d in zip(value, default)] else: masked_value = _mask_value(value, default) if reduction_type in {"argmax", "argmin"}: accumulator_index = str( self.cse.generate( self.compute, f"tl.broadcast_to({reduction_range_prefix}index, {masked_value}.shape)", dtype=torch.int64, ) ) root_op = {"argmax": "max", "argmin": "min"}[reduction_type] final_argreduce( self.compute, result_var, masked_value, accumulator_index ) elif reduction_type == "welford_reduce": if self.cooperative_reduction: # cooperative reductions require full welford for correctness result_var = self.welford_reduce( result_var, reduction_type, value, where_cond, acc_type, dtype ) else: # For persistent reductions, don't bother with # welford's algorithm since it uses more registers, and # taking two reductions doesn't increase memory usage. 
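# Illustrative aside (standalone sketch): the trade-off described above.  The
# welford_reduce_fallback path amounts to the plain two-reduction formulation
# of mean / m2, which is fine when the whole row is resident (persistent
# reduction); Welford's one-pass update matters only for looped reductions.
# The input below is arbitrary example data.
import torch

x = torch.randn(256)

# Two-pass "fallback": one reduction for the mean, one for m2.
mean = x.mean()
m2 = ((x - mean) ** 2).sum()

# One-pass Welford accumulation, as the looped reduction would build it up.
w_mean, w_m2, w_weight = torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0)
for v in x:
    w_weight += 1
    delta = v - w_mean
    w_mean += delta / w_weight
    w_m2 += delta * (v - w_mean)

assert torch.allclose(mean, w_mean, atol=1e-4)
assert torch.allclose(m2, w_m2, rtol=1e-3)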
result_var = self.welford_reduce_fallback(dtype, value) elif reduction_type == "welford_combine": mean, m2, weight = masked_value welford = f"triton_helpers.welford({mean}, {m2}, {weight}, {dim})" mean, m2, weight = (self.cse.newvar(dtype=dtype) for _ in range(3)) self.compute.writeline(f"{mean}, {m2}, {weight} = {welford}") result_var = tuple( self.cse.generate( self.compute, self.reduction_resize(var_name), dtype=dtype ) for var_name in (mean, m2, weight) ) else: result_var = self.cse.generate( self.compute, final_reduction(masked_value), dtype=dtype ) else: accumulator = self.cse.namedvar(f"_{result_var}", dtype=torch_acc_type) default = ir.Reduction.default_accumulator(reduction_type, src_dtype) default = self._map_tuple_or_scalar(constant_repr, default) if not isinstance(default, tuple): self.body.writeline( f"{accumulator} = tl.full({self.dense_size_str()}, {default}, {acc_type})" ) if reduction_type in {"argmax", "argmin"}: accumulator_index = f"_{result_var}_index" long_max = torch.iinfo(torch.int64).max self.body.writeline( f"{accumulator_index} = tl.full({self.dense_size_str()}, {long_max}, tl.int64)" ) root_op = {"argmax": "max", "argmin": "min"}[reduction_type] self.compute.splice( f"""\ {accumulator}_next, {accumulator_index}_next = triton_helpers.{root_op}imum_with_index( {accumulator}, {accumulator_index}, {value}, {reduction_range_prefix}index ) {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} {accumulator_index} = {where_cond(f'{accumulator_index}_next', accumulator_index)} """ ) final_argreduce( self.post_loop_combine, result_var, accumulator, accumulator_index ) elif is_welford_reduction(reduction_type): result_var = self.welford_reduce( result_var, reduction_type, value, where_cond, acc_type, dtype ) else: combine_fn = ir.get_reduction_combine_fn(reduction_type, src_dtype) updated = combine_fn(accumulator, value) self.compute.writeline( f"{accumulator} = {where_cond(updated, accumulator)}" ) if src_dtype == torch.bool: # This is only really used for aten.any. 
It changes the # final reduction of a non-persistent reduction from # tmp5 = triton_helpers.max(_tmp5, 1)[:, None] # to # tmp5 = triton_helpers.max(_tmp5.to(tl.int8), 1)[:, None].to(tl.int1) # which is needed because tl.reduce doesn't support tl.int1 accumulator_casted_str = f"{accumulator}.to(tl.int8)" result_type = triton_compute_type(dtype) self.post_loop_combine.writeline( f"{result_var} = {final_reduction(accumulator_casted_str)}.to({result_type})" ) else: self.post_loop_combine.writeline( f"{result_var} = {final_reduction(accumulator)}" ) if self.cooperative_reduction: exit_stack = contextlib.ExitStack() for buf in (self.post_loop_combine, self.post_loop_store): # only do cooperative reduction combines if we have more than one thread block buf.writeline("if RSPLIT > 1:") exit_stack.enter_context(buf.indent()) if reduction_type in {"argmax", "argmin"}: self.post_loop_combine.writeline( f"{result_var}_bval = {self.reduction_resize(f'{result_var}_val')}" ) peer_val = self.codegen_cooperative_reduction_peer_combine( f"{result_var}_bval", src_dtype ) peer_idx = self.codegen_cooperative_reduction_peer_combine( result_var, dtype ) final_argreduce(self.post_loop_store, result_var, peer_val, peer_idx) elif is_welford_reduction(reduction_type): assert reduction_type == "welford_reduce" result_mean, result_m2, result_weight = result_var peer_mean = self.codegen_cooperative_reduction_peer_combine( result_mean, upcast_acc_dtype(src_dtype) ) peer_m2 = self.codegen_cooperative_reduction_peer_combine( result_m2, upcast_acc_dtype(src_dtype) ) peer_weight = self.codegen_cooperative_reduction_peer_combine( result_weight, upcast_acc_dtype(src_dtype) ) self.welford_reduce_final_reduction( self.post_loop_store, result_mean, result_m2, result_weight, peer_mean, peer_m2, peer_weight, dim, ) else: peers = self.codegen_cooperative_reduction_peer_combine( result_var, upcast_acc_dtype(src_dtype) ) self.post_loop_store.writeline( f"{result_var} = {final_reduction(peers)}" ) exit_stack.close() self.cse.reduction_cache[cache_key] = result_var if isinstance(result_var, tuple): assert all(isinstance(x, TritonCSEVariable) for x in result_var) self.outside_loop_vars |= OrderedSet(result_var) else: assert isinstance(result_var, TritonCSEVariable) self.outside_loop_vars.add(result_var) return result_var def welford_reduce( self, result_var, reduction_type, value, where_cond, acc_type, dtype ): """Helper to codegen a welford reduction""" dim = self.triton_tensor_ndim() - 1 accumulator = f"{result_var}_mean" accumulator_m2 = f"{result_var}_m2" accumulator_weight = f"{result_var}_weight" self.body.writeline( f"{accumulator} = tl.zeros({self.dense_size_str()}, {acc_type})" ) self.body.writeline( f"{accumulator_m2} = tl.zeros({self.dense_size_str()}, {acc_type})" ) self.body.writeline( f"{accumulator_weight} = tl.zeros({self.dense_size_str()}, {acc_type})" ) if reduction_type == "welford_combine": mean, m2, weight = value self.compute.splice( f"""\ {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_combine( {accumulator}, {accumulator_m2}, {accumulator_weight}, {mean}, {m2}, {weight} ) """ ) else: assert reduction_type == "welford_reduce" self.compute.splice( f"""\ {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_reduce( {value}, {accumulator}, {accumulator_m2}, {accumulator_weight}, roffset == 0 ) """ ) self.compute.splice( f"""\ {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} {accumulator_m2} = 
{where_cond(f'{accumulator_m2}_next', accumulator_m2)} {accumulator_weight} = {where_cond(f'{accumulator_weight}_next', accumulator_weight)} """ ) result_mean = result_var result_m2 = self.cse.newvar(dtype=dtype) result_weight = self.cse.newvar(dtype=dtype) return self.welford_reduce_final_reduction( self.post_loop_combine, result_mean, result_m2, result_weight, accumulator, accumulator_m2, accumulator_weight, dim, ) def welford_reduce_final_reduction( self, buf, result_mean, result_m2, result_weight, accumulator, accumulator_m2, accumulator_weight, dim, ): """Helper to codegen call to triton_helpers.welford""" buf.splice( f"""\ {result_mean}_tmp, {result_m2}_tmp, {result_weight}_tmp = triton_helpers.welford( {accumulator}, {accumulator_m2}, {accumulator_weight}, {dim} ) {result_mean} = {self.reduction_resize(f'{result_mean}_tmp')} {result_m2} = {self.reduction_resize(f'{result_m2}_tmp')} {result_weight} = {self.reduction_resize(f'{result_weight}_tmp')} """ ) return result_mean, result_m2, result_weight def max_rsplit(self): if self.fixed_config: return self.fixed_config["RSPLIT"] return TRITON_MAX_RSPLIT def codegen_cooperative_reduction_peer_combine(self, result_var, dtype): """ Generate code to save a [XBLOCK, RSPLIT] temporary workspace, where each thread block writes a different column. After the barrier, every thread block loads the completed value so that it can compute the final value independently. """ xnumel = self.numels["x"] mask = "xindex < xnumel" if xnumel != 1 and not self.no_x_dim else None expand = "" if self.no_x_dim else "[None,:]" nbytes = xnumel * dtype.itemsize * self.max_rsplit() ws_name, ws_offset = self.cooperative_reduction_workspace_cache.allocate(nbytes) self.post_loop_combine.splice( f""" {result_var}_ws = ({ws_name} + {self.index_to_str(ws_offset)}).to(tl.pointer_type({triton_type(dtype)})) tl.store({result_var}_ws + (xindex * RSPLIT + rsplit_id), {result_var}, {mask}) """, strip=True, ) self.post_loop_store.writeline( f"{result_var}_peers = tl.load({result_var}_ws + (xindex * RSPLIT + tl.arange(0, RSPLIT){expand}), " f"{mask}, eviction_policy='evict_first')" ) return f"{result_var}_peers" def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable): assert self.inside_reduction self.inside_reduction = False indexing = self.indexing(index, block_ptr=True) self.inside_reduction = True var = self.args.output(name) exit_stack = contextlib.ExitStack() if self.cooperative_reduction: exit_stack.enter_context( self.guard_cooperative_store(name, self.post_loop_store) ) if isinstance(indexing, BlockPtrOptions): self.post_loop_store.writeline( DeferredLine( name, self.codegen_block_ptr_store_line( name, indexing, indexing.format(var), value, f", boundary_check={indexing.boundary_check()!r}", ), ) ) else: assert isinstance(indexing, IndexingOptions) self.post_loop_store.writeline( DeferredLine( name, f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})", ) ) exit_stack.close() def _lift_helper(self, fn, num_args) -> str: # Lift IR function for scan operations into a triton function # in the global namespace helper = IndentedBuffer() helper.writeline("@triton.jit") args = [tuple(f"arg{i}_{n}" for n in range(num_args)) for i in range(2)] signature = ", ".join(itertools.chain.from_iterable(args)) helper.writeline(f"def {{name}}({signature}):") cse = CSE(prefix="", suffix="") overrides = TritonOverrides(V.MockHandler()) # Build a name that changes depending on fn to workaround a triton bug # where the combine_fn to reduce and scan is not 
hashed, and so different # scan ops may collide in the triton cache. # This is fixed with the latest triton pin, but not the triton-rocm pin. helper_name = "_triton_helper_fn" class CSEProxy: def __getattr__(self, name: str) -> Callable[..., CSEVariable]: def inner(*args, **kwargs): nonlocal helper_name helper_name += f"_{name}" return cse.generate( helper, getattr(overrides, name)(*args, **kwargs), dtype=torch.float32, ) return inner with helper.indent(), V.set_ops_handler(CSEProxy()): outputs = fn(*args) outputs = ", ".join(str(output) for output in outputs) helper.writeline(f"return {outputs}") return self.helper_functions.add(helper.getvalue(), base_name=helper_name) def scan( self, dtypes: Tuple[torch.dtype, ...], combine_fn: Callable[ [Tuple[CSEVariable, ...], Tuple[CSEVariable, ...]], Tuple[CSEVariable, ...] ], values: Tuple[CSEVariable, ...], ) -> Tuple[CSEVariable, ...]: assert self.inside_reduction assert not self.cooperative_reduction, "TODO" masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) assert not self._load_mask, "ops.scan not supported inside ops.masked" broadcasted_values = [] accumulators = [] cse_compute = functools.partial(self.cse.generate, self.compute) combine_helper_fn = self._lift_helper(combine_fn, len(values)) dim = self.triton_tensor_ndim() - 1 for value, dtype in zip(values, dtypes): value_dtype = self.cse.generate( self.compute, f"{value}.to({triton_compute_type(dtype)})", dtype=upcast_compute_type(dtype), ) value = self.cse.generate( self.compute, f"tl.broadcast_to({value_dtype}, {self.dense_size_str()})", dtype=upcast_compute_type(dtype), ) broadcasted_values.append(value) acc_type = triton_acc_type(dtype) if not self.persistent_reduction: accumulator = self.cse.newvar(dtype=upcast_compute_type(dtype)) reduced_size = self.dense_size_list() reduced_size[-1] = "1" reduced_size = f"[{', '.join(reduced_size)}]" default = "float('nan')" if dtype.is_floating_point else "-1" self.body.writeline( f"{accumulator} = tl.full({reduced_size}, {default}, {acc_type})" ) accumulators.append(accumulator) def csv(values): return " ".join(f"{value}," for value in values) def cse_multiple(line, values, masks, dtypes): n = len(values) cache_keys = [f"{line}, {i}, {masks}" for i in range(n)] if all(self.cse.contains(cache_key) for cache_key in cache_keys): return [self.cse.get(cache_key) for cache_key in cache_keys] result_vars = [self.cse.newvar(dtype=_dtype) for _dtype in dtypes] self.compute.writeline( f"{csv(result_vars)} = {line}", ) for result_var, cache_key in zip(result_vars, cache_keys): if masks: result_var.mask_vars = masks # type: ignore[attr-defined] self.cse.put(cache_key, result_var) return tuple(result_vars) partial_scan_vars = cse_multiple( f"tl.associative_scan(({csv(broadcasted_values)}), {dim}, {combine_helper_fn})", values, masks, (upcast_compute_type(dtype) for dtype in dtypes), ) if not self.persistent_reduction: # tl.reduce doesn't work for non-commutative operators, so instead # of repeating the scan op as a reduction, we use sum to select the # last scan value partial_reduce_vars = [ cse_compute( f"triton_helpers.select_one(({partial_scan_var}), rbase == (RBLOCK - 1), dim=-1, keep_dims=True)", dtype=upcast_compute_type(partial_scan_var.dtype), ) for partial_scan_var in partial_scan_vars ] accs_next = combine_fn(tuple(accumulators), tuple(partial_reduce_vars)) full_scan_vars = combine_fn(tuple(accumulators), partial_scan_vars) result_vars = [ cse_compute( f"tl.where(roffset > 0, 
{full_scan}, {partial_scan})", dtype=partial_scan.dtype, ) for full_scan, partial_scan in zip(full_scan_vars, partial_scan_vars) ] for acc_next, accumulator, partial_reduce in zip( accs_next, accumulators, partial_reduce_vars ): self.compute.writeline( f"{accumulator} = tl.where(roffset > 0, {acc_next}, {partial_reduce})" ) else: result_vars = partial_scan_vars for result_var in result_vars: result_var.mask_vars = masks # type: ignore[attr-defined] return tuple(result_vars) def sort( self, dtypes: Tuple[torch.dtype, ...], values: Tuple[CSEVariable, ...], stable: bool, descending: bool, ) -> Tuple[CSEVariable, ...]: assert self.inside_reduction assert not self.cooperative_reduction, "TODO" masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) assert not self._load_mask, "ops.sort not supported inside ops.masked" assert ( self.persistent_reduction ), "ops.sort is only supported in persistent reductions" reduction_range_prefix = self.range_trees[-1].prefix cse_compute = functools.partial(self.cse.generate, self.compute) dim = self.triton_tensor_ndim() - 1 assert len(dtypes) == len(values) broadcasted_values = [ cse_compute( f"tl.broadcast_to({value}, {self.dense_size_str()})", dtype=dtypes[i] ) for i, value in enumerate(values) ] def csv(values): return " ".join(f"{value}," for value in values) def cse_multiple(line, n, masks, dtypes): cache_keys = [f"{line}, {i}, {masks}" for i in range(n)] if all(self.cse.contains(cache_key) for cache_key in cache_keys): return [self.cse.get(cache_key) for cache_key in cache_keys] result_vars = [self.cse.newvar(dtype=dtypes[i]) for i in range(n)] # type: ignore[attr-defined] self.compute.writeline( f"{csv(result_vars)} = {line}", ) for result_var, cache_key in zip(result_vars, cache_keys): if masks: result_var.mask_vars = masks # type: ignore[attr-defined] self.cse.put(cache_key, result_var) return tuple(result_vars) assert self.range_trees[-1].is_reduction rnumel = "None" if self._has_constant_mask(self.range_trees[-1]) else "rnumel" if len(values) == 2: line = ( f"triton_helpers.sort_with_index({broadcasted_values[0]}, {broadcasted_values[1]}," f" {rnumel}, {dim}, stable={stable}, descending={descending})" ) result_vars = cse_multiple(line, len(values), masks, dtypes) else: raise AssertionError("Unhandled sort") for result_var, input_var in zip(result_vars, values): result_var.mask_vars = masks # type: ignore[attr-defined] result_var.bounds = input_var.bounds return tuple(result_vars) def codegen_body(self): """ Concat output code from index_code, loads, compute, stores, suffix into self.body. For pointwise kernels, this is called just once at the end. For reduction kernels, this generates a loop over the reduction axis. 
""" if not ( self.indexing_code or self.loads or self.stores or self.compute or self.post_loop_combine or self.post_loop_store ): return if self.inside_reduction and self.range_trees[-1].is_loop: if self.cooperative_reduction: self.body.writeline( "for roffset in range(rsplit_start, rsplit_end, RBLOCK):" ) else: self.body.writeline("for roffset in range(0, rnumel, RBLOCK):") with self.body.indent(): # last range tree is always reduction self.iteration_ranges_codegen_header(self.range_trees[-1], self.body) self.body.splice(self.indexing_code) self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) # invalidate any caches that came from inside the reduction loop self.cse.invalidate(self.outside_loop_vars) self.range_trees[-1].cache_clear() else: self.body.splice(self.indexing_code) self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) self.body.splice(self.post_loop_combine) if self.cooperative_reduction and ( self.post_loop_combine or self.post_loop_store ): sem_ptr = f"{self.semaphores_name} + tl.program_id(1)" self.body.splice( f""" if RSPLIT > 1: triton_helpers.x_grid_barrier({sem_ptr}) """, strip=True, ) self.cooperative_reduction_workspace_cache.on_loop_end() self.body.splice(self.post_loop_store) self.indexing_code.clear() self.loads.clear() self.compute.clear() self.stores.clear() self.post_loop_combine.clear() self.post_loop_store.clear() def codegen_kernel_benchmark(self, num_gb, grid=None): result = IndentedBuffer() argdefs, call_args, signature, _ = self.args.python_argdefs() result.writelines(["", "", "def get_args():"]) with result.indent(): name_cnt = itertools.count() var_names = [] for arg_name, arg_sig in zip(call_args, signature): var_name = f"arg_{next(name_cnt)}" buf = V.graph.try_get_buffer(arg_name) if buf: result.writeline( f"{var_name} = rand_strided({V.graph.sizevars.size_hints(buf.get_size())}, {V.graph.sizevars.size_hints(buf.get_stride())}, device='{buf.get_device()}', dtype={buf.get_dtype()})" # noqa: B950 line too long ) elif arg_name in V.graph.constants: # note that random seed is put in V.graph.constants const_tensor = V.graph.constants[arg_name] result.writeline( f"{var_name} = rand_strided({V.graph.sizevars.size_hints(const_tensor.size())}, {V.graph.sizevars.size_hints(const_tensor.stride())}, device='{const_tensor.device}', dtype={const_tensor.dtype})" # type: ignore[arg-type] # noqa: B950 line too long ) elif isinstance(arg_sig, SizeArg): symval_hint = V.graph.sizevars.size_hint(arg_sig.expr) # Force the seed_offset to be 0 so calls to the same kernel # using different seed offset will have the same benchmark harness. # We can dedup kernel definitions in this case. 
if "seed_offset" in arg_sig.name: symval_hint = 0 result.writeline(f"{var_name} = {symval_hint}") elif isinstance(arg_sig, WorkspaceArg): device = V.graph.get_current_device_or_throw() count = V.graph.sizevars.size_hint(arg_sig.count) result.writeline( f"{var_name} = torch.zeros({count}, device='{device}', dtype={arg_sig.dtype})" ) else: raise KeyError( f"Don't find the buffer or const tensor for {arg_name}" ) var_names.append(var_name) result.writeline(f"return {', '.join(var_names)},") result.writelines(["\n", "\n", "def call(args):"]) if grid is None: grid = [] extra_args = [] extra_args_str = None for tree in self.active_range_trees(): expr = pexpr(V.graph.sizevars.size_hint(tree.numel)) extra_args.append(expr) if not tree.is_reduction: grid.append(expr) if self.need_numel_args(): extra_args_str = ", ".join(map(str, extra_args)) + ", " else: extra_args_str = "" grid_arg = f"{extra_args_str}grid=grid({', '.join(grid)})" else: grid_arg = f"grid={grid}" current_device = V.graph.get_current_device_or_throw() index = current_device.index with result.indent(): result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") with result.indent(): result.writeline( V.graph.device_ops.set_device(index) ) # no-op to ensure context stream_name = f"stream{index}" result.writeline(f"{stream_name} = get_raw_stream({index})") result.writeline( f"{str(Placeholder.KERNEL_NAME)}.run(*args, {grid_arg}, stream={stream_name})" ) # benchmark all configs result.writelines(["\n", "\n", "def benchmark_all_configs(args):"]) with result.indent(): result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") with result.indent(): result.writeline( V.graph.device_ops.set_device(index) ) # no-op to ensure context result.writeline( f"return {str(Placeholder.KERNEL_NAME)}.benchmark_all_configs(*args, {grid_arg})" ) result.writelines(["\n", "\n", "if __name__ == '__main__':"]) with result.indent(): result.writeline( "from torch._inductor.runtime.benchmarking import benchmarker" ) result.writeline("") result.writeline("args = get_args()") result.writeline( "ms = benchmarker.benchmark_gpu(lambda: call(args), rep=40)" ) result.writeline(f"num_gb = {num_gb}") result.writeline("gb_per_s = num_gb / (ms / 1e3)") result.writeline( 'print(f"{ms:.3f}ms {num_gb:.3f}GB {gb_per_s:.2f}GB/s")' ) return result def imports_for_benchmark_kernel(self): return textwrap.dedent( """ from torch._dynamo.testing import rand_strided {} import torch from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid """.format( V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") ) ) def _get_heuristic(self): if self.fixed_config: return "fixed_config" elif self.cooperative_reduction: return "cooperative_reduction" elif self.persistent_reduction: assert self.inside_reduction return "persistent_reduction" elif self.inside_reduction: return "reduction" return "pointwise" @staticmethod def inductor_meta_common(): inductor_meta = { "backend_hash": torch.utils._triton.triton_hash_with_backend(), "are_deterministic_algorithms_enabled": torch.are_deterministic_algorithms_enabled(), "assert_indirect_indexing": config.assert_indirect_indexing, "autotune_local_cache": config.autotune_local_cache, "autotune_pointwise": config.triton.autotune_pointwise, "autotune_remote_cache": config.autotune_remote_cache, "force_disable_caches": config.force_disable_caches, "dynamic_scale_rblock": config.dynamic_scale_rblock, "max_autotune": config.max_autotune, "max_autotune_pointwise": config.max_autotune_pointwise, "min_split_scan_rblock": 
config.triton.min_split_scan_rblock, "spill_threshold": config.triton.spill_threshold, "store_cubin": config.triton.store_cubin, } if torch.version.hip is not None: inductor_meta["is_hip"] = True if config.is_fbcode(): inductor_meta["is_fbcode"] = True if config.profile_bandwidth: inductor_meta["profile_bandwidth"] = config.profile_bandwidth inductor_meta["profile_bandwidth_regex"] = config.profile_bandwidth_regex inductor_meta["profile_bandwidth_output"] = config.profile_bandwidth_output inductor_meta[ "profile_bandwidth_with_do_bench_using_profiling" ] = config.profile_bandwidth_with_do_bench_using_profiling if config.coordinate_descent_tuning: inductor_meta[ "coordinate_descent_tuning" ] = config.coordinate_descent_tuning inductor_meta[ "coordinate_descent_search_radius" ] = config.coordinate_descent_search_radius inductor_meta[ "coordinate_descent_check_all_directions" ] = config.coordinate_descent_check_all_directions return inductor_meta def codegen_kernel(self, name=None): code = IndentedBuffer() size_hints = [] for numel in self.numels.values(): numel_hint = V.graph.sizevars.symbolic_hint(numel) if not isinstance(numel_hint, (int, sympy.Integer)): # This default heuristic hint was picked carefully: it is # large, to ensure that we don't shrink the block size (since # if you don't have many elements, it'd be wasteful to pick a # large block size). Since we don't know how many elements we # might have, we should be OK with some inefficiency to make # sure we handle the large case well. 8192 is the largest # block size we support, so we pick that. # # If we have a better hint for unbacked SymInts (e.g., because # a user told us, or we are tracking upper bounds) we could # use that here. size_hint = 8192 else: size_hint = next_power_of_2(int(numel_hint)) size_hints.append(size_hint) if not self.inside_reduction: size_hints.pop() if name is None: code.splice(gen_common_triton_imports()) device_type = V.graph.get_current_device_or_throw().type if device_type == "cpu": code.splice("triton_helpers.set_driver_to_cpu()") else: code.splice("triton_helpers.set_driver_to_gpu()") if config.benchmark_kernel: code.splice(self.imports_for_benchmark_kernel()) argdefs, _, signature, _ = self.args.python_argdefs() # maps actual expression to SizeArg if it is in sizevars replacements for i, arg in enumerate(signature): if isinstance(arg, SizeArg): # mypy is unhappy about the sympy.Expr # type for the key of the dict below symbol = cast(sympy.Symbol, arg.expr) if symbol in V.graph.sizevars.inv_precomputed_replacements: signature[i] = SizeArg( arg.name, V.graph.sizevars.inv_precomputed_replacements[symbol] ) mutated_args: OrderedSet[str] = OrderedSet() for mutation in self.mutations: if mutation in self.args.input_buffers: mutated_args.add(self.args.input_buffers[mutation]) if ( mutation in self.args.inplace_buffers and mutation not in V.graph.removed_buffers and mutation not in self.removed_buffers ): mutated_args.add(self.args.inplace_buffers[mutation].inner_name) if mutation in self.args.output_buffers: mutated_args.add(self.args.output_buffers[mutation]) # Note: [Workspace Mutation] # workspace arguments are mutated, but are not marked as mutations in self.mutations # because their buffers are added during codegen, and aren't tracked during # lowering/scheduling. So we add them as mutated_args explicitly below. 
# # In the logic below, we only mark the workspaces a mutated if they are marked with # zero_fill: that's because, if we don't expect the buffer to be pre-filled with # zeros, then, although we still mutate the data, we don't care about those # mutations because we don't make any assumptions about the contents of the # workspace buffer. Similarly, ZERO_PER_GRAPH requires the kernel to return # the buffer back to its original state. for argname, arg in zip(argdefs, signature): if ( isinstance(arg, WorkspaceArg) and arg.zero_mode == WorkspaceZeroMode.ZERO_ON_CALL ): mutated_args.add(argname) mutated_args = sorted(mutated_args) triton_meta_signature = signature_to_meta( signature, size_dtype=self.index_dtype, argdefs=argdefs ) triton_meta = { "signature": triton_meta_signature, "device": DeviceProperties.create(V.graph.get_current_device_or_throw()), "constants": {}, } # Skip memory optimization for forward of the training loop where we expect # every new node will increase the peak memory and our greedy approach would # introduce a lot of unnecessary cpu copies. optimize_mem = V.graph.is_inference or V.graph.is_backward inductor_meta = { "autotune_hints": set(self.autotune_hints), "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), "mutated_arg_names": mutated_args, "optimize_mem": optimize_mem, "no_x_dim": self.no_x_dim, "num_load": self.num_load, "num_reduction": self.num_reduction, **self.inductor_meta_common(), } if self.cooperative_reduction: inductor_meta["persistent_reduction"] = self.persistent_reduction num_gb = None if config.benchmark_kernel or config.profile_bandwidth: num_gb = self.estimate_kernel_num_bytes() / 1e9 inductor_meta["kernel_num_gb"] = num_gb for tree in self.active_range_trees(): sizearg = SizeArg(f"{tree.prefix}numel", tree.numel) signature.append(sizearg) triton_meta_signature[sizearg.name] = signature_of( sizearg, size_dtype=self.index_dtype ) argdefs.append(f"{tree.prefix}numel") # constexpr version causes issues, see # https://github.com/pytorch/torchdynamo/pull/1362 # triton_meta["constants"][len(argdefs)] = V.graph.sizevars.size_hint( # tree.numel # ) # argdefs.append(f"{tree.prefix}numel: tl.constexpr") triton_meta["configs"] = [config_of(signature)] # Triton compiler includes equal_to_1 args into constants even # when they are not constexpr. otherwise there may be a segfault # during launching the Inductor-compiled Triton kernel. 
# https://github.com/pytorch/pytorch/issues/120478#issuecomment-1962822307 # https://github.com/openai/triton/blob/231efe9ed2d200be0f69a07c298e4342b08efe3d/python/triton/runtime/jit.py#L384 for arg_num in triton_meta["configs"][0].equal_to_1: # type: ignore[index] triton_meta["constants"][signature[arg_num].name] = 1 # type: ignore[index] self.triton_meta = triton_meta for tree in self.range_trees: if tree.is_reduction and self.persistent_reduction: # RBLOCK for persistent_reduction is defined in codegen_static_numels continue if tree.tensor_dim is None: continue argdefs.append(f"{tree.prefix.upper()}BLOCK : tl.constexpr") if self.cooperative_reduction: argdefs.append("RSPLIT : tl.constexpr") self.codegen_body() for helper in self.helper_functions: code.writeline("") code.splice(helper) if self.fixed_config: heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( config={self.fixed_config.config!r}, filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r} ) @triton.jit """ elif self.inside_reduction: reduction_hint = self.features.get_reduction_hint() heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( size_hints={size_hints!r}, reduction_hint={reduction_hint}, filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r} ) @triton.jit """ else: tile_hint = "" if len(size_hints) == 2: if len(signature) == 4: # input, output and 2 args tile_hint = "tile_hint=TileHint.SQUARE," else: tile_hint = "tile_hint=TileHint.DEFAULT," heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( size_hints={size_hints!r}, {tile_hint} filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r}, min_elem_per_thread={self.min_elem_per_thread} ) @triton.jit """ code.splice(heuristics_line) code.writeline( f"def {name or str(Placeholder.KERNEL_NAME)}({', '.join(argdefs)}):" ) with code.indent(): self.codegen_static_numels(code) for old, new in self.args.aliases(): code.writeline(f"{old} = {new}") code.splice(self.body) if config.benchmark_kernel: code.splice(self.codegen_kernel_benchmark(num_gb)) return code.getvalue() @staticmethod def _get_persistent_RBLOCK(rnumel): rnumel = V.graph.sizevars.simplify(rnumel) if isinstance(rnumel, (sympy.Integer, int)): val = int(rnumel) val = next_power_of_2(val) else: val = 128 while not V.graph.sizevars.statically_known_leq(rnumel, val): if val > 16 * 1024: raise ValueError(f"Failed to find static RBLOCK for {rnumel}") val *= 2 return val @staticmethod def has_persistent_RBLOCK(rnumel): try: TritonKernel._get_persistent_RBLOCK(rnumel) return True except ValueError: return False def codegen_static_numels(self, code): """ We get a small speedup from hard coding numels if they are static. This code stomps on the passed-in values by writing an constant to the top of the kernel. In a kernel like: def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): We would add xnumel = 4096 rnumel = 768 After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream knows that its a static numel, as that you just plop a constant into the kernel. 
""" for tree in self.range_trees: if not tree.is_reduction or self.inside_reduction: simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) if isinstance(simplified_tree_numel, (sympy.Integer, int)): code.writeline(f"{tree.prefix}numel = {int(simplified_tree_numel)}") if tree.is_reduction and self.persistent_reduction: val = self._get_persistent_RBLOCK(tree.numel) if self.cooperative_reduction: val = f"{val} // RSPLIT" code.writeline(f"RBLOCK: tl.constexpr = {val}") if tree.prefix == "x" and self.no_x_dim: code.writeline("XBLOCK: tl.constexpr = 1") def _get_grid_fn_str(self): return self._get_grid_fn().__name__ def _get_grid_fn(self): if self.cooperative_reduction: return cooperative_reduction_grid return default_grid_fn def add_numel_to_call_args_and_grid(self, name, call_args, arg_types, grid): # TODO(jansel): if there are constants, we shouldn't bother passing them as args for tree in self.range_trees: if isinstance(tree.numel, (sympy.Integer, sympy.Symbol)): expr = tree.numel else: expr = V.graph.wrapper_code.generate_numel_expr(name, tree) if not tree.is_reduction or self.inside_reduction: call_args.append(expr) arg_types.append(type(expr)) if tree.grid_dim is not None: grid.append(expr) def call_kernel(self, name: str, node: Optional[IRNode] = None): wrapper = V.graph.wrapper_code wrapper.write_triton_header_once() _, call_args, _, arg_types = self.args.python_argdefs() grid: List[Any] = [] self.add_numel_to_call_args_and_grid(name, call_args, arg_types, grid) current_device = V.graph.get_current_device_or_throw() for ws in self.args.workspace_args: wrapper.generate_workspace_allocation(ws) grid = wrapper.generate_default_grid( name, grid, grid_callable=self._get_grid_fn() ) wrapper.generate_kernel_call( name, call_args, grid, current_device.index, gpu=current_device.type != "cpu", triton=True, arg_types=arg_types, grid_fn=self._get_grid_fn_str(), triton_meta=self.triton_meta, ) for ws in reversed(self.args.workspace_args): wrapper.generate_workspace_deallocation(ws) def codegen_nan_check(self): wrapper = V.graph.wrapper_code _, call_args, arg_signatures, _ = self.args.python_argdefs() for arg, arg_signature in zip(call_args, arg_signatures): if isinstance(arg_signature, TensorArg): if V.graph.cpp_wrapper: wrapper.writeline( f'AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_check_inf_and_nan("{arg}", {arg}));' ) else: line = f"assert not {arg}.isnan().any().item()" wrapper.writeline(line) line = f"assert not {arg}.isinf().any().item()" wrapper.writeline(line) def create_cse_var(self, *args, **kwargs): return TritonCSEVariable(*args, **kwargs) def codegen_iteration_ranges_entry(self, entry: IterationRangesEntry): line = f"{entry.name} = {self.kexpr(self.rename_indexing(entry.expr))}" if entry.root.is_loop: self.indexing_code.writeline(line) else: # lift non-reduction stores outside loop self.body.writeline(line) def iteration_ranges_ranges_code(self, entry): assert entry.tensor_dim is not None size = self.indexing_size_str(entry.tensor_dim) index_dtype = self.index_dtype suffix = f".to({index_dtype})" if index_dtype != "tl.int32" else "" if ( self.cooperative_reduction and self.persistent_reduction and entry.is_reduction ): suffix = f"{suffix} + rsplit_start" return f"tl.arange(0, {entry.prefix.upper()}BLOCK){size}{suffix}" def iteration_ranges_scalar_code(self, entry, value): index_dtype = self.index_dtype ndim = self.triton_tensor_ndim() size = [1] * ndim return f"tl.full({size}, {value}, {index_dtype})" def iteration_ranges_get_pid(self, entry): assert entry.grid_dim is not None key 
= f"tl.program_id({entry.grid_dim})" # y_grid has a limit, so express it in terms of y and z in case of overflow. # z grid is only exercised when max_tiles == 3 (off by default). if ( entry.grid_dim == 1 and not entry.has_zdim and not self.cooperative_reduction and not V.graph.sizevars.statically_known_leq(entry.numel, get_max_y_grid()) ): # For ynumel larger than max_ygrid, we need to use zdim. # For each z dimension, there are tl.num_programs(1) yblocks which is passed by grad(x,y,z). # So, we need to add tl.program_id(z) * tl.num_programs(y) *YBLOCK to get the correct yoffset. key = f"({key} + tl.program_id({entry.grid_dim + 1}) * tl.num_programs({entry.grid_dim}))" pid = entry.pid_cache.get(key, key) if self.index_dtype != "tl.int32": return f"{pid}.to({self.index_dtype})" return pid def max_block(self, prefix): if self.fixed_config: return self.fixed_config[f"{prefix.upper()}BLOCK"] return TRITON_MAX_BLOCK[prefix.upper()] def _has_constant_mask(self, tree: IterationRangesRoot): if not self.optimize_mask: return False if V.graph.sizevars.statically_known_equals(tree.numel, 1): # type: ignore[arg-type] return True # Masks are superfluous if numel is a multiple of BLOCK # (We use the fact that BLOCK is required by triton to be a power of 2) if tree.is_reduction and self.persistent_reduction: max_block = self._get_persistent_RBLOCK(tree.numel) elif tree.prefix == "x" and self.no_x_dim: max_block = 1 else: max_block = self.max_block(tree.prefix) if tree.is_reduction and self.cooperative_reduction: max_block = max_block * self.max_rsplit() # Optional optimization: if block divides numel exactly, we will # never need to do a masked load to handle stragglers at the end. # If this tree is for the y dimension, we should only use a constant # mask if it can be guaranteed that: # 1. (ynumel / YBLOCK) < max_ygrid or # 2. (ynumel / YBLOCK) % max_ygrid == 0 # Because YBLOCK is not constant, use a conservative heuristic: # only use a constant mask if ynumel < max_ygrid. # It's faster to avoid masking at all. But it is sound to always # mask. 
if V.graph.sizevars.statically_known_multiple_of(tree.numel, max_block): return ( tree.grid_dim != 1 or tree.has_zdim or V.graph.sizevars.statically_known_leq(tree.numel, get_max_y_grid()) ) return False def filter_masks(self, mask_vars): for tree in self.range_trees: if self._has_constant_mask(tree): mask_vars.discard(f"{tree.prefix}mask") def iteration_ranges_codegen_header(self, entry, code): x = entry.prefix if entry.is_loop: code.writeline(f"{entry.name} = {x}offset + {x}base") elif entry.grid_dim is None: # no need to "{x}offset = " code.writeline(f"{entry.name} = {self.iteration_ranges_ranges_code(entry)}") code.writeline(f"{x}offset = 0") else: if entry.tensor_dim is not None: line = f"{x}offset + {self.iteration_ranges_ranges_code(entry)}" else: line = self.iteration_ranges_scalar_code(entry, f"{x}offset") code.writelines( [ f"{x}offset = {self.iteration_ranges_get_pid(entry)} * {x.upper()}BLOCK", f"{entry.name} = {line}", ] ) if self._has_constant_mask(entry): sizes = self.dense_size_str() code.writeline(f"{x}mask = tl.full({sizes}, True, tl.int1)") else: code.writeline(f"{x}mask = {entry.name} < {x}numel") class TritonScheduling(SIMDScheduling): kernel_type: Type[Any] = TritonKernel backend_features = dict.fromkeys( # dict for deterministic order [ BackendFeature.FOREACH, BackendFeature.BUCKETIZE, BackendFeature.INPLACE_BUFFERS, BackendFeature.MASKED_SCATTER_WITH_INDEX, BackendFeature.SCAN, BackendFeature.TRITON_TEMPLATES, ] ) if torch.version.hip is None: backend_features.update( dict.fromkeys( [ # TODO: Move this above when ROCm triton adds support for multiple inputs BackendFeature.TUPLE_REDUCTION, BackendFeature.SORT, ] ) ) def __init__(self, scheduler: Scheduler) -> None: super().__init__(scheduler) if scheduler is None or not hasattr(scheduler, "nodes"): return for node in scheduler.nodes: if isinstance(node, (SchedulerNode, FusedSchedulerNode)): node.debug_device_str = debug_triton_code @classmethod def get_backend_features(cls, device: torch.device): if ( config.triton.cooperative_reductions or config.triton.force_cooperative_reductions ): return { **cls.backend_features, BackendFeature.REDUCE_TO_SINGLE_ELEMENT: None, } return cls.backend_features def codegen_comment(self, node_schedule): wrapper = V.graph.wrapper_code origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) if origins: wrapper.writeline(origins) if config.debug_fusion: from torch._inductor.scheduler import ( BaseSchedulerNode, ForeachKernelSchedulerNode, ) if not any( isinstance(n, ForeachKernelSchedulerNode) for n in node_schedule ): # We probably should look what are the nodes inside a foreach # schedule node node_names = [ n.get_name() for n in node_schedule if isinstance(n, BaseSchedulerNode) ] wrapper.writeline( f"{wrapper.comment} Fused node name list: {', '.join(node_names)}" ) def define_kernel(self, src_code, node_schedule, kernel): wrapper = V.graph.wrapper_code if src_code in wrapper.src_to_kernel: kernel_name = wrapper.src_to_kernel[src_code] else: fused_name = ( get_fused_kernel_name(node_schedule, config.triton.descriptive_names) if config.triton.descriptive_names else "" ) kernel_category = get_kernel_category_by_source_code(src_code)[:3] kernel_name = "_".join( ["triton", kernel_category, fused_name, wrapper.next_kernel_suffix()] ) # use the original src_code as the key wrapper.src_to_kernel[src_code] = kernel_name subs_name = kernel_name if config.triton.unique_kernel_names else "triton_" # DESCRIPTIVE_NAME is used for profiling purposes; it shows the full kernel name # 
even when unique_kernel_names is turned off. Meanwhile, KERNEL_NAME is sometimes set # to "triton_" to maximize caching opportunities (when unique_kernel_names = False). src_code = src_code.replace(str(Placeholder.DESCRIPTIVE_NAME), kernel_name) src_code = src_code.replace(str(Placeholder.KERNEL_NAME), subs_name) # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. src_code = src_code.replace("#pragma CMT", "#") basename, _, kernel_path = get_path(code_hash(src_code.strip()), "py") compile_wrapper = IndentedBuffer() compile_wrapper.writeline(f"async_compile.triton({subs_name!r}, '''") compile_wrapper.splice(src_code, strip=True) current_device = V.graph.get_current_device_or_throw() compile_wrapper.writeline(f"''', device_str='{current_device.type}')") metadata_comment = f"# kernel path: {kernel_path}" origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) metadata_comment += "\n" + origins + "\n" + detailed_origins wrapper.define_kernel( kernel_name, compile_wrapper.getvalue(), metadata_comment ) # log kernel metadata for offline analysis. # E.g. one can find all unaligned inner reduction and check if # padding helps with the perf kernel by kernel. if metrics.is_metric_table_enabled("kernel_metadata"): metrics.log_kernel_metadata(kernel_name, kernel_path, src_code) return kernel_name def benchmark_fused_nodes(self, nodes): with preserve_rng_state(), torch.cuda.device( V.graph.get_current_device_or_throw() ): src_code = self.generate_kernel_code_from_nodes( nodes, benchmark_kernel=True ) mod = PyCodeCache.load(src_code) def cache_file_path(): assert mod.__file__ is not None return os.path.splitext(mod.__file__)[0] + ".kernel_perf" def load_cache(): path = cache_file_path() if os.path.exists(path): with open(path) as fd: return float(fd.read()) return None def store_cache(): path = cache_file_path() with open(path, "w") as fd: fd.write(str(ms)) log.debug( "kernel src code for %s written to: %s", {n.get_name() for n in nodes}, mod.__file__, ) ms = load_cache() if ms is not None: return ms, mod.__file__ args = mod.get_args() call = mod.call wrapped_jit_function = mod.triton_ # call once to trigger the compilation try: call(wrapped_jit_function.clone_args(*args)[0]) except Exception as e: log.debug( "Exception (%s) in compiling fused nodes %s", e, {n.get_name() for n in nodes}, ) ms = float("inf") store_cache() return ms, mod.__file__ launchers = wrapped_jit_function.launchers assert len(launchers) == 1 if launchers[0].n_spills > 0: # skip benchmarking the kernel if there are register spills ms = float("inf") else: # We have to clone the inplace updated arguments to avoid earlier calls # generating out of range indices for later calls. 
ms = benchmarker.benchmark_gpu( lambda: call(wrapped_jit_function.clone_args(*args)[0]) ) # overhead of cloning args gives bias for fusing the kernel # in the case of mutating/in-placeable second fusion # TODO - would be better as a hook in triton do_bench that reset # the input values between benchmarking if len(wrapped_jit_function.mutated_arg_names) > 0: ms = ms - benchmarker.benchmark_gpu( lambda: wrapped_jit_function.clone_args(*args) ) log.debug( "The fused kernel for %s took %.3f ms to run", {n.get_name() for n in nodes}, ms, ) store_cache() return ms, mod.__file__ def create_kernel_choices( self, kernel_features, kernel_args, kernel_kwargs ) -> List[SIMDKernel]: is_scan = kernel_features.contains_op("scan") is_split_scan = is_scan and any( node.is_split_scan() for node in kernel_features.scheduler_nodes() ) kernel_type: Type[TritonKernel] = self.kernel_type if is_split_scan: from .triton_split_scan import TritonSplitScanKernel kernel_type = TritonSplitScanKernel if is_scan: # TODO(jansel): scan does not yet work with cooperative reductions kernel_kwargs["override_cooperative_reduction"] = False # ops.sort only works with persistent reduction, and is not bandwidth bound anyway # so taking the hit of non-coalesced loads is okay if kernel_features.contains_op("sort"): kernel_kwargs["override_persistent_reduction"] = True kernel_kwargs["override_cooperative_reduction"] = False if not TritonKernel.has_persistent_RBLOCK(kernel_features.reduction_numel): # Cannot use persistent reduction with unknown dynamic rnumel assert not kernel_kwargs.get("override_persistent_reduction") kernel_kwargs["override_persistent_reduction"] = False kernel_kwargs = V.choices.triton_kernel_kwargs( kernel_type, kernel_features, kernel_args, kernel_kwargs ) kernel = kernel_type(*kernel_args, **kernel_kwargs) return self.add_multi_kernel_choices(kernel, kernel_args, kernel_kwargs) def add_multi_kernel_choices( self, kernel: SIMDKernel, kernel_args: List[Any], kernel_kwargs: Dict[str, Any], ) -> List[SIMDKernel]: kernels: List[SIMDKernel] = [kernel] if not config.triton.multi_kernel: return kernels optional_persistent = kernel.persistent_reduction and not kernel_kwargs.get( "override_persistent_reduction" ) optional_cooperative = kernel.cooperative_reduction and not kernel_kwargs.get( "override_cooperative_reduction" ) if optional_persistent: kernels.append( self.kernel_type( *kernel_args, **kernel_kwargs, override_persistent_reduction=False, ) ) if optional_cooperative: rnumel = kernel.numels["r"] # for larger sizes non-cooperative gets very slow if V.graph.sizevars.statically_known_leq(rnumel, 65536): kernels.append( other := self.kernel_type( *kernel_args, **kernel_kwargs, override_cooperative_reduction=False, ) ) if optional_persistent and other.persistent_reduction: kernels.append( self.kernel_type( *kernel_args, **kernel_kwargs, override_cooperative_reduction=False, override_persistent_reduction=False, ) ) if len(kernels) > 1: for kernel2 in kernels[1:]: # Keep buffers needed by the non-persistent reduction so both kernels have the same arguments kernel2.must_keep_buffers = kernel.must_keep_buffers # persistent kernels must be generated last so must_keep_buffers works right kernels.sort(key=lambda k: k.persistent_reduction) return kernels def benchmark_combo_kernel(self, node_list): def cache_file_path(): assert mod.__file__ is not None return os.path.splitext(mod.__file__)[0] + ".kernel_perf" def load_cache(): path = cache_file_path() if os.path.exists(path): with open(path) as fd: return tuple(float(e) 
for e in fd.read().split()) return (None, None) def store_cache(): path = cache_file_path() with open(path, "w") as fd: fd.write(str(ms) + " " + str(ms_clone)) total_ms, file_list = 0, [] total_clone_ms = 0 removed_buffers_orig = V.graph.removed_buffers V.graph.removed_buffers = OrderedSet(removed_buffers_orig) inplaced_to_remove_orig = V.graph.inplaced_to_remove V.graph.inplaced_to_remove = OrderedSet(inplaced_to_remove_orig) enable_autotune = config.combo_kernels_autotune > 0 mixed_sizes = config.combo_kernel_allow_mixed_sizes > 0 kernel_code_list = self.generate_combo_kernel_code( subkernel_nodes=node_list, custom_part_algorithm=True, enable_autotune=enable_autotune, mixed_sizes=mixed_sizes, only_gen_src_code=True, ) for src_code, _, node_group in kernel_code_list: fused_node_lists = [node.get_nodes() for node in node_group] names = [n.get_name() for nodes in fused_node_lists for n in nodes] src_code = src_code.replace(str(Placeholder.KERNEL_NAME), "triton_") mod = PyCodeCache.load(src_code) log.debug( "kernel src code for %s written to: %s", names, mod.__file__, ) ms, ms_clone = load_cache() if ms is not None: total_ms += ms total_clone_ms += ms_clone file_list.append(mod.__file__) continue args = mod.get_args() call = mod.call wrapped_jit_function = mod.triton_ # call once to trigger the compilation call(wrapped_jit_function.clone_args(*args)[0]) launchers = wrapped_jit_function.launchers assert len(launchers) == 1 if launchers[0].n_spills > 0: # skip benchmarking the kernel if there are register spills ms = ms_clone = float("inf") else: # We have to clone the inplace updated arguments to avoid earlier calls # generating out of range indices for later calls. ms = benchmarker.benchmark_gpu( lambda: call(wrapped_jit_function.clone_args(*args)[0]) ) ms_clone = benchmarker.benchmark_gpu( lambda: wrapped_jit_function.clone_args(*args)[0] ) log.debug( "The fused kernel for %s took %.3f ms to run, %.3f ms to clone inputs", {n.get_name() for n in node_group}, ms, ms_clone, ) store_cache() total_ms += ms total_clone_ms += ms_clone file_list.append(mod.__file__) V.graph.removed_buffers = removed_buffers_orig V.graph.inplaced_to_remove = inplaced_to_remove_orig return total_ms, total_clone_ms, file_list def debug_triton_code(node: BaseSchedulerNode) -> List[str]: lines = [] multi_template = node.get_template_node() assert multi_template is None or isinstance(multi_template, ir.MultiTemplateBuffer) if multi_template and multi_template.make_kernel_render is None: lines.append(f"{node.get_name()} Unfinalized multi template buffer") else: from torch._inductor.codegen.cuda_combined_scheduling import ( CUDACombinedScheduling, ) device = node.get_device() assert device is not None backend = node.scheduler.get_backend(device) assert isinstance( backend, (SIMDScheduling, CUDACombinedScheduling) ), f"Scheduling backend should be SIMD or CUDACombined when generating debug Triton strings, got: {type(backend)}" with V.graph.set_current_device(device): # Don't increment kernel count when generating debug string. # This will confuse some unit tests that check the number of # generated kernels. old_generated_kernel_count = metrics.generated_kernel_count triton_code = backend.generate_kernel_code_from_nodes( node.get_nodes() ).strip() metrics.generated_kernel_count = old_generated_kernel_count lines.append(f"{node.get_name()} Triton code:") lines.append(textwrap.indent(triton_code, " ")) return lines
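# Standalone sketch (added; not part of the file above): the power-of-2 rounding that
# _get_persistent_RBLOCK applies to a static reduction numel. next_power_of_2 below is a
# local stand-in for the torch._inductor.runtime helper, assumed to behave the same way.
def next_power_of_2(n: int) -> int:
    # Smallest power of two that is >= n, for n >= 1.
    return 1 << (n - 1).bit_length()

for rnumel in (1, 7, 128, 768, 1000):
    print(rnumel, "->", next_power_of_2(rnumel))
# 1 -> 1, 7 -> 8, 128 -> 128, 768 -> 1024, 1000 -> 1024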
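# Sketch (added): how iteration_ranges_get_pid above recovers a logical y-block index from
# two grid axes once ynumel overflows the hardware y-grid limit. The limit value used here
# is only an illustrative assumption.
def logical_y_block(pid_y: int, pid_z: int, num_programs_y: int) -> int:
    # Mirrors "tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)".
    return pid_y + pid_z * num_programs_y

MAX_Y_GRID = 65535  # assumed illustrative limit, not taken from the source
assert logical_y_block(70000 - MAX_Y_GRID, 1, MAX_Y_GRID) == 70000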
alexanderb14/pytorch
torch/_inductor/codegen/triton.py
https://github.com/alexanderb14/pytorch/blob/8da4224042665686de22f8e351a0b42bfa42cab8/torch/_inductor/codegen/triton.py
# mypy: allow-untyped-defs from __future__ import annotations import collections import contextlib import dataclasses import functools import itertools import logging import os import re import textwrap from functools import lru_cache from typing import ( Any, Callable, cast, Dict, Iterable, List, Optional, Sequence, Tuple, Type, TYPE_CHECKING, Union, ) import sympy from sympy.printing.precedence import PRECEDENCE import torch import torch._logging from torch._dynamo.utils import identity, preserve_rng_state from torch._prims_common import is_integer_dtype from torch.utils._ordered_set import OrderedSet from torch.utils._sympy.functions import CeilDiv, FloorDiv, ModularIndexing from torch.utils._triton import has_triton_package from ...utils._sympy.symbol import free_symbol_is_type, prefix_str, symbol_is_type, SymT from ...utils._sympy.value_ranges import ValueRanges from .. import config, ir, metrics from ..codecache import code_hash, get_path, PyCodeCache from ..runtime.benchmarking import benchmarker from ..runtime.hints import ( AutotuneHint, DeviceProperties, TRITON_MAX_BLOCK, TRITON_MAX_RSPLIT, ) from ..runtime.runtime_utils import get_max_y_grid, next_power_of_2 from ..runtime.triton_heuristics import ( cooperative_reduction_grid, grid as default_grid_fn, ) from ..scheduler import BaseSchedulerNode, FusedSchedulerNode, Scheduler, SchedulerNode from ..utils import ( DelayReplaceLine, get_bounds_index_expr, get_fused_kernel_name, get_kernel_metadata, is_welford_reduction, Placeholder, sympy_subs, upcast_compute_type, ) from ..virtualized import _ops as ops, OpsHandler, ReductionType, StoreMode, V from ..wrapper_benchmark import get_kernel_category_by_source_code from .block_analysis import BlockPatternMatcher from .common import ( BackendFeature, CSE, CSEVariable, DeferredLine, IndentedBuffer, OpOverrides, PythonPrinter, SizeArg, TensorArg, WorkspaceArg, WorkspaceZeroMode, ) from .simd import ( constant_repr, IterationRanges, IterationRangesEntry, IterationRangesRoot, pexpr, prefix_is_reduction, SIMDKernel, SIMDScheduling, ) from .triton_utils import ( config_of, should_unwrap_unspec_arg, signature_of, signature_to_meta, ) if TYPE_CHECKING: from ..ir import IRNode log = logging.getLogger(__name__) perf_hint_log = torch._logging.getArtifactLogger(__name__, "perf_hints") schedule_log = torch._logging.getArtifactLogger(__name__, "schedule") fusion_log = torch._logging.getArtifactLogger(__name__, "fusion") @lru_cache(None) def gen_attr_descriptor_import(): """ import AttrsDescriptor if the triton version is new enough to have this class defined. """ if not has_triton_package(): return "" import triton.compiler.compiler # Note: this works because triton.compiler.compiler imports AttrsDescriptor from triton.backends.compiler # When support for the legacy AttrsDescriptor is removed then this import path should be changed. 
if hasattr(triton.compiler.compiler, "AttrsDescriptor"): return "from triton.compiler.compiler import AttrsDescriptor" else: return "" @lru_cache(None) def gen_common_triton_imports(): imports = IndentedBuffer() imports.splice( """ import triton import triton.language as tl """ ) if attr_desc := gen_attr_descriptor_import(): imports.writeline(attr_desc) imports.splice( """ from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties """ ) return imports.getvalue() class TritonSymbols: """ Stores sympy.Symbol instances and constants associated with triton codegen. """ block_offsets = { symt: sympy.Symbol(f"{prefix_str[symt]}offset", integer=True, nonnegative=True) for symt in [SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK, SymT.RINDEX] } block_sizes = { symt: sympy.Symbol( f"{prefix_str[symt].upper()}BLOCK", integer=True, positive=True ) for symt in [SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK, SymT.RINDEX] } @classmethod def get_block_size(cls, tree: IterationRanges) -> sympy.Symbol: return cls.block_sizes[tree.symt] @classmethod def get_block_offset(cls, tree: IterationRanges) -> sympy.Symbol: return cls.block_offsets[tree.symt] @dataclasses.dataclass class IndexingOptions: index_str: str mask_vars: OrderedSet[str] mask_str: str expand_str: Optional[str] _has_rindex: bool index: sympy.Expr def has_mask(self): return bool(self.mask_vars) def has_indirect(self): return free_symbol_is_type(self.index, SymT.TMP) def has_rindex(self): return self._has_rindex def has_tmpmask(self): return "tmp" in self.mask_str def has_rmask(self): return "rmask" in self.mask_str @dataclasses.dataclass class BlockPtrOptions: params: BlockParameters constant_offset: sympy.Expr order: List[int] mask_vars: OrderedSet[str] broadcast_shape: Sequence[sympy.Expr] broadcasting_dims: List[bool] final_shape: Sequence[sympy.Expr] _boundary_check: Optional[List[int]] = None @property def shape(self) -> List[sympy.Expr]: return self.params.shape @property def block_shape(self) -> List[sympy.Expr]: return self.params.block_shape @property def strides(self) -> List[sympy.Expr]: return self.params.strides @property def offsets(self) -> List[sympy.Expr]: return self.params.offsets def codegen_broadcast_and_reshape( self, value: str, initial_shape: Sequence[sympy.Expr], final_shape: Sequence[sympy.Expr], allow_implicit: bool, ) -> str: """ Generate a broadcast and a reshape for the block pointer. This restores stride-0 dimensions which were removed from the block pointer. """ # Reshape to add singletons. pre_broadcast_shape = [ sympy.S.One if is_broadcasting else dim for dim, is_broadcasting in zip( self.broadcast_shape, self.broadcasting_dims ) ] value = triton_reshape(value, initial_shape, pre_broadcast_shape) # Broadcast singletons. # For loads, we can often implicitly broadcast singleton dimensions. # We need an explicit broadcast for stores, or if the final reshape does more # than add singletons. sizevars = V.graph.sizevars require_broadcast = any(self.broadcasting_dims) and ( len(pre_broadcast_shape) != len(final_shape) or any( not ( sizevars.statically_known_equals(pre_dim, 1) or sizevars.statically_known_equals(pre_dim, post_dim) ) for pre_dim, post_dim in zip(pre_broadcast_shape, final_shape) ) ) if not allow_implicit or require_broadcast: value = f"tl.broadcast_to({value}, {V.kernel.index_to_str(self.broadcast_shape)})" # Reshape to the final shape. 
value = triton_reshape(value, self.broadcast_shape, final_shape) return value @staticmethod def create( *, params: BlockParameters, constant_offset: sympy.Expr, range_trees: List[IterationRangesEntry], mask_vars: OrderedSet[str], get_max_block: Callable[[str], int], ) -> BlockPtrOptions: """Helper to create a BlockPtrOptions instance""" sizevars = V.graph.sizevars def lookup_size(exprs: Iterable[sympy.Expr]) -> List[sympy.Expr]: return [sizevars.lookup_precomputed_size(expr) for expr in exprs] # Look up precomputed sizes params.shape = lookup_size(params.shape) params.strides = lookup_size(params.strides) # Strip out dimensions of stride 0. # These will be restored with tl.broadcast_to. broadcasting_dims = [ sizevars.statically_known_equals(stride, 0) for stride in params.strides ] # Strip out dimensions of size 1. # These will be restored by tl.reshape. singleton_dims = [ sizevars.statically_known_equals(dim, 1) for dim in params.block_shape ] if all(singleton_dims): # Handle a pure singletons, e.g. [1, 1] singleton_dims[-1] = False # Record the post-broadcast shape before broadcasting dims are removed. # The pre-broadcast shape is identical to this, except broadcasting dims are # replaced with 1. broadcast_shape = [ dim for dim, is_singleton in zip(params.block_shape, singleton_dims) if not is_singleton ] # Combine all removable dims. removable_dims = [any(dims) for dims in zip(singleton_dims, broadcasting_dims)] def remove_dims(it): """Removes any broadcasting or singleton dims from a given sequence""" return [ item for item, is_removable in zip(it, removable_dims) if not is_removable ] # Drop removable dimensions from the input. params = BlockParameters( **{key: remove_dims(val) for key, val in dataclasses.asdict(params).items()} ) # Compute the final shape, adjusting for special kernel types. final_shape = [TritonSymbols.get_block_size(tree) for tree in range_trees] if V.kernel.no_x_dim: assert range_trees[0].prefix == "x" final_shape.pop(0) if ( not V.kernel.inside_reduction and len(params.strides) == len(V.kernel.numels) - 1 and V.kernel.numels["r"] != 1 ): # Need to expand rank by 1 to match rank when self.inside_reduction=True final_shape.append(sympy.S.One) result = BlockPtrOptions( params=params, constant_offset=V.graph.sizevars.lookup_precomputed_size(constant_offset), order=list(reversed(range(len(params.shape)))), mask_vars=mask_vars, final_shape=final_shape, broadcast_shape=broadcast_shape, broadcasting_dims=broadcasting_dims, ) result.compute_boundary_check(get_max_block) return result def replace_roffset(self, expr: sympy.Expr, replacement: sympy.Expr) -> sympy.Expr: """ Replaces instances of roffset with the new expression. 
""" roffset = TritonSymbols.block_offsets[SymT.RINDEX] return sympy_subs(expr, {roffset: replacement}) def format(self, name: str, roffset=True) -> str: """ Codegen a call to tl.make_block_ptr() Args: name: variable name for pointer roffset: should roffset be included in offsets=..., for use with tl.advance() Returns: "tl.make_block_ptr(...)" """ f = V.kernel.index_to_str offsets = [*self.offsets] if not roffset: offsets = [self.replace_roffset(offset, sympy.S.Zero) for offset in offsets] args = [ ( f"{name} + ({f(self.constant_offset)})" if self.constant_offset != 0 else name ), f"shape={f(self.shape)}", f"strides={f(self.strides)}", f"block_shape={f(self.block_shape)}", f"order={f(self.order)}", f"offsets={f(offsets)}", ] return f"tl.make_block_ptr({', '.join(args)})" def compute_boundary_check(self, get_max_block: Callable[[str], int]) -> None: """List of indices to pass to tl.load(boundary_check=...)""" sizevars = V.graph.sizevars # Substitute maximum block sizes in shape expressions. # This works in multiple_of checks because block sizes are powers of 2. block_to_max: Dict[sympy.Expr, Any] = { block_size: get_max_block(prefix_str[symt]) for symt, block_size in TritonSymbols.block_sizes.items() } self._boundary_check = [ idx for idx in range(len(self.shape)) if ( not sizevars.statically_known_equals(self.strides[idx], sympy.S.Zero) and not sizevars.statically_known_multiple_of( self.shape[idx], self.block_shape[idx] ) and not sizevars.statically_known_multiple_of( self.shape[idx], sympy_subs(self.block_shape[idx], block_to_max) ) and not ( V.kernel.no_x_dim and self.block_shape[idx] == TritonSymbols.block_sizes[SymT.XBLOCK] ) ) ] def boundary_check(self): assert self._boundary_check is not None return self._boundary_check def advance_roffset(self): """ Codegen string to pass to tl.advance(name, ...). Advance is the difference between offsets in each loop iteration. To compute it, we replace roffset with multiples of RBLOCK. Since we expect roffset to vary in range(0, rnumel, RBLOCK), the first iteration has roffset=0, while the second has roffset=RBLOCK. 
""" rblock = TritonSymbols.block_sizes[SymT.RINDEX] advance = [ ( self.replace_roffset(offset, rblock) - self.replace_roffset(offset, sympy.S.Zero) ) for offset in self.offsets ] return V.kernel.index_to_str(advance) def has_indirect(self): return False # block_ptr can't do indirect indexing def has_rindex(self) -> bool: return any(free_symbol_is_type(expr, SymT.RINDEX) for expr in self.block_shape) def has_rmask(self): return self.has_rindex() def has_tmpmask(self): return False # block_ptr can't do indirect indexing def has_mask(self): return bool(self.boundary_check()) def triton_reshape( value: str, old_shape: Sequence[sympy.Expr], new_shape: Sequence[sympy.Expr] ): """Workaround https://github.com/openai/triton/issues/2836""" assert isinstance(old_shape, list) and isinstance(new_shape, list) old_shape_str = [V.kernel.index_to_str(shape) for shape in old_shape] new_shape_str = [V.kernel.index_to_str(shape) for shape in new_shape] if old_shape_str == new_shape_str: return value if [s for s in new_shape_str if s != "1"] != old_shape_str: return f"tl.reshape({value}, [{', '.join(new_shape_str)}])" # rewrite to [:, None] syntax, which is less buggy idx = 0 expand = [] for size in new_shape_str: if idx < len(old_shape_str) and size == old_shape_str[idx]: expand.append(":") idx += 1 else: assert size == "1" expand.append("None") assert idx == len(old_shape_str) return f"{value}[{', '.join(expand)}]" # NB: Inheriting from PythonPrinter is somewhat dangerous, because there are a # number of operators which Triton "implements", but in a way that is # inconsistent with Python semantics (and consistent with C semantics). We # must override all of these, or it is potential silent correctness problem class TritonPrinter(PythonPrinter): def _print_TruncToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.trunc({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_Float(self, expr): if config.is_fbcode() and torch.version.hip: ret = f"{expr}" else: ret = f"tl.full([], {expr}, tl.float64)" return ret def _print_ToFloat(self, expr): assert len(expr.args) == 1 s = self.parenthesize(expr.args[0], PRECEDENCE["Atom"] - 0.5) return f"{s}.to(tl.float64)" def _print_PythonMod(self, expr): quot, div = expr.args if quot.is_nonnegative and div.is_nonnegative: return self.stringify(expr.args, " % ", PRECEDENCE["Atom"] - 0.5) quot_s = self._print(quot) div_s = self._print(div) return f"triton_helpers.remainder_integer({quot_s}, {div_s})" def _print_FloorDiv(self, expr): assert expr.is_integer quot, div = expr.args if quot.is_nonnegative and div.is_nonnegative: return self.stringify(expr.args, " // ", PRECEDENCE["Atom"] - 0.5) quot_s = self._print(quot) div_s = self._print(div) return f"triton_helpers.div_floor_integer({quot_s}, {div_s})" # TODO: This is wrong, when lhs, rhs > 2**53, Python does a higher # precision algorithm, which we would need to replicate here def _print_IntTrueDiv(self, expr): return self.stringify(expr.args, " / ", PRECEDENCE["Atom"] - 0.5) # NB: sympy.floor/ceiling produce integers, so we have to do the # conversion to index dtype def _print_floor(self, expr): assert len(expr.args) == 1 return ( f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_FloorToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_ceiling(self, expr): assert len(expr.args) == 1 return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})" def 
_print_CeilToInt(self, expr): assert len(expr.args) == 1 return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})" def _helper_sqrt(self, expr): return f"libdevice.sqrt({self._print(expr)}.to(tl.float32))" def _print_FloatPow(self, expr): return ( f"libdevice.pow({self._print(expr.args[0])}, {self._print(expr.args[1])})" ) _print_PowByNatural = _print_FloatPow def _print_Where(self, expr): c = self.doprint(expr.args[0]) p = self.doprint(expr.args[1]) q = self.doprint(expr.args[2]) return f"tl.where({c}, {p}, {q})" def _print_min_max_helper(self, expr: sympy.Expr, cmp: str) -> str: """ Helper for max/min code genereration. cmp: > or < """ nargs = len(expr.args) if len(expr.args) == 1: return self._print(expr.args[0]) mid = len(expr.args) // 2 cls = type(expr) a = self._print(cls(*expr.args[:mid])) b = self._print(cls(*expr.args[mid:])) # Use a macro so we can propagate constexprs. # https://github.com/triton-lang/triton/issues/3815 a, b = tuple(f"({x})" for x in (a, b)) assert cmp in (">", "<"), f"Unexpected comparator: '{cmp}'" return f"({a} * ({a} {cmp}= {b}) + {b} * ({b} {cmp} {a}))" def _print_Min(self, expr): return self._print_min_max_helper(expr, "<") def _print_Max(self, expr): return self._print_min_max_helper(expr, ">") def _print_Abs(self, expr): assert len(expr.args) == 1 return f"tl_math.abs({self._print(expr.args[0])})" def _print_OpaqueUnaryFn_cos(self, expr): assert len(expr.args) == 1 return f"libdevice.cos(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_cosh(self, expr): assert len(expr.args) == 1 return f"libdevice.cosh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_acos(self, expr): assert len(expr.args) == 1 return f"libdevice.acos(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_sin(self, expr): assert len(expr.args) == 1 return f"libdevice.sin(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_sinh(self, expr): assert len(expr.args) == 1 return f"libdevice.sinh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_asin(self, expr): assert len(expr.args) == 1 return f"libdevice.asin(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_tan(self, expr): assert len(expr.args) == 1 return f"libdevice.tan(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_tanh(self, expr): assert len(expr.args) == 1 return f"libdevice.tanh(({self._print(expr.args[0])}).to(tl.float32))" def _print_OpaqueUnaryFn_atan(self, expr): assert len(expr.args) == 1 return f"libdevice.atan(({self._print(expr.args[0])}).to(tl.float32))" def _print_RoundToInt(self, expr): assert len(expr.args) == 1 return ( f"libdevice.llrint({self._print(expr.args[0])}).to({V.kernel.index_dtype})" ) def _print_RoundDecimal(self, expr): assert len(expr.args) == 2 number, ndigits = expr.args if number.is_integer: # ndigits < 0 should have been filtered by the sympy function assert ndigits < 0 raise ValueError( f"For integer inputs, only non-negative ndigits are currently supported, but got {ndigits}." 
) number_str = self.parenthesize(number, PRECEDENCE["Mul"]) return f"libdevice.nearbyint(1e{ndigits} * {number_str}) * 1e{-ndigits}" texpr = TritonPrinter().doprint # correct cases where Triton types names don't match PyTorch _triton_type_mapping = { "tl.bool": "tl.int1", "tl.float8_e4m3fn": "tl.float8e4nv", "tl.float8_e5m2": "tl.float8e5", "tl.float8_e4m3fnuz": "tl.float8e4b8", "tl.float8_e5m2fnuz": "tl.float8e5b16", } _triton_type_re = re.compile(r"^.*[.]") def triton_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type""" triton_type_name = _triton_type_re.sub("tl.", str(dtype)) return _triton_type_mapping.get(triton_type_name, triton_type_name) def triton_compute_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type and upcast [b]float16 to float32""" return triton_type(upcast_compute_type(dtype)) def _get_primitive_bitwidth(dtype: torch.dtype) -> int: """Number of bits of triton_compute_type()""" dtype = upcast_compute_type(dtype) itemsize = getattr(dtype, "itemsize", None) if itemsize: return itemsize * 8 else: return -1 def triton_store_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type, with fix for storing tl.bool""" if dtype == torch.bool: dtype = torch.int8 return triton_type(dtype) def upcast_acc_dtype(dtype: torch.dtype) -> torch.dtype: """Implicit upcasts used for Triton reduction types""" if is_integer_dtype(dtype) and dtype.is_signed and dtype.itemsize <= 4: return torch.int32 return upcast_compute_type(dtype) def triton_acc_type(dtype: torch.dtype) -> str: """Convert torch.dtype to triton type, with reduction upcasts""" return triton_compute_type(upcast_acc_dtype(dtype)) class TritonCSEVariable(CSEVariable): def __init__(self, name, bounds: ValueRanges[Any], dtype: torch.dtype) -> None: super().__init__(name, bounds, dtype) # We'll use this to track which masks the variable needs when used for indirect indexing self.mask_vars: OrderedSet[str] = OrderedSet() assert dtype is not None, "TritonCSEVariable must have dtype" def update_on_args(self, name, args, kwargs): for arg in args: if isinstance(arg, TritonCSEVariable): self.mask_vars.update(arg.mask_vars) elif isinstance(arg, sympy.Symbol) and arg.name[0] in "xyr": # most of the time index vars don't need masks associated with them # however, when index vars are used to compute indices for indirect reads # those reads should subsequently be masked, self.mask_vars.update({f"{arg.name[0]}mask"}) class TritonOverrides(OpOverrides): """Map element-wise ops to Triton""" @staticmethod def to_dtype( x, dtype: torch.dtype, src_dtype: Optional[torch.dtype] = None, use_compute_types=True, ): def _get_min_elements_per_thread( src_dtype: torch.dtype, dst_dtype: torch.dtype ) -> int: if src_dtype == dst_dtype: # No data type conversion is needed. No requirements on min_elem_per_thread. return 0 # fp8 data type conversions has min_elem_per_thread requirements. # Refer to Triton implementations here: # https://github.com/openai/triton/blob/10f59d8ce04052521c1bc0cb3a3f8b98918fc7e3/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp#L10. fp8_dtypes = ( torch.float8_e4m3fn, torch.float8_e5m2, ) # Triton doesn't support type conversions between fp8_e4m3 and fp8_e5m2. assert not ( src_dtype in fp8_dtypes and dst_dtype in fp8_dtypes and src_dtype != dst_dtype ), "Conversions between float8_e5m2 and float8_e4m3fn is not supported!" 
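# Added summary comment (not in the upstream source): the returns below encode Triton's
# fp8 conversion requirements -- any float8_e5m2 endpoint needs min_elem_per_thread=4,
# any float8_e4m3fn endpoint needs 2, and non-fp8 conversions impose no minimum.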
if src_dtype == torch.float8_e5m2 or dst_dtype == torch.float8_e5m2: return 4 if src_dtype == torch.float8_e4m3fn or dst_dtype == torch.float8_e4m3fn: return 2 # No requirements on min_elem_per_thread. return 0 if src_dtype is not None: # Both dtype and src_dtype are set. This is used by torch to(dtype=dtype). # It takes the maximum min_elem_per_thread if there are multiple fp8 conversions # in the same kernel. V.kernel.min_elem_per_thread = max( _get_min_elements_per_thread(src_dtype, dtype), V.kernel.min_elem_per_thread, ) if dtype == torch.bool: return f"({x} != 0)" elif dtype == torch.uint8: # to work around llvm uint conversion semantics # that produces 0's for negative values return f"{x}.to(tl.int8).to(tl.uint8)" if use_compute_types: out_dtype = triton_compute_type(dtype) else: out_dtype = triton_store_type(dtype) return f"{x}.to({out_dtype})" @staticmethod def to_dtype_bitcast(x, dtype: torch.dtype, src_dtype: torch.dtype): triton_dtype = triton_compute_type(dtype) # We may promote float16 or bfloat16 to float32 and cause the # bitwidth of dtype to be different from the input tensor (i.e. float32). # In such as case, we will have to convert the input tensor to # its src_type, perform bitcast, and then convert the bit-casted # tensor back to float to ensure we use values with the right precision. if ( src_dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32 ): triton_src_dtype = str(src_dtype).split(".")[-1] cast_x = f"{x}.to(tl.{triton_src_dtype})" if dtype in (torch.float16, torch.bfloat16): triton_type_name = str(dtype).split(".")[-1] triton_dtype = f"tl.{triton_type_name}" cast_x = f"{cast_x}.to({triton_dtype}, bitcast=True)" if dtype in (torch.float16, torch.bfloat16): return f"{cast_x}.to(tl.float32)" return cast_x else: src_dtype_bitwidth = _get_primitive_bitwidth(src_dtype) target_dtype_bitwidth = _get_primitive_bitwidth(dtype) bitcast = "True" if src_dtype_bitwidth == target_dtype_bitwidth else "False" return f"{x}.to({triton_dtype}, bitcast={bitcast})" @staticmethod def _shaped_constant(value, dtype, shape): type_ = torch._prims_common.dtype_to_type(dtype) triton_val = constant_repr(type_(value)) triton_type = triton_compute_type(dtype) if triton_type == "tl.float32": # Float constants are always f32 in triton return triton_val # NOTE: We use a tensor here in order to get the expected type. # Otherwise, e.g. float64 constants would be trunctated to float32. return f"tl.full({shape}, {triton_val}, {triton_type})" @classmethod def constant(cls, value, dtype): return cls._shaped_constant(value, dtype, shape=[]) @staticmethod def abs(x): return f"tl_math.abs({x})" @staticmethod def libdevice_abs(x): return f"libdevice.abs({x})" @staticmethod def exp(x): return f"tl_math.exp({x})" @staticmethod def libdevice_exp(x): return f"libdevice.exp({x})" @staticmethod def exp2(x): return f"libdevice.exp2({x})" @staticmethod def expm1(x): return f"libdevice.expm1({x})" @staticmethod def sqrt(x): if config.triton.codegen_upcast_to_fp32: return f"libdevice.sqrt({x})" else: needs_upcast = x.dtype in (torch.float16, torch.bfloat16) orig_dtype = triton_type(x.dtype) upcast_string = ".to(tl.float32)" if needs_upcast else "" downcast_string = f".to({orig_dtype})" if needs_upcast else "" return f"libdevice.sqrt({x}{upcast_string}){downcast_string}" @staticmethod def libdevice_sqrt(x): return f"libdevice.sqrt({x})" @staticmethod def relu(x): bug = config.triton.inject_relu_bug_TESTING_ONLY if bug == "compile_error": return "compile error!" 
elif bug == "runtime_error": # NB: this only triggers runtime error as long as input # is not all zero return f'triton_helpers.device_assert_then({x} == 0, "injected assert fail", {x})' elif bug == "accuracy": return f"{x} + 1" elif bug is None: return ops.maximum(ops.constant(0, torch.int32), x) else: raise AssertionError( f"unrecognized config triton.inject_relu_bug_TESTING_ONLY = {bug!r}" ) @staticmethod def minimum(a, b): return f"triton_helpers.minimum({a}, {b})" @staticmethod def maximum(a, b): return f"triton_helpers.maximum({a}, {b})" @staticmethod def where(a, b, c): return f"tl.where({a}, {b}, {c})" @staticmethod def inline_asm_elementwise( *inputs, asm, constraints=None, dtype=torch.float32, is_pure=True, pack=1 ): triton_type = triton_compute_type(dtype) input_refs = ", ".join([str(i) for i in inputs]) if constraints is None: constraints = ", ".join(["=r"] + ["r" for _ in inputs]) return f"tl.inline_asm_elementwise('{asm}', '{constraints}', [{input_refs}], dtype={triton_type}, is_pure={is_pure}, pack={pack})" # noqa: B950 @staticmethod def cos(x): return f"tl_math.cos({x})" @staticmethod def libdevice_cos(x): return f"libdevice.cos({x})" @staticmethod def sin(x): return f"tl_math.sin({x})" @staticmethod def libdevice_sin(x): return f"libdevice.sin({x})" @classmethod def index_expr(cls, expr, dtype): raise NotImplementedError("ops.index_expr not implemented outside a kernel") @staticmethod def masked(mask, body, other): raise NotImplementedError("ops.masked not implemented outside a kernel") @staticmethod def lgamma(x): return f"libdevice.lgamma({x})" @staticmethod def erf(x): return f"libdevice.erf({x})" @staticmethod def cosh(x): return f"libdevice.cosh({x})" @staticmethod def sinh(x): return f"libdevice.sinh({x})" @staticmethod def acos(x): return f"libdevice.acos({x})" @staticmethod def acosh(x): return f"libdevice.acosh({x})" @staticmethod def asin(x): return f"libdevice.asin({x})" @staticmethod def asinh(x): return f"libdevice.asinh({x})" @staticmethod def atan2(x, y): return f"libdevice.atan2({x}, {y})" @staticmethod def atan(x): return f"libdevice.atan({x})" @staticmethod def atanh(x): return f"libdevice.atanh({x})" @staticmethod def copysign(x, y): return f"libdevice.copysign({x}, {y})" @staticmethod def erfc(x): return f"libdevice.erfc({x})" @staticmethod def erfinv(x): return f"libdevice.erfinv({x})" @staticmethod def hypot(x, y): return f"libdevice.hypot({x}, {y})" @staticmethod def log10(x): return f"libdevice.log10({x})" @staticmethod def log2(x): return f"libdevice.log2({x})" @staticmethod def nextafter(x, y): return f"libdevice.nextafter({x}, {y})" @staticmethod def logical_and(a, b): return f"{a} & {b}" @staticmethod def logical_not(a): return f"{a} == 0" @staticmethod def logical_or(a, b): return f"{a} | {b}" @staticmethod def logical_xor(a, b): return f"({a} ^ {b})" @staticmethod def bitwise_and(a, b): return f"{a} & {b}" @staticmethod def bitwise_not(a): return f"~{a}" @staticmethod def bitwise_or(a, b): return f"{a} | {b}" @staticmethod def bitwise_xor(a, b): return f"{a} ^ {b}" @staticmethod def bitwise_left_shift(a, b): return f"{a} << {b}" @staticmethod def bitwise_right_shift(a, b): return f"{a} >> {b}" @staticmethod def rand(seed, offset): offset = f"({offset}).to(tl.uint32)" return f"tl.rand({seed}, {offset})" @staticmethod def randn(seed, offset): offset = f"({offset}).to(tl.uint32)" return f"tl.randn({seed}, {offset})" @staticmethod def randint64(seed, offset, low, high): offset = f"({offset}).to(tl.uint32)" return f"triton_helpers.randint64({seed}, 
{offset}, {low}, {high})" @staticmethod def load_seed(name, offset): raise NotImplementedError("ops.load_seed not implemented outside a kernel") @staticmethod def rsqrt(x): return f"libdevice.rsqrt({x})" @staticmethod def log1p(x): return f"libdevice.log1p({x})" @staticmethod def tan(x): return f"libdevice.tan({x})" @staticmethod def tanh(x): return f"libdevice.tanh({x})" @staticmethod def sigmoid(x): return f"tl.sigmoid({x})" @staticmethod def signbit(x): # XX: This is wrong for the value -0.0 in floating point return ( f"(libdevice.signbit({x}) != 0) if ({x}).dtype is tl.float32 else {x} < 0" ) @staticmethod def fmod(a, b): return f"libdevice.fmod({a}, {b})" @staticmethod def pow(a, b): return f"libdevice.pow({a}, {b})" @staticmethod def log(x): return f"tl_math.log({x})" @staticmethod def libdevice_log(x): return f"libdevice.log({x})" @staticmethod def isinf(x): return f"libdevice.isinf({x}).to(tl.int1)" @staticmethod def isnan(x): return f"libdevice.isnan({x}).to(tl.int1)" @staticmethod def round(x): return f"libdevice.nearbyint({x})" @staticmethod def floor(x): return f"libdevice.floor({x})" @staticmethod def floordiv(a, b): # See the comment in lowering.div_mode. a and b are integer type. # Similar to div_floor_kernel_cuda in pytorch core. # Notice that // in triton behaves as truncdiv instead of floordiv quot = f"{a} // {b}" rem = f"{a} % {b}" return f"tl.where(({a} < 0) != ({b} < 0), tl.where({rem} != 0, {quot} - 1, {quot}), {quot})" @staticmethod def sign(x): z = ops.constant(0, torch.int32) left = ops.to_dtype((ops.lt(z, x)), torch.int8) right = ops.to_dtype((ops.lt(x, z)), torch.int8) sub = ops.sub(left, right) return f"{sub}.to({x}.dtype)" @staticmethod def trunc(x): return f"libdevice.trunc({x})" @staticmethod def truncdiv(a, b): # See the comment in lowering.div_mode. a and b are integer type. # Notice that // in triton behaves as truncdiv instead of floordiv return f"{a} // {b}" @staticmethod def ceil(x): return f"libdevice.ceil({x})" TritonOverrides._initialize_pointwise_overrides("triton") # Use mypy to check protocol implemented correctly def _typecheck_TritonOverrides(h: TritonOverrides) -> OpsHandler[str]: return h class TritonKernelOverrides(TritonOverrides): """Map element-wise ops to Triton within a TritonKernel Unlike TritonOverrides, these assume the code is going to be inserted into the body of the main triton kernel and so it may use indexing and mask variables which are assumed to already be defined in the current scope. """ @classmethod def constant(cls, value, dtype): # NOTE: Cannot use shape=[] as it's not supported by triton-rocm # We could use shape=[1] instead but starting with the correct # ndim avoids extra `tt.expand_dim` ops appearing in the triton IR. ndim = V.kernel.triton_tensor_ndim() shape = [1] * ndim return cls._shaped_constant(value, dtype, shape=shape) @classmethod def index_expr(cls, expr, dtype): indexing = V.kernel.indexing(expr, block_ptr=False) assert isinstance(indexing, IndexingOptions) # Our sympy expr printing casts to the current kernel index dtype. 
# we only respect non int32-int64 dtypes and otherwise use current kernel indexing dtype index_dtype = torch.int32 if V.kernel.index_dtype == "tl.int32" else torch.int64 dtype = dtype if dtype not in (torch.int32, torch.int64) else index_dtype var = V.kernel.cse.generate( V.kernel.compute, indexing.index_str, bounds=get_bounds_index_expr(expr), dtype=dtype, ) if dtype not in (torch.int32, torch.int64): var = V.kernel.cse.generate( V.kernel.compute, cls.to_dtype(var, dtype), dtype=upcast_compute_type(dtype), ) else: # TODO: we are not always consistent in enforcing that the output of the index expr printing # results in the indexing dtype. So if we detect that we have an input which might type promote # to a dtype other than indexing dtype, add a cast. # Trying to avoid dtype = index_dtype for index_var in expr.free_symbols: if symbol_is_type(index_var, SymT.TMP): dtype = torch.promote_types( dtype, V.kernel.cse.varname_map[index_var.name].dtype ) if dtype != index_dtype: var = V.kernel.cse.generate( V.kernel.compute, cls.to_dtype(var, index_dtype), dtype=index_dtype, ) var.mask_vars = indexing.mask_vars return var @staticmethod def masked(mask, body, other): if mask is not None and torch.version.hip is not None: mask = V.kernel.cse.generate( V.kernel.compute, f"{mask}.to(tl.int1)", dtype=torch.bool, ) nodes = body.graph.find_nodes(op="output") assert nodes, "graph for body does not contain an output" need_where = False for node in nodes: for arg in node.args: if arg.target != "load" or should_unwrap_unspec_arg(arg.args[0]): need_where = True value = None if need_where else other with V.kernel.mask_loads(mask, value=value) as new_mask: result = body() if need_where: # Remove once CSEVariables track the dtype if result.bounds.is_bool: other = bool(other) # Take dtype from result to prevent accidental promotion other = V.kernel.cse.generate( V.kernel.compute, f"tl.full({result}.shape, {constant_repr(other)}, {result}.dtype)", bounds=ValueRanges.wrap(other), dtype=result.dtype, ) ret = ops.where(new_mask, result, other) else: ret = result ret.mask_vars.discard(new_mask) return ret @staticmethod def load_seed(name, offset): var = V.kernel.args.input(name) return ( f"tl.load({var} + {V.kernel.args.seed_offset('load_seed_offset', offset)})" ) @staticmethod def frexp(x): cache_key = f"frexp({x})" if cse_val := V.kernel.cse.try_get(cache_key): return cse_val mantissa = V.kernel.cse.newvar(dtype=x.dtype) exponent = V.kernel.cse.newvar(dtype=torch.int32) V.kernel.compute.writeline( f"{mantissa}, {exponent} = triton_helpers.frexp({x})" ) V.kernel.cse.put(cache_key, (mantissa, exponent)) return (mantissa, exponent) # Use mypy to check protocol implemented correctly def _typecheck_TritonKernelOverrides(h: TritonKernelOverrides) -> OpsHandler[str]: return h class HelperFunctions: """An ordered set of helper functions.""" _templates_seen: Dict[str, str] # Template code to function name finalized_helpers: List[str] def __init__(self) -> None: self._templates_seen = {} self.finalized_helpers = [] def add(self, template_code: str, *, base_name="_triton_helper_fn") -> str: """This accepts a function definition with the function name left as a format specifier e.g. @triton.jit def {name}(arg0, arg1): return arg0 + arg1 We add the templated code to the function set and return the name assigned to that function. 
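        Added note: calling add() twice with an identical template returns the same
        cached name (e.g. "_triton_helper_fn0"), while a new template is assigned the
        next index ("_triton_helper_fn1").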
""" existing_name = self._templates_seen.get(template_code) if existing_name is not None: # Don't duplicate existing helpers return existing_name name = f"{base_name}{len(self.finalized_helpers)}" self._templates_seen[template_code] = name self.finalized_helpers.append(template_code.format(name=name)) return name def __iter__(self): return iter(self.finalized_helpers) def __getitem__(self, idx): return self.finalized_helpers[idx] @dataclasses.dataclass class BlockParameters: """ Class representing ND block dimensions, for block pointer analysis. """ shape: List[sympy.Expr] = dataclasses.field(default_factory=list) block_shape: List[sympy.Expr] = dataclasses.field(default_factory=list) strides: List[sympy.Expr] = dataclasses.field(default_factory=list) offsets: List[sympy.Expr] = dataclasses.field(default_factory=list) def __add__(self, other: BlockParameters) -> BlockParameters: """ Concatenates block parameters. """ cls = type(self) a, b = tuple(dataclasses.asdict(x) for x in (self, other)) return cls(**{key: a[key] + b[key] for key in a}) class CooperativeReductionWorkspaceCache: """ The scratch space used for cooperative reductions can be reused after two reduction loops. This keeps track of what can be reused. """ def __init__(self, args): self.args = args self.current_loop = [] self.prior_loop = [] self.ready_for_reuse = collections.defaultdict(collections.deque) self.loop_count = 0 self.store_count = 0 def allocate(self, nbytes: sympy.Expr): cached = self.ready_for_reuse.get(nbytes) if cached: return cached.popleft() ws_name, ws_offset = self.args.workspace(nbytes, False) self.current_loop.append((nbytes, ws_name, ws_offset)) return (ws_name, ws_offset) def on_loop_end(self): # Buffers can be reused after 2 loop ends for nbytes, ws_name, ws_offset in self.prior_loop: self.ready_for_reuse[nbytes].append((ws_name, ws_offset)) self.prior_loop = self.current_loop self.current_loop = [] self.loop_count += 1 def increment_store_count(self): prior = self.store_count self.store_count += 1 return prior @dataclasses.dataclass class FixedTritonConfig: config: Dict[str, int] def __getitem__(self, item): return self.config[item] class TritonCSE(CSE): """ Subclasses CSE to apply the current load mask to the cache key to avoid CSEing variables across separate masked blocks. 
""" def augment_key(self, cache_key: object) -> object: if mask := V.kernel._load_mask: return (cache_key, mask.name) else: return cache_key class TritonKernel(SIMDKernel): overrides = TritonKernelOverrides # type: ignore[assignment] helper_functions: HelperFunctions kexpr: Callable[[sympy.Expr], str] = texpr allow_block_ptr = True def __init__( self, tiling: Dict[str, sympy.Expr], min_elem_per_thread=0, optimize_mask=True, fixed_config: Optional[FixedTritonConfig] = None, **kwargs, ) -> None: self.optimize_mask: bool = optimize_mask self.fixed_config = fixed_config super().__init__(tiling, **kwargs) self.cse = TritonCSE(self.newvar_prefix, self.suffix) self.post_loop_combine: IndentedBuffer = IndentedBuffer() self.post_loop_store: IndentedBuffer = IndentedBuffer() self.outside_loop_vars: OrderedSet[Any] = OrderedSet() self.min_elem_per_thread = min_elem_per_thread self.block_ptr_id = itertools.count() self.helper_functions = HelperFunctions() self._load_counts: collections.Counter[str] = collections.Counter() # A set of autotuning hints to pass as part of triton_meta self.autotune_hints: OrderedSet[AutotuneHint] = OrderedSet() self.triton_meta: Optional[Dict[str, object]] = None if self.cooperative_reduction: self.init_cooperative_reduction() self.codegen_range_tree() def dtype_to_str(self, dtype: torch.dtype) -> str: return triton_type(dtype) def should_use_cooperative_reduction(self) -> bool: return self.inside_reduction and V.choices.should_use_cooperative_reduction( self.features ) def init_cooperative_reduction(self): """One time setup code for cooperative reductions.""" assert self.cooperative_reduction # shift all the grids over since tl.program_id(0) is for rsplit for tree in self.range_trees: if tree.grid_dim is not None: tree.grid_dim += 1 sem_count = self.numels["x"] if self.fixed_config: sem_count = CeilDiv(sem_count, self.fixed_config["XBLOCK"]) self.semaphores_name = self.args.semaphores(sem_count) self.cooperative_reduction_workspace_cache = CooperativeReductionWorkspaceCache( self.args ) self.body.splice( """ rsplit_id = tl.program_id(0) num_rblocks = (rnumel + RBLOCK - 1) // RBLOCK rsplit_chunk = (num_rblocks + RSPLIT - 1) // RSPLIT * RBLOCK rsplit_start = rsplit_chunk * rsplit_id rsplit_end = rsplit_chunk * (rsplit_id + 1) """, strip=True, ) if not self._has_constant_mask(self.range_trees[-1]): self.body.writeline( "rsplit_end = tl.where(rsplit_end < rnumel, rsplit_end, rnumel)" ) def codegen_range_tree(self): for tree in self.range_trees: # reduction indexing goes inside a loop if not tree.is_loop: self.iteration_ranges_codegen_header(tree, self.body) if self.inside_reduction and self.range_trees[-1].is_loop: # workaround for this issue: # https://gist.github.com/jansel/6527126f781559095c5531f98a4235a7 self.body.writeline( f"rbase = {self.iteration_ranges_ranges_code(self.range_trees[-1])}" ) def need_numel_args(self): r""" Indicate whether we need provide numel as arguments for the generated kernel calls in the benchmark. Should be true for pointwise/reduction kernels but false for triton matmul kernels. 
""" return True def should_use_persistent_reduction(self) -> bool: return self.inside_reduction and V.choices.should_use_persistent_reduction( self.features, self.cooperative_reduction ) def want_no_x_dim(self): if self.persistent_reduction and len(self.numels) == 2: if self.fixed_config: return self.fixed_config["XBLOCK"] == 1 return V.choices.want_no_x_dim(self.features) return False @property def assert_function(self) -> str: return "tl.device_assert" def indexing( self, index: sympy.Expr, *, copy_shape=None, dense_indexing=False, override_mask=None, block_ptr=False, ): """ Compute the index and mask to pass to tl.load() or tl.store() """ index = self.prepare_indexing(index) index_vars = index.free_symbols has_rindex = False mask_vars: OrderedSet[str] = OrderedSet() for var in index_vars: assert isinstance(var, sympy.Symbol) has_rindex = has_rindex or symbol_is_type(var, SymT.RINDEX) if override_mask: pass elif symbol_is_type(var, SymT.TMP): # indirect indexing cse_var = self.cse.varname_map[var.name] mask_vars.update(cse_var.mask_vars) elif symbol_is_type( var, ( SymT.UNBACKED_INT, SymT.SIZE, SymT.PRECOMPUTED_SIZE, SymT.INDEX, SymT.FLOAT, SymT.UNBACKED_FLOAT, ), ): pass else: # var is one of xN, yN or rN assert symbol_is_type( var, (SymT.RINDEX, SymT.XBLOCK, SymT.YBLOCK, SymT.ZBLOCK) ), var.name mask_vars.add(f"{var.name[0]}mask") need_dense = ( config.triton.dense_indexing or dense_indexing or self._load_mask is not None ) and index != 0 have_dense = True have_loop_vars = False dense_mask_vars: OrderedSet[str] = OrderedSet() for tree in self.active_range_trees(): if index_vars.intersection(tree.var_list): have_loop_vars = True else: have_dense = False dense_mask_vars.add(f"{tree.prefix}mask") if ( block_ptr and self.allow_block_ptr and config.triton.use_block_ptr and not override_mask and not self._load_mask and len(mask_vars - dense_mask_vars) == 0 and not self.is_indirect_indexing(index) and have_loop_vars # workaround https://github.com/openai/triton/issues/2821 and self.index_dtype == "tl.int32" ): def match_strided_block( index: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Matches expressions of the form: idx = s * xindex This implies stride (s,), and shape (XBLOCK,). """ symbol = range_tree.symbol() stride = sympy.Wild("stride", exclude=[symbol]) m = index.match(symbol * stride) if m is None: return None return BlockParameters( shape=[range_tree.numel], block_shape=[TritonSymbols.get_block_size(range_tree)], strides=[m[stride]], offsets=[TritonSymbols.get_block_offset(range_tree)], ) def match_mod_div_block( index: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Matches higher-dimensional blocks coming from FloorDiv and ModularIndexing. Example expression to match: sN * ((rindex//(d1 * ... * d(N-1)))) + s1 * ModularIndexing(rindex, 1, d1) + ... + s(N-1) * ModularIndexing(rindex, d1 * ... * d(N-2), d(N-1)) This iterates over a block of shape (dN, ..., d1) and stride (sN, ..., s1). (d1,...,d(N-1)) and (s1,...,sN) are wildcards that we match. Note that dN does not appear in the expression, but we solve for it using range tree numels and the other dims. """ # Bound the possible number of dims. We use the following heuristics: # - At least one dim for each range tree node. # - At least one dim for every FloorDiv or ModularIndexing op. # - At least 2 dims to pattern match. 
num_dims = max( 2, len(self.range_tree_nodes), (index.count(FloorDiv) + index.count(ModularIndexing)), ) # Pattern match to find the strides and offset. index_var = range_tree.symbol() match_result = BlockPatternMatcher.match_mod_div_block_expr( index, index_var, range_tree.numel, num_dims ) if match_result is None: return None ( dims, strides, block_index_exprs, ) = match_result slice_numels = BlockPatternMatcher.get_slice_numels(dims) # Check for applicable iteration range sizes. # When mapping a 1D block into an ND one, we need to know that # the number of elements is not changed. This means the slice numels of # the ND iteration range must evenly divide the length of the 1D block. # There are two cases where we can guarantee this: # 1. Numels are powers of 2. If numel == 2 ** n, and we know XBLOCK == 2 ** m, # with n and m integers, then either numel is a multiple of XBLOCK, or numel # is less than XBLOCK. (If numel is less than XBLOCK, we round up to 1 below.) # 2. Numels are multiples of the maximum possible block size. sizevars = V.graph.sizevars max_block = self.max_block(range_tree.prefix) if any( not sizevars.statically_known_multiple_of(numel, max_block) and not sizevars.statically_known_power_of_2(numel) for numel in slice_numels ): return None # Compute the ND block shape from the linear block size. # Use CielDiv to round leading dimensions up to 1. # Non-leading dimensions are clamped to the size of the iteration range, # while the leading dimension can exceed this to accomodate a larger # block size. linear_block_size = TritonSymbols.get_block_size(range_tree) block_shape: List[sympy.Expr] = [ CeilDiv(linear_block_size, slice_numels[0]) ] + [ sympy.Min(CeilDiv(linear_block_size, numel), dim) for numel, dim in zip(slice_numels[1:], dims[1:]) ] # Compute block offsets from {xyzr}offset and the matched expressions. block_offsets: List[sympy.Expr] = [ sympy_subs( expr, {index_var: TritonSymbols.get_block_offset(range_tree)} ) for expr in block_index_exprs ] return BlockParameters( shape=dims, block_shape=block_shape, strides=strides, offsets=block_offsets, ) def match_block_pointer_subexpr( expr: sympy.Expr, range_tree: IterationRangesEntry ) -> Optional[BlockParameters]: """ Match a block indexing subexpression involving a single range tree. """ for match_func in ( match_strided_block, match_mod_div_block, ): match = match_func(expr, range_tree) if match is not None: return match return None def match_block_pointer() -> Optional[BlockPtrOptions]: index_relative_to_xyr_index = sympy_subs( index, {v: t.expr for v, t in self.range_tree_nodes.items()} ) range_trees = self.active_range_trees(reorder=True) # Partition the index into subexpressions pertaining to each range tree. # For example xindex * 5 + rindex * 3 is partitioned to # (xindex * 5, rindex * 3). index_subexprs = [ BlockPatternMatcher.get_subexpr_involving_symbol( index_relative_to_xyr_index, tree.symbol() ) for tree in range_trees ] # Match each range tree's subexpression separately. range_symbols = {tree.symbol() for tree in range_trees} block_params = BlockParameters() for tree, subexpr in zip(range_trees, index_subexprs): # Reject mixed terms, e.g. xindex * rindex. # NB: the zero expression is allowed, for broadcasting. if len(range_symbols.intersection(subexpr.free_symbols)) > 1: return None # Match the subexpression for this range tree. params = match_block_pointer_subexpr(subexpr, tree) if params is None: return None block_params += params # Collect leftover terms as a constant offset. 
offset = index_relative_to_xyr_index - sum(index_subexprs) # Form the block pointer. self.filter_masks(mask_vars) return BlockPtrOptions.create( params=block_params, constant_offset=offset, range_trees=range_trees, mask_vars=mask_vars, get_max_block=self.max_block, ) # Return a block pointer, if indexing matches the pattern. options = match_block_pointer() if options is not None: return options expand_str = None index_str = self.index_to_str(index) if isinstance(index, sympy.Integer): expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() index_str = f"tl.full({expand_str}, {index_str}, tl.int32)" return IndexingOptions( index_str, OrderedSet(), "None", expand_str, has_rindex, index ) if need_dense and not have_dense: expand_str = f"{copy_shape}.shape" if copy_shape else self.dense_size_str() index_str = f"tl.broadcast_to({index_str}, {expand_str})" mask_vars = dense_mask_vars elif not have_loop_vars and copy_shape: index_str = f"tl.broadcast_to({index_str}, {copy_shape}.shape)" mask_vars = dense_mask_vars if override_mask: mask_vars = OrderedSet([override_mask]) if self._load_mask: mask_vars.add(self._load_mask) self.filter_masks(mask_vars) mask_str = " & ".join(sorted(map(str, mask_vars))) if mask_vars else "None" return IndexingOptions(index_str, mask_vars, mask_str, expand_str, has_rindex, index) # type: ignore[arg-type] def codegen_block_ptr( self, name: str, var: str, indexing: BlockPtrOptions, other="" ) -> Tuple[str, Optional[DeferredLine], str]: advance_block_ptr = None check = indexing.boundary_check() if not check: # workaround https://github.com/openai/triton/issues/2813 other = "" elif other: assert other == ", other=0.0" other = f", boundary_check={check!r}, padding_option='zero'" else: other = f", boundary_check={check!r}" if ( self.inside_reduction and self.range_trees[-1].is_loop and indexing.has_rindex() ): block_ptr = f"block_ptr{next(self.block_ptr_id)}" self.body.writeline( DeferredLine( name, f"{block_ptr} = {indexing.format(var, roffset=False)}" ) ) advance_block_ptr = DeferredLine( name, f"{block_ptr} = tl.advance({block_ptr}, {indexing.advance_roffset()})", ) else: block_ptr = indexing.format(var) return block_ptr, advance_block_ptr, other def codegen_block_ptr_store_line(self, name, indexing, block_ptr, value, other=""): # Stores require an explicit broadcast. 
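        # For reference, the block-pointer sequence assembled above corresponds roughly
        # to the following Triton calls (illustrative sketch; names, shapes and strides
        # are made up, and the real code is emitted as strings by BlockPtrOptions):
        #
        #     block = tl.make_block_ptr(base=in_ptr0, shape=(M, N), strides=(N, 1),
        #                               offsets=(0, 0), block_shape=(XBLOCK, RBLOCK),
        #                               order=(1, 0))
        #     tmp = tl.load(block, boundary_check=(0, 1), padding_option='zero')
        #     block = tl.advance(block, (0, RBLOCK))   # step to the next reduction tile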
value = indexing.codegen_broadcast_and_reshape( value, indexing.final_shape, indexing.block_shape, False ) # workaround https://github.com/openai/triton/issues/2814 value = f"{value}.to({triton_store_type(V.graph.get_dtype(name))})" return f"tl.store({block_ptr}, {value}{other})" def check_bounds( self, expr: sympy.Expr, size: sympy.Expr, lower: bool, upper: bool, ): if not (lower or upper): return assert isinstance(expr, sympy.Expr) indexing = self.indexing(expr, block_ptr=False) assert isinstance(indexing, IndexingOptions) index_str = indexing.index_str mask_str = indexing.mask_str if indexing.has_mask() else None size_str = texpr(self.rename_indexing(size)) if upper else None # expr is already wrapped line = self.indirect_assert( index_str, "0" if lower else None, size_str, mask_str ) buffer = self.get_load_buffer(indexing) self.cse.generate(buffer, line, assignment=False, dtype=torch.int32) def get_load_buffer(self, indexing): if indexing.has_indirect() or indexing.has_tmpmask(): # Masked loads must come after the mask is computed return self.compute elif ( self.inside_reduction and self.range_trees[-1].is_loop and not indexing.has_rindex() ): # can lift a common load outside of reduction loop # One exception is when this is an indirect_load. return self.body else: return self.loads def load(self, name: str, index: sympy.Expr): var = self.args.input(name) load_counts = self._load_counts load_counts[name] += 1 make_line: Callable[[str], Union[str, DelayReplaceLine]] = identity indirect_indexing = self.is_indirect_indexing(index) original_index = index indexing = self.indexing(index, block_ptr=True) has_rindex = indexing.has_rindex() has_tmpmask = indexing.has_tmpmask() # Keep the variable in cache if were going to reuse it. Equiv., if any of the following hold # 1) We are doing broadcasting # 2) It is a non-coalesced load. The intuition is that if it's # non-coalesced, we will likely load each element multiple times in # practice. # 3) It will be used later and it won't be CSE'd. 
Equiv., if all the following hold # 3.1) We are in a reduction loop # 3.2) Its not its last use # 3.3) This load will not be lifted to the body # is_coalesced = any( i == 1 for i in self.get_strides_of_load(original_index).values() ) if self.is_broadcasted(original_index): ep = ", eviction_policy='evict_last'" elif not is_coalesced: ep = ", eviction_policy='evict_last'" elif self.inside_reduction and self.range_trees[-1].is_loop: def decide_later(): if load_counts[name] > expected_count and ( has_rindex or indirect_indexing ): return "evict_last" return "evict_first" expected_count = load_counts[name] ep = ", eviction_policy='<EP>'" make_line = functools.partial(DelayReplaceLine, "<EP>", decide_later) else: ep = "" if (has_tmpmask or has_rindex) and indexing.has_mask(): if self._load_other: other = f", other={constant_repr(self._load_other)}" else: other = ", other=0.0" else: other = "" advance_block_ptr = None append_broadcast = None dtype = V.graph.get_dtype(name) if should_unwrap_unspec_arg(name): line = var else: if isinstance(indexing, BlockPtrOptions): block_ptr, advance_block_ptr, other = self.codegen_block_ptr( name, var, indexing, other ) line = f"tl.load({block_ptr}{other}{ep})" line = indexing.codegen_broadcast_and_reshape( line, indexing.block_shape, indexing.final_shape, True ) elif isinstance(original_index, sympy.Integer): line = f"tl.load({var} + ({original_index}))" append_broadcast = indexing.expand_str else: line = f"tl.load({var} + ({indexing.index_str}), {indexing.mask_str}{ep}{other})" if ( dtype in (torch.float16, torch.bfloat16) and config.triton.codegen_upcast_to_fp32 ): line += ".to(tl.float32)" dtype = torch.float32 if dtype == torch.bool and torch.version.hip is None: # Workaround for https://github.com/openai/triton/issues/2151 # tl.load returns int8 when loading from pointer to int1 # NOTE: Currently causes hangs on bool UTs for ROCm line += ".to(tl.int1)" dtype = torch.bool load_buffer = self.get_load_buffer(indexing) result_var = self.cse.generate(load_buffer, make_line(line), dtype=dtype) if result_var.use_count > 1: load_counts[name] -= 1 # don't double count cache hit assert isinstance(result_var, TritonCSEVariable) result_var.mask_vars = indexing.mask_vars # type: ignore[assignment] if append_broadcast: line = f"tl.broadcast_to({result_var}, {append_broadcast})" result_var = self.cse.generate(load_buffer, line, dtype=dtype) if advance_block_ptr: load_buffer.writeline(advance_block_ptr) if not self.inside_reduction or (not indexing.has_rmask() and not has_rindex): self.outside_loop_vars.add(result_var) return result_var def store( self, name: str, index: sympy.Expr, value: CSEVariable, mode: StoreMode = None ) -> None: var = self.args.output(name) original_index = index indexing = self.indexing(index, dense_indexing=True, block_ptr=mode is None) # Guard against write-after-read corruption in triton. # See # https://github.com/openai/triton/issues/1615 # This triton bug means that a load which is broadcasted over multiple # warps may see the result of a store that happens later in the triton # program. The workaround is to add a barrier before storing, which # enforces that all warps have already read the data. 
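        # For reference, the store lines built below render roughly as follows
        # (buffer and variable names are illustrative):
        #
        #     tl.debug_barrier()   # only for broadcasted stores into in-place buffers
        #     tl.store(out_ptr0 + (x0), tmp4, xmask)
        #     tl.atomic_add(out_ptr0 + (x0), tmp4, xmask, sem='relaxed')   # mode="atomic_add"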
is_inplace = name in self.args.inplace_buffers is_broadcasted = self.is_broadcasted(original_index) if is_inplace and is_broadcasted: self.stores.writeline(DeferredLine(name, "tl.debug_barrier()")) advance_block_ptr = None if isinstance(indexing, BlockPtrOptions): block_ptr, advance_block_ptr, other = self.codegen_block_ptr( name, var, indexing ) # block_ptr stores don't do implicit casting line = self.codegen_block_ptr_store_line( name, indexing, block_ptr, value, other ) elif mode is None: line = f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})" elif mode == "atomic_add": line = f"tl.atomic_add({var} + ({indexing.index_str}), {value}, {indexing.mask_str}, sem='relaxed')" else: raise NotImplementedError(f"store mode={mode}") exit_stack = contextlib.ExitStack() if not self.inside_reduction and self.cooperative_reduction: exit_stack.enter_context(self.guard_cooperative_store(name, self.stores)) self.stores.writeline(DeferredLine(name, line)) if advance_block_ptr: self.stores.writeline(advance_block_ptr) if not self.inside_reduction: self.outside_loop_vars.add(value) exit_stack.close() def guard_cooperative_store(self, name, buffer): """ For cooperative reductions only one thread block should write out the result. We rotate which thread block does each write for better parallelism """ idx = self.cooperative_reduction_workspace_cache.increment_store_count() buffer.writeline(DeferredLine(name, f"if rsplit_id == ({idx} % RSPLIT):")) return buffer.indent() def bucketize( self, values: CSEVariable, boundaries: Tuple[str, sympy.Expr, sympy.Expr, sympy.Expr], boundary_indices: CSEVariable, indexing_dtype: torch.dtype, right: bool, sorter: Optional[Tuple[str, sympy.Expr]] = None, sorter_indices: Optional[CSEVariable] = None, ) -> CSEVariable: """ See [Note: Inductor bucketize op] """ # Triton performance for bucketize_binary_search is much better when the number # of threads equals the number of elements. # If we're trying to use a bucketize kernel, we should make sure that an # autotuning config with num_elements_per_warp=(warp_size) exists. 
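        # (A CPU-side refresher on the bucketize semantics being lowered here,
        # independent of the Triton helper and of the autotuning note above;
        # purely illustrative.)
        def _sketch_bucketize_semantics():
            import torch
            boundaries = torch.tensor([1.0, 3.0, 5.0])
            values = torch.tensor([0.5, 3.0, 6.0])
            # right=False: first index i such that boundaries[i] >= value
            assert torch.equal(torch.bucketize(values, boundaries), torch.tensor([0, 1, 3]))
            # right=True: first index i such that boundaries[i] > value
            assert torch.equal(torch.bucketize(values, boundaries, right=True), torch.tensor([0, 2, 3]))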
self.autotune_hints.add(AutotuneHint.ONE_ELEMENT_PER_THREAD) boundaries_ptr = self.args.input(boundaries[0]) boundary_size = self.index_to_str(boundaries[1]) boundaries_underlying_numel = self.index_to_str(boundaries[2]) boundary_stride = self.index_to_str(boundaries[3]) sorter_ptr = self.args.input(sorter[0]) if sorter else "None" sorter_stride = self.index_to_str(sorter[1]) if sorter else "None" block_size = self.dense_size_str() if indexing_dtype == torch.int32: triton_dtype = "tl.int32" elif indexing_dtype == torch.int64: triton_dtype = "tl.int64" else: raise NotImplementedError( "Bucketize only supports indexing with int32 and int64" ) result = self.cse.generate( self.compute, f"triton_helpers.bucketize_binary_search({values}, " f"{boundaries_ptr}, {boundary_size}, {boundaries_underlying_numel}, {boundary_stride}, " f"{boundary_indices}, " f"{triton_dtype}, " f"{right}, " f"{sorter_ptr}, {sorter_stride}, " f"{sorter_indices}, " f"{block_size}, " ")", dtype=indexing_dtype, # type: ignore[attr-defined] ) return result def reduction_resize(self, value): ndims = self.triton_tensor_ndim() if ndims == 1: return f"triton_helpers.promote_to_tensor({value})" sizes = [":"] * ndims sizes[-1] = "None" return f"{value}[{', '.join(sizes)}]" def reduction( self, dtype: torch.dtype, src_dtype: torch.dtype, reduction_type: ReductionType, value: Union[CSEVariable, Tuple[CSEVariable, ...]], ) -> Union[CSEVariable, Tuple[CSEVariable, ...]]: assert self.inside_reduction masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) if self._load_mask: masks.append(self._load_mask) reduction_range_prefix = self.range_trees[-1].prefix # Say we have # tmp0 = ops.constant(1, torch.int64) # tmp1 = ops.reduction(torch.int64, torch.int64, "sum", tmp0) # tmp0 in the triton code is either a scalar, or single-element tensor # so if we emit tl.sum directly, it will only give 1 instead of RBLOCK * 1 # To avoid this, we broadcast to the expected shape first. 
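        # A minimal eager-mode sketch of why the broadcast matters (illustrative only):
        def _sketch_broadcast_before_reduce():
            import torch
            RBLOCK = 8
            scalar = torch.tensor(1)
            assert scalar.sum().item() == 1                  # reducing the scalar gives 1
            assert scalar.expand(RBLOCK).sum().item() == 8   # broadcasting first gives RBLOCK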
dense_size_str = self.dense_size_str() value = self._map_tuple_or_scalar( lambda v: self.cse.generate( self.compute, f"tl.broadcast_to({v}, {dense_size_str})", dtype=v.dtype, ), value, ) dim: int root_op: str def final_reduction(value): use_helper = reduction_type in {"any", "max", "min", "prod"} module = "triton_helpers" if use_helper else "tl" if reduction_type in {"max", "min"}: return self.reduction_resize( f"{module}.{reduction_type}2({value}, {dim})" ) return self.reduction_resize(f"{module}.{reduction_type}({value}, {dim})") def final_argreduce(buffer, result_var, value, index): buffer.splice( f"""\ {result_var}_val, {result_var}_idx = triton_helpers.{root_op}_with_index({value}, {index}, {dim}) {result_var} = {self.reduction_resize(f'{result_var}_idx')} """ ) cache_key = (src_dtype, reduction_type, value) if cache_key in self.cse.reduction_cache: return self.cse.reduction_cache[cache_key] dim = self.triton_tensor_ndim() - 1 acc_type = triton_acc_type(src_dtype) torch_acc_type = upcast_acc_dtype(src_dtype) result_var: Any = self.cse.newvar(dtype=torch_acc_type) result_var.mask_vars = OrderedSet( var for var in masks if not prefix_is_reduction(var[0]) ) cond = " & ".join(masks) def where_cond(tval, fval): if not cond: return tval return TritonKernelOverrides.where(cond, tval, fval) if self.persistent_reduction: default = ir.Reduction.default_value(reduction_type, src_dtype) default = self._map_tuple_or_scalar(constant_repr, default) def _mask_value(value, default): return self.cse.generate( self.compute, where_cond(value, default), dtype=value.dtype ) if isinstance(value, tuple): masked_value = [_mask_value(v, d) for v, d in zip(value, default)] else: masked_value = _mask_value(value, default) if reduction_type in {"argmax", "argmin"}: accumulator_index = str( self.cse.generate( self.compute, f"tl.broadcast_to({reduction_range_prefix}index, {masked_value}.shape)", dtype=torch.int64, ) ) root_op = {"argmax": "max", "argmin": "min"}[reduction_type] final_argreduce( self.compute, result_var, masked_value, accumulator_index ) elif reduction_type == "welford_reduce": if self.cooperative_reduction: # cooperative reductions require full welford for correctness result_var = self.welford_reduce( result_var, reduction_type, value, where_cond, acc_type, dtype ) else: # For persistent reductions, don't bother with # welford's algorithm since it uses more registers, and # taking two reductions doesn't increase memory usage. 
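                    # A minimal eager-mode sketch of the idea behind the fallback: mean and
                    # m2 via two plain reductions instead of a running Welford accumulator
                    # (illustrative only, not the helper's actual implementation):
                    def _sketch_welford_fallback_two_pass():
                        import torch
                        x = torch.randn(128)
                        mean = x.mean()
                        m2 = ((x - mean) ** 2).sum()
                        var_ref, mean_ref = torch.var_mean(x, unbiased=False)
                        assert torch.allclose(m2 / x.numel(), var_ref)
                        assert torch.allclose(mean, mean_ref)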
result_var = self.welford_reduce_fallback(dtype, value) elif reduction_type == "welford_combine": mean, m2, weight = masked_value welford = f"triton_helpers.welford({mean}, {m2}, {weight}, {dim})" mean, m2, weight = (self.cse.newvar(dtype=dtype) for _ in range(3)) self.compute.writeline(f"{mean}, {m2}, {weight} = {welford}") result_var = tuple( self.cse.generate( self.compute, self.reduction_resize(var_name), dtype=dtype ) for var_name in (mean, m2, weight) ) else: result_var = self.cse.generate( self.compute, final_reduction(masked_value), dtype=dtype ) else: accumulator = self.cse.namedvar(f"_{result_var}", dtype=torch_acc_type) default = ir.Reduction.default_accumulator(reduction_type, src_dtype) default = self._map_tuple_or_scalar(constant_repr, default) if not isinstance(default, tuple): self.body.writeline( f"{accumulator} = tl.full({self.dense_size_str()}, {default}, {acc_type})" ) if reduction_type in {"argmax", "argmin"}: accumulator_index = f"_{result_var}_index" long_max = torch.iinfo(torch.int64).max self.body.writeline( f"{accumulator_index} = tl.full({self.dense_size_str()}, {long_max}, tl.int64)" ) root_op = {"argmax": "max", "argmin": "min"}[reduction_type] self.compute.splice( f"""\ {accumulator}_next, {accumulator_index}_next = triton_helpers.{root_op}imum_with_index( {accumulator}, {accumulator_index}, {value}, {reduction_range_prefix}index ) {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} {accumulator_index} = {where_cond(f'{accumulator_index}_next', accumulator_index)} """ ) final_argreduce( self.post_loop_combine, result_var, accumulator, accumulator_index ) elif is_welford_reduction(reduction_type): result_var = self.welford_reduce( result_var, reduction_type, value, where_cond, acc_type, dtype ) else: combine_fn = ir.get_reduction_combine_fn(reduction_type, src_dtype) updated = combine_fn(accumulator, value) self.compute.writeline( f"{accumulator} = {where_cond(updated, accumulator)}" ) if src_dtype == torch.bool: # This is only really used for aten.any. 
It changes the # final reduction of a non-persistent reduction from # tmp5 = triton_helpers.max(_tmp5, 1)[:, None] # to # tmp5 = triton_helpers.max(_tmp5.to(tl.int8), 1)[:, None].to(tl.int1) # which is needed because tl.reduce doesn't support tl.int1 accumulator_casted_str = f"{accumulator}.to(tl.int8)" result_type = triton_compute_type(dtype) self.post_loop_combine.writeline( f"{result_var} = {final_reduction(accumulator_casted_str)}.to({result_type})" ) else: self.post_loop_combine.writeline( f"{result_var} = {final_reduction(accumulator)}" ) if self.cooperative_reduction: exit_stack = contextlib.ExitStack() for buf in (self.post_loop_combine, self.post_loop_store): # only do cooperative reduction combines if we have more than one thread block buf.writeline("if RSPLIT > 1:") exit_stack.enter_context(buf.indent()) if reduction_type in {"argmax", "argmin"}: self.post_loop_combine.writeline( f"{result_var}_bval = {self.reduction_resize(f'{result_var}_val')}" ) peer_val = self.codegen_cooperative_reduction_peer_combine( f"{result_var}_bval", src_dtype ) peer_idx = self.codegen_cooperative_reduction_peer_combine( result_var, dtype ) final_argreduce(self.post_loop_store, result_var, peer_val, peer_idx) elif is_welford_reduction(reduction_type): assert reduction_type == "welford_reduce" result_mean, result_m2, result_weight = result_var peer_mean = self.codegen_cooperative_reduction_peer_combine( result_mean, upcast_acc_dtype(src_dtype) ) peer_m2 = self.codegen_cooperative_reduction_peer_combine( result_m2, upcast_acc_dtype(src_dtype) ) peer_weight = self.codegen_cooperative_reduction_peer_combine( result_weight, upcast_acc_dtype(src_dtype) ) self.welford_reduce_final_reduction( self.post_loop_store, result_mean, result_m2, result_weight, peer_mean, peer_m2, peer_weight, dim, ) else: peers = self.codegen_cooperative_reduction_peer_combine( result_var, upcast_acc_dtype(src_dtype) ) self.post_loop_store.writeline( f"{result_var} = {final_reduction(peers)}" ) exit_stack.close() self.cse.reduction_cache[cache_key] = result_var if isinstance(result_var, tuple): assert all(isinstance(x, TritonCSEVariable) for x in result_var) self.outside_loop_vars |= OrderedSet(result_var) else: assert isinstance(result_var, TritonCSEVariable) self.outside_loop_vars.add(result_var) return result_var def welford_reduce( self, result_var, reduction_type, value, where_cond, acc_type, dtype ): """Helper to codegen a welford reduction""" dim = self.triton_tensor_ndim() - 1 accumulator = f"{result_var}_mean" accumulator_m2 = f"{result_var}_m2" accumulator_weight = f"{result_var}_weight" self.body.writeline( f"{accumulator} = tl.zeros({self.dense_size_str()}, {acc_type})" ) self.body.writeline( f"{accumulator_m2} = tl.zeros({self.dense_size_str()}, {acc_type})" ) self.body.writeline( f"{accumulator_weight} = tl.zeros({self.dense_size_str()}, {acc_type})" ) if reduction_type == "welford_combine": mean, m2, weight = value self.compute.splice( f"""\ {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_combine( {accumulator}, {accumulator_m2}, {accumulator_weight}, {mean}, {m2}, {weight} ) """ ) else: assert reduction_type == "welford_reduce" self.compute.splice( f"""\ {accumulator}_next, {accumulator_m2}_next, {accumulator_weight}_next = triton_helpers.welford_reduce( {value}, {accumulator}, {accumulator_m2}, {accumulator_weight}, roffset == 0 ) """ ) self.compute.splice( f"""\ {accumulator} = {where_cond(f'{accumulator}_next', accumulator)} {accumulator_m2} = 
{where_cond(f'{accumulator_m2}_next', accumulator_m2)} {accumulator_weight} = {where_cond(f'{accumulator_weight}_next', accumulator_weight)} """ ) result_mean = result_var result_m2 = self.cse.newvar(dtype=dtype) result_weight = self.cse.newvar(dtype=dtype) return self.welford_reduce_final_reduction( self.post_loop_combine, result_mean, result_m2, result_weight, accumulator, accumulator_m2, accumulator_weight, dim, ) def welford_reduce_final_reduction( self, buf, result_mean, result_m2, result_weight, accumulator, accumulator_m2, accumulator_weight, dim, ): """Helper to codegen call to triton_helpers.welford""" buf.splice( f"""\ {result_mean}_tmp, {result_m2}_tmp, {result_weight}_tmp = triton_helpers.welford( {accumulator}, {accumulator_m2}, {accumulator_weight}, {dim} ) {result_mean} = {self.reduction_resize(f'{result_mean}_tmp')} {result_m2} = {self.reduction_resize(f'{result_m2}_tmp')} {result_weight} = {self.reduction_resize(f'{result_weight}_tmp')} """ ) return result_mean, result_m2, result_weight def max_rsplit(self): if self.fixed_config: return self.fixed_config["RSPLIT"] return TRITON_MAX_RSPLIT def codegen_cooperative_reduction_peer_combine(self, result_var, dtype): """ Generate code to save a [XBLOCK, RSPLIT] temporary workspace, where each thread block writes a different column. After the barrier, every thread block loads the completed value so that it can compute the final value independently. """ xnumel = self.numels["x"] mask = "xindex < xnumel" if xnumel != 1 and not self.no_x_dim else None expand = "" if self.no_x_dim else "[None,:]" nbytes = xnumel * dtype.itemsize * self.max_rsplit() ws_name, ws_offset = self.cooperative_reduction_workspace_cache.allocate(nbytes) self.post_loop_combine.splice( f""" {result_var}_ws = ({ws_name} + {self.index_to_str(ws_offset)}).to(tl.pointer_type({triton_type(dtype)})) tl.store({result_var}_ws + (xindex * RSPLIT + rsplit_id), {result_var}, {mask}) """, strip=True, ) self.post_loop_store.writeline( f"{result_var}_peers = tl.load({result_var}_ws + (xindex * RSPLIT + tl.arange(0, RSPLIT){expand}), " f"{mask}, eviction_policy='evict_first')" ) return f"{result_var}_peers" def store_reduction(self, name: str, index: sympy.Expr, value: CSEVariable): assert self.inside_reduction self.inside_reduction = False indexing = self.indexing(index, block_ptr=True) self.inside_reduction = True var = self.args.output(name) exit_stack = contextlib.ExitStack() if self.cooperative_reduction: exit_stack.enter_context( self.guard_cooperative_store(name, self.post_loop_store) ) if isinstance(indexing, BlockPtrOptions): self.post_loop_store.writeline( DeferredLine( name, self.codegen_block_ptr_store_line( name, indexing, indexing.format(var), value, f", boundary_check={indexing.boundary_check()!r}", ), ) ) else: assert isinstance(indexing, IndexingOptions) self.post_loop_store.writeline( DeferredLine( name, f"tl.store({var} + ({indexing.index_str}), {value}, {indexing.mask_str})", ) ) exit_stack.close() def _lift_helper(self, fn, num_args) -> str: # Lift IR function for scan operations into a triton function # in the global namespace helper = IndentedBuffer() helper.writeline("@triton.jit") args = [tuple(f"arg{i}_{n}" for n in range(num_args)) for i in range(2)] signature = ", ".join(itertools.chain.from_iterable(args)) helper.writeline(f"def {{name}}({signature}):") cse = CSE(prefix="", suffix="") overrides = TritonOverrides(V.MockHandler()) # Build a name that changes depending on fn to workaround a triton bug # where the combine_fn to reduce and scan is not 
hashed, and so different # scan ops may collide in the triton cache. # This is fixed with the latest triton pin, but not the triton-rocm pin. helper_name = "_triton_helper_fn" class CSEProxy: def __getattr__(self, name: str) -> Callable[..., CSEVariable]: def inner(*args, **kwargs): nonlocal helper_name helper_name += f"_{name}" return cse.generate( helper, getattr(overrides, name)(*args, **kwargs), dtype=torch.float32, ) return inner with helper.indent(), V.set_ops_handler(CSEProxy()): outputs = fn(*args) outputs = ", ".join(str(output) for output in outputs) helper.writeline(f"return {outputs}") return self.helper_functions.add(helper.getvalue(), base_name=helper_name) def scan( self, dtypes: Tuple[torch.dtype, ...], combine_fn: Callable[ [Tuple[CSEVariable, ...], Tuple[CSEVariable, ...]], Tuple[CSEVariable, ...] ], values: Tuple[CSEVariable, ...], ) -> Tuple[CSEVariable, ...]: assert self.inside_reduction assert not self.cooperative_reduction, "TODO" masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) assert not self._load_mask, "ops.scan not supported inside ops.masked" broadcasted_values = [] accumulators = [] cse_compute = functools.partial(self.cse.generate, self.compute) combine_helper_fn = self._lift_helper(combine_fn, len(values)) dim = self.triton_tensor_ndim() - 1 for value, dtype in zip(values, dtypes): value_dtype = self.cse.generate( self.compute, f"{value}.to({triton_compute_type(dtype)})", dtype=upcast_compute_type(dtype), ) value = self.cse.generate( self.compute, f"tl.broadcast_to({value_dtype}, {self.dense_size_str()})", dtype=upcast_compute_type(dtype), ) broadcasted_values.append(value) acc_type = triton_acc_type(dtype) if not self.persistent_reduction: accumulator = self.cse.newvar(dtype=upcast_compute_type(dtype)) reduced_size = self.dense_size_list() reduced_size[-1] = "1" reduced_size = f"[{', '.join(reduced_size)}]" default = "float('nan')" if dtype.is_floating_point else "-1" self.body.writeline( f"{accumulator} = tl.full({reduced_size}, {default}, {acc_type})" ) accumulators.append(accumulator) def csv(values): return " ".join(f"{value}," for value in values) def cse_multiple(line, values, masks, dtypes): n = len(values) cache_keys = [f"{line}, {i}, {masks}" for i in range(n)] if all(self.cse.contains(cache_key) for cache_key in cache_keys): return [self.cse.get(cache_key) for cache_key in cache_keys] result_vars = [self.cse.newvar(dtype=_dtype) for _dtype in dtypes] self.compute.writeline( f"{csv(result_vars)} = {line}", ) for result_var, cache_key in zip(result_vars, cache_keys): if masks: result_var.mask_vars = masks # type: ignore[attr-defined] self.cse.put(cache_key, result_var) return tuple(result_vars) partial_scan_vars = cse_multiple( f"tl.associative_scan(({csv(broadcasted_values)}), {dim}, {combine_helper_fn})", values, masks, (upcast_compute_type(dtype) for dtype in dtypes), ) if not self.persistent_reduction: # tl.reduce doesn't work for non-commutative operators, so instead # of repeating the scan op as a reduction, we use sum to select the # last scan value partial_reduce_vars = [ cse_compute( f"triton_helpers.select_one(({partial_scan_var}), rbase == (RBLOCK - 1), dim=-1, keep_dims=True)", dtype=upcast_compute_type(partial_scan_var.dtype), ) for partial_scan_var in partial_scan_vars ] accs_next = combine_fn(tuple(accumulators), tuple(partial_reduce_vars)) full_scan_vars = combine_fn(tuple(accumulators), partial_scan_vars) result_vars = [ cse_compute( f"tl.where(roffset > 0, 
{full_scan}, {partial_scan})", dtype=partial_scan.dtype, ) for full_scan, partial_scan in zip(full_scan_vars, partial_scan_vars) ] for acc_next, accumulator, partial_reduce in zip( accs_next, accumulators, partial_reduce_vars ): self.compute.writeline( f"{accumulator} = tl.where(roffset > 0, {acc_next}, {partial_reduce})" ) else: result_vars = partial_scan_vars for result_var in result_vars: result_var.mask_vars = masks # type: ignore[attr-defined] return tuple(result_vars) def sort( self, dtypes: Tuple[torch.dtype, ...], values: Tuple[CSEVariable, ...], stable: bool, descending: bool, ) -> Tuple[CSEVariable, ...]: assert self.inside_reduction assert not self.cooperative_reduction, "TODO" masks = OrderedSet(f"{tree.prefix}mask" for tree in self.range_trees) self.filter_masks(masks) masks = sorted(masks) assert not self._load_mask, "ops.sort not supported inside ops.masked" assert ( self.persistent_reduction ), "ops.sort is only supported in persistent reductions" reduction_range_prefix = self.range_trees[-1].prefix cse_compute = functools.partial(self.cse.generate, self.compute) dim = self.triton_tensor_ndim() - 1 assert len(dtypes) == len(values) broadcasted_values = [ cse_compute( f"tl.broadcast_to({value}, {self.dense_size_str()})", dtype=dtypes[i] ) for i, value in enumerate(values) ] def csv(values): return " ".join(f"{value}," for value in values) def cse_multiple(line, n, masks, dtypes): cache_keys = [f"{line}, {i}, {masks}" for i in range(n)] if all(self.cse.contains(cache_key) for cache_key in cache_keys): return [self.cse.get(cache_key) for cache_key in cache_keys] result_vars = [self.cse.newvar(dtype=dtypes[i]) for i in range(n)] # type: ignore[attr-defined] self.compute.writeline( f"{csv(result_vars)} = {line}", ) for result_var, cache_key in zip(result_vars, cache_keys): if masks: result_var.mask_vars = masks # type: ignore[attr-defined] self.cse.put(cache_key, result_var) return tuple(result_vars) assert self.range_trees[-1].is_reduction rnumel = "None" if self._has_constant_mask(self.range_trees[-1]) else "rnumel" if len(values) == 2: line = ( f"triton_helpers.sort_with_index({broadcasted_values[0]}, {broadcasted_values[1]}," f" {rnumel}, {dim}, stable={stable}, descending={descending})" ) result_vars = cse_multiple(line, len(values), masks, dtypes) else: raise AssertionError("Unhandled sort") for result_var, input_var in zip(result_vars, values): result_var.mask_vars = masks # type: ignore[attr-defined] result_var.bounds = input_var.bounds return tuple(result_vars) def codegen_body(self): """ Concat output code from index_code, loads, compute, stores, suffix into self.body. For pointwise kernels, this is called just once at the end. For reduction kernels, this generates a loop over the reduction axis. 
""" if not ( self.indexing_code or self.loads or self.stores or self.compute or self.post_loop_combine or self.post_loop_store ): return if self.inside_reduction and self.range_trees[-1].is_loop: if self.cooperative_reduction: self.body.writeline( "for roffset in range(rsplit_start, rsplit_end, RBLOCK):" ) else: self.body.writeline("for roffset in range(0, rnumel, RBLOCK):") with self.body.indent(): # last range tree is always reduction self.iteration_ranges_codegen_header(self.range_trees[-1], self.body) self.body.splice(self.indexing_code) self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) # invalidate any caches that came from inside the reduction loop self.cse.invalidate(self.outside_loop_vars) self.range_trees[-1].cache_clear() else: self.body.splice(self.indexing_code) self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) self.body.splice(self.post_loop_combine) if self.cooperative_reduction and ( self.post_loop_combine or self.post_loop_store ): sem_ptr = f"{self.semaphores_name} + tl.program_id(1)" self.body.splice( f""" if RSPLIT > 1: triton_helpers.x_grid_barrier({sem_ptr}) """, strip=True, ) self.cooperative_reduction_workspace_cache.on_loop_end() self.body.splice(self.post_loop_store) self.indexing_code.clear() self.loads.clear() self.compute.clear() self.stores.clear() self.post_loop_combine.clear() self.post_loop_store.clear() def codegen_kernel_benchmark(self, num_gb, grid=None): result = IndentedBuffer() argdefs, call_args, signature, _ = self.args.python_argdefs() result.writelines(["", "", "def get_args():"]) with result.indent(): name_cnt = itertools.count() var_names = [] for arg_name, arg_sig in zip(call_args, signature): var_name = f"arg_{next(name_cnt)}" buf = V.graph.try_get_buffer(arg_name) if buf: result.writeline( f"{var_name} = rand_strided({V.graph.sizevars.size_hints(buf.get_size())}, {V.graph.sizevars.size_hints(buf.get_stride())}, device='{buf.get_device()}', dtype={buf.get_dtype()})" # noqa: B950 line too long ) elif arg_name in V.graph.constants: # note that random seed is put in V.graph.constants const_tensor = V.graph.constants[arg_name] result.writeline( f"{var_name} = rand_strided({V.graph.sizevars.size_hints(const_tensor.size())}, {V.graph.sizevars.size_hints(const_tensor.stride())}, device='{const_tensor.device}', dtype={const_tensor.dtype})" # type: ignore[arg-type] # noqa: B950 line too long ) elif isinstance(arg_sig, SizeArg): symval_hint = V.graph.sizevars.size_hint(arg_sig.expr) # Force the seed_offset to be 0 so calls to the same kernel # using different seed offset will have the same benchmark harness. # We can dedup kernel definitions in this case. 
if "seed_offset" in arg_sig.name: symval_hint = 0 result.writeline(f"{var_name} = {symval_hint}") elif isinstance(arg_sig, WorkspaceArg): device = V.graph.get_current_device_or_throw() count = V.graph.sizevars.size_hint(arg_sig.count) result.writeline( f"{var_name} = torch.zeros({count}, device='{device}', dtype={arg_sig.dtype})" ) else: raise KeyError( f"Don't find the buffer or const tensor for {arg_name}" ) var_names.append(var_name) result.writeline(f"return {', '.join(var_names)},") result.writelines(["\n", "\n", "def call(args):"]) if grid is None: grid = [] extra_args = [] extra_args_str = None for tree in self.active_range_trees(): expr = pexpr(V.graph.sizevars.size_hint(tree.numel)) extra_args.append(expr) if not tree.is_reduction: grid.append(expr) if self.need_numel_args(): extra_args_str = ", ".join(map(str, extra_args)) + ", " else: extra_args_str = "" grid_arg = f"{extra_args_str}grid=grid({', '.join(grid)})" else: grid_arg = f"grid={grid}" current_device = V.graph.get_current_device_or_throw() index = current_device.index with result.indent(): result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") with result.indent(): result.writeline( V.graph.device_ops.set_device(index) ) # no-op to ensure context stream_name = f"stream{index}" result.writeline(f"{stream_name} = get_raw_stream({index})") result.writeline( f"{str(Placeholder.KERNEL_NAME)}.run(*args, {grid_arg}, stream={stream_name})" ) # benchmark all configs result.writelines(["\n", "\n", "def benchmark_all_configs(args):"]) with result.indent(): result.writeline(f"with {V.graph.device_ops.device_guard(index)}:") with result.indent(): result.writeline( V.graph.device_ops.set_device(index) ) # no-op to ensure context result.writeline( f"return {str(Placeholder.KERNEL_NAME)}.benchmark_all_configs(*args, {grid_arg})" ) result.writelines(["\n", "\n", "if __name__ == '__main__':"]) with result.indent(): result.writeline( "from torch._inductor.runtime.benchmarking import benchmarker" ) result.writeline("") result.writeline("args = get_args()") result.writeline( "ms = benchmarker.benchmark_gpu(lambda: call(args), rep=40)" ) result.writeline(f"num_gb = {num_gb}") result.writeline("gb_per_s = num_gb / (ms / 1e3)") result.writeline( 'print(f"{ms:.3f}ms {num_gb:.3f}GB {gb_per_s:.2f}GB/s")' ) return result def imports_for_benchmark_kernel(self): return textwrap.dedent( """ from torch._dynamo.testing import rand_strided {} import torch from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid """.format( V.graph.device_ops.import_get_raw_stream_as("get_raw_stream") ) ) def _get_heuristic(self): if self.fixed_config: return "fixed_config" elif self.cooperative_reduction: return "cooperative_reduction" elif self.persistent_reduction: assert self.inside_reduction return "persistent_reduction" elif self.inside_reduction: return "reduction" return "pointwise" @staticmethod def inductor_meta_common(): inductor_meta = { "backend_hash": torch.utils._triton.triton_hash_with_backend(), "are_deterministic_algorithms_enabled": torch.are_deterministic_algorithms_enabled(), "assert_indirect_indexing": config.assert_indirect_indexing, "autotune_local_cache": config.autotune_local_cache, "autotune_pointwise": config.triton.autotune_pointwise, "autotune_remote_cache": config.autotune_remote_cache, "force_disable_caches": config.force_disable_caches, "dynamic_scale_rblock": config.dynamic_scale_rblock, "max_autotune": config.max_autotune, "max_autotune_pointwise": config.max_autotune_pointwise, "min_split_scan_rblock": 
config.triton.min_split_scan_rblock, "spill_threshold": config.triton.spill_threshold, "store_cubin": config.triton.store_cubin, } if torch.version.hip is not None: inductor_meta["is_hip"] = True if config.is_fbcode(): inductor_meta["is_fbcode"] = True if config.profile_bandwidth: inductor_meta["profile_bandwidth"] = config.profile_bandwidth inductor_meta["profile_bandwidth_regex"] = config.profile_bandwidth_regex inductor_meta["profile_bandwidth_output"] = config.profile_bandwidth_output inductor_meta[ "profile_bandwidth_with_do_bench_using_profiling" ] = config.profile_bandwidth_with_do_bench_using_profiling if config.coordinate_descent_tuning: inductor_meta[ "coordinate_descent_tuning" ] = config.coordinate_descent_tuning inductor_meta[ "coordinate_descent_search_radius" ] = config.coordinate_descent_search_radius inductor_meta[ "coordinate_descent_check_all_directions" ] = config.coordinate_descent_check_all_directions return inductor_meta def codegen_kernel(self, name=None): code = IndentedBuffer() size_hints = [] for numel in self.numels.values(): numel_hint = V.graph.sizevars.symbolic_hint(numel) if not isinstance(numel_hint, (int, sympy.Integer)): # This default heuristic hint was picked carefully: it is # large, to ensure that we don't shrink the block size (since # if you don't have many elements, it'd be wasteful to pick a # large block size). Since we don't know how many elements we # might have, we should be OK with some inefficiency to make # sure we handle the large case well. 8192 is the largest # block size we support, so we pick that. # # If we have a better hint for unbacked SymInts (e.g., because # a user told us, or we are tracking upper bounds) we could # use that here. size_hint = 8192 else: size_hint = next_power_of_2(int(numel_hint)) size_hints.append(size_hint) if not self.inside_reduction: size_hints.pop() if name is None: code.splice(gen_common_triton_imports()) device_type = V.graph.get_current_device_or_throw().type if device_type == "cpu": code.splice("triton_helpers.set_driver_to_cpu()") else: code.splice("triton_helpers.set_driver_to_gpu()") if config.benchmark_kernel: code.splice(self.imports_for_benchmark_kernel()) argdefs, _, signature, _ = self.args.python_argdefs() # maps actual expression to SizeArg if it is in sizevars replacements for i, arg in enumerate(signature): if isinstance(arg, SizeArg): # mypy is unhappy about the sympy.Expr # type for the key of the dict below symbol = cast(sympy.Symbol, arg.expr) if symbol in V.graph.sizevars.inv_precomputed_replacements: signature[i] = SizeArg( arg.name, V.graph.sizevars.inv_precomputed_replacements[symbol] ) mutated_args: OrderedSet[str] = OrderedSet() for mutation in self.mutations: if mutation in self.args.input_buffers: mutated_args.add(self.args.input_buffers[mutation]) if ( mutation in self.args.inplace_buffers and mutation not in V.graph.removed_buffers and mutation not in self.removed_buffers ): mutated_args.add(self.args.inplace_buffers[mutation].inner_name) if mutation in self.args.output_buffers: mutated_args.add(self.args.output_buffers[mutation]) # Note: [Workspace Mutation] # workspace arguments are mutated, but are not marked as mutations in self.mutations # because their buffers are added during codegen, and aren't tracked during # lowering/scheduling. So we add them as mutated_args explicitly below. 
# # In the logic below, we only mark the workspaces a mutated if they are marked with # zero_fill: that's because, if we don't expect the buffer to be pre-filled with # zeros, then, although we still mutate the data, we don't care about those # mutations because we don't make any assumptions about the contents of the # workspace buffer. Similarly, ZERO_PER_GRAPH requires the kernel to return # the buffer back to its original state. for argname, arg in zip(argdefs, signature): if ( isinstance(arg, WorkspaceArg) and arg.zero_mode == WorkspaceZeroMode.ZERO_ON_CALL ): mutated_args.add(argname) mutated_args = sorted(mutated_args) triton_meta_signature = signature_to_meta( signature, size_dtype=self.index_dtype, argdefs=argdefs ) triton_meta = { "signature": triton_meta_signature, "device": DeviceProperties.create(V.graph.get_current_device_or_throw()), "constants": {}, } # Skip memory optimization for forward of the training loop where we expect # every new node will increase the peak memory and our greedy approach would # introduce a lot of unnecessary cpu copies. optimize_mem = V.graph.is_inference or V.graph.is_backward inductor_meta = { "autotune_hints": set(self.autotune_hints), "kernel_name": str(Placeholder.DESCRIPTIVE_NAME), "mutated_arg_names": mutated_args, "optimize_mem": optimize_mem, "no_x_dim": self.no_x_dim, "num_load": self.num_load, "num_reduction": self.num_reduction, **self.inductor_meta_common(), } if self.cooperative_reduction: inductor_meta["persistent_reduction"] = self.persistent_reduction num_gb = None if config.benchmark_kernel or config.profile_bandwidth: num_gb = self.estimate_kernel_num_bytes() / 1e9 inductor_meta["kernel_num_gb"] = num_gb for tree in self.active_range_trees(): sizearg = SizeArg(f"{tree.prefix}numel", tree.numel) signature.append(sizearg) triton_meta_signature[sizearg.name] = signature_of( sizearg, size_dtype=self.index_dtype ) argdefs.append(f"{tree.prefix}numel") # constexpr version causes issues, see # https://github.com/pytorch/torchdynamo/pull/1362 # triton_meta["constants"][len(argdefs)] = V.graph.sizevars.size_hint( # tree.numel # ) # argdefs.append(f"{tree.prefix}numel: tl.constexpr") triton_meta["configs"] = [config_of(signature)] # Triton compiler includes equal_to_1 args into constants even # when they are not constexpr. otherwise there may be a segfault # during launching the Inductor-compiled Triton kernel. 
# https://github.com/pytorch/pytorch/issues/120478#issuecomment-1962822307 # https://github.com/openai/triton/blob/231efe9ed2d200be0f69a07c298e4342b08efe3d/python/triton/runtime/jit.py#L384 for arg_num in triton_meta["configs"][0].equal_to_1: # type: ignore[index] triton_meta["constants"][signature[arg_num].name] = 1 # type: ignore[index] self.triton_meta = triton_meta for tree in self.range_trees: if tree.is_reduction and self.persistent_reduction: # RBLOCK for persistent_reduction is defined in codegen_static_numels continue if tree.tensor_dim is None: continue argdefs.append(f"{tree.prefix.upper()}BLOCK : tl.constexpr") if self.cooperative_reduction: argdefs.append("RSPLIT : tl.constexpr") self.codegen_body() for helper in self.helper_functions: code.writeline("") code.splice(helper) if self.fixed_config: heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( config={self.fixed_config.config!r}, filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r} ) @triton.jit """ elif self.inside_reduction: reduction_hint = self.features.get_reduction_hint() heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( size_hints={size_hints!r}, reduction_hint={reduction_hint}, filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r} ) @triton.jit """ else: tile_hint = "" if len(size_hints) == 2: if len(signature) == 4: # input, output and 2 args tile_hint = "tile_hint=TileHint.SQUARE," else: tile_hint = "tile_hint=TileHint.DEFAULT," heuristics_line = f""" @triton_heuristics.{self._get_heuristic()}( size_hints={size_hints!r}, {tile_hint} filename=__file__, triton_meta={triton_meta!r}, inductor_meta={inductor_meta!r}, min_elem_per_thread={self.min_elem_per_thread} ) @triton.jit """ code.splice(heuristics_line) code.writeline( f"def {name or str(Placeholder.KERNEL_NAME)}({', '.join(argdefs)}):" ) with code.indent(): self.codegen_static_numels(code) for old, new in self.args.aliases(): code.writeline(f"{old} = {new}") code.splice(self.body) if config.benchmark_kernel: code.splice(self.codegen_kernel_benchmark(num_gb)) return code.getvalue() @staticmethod def _get_persistent_RBLOCK(rnumel): rnumel = V.graph.sizevars.simplify(rnumel) if isinstance(rnumel, (sympy.Integer, int)): val = int(rnumel) val = next_power_of_2(val) else: val = 128 while not V.graph.sizevars.statically_known_leq(rnumel, val): if val > 16 * 1024: raise ValueError(f"Failed to find static RBLOCK for {rnumel}") val *= 2 return val @staticmethod def has_persistent_RBLOCK(rnumel): try: TritonKernel._get_persistent_RBLOCK(rnumel) return True except ValueError: return False def codegen_static_numels(self, code): """ We get a small speedup from hard coding numels if they are static. This code stomps on the passed-in values by writing an constant to the top of the kernel. In a kernel like: def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): We would add xnumel = 4096 rnumel = 768 After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream knows that its a static numel, as that you just plop a constant into the kernel. 
""" for tree in self.range_trees: if not tree.is_reduction or self.inside_reduction: simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) if isinstance(simplified_tree_numel, (sympy.Integer, int)): code.writeline(f"{tree.prefix}numel = {int(simplified_tree_numel)}") if tree.is_reduction and self.persistent_reduction: val = self._get_persistent_RBLOCK(tree.numel) if self.cooperative_reduction: val = f"{val} // RSPLIT" code.writeline(f"RBLOCK: tl.constexpr = {val}") if tree.prefix == "x" and self.no_x_dim: code.writeline("XBLOCK: tl.constexpr = 1") def _get_grid_fn_str(self): return self._get_grid_fn().__name__ def _get_grid_fn(self): if self.cooperative_reduction: return cooperative_reduction_grid return default_grid_fn def add_numel_to_call_args_and_grid(self, name, call_args, arg_types, grid): # TODO(jansel): if there are constants, we shouldn't bother passing them as args for tree in self.range_trees: if isinstance(tree.numel, (sympy.Integer, sympy.Symbol)): expr = tree.numel else: expr = V.graph.wrapper_code.generate_numel_expr(name, tree) if not tree.is_reduction or self.inside_reduction: call_args.append(expr) arg_types.append(type(expr)) if tree.grid_dim is not None: grid.append(expr) def call_kernel(self, name: str, node: Optional[IRNode] = None): wrapper = V.graph.wrapper_code wrapper.write_triton_header_once() _, call_args, _, arg_types = self.args.python_argdefs() grid: List[Any] = [] self.add_numel_to_call_args_and_grid(name, call_args, arg_types, grid) current_device = V.graph.get_current_device_or_throw() for ws in self.args.workspace_args: wrapper.generate_workspace_allocation(ws) grid = wrapper.generate_default_grid( name, grid, grid_callable=self._get_grid_fn() ) wrapper.generate_kernel_call( name, call_args, grid, current_device.index, gpu=current_device.type != "cpu", triton=True, arg_types=arg_types, grid_fn=self._get_grid_fn_str(), triton_meta=self.triton_meta, ) for ws in reversed(self.args.workspace_args): wrapper.generate_workspace_deallocation(ws) def codegen_nan_check(self): wrapper = V.graph.wrapper_code _, call_args, arg_signatures, _ = self.args.python_argdefs() for arg, arg_signature in zip(call_args, arg_signatures): if isinstance(arg_signature, TensorArg): if V.graph.cpp_wrapper: wrapper.writeline( f'AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_check_inf_and_nan("{arg}", {arg}));' ) else: line = f"assert not {arg}.isnan().any().item()" wrapper.writeline(line) line = f"assert not {arg}.isinf().any().item()" wrapper.writeline(line) def create_cse_var(self, *args, **kwargs): return TritonCSEVariable(*args, **kwargs) def codegen_iteration_ranges_entry(self, entry: IterationRangesEntry): line = f"{entry.name} = {self.kexpr(self.rename_indexing(entry.expr))}" if entry.root.is_loop: self.indexing_code.writeline(line) else: # lift non-reduction stores outside loop self.body.writeline(line) def iteration_ranges_ranges_code(self, entry): assert entry.tensor_dim is not None size = self.indexing_size_str(entry.tensor_dim) index_dtype = self.index_dtype suffix = f".to({index_dtype})" if index_dtype != "tl.int32" else "" if ( self.cooperative_reduction and self.persistent_reduction and entry.is_reduction ): suffix = f"{suffix} + rsplit_start" return f"tl.arange(0, {entry.prefix.upper()}BLOCK){size}{suffix}" def iteration_ranges_scalar_code(self, entry, value): index_dtype = self.index_dtype ndim = self.triton_tensor_ndim() size = [1] * ndim return f"tl.full({size}, {value}, {index_dtype})" def iteration_ranges_get_pid(self, entry): assert entry.grid_dim is not None key 
= f"tl.program_id({entry.grid_dim})" # y_grid has a limit, so express it in terms of y and z in case of overflow. # z grid is only exercised when max_tiles == 3 (off by default). if ( entry.grid_dim == 1 and not entry.has_zdim and not self.cooperative_reduction and not V.graph.sizevars.statically_known_leq(entry.numel, get_max_y_grid()) ): # For ynumel larger than max_ygrid, we need to use zdim. # For each z dimension, there are tl.num_programs(1) yblocks which is passed by grad(x,y,z). # So, we need to add tl.program_id(z) * tl.num_programs(y) *YBLOCK to get the correct yoffset. key = f"({key} + tl.program_id({entry.grid_dim + 1}) * tl.num_programs({entry.grid_dim}))" pid = entry.pid_cache.get(key, key) if self.index_dtype != "tl.int32": return f"{pid}.to({self.index_dtype})" return pid def max_block(self, prefix): if self.fixed_config: return self.fixed_config[f"{prefix.upper()}BLOCK"] return TRITON_MAX_BLOCK[prefix.upper()] def _has_constant_mask(self, tree: IterationRangesRoot): if not self.optimize_mask: return False if V.graph.sizevars.statically_known_equals(tree.numel, 1): # type: ignore[arg-type] return True # Masks are superfluous if numel is a multiple of BLOCK # (We use the fact that BLOCK is required by triton to be a power of 2) if tree.is_reduction and self.persistent_reduction: max_block = self._get_persistent_RBLOCK(tree.numel) elif tree.prefix == "x" and self.no_x_dim: max_block = 1 else: max_block = self.max_block(tree.prefix) if tree.is_reduction and self.cooperative_reduction: max_block = max_block * self.max_rsplit() # Optional optimization: if block divides numel exactly, we will # never need to do a masked load to handle stragglers at the end. # If this tree is for the y dimension, we should only use a constant # mask if it can be guaranteed that: # 1. (ynumel / YBLOCK) < max_ygrid or # 2. (ynumel / YBLOCK) % max_ygrid == 0 # Because YBLOCK is not constant, use a conservative heuristic: # only use a constant mask if ynumel < max_ygrid. # It's faster to avoid masking at all. But it is sound to always # mask. 
if V.graph.sizevars.statically_known_multiple_of(tree.numel, max_block): return ( tree.grid_dim != 1 or tree.has_zdim or V.graph.sizevars.statically_known_leq(tree.numel, get_max_y_grid()) ) return False def filter_masks(self, mask_vars): for tree in self.range_trees: if self._has_constant_mask(tree): mask_vars.discard(f"{tree.prefix}mask") def iteration_ranges_codegen_header(self, entry, code): x = entry.prefix if entry.is_loop: code.writeline(f"{entry.name} = {x}offset + {x}base") elif entry.grid_dim is None: # no need to "{x}offset = " code.writeline(f"{entry.name} = {self.iteration_ranges_ranges_code(entry)}") code.writeline(f"{x}offset = 0") else: if entry.tensor_dim is not None: line = f"{x}offset + {self.iteration_ranges_ranges_code(entry)}" else: line = self.iteration_ranges_scalar_code(entry, f"{x}offset") code.writelines( [ f"{x}offset = {self.iteration_ranges_get_pid(entry)} * {x.upper()}BLOCK", f"{entry.name} = {line}", ] ) if self._has_constant_mask(entry): sizes = self.dense_size_str() code.writeline(f"{x}mask = tl.full({sizes}, True, tl.int1)") else: code.writeline(f"{x}mask = {entry.name} < {x}numel") class TritonScheduling(SIMDScheduling): kernel_type: Type[Any] = TritonKernel backend_features = dict.fromkeys( # dict for deterministic order [ BackendFeature.FOREACH, BackendFeature.BUCKETIZE, BackendFeature.INPLACE_BUFFERS, BackendFeature.MASKED_SCATTER_WITH_INDEX, BackendFeature.SCAN, BackendFeature.TRITON_TEMPLATES, ] ) if torch.version.hip is None: backend_features.update( dict.fromkeys( [ # TODO: Move this above when ROCm triton adds support for multiple inputs BackendFeature.TUPLE_REDUCTION, BackendFeature.SORT, ] ) ) def __init__(self, scheduler: Scheduler) -> None: super().__init__(scheduler) if scheduler is None or not hasattr(scheduler, "nodes"): return for node in scheduler.nodes: if isinstance(node, (SchedulerNode, FusedSchedulerNode)): node.debug_device_str = debug_triton_code @classmethod def get_backend_features(cls, device: torch.device): if ( config.triton.cooperative_reductions or config.triton.force_cooperative_reductions ): return { **cls.backend_features, BackendFeature.REDUCE_TO_SINGLE_ELEMENT: None, } return cls.backend_features def codegen_comment(self, node_schedule): wrapper = V.graph.wrapper_code origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) if origins: wrapper.writeline(origins) if config.debug_fusion: from torch._inductor.scheduler import ( BaseSchedulerNode, ForeachKernelSchedulerNode, ) if not any( isinstance(n, ForeachKernelSchedulerNode) for n in node_schedule ): # We probably should look what are the nodes inside a foreach # schedule node node_names = [ n.get_name() for n in node_schedule if isinstance(n, BaseSchedulerNode) ] wrapper.writeline( f"{wrapper.comment} Fused node name list: {', '.join(node_names)}" ) def define_kernel(self, src_code, node_schedule, kernel): wrapper = V.graph.wrapper_code if src_code in wrapper.src_to_kernel: kernel_name = wrapper.src_to_kernel[src_code] else: fused_name = ( get_fused_kernel_name(node_schedule, config.triton.descriptive_names) if config.triton.descriptive_names else "" ) kernel_category = get_kernel_category_by_source_code(src_code)[:3] kernel_name = "_".join( ["triton", kernel_category, fused_name, wrapper.next_kernel_suffix()] ) # use the original src_code as the key wrapper.src_to_kernel[src_code] = kernel_name subs_name = kernel_name if config.triton.unique_kernel_names else "triton_" # DESCRIPTIVE_NAME is used for profiling purposes; it shows the full kernel name # 
even when unique_kernel_names is turned off. Meanwhile, KERNEL_NAME is sometimes set # to "triton_" to maximize caching opportunities (when unique_kernel_names = False). src_code = src_code.replace(str(Placeholder.DESCRIPTIVE_NAME), kernel_name) src_code = src_code.replace(str(Placeholder.KERNEL_NAME), subs_name) # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. src_code = src_code.replace("#pragma CMT", "#") basename, _, kernel_path = get_path(code_hash(src_code.strip()), "py") compile_wrapper = IndentedBuffer() compile_wrapper.writeline(f"async_compile.triton({subs_name!r}, '''") compile_wrapper.splice(src_code, strip=True) current_device = V.graph.get_current_device_or_throw() compile_wrapper.writeline(f"''', device_str='{current_device.type}')") metadata_comment = f"# kernel path: {kernel_path}" origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) metadata_comment += "\n" + origins + "\n" + detailed_origins wrapper.define_kernel( kernel_name, compile_wrapper.getvalue(), metadata_comment ) # log kernel metadata for offline analysis. # E.g. one can find all unaligned inner reduction and check if # padding helps with the perf kernel by kernel. if metrics.is_metric_table_enabled("kernel_metadata"): metrics.log_kernel_metadata(kernel_name, kernel_path, src_code) return kernel_name def benchmark_fused_nodes(self, nodes): with preserve_rng_state(), torch.cuda.device( V.graph.get_current_device_or_throw() ): src_code = self.generate_kernel_code_from_nodes( nodes, benchmark_kernel=True ) mod = PyCodeCache.load(src_code) def cache_file_path(): assert mod.__file__ is not None return os.path.splitext(mod.__file__)[0] + ".kernel_perf" def load_cache(): path = cache_file_path() if os.path.exists(path): with open(path) as fd: return float(fd.read()) return None def store_cache(): path = cache_file_path() with open(path, "w") as fd: fd.write(str(ms)) log.debug( "kernel src code for %s written to: %s", {n.get_name() for n in nodes}, mod.__file__, ) ms = load_cache() if ms is not None: return ms, mod.__file__ args = mod.get_args() call = mod.call wrapped_jit_function = mod.triton_ # call once to trigger the compilation try: call(wrapped_jit_function.clone_args(*args)[0]) except Exception as e: log.debug( "Exception (%s) in compiling fused nodes %s", e, {n.get_name() for n in nodes}, ) ms = float("inf") store_cache() return ms, mod.__file__ launchers = wrapped_jit_function.launchers assert len(launchers) == 1 if launchers[0].n_spills > 0: # skip benchmarking the kernel if there are register spills ms = float("inf") else: # We have to clone the inplace updated arguments to avoid earlier calls # generating out of range indices for later calls. 
ms = benchmarker.benchmark_gpu( lambda: call(wrapped_jit_function.clone_args(*args)[0]) ) # overhead of cloning args gives bias for fusing the kernel # in the case of mutating/in-placeable second fusion # TODO - would be better as a hook in triton do_bench that reset # the input values between benchmarking if len(wrapped_jit_function.mutated_arg_names) > 0: ms = ms - benchmarker.benchmark_gpu( lambda: wrapped_jit_function.clone_args(*args) ) log.debug( "The fused kernel for %s took %.3f ms to run", {n.get_name() for n in nodes}, ms, ) store_cache() return ms, mod.__file__ def create_kernel_choices( self, kernel_features, kernel_args, kernel_kwargs ) -> List[SIMDKernel]: is_scan = kernel_features.contains_op("scan") is_split_scan = is_scan and any( node.is_split_scan() for node in kernel_features.scheduler_nodes() ) kernel_type: Type[TritonKernel] = self.kernel_type if is_split_scan: from .triton_split_scan import TritonSplitScanKernel kernel_type = TritonSplitScanKernel if is_scan: # TODO(jansel): scan does not yet work with cooperative reductions kernel_kwargs["override_cooperative_reduction"] = False # ops.sort only works with persistent reduction, and is not bandwidth bound anyway # so taking the hit of non-coalesced loads is okay if kernel_features.contains_op("sort"): kernel_kwargs["override_persistent_reduction"] = True kernel_kwargs["override_cooperative_reduction"] = False if not TritonKernel.has_persistent_RBLOCK(kernel_features.reduction_numel): # Cannot use persistent reduction with unknown dynamic rnumel assert not kernel_kwargs.get("override_persistent_reduction") kernel_kwargs["override_persistent_reduction"] = False kernel_kwargs = V.choices.triton_kernel_kwargs( kernel_type, kernel_features, kernel_args, kernel_kwargs ) kernel = kernel_type(*kernel_args, **kernel_kwargs) return self.add_multi_kernel_choices(kernel, kernel_args, kernel_kwargs) def add_multi_kernel_choices( self, kernel: SIMDKernel, kernel_args: List[Any], kernel_kwargs: Dict[str, Any], ) -> List[SIMDKernel]: kernels: List[SIMDKernel] = [kernel] if not config.triton.multi_kernel: return kernels optional_persistent = kernel.persistent_reduction and not kernel_kwargs.get( "override_persistent_reduction" ) optional_cooperative = kernel.cooperative_reduction and not kernel_kwargs.get( "override_cooperative_reduction" ) if optional_persistent: kernels.append( self.kernel_type( *kernel_args, **kernel_kwargs, override_persistent_reduction=False, ) ) if optional_cooperative: rnumel = kernel.numels["r"] # for larger sizes non-cooperative gets very slow if V.graph.sizevars.statically_known_leq(rnumel, 65536): kernels.append( other := self.kernel_type( *kernel_args, **kernel_kwargs, override_cooperative_reduction=False, ) ) if optional_persistent and other.persistent_reduction: kernels.append( self.kernel_type( *kernel_args, **kernel_kwargs, override_cooperative_reduction=False, override_persistent_reduction=False, ) ) if len(kernels) > 1: for kernel2 in kernels[1:]: # Keep buffers needed by the non-persistent reduction so both kernels have the same arguments kernel2.must_keep_buffers = kernel.must_keep_buffers # persistent kernels must be generated last so must_keep_buffers works right kernels.sort(key=lambda k: k.persistent_reduction) return kernels def benchmark_combo_kernel(self, node_list): def cache_file_path(): assert mod.__file__ is not None return os.path.splitext(mod.__file__)[0] + ".kernel_perf" def load_cache(): path = cache_file_path() if os.path.exists(path): with open(path) as fd: return tuple(float(e) 
for e in fd.read().split()) return (None, None) def store_cache(): path = cache_file_path() with open(path, "w") as fd: fd.write(str(ms) + " " + str(ms_clone)) total_ms, file_list = 0, [] total_clone_ms = 0 removed_buffers_orig = V.graph.removed_buffers V.graph.removed_buffers = OrderedSet(removed_buffers_orig) inplaced_to_remove_orig = V.graph.inplaced_to_remove V.graph.inplaced_to_remove = OrderedSet(inplaced_to_remove_orig) enable_autotune = config.combo_kernels_autotune > 0 mixed_sizes = config.combo_kernel_allow_mixed_sizes > 0 kernel_code_list = self.generate_combo_kernel_code( subkernel_nodes=node_list, custom_part_algorithm=True, enable_autotune=enable_autotune, mixed_sizes=mixed_sizes, only_gen_src_code=True, ) for src_code, _, node_group in kernel_code_list: fused_node_lists = [node.get_nodes() for node in node_group] names = [n.get_name() for nodes in fused_node_lists for n in nodes] src_code = src_code.replace(str(Placeholder.KERNEL_NAME), "triton_") mod = PyCodeCache.load(src_code) log.debug( "kernel src code for %s written to: %s", names, mod.__file__, ) ms, ms_clone = load_cache() if ms is not None: total_ms += ms total_clone_ms += ms_clone file_list.append(mod.__file__) continue args = mod.get_args() call = mod.call wrapped_jit_function = mod.triton_ # call once to trigger the compilation call(wrapped_jit_function.clone_args(*args)[0]) launchers = wrapped_jit_function.launchers assert len(launchers) == 1 if launchers[0].n_spills > 0: # skip benchmarking the kernel if there are register spills ms = ms_clone = float("inf") else: # We have to clone the inplace updated arguments to avoid earlier calls # generating out of range indices for later calls. ms = benchmarker.benchmark_gpu( lambda: call(wrapped_jit_function.clone_args(*args)[0]) ) ms_clone = benchmarker.benchmark_gpu( lambda: wrapped_jit_function.clone_args(*args)[0] ) log.debug( "The fused kernel for %s took %.3f ms to run, %.3f ms to clone inputs", {n.get_name() for n in node_group}, ms, ms_clone, ) store_cache() total_ms += ms total_clone_ms += ms_clone file_list.append(mod.__file__) V.graph.removed_buffers = removed_buffers_orig V.graph.inplaced_to_remove = inplaced_to_remove_orig return total_ms, total_clone_ms, file_list def debug_triton_code(node: BaseSchedulerNode) -> List[str]: lines = [] multi_template = node.get_template_node() assert multi_template is None or isinstance(multi_template, ir.MultiTemplateBuffer) if multi_template and multi_template.make_kernel_render is None: lines.append(f"{node.get_name()} Unfinalized multi template buffer") else: from torch._inductor.codegen.cuda_combined_scheduling import ( CUDACombinedScheduling, ) device = node.get_device() assert device is not None backend = node.scheduler.get_backend(device) assert isinstance( backend, (SIMDScheduling, CUDACombinedScheduling) ), f"Scheduling backend should be SIMD or CUDACombined when generating debug Triton strings, got: {type(backend)}" with V.graph.set_current_device(device): # Don't increment kernel count when generating debug string. # This will confuse some unit tests that check the number of # generated kernels. old_generated_kernel_count = metrics.generated_kernel_count triton_code = backend.generate_kernel_code_from_nodes( node.get_nodes() ).strip() metrics.generated_kernel_count = old_generated_kernel_count lines.append(f"{node.get_name()} Triton code:") lines.append(textwrap.indent(triton_code, " ")) return lines
@triton.jit """ code.splice(heuristics_line) code.writeline( f"def {name or str(Placeholder.KERNEL_NAME)}({', '.join(argdefs)}):" ) with code.indent(): self.codegen_static_numels(code) for old, new in self.args.aliases(): code.writeline(f"{old} = {new}") code.splice(self.body) if config.benchmark_kernel: code.splice(self.codegen_kernel_benchmark(num_gb)) return code.getvalue() @staticmethod def _get_persistent_RBLOCK(rnumel): rnumel = V.graph.sizevars.simplify(rnumel) if isinstance(rnumel, (sympy.Integer, int)): val = int(rnumel) val = next_power_of_2(val) else: val = 128 while not V.graph.sizevars.statically_known_leq(rnumel, val): if val > 16 * 1024: raise ValueError(f"Failed to find static RBLOCK for {rnumel}") val *= 2 return val @staticmethod def has_persistent_RBLOCK(rnumel): try: TritonKernel._get_persistent_RBLOCK(rnumel) return True except ValueError: return False def codegen_static_numels(self, code): """ We get a small speedup from hard coding numels if they are static. This code stomps on the passed-in values by writing an constant to the top of the kernel. In a kernel like: def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): We would add xnumel = 4096 rnumel = 768 After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream knows that its a static numel, as that you just plop a constant into the kernel. """ for tree in self.range_trees: if not tree.is_reduction or self.inside_reduction: simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) if isinstance(simplified_tree_numel, (sympy.Integer, int)): code.writeline(f"{tree.prefix}numel = {int(simplified_tree_numel)}") if tree.is_reduction and self.persistent_reduction: val = self._get_persistent_RBLOCK(tree.numel) if self.cooperative_reduction: val = f"{val} // RSPLIT" code.writeline(f"RBLOCK: tl.constexpr = {val}") if tree.prefix == "x" and self.no_x_dim: code.writeline("XBLOCK: tl.constexpr = 1") def _get_grid_fn_str(self): return self._get_grid_fn().__name__ def _get_grid_fn(self): if self.cooperative_reduction: return cooperative_reduction_grid return default_grid_fn def add_numel_to_call_args_and_grid(self, name, call_args, arg_types, grid): # TODO(jansel): if there are constants, we shouldn't bother passing them as args for tree in self.range_trees: if isinstance(tree.numel, (sympy.Integer, sympy.Symbol)): expr = tree.numel else: expr = V.graph.wrapper_code.generate_numel_expr(name, tree) if not tree.is_reduction or self.inside_reduction: call_args.append(expr) arg_types.append(type(expr)) if tree.grid_dim is not None: grid.append(expr) def call_kernel(self, name: str, node: Optional[IRNode] = None): wrapper = V.graph.wrapper_code wrapper.write_triton_header_once() _, call_args, _, arg_types = self.args.python_argdefs() grid: List[Any] = [] self.add_numel_to_call_args_and_grid(name, call_args, arg_types, grid) current_device = V.graph.get_current_device_or_throw() for ws in self.args.workspace_args: wrapper.generate_workspace_allocation(ws) grid = wrapper.generate_default_grid( name, grid, grid_callable=self._get_grid_fn() ) wrapper.generate_kernel_call( name, call_args, grid, current_device.index, gpu=current_device.type != "cpu", triton=True, arg_types=arg_types, grid_fn=self._get_grid_fn_str(), triton_meta=self.triton_meta, ) for ws in reversed(self.args.workspace_args): 
wrapper.generate_workspace_deallocation(ws) def codegen_nan_check(self): wrapper = V.graph.wrapper_code _, call_args, arg_signatures, _ = self.args.python_argdefs() for arg, arg_signature in zip(call_args, arg_signatures): if isinstance(arg_signature, TensorArg): if V.graph.cpp_wrapper: wrapper.writeline( f'AOTI_TORCH_ERROR_CODE_CHECK(aoti_torch_check_inf_and_nan("{arg}", {arg}));' ) else: line = f"assert not {arg}.isnan().any().item()" wrapper.writeline(line) line = f"assert not {arg}.isinf().any().item()" wrapper.writeline(line) def create_cse_var(self, *args, **kwargs): return TritonCSEVariable(*args, **kwargs) def codegen_iteration_ranges_entry(self, entry: IterationRangesEntry): line = f"{entry.name} = {self.kexpr(self.rename_indexing(entry.expr))}" if entry.root.is_loop: self.indexing_code.writeline(line) else: # lift non-reduction stores outside loop self.body.writeline(line) def iteration_ranges_ranges_code(self, entry): assert entry.tensor_dim is not None size = self.indexing_size_str(entry.tensor_dim) index_dtype = self.index_dtype suffix = f".to({index_dtype})" if index_dtype != "tl.int32" else "" if ( self.cooperative_reduction and self.persistent_reduction and entry.is_reduction ): suffix = f"{suffix} + rsplit_start" return f"tl.arange(0, {entry.prefix.upper()}BLOCK){size}{suffix}" def iteration_ranges_scalar_code(self, entry, value): index_dtype = self.index_dtype ndim = self.triton_tensor_ndim() size = [1] * ndim return f"tl.full({size}, {value}, {index_dtype})" def iteration_ranges_get_pid(self, entry): assert entry.grid_dim is not None key = f"tl.program_id({entry.grid_dim})" # y_grid has a limit, so express it in terms of y and z in case of overflow. # z grid is only exercised when max_tiles == 3 (off by default). if ( entry.grid_dim == 1 and not entry.has_zdim and not self.cooperative_reduction and not V.graph.sizevars.statically_known_leq(entry.numel, get_max_y_grid()) ): # For ynumel larger than max_ygrid, we need to use zdim. # For each z dimension, there are tl.num_programs(1) yblocks which is passed by grad(x,y,z). # So, we need to add tl.program_id(z) * tl.num_programs(y) *YBLOCK to get the correct yoffset. key = f"({key} + tl.program_id({entry.grid_dim + 1}) * tl.num_programs({entry.grid_dim}))" pid = entry.pid_cache.get(key, key) if self.index_dtype != "tl.int32": return f"{pid}.to({self.index_dtype})" return pid def max_block(self, prefix): if self.fixed_config: return self.fixed_config[f"{prefix.upper()}BLOCK"] return TRITON_MAX_BLOCK[prefix.upper()] def _has_constant_mask(self, tree: IterationRangesRoot): if not self.optimize_mask: return False if V.graph.sizevars.statically_known_equals(tree.numel, 1): # type: ignore[arg-type] return True # Masks are superfluous if numel is a multiple of BLOCK # (We use the fact that BLOCK is required by triton to be a power of 2) if tree.is_reduction and self.persistent_reduction: max_block = self._get_persistent_RBLOCK(tree.numel) elif tree.prefix == "x" and self.no_x_dim: max_block = 1 else: max_block = self.max_block(tree.prefix) if tree.is_reduction and self.cooperative_reduction: max_block = max_block * self.max_rsplit() # Optional optimization: if block divides numel exactly, we will # never need to do a masked load to handle stragglers at the end. # If this tree is for the y dimension, we should only use a constant # mask if it can be guaranteed that: # 1. (ynumel / YBLOCK) < max_ygrid or # 2. 
(ynumel / YBLOCK) % max_ygrid == 0 # Because YBLOCK is not constant, use a conservative heuristic: # only use a constant mask if ynumel < max_ygrid. # It's faster to avoid masking at all. But it is sound to always # mask. if V.graph.sizevars.statically_known_multiple_of(tree.numel, max_block): return ( tree.grid_dim != 1 or tree.has_zdim or V.graph.sizevars.statically_known_leq(tree.numel, get_max_y_grid()) ) return False def filter_masks(self, mask_vars): for tree in self.range_trees: if self._has_constant_mask(tree): mask_vars.discard(f"{tree.prefix}mask") def iteration_ranges_codegen_header(self, entry, code): x = entry.prefix if entry.is_loop: code.writeline(f"{entry.name} = {x}offset + {x}base") elif entry.grid_dim is None: # no need to "{x}offset = " code.writeline(f"{entry.name} = {self.iteration_ranges_ranges_code(entry)}") code.writeline(f"{x}offset = 0") else: if entry.tensor_dim is not None: line = f"{x}offset + {self.iteration_ranges_ranges_code(entry)}" else: line = self.iteration_ranges_scalar_code(entry, f"{x}offset") code.writelines( [ f"{x}offset = {self.iteration_ranges_get_pid(entry)} * {x.upper()}BLOCK", f"{entry.name} = {line}", ] ) if self._has_constant_mask(entry): sizes = self.dense_size_str() code.writeline(f"{x}mask = tl.full({sizes}, True, tl.int1)") else: code.writeline(f"{x}mask = {entry.name} < {x}numel") class TritonScheduling(SIMDScheduling): kernel_type: Type[Any] = TritonKernel backend_features = dict.fromkeys( # dict for deterministic order [ BackendFeature.FOREACH, BackendFeature.BUCKETIZE, BackendFeature.INPLACE_BUFFERS, BackendFeature.MASKED_SCATTER_WITH_INDEX, BackendFeature.SCAN, BackendFeature.TRITON_TEMPLATES, ] ) if torch.version.hip is None: backend_features.update( dict.fromkeys( [ # TODO: Move this above when ROCm triton adds support for multiple inputs BackendFeature.TUPLE_REDUCTION, BackendFeature.SORT, ] ) ) def __init__(self, scheduler: Scheduler) -> None: super().__init__(scheduler) if scheduler is None or not hasattr(scheduler, "nodes"): return for node in scheduler.nodes: if isinstance(node, (SchedulerNode, FusedSchedulerNode)): node.debug_device_str = debug_triton_code @classmethod def get_backend_features(cls, device: torch.device): if ( config.triton.cooperative_reductions or config.triton.force_cooperative_reductions ): return { **cls.backend_features, BackendFeature.REDUCE_TO_SINGLE_ELEMENT: None, } return cls.backend_features def codegen_comment(self, node_schedule): wrapper = V.graph.wrapper_code origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) if origins: wrapper.writeline(origins) if config.debug_fusion: from torch._inductor.scheduler import ( BaseSchedulerNode, ForeachKernelSchedulerNode, ) if not any( isinstance(n, ForeachKernelSchedulerNode) for n in node_schedule ): # We probably should look what are the nodes inside a foreach # schedule node node_names = [ n.get_name() for n in node_schedule if isinstance(n, BaseSchedulerNode) ] wrapper.writeline( f"{wrapper.comment} Fused node name list: {', '.join(node_names)}" ) def define_kernel(self, src_code, node_schedule, kernel): wrapper = V.graph.wrapper_code if src_code in wrapper.src_to_kernel: kernel_name = wrapper.src_to_kernel[src_code] else: fused_name = ( get_fused_kernel_name(node_schedule, config.triton.descriptive_names) if config.triton.descriptive_names else "" ) kernel_category = get_kernel_category_by_source_code(src_code)[:3] kernel_name = "_".join( ["triton", kernel_category, fused_name, wrapper.next_kernel_suffix()] ) # use the original 
src_code as the key wrapper.src_to_kernel[src_code] = kernel_name subs_name = kernel_name if config.triton.unique_kernel_names else "triton_" # DESCRIPTIVE_NAME is used for profiling purposes; it shows the full kernel name # even when unique_kernel_names is turned off. Meanwhile, KERNEL_NAME is sometimes set # to "triton_" to maximize caching opportunities (when unique_kernel_names = False). src_code = src_code.replace(str(Placeholder.DESCRIPTIVE_NAME), kernel_name) src_code = src_code.replace(str(Placeholder.KERNEL_NAME), subs_name) # TODO(voz): Ostensibly, we should not need this. But there are cases where C++ codegen does # not use BracesBuffer, so we have no good indicator of a C++ buffer atm. src_code = src_code.replace("#pragma CMT", "#") basename, _, kernel_path = get_path(code_hash(src_code.strip()), "py") compile_wrapper = IndentedBuffer() compile_wrapper.writeline(f"async_compile.triton({subs_name!r}, '''") compile_wrapper.splice(src_code, strip=True) current_device = V.graph.get_current_device_or_throw() compile_wrapper.writeline(f"''', device_str='{current_device.type}')") metadata_comment = f"# kernel path: {kernel_path}" origins, detailed_origins = get_kernel_metadata(node_schedule, wrapper) metadata_comment += "\n" + origins + "\n" + detailed_origins wrapper.define_kernel( kernel_name, compile_wrapper.getvalue(), metadata_comment ) # log kernel metadata for offline analysis. # E.g. one can find all unaligned inner reduction and check if # padding helps with the perf kernel by kernel. if metrics.is_metric_table_enabled("kernel_metadata"): metrics.log_kernel_metadata(kernel_name, kernel_path, src_code) return kernel_name def benchmark_fused_nodes(self, nodes): with preserve_rng_state(), torch.cuda.device( V.graph.get_current_device_or_throw() ): src_code = self.generate_kernel_code_from_nodes( nodes, benchmark_kernel=True ) mod = PyCodeCache.load(src_code) def cache_file_path(): assert mod.__file__ is not None return os.path.splitext(mod.__file__)[0] + ".kernel_perf" def load_cache(): path = cache_file_path() if os.path.exists(path): with open(path) as fd: return float(fd.read()) return None def store_cache(): path = cache_file_path() with open(path, "w") as fd: fd.write(str(ms)) log.debug( "kernel src code for %s written to: %s", {n.get_name() for n in nodes}, mod.__file__, ) ms = load_cache() if ms is not None: return ms, mod.__file__ args = mod.get_args() call = mod.call wrapped_jit_function = mod.triton_ # call once to trigger the compilation try: call(wrapped_jit_function.clone_args(*args)[0]) except Exception as e: log.debug( "Exception (%s) in compiling fused nodes %s", e, {n.get_name() for n in nodes}, ) ms = float("inf") store_cache() return ms, mod.__file__ launchers = wrapped_jit_function.launchers assert len(launchers) == 1 if launchers[0].n_spills > 0: # skip benchmarking the kernel if there are register spills ms = float("inf") else: # We have to clone the inplace updated arguments to avoid earlier calls # generating out of range indices for later calls. 
ms = benchmarker.benchmark_gpu( lambda: call(wrapped_jit_function.clone_args(*args)[0]) ) # overhead of cloning args gives bias for fusing the kernel # in the case of mutating/in-placeable second fusion # TODO - would be better as a hook in triton do_bench that reset # the input values between benchmarking if len(wrapped_jit_function.mutated_arg_names) > 0: ms = ms - benchmarker.benchmark_gpu( lambda: wrapped_jit_function.clone_args(*args) ) log.debug( "The fused kernel for %s took %.3f ms to run", {n.get_name() for n in nodes}, ms, ) store_cache() return ms, mod.__file__ def create_kernel_choices( self, kernel_features, kernel_args, kernel_kwargs ) -> List[SIMDKernel]: is_scan = kernel_features.contains_op("scan") is_split_scan = is_scan and any( node.is_split_scan() for node in kernel_features.scheduler_nodes() ) kernel_type: Type[TritonKernel] = self.kernel_type if is_split_scan: from .triton_split_scan import TritonSplitScanKernel kernel_type = TritonSplitScanKernel if is_scan: # TODO(jansel): scan does not yet work with cooperative reductions kernel_kwargs["override_cooperative_reduction"] = False # ops.sort only works with persistent reduction, and is not bandwidth bound anyway # so taking the hit of non-coalesced loads is okay if kernel_features.contains_op("sort"): kernel_kwargs["override_persistent_reduction"] = True kernel_kwargs["override_cooperative_reduction"] = False if not TritonKernel.has_persistent_RBLOCK(kernel_features.reduction_numel): # Cannot use persistent reduction with unknown dynamic rnumel assert not kernel_kwargs.get("override_persistent_reduction") kernel_kwargs["override_persistent_reduction"] = False kernel_kwargs = V.choices.triton_kernel_kwargs( kernel_type, kernel_features, kernel_args, kernel_kwargs ) kernel = kernel_type(*kernel_args, **kernel_kwargs) return self.add_multi_kernel_choices(kernel, kernel_args, kernel_kwargs) def add_multi_kernel_choices( self, kernel: SIMDKernel, kernel_args: List[Any], kernel_kwargs: Dict[str, Any], ) -> List[SIMDKernel]: kernels: List[SIMDKernel] = [kernel] if not config.triton.multi_kernel: return kernels optional_persistent = kernel.persistent_reduction and not kernel_kwargs.get( "override_persistent_reduction" ) optional_cooperative = kernel.cooperative_reduction and not kernel_kwargs.get( "override_cooperative_reduction" ) if optional_persistent: kernels.append( self.kernel_type( *kernel_args, **kernel_kwargs, override_persistent_reduction=False, ) ) if optional_cooperative: rnumel = kernel.numels["r"] # for larger sizes non-cooperative gets very slow if V.graph.sizevars.statically_known_leq(rnumel, 65536): kernels.append( other := self.kernel_type( *kernel_args, **kernel_kwargs, override_cooperative_reduction=False, ) ) if optional_persistent and other.persistent_reduction: kernels.append( self.kernel_type( *kernel_args, **kernel_kwargs, override_cooperative_reduction=False, override_persistent_reduction=False, ) ) if len(kernels) > 1: for kernel2 in kernels[1:]: # Keep buffers needed by the non-persistent reduction so both kernels have the same arguments kernel2.must_keep_buffers = kernel.must_keep_buffers # persistent kernels must be generated last so must_keep_buffers works right kernels.sort(key=lambda k: k.persistent_reduction) return kernels def benchmark_combo_kernel(self, node_list): def cache_file_path(): assert mod.__file__ is not None return os.path.splitext(mod.__file__)[0] + ".kernel_perf" def load_cache(): path = cache_file_path() if os.path.exists(path): with open(path) as fd: return tuple(float(e) 
for e in fd.read().split()) return (None, None) def store_cache(): path = cache_file_path() with open(path, "w") as fd: fd.write(str(ms) + " " + str(ms_clone)) total_ms, file_list = 0, [] total_clone_ms = 0 removed_buffers_orig = V.graph.removed_buffers V.graph.removed_buffers = OrderedSet(removed_buffers_orig) inplaced_to_remove_orig = V.graph.inplaced_to_remove V.graph.inplaced_to_remove = OrderedSet(inplaced_to_remove_orig) enable_autotune = config.combo_kernels_autotune > 0 mixed_sizes = config.combo_kernel_allow_mixed_sizes > 0 kernel_code_list = self.generate_combo_kernel_code( subkernel_nodes=node_list, custom_part_algorithm=True, enable_autotune=enable_autotune, mixed_sizes=mixed_sizes, only_gen_src_code=True, ) for src_code, _, node_group in kernel_code_list: fused_node_lists = [node.get_nodes() for node in node_group] names = [n.get_name() for nodes in fused_node_lists for n in nodes] src_code = src_code.replace(str(Placeholder.KERNEL_NAME), "triton_") mod = PyCodeCache.load(src_code) log.debug( "kernel src code for %s written to: %s", names, mod.__file__, ) ms, ms_clone = load_cache() if ms is not None: total_ms += ms total_clone_ms += ms_clone file_list.append(mod.__file__) continue args = mod.get_args() call = mod.call wrapped_jit_function = mod.triton_ # call once to trigger the compilation call(wrapped_jit_function.clone_args(*args)[0]) launchers = wrapped_jit_function.launchers assert len(launchers) == 1 if launchers[0].n_spills > 0: # skip benchmarking the kernel if there are register spills ms = ms_clone = float("inf") else: # We have to clone the inplace updated arguments to avoid earlier calls # generating out of range indices for later calls. ms = benchmarker.benchmark_gpu( lambda: call(wrapped_jit_function.clone_args(*args)[0]) ) ms_clone = benchmarker.benchmark_gpu( lambda: wrapped_jit_function.clone_args(*args)[0] ) log.debug( "The fused kernel for %s took %.3f ms to run, %.3f ms to clone inputs", {n.get_name() for n in node_group}, ms, ms_clone, ) store_cache() total_ms += ms total_clone_ms += ms_clone file_list.append(mod.__file__) V.graph.removed_buffers = removed_buffers_orig V.graph.inplaced_to_remove = inplaced_to_remove_orig return total_ms, total_clone_ms, file_list def debug_triton_code(node: BaseSchedulerNode) -> List[str]: lines = [] multi_template = node.get_template_node() assert multi_template is None or isinstance(multi_template, ir.MultiTemplateBuffer) if multi_template and multi_template.make_kernel_render is None: lines.append(f"{node.get_name()} Unfinalized multi template buffer") else: from torch._inductor.codegen.cuda_combined_scheduling import ( CUDACombinedScheduling, ) device = node.get_device() assert device is not None backend = node.scheduler.get_backend(device) assert isinstance( backend, (SIMDScheduling, CUDACombinedScheduling) ), f"Scheduling backend should be SIMD or CUDACombined when generating debug Triton strings, got: {type(backend)}" with V.graph.set_current_device(device): # Don't increment kernel count when generating debug string. # This will confuse some unit tests that check the number of # generated kernels. old_generated_kernel_count = metrics.generated_kernel_count triton_code = backend.generate_kernel_code_from_nodes( node.get_nodes() ).strip() metrics.generated_kernel_count = old_generated_kernel_count lines.append(f"{node.get_name()} Triton code:") lines.append(textwrap.indent(triton_code, " ")) return lines
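The codegen_static_numels docstring in the chunk above describes how Inductor stomps on the numel arguments with constants right after the kernel signature. Below is a minimal hand-written sketch of that shape, not Inductor's actual emitted code; the kernel name, the fp32 input, and the (4096, 768) row-major layout are illustrative assumptions taken from the docstring's example values.

import triton
import triton.language as tl

@triton.jit
def triton_red_fused_example(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr, RBLOCK: tl.constexpr):
    # Static numels "stomp" on the runtime arguments, as the docstring describes.
    xnumel = 4096
    rnumel = 768
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
    xmask = xindex < xnumel
    rbase = tl.arange(0, RBLOCK)[None, :]
    acc = tl.zeros([XBLOCK, RBLOCK], dtype=tl.float32)
    for roffset in range(0, rnumel, RBLOCK):
        rindex = roffset + rbase
        rmask = rindex < rnumel
        # Row-major load of a (xnumel, rnumel) fp32 tensor.
        vals = tl.load(in_ptr0 + (rindex + rnumel * xindex), rmask & xmask, other=0.0)
        acc = acc + vals
    result = tl.sum(acc, 1)[:, None]
    tl.store(out_ptr0 + xindex, result, xmask)

# Illustrative launch: x of shape (4096, 768) and out of shape (4096,), both fp32 on CUDA:
# triton_red_fused_example[(4096 // 64,)](x, out, 4096, 768, XBLOCK=64, RBLOCK=128)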
jq/learn-triton
convert.py
https://github.com/jq/learn-triton/blob/a5dee83d3063d7977fcdcaa7b61d580317d53fdc/convert.py
import torch
import triton
import triton.language as tl
from typing import Optional
import os

os.environ['CUDA_LAUNCH_BLOCKING'] = '1'


@torch.jit.script
def e4m3_to_bf16_torch(
        fp8_tensor: torch.Tensor,
        lookup_table: torch.Tensor,
        out: Optional[torch.Tensor] = None) -> torch.Tensor:
    assert fp8_tensor.dtype == torch.uint8
    if out is not None:
        assert out.dtype == torch.bfloat16
        assert out.shape == fp8_tensor.shape
        return out.copy_(lookup_table[fp8_tensor.int()])
    return lookup_table[fp8_tensor.int()]


@triton.jit
def e4m3_to_bf16_triton_kernel_(
    fp8_ptr,  # Pointer to input fp8 tensor
    lookup_ptr,  # Pointer to lookup table
    out_ptr,  # Pointer to output bf16 tensor
    n_elements,  # Number of elements
    BLOCK_SIZE: tl.constexpr
):
    pid = tl.program_id(0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    fp8_indices = tl.load(fp8_ptr + offsets, mask=mask, other=0).to(tl.int32)
    lookup_table = tl.load(lookup_ptr + tl.arange(0, 256), dtype=tl.bfloat16)
    bf16 = tl.int16(0)
    asm_code = """
    ld.shared.u16 {bf16}, [%lookup_table + %index * 2];
    """
    # This code is wrong: the asm has to read its operands from registers and
    # cannot reference Python variables by name.
    # It is also unclear whether lookup_table can be passed in here as a pointer,
    # or what the operand data types should be.
    bf16 = tl.inline_asm_elementwise(
        asm_code,
        args=[bf16, lookup_table, fp8_indices],
        dtype=(tl.bfloat16, tl.int32, tl.int32),
        is_pure=True,
        pack=4
    )
    tl.store(out_ptr + offsets, bf16, mask=mask)


@triton.jit
def e4m3_to_bf16_triton_kernel(
    fp8_ptr,  # Pointer to input fp8 tensor
    lookup_ptr,  # Pointer to lookup table
    out_ptr,  # Pointer to output bf16 tensor
    n_elements,  # Number of elements
    BLOCK_SIZE: tl.constexpr
):
    pid = tl.program_id(0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    fp8_indices = tl.load(fp8_ptr + offsets, mask=mask, other=0).to(tl.int32)
    bf16 = tl.load(lookup_ptr + fp8_indices, mask=mask, other=0.0)
    tl.store(out_ptr + offsets, bf16, mask=mask)


def e4m3_to_bf16_triton(
        fp8_tensor: torch.Tensor,
        lookup_table: torch.Tensor,
        out: Optional[torch.Tensor] = None) -> torch.Tensor:
    assert fp8_tensor.dtype == torch.uint8
    if out is not None:
        assert out.dtype == torch.bfloat16
        assert out.shape == fp8_tensor.shape
    else:
        out = torch.empty_like(fp8_tensor, dtype=torch.bfloat16)

    n_elements = fp8_tensor.numel()
    BLOCK_SIZE = 512
    grid = (triton.cdiv(n_elements, BLOCK_SIZE),)
    e4m3_to_bf16_triton_kernel[grid](fp8_tensor, lookup_table, out, n_elements, BLOCK_SIZE=BLOCK_SIZE)
    return out


def compare():
    e4m3_to_bf16_tensor = torch.load("data/e4m3_to_bf16.pt", weights_only=True).to('cuda')
    torch.manual_seed(0)
    size = 10
    fp8 = torch.randint(0, 256, (size,), device='cuda', dtype=torch.uint8)

    output_torch = e4m3_to_bf16_torch(fp8, e4m3_to_bf16_tensor)
    print("Output (Torch):", output_torch)
    output_triton = e4m3_to_bf16_triton(fp8, e4m3_to_bf16_tensor)
    print("Output (Triton):", output_triton)

    max_diff = torch.max(torch.abs(output_torch - output_triton)).item()
    print(f'The maximum difference between torch and triton is {max_diff}')
    assert torch.allclose(output_torch, output_triton, atol=1e-3), "Outputs do not match!"


compare()


@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names=['size'],  # Argument names to use as an x-axis for the plot.
        x_vals=[2**i for i in range(12, 28, 1)],  # Different possible values for `x_name`.
        x_log=True,  # x axis is logarithmic.
        line_arg='provider',  # Argument name whose value corresponds to a different line in the plot.
        line_vals=['triton', 'torch'],  # Possible values for `line_arg`.
        line_names=['Triton', 'Torch'],  # Label name for the lines.
        styles=[('blue', '-'), ('green', '-')],  # Line styles.
        ylabel='GB/s',  # Label name for the y-axis.
        plot_name='e4m3_to_bf16-performance',  # Name for the plot. Used also as a file name for saving the plot.
        args={},  # Values for function arguments not in `x_names` and `y_name`.
    ))
def benchmark(size, provider):
    e4m3_to_bf16_tensor = torch.load("data/e4m3_to_bf16.pt", weights_only=True).to('cuda')
    fp8 = torch.randint(0, 256, (size,), device='cuda', dtype=torch.uint8)
    quantiles = [0.5, 0.2, 0.8]
    if provider == 'torch':
        func = lambda: e4m3_to_bf16_torch(fp8, e4m3_to_bf16_tensor)
    elif provider == 'triton':
        func = lambda: e4m3_to_bf16_triton(fp8, e4m3_to_bf16_tensor)
    else:
        raise ValueError(f"Unknown provider: {provider}")
    ms, min_ms, max_ms = triton.testing.do_bench(func, quantiles=quantiles)
    gbps = lambda ms: size * fp8.element_size() * 2 * 1e-9 / (ms * 1e-3)  # Assuming read and write
    return gbps(ms), gbps(max_ms), gbps(min_ms)


# benchmark.run(print_data=True, show_plots=True)
@triton.jit
def e4m3_to_bf16_triton_kernel_(
    fp8_ptr,  # Pointer to input fp8 tensor
    lookup_ptr,  # Pointer to lookup table
    out_ptr,  # Pointer to output bf16 tensor
    n_elements,  # Number of elements
    BLOCK_SIZE: tl.constexpr
):
    pid = tl.program_id(0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    fp8_indices = tl.load(fp8_ptr + offsets, mask=mask, other=0).to(tl.int32)
    lookup_table = tl.load(lookup_ptr + tl.arange(0, 256), dtype=tl.bfloat16)
    bf16 = tl.int16(0)
    asm_code = """
    ld.shared.u16 {bf16}, [%lookup_table + %index * 2];
    """
    # This code is wrong: the asm has to read its operands from registers and
    # cannot reference Python variables by name.
    # It is also unclear whether lookup_table can be passed in here as a pointer,
    # or what the operand data types should be.
    bf16 = tl.inline_asm_elementwise(
        asm_code,
        args=[bf16, lookup_table, fp8_indices],
        dtype=(tl.bfloat16, tl.int32, tl.int32),
        is_pure=True,
        pack=4
    )
    tl.store(out_ptr + offsets, bf16, mask=mask)
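The comments in the chunk above note that the inline-assembly attempt cannot reference Python variables by name. For comparison, here is a minimal sketch of the call shape tl.inline_asm_elementwise expects: a PTX string plus a constraint string, with operands bound positionally to registers ($0, $1, ...). It performs a trivial 32-bit register move rather than the shared-memory table lookup attempted above, and it assumes a recent Triton release and a CUDA device.

import torch
import triton
import triton.language as tl

@triton.jit
def asm_identity_kernel(x_ptr, out_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    pid = tl.program_id(0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    x = tl.load(x_ptr + offsets, mask=mask, other=0)
    # "$0" is the output register, "$1" the input register; the "=r,r"
    # constraint string binds them, never a Python variable name.
    y = tl.inline_asm_elementwise(
        "mov.u32 $0, $1;",
        "=r,r",
        args=[x],
        dtype=tl.int32,
        is_pure=True,
        pack=1,
    )
    tl.store(out_ptr + offsets, y, mask=mask)

x = torch.arange(1024, device="cuda", dtype=torch.int32)
out = torch.empty_like(x)
asm_identity_kernel[(triton.cdiv(x.numel(), 256),)](x, out, x.numel(), BLOCK_SIZE=256)
assert torch.equal(x, out)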
jq/learn-triton
convert.py
https://github.com/jq/learn-triton/blob/a5dee83d3063d7977fcdcaa7b61d580317d53fdc/convert.py
import torch
import triton
import triton.language as tl
from typing import Optional
import os

os.environ['CUDA_LAUNCH_BLOCKING'] = '1'


@torch.jit.script
def e4m3_to_bf16_torch(
        fp8_tensor: torch.Tensor,
        lookup_table: torch.Tensor,
        out: Optional[torch.Tensor] = None) -> torch.Tensor:
    assert fp8_tensor.dtype == torch.uint8
    if out is not None:
        assert out.dtype == torch.bfloat16
        assert out.shape == fp8_tensor.shape
        return out.copy_(lookup_table[fp8_tensor.int()])
    return lookup_table[fp8_tensor.int()]


@triton.jit
def e4m3_to_bf16_triton_kernel_(
    fp8_ptr,  # Pointer to input fp8 tensor
    lookup_ptr,  # Pointer to lookup table
    out_ptr,  # Pointer to output bf16 tensor
    n_elements,  # Number of elements
    BLOCK_SIZE: tl.constexpr
):
    pid = tl.program_id(0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    fp8_indices = tl.load(fp8_ptr + offsets, mask=mask, other=0).to(tl.int32)
    lookup_table = tl.load(lookup_ptr + tl.arange(0, 256), dtype=tl.bfloat16)
    bf16 = tl.int16(0)
    asm_code = """
    ld.shared.u16 {bf16}, [%lookup_table + %index * 2];
    """
    # This code is wrong: the asm has to read its operands from registers and
    # cannot reference Python variables by name.
    # It is also unclear whether lookup_table can be passed in here as a pointer,
    # or what the operand data types should be.
    bf16 = tl.inline_asm_elementwise(
        asm_code,
        args=[bf16, lookup_table, fp8_indices],
        dtype=(tl.bfloat16, tl.int32, tl.int32),
        is_pure=True,
        pack=4
    )
    tl.store(out_ptr + offsets, bf16, mask=mask)


@triton.jit
def e4m3_to_bf16_triton_kernel(
    fp8_ptr,  # Pointer to input fp8 tensor
    lookup_ptr,  # Pointer to lookup table
    out_ptr,  # Pointer to output bf16 tensor
    n_elements,  # Number of elements
    BLOCK_SIZE: tl.constexpr
):
    pid = tl.program_id(0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    fp8_indices = tl.load(fp8_ptr + offsets, mask=mask, other=0).to(tl.int32)
    bf16 = tl.load(lookup_ptr + fp8_indices, mask=mask, other=0.0)
    tl.store(out_ptr + offsets, bf16, mask=mask)


def e4m3_to_bf16_triton(
        fp8_tensor: torch.Tensor,
        lookup_table: torch.Tensor,
        out: Optional[torch.Tensor] = None) -> torch.Tensor:
    assert fp8_tensor.dtype == torch.uint8
    if out is not None:
        assert out.dtype == torch.bfloat16
        assert out.shape == fp8_tensor.shape
    else:
        out = torch.empty_like(fp8_tensor, dtype=torch.bfloat16)

    n_elements = fp8_tensor.numel()
    BLOCK_SIZE = 512
    grid = (triton.cdiv(n_elements, BLOCK_SIZE),)
    e4m3_to_bf16_triton_kernel[grid](fp8_tensor, lookup_table, out, n_elements, BLOCK_SIZE=BLOCK_SIZE)
    return out


def compare():
    e4m3_to_bf16_tensor = torch.load("data/e4m3_to_bf16.pt", weights_only=True).to('cuda')
    torch.manual_seed(0)
    size = 10
    fp8 = torch.randint(0, 256, (size,), device='cuda', dtype=torch.uint8)

    output_torch = e4m3_to_bf16_torch(fp8, e4m3_to_bf16_tensor)
    print("Output (Torch):", output_torch)
    output_triton = e4m3_to_bf16_triton(fp8, e4m3_to_bf16_tensor)
    print("Output (Triton):", output_triton)

    max_diff = torch.max(torch.abs(output_torch - output_triton)).item()
    print(f'The maximum difference between torch and triton is {max_diff}')
    assert torch.allclose(output_torch, output_triton, atol=1e-3), "Outputs do not match!"


compare()


@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names=['size'],  # Argument names to use as an x-axis for the plot.
        x_vals=[2**i for i in range(12, 28, 1)],  # Different possible values for `x_name`.
        x_log=True,  # x axis is logarithmic.
        line_arg='provider',  # Argument name whose value corresponds to a different line in the plot.
        line_vals=['triton', 'torch'],  # Possible values for `line_arg`.
        line_names=['Triton', 'Torch'],  # Label name for the lines.
        styles=[('blue', '-'), ('green', '-')],  # Line styles.
        ylabel='GB/s',  # Label name for the y-axis.
        plot_name='e4m3_to_bf16-performance',  # Name for the plot. Used also as a file name for saving the plot.
        args={},  # Values for function arguments not in `x_names` and `y_name`.
    ))
def benchmark(size, provider):
    e4m3_to_bf16_tensor = torch.load("data/e4m3_to_bf16.pt", weights_only=True).to('cuda')
    fp8 = torch.randint(0, 256, (size,), device='cuda', dtype=torch.uint8)
    quantiles = [0.5, 0.2, 0.8]
    if provider == 'torch':
        func = lambda: e4m3_to_bf16_torch(fp8, e4m3_to_bf16_tensor)
    elif provider == 'triton':
        func = lambda: e4m3_to_bf16_triton(fp8, e4m3_to_bf16_tensor)
    else:
        raise ValueError(f"Unknown provider: {provider}")
    ms, min_ms, max_ms = triton.testing.do_bench(func, quantiles=quantiles)
    gbps = lambda ms: size * fp8.element_size() * 2 * 1e-9 / (ms * 1e-3)  # Assuming read and write
    return gbps(ms), gbps(max_ms), gbps(min_ms)


# benchmark.run(print_data=True, show_plots=True)
@triton.jit
def e4m3_to_bf16_triton_kernel(
    fp8_ptr,  # Pointer to input fp8 tensor
    lookup_ptr,  # Pointer to lookup table
    out_ptr,  # Pointer to output bf16 tensor
    n_elements,  # Number of elements
    BLOCK_SIZE: tl.constexpr
):
    pid = tl.program_id(0)
    block_start = pid * BLOCK_SIZE
    offsets = block_start + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements

    fp8_indices = tl.load(fp8_ptr + offsets, mask=mask, other=0).to(tl.int32)
    bf16 = tl.load(lookup_ptr + fp8_indices, mask=mask, other=0.0)
    tl.store(out_ptr + offsets, bf16, mask=mask)


def e4m3_to_bf16_triton(
        fp8_tensor: torch.Tensor,
        lookup_table: torch.Tensor,
        out: Optional[torch.Tensor] = None) -> torch.Tensor:
    assert fp8_tensor.dtype == torch.uint8
    if out is not None:
        assert out.dtype == torch.bfloat16
        assert out.shape == fp8_tensor.shape
    else:
        out = torch.empty_like(fp8_tensor, dtype=torch.bfloat16)

    n_elements = fp8_tensor.numel()
    BLOCK_SIZE = 512
    grid = (triton.cdiv(n_elements, BLOCK_SIZE),)
    e4m3_to_bf16_triton_kernel[grid](fp8_tensor, lookup_table, out, n_elements, BLOCK_SIZE=BLOCK_SIZE)
    return out


def compare():
    e4m3_to_bf16_tensor = torch.load("data/e4m3_to_bf16.pt", weights_only=True).to('cuda')
    torch.manual_seed(0)
    size = 10
    fp8 = torch.randint(0, 256, (size,), device='cuda', dtype=torch.uint8)

    output_torch = e4m3_to_bf16_torch(fp8, e4m3_to_bf16_tensor)
    print("Output (Torch):", output_torch)
    output_triton = e4m3_to_bf16_triton(fp8, e4m3_to_bf16_tensor)
    print("Output (Triton):", output_triton)

    max_diff = torch.max(torch.abs(output_torch - output_triton)).item()
    print(f'The maximum difference between torch and triton is {max_diff}')
    assert torch.allclose(output_torch, output_triton, atol=1e-3), "Outputs do not match!"


compare()


@triton.testing.perf_report(
    triton.testing.Benchmark(
        x_names=['size'],  # Argument names to use as an x-axis for the plot.
        x_vals=[2**i for i in range(12, 28, 1)],  # Different possible values for `x_name`.
        x_log=True,  # x axis is logarithmic.
        line_arg='provider',  # Argument name whose value corresponds to a different line in the plot.
        line_vals=['triton', 'torch'],  # Possible values for `line_arg`.
        line_names=['Triton', 'Torch'],  # Label name for the lines.
        styles=[('blue', '-'), ('green', '-')],  # Line styles.
        ylabel='GB/s',  # Label name for the y-axis.
        plot_name='e4m3_to_bf16-performance',  # Name for the plot. Used also as a file name for saving the plot.
        args={},  # Values for function arguments not in `x_names` and `y_name`.
    ))
def benchmark(size, provider):
    e4m3_to_bf16_tensor = torch.load("data/e4m3_to_bf16.pt", weights_only=True).to('cuda')
    fp8 = torch.randint(0, 256, (size,), device='cuda', dtype=torch.uint8)
    quantiles = [0.5, 0.2, 0.8]
    if provider == 'torch':
        func = lambda: e4m3_to_bf16_torch(fp8, e4m3_to_bf16_tensor)
    elif provider == 'triton':
        func = lambda: e4m3_to_bf16_triton(fp8, e4m3_to_bf16_tensor)
    else:
        raise ValueError(f"Unknown provider: {provider}")
    ms, min_ms, max_ms = triton.testing.do_bench(func, quantiles=quantiles)
    gbps = lambda ms: size * fp8.element_size() * 2 * 1e-9 / (ms * 1e-3)  # Assuming read and write
    return gbps(ms), gbps(max_ms), gbps(min_ms)


# benchmark.run(print_data=True, show_plots=True)
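Both records above load their 256-entry table from data/e4m3_to_bf16.pt. As a sketch of how an equivalent table could be built directly, assuming a PyTorch build that ships the torch.float8_e4m3fn dtype, the helper below (a hypothetical name, not part of the repository) reinterprets every possible byte pattern as e4m3fn and widens it to bfloat16; the stored table may differ in how it handles the e4m3fn NaN encodings.

import torch

def build_e4m3_to_bf16_table(device: str = "cuda") -> torch.Tensor:
    # Hypothetical helper: byte i maps to the bfloat16 value of e4m3fn bit pattern i.
    all_bytes = torch.arange(256, device=device).to(torch.uint8)
    return all_bytes.view(torch.float8_e4m3fn).to(torch.bfloat16)

# Possible usage with the kernels above:
# lookup = build_e4m3_to_bf16_table()
# out = e4m3_to_bf16_triton(fp8, lookup)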
AI-CE-2023/nakta
nakta_model5/kernel/Emb/Rotary/rotary.py
https://github.com/AI-CE-2023/nakta/blob/94d99ffb2d25ad8b75fbf9149266e39c26b484b6/nakta_model5/kernel/Emb/Rotary/rotary.py
# Copyright (c) 2023, Tri Dao. from typing import Optional, Union import torch import triton import triton.language as tl # @triton.autotune( # configs=[ # triton.Config({"BLOCK_M": 2}), # triton.Config({"BLOCK_M": 4}), # triton.Config({"BLOCK_M": 8}), # triton.Config({"BLOCK_M": 16}), # ], # key=["CACHE_KEY_SEQLEN", "BLOCK_K", "INTERLEAVED"], # ) @triton.jit def rotary_kernel( OUT, # Pointers to matrices X, COS, SIN, CU_SEQLENS, SEQLEN_OFFSETS, # this could be int or a pointer # Matrix dimensions seqlen, nheads, rotary_dim, seqlen_ro, CACHE_KEY_SEQLEN, # strides stride_out_batch, stride_out_seqlen, stride_out_nheads, stride_out_headdim, stride_x_batch, stride_x_seqlen, stride_x_nheads, stride_x_headdim, # Meta-parameters BLOCK_K: tl.constexpr, IS_SEQLEN_OFFSETS_TENSOR: tl.constexpr, IS_VARLEN: tl.constexpr, INTERLEAVED: tl.constexpr, CONJUGATE: tl.constexpr, BLOCK_M: tl.constexpr, ): pid_m = tl.program_id(axis=0) pid_batch = tl.program_id(axis=1) pid_head = tl.program_id(axis=2) rotary_dim_half = rotary_dim // 2 if not IS_VARLEN: X = X + pid_batch * stride_x_batch + pid_head * stride_x_nheads OUT = OUT + pid_batch * stride_out_batch + pid_head * stride_out_nheads else: start_idx = tl.load(CU_SEQLENS + pid_batch) seqlen = tl.load(CU_SEQLENS + pid_batch + 1) - start_idx X = X + start_idx * stride_x_seqlen + pid_head * stride_x_nheads OUT = OUT + start_idx * stride_out_seqlen + pid_head * stride_out_nheads if pid_m * BLOCK_M >= seqlen: return rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) if not IS_SEQLEN_OFFSETS_TENSOR: rm_cs = rm + SEQLEN_OFFSETS else: rm_cs = rm + tl.load(SEQLEN_OFFSETS + pid_batch) rk = tl.arange(0, BLOCK_K) rk_half = tl.arange(0, BLOCK_K // 2) if not INTERLEAVED: # Load the 1st and 2nd halves of X, do calculation, then store to 1st and 2nd halves of OUT X = X + (rm[:, None] * stride_x_seqlen + rk_half[None, :] * stride_x_headdim) COS = COS + (rm_cs[:, None] * rotary_dim_half + rk_half[None, :]) SIN = SIN + (rm_cs[:, None] * rotary_dim_half + rk_half[None, :]) cos = tl.load( COS, mask=(rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < rotary_dim_half), other=1.0, ).to(tl.float32) sin = tl.load( SIN, mask=(rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < rotary_dim_half), other=0.0, ).to(tl.float32) x0 = tl.load( X, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half), other=0.0, ).to(tl.float32) x1 = tl.load( X + rotary_dim_half * stride_x_headdim, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half), other=0.0, ).to(tl.float32) if CONJUGATE: sin = -sin o0 = x0 * cos - x1 * sin o1 = x0 * sin + x1 * cos # write back result OUT = OUT + ( rm[:, None] * stride_out_seqlen + rk_half[None, :] * stride_out_headdim ) tl.store( OUT, o0, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half) ) tl.store( OUT + rotary_dim_half * stride_out_headdim, o1, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half), ) else: # We don't want to load X[0, 2, 4, ...] and X[1, 3, 5, ...] separately since both are slow. # Instead, we load x0 = X[0, 1, 2, 3, ...] and x1 = X[1, 0, 3, 2, ...]. # Loading x0 will be fast but x1 will be slow. # Then we load cos = COS[0, 0, 1, 1, ...] and sin = SIN[0, 0, 1, 1, ...]. # Then we do the calculation and use tl.where to pick put the right outputs for the even # and for the odd indices. rk_swap = rk + ((rk + 1) % 2) * 2 - 1 # 1, 0, 3, 2, 5, 4, ... 
rk_repeat = tl.arange(0, BLOCK_K) // 2 X0 = X + (rm[:, None] * stride_x_seqlen + rk[None, :] * stride_x_headdim) X1 = X + (rm[:, None] * stride_x_seqlen + rk_swap[None, :] * stride_x_headdim) COS = COS + (rm_cs[:, None] * rotary_dim_half + rk_repeat[None, :]) SIN = SIN + (rm_cs[:, None] * rotary_dim_half + rk_repeat[None, :]) cos = tl.load( COS, mask=(rm_cs[:, None] < seqlen_ro) & (rk_repeat[None, :] < rotary_dim_half), other=1.0, ).to(tl.float32) sin = tl.load( SIN, mask=(rm_cs[:, None] < seqlen_ro) & (rk_repeat[None, :] < rotary_dim_half), other=0.0, ).to(tl.float32) x0 = tl.load( X0, mask=(rm[:, None] < seqlen) & (rk[None, :] < rotary_dim), other=0.0 ).to(tl.float32) x1 = tl.load( X1, mask=(rm[:, None] < seqlen) & (rk_swap[None, :] < rotary_dim), other=0.0 ).to(tl.float32) if CONJUGATE: sin = -sin x0_cos = x0 * cos x1_sin = x1 * sin out = tl.where(rk[None, :] % 2 == 0, x0_cos - x1_sin, x0_cos + x1_sin) OUT = OUT + (rm[:, None] * stride_out_seqlen + rk[None, :] * stride_out_headdim) tl.store(OUT, out, mask=(rm[:, None] < seqlen) & (rk[None, :] < rotary_dim)) def apply_rotary( x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, seqlen_offsets: Union[int, torch.Tensor] = 0, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, interleaved=False, inplace=False, conjugate=False, ) -> torch.Tensor: """ Arguments: x: (batch, seqlen, nheads, headdim) if cu_seqlens is None else (total_seqlen, nheads, headdim). cos: (seqlen_ro, rotary_dim / 2) sin: (seqlen_ro, rotary_dim / 2) seqlen_offsets: integer or integer tensor of size (batch,) cu_seqlens: (batch + 1,) or None max_seqlen: int Returns: y: (batch, seqlen, nheads, headdim) """ is_varlen = cu_seqlens is not None if not is_varlen: batch, seqlen, nheads, headdim = x.shape else: assert ( max_seqlen is not None ), "If cu_seqlens is passed in, then max_seqlen must be passed" total_seqlen, nheads, headdim = x.shape batch_p_1 = cu_seqlens.shape[0] batch = batch_p_1 - 1 seqlen = max_seqlen seqlen_ro, rotary_dim = cos.shape assert sin.shape == cos.shape rotary_dim *= 2 assert rotary_dim <= headdim, "rotary_dim must be <= headdim" assert headdim <= 256, "Only support headdim <= 256" assert seqlen_ro >= seqlen, "seqlen_ro must be >= seqlen" assert ( cos.dtype == sin.dtype ), f"cos and sin must have the same dtype, got {cos.dtype} and {sin.dtype}" assert ( x.dtype == cos.dtype ), f"Input and cos/sin must have the same dtype, got {x.dtype} and {cos.dtype}" cos, sin = cos.contiguous(), sin.contiguous() if isinstance(seqlen_offsets, torch.Tensor): assert seqlen_offsets.shape == (batch,) assert seqlen_offsets.dtype in [torch.int32, torch.int64] seqlen_offsets = seqlen_offsets.contiguous() else: assert seqlen_offsets + seqlen <= seqlen_ro output = torch.empty_like(x) if not inplace else x if rotary_dim < headdim and not inplace: output[..., rotary_dim:].copy_(x[..., rotary_dim:]) BLOCK_K = ( 32 if rotary_dim <= 32 else (64 if rotary_dim <= 64 else (128 if rotary_dim <= 128 else 256)) ) grid = lambda META: (triton.cdiv(seqlen, META["BLOCK_M"]), batch, nheads) # noqa BLOCK_M = 4 if interleaved else (8 if rotary_dim <= 64 else 4) # Need this, otherwise Triton tries to launch from cuda:0 and we get # ValueError: Pointer argument (at 0) cannot be accessed from Triton (cpu tensor?) 
with torch.cuda.device(x.device.index): rotary_kernel[grid]( output, # data ptrs x, cos, sin, cu_seqlens, seqlen_offsets, seqlen, # shapes nheads, rotary_dim, seqlen_ro, seqlen // 128, # key for triton cache (limit number of compilations) output.stride(0) if not is_varlen else 0, # batch_strides if not varlen else 0 output.stride(-3), # seqlen_stride or total_seqlen_stride output.stride(-2), # nheads_stride output.stride(-1), # headdim_stride x.stride(0) if not is_varlen else 0, # batch_strides if not varlen else 0 x.stride(-3), # seqlen stride or total_seqlen_stride x.stride(-2), # nheads stride x.stride(-1), # headdim stride BLOCK_K, isinstance(seqlen_offsets, torch.Tensor), is_varlen, interleaved, conjugate, BLOCK_M, ) return output
@triton.jit def rotary_kernel( OUT, # Pointers to matrices X, COS, SIN, CU_SEQLENS, SEQLEN_OFFSETS, # this could be int or a pointer # Matrix dimensions seqlen, nheads, rotary_dim, seqlen_ro, CACHE_KEY_SEQLEN, # strides stride_out_batch, stride_out_seqlen, stride_out_nheads, stride_out_headdim, stride_x_batch, stride_x_seqlen, stride_x_nheads, stride_x_headdim, # Meta-parameters BLOCK_K: tl.constexpr, IS_SEQLEN_OFFSETS_TENSOR: tl.constexpr, IS_VARLEN: tl.constexpr, INTERLEAVED: tl.constexpr, CONJUGATE: tl.constexpr, BLOCK_M: tl.constexpr, ): pid_m = tl.program_id(axis=0) pid_batch = tl.program_id(axis=1) pid_head = tl.program_id(axis=2) rotary_dim_half = rotary_dim // 2 if not IS_VARLEN: X = X + pid_batch * stride_x_batch + pid_head * stride_x_nheads OUT = OUT + pid_batch * stride_out_batch + pid_head * stride_out_nheads else: start_idx = tl.load(CU_SEQLENS + pid_batch) seqlen = tl.load(CU_SEQLENS + pid_batch + 1) - start_idx X = X + start_idx * stride_x_seqlen + pid_head * stride_x_nheads OUT = OUT + start_idx * stride_out_seqlen + pid_head * stride_out_nheads if pid_m * BLOCK_M >= seqlen: return rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M) if not IS_SEQLEN_OFFSETS_TENSOR: rm_cs = rm + SEQLEN_OFFSETS else: rm_cs = rm + tl.load(SEQLEN_OFFSETS + pid_batch) rk = tl.arange(0, BLOCK_K) rk_half = tl.arange(0, BLOCK_K // 2) if not INTERLEAVED: # Load the 1st and 2nd halves of X, do calculation, then store to 1st and 2nd halves of OUT X = X + (rm[:, None] * stride_x_seqlen + rk_half[None, :] * stride_x_headdim) COS = COS + (rm_cs[:, None] * rotary_dim_half + rk_half[None, :]) SIN = SIN + (rm_cs[:, None] * rotary_dim_half + rk_half[None, :]) cos = tl.load( COS, mask=(rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < rotary_dim_half), other=1.0, ).to(tl.float32) sin = tl.load( SIN, mask=(rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < rotary_dim_half), other=0.0, ).to(tl.float32) x0 = tl.load( X, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half), other=0.0, ).to(tl.float32) x1 = tl.load( X + rotary_dim_half * stride_x_headdim, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half), other=0.0, ).to(tl.float32) if CONJUGATE: sin = -sin o0 = x0 * cos - x1 * sin o1 = x0 * sin + x1 * cos # write back result OUT = OUT + ( rm[:, None] * stride_out_seqlen + rk_half[None, :] * stride_out_headdim ) tl.store( OUT, o0, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half) ) tl.store( OUT + rotary_dim_half * stride_out_headdim, o1, mask=(rm[:, None] < seqlen) & (rk_half[None, :] < rotary_dim_half), ) else: # We don't want to load X[0, 2, 4, ...] and X[1, 3, 5, ...] separately since both are slow. # Instead, we load x0 = X[0, 1, 2, 3, ...] and x1 = X[1, 0, 3, 2, ...]. # Loading x0 will be fast but x1 will be slow. # Then we load cos = COS[0, 0, 1, 1, ...] and sin = SIN[0, 0, 1, 1, ...]. # Then we do the calculation and use tl.where to pick put the right outputs for the even # and for the odd indices. rk_swap = rk + ((rk + 1) % 2) * 2 - 1 # 1, 0, 3, 2, 5, 4, ... 
rk_repeat = tl.arange(0, BLOCK_K) // 2 X0 = X + (rm[:, None] * stride_x_seqlen + rk[None, :] * stride_x_headdim) X1 = X + (rm[:, None] * stride_x_seqlen + rk_swap[None, :] * stride_x_headdim) COS = COS + (rm_cs[:, None] * rotary_dim_half + rk_repeat[None, :]) SIN = SIN + (rm_cs[:, None] * rotary_dim_half + rk_repeat[None, :]) cos = tl.load( COS, mask=(rm_cs[:, None] < seqlen_ro) & (rk_repeat[None, :] < rotary_dim_half), other=1.0, ).to(tl.float32) sin = tl.load( SIN, mask=(rm_cs[:, None] < seqlen_ro) & (rk_repeat[None, :] < rotary_dim_half), other=0.0, ).to(tl.float32) x0 = tl.load( X0, mask=(rm[:, None] < seqlen) & (rk[None, :] < rotary_dim), other=0.0 ).to(tl.float32) x1 = tl.load( X1, mask=(rm[:, None] < seqlen) & (rk_swap[None, :] < rotary_dim), other=0.0 ).to(tl.float32) if CONJUGATE: sin = -sin x0_cos = x0 * cos x1_sin = x1 * sin out = tl.where(rk[None, :] % 2 == 0, x0_cos - x1_sin, x0_cos + x1_sin) OUT = OUT + (rm[:, None] * stride_out_seqlen + rk[None, :] * stride_out_headdim) tl.store(OUT, out, mask=(rm[:, None] < seqlen) & (rk[None, :] < rotary_dim)) def apply_rotary( x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, seqlen_offsets: Union[int, torch.Tensor] = 0, cu_seqlens: Optional[torch.Tensor] = None, max_seqlen: Optional[int] = None, interleaved=False, inplace=False, conjugate=False, ) -> torch.Tensor: """ Arguments: x: (batch, seqlen, nheads, headdim) if cu_seqlens is None else (total_seqlen, nheads, headdim). cos: (seqlen_ro, rotary_dim / 2) sin: (seqlen_ro, rotary_dim / 2) seqlen_offsets: integer or integer tensor of size (batch,) cu_seqlens: (batch + 1,) or None max_seqlen: int Returns: y: (batch, seqlen, nheads, headdim) """ is_varlen = cu_seqlens is not None if not is_varlen: batch, seqlen, nheads, headdim = x.shape else: assert ( max_seqlen is not None ), "If cu_seqlens is passed in, then max_seqlen must be passed" total_seqlen, nheads, headdim = x.shape batch_p_1 = cu_seqlens.shape[0] batch = batch_p_1 - 1 seqlen = max_seqlen seqlen_ro, rotary_dim = cos.shape assert sin.shape == cos.shape rotary_dim *= 2 assert rotary_dim <= headdim, "rotary_dim must be <= headdim" assert headdim <= 256, "Only support headdim <= 256" assert seqlen_ro >= seqlen, "seqlen_ro must be >= seqlen" assert ( cos.dtype == sin.dtype ), f"cos and sin must have the same dtype, got {cos.dtype} and {sin.dtype}" assert ( x.dtype == cos.dtype ), f"Input and cos/sin must have the same dtype, got {x.dtype} and {cos.dtype}" cos, sin = cos.contiguous(), sin.contiguous() if isinstance(seqlen_offsets, torch.Tensor): assert seqlen_offsets.shape == (batch,) assert seqlen_offsets.dtype in [torch.int32, torch.int64] seqlen_offsets = seqlen_offsets.contiguous() else: assert seqlen_offsets + seqlen <= seqlen_ro output = torch.empty_like(x) if not inplace else x if rotary_dim < headdim and not inplace: output[..., rotary_dim:].copy_(x[..., rotary_dim:]) BLOCK_K = ( 32 if rotary_dim <= 32 else (64 if rotary_dim <= 64 else (128 if rotary_dim <= 128 else 256)) ) grid = lambda META: (triton.cdiv(seqlen, META["BLOCK_M"]), batch, nheads) # noqa BLOCK_M = 4 if interleaved else (8 if rotary_dim <= 64 else 4) # Need this, otherwise Triton tries to launch from cuda:0 and we get # ValueError: Pointer argument (at 0) cannot be accessed from Triton (cpu tensor?) 
with torch.cuda.device(x.device.index): rotary_kernel[grid]( output, # data ptrs x, cos, sin, cu_seqlens, seqlen_offsets, seqlen, # shapes nheads, rotary_dim, seqlen_ro, seqlen // 128, # key for triton cache (limit number of compilations) output.stride(0) if not is_varlen else 0, # batch_strides if not varlen else 0 output.stride(-3), # seqlen_stride or total_seqlen_stride output.stride(-2), # nheads_stride output.stride(-1), # headdim_stride x.stride(0) if not is_varlen else 0, # batch_strides if not varlen else 0 x.stride(-3), # seqlen stride or total_seqlen_stride x.stride(-2), # nheads stride x.stride(-1), # headdim stride BLOCK_K, isinstance(seqlen_offsets, torch.Tensor), is_varlen, interleaved, conjugate, BLOCK_M, ) return output
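The non-interleaved path of rotary_kernel above reduces to o0 = x0*cos - x1*sin, o1 = x0*sin + x1*cos on the two halves of the rotary dimensions. Below is a minimal pure-PyTorch sketch of that transform for cross-checking, assuming a fixed-length batch and zero sequence offset; the helper name rotary_ref and the test shapes are illustrative and not part of the repository.
import torch

def rotary_ref(x, cos, sin, conjugate=False):
    # Sketch only: x is (batch, seqlen, nheads, headdim); cos/sin are (seqlen_ro, rotary_dim // 2).
    rotary_dim = 2 * cos.shape[-1]
    x_rot, x_pass = x[..., :rotary_dim], x[..., rotary_dim:]
    x0, x1 = x_rot.chunk(2, dim=-1)          # first / second half of the rotary dims
    cos = cos[: x.shape[1]].unsqueeze(1)     # (seqlen, 1, rotary_dim // 2), broadcasts over heads
    sin = sin[: x.shape[1]].unsqueeze(1)
    if conjugate:
        sin = -sin
    o0 = x0 * cos - x1 * sin
    o1 = x0 * sin + x1 * cos
    return torch.cat([o0, o1, x_pass], dim=-1)

if __name__ == "__main__":
    torch.manual_seed(0)
    x = torch.randn(2, 16, 4, 64)
    angles = torch.rand(16, 16) * 6.28       # seqlen_ro = 16, rotary_dim = 32
    out = rotary_ref(x, angles.cos(), angles.sin())
    print(out.shape)                          # torch.Size([2, 16, 4, 64])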
thuml/learn_torch.compile
torchbench/timm_efficientnet_training_cuda/__compiled_fn_6 kernel 4.py
https://github.com/thuml/learn_torch.compile/blob/b1a5e6dfa7a14996962dc22bff78b41cffac430d/torchbench/timm_efficientnet_training_cuda/__compiled_fn_6%20kernel%204.py
import triton import triton.language as tl from torch._inductor.ir import ReductionHint from torch._inductor.ir import TileHint from torch._inductor.triton_heuristics import AutotuneHint, persistent_reduction from torch._inductor.utils import instance_descriptor from torch._inductor import triton_helpers @persistent_reduction( size_hints=[2048, 2], reduction_hint=ReductionHint.OUTER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': 0, 'device_type': 'cuda', 'constants': {}, 'configs': [instance_descriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=(), ids_of_folded_args=(), divisible_by_8=(3,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_mul_native_batch_norm_backward_3', 'mutated_arg_names': ['in_out_ptr0']} ) @triton.jit def triton_(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1280 rnumel = 2 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (1280*r1)), rmask & xmask, other=0.0) tmp5 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = tl.math.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask)
@triton.jit def triton_(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1280 rnumel = 2 RBLOCK: tl.constexpr = 2 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (1280*r1)), rmask & xmask, other=0.0) tmp5 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last') tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tmp6 = 1e-05 tmp7 = tmp5 + tmp6 tmp8 = tl.math.rsqrt(tmp7) tmp9 = tmp4 * tmp8 tl.debug_barrier() tl.store(in_out_ptr0 + (x0), tmp9, xmask)
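The inductor-generated kernel above finishes a split batch-norm backward reduction: it sums a (2, 1280) buffer of partial sums over the split axis and scales each channel by rsqrt(var + 1e-5). A small eager-mode sketch of the same arithmetic follows; the tensor names are illustrative, assuming in_ptr0 is the (2, 1280) partial-sum buffer and in_ptr1 the per-channel variance.
import torch

def bn_backward_scale_ref(partial_sums: torch.Tensor, var: torch.Tensor, eps: float = 1e-5):
    # Sketch of the kernel's math: partial_sums is (2, 1280), var is (1280,).
    total = partial_sums.sum(dim=0)          # tmp4: finish the split reduction over r
    return total * torch.rsqrt(var + eps)    # tmp9: scale by 1 / sqrt(var + eps)

if __name__ == "__main__":
    partial = torch.randn(2, 1280)
    var = torch.rand(1280)
    print(bn_backward_scale_ref(partial, var).shape)   # torch.Size([1280])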
zinccat/TritonTrace
dynamic/level3/49_Mamba2ReturnFinalState/triton_poi_fused_clone_2.py
https://github.com/zinccat/TritonTrace/blob/a24eba759122f6f75bf349cd44ef329d9cd3e476/dynamic/level3/49_Mamba2ReturnFinalState/triton_poi_fused_clone_2.py
# From: 49_Mamba2ReturnFinalState import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[1048576], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32', 4: 'i32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=86, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=1536, multi_processor_count=82), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': '712B1D69F892A891D8FFA5075DCAB47CFF4E132D88BFC66744701CEAE226F127', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_(in_ptr0, out_ptr0, ks0, ks1, ks2, ks3, xnumel, XBLOCK : tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % ks0 x1 = (xindex // ks0) % ks1 x2 = (xindex // ks2) % 8 x3 = (xindex // ks3) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (ks0*x2) + (8*ks0*x1) + (8*ks1*ks0*x3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp0, xmask)
@triton.jit def triton_(in_ptr0, out_ptr0, ks0, ks1, ks2, ks3, xnumel, XBLOCK : tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % ks0 x1 = (xindex // ks0) % ks1 x2 = (xindex // ks2) % 8 x3 = (xindex // ks3) x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (ks0*x2) + (8*ks0*x1) + (8*ks1*ks0*x3)), xmask, eviction_policy='evict_last') tl.store(out_ptr0 + (x4), tmp0, xmask)
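triton_poi_fused_clone_2 above is a pure gather: assuming ks2 == ks0*ks1 and ks3 == 8*ks0*ks1 (the contiguous layout implied by the index math), it materializes a copy of a (batch, ks1, 8, ks0) tensor with its middle two axes swapped. The sketch below reproduces the kernel's flat-index arithmetic as a cross-check; the shapes are illustrative assumptions.
import torch

def clone_permute_ref(x: torch.Tensor) -> torch.Tensor:
    # x: (batch, ks1, 8, ks0) contiguous -> (batch, 8, ks1, ks0) contiguous copy
    return x.permute(0, 2, 1, 3).contiguous()

if __name__ == "__main__":
    batch, ks1, ks0 = 4, 16, 64
    x = torch.randn(batch, ks1, 8, ks0)
    y = clone_permute_ref(x)
    # Same gather expressed with the kernel's flat index arithmetic.
    idx = torch.arange(y.numel())
    x0, x1 = idx % ks0, (idx // ks0) % ks1
    x2, x3 = (idx // (ks0 * ks1)) % 8, idx // (8 * ks0 * ks1)
    flat = x.reshape(-1)[x0 + ks0 * x2 + 8 * ks0 * x1 + 8 * ks1 * ks0 * x3]
    print(torch.equal(flat.reshape_as(y), y))   # True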
Zymrael/vortex
vortex/ops/hyena_se/_bwd_tma.py
https://github.com/Zymrael/vortex/blob/781672b65b099d1b8de84a35267bbec098b8478d/vortex/ops/hyena_se/_bwd_tma.py
import logging from typing import Any, Union import torch import triton import triton.language as tl from .bwd_kernels import _get_T_store_idx, _get_Tc_store_idx from .fwd_kernels import load_correction_toeplitz, load_toeplitz from .kernel_utils import ( create_2d_tma_descriptor, get_program_order, torch_dtype_to_triton, ) logger = logging.getLogger(__name__) # TODO: Threshold for using TMA for filters: g * hl * element_size >= 128 @triton.jit def _two_pass_bwd_grouped_tma_kernel( # TMA descriptors # Inputs dy_desc, x_desc, B_desc, C_desc, h_ptr, # Intermediate activations y2_desc, T_desc, T_hat_desc, # Outputs dx_desc, dB_desc, dC_desc, dhdT_ptr, dhdTc_ptr, # Strides input_batch_stride, input_row_stride, input_col_stride, dhdT_batch_stride, dhdT_chunk_stride, dhdT_block_stride, dhdT_row_stride, dhdT_col_stride, # Shapes bs, seqlen, g, dg, # Compile-time constants FILTER_LEN: tl.constexpr, CHUNK_SIZE: tl.constexpr, BLOCK_D: tl.constexpr, DTYPE: tl.constexpr, LOAD_TOEPLITZ: tl.constexpr, SINGLE_GROUP: tl.constexpr, NUM_PIPELINE_STAGES: tl.constexpr, THREADBLOCK_SWIZZLE: tl.constexpr, FLUSH: tl.constexpr = False, # Flush TMA cache # kwargs for tl.dot input_precision: tl.constexpr = "ieee", # "ieee", "tf32", "tf32x3" --> only for debugging, since dtype < fp32 max_num_imprecise_acc: tl.constexpr = None, out_dtype: tl.constexpr = tl.float32, # Common triton kernel params # num_stages: tl.constexpr = 2, # num_warps: tl.constexpr = 4, DEBUG: tl.constexpr = False, ): # tl.static_print("DTYPE", DTYPE,) # tl.static_print("DTYPE.value", DTYPE.value) if DEBUG: if tl.program_id(0) == 0: tl.static_print( "TWO_PASS CONSTEXPRS:\n", "FILTER_LEN:", FILTER_LEN, "CHUNK_SIZE:", CHUNK_SIZE, "BLOCK_D:", BLOCK_D, "SINGLE_GROUP:", SINGLE_GROUP, "THREADBLOCK_SWIZZLE:", THREADBLOCK_SWIZZLE, ) # tl.static_print("DTYPE", DTYPE,) # tl.static_print("DTYPE.value", DTYPE.value) d = g * dg num_programs = tl.num_programs(0) # TMA offsets pid = tl.program_id(axis=0) num_tiles_m = tl.cdiv(seqlen, CHUNK_SIZE) num_tiles_k = tl.cdiv(d, BLOCK_D) num_tiles_batch = num_tiles_m * num_tiles_k total_tiles = bs * num_tiles_batch chunks_per_batch = num_tiles_m tiles_per_filter_group = dg // BLOCK_D chunks_per_seq = tl.cdiv(seqlen, CHUNK_SIZE) for tile_id in tl.range(pid, total_tiles, num_programs, num_stages=NUM_PIPELINE_STAGES): pid_batch, pid_chunk, pid_d, pid_filter_group = get_program_order( tile_id, num_tiles_batch, num_tiles_k, chunks_per_batch, tiles_per_filter_group, THREADBLOCK_SWIZZLE, ) batch_offset = pid_batch * num_tiles_m * CHUNK_SIZE chunk_offset = pid_chunk * CHUNK_SIZE offset_m = batch_offset + chunk_offset offset_k = pid_d * BLOCK_D if False: if pid == 0: tl.device_print("num_pid_m", num_tiles_m) tl.device_print("num_pid_k", num_tiles_k) tl.device_print("num_pid_batch", num_tiles_batch) tl.device_print("total_tiles", total_tiles) tl.device_print("pid", pid) tl.device_print("pid_batch", pid_batch) tl.device_print("pid_chunk", pid_chunk) tl.device_print("pid_d", pid_d) tl.device_print("pid_filter_group", pid_filter_group) tl.device_print("offset_m", offset_m) tl.device_print("offset_k", offset_k) dy = tl._experimental_descriptor_load(dy_desc, [offset_m, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value) x = tl._experimental_descriptor_load(x_desc, [offset_m, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value) B = tl._experimental_descriptor_load(B_desc, [offset_m, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value) C = tl._experimental_descriptor_load(C_desc, [offset_m, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value) y2 = 
tl._experimental_descriptor_load(y2_desc, [offset_m, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value) # Start backprop dC = dy * y2 tl._experimental_descriptor_store(dC_desc, dC, [offset_m, offset_k]) # # Backprop through C dy = dy * C if LOAD_TOEPLITZ: T_group_stride = CHUNK_SIZE T_group_offset = pid_filter_group * T_group_stride T = tl._experimental_descriptor_load(T_desc, [T_group_offset, 0], [CHUNK_SIZE, CHUNK_SIZE], DTYPE.value) else: T = load_toeplitz( h_ptr, FILTER_LEN, CHUNK_SIZE, SINGLE_GROUP=SINGLE_GROUP, group_num=pid_filter_group, ) T = tl.trans(T) dy1 = tl.dot( T, dy, input_precision=input_precision, max_num_imprecise_acc=max_num_imprecise_acc, out_dtype=dy.dtype, ) dx = dy1 * B dB = dy1 * x Bx = tl.trans(B * x) dT = tl.dot( dy, Bx, input_precision=input_precision, max_num_imprecise_acc=max_num_imprecise_acc, out_dtype=dy.dtype, ) # Correction term # In backwards, we roll in the opposite direction # Hence, the last chunk in the sequence does not need correction is_last_chunk = pid_chunk == chunks_per_seq - 1 is_first_chunk = pid_chunk == 0 if not is_last_chunk: offset_m_lead = ( offset_m + CHUNK_SIZE ) # offset_m = batch_offset + chunk_offset = batch_offset + (pid_chunk - 1) * CHUNK_SIZE dy_lead = tl._experimental_descriptor_load( dy_desc, [offset_m_lead, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value ) C_lead = tl._experimental_descriptor_load( C_desc, [offset_m_lead, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value ) dy_lead *= C_lead if LOAD_TOEPLITZ: T_c = tl._experimental_descriptor_load( T_hat_desc, [T_group_offset, 0], [CHUNK_SIZE, CHUNK_SIZE], DTYPE.value, ) else: T_c = load_correction_toeplitz( h_ptr, FILTER_LEN, CHUNK_SIZE, SINGLE_GROUP=SINGLE_GROUP, group_num=pid_filter_group, ) T_c = tl.trans(T_c) dcorrection = tl.dot( T_c, dy_lead, input_precision=input_precision, max_num_imprecise_acc=max_num_imprecise_acc, out_dtype=dy.dtype, ) dcorrection_dx = dcorrection * B dcorrection_dB = dcorrection * x dx += dcorrection_dx dB += dcorrection_dB tl._experimental_descriptor_store(dx_desc, dx, [offset_m, offset_k]) tl._experimental_descriptor_store(dB_desc, dB, [offset_m, offset_k]) # Store dhdT dhdT_idx, dhdT_mask = _get_T_store_idx(CHUNK_SIZE, FILTER_LEN, row_stride=dhdT_row_stride, col_stride=1) dhdT_offsets = ( pid_batch * dhdT_batch_stride + pid_chunk * dhdT_chunk_stride + pid_d * dhdT_block_stride + dhdT_idx ) tl.store(dhdT_ptr + dhdT_offsets, dT, mask=dhdT_mask) # num chunks per seq * num blocks per d # dT_batch_stride = num_tiles_m * num_tiles_k * FILTER_LEN # dT_chunk_stride = num_tiles_k * FILTER_LEN # dT_block_stride = FILTER_LEN # dT_offset_m = pid_batch * dT_batch_stride + pid_chunk * dT_chunk_stride + pid_d * dT_block_stride # tl._experimental_descriptor_store(dhdT_desc, dT, [dT_offset_m, 0]) if not is_first_chunk: offset_m_lag = offset_m - CHUNK_SIZE B_lag = tl._experimental_descriptor_load( B_desc, [offset_m_lag, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value ) x_lag = tl._experimental_descriptor_load( x_desc, [offset_m_lag, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value ) Bx_lag = B_lag * x_lag Bx_lag = tl.trans(Bx_lag) dTc = tl.dot( dy, Bx_lag, input_precision=input_precision, max_num_imprecise_acc=max_num_imprecise_acc, out_dtype=dy.dtype, ) dhdTc_idx, dhdTc_mask = _get_Tc_store_idx(CHUNK_SIZE, FILTER_LEN, row_stride=dhdT_row_stride, col_stride=1) dhdTc_offsets = ( pid_batch * dhdT_batch_stride + pid_chunk * dhdT_chunk_stride + pid_d * dhdT_block_stride + dhdTc_idx ) tl.store(dhdTc_ptr + dhdTc_offsets, dTc, mask=dhdTc_mask) def two_pass_bwd_grouped_tma( dy: 
torch.Tensor, x: torch.Tensor, B: torch.Tensor, C: torch.Tensor, h: torch.Tensor, y2: torch.Tensor, T: torch.Tensor = None, T_hat: torch.Tensor = None, version: str = "v1", schedule: str = "default", autotune: bool = False, CHUNK_SIZE: int = None, BLOCK_D: int = None, NUM_PIPELINE_STAGES: int = 0, # for tl.range THREADBLOCK_SWIZZLE: str = "row", num_warps: int = None, # TODO: Make sure to set match these defaults to those in CUDAOptions num_stages: int = 3, # for tl.dot, should default to 3 num_ctas: int = 1, maxnreg: int = None, warmup: bool = False, return_kernel: bool = False, ) -> Union[torch.tensor, tuple[triton.compiler.CompiledKernel, tuple[Any], tuple[Any]]]: """ Chunked two-pass backwards kernel with grouped filters with TMA enabled, only for sm90+ See `cgcg.triton.bwd_kernels.two_pass_bwd_grouped` for documentation on args and return values. """ bs, seqlen, g, dg = dy.shape filter_shape = h.shape hg, _in_channel_div_group, filter_len = filter_shape if autotune: raise NotImplementedError("Autotuning not implemented for bwd") else: assert all( [ CHUNK_SIZE, BLOCK_D, num_warps, num_stages is not None, NUM_PIPELINE_STAGES is not None, ] ), "Must specify all of CHUNK_SIZE, BLOCK_D, NUM_PIPELINE_STAGES, num_warps, num_stages, " if version == "v1": kernel: triton.runtime.JITFunction = _two_pass_bwd_grouped_tma_kernel elif version == "v2": raise NotImplementedError("v2 not implemented yet") else: raise ValueError(f"version {version} not implemented") if CHUNK_SIZE < filter_len: raise ValueError("CHUNK_SIZE must be >= filter_len") # basic shape checks assert dg >= 16, "dg must be >= 8 to use tensor-cores" assert x.shape == dy.shape == B.shape == C.shape == y2.shape assert hg == g assert _in_channel_div_group == 1 # hidden_dim d = g * dg x = x.reshape(bs, seqlen, d) B = B.reshape_as(x) C = C.reshape_as(x) dy = dy.reshape_as(x) # Intermediates from forward pass y2 = y2.reshape_as(x) batch_stride, row_stride, col_stride = dy.stride() if T is not None: assert T_hat is not None assert T.shape == T_hat.shape == torch.Size([g, CHUNK_SIZE, CHUNK_SIZE]) assert T.is_contiguous() assert T_hat.is_contiguous() # Kernel constexpr LOAD_TOEPLITZ = True # Create 2D TMA descriptors for loading T, T_hat since 1-D TMA descriptor can't be used when # block size must less than 256 elements, see https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__TENSOR__MEMORY.html # Indexing logic will be only along M dimension # Stride by CHUNK_SIZE along M to advance each block, where each block maps to a filter group T_M = g * CHUNK_SIZE T_K = CHUNK_SIZE # logger.debug("Creating 2D TMA descriptors for T, T_hat") desc_T = create_2d_tma_descriptor(T.data_ptr(), T_M, T_K, CHUNK_SIZE, CHUNK_SIZE, T.element_size()) desc_T_hat = create_2d_tma_descriptor(T_hat.data_ptr(), T_M, T_K, CHUNK_SIZE, CHUNK_SIZE, T_hat.element_size()) else: LOAD_TOEPLITZ = False desc_T = None desc_T_hat = None # triton kernel pre-conditions assert dy.is_contiguous() assert x.is_contiguous() assert B.is_contiguous() assert C.is_contiguous() assert y2.is_contiguous() # Reshape h to a 2-D tensor # TODO: remove? 
h = h.reshape(g, filter_len) assert h.is_contiguous() # use_autotuner = not any([CHUNK_SIZE, BLOCK_D, num_warps, NUM_PIPELINE_STAGES]) assert not ( autotune and warmup ), "autotune and warmup are not supported, use return_kernel=True to get the kernel after autotuning" if schedule == "default": def _1d_grid(META): row_tiles = triton.cdiv(seqlen, META["CHUNK_SIZE"]) col_tiles = triton.cdiv(d, META["BLOCK_D"]) total_tiles = bs * row_tiles * col_tiles return (total_tiles,) # logger.info("Setting NUM_PIPELINE_STAGES = 0 since schedule is `default`") NUM_PIPELINE_STAGES = 0 grid = _1d_grid elif schedule == "persistent": raise NotImplementedError("Skip persistent for now") # grid = lambda META: ( # min( # DEVICE_PROPS.NUM_SM, # triton.cdiv(seqlen, META["CHUNK_SIZE"]) # * triton.cdiv(d, META["BLOCK_D"]) # * bs, # ), # ) else: raise ValueError(f"schedule {schedule} not implemented") # Create TMA descriptors M, K = bs * seqlen, d # Load # logger.debug("Creating 2D TMA descriptors for dy, x, B, C, y2") desc_dy = create_2d_tma_descriptor(dy.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, dy.element_size()) desc_x = create_2d_tma_descriptor(x.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, x.element_size()) desc_B = create_2d_tma_descriptor(B.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, B.element_size()) desc_C = create_2d_tma_descriptor(C.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, C.element_size()) desc_y2 = create_2d_tma_descriptor(y2.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, y2.element_size()) # 1D TMA requirement: elementSize * numel() >= 128 # desc_h = create_1d_tma_descriptor(h.data_ptr(), g * hl, hl, h.element_size()) # Store dx = torch.zeros_like(x) dB = torch.zeros_like(B) dC = torch.zeros_like(C) desc_dx = create_2d_tma_descriptor(dx.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, dx.element_size()) desc_dB = create_2d_tma_descriptor(dB.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, dB.element_size()) desc_dC = create_2d_tma_descriptor(dC.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, dC.element_size()) num_chunks = triton.cdiv(seqlen, CHUNK_SIZE) num_blocks = triton.cdiv(d, BLOCK_D) if version == "v1": dhdT = torch.zeros( bs, num_chunks, num_blocks, filter_len, CHUNK_SIZE, device=h.device, dtype=h.dtype, ) dhdT_hat = torch.zeros_like(dhdT) ( dhdT_batch_stride, dhdT_chunk_stride, dhdT_block_stride, dhdT_row_stride, dhdT_col_stride, ) = dhdT.stride() # NOTE: can't use TMA since we are slicing segments of dhdT/dhdT_hat during store # 1D TMA descriptor does not work once filter_len > 6, hence we use 2D TMA # M is equal to bs * num_chunks * num_blocks * filter_len # K is equal to CHUNK_SIZE # block sizes are filter_len x CHUNK_SIZE # This means we only to need to advance along m dimension when storing # Calculate bs, chunk, and block ids by pid # Advance by filter_len along M # offset along k dimension should be 0 # dh_M = bs * num_chunks * num_blocks * filter_len # dh_K = CHUNK_SIZE # # # desc_dhdT = create_2d_tma_descriptor( # dhdT.data_ptr(), dh_M, dh_K, filter_len, CHUNK_SIZE, dhdT.element_size() # ) # desc_dhdT_hat = create_2d_tma_descriptor( # dhdT_hat.data_ptr(), # dh_M, # dh_K, # filter_len, # CHUNK_SIZE, # dhdT_hat.element_size(), # ) kernel_args = ( # Inputs desc_dy, desc_x, desc_B, desc_C, h, # Intermediate activations desc_y2, desc_T, desc_T_hat, # Outputs desc_dx, desc_dB, desc_dC, dhdT, dhdT_hat, # Strides batch_stride, row_stride, col_stride, dhdT_batch_stride, dhdT_chunk_stride, dhdT_block_stride, dhdT_row_stride, dhdT_col_stride, # Shapes bs, seqlen, g, dg, ) else: dh_buffers = torch.zeros(bs, num_chunks, num_blocks, filter_len, 
device=x.device, dtype=h.dtype) dh_batch_stride, dh_chunk_stride, dh_block_stride, _ = dh_buffers.stride() kernel_args = ( dy, x, B, C, h, # Intermediate activations y2, T, T_hat, # Outputs dx, dB, dC, dh_buffers, # Strides batch_stride, row_stride, col_stride, dh_batch_stride, dh_chunk_stride, dh_block_stride, # Shapes bs, seqlen, g, dg, ) # TODO: check dtypes all the same kernel_constexprs = { "FILTER_LEN": filter_len, "SINGLE_GROUP": g == 1, "LOAD_TOEPLITZ": LOAD_TOEPLITZ, "DTYPE": torch_dtype_to_triton(x.dtype), } if not autotune: kernel_constexprs.update( { "CHUNK_SIZE": CHUNK_SIZE, "BLOCK_D": BLOCK_D, "THREADBLOCK_SWIZZLE": THREADBLOCK_SWIZZLE, "num_warps": num_warps, "num_stages": num_stages, "num_ctas": num_ctas, "NUM_PIPELINE_STAGES": NUM_PIPELINE_STAGES, } ) # Can actually run this with fake tensors (no need for actual kernel tensor args) if warmup: compiled_kernel: triton.compiler.CompiledKernel = kernel.warmup(*kernel_args, **kernel_constexprs, grid=(1,)) return compiled_kernel, kernel_args, kernel_constexprs else: # results = [] # logger.info( # f"Running backward kernel {version} with {schedule=} {kernel_constexprs=}" # ) # Run the kernel compiled_kernel: triton.compiler.CompiledKernel = kernel[grid](*kernel_args, **kernel_constexprs) dx = dx.reshape(bs, seqlen, g, dg) dB = dB.reshape(bs, seqlen, g, dg) dC = dC.reshape(bs, seqlen, g, dg) num_blocks_per_filter_group = dg // BLOCK_D # Run second final reduction kernel for dh # TODO: either `torch.compile`` or write custom triton kernel for this if version == "v1": dhdT = dhdT.reshape(bs, num_chunks, g, num_blocks_per_filter_group, filter_len, CHUNK_SIZE) dhdT_hat = dhdT_hat.reshape_as(dhdT) dhdT = dhdT.sum([0, 1, 3, 5]).reshape(*filter_shape) dhdTc = dhdT_hat.sum([0, 1, 3, 5]).reshape_as(dhdT) dh = dhdT + dhdTc elif version == "v2": dh_buffers = dh_buffers.reshape(bs, num_chunks, g, num_blocks_per_filter_group, filter_len) dh = dh_buffers.sum([0, 1, 3]).reshape(*filter_shape) if return_kernel: return dx, dB, dC, dh, compiled_kernel else: return dx, dB, dC, dh # if __name__ == "__main__": # from bwd_kernels import two_pass_bwd_grouped # from savanna.kernels.triton_src.cgcg.ref_fwd import gcg_fwd_ref_corrected, gcg_two_pass_chunked_fwd_corrected # from savanna.kernels.triton_src.cgcg.utils import correction_toeplitz, toeplitz # bs, seqlen, d = (2, 128, 128) # g = 2 # hl = 4 # CHUNK_SIZE = 32 # BLOCK_D = 32 # dtype = torch.float32 # LOAD_TOEPLITZ = False # schedule = "default" # dg = d // g # num_warps = 4 # if filter_size < 128 else 2 # num_stages = 2 # 1 if filter_size > 6 else 2 # swizzle = "row" # autotune = False # def setup_inputs(bs, seqlen, dg, g, filter_size, dtype, requires_grad=True): # device = "cuda" # x = torch.randn( # bs, seqlen, g, dg, device=device, dtype=dtype, requires_grad=requires_grad # ) # B = torch.randn( # bs, seqlen, g, dg, device=device, dtype=dtype, requires_grad=requires_grad # ) # C = torch.randn( # bs, seqlen, g, dg, device=device, dtype=dtype, requires_grad=requires_grad # ) # h = torch.randn( # g * filter_size, device=device, dtype=dtype, requires_grad=requires_grad # ).reshape(g, 1, filter_size) # return x, B, C, h # x, B, C, h = setup_inputs(bs, seqlen, dg, g, hl, dtype) # # Ref grad # x_ref = x.detach().clone().requires_grad_() # B_ref = B.detach().clone().requires_grad_() # C_ref = C.detach().clone().requires_grad_() # h_ref = h.detach().clone().requires_grad_() # # We need y2 = T_local @ Bx + T_correction @ Bx_lag to calculate dC # # We can't use the chunked ref for calculating dh since h 
becomes detached when constructing T_local and T_c # _, _, _, y2, _ = gcg_two_pass_chunked_fwd_corrected( # x_ref.detach().clone(), # B_ref.detach().clone(), # C_ref.detach().clone(), # h_ref.detach().clone(), # gl=CHUNK_SIZE, # return_intermediates=True, # ) # if LOAD_TOEPLITZ: # h_ = h.flip(-1)[:, 0] # T = toeplitz(h_, CHUNK_SIZE) # T_hat = correction_toeplitz(h_, CHUNK_SIZE) # else: # T = None # T_hat = None # y_ref = gcg_fwd_ref_corrected(x_ref, B_ref, C_ref, h_ref) # # Backprop # dy = 0.1 * torch.randn_like(y_ref) # y_ref.backward(dy) # dx_ref = x_ref.grad # dB_ref = B_ref.grad # dC_ref = C_ref.grad # dh_ref = h_ref.grad # kernel_config = { # "CHUNK_SIZE": CHUNK_SIZE, # "BLOCK_D": BLOCK_D, # "num_warps": num_warps, # "NUM_PIPELINE_STAGES": 0 if schedule == "default" else 1, # "num_stages": num_stages, # "THREADBLOCK_SWIZZLE": swizzle, # } # dx_ref, dB_ref, dC_ref, dT_ref, dTc_ref, dh_ref_bwd = two_pass_bwd_grouped( # dy, # x_ref, # B_ref, # C_ref, # h_ref, # y2, # T=T, # T_hat=T_hat, # schedule=schedule, # autotune=autotune, # **kernel_config, # ) # # Test grad # dx, dB, dC, dT, dTc, dh = two_pass_bwd_grouped_tma( # dy, # x, # B, # C, # h, # y2, # T=T, # T_hat=T_hat, # schedule=schedule, # autotune=autotune, # **kernel_config, # ) # # print(f"dx_ref: {dx_ref}") # # print(f"dC_ref: {dC_ref}") # # print(f"dC: {dC}") # x_diff = (dx - dx_ref).abs().max() # print(f"x_diff: {x_diff}") # B_dff = (dB - dB_ref).abs().max() # print(f"B_diff: {B_dff}") # print(f"dC diff: {(dC - dC_ref).abs().max()}") # print(f"dT diff: {(dT - dT_ref).abs().max()}") # print(f"dTc diff: {(dTc - dTc_ref).abs().max()}") # print(f"dh diff: {(dh_ref_bwd - dh).abs().max()}") # print(f"dh_bwd diff: {(dh_ref - dh_ref_bwd).abs().max()}") # print(f"dh_bwd_tma diff: {(dh_ref - dh).abs().max()}") # # B_diff = (dB - B).abs().max() # # C_diff = (dC - C).abs().max() # # print(f"x_diff: {x_diff}, B_diff: {B_diff}, C_diff: {C_diff}")
@triton.jit def _two_pass_bwd_grouped_tma_kernel( # TMA descriptors # Inputs dy_desc, x_desc, B_desc, C_desc, h_ptr, # Intermediate activations y2_desc, T_desc, T_hat_desc, # Outputs dx_desc, dB_desc, dC_desc, dhdT_ptr, dhdTc_ptr, # Strides input_batch_stride, input_row_stride, input_col_stride, dhdT_batch_stride, dhdT_chunk_stride, dhdT_block_stride, dhdT_row_stride, dhdT_col_stride, # Shapes bs, seqlen, g, dg, # Compile-time constants FILTER_LEN: tl.constexpr, CHUNK_SIZE: tl.constexpr, BLOCK_D: tl.constexpr, DTYPE: tl.constexpr, LOAD_TOEPLITZ: tl.constexpr, SINGLE_GROUP: tl.constexpr, NUM_PIPELINE_STAGES: tl.constexpr, THREADBLOCK_SWIZZLE: tl.constexpr, FLUSH: tl.constexpr = False, # Flush TMA cache # kwargs for tl.dot input_precision: tl.constexpr = "ieee", # "ieee", "tf32", "tf32x3" --> only for debugging, since dtype < fp32 max_num_imprecise_acc: tl.constexpr = None, out_dtype: tl.constexpr = tl.float32, # Common triton kernel params # num_stages: tl.constexpr = 2, # num_warps: tl.constexpr = 4, DEBUG: tl.constexpr = False, ): # tl.static_print("DTYPE", DTYPE,) # tl.static_print("DTYPE.value", DTYPE.value) if DEBUG: if tl.program_id(0) == 0: tl.static_print( "TWO_PASS CONSTEXPRS:\n", "FILTER_LEN:", FILTER_LEN, "CHUNK_SIZE:", CHUNK_SIZE, "BLOCK_D:", BLOCK_D, "SINGLE_GROUP:", SINGLE_GROUP, "THREADBLOCK_SWIZZLE:", THREADBLOCK_SWIZZLE, ) # tl.static_print("DTYPE", DTYPE,) # tl.static_print("DTYPE.value", DTYPE.value) d = g * dg num_programs = tl.num_programs(0) # TMA offsets pid = tl.program_id(axis=0) num_tiles_m = tl.cdiv(seqlen, CHUNK_SIZE) num_tiles_k = tl.cdiv(d, BLOCK_D) num_tiles_batch = num_tiles_m * num_tiles_k total_tiles = bs * num_tiles_batch chunks_per_batch = num_tiles_m tiles_per_filter_group = dg // BLOCK_D chunks_per_seq = tl.cdiv(seqlen, CHUNK_SIZE) for tile_id in tl.range(pid, total_tiles, num_programs, num_stages=NUM_PIPELINE_STAGES): pid_batch, pid_chunk, pid_d, pid_filter_group = get_program_order( tile_id, num_tiles_batch, num_tiles_k, chunks_per_batch, tiles_per_filter_group, THREADBLOCK_SWIZZLE, ) batch_offset = pid_batch * num_tiles_m * CHUNK_SIZE chunk_offset = pid_chunk * CHUNK_SIZE offset_m = batch_offset + chunk_offset offset_k = pid_d * BLOCK_D if False: if pid == 0: tl.device_print("num_pid_m", num_tiles_m) tl.device_print("num_pid_k", num_tiles_k) tl.device_print("num_pid_batch", num_tiles_batch) tl.device_print("total_tiles", total_tiles) tl.device_print("pid", pid) tl.device_print("pid_batch", pid_batch) tl.device_print("pid_chunk", pid_chunk) tl.device_print("pid_d", pid_d) tl.device_print("pid_filter_group", pid_filter_group) tl.device_print("offset_m", offset_m) tl.device_print("offset_k", offset_k) dy = tl._experimental_descriptor_load(dy_desc, [offset_m, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value) x = tl._experimental_descriptor_load(x_desc, [offset_m, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value) B = tl._experimental_descriptor_load(B_desc, [offset_m, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value) C = tl._experimental_descriptor_load(C_desc, [offset_m, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value) y2 = tl._experimental_descriptor_load(y2_desc, [offset_m, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value) # Start backprop dC = dy * y2 tl._experimental_descriptor_store(dC_desc, dC, [offset_m, offset_k]) # # Backprop through C dy = dy * C if LOAD_TOEPLITZ: T_group_stride = CHUNK_SIZE T_group_offset = pid_filter_group * T_group_stride T = tl._experimental_descriptor_load(T_desc, [T_group_offset, 0], [CHUNK_SIZE, CHUNK_SIZE], DTYPE.value) else: T = 
load_toeplitz( h_ptr, FILTER_LEN, CHUNK_SIZE, SINGLE_GROUP=SINGLE_GROUP, group_num=pid_filter_group, ) T = tl.trans(T) dy1 = tl.dot( T, dy, input_precision=input_precision, max_num_imprecise_acc=max_num_imprecise_acc, out_dtype=dy.dtype, ) dx = dy1 * B dB = dy1 * x Bx = tl.trans(B * x) dT = tl.dot( dy, Bx, input_precision=input_precision, max_num_imprecise_acc=max_num_imprecise_acc, out_dtype=dy.dtype, ) # Correction term # In backwards, we roll in the opposite direction # Hence, the last chunk in the sequence does not need correction is_last_chunk = pid_chunk == chunks_per_seq - 1 is_first_chunk = pid_chunk == 0 if not is_last_chunk: offset_m_lead = ( offset_m + CHUNK_SIZE ) # offset_m = batch_offset + chunk_offset = batch_offset + (pid_chunk - 1) * CHUNK_SIZE dy_lead = tl._experimental_descriptor_load( dy_desc, [offset_m_lead, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value ) C_lead = tl._experimental_descriptor_load( C_desc, [offset_m_lead, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value ) dy_lead *= C_lead if LOAD_TOEPLITZ: T_c = tl._experimental_descriptor_load( T_hat_desc, [T_group_offset, 0], [CHUNK_SIZE, CHUNK_SIZE], DTYPE.value, ) else: T_c = load_correction_toeplitz( h_ptr, FILTER_LEN, CHUNK_SIZE, SINGLE_GROUP=SINGLE_GROUP, group_num=pid_filter_group, ) T_c = tl.trans(T_c) dcorrection = tl.dot( T_c, dy_lead, input_precision=input_precision, max_num_imprecise_acc=max_num_imprecise_acc, out_dtype=dy.dtype, ) dcorrection_dx = dcorrection * B dcorrection_dB = dcorrection * x dx += dcorrection_dx dB += dcorrection_dB tl._experimental_descriptor_store(dx_desc, dx, [offset_m, offset_k]) tl._experimental_descriptor_store(dB_desc, dB, [offset_m, offset_k]) # Store dhdT dhdT_idx, dhdT_mask = _get_T_store_idx(CHUNK_SIZE, FILTER_LEN, row_stride=dhdT_row_stride, col_stride=1) dhdT_offsets = ( pid_batch * dhdT_batch_stride + pid_chunk * dhdT_chunk_stride + pid_d * dhdT_block_stride + dhdT_idx ) tl.store(dhdT_ptr + dhdT_offsets, dT, mask=dhdT_mask) # num chunks per seq * num blocks per d # dT_batch_stride = num_tiles_m * num_tiles_k * FILTER_LEN # dT_chunk_stride = num_tiles_k * FILTER_LEN # dT_block_stride = FILTER_LEN # dT_offset_m = pid_batch * dT_batch_stride + pid_chunk * dT_chunk_stride + pid_d * dT_block_stride # tl._experimental_descriptor_store(dhdT_desc, dT, [dT_offset_m, 0]) if not is_first_chunk: offset_m_lag = offset_m - CHUNK_SIZE B_lag = tl._experimental_descriptor_load( B_desc, [offset_m_lag, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value ) x_lag = tl._experimental_descriptor_load( x_desc, [offset_m_lag, offset_k], [CHUNK_SIZE, BLOCK_D], DTYPE.value ) Bx_lag = B_lag * x_lag Bx_lag = tl.trans(Bx_lag) dTc = tl.dot( dy, Bx_lag, input_precision=input_precision, max_num_imprecise_acc=max_num_imprecise_acc, out_dtype=dy.dtype, ) dhdTc_idx, dhdTc_mask = _get_Tc_store_idx(CHUNK_SIZE, FILTER_LEN, row_stride=dhdT_row_stride, col_stride=1) dhdTc_offsets = ( pid_batch * dhdT_batch_stride + pid_chunk * dhdT_chunk_stride + pid_d * dhdT_block_stride + dhdTc_idx ) tl.store(dhdTc_ptr + dhdTc_offsets, dTc, mask=dhdTc_mask) def two_pass_bwd_grouped_tma( dy: torch.Tensor, x: torch.Tensor, B: torch.Tensor, C: torch.Tensor, h: torch.Tensor, y2: torch.Tensor, T: torch.Tensor = None, T_hat: torch.Tensor = None, version: str = "v1", schedule: str = "default", autotune: bool = False, CHUNK_SIZE: int = None, BLOCK_D: int = None, NUM_PIPELINE_STAGES: int = 0, # for tl.range THREADBLOCK_SWIZZLE: str = "row", num_warps: int = None, # TODO: Make sure to set match these defaults to those in CUDAOptions num_stages: 
int = 3, # for tl.dot, should default to 3 num_ctas: int = 1, maxnreg: int = None, warmup: bool = False, return_kernel: bool = False, ) -> Union[torch.tensor, tuple[triton.compiler.CompiledKernel, tuple[Any], tuple[Any]]]: """ Chunked two-pass backwards kernel with grouped filters with TMA enabled, only for sm90+ See `cgcg.triton.bwd_kernels.two_pass_bwd_grouped` for documentation on args and return values. """ bs, seqlen, g, dg = dy.shape filter_shape = h.shape hg, _in_channel_div_group, filter_len = filter_shape if autotune: raise NotImplementedError("Autotuning not implemented for bwd") else: assert all( [ CHUNK_SIZE, BLOCK_D, num_warps, num_stages is not None, NUM_PIPELINE_STAGES is not None, ] ), "Must specify all of CHUNK_SIZE, BLOCK_D, NUM_PIPELINE_STAGES, num_warps, num_stages, " if version == "v1": kernel: triton.runtime.JITFunction = _two_pass_bwd_grouped_tma_kernel elif version == "v2": raise NotImplementedError("v2 not implemented yet") else: raise ValueError(f"version {version} not implemented") if CHUNK_SIZE < filter_len: raise ValueError("CHUNK_SIZE must be >= filter_len") # basic shape checks assert dg >= 16, "dg must be >= 8 to use tensor-cores" assert x.shape == dy.shape == B.shape == C.shape == y2.shape assert hg == g assert _in_channel_div_group == 1 # hidden_dim d = g * dg x = x.reshape(bs, seqlen, d) B = B.reshape_as(x) C = C.reshape_as(x) dy = dy.reshape_as(x) # Intermediates from forward pass y2 = y2.reshape_as(x) batch_stride, row_stride, col_stride = dy.stride() if T is not None: assert T_hat is not None assert T.shape == T_hat.shape == torch.Size([g, CHUNK_SIZE, CHUNK_SIZE]) assert T.is_contiguous() assert T_hat.is_contiguous() # Kernel constexpr LOAD_TOEPLITZ = True # Create 2D TMA descriptors for loading T, T_hat since 1-D TMA descriptor can't be used when # block size must less than 256 elements, see https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__TENSOR__MEMORY.html # Indexing logic will be only along M dimension # Stride by CHUNK_SIZE along M to advance each block, where each block maps to a filter group T_M = g * CHUNK_SIZE T_K = CHUNK_SIZE # logger.debug("Creating 2D TMA descriptors for T, T_hat") desc_T = create_2d_tma_descriptor(T.data_ptr(), T_M, T_K, CHUNK_SIZE, CHUNK_SIZE, T.element_size()) desc_T_hat = create_2d_tma_descriptor(T_hat.data_ptr(), T_M, T_K, CHUNK_SIZE, CHUNK_SIZE, T_hat.element_size()) else: LOAD_TOEPLITZ = False desc_T = None desc_T_hat = None # triton kernel pre-conditions assert dy.is_contiguous() assert x.is_contiguous() assert B.is_contiguous() assert C.is_contiguous() assert y2.is_contiguous() # Reshape h to a 2-D tensor # TODO: remove? 
h = h.reshape(g, filter_len) assert h.is_contiguous() # use_autotuner = not any([CHUNK_SIZE, BLOCK_D, num_warps, NUM_PIPELINE_STAGES]) assert not ( autotune and warmup ), "autotune and warmup are not supported, use return_kernel=True to get the kernel after autotuning" if schedule == "default": def _1d_grid(META): row_tiles = triton.cdiv(seqlen, META["CHUNK_SIZE"]) col_tiles = triton.cdiv(d, META["BLOCK_D"]) total_tiles = bs * row_tiles * col_tiles return (total_tiles,) # logger.info("Setting NUM_PIPELINE_STAGES = 0 since schedule is `default`") NUM_PIPELINE_STAGES = 0 grid = _1d_grid elif schedule == "persistent": raise NotImplementedError("Skip persistent for now") # grid = lambda META: ( # min( # DEVICE_PROPS.NUM_SM, # triton.cdiv(seqlen, META["CHUNK_SIZE"]) # * triton.cdiv(d, META["BLOCK_D"]) # * bs, # ), # ) else: raise ValueError(f"schedule {schedule} not implemented") # Create TMA descriptors M, K = bs * seqlen, d # Load # logger.debug("Creating 2D TMA descriptors for dy, x, B, C, y2") desc_dy = create_2d_tma_descriptor(dy.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, dy.element_size()) desc_x = create_2d_tma_descriptor(x.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, x.element_size()) desc_B = create_2d_tma_descriptor(B.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, B.element_size()) desc_C = create_2d_tma_descriptor(C.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, C.element_size()) desc_y2 = create_2d_tma_descriptor(y2.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, y2.element_size()) # 1D TMA requirement: elementSize * numel() >= 128 # desc_h = create_1d_tma_descriptor(h.data_ptr(), g * hl, hl, h.element_size()) # Store dx = torch.zeros_like(x) dB = torch.zeros_like(B) dC = torch.zeros_like(C) desc_dx = create_2d_tma_descriptor(dx.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, dx.element_size()) desc_dB = create_2d_tma_descriptor(dB.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, dB.element_size()) desc_dC = create_2d_tma_descriptor(dC.data_ptr(), M, K, CHUNK_SIZE, BLOCK_D, dC.element_size()) num_chunks = triton.cdiv(seqlen, CHUNK_SIZE) num_blocks = triton.cdiv(d, BLOCK_D) if version == "v1": dhdT = torch.zeros( bs, num_chunks, num_blocks, filter_len, CHUNK_SIZE, device=h.device, dtype=h.dtype, ) dhdT_hat = torch.zeros_like(dhdT) ( dhdT_batch_stride, dhdT_chunk_stride, dhdT_block_stride, dhdT_row_stride, dhdT_col_stride, ) = dhdT.stride() # NOTE: can't use TMA since we are slicing segments of dhdT/dhdT_hat during store # 1D TMA descriptor does not work once filter_len > 6, hence we use 2D TMA # M is equal to bs * num_chunks * num_blocks * filter_len # K is equal to CHUNK_SIZE # block sizes are filter_len x CHUNK_SIZE # This means we only to need to advance along m dimension when storing # Calculate bs, chunk, and block ids by pid # Advance by filter_len along M # offset along k dimension should be 0 # dh_M = bs * num_chunks * num_blocks * filter_len # dh_K = CHUNK_SIZE # # # desc_dhdT = create_2d_tma_descriptor( # dhdT.data_ptr(), dh_M, dh_K, filter_len, CHUNK_SIZE, dhdT.element_size() # ) # desc_dhdT_hat = create_2d_tma_descriptor( # dhdT_hat.data_ptr(), # dh_M, # dh_K, # filter_len, # CHUNK_SIZE, # dhdT_hat.element_size(), # ) kernel_args = ( # Inputs desc_dy, desc_x, desc_B, desc_C, h, # Intermediate activations desc_y2, desc_T, desc_T_hat, # Outputs desc_dx, desc_dB, desc_dC, dhdT, dhdT_hat, # Strides batch_stride, row_stride, col_stride, dhdT_batch_stride, dhdT_chunk_stride, dhdT_block_stride, dhdT_row_stride, dhdT_col_stride, # Shapes bs, seqlen, g, dg, ) else: dh_buffers = torch.zeros(bs, num_chunks, num_blocks, filter_len, 
device=x.device, dtype=h.dtype) dh_batch_stride, dh_chunk_stride, dh_block_stride, _ = dh_buffers.stride() kernel_args = ( dy, x, B, C, h, # Intermediate activations y2, T, T_hat, # Outputs dx, dB, dC, dh_buffers, # Strides batch_stride, row_stride, col_stride, dh_batch_stride, dh_chunk_stride, dh_block_stride, # Shapes bs, seqlen, g, dg, ) # TODO: check dtypes all the same kernel_constexprs = { "FILTER_LEN": filter_len, "SINGLE_GROUP": g == 1, "LOAD_TOEPLITZ": LOAD_TOEPLITZ, "DTYPE": torch_dtype_to_triton(x.dtype), } if not autotune: kernel_constexprs.update( { "CHUNK_SIZE": CHUNK_SIZE, "BLOCK_D": BLOCK_D, "THREADBLOCK_SWIZZLE": THREADBLOCK_SWIZZLE, "num_warps": num_warps, "num_stages": num_stages, "num_ctas": num_ctas, "NUM_PIPELINE_STAGES": NUM_PIPELINE_STAGES, } ) # Can actually run this with fake tensors (no need for actual kernel tensor args) if warmup: compiled_kernel: triton.compiler.CompiledKernel = kernel.warmup(*kernel_args, **kernel_constexprs, grid=(1,)) return compiled_kernel, kernel_args, kernel_constexprs else: # results = [] # logger.info( # f"Running backward kernel {version} with {schedule=} {kernel_constexprs=}" # ) # Run the kernel compiled_kernel: triton.compiler.CompiledKernel = kernel[grid](*kernel_args, **kernel_constexprs) dx = dx.reshape(bs, seqlen, g, dg) dB = dB.reshape(bs, seqlen, g, dg) dC = dC.reshape(bs, seqlen, g, dg) num_blocks_per_filter_group = dg // BLOCK_D # Run second final reduction kernel for dh # TODO: either `torch.compile`` or write custom triton kernel for this if version == "v1": dhdT = dhdT.reshape(bs, num_chunks, g, num_blocks_per_filter_group, filter_len, CHUNK_SIZE) dhdT_hat = dhdT_hat.reshape_as(dhdT) dhdT = dhdT.sum([0, 1, 3, 5]).reshape(*filter_shape) dhdTc = dhdT_hat.sum([0, 1, 3, 5]).reshape_as(dhdT) dh = dhdT + dhdTc elif version == "v2": dh_buffers = dh_buffers.reshape(bs, num_chunks, g, num_blocks_per_filter_group, filter_len) dh = dh_buffers.sum([0, 1, 3]).reshape(*filter_shape) if return_kernel: return dx, dB, dC, dh, compiled_kernel else: return dx, dB, dC, dh # if __name__ == "__main__": # from bwd_kernels import two_pass_bwd_grouped # from savanna.kernels.triton_src.cgcg.ref_fwd import gcg_fwd_ref_corrected, gcg_two_pass_chunked_fwd_corrected # from savanna.kernels.triton_src.cgcg.utils import correction_toeplitz, toeplitz # bs, seqlen, d = (2, 128, 128) # g = 2 # hl = 4 # CHUNK_SIZE = 32 # BLOCK_D = 32 # dtype = torch.float32 # LOAD_TOEPLITZ = False # schedule = "default" # dg = d // g # num_warps = 4 # if filter_size < 128 else 2 # num_stages = 2 # 1 if filter_size > 6 else 2 # swizzle = "row" # autotune = False # def setup_inputs(bs, seqlen, dg, g, filter_size, dtype, requires_grad=True): # device = "cuda" # x = torch.randn( # bs, seqlen, g, dg, device=device, dtype=dtype, requires_grad=requires_grad # ) # B = torch.randn( # bs, seqlen, g, dg, device=device, dtype=dtype, requires_grad=requires_grad # ) # C = torch.randn( # bs, seqlen, g, dg, device=device, dtype=dtype, requires_grad=requires_grad # ) # h = torch.randn( # g * filter_size, device=device, dtype=dtype, requires_grad=requires_grad # ).reshape(g, 1, filter_size) # return x, B, C, h # x, B, C, h = setup_inputs(bs, seqlen, dg, g, hl, dtype) # # Ref grad # x_ref = x.detach().clone().requires_grad_() # B_ref = B.detach().clone().requires_grad_() # C_ref = C.detach().clone().requires_grad_() # h_ref = h.detach().clone().requires_grad_() # # We need y2 = T_local @ Bx + T_correction @ Bx_lag to calculate dC # # We can't use the chunked ref for calculating dh since h 
becomes detached when constructing T_local and T_c # _, _, _, y2, _ = gcg_two_pass_chunked_fwd_corrected( # x_ref.detach().clone(), # B_ref.detach().clone(), # C_ref.detach().clone(), # h_ref.detach().clone(), # gl=CHUNK_SIZE, # return_intermediates=True, # ) # if LOAD_TOEPLITZ: # h_ = h.flip(-1)[:, 0] # T = toeplitz(h_, CHUNK_SIZE) # T_hat = correction_toeplitz(h_, CHUNK_SIZE) # else: # T = None # T_hat = None # y_ref = gcg_fwd_ref_corrected(x_ref, B_ref, C_ref, h_ref) # # Backprop # dy = 0.1 * torch.randn_like(y_ref) # y_ref.backward(dy) # dx_ref = x_ref.grad # dB_ref = B_ref.grad # dC_ref = C_ref.grad # dh_ref = h_ref.grad # kernel_config = { # "CHUNK_SIZE": CHUNK_SIZE, # "BLOCK_D": BLOCK_D, # "num_warps": num_warps, # "NUM_PIPELINE_STAGES": 0 if schedule == "default" else 1, # "num_stages": num_stages, # "THREADBLOCK_SWIZZLE": swizzle, # } # dx_ref, dB_ref, dC_ref, dT_ref, dTc_ref, dh_ref_bwd = two_pass_bwd_grouped( # dy, # x_ref, # B_ref, # C_ref, # h_ref, # y2, # T=T, # T_hat=T_hat, # schedule=schedule, # autotune=autotune, # **kernel_config, # ) # # Test grad # dx, dB, dC, dT, dTc, dh = two_pass_bwd_grouped_tma( # dy, # x, # B, # C, # h, # y2, # T=T, # T_hat=T_hat, # schedule=schedule, # autotune=autotune, # **kernel_config, # ) # # print(f"dx_ref: {dx_ref}") # # print(f"dC_ref: {dC_ref}") # # print(f"dC: {dC}") # x_diff = (dx - dx_ref).abs().max() # print(f"x_diff: {x_diff}") # B_dff = (dB - dB_ref).abs().max() # print(f"B_diff: {B_dff}") # print(f"dC diff: {(dC - dC_ref).abs().max()}") # print(f"dT diff: {(dT - dT_ref).abs().max()}") # print(f"dTc diff: {(dTc - dTc_ref).abs().max()}") # print(f"dh diff: {(dh_ref_bwd - dh).abs().max()}") # print(f"dh_bwd diff: {(dh_ref - dh_ref_bwd).abs().max()}") # print(f"dh_bwd_tma diff: {(dh_ref - dh).abs().max()}") # # B_diff = (dB - B).abs().max() # # C_diff = (dC - C).abs().max() # # print(f"x_diff: {x_diff}, B_diff: {B_diff}, C_diff: {C_diff}")
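After _two_pass_bwd_grouped_tma_kernel returns, the v1 path of two_pass_bwd_grouped_tma folds the per-(batch, chunk, block) filter-gradient buffers dhdT and dhdT_hat down to the (g, 1, filter_len) filter shape on the host. A standalone sketch of that reduction with illustrative shapes; the helper name reduce_dh_v1 is not from the repository.
import torch

def reduce_dh_v1(dhdT, dhdT_hat, g, dg, BLOCK_D, filter_len):
    # Sketch of the host-side v1 reduction; buffers are (bs, num_chunks, num_blocks, filter_len, CHUNK_SIZE).
    bs, num_chunks, num_blocks, _, CHUNK_SIZE = dhdT.shape
    blocks_per_group = dg // BLOCK_D
    dhdT = dhdT.reshape(bs, num_chunks, g, blocks_per_group, filter_len, CHUNK_SIZE)
    dhdT_hat = dhdT_hat.reshape_as(dhdT)
    # Sum over batch, chunk, block-within-group, and the CHUNK_SIZE axis; keep (g, filter_len).
    dh = dhdT.sum([0, 1, 3, 5]) + dhdT_hat.sum([0, 1, 3, 5])
    return dh.reshape(g, 1, filter_len)

if __name__ == "__main__":
    bs, seqlen, g, dg = 2, 128, 2, 64
    CHUNK_SIZE, BLOCK_D, filter_len = 32, 32, 4
    num_chunks, num_blocks = seqlen // CHUNK_SIZE, (g * dg) // BLOCK_D
    dhdT = torch.randn(bs, num_chunks, num_blocks, filter_len, CHUNK_SIZE)
    dhdT_hat = torch.randn_like(dhdT)
    print(reduce_dh_v1(dhdT, dhdT_hat, g, dg, BLOCK_D, filter_len).shape)  # torch.Size([2, 1, 4])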
sustcsonglin/TN-LCFRS
parser/lcfrs_triton/merge_discontinuous.py
https://github.com/sustcsonglin/TN-LCFRS/blob/596de7771ce3069f0d13851a4ccb4c08aa8e3db9/parser/lcfrs_triton/merge_discontinuous.py
import pdb import statistics import torch import triton import triton.language as tl from torch.utils.checkpoint import checkpoint as ckp def checkpoint(func): def wrapper(*args, **kwargs): return ckp(func, *args, **kwargs) return wrapper @triton.jit def logaddexp(a, b): tmp = a - b return tl.where(tmp > 0, tl.log(tl.exp(b - a) + 1) + a, tl.log(tl.exp(a-b) + 1) + b) @triton.jit def _kernel_inside_merge_discontinuous_v1( alpha_c, tmp_merge, tmp_merge_normalizer, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, stride_normalizer1, stride_normalizer2, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. # for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) l_ptr = alpha_c + b_idx * stride_alpha_c1 + start * stride_alpha_c2 + gap_start * stride_alpha_c3 + 2*r1 + r2 + tl.arange(0, BLOCK_R3) r_ptr = alpha_c + b_idx * stride_alpha_c1 + gap_end * stride_alpha_c2 + end * stride_alpha_c3 + 2*r1 + r2 + r3 + tl.arange(0, BLOCK_R3) mask = tl.arange(0, BLOCK_R3) < r3 child_l = tl.load(l_ptr, mask=mask, other=-1e9) child_r = tl.load(r_ptr, mask=mask, other=-1e9) acc1 = child_l + child_r acc_max = tl.max(acc1, 0) tl.store(tmp_merge_normalizer + b_idx * stride_normalizer1 + tl.program_id(1) * stride_normalizer2 + tl.program_id(2), acc_max) acc = tl.exp(acc1 - acc_max) tl.store(tmp_merge + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), acc, mask=mask) @triton.jit def _kernel_bwd_merge_discontinuous_v1( alpha_c, tmp_merge_normalized, tmp_merge_grad, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, BLOCK_R4: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. # for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). 
tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) # acc3 = tl.zeros((BLOCK_R4,), dtype=tl.float32) - 1e9 # acc4 = tl.zeros((BLOCK_R4,),dtype=tl.float32) - 1e9 # acc5 = tl.zeros((BLOCK_R4,),dtype=tl.float32) - 1e9 ## discontinuous parent nodes with two continuous child nodes # [i, j], [m, n] -> [i, j, m, n] l_bwd_ptr = alpha_c + b_idx * stride_alpha_c1 + gap_start * stride_alpha_c2 + start * stride_alpha_c3 + 2*r1 + r2 + tl.arange(0, BLOCK_R3) r_bwd_ptr = alpha_c + b_idx * stride_alpha_c1 + end * stride_alpha_c2 + gap_end * stride_alpha_c3 + 2*r1 + r2 + r3 + tl.arange(0, BLOCK_R3) mask = tl.arange(0, BLOCK_R3) < r3 do = tl.load( tmp_merge_normalized + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), mask=mask, other=0 ) do *= tl.load( tmp_merge_grad + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), mask = mask, other=0 ) tl.atomic_add(l_bwd_ptr, do, mask=mask) tl.atomic_add(r_bwd_ptr, do, mask=mask) @triton.jit def _kernel_inside_merge_discontinuous_v2( alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, tmp_normalizer, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_alpha_d1, stride_alpha_d2, stride_alpha_d3, stride_alpha_d4, stride_alpha_d5, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, stride_tmp_normalizer1, stride_tmp_normalizer2, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, BLOCK_R4: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. # for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). 
tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) acc2 = tl.zeros((BLOCK_R4,), dtype=tl.float32) - 1e9 ## discontinuous parent nodes with one continuous child node and another discontinuous child node alpha_c_ptr = alpha_c + b_idx * stride_alpha_c1 + 2*r1 + r2 + 2 * r3 + tl.arange(0, BLOCK_R4) alpha_d_ptr = alpha_d + b_idx * stride_alpha_d1 + r2 + tl.arange(0, BLOCK_R4) mask = tl.arange(0, BLOCK_R4) < r4 for split in range(start+1, gap_start): #### continuous [i, j], discontinuous [j, k, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + start * stride_alpha_c2 + split * stride_alpha_c3 d_ptr = alpha_d_ptr + split * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + gap_start * stride_alpha_c3 + r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + split * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) for split in range(gap_end+1, end): #### continuous [m, j], discontinuous [i, k, j, n] -> discontinuous [i, k, m, n]. c_ptr = alpha_c_ptr + gap_end * stride_alpha_c2 + split * stride_alpha_c3 + 2* r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + split * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + end * stride_alpha_c3 + 3 * r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + split * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) # acc = tl.cat(acc1, acc2, acc3, acc4, acc5) # acc_max = tl.max(acc1, 0) acc_max = tl.max(acc2, 0) # acc_max = tl.maximum(acc_max, tl.max(acc3, 0)) # acc_max = tl.maximum(acc_max, tl.max(acc4, 0)) # acc_max = tl.maximum(acc_max, tl.max(acc5, 0)) tl.store(tmp_normalizer + b_idx * stride_tmp_normalizer1 + tl.program_id(1) * stride_tmp_normalizer2 + tl.program_id(2), acc_max) ptr = b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R4) out = tl.exp(acc2 - acc_max) tl.store(tmp_merge + ptr , acc2, mask=mask) tl.store(tmp_merge_normalized + ptr, out, mask=mask) @triton.jit def _kernel_bwd_merge_discontinuous_v2( alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, tmp_merge_grad, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_alpha_d1, stride_alpha_d2, stride_alpha_d3, stride_alpha_d4, stride_alpha_d5, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, BLOCK_R4: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. 
# for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) ptr = b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R4) ## discontinuous parent nodes with one continuous child node and another discontinuous child node alpha_c_ptr = alpha_c + b_idx * stride_alpha_c1 + 2*r1 + r2 + 2 * r3 + tl.arange(0, BLOCK_R4) alpha_d_ptr = alpha_d + b_idx * stride_alpha_d1 + r2 + tl.arange(0, BLOCK_R4) mask = tl.arange(0, BLOCK_R4) < r4 parent_score = tl.load(tmp_merge + ptr, mask=mask, other=0) do = tl.load(tmp_merge_normalized + ptr, mask=mask, other=0) * tl.load(tmp_merge_grad + ptr, mask=mask, other=0) for split in range(start+1, gap_start): #### continuous [i, j], discontinuous [j, k, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + start * stride_alpha_c2 + split * stride_alpha_c3 d_ptr = alpha_d_ptr + split * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask, other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + split * stride_alpha_c2 + start * stride_alpha_c3 d_bwd_ptr = alpha_d_ptr + gap_start * stride_alpha_d2 + split * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + gap_start * stride_alpha_c3 + r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + split * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask,other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + gap_start * stride_alpha_c2 + split * stride_alpha_c3 + r4 d_bwd_ptr = alpha_d_ptr + split * stride_alpha_d2 + start * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) for split in range(gap_end+1, end): #### continuous [m, j], discontinuous [i, k, j, n] -> discontinuous [i, k, m, n]. 
c_ptr = alpha_c_ptr + gap_end * stride_alpha_c2 + split * stride_alpha_c3 + 2* r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + split * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask,other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + split * stride_alpha_c2 + gap_end * stride_alpha_c3 + 2* r4 d_bwd_ptr = alpha_d_ptr + gap_start * stride_alpha_d2 + start * stride_alpha_d3 + split * stride_alpha_d4 + end * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + end * stride_alpha_c3 + 3 * r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + split * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask,other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + end * stride_alpha_c2 + split * stride_alpha_c3 + 3 * r4 d_bwd_ptr = alpha_d_ptr + gap_start * stride_alpha_d2 + start * stride_alpha_d3 + gap_end * stride_alpha_d4 + split * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) ### The reason why not save tmp_merge is that it could be recomputed very easily w/o overhead ### while saving ``tmp_merge'' wastes lots of memory class MERGE_D1(torch.autograd.Function): @staticmethod def forward(ctx, alpha_c, dimension_info): B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) tmp_merge_normalized = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2), r3).fill_(0) tmp_normalizer = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2)).fill_(-1e9) grid = (triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_inside_merge_discontinuous_v1[grid](alpha_c, tmp_merge_normalized, tmp_normalizer, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), tmp_merge_normalized.stride(0), tmp_merge_normalized.stride(1), tmp_merge_normalized.stride(2), tmp_normalizer.stride(0), tmp_normalizer.stride(1), r1, r2, r3, r4, BLOCK_R3=triton.next_power_of_2(r3) ) ctx.save_for_backward(tmp_merge_normalized, alpha_c, dimension_info) return tmp_merge_normalized, tmp_normalizer @staticmethod def backward(ctx, do, do2): tmp_merge_normalized, alpha_c, dimension_info = ctx.saved_tensors B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) grid = (triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_bwd_merge_discontinuous_v1[grid]( alpha_c, tmp_merge_normalized, do, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), tmp_merge_normalized.stride(0), tmp_merge_normalized.stride(1), tmp_merge_normalized.stride(2), r1, r2, r3, r4, BLOCK_R3= triton.next_power_of_2(r3), BLOCK_R4= triton.next_power_of_2(r4) ) return alpha_c, None class MERGE_D2(torch.autograd.Function): @staticmethod def forward(ctx, alpha_c, alpha_d, dimension_info): B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) tmp_merge = alpha_c.new_zeros(B, w-1, 
int((N-w)*(N-w+1)/2), r4).fill_(-1e9) tmp_merge_normalized = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2), r4) tmp_normalizer = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2)).fill_(-1e9) grid = ( triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_inside_merge_discontinuous_v2[grid](alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, tmp_normalizer, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), alpha_d.stride(0), alpha_d.stride(1), alpha_d.stride(2), alpha_d.stride(3), alpha_d.stride(4), tmp_merge.stride(0), tmp_merge.stride(1), tmp_merge.stride(2), tmp_normalizer.stride(0), tmp_normalizer.stride(1), r1, r2, r3, r4, BLOCK_R3 =triton.next_power_of_2(r3), BLOCK_R4 = triton.next_power_of_2(r4) ) ctx.save_for_backward(tmp_merge, tmp_merge_normalized, alpha_c, alpha_d, dimension_info) return tmp_merge_normalized, tmp_normalizer @staticmethod def backward(ctx, do, do2): tmp_merge, tmp_merge_normalized, alpha_c, alpha_d, dimension_info = ctx.saved_tensors B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) # tmp_normalizer = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2)).fill_(-1e9) grid = (triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_bwd_merge_discontinuous_v2[grid](alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, do, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), alpha_d.stride(0), alpha_d.stride(1), alpha_d.stride(2), alpha_d.stride(3), alpha_d.stride(4), tmp_merge.stride(0), tmp_merge.stride(1), tmp_merge.stride(2), r1, r2, r3, r4, BLOCK_R3 = triton.next_power_of_2(r3), BLOCK_R4 = triton.next_power_of_2(r4) ) return alpha_c, alpha_d, None _merge_discontinuous_v1 = MERGE_D1.apply _merge_discontinuous_v2 = MERGE_D2.apply def merge_discontinuous_v1( alpha_c, f_d1, dimension_info, ): out, normalizer = _merge_discontinuous_v1(alpha_c, dimension_info) return ((out @ f_d1) + 1e-9).log() + normalizer[..., None] def merge_discontinuous_v2( alpha_c, alpha_d, f_d2, dimension_info, ): out, normalizer = _merge_discontinuous_v2(alpha_c, alpha_d, dimension_info) return ((out @ f_d2) + 1e-9).log() + normalizer[..., None] # else: # return # @checkpoint def _merge_discontinuous( alpha_c, alpha_d, f_d1, f_d2, dimension_info ): out1 = merge_discontinuous_v1(alpha_c, f_d1, dimension_info) out2 = merge_discontinuous_v2(alpha_c, alpha_d, f_d2, dimension_info) return torch.logaddexp(out1, out2)
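# A minimal sketch (stand-in tensor names, not the project's real tmp_merge / f_d1 buffers) of the
# normalization pattern used by merge_discontinuous_v1/v2 above: ((out @ f) + 1e-9).log() + normalizer
# is a log-space contraction logsumexp_r(score[..., r] + log f[r, s]) carried out as a plain matmul on
# exp-normalized scores plus the per-cell max saved by the kernels.
import torch

acc = torch.randn(4, 7)                        # log-scores over r components (stand-in for tmp_merge)
f = torch.rand(7, 5) + 0.1                     # positive rule weights (stand-in for f_d1 / f_d2)
normalizer = acc.max(-1, keepdim=True).values  # what the kernels store per cell
out = torch.exp(acc - normalizer)              # what the kernels store in tmp_merge_normalized
result = ((out @ f) + 1e-9).log() + normalizer
reference = torch.logsumexp(acc.unsqueeze(-1) + f.log().unsqueeze(0), dim=1)
assert torch.allclose(result, reference, atol=1e-4)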
@triton.jit
def logaddexp(a, b):
    # Numerically stable log(exp(a) + exp(b)): factor out the larger argument before exponentiating.
    tmp = a - b
    return tl.where(tmp > 0, tl.log(tl.exp(b - a) + 1) + a, tl.log(tl.exp(a - b) + 1) + b)
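# A minimal PyTorch mirror of the helper above, checked against torch.logaddexp (used here only as a
# reference; it is not part of this file):
import torch

def logaddexp_ref(a, b):
    big = torch.maximum(a, b)
    small = torch.minimum(a, b)
    return big + torch.log1p(torch.exp(small - big))

a = torch.tensor([-1e9, 0.0, 5.0])
b = torch.tensor([0.0, 0.0, 2.0])
assert torch.allclose(logaddexp_ref(a, b), torch.logaddexp(a, b))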
sustcsonglin/TN-LCFRS
parser/lcfrs_triton/merge_discontinuous.py
https://github.com/sustcsonglin/TN-LCFRS/blob/596de7771ce3069f0d13851a4ccb4c08aa8e3db9/parser/lcfrs_triton/merge_discontinuous.py
import pdb import statistics import torch import triton import triton.language as tl from torch.utils.checkpoint import checkpoint as ckp def checkpoint(func): def wrapper(*args, **kwargs): return ckp(func, *args, **kwargs) return wrapper @triton.jit def logaddexp(a, b): tmp = a - b return tl.where(tmp > 0, tl.log(tl.exp(b - a) + 1) + a, tl.log(tl.exp(a-b) + 1) + b) @triton.jit def _kernel_inside_merge_discontinuous_v1( alpha_c, tmp_merge, tmp_merge_normalizer, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, stride_normalizer1, stride_normalizer2, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. # for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) l_ptr = alpha_c + b_idx * stride_alpha_c1 + start * stride_alpha_c2 + gap_start * stride_alpha_c3 + 2*r1 + r2 + tl.arange(0, BLOCK_R3) r_ptr = alpha_c + b_idx * stride_alpha_c1 + gap_end * stride_alpha_c2 + end * stride_alpha_c3 + 2*r1 + r2 + r3 + tl.arange(0, BLOCK_R3) mask = tl.arange(0, BLOCK_R3) < r3 child_l = tl.load(l_ptr, mask=mask, other=-1e9) child_r = tl.load(r_ptr, mask=mask, other=-1e9) acc1 = child_l + child_r acc_max = tl.max(acc1, 0) tl.store(tmp_merge_normalizer + b_idx * stride_normalizer1 + tl.program_id(1) * stride_normalizer2 + tl.program_id(2), acc_max) acc = tl.exp(acc1 - acc_max) tl.store(tmp_merge + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), acc, mask=mask) @triton.jit def _kernel_bwd_merge_discontinuous_v1( alpha_c, tmp_merge_normalized, tmp_merge_grad, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, BLOCK_R4: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. # for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). 
tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) # acc3 = tl.zeros((BLOCK_R4,), dtype=tl.float32) - 1e9 # acc4 = tl.zeros((BLOCK_R4,),dtype=tl.float32) - 1e9 # acc5 = tl.zeros((BLOCK_R4,),dtype=tl.float32) - 1e9 ## discontinuous parent nodes with two continuous child nodes # [i, j], [m, n] -> [i, j, m, n] l_bwd_ptr = alpha_c + b_idx * stride_alpha_c1 + gap_start * stride_alpha_c2 + start * stride_alpha_c3 + 2*r1 + r2 + tl.arange(0, BLOCK_R3) r_bwd_ptr = alpha_c + b_idx * stride_alpha_c1 + end * stride_alpha_c2 + gap_end * stride_alpha_c3 + 2*r1 + r2 + r3 + tl.arange(0, BLOCK_R3) mask = tl.arange(0, BLOCK_R3) < r3 do = tl.load( tmp_merge_normalized + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), mask=mask, other=0 ) do *= tl.load( tmp_merge_grad + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), mask = mask, other=0 ) tl.atomic_add(l_bwd_ptr, do, mask=mask) tl.atomic_add(r_bwd_ptr, do, mask=mask) @triton.jit def _kernel_inside_merge_discontinuous_v2( alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, tmp_normalizer, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_alpha_d1, stride_alpha_d2, stride_alpha_d3, stride_alpha_d4, stride_alpha_d5, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, stride_tmp_normalizer1, stride_tmp_normalizer2, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, BLOCK_R4: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. # for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). 
tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) acc2 = tl.zeros((BLOCK_R4,), dtype=tl.float32) - 1e9 ## discontinuous parent nodes with one continuous child node and another discontinuous child node alpha_c_ptr = alpha_c + b_idx * stride_alpha_c1 + 2*r1 + r2 + 2 * r3 + tl.arange(0, BLOCK_R4) alpha_d_ptr = alpha_d + b_idx * stride_alpha_d1 + r2 + tl.arange(0, BLOCK_R4) mask = tl.arange(0, BLOCK_R4) < r4 for split in range(start+1, gap_start): #### continuous [i, j], discontinuous [j, k, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + start * stride_alpha_c2 + split * stride_alpha_c3 d_ptr = alpha_d_ptr + split * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + gap_start * stride_alpha_c3 + r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + split * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) for split in range(gap_end+1, end): #### continuous [m, j], discontinuous [i, k, j, n] -> discontinuous [i, k, m, n]. c_ptr = alpha_c_ptr + gap_end * stride_alpha_c2 + split * stride_alpha_c3 + 2* r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + split * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + end * stride_alpha_c3 + 3 * r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + split * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) # acc = tl.cat(acc1, acc2, acc3, acc4, acc5) # acc_max = tl.max(acc1, 0) acc_max = tl.max(acc2, 0) # acc_max = tl.maximum(acc_max, tl.max(acc3, 0)) # acc_max = tl.maximum(acc_max, tl.max(acc4, 0)) # acc_max = tl.maximum(acc_max, tl.max(acc5, 0)) tl.store(tmp_normalizer + b_idx * stride_tmp_normalizer1 + tl.program_id(1) * stride_tmp_normalizer2 + tl.program_id(2), acc_max) ptr = b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R4) out = tl.exp(acc2 - acc_max) tl.store(tmp_merge + ptr , acc2, mask=mask) tl.store(tmp_merge_normalized + ptr, out, mask=mask) @triton.jit def _kernel_bwd_merge_discontinuous_v2( alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, tmp_merge_grad, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_alpha_d1, stride_alpha_d2, stride_alpha_d3, stride_alpha_d4, stride_alpha_d5, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, BLOCK_R4: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. 
# for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) ptr = b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R4) ## discontinuous parent nodes with one continuous child node and another discontinuous child node alpha_c_ptr = alpha_c + b_idx * stride_alpha_c1 + 2*r1 + r2 + 2 * r3 + tl.arange(0, BLOCK_R4) alpha_d_ptr = alpha_d + b_idx * stride_alpha_d1 + r2 + tl.arange(0, BLOCK_R4) mask = tl.arange(0, BLOCK_R4) < r4 parent_score = tl.load(tmp_merge + ptr, mask=mask, other=0) do = tl.load(tmp_merge_normalized + ptr, mask=mask, other=0) * tl.load(tmp_merge_grad + ptr, mask=mask, other=0) for split in range(start+1, gap_start): #### continuous [i, j], discontinuous [j, k, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + start * stride_alpha_c2 + split * stride_alpha_c3 d_ptr = alpha_d_ptr + split * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask, other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + split * stride_alpha_c2 + start * stride_alpha_c3 d_bwd_ptr = alpha_d_ptr + gap_start * stride_alpha_d2 + split * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + gap_start * stride_alpha_c3 + r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + split * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask,other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + gap_start * stride_alpha_c2 + split * stride_alpha_c3 + r4 d_bwd_ptr = alpha_d_ptr + split * stride_alpha_d2 + start * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) for split in range(gap_end+1, end): #### continuous [m, j], discontinuous [i, k, j, n] -> discontinuous [i, k, m, n]. 
c_ptr = alpha_c_ptr + gap_end * stride_alpha_c2 + split * stride_alpha_c3 + 2* r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + split * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask,other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + split * stride_alpha_c2 + gap_end * stride_alpha_c3 + 2* r4 d_bwd_ptr = alpha_d_ptr + gap_start * stride_alpha_d2 + start * stride_alpha_d3 + split * stride_alpha_d4 + end * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + end * stride_alpha_c3 + 3 * r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + split * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask,other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + end * stride_alpha_c2 + split * stride_alpha_c3 + 3 * r4 d_bwd_ptr = alpha_d_ptr + gap_start * stride_alpha_d2 + start * stride_alpha_d3 + gap_end * stride_alpha_d4 + split * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) ### The reason why not save tmp_merge is that it could be recomputed very easily w/o overhead ### while saving ``tmp_merge'' wastes lots of memory class MERGE_D1(torch.autograd.Function): @staticmethod def forward(ctx, alpha_c, dimension_info): B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) tmp_merge_normalized = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2), r3).fill_(0) tmp_normalizer = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2)).fill_(-1e9) grid = (triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_inside_merge_discontinuous_v1[grid](alpha_c, tmp_merge_normalized, tmp_normalizer, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), tmp_merge_normalized.stride(0), tmp_merge_normalized.stride(1), tmp_merge_normalized.stride(2), tmp_normalizer.stride(0), tmp_normalizer.stride(1), r1, r2, r3, r4, BLOCK_R3=triton.next_power_of_2(r3) ) ctx.save_for_backward(tmp_merge_normalized, alpha_c, dimension_info) return tmp_merge_normalized, tmp_normalizer @staticmethod def backward(ctx, do, do2): tmp_merge_normalized, alpha_c, dimension_info = ctx.saved_tensors B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) grid = (triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_bwd_merge_discontinuous_v1[grid]( alpha_c, tmp_merge_normalized, do, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), tmp_merge_normalized.stride(0), tmp_merge_normalized.stride(1), tmp_merge_normalized.stride(2), r1, r2, r3, r4, BLOCK_R3= triton.next_power_of_2(r3), BLOCK_R4= triton.next_power_of_2(r4) ) return alpha_c, None class MERGE_D2(torch.autograd.Function): @staticmethod def forward(ctx, alpha_c, alpha_d, dimension_info): B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) tmp_merge = alpha_c.new_zeros(B, w-1, 
int((N-w)*(N-w+1)/2), r4).fill_(-1e9) tmp_merge_normalized = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2), r4) tmp_normalizer = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2)).fill_(-1e9) grid = ( triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_inside_merge_discontinuous_v2[grid](alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, tmp_normalizer, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), alpha_d.stride(0), alpha_d.stride(1), alpha_d.stride(2), alpha_d.stride(3), alpha_d.stride(4), tmp_merge.stride(0), tmp_merge.stride(1), tmp_merge.stride(2), tmp_normalizer.stride(0), tmp_normalizer.stride(1), r1, r2, r3, r4, BLOCK_R3 =triton.next_power_of_2(r3), BLOCK_R4 = triton.next_power_of_2(r4) ) ctx.save_for_backward(tmp_merge, tmp_merge_normalized, alpha_c, alpha_d, dimension_info) return tmp_merge_normalized, tmp_normalizer @staticmethod def backward(ctx, do, do2): tmp_merge, tmp_merge_normalized, alpha_c, alpha_d, dimension_info = ctx.saved_tensors B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) # tmp_normalizer = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2)).fill_(-1e9) grid = (triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_bwd_merge_discontinuous_v2[grid](alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, do, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), alpha_d.stride(0), alpha_d.stride(1), alpha_d.stride(2), alpha_d.stride(3), alpha_d.stride(4), tmp_merge.stride(0), tmp_merge.stride(1), tmp_merge.stride(2), r1, r2, r3, r4, BLOCK_R3 = triton.next_power_of_2(r3), BLOCK_R4 = triton.next_power_of_2(r4) ) return alpha_c, alpha_d, None _merge_discontinuous_v1 = MERGE_D1.apply _merge_discontinuous_v2 = MERGE_D2.apply def merge_discontinuous_v1( alpha_c, f_d1, dimension_info, ): out, normalizer = _merge_discontinuous_v1(alpha_c, dimension_info) return ((out @ f_d1) + 1e-9).log() + normalizer[..., None] def merge_discontinuous_v2( alpha_c, alpha_d, f_d2, dimension_info, ): out, normalizer = _merge_discontinuous_v2(alpha_c, alpha_d, dimension_info) return ((out @ f_d2) + 1e-9).log() + normalizer[..., None] # else: # return # @checkpoint def _merge_discontinuous( alpha_c, alpha_d, f_d1, f_d2, dimension_info ): out1 = merge_discontinuous_v1(alpha_c, f_d1, dimension_info) out2 = merge_discontinuous_v2(alpha_c, alpha_d, f_d2, dimension_info) return torch.logaddexp(out1, out2)
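# A host-side sketch (hypothetical helper, not part of the repository) of the flat-index decoding that
# every kernel above repeats: tl.program_id(2) enumerates (start, gap-length) pairs in groups of size
# (L - w - start), which is why the grid uses (N - w) * (N - w + 1) / 2 cells per left-span length.
def decode_program_id(tid, w, L, span_length_left):
    start = 0
    while tid >= (L - w - start):   # peel off one group of span configurations per start position
        tid -= (L - w - start)
        start += 1
    gap_start = start + span_length_left
    gap_end = gap_start + (tid + 1)
    end = gap_end + (w - span_length_left)
    return start, gap_start, gap_end, end

# Sanity check: every flat id maps to a distinct span configuration.
w, L = 3, 10
cells = [decode_program_id(t, w, L, 1) for t in range((L - w) * (L - w + 1) // 2)]
assert len(set(cells)) == (L - w) * (L - w + 1) // 2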
@triton.jit
def _kernel_inside_merge_discontinuous_v1(
    alpha_c, tmp_merge, tmp_merge_normalizer,
    w, batch, L,
    stride_alpha_c1, stride_alpha_c2, stride_alpha_c3,
    stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3,
    stride_normalizer1, stride_normalizer2,
    r1, r2, r3, r4,
    BLOCK_R3: tl.constexpr,
):
    ## Find index. tl.program_id(1) has size w-1; each value is the length of the left continuous
    ## subspan of a discontinuous parent.
    # For each tl.program_id(1), the number of possible discontinuous spans is the same:
    # len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2.
    ## The group of size (L-w-i) corresponds to start position i, and each j in [0, L-w-i) within the
    ## group encodes the gap length (gap end - gap start).
    ## To avoid wasting half of the computation, the flat index is decoded manually as follows.
    b_idx = tl.program_id(0)
    if b_idx >= batch:
        return
    span_length_left = tl.program_id(1) + 1
    tid = tl.program_id(2)
    start = 0
    # Peel off groups of size (L-w-start) until tid falls inside the group for this start;
    # the remainder then encodes the gap length.
    while tid >= (L - w - start):
        tid -= (L - w - start)
        start += 1
    gap_start = start + span_length_left
    gap_end = gap_start + (tid + 1)
    end = gap_end + (w - span_length_left)

    l_ptr = alpha_c + b_idx * stride_alpha_c1 + start * stride_alpha_c2 + gap_start * stride_alpha_c3 + 2*r1 + r2 + tl.arange(0, BLOCK_R3)
    r_ptr = alpha_c + b_idx * stride_alpha_c1 + gap_end * stride_alpha_c2 + end * stride_alpha_c3 + 2*r1 + r2 + r3 + tl.arange(0, BLOCK_R3)
    mask = tl.arange(0, BLOCK_R3) < r3
    child_l = tl.load(l_ptr, mask=mask, other=-1e9)
    child_r = tl.load(r_ptr, mask=mask, other=-1e9)
    acc1 = child_l + child_r

    acc_max = tl.max(acc1, 0)
    tl.store(tmp_merge_normalizer + b_idx * stride_normalizer1 + tl.program_id(1) * stride_normalizer2 + tl.program_id(2), acc_max)
    acc = tl.exp(acc1 - acc_max)
    tl.store(tmp_merge + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), acc, mask=mask)
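# For reference, a minimal PyTorch sketch (assumed memory layout, not a drop-in replacement) of what one
# program instance of _kernel_inside_merge_discontinuous_v1 computes for a single
# (b_idx, span_length_left, tid) cell; the channel offsets 2*r1 + r2 mirror the pointer arithmetic above.
import torch

def inside_merge_v1_cell(alpha_c, b, start, gap_start, gap_end, end, r1, r2, r3):
    off = 2 * r1 + r2
    child_l = alpha_c[b, start, gap_start, off:off + r3]       # left continuous child [start, gap_start]
    child_r = alpha_c[b, gap_end, end, off + r3:off + 2 * r3]  # right continuous child [gap_end, end]
    acc = child_l + child_r                                    # log-score of pairing the two children
    acc_max = acc.max()                                        # per-cell normalizer (tmp_merge_normalizer)
    return torch.exp(acc - acc_max), acc_max                   # row of tmp_merge and its normalizer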
sustcsonglin/TN-LCFRS
parser/lcfrs_triton/merge_discontinuous.py
https://github.com/sustcsonglin/TN-LCFRS/blob/596de7771ce3069f0d13851a4ccb4c08aa8e3db9/parser/lcfrs_triton/merge_discontinuous.py
@triton.jit
def _kernel_bwd_merge_discontinuous_v1(
    alpha_c, tmp_merge_normalized, tmp_merge_grad,
    w, batch, L,
    stride_alpha_c1, stride_alpha_c2, stride_alpha_c3,
    stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3,
    r1, r2, r3, r4,
    BLOCK_R3: tl.constexpr,
    BLOCK_R4: tl.constexpr,
):
    ## Find index. tl.program_id(1) has size w-1; each value is the length of the left continuous
    ## subspan of a discontinuous parent.
    # For each tl.program_id(1), the number of possible discontinuous spans is the same:
    # len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2.
    ## The group of size (L-w-i) corresponds to start position i, and each j in [0, L-w-i) within the
    ## group encodes the gap length (gap end - gap start).
    ## To avoid wasting half of the computation, the flat index is decoded manually as follows.
    b_idx = tl.program_id(0)
    if b_idx >= batch:
        return
    span_length_left = tl.program_id(1) + 1
    tid = tl.program_id(2)
    start = 0
    # Peel off groups of size (L-w-start) until tid falls inside the group for this start;
    # the remainder then encodes the gap length.
    while tid >= (L - w - start):
        tid -= (L - w - start)
        start += 1
    gap_start = start + span_length_left
    gap_end = gap_start + (tid + 1)
    end = gap_end + (w - span_length_left)

    # acc3 = tl.zeros((BLOCK_R4,), dtype=tl.float32) - 1e9
    # acc4 = tl.zeros((BLOCK_R4,), dtype=tl.float32) - 1e9
    # acc5 = tl.zeros((BLOCK_R4,), dtype=tl.float32) - 1e9

    ## discontinuous parent nodes with two continuous child nodes
    # [i, j], [m, n] -> [i, j, m, n]
    # Gradients are accumulated into the transposed (lower-triangle) cells of alpha_c.
    l_bwd_ptr = alpha_c + b_idx * stride_alpha_c1 + gap_start * stride_alpha_c2 + start * stride_alpha_c3 + 2*r1 + r2 + tl.arange(0, BLOCK_R3)
    r_bwd_ptr = alpha_c + b_idx * stride_alpha_c1 + end * stride_alpha_c2 + gap_end * stride_alpha_c3 + 2*r1 + r2 + r3 + tl.arange(0, BLOCK_R3)
    mask = tl.arange(0, BLOCK_R3) < r3

    do = tl.load(
        tmp_merge_normalized + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3),
        mask=mask, other=0
    )
    do *= tl.load(
        tmp_merge_grad + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3),
        mask=mask, other=0
    )
    tl.atomic_add(l_bwd_ptr, do, mask=mask)
    tl.atomic_add(r_bwd_ptr, do, mask=mask)
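# The backward above uses the standard rule for exp-normalized scores: the forward stored
# y = exp(x - max(x)) with the max treated as a constant, so dL/dx = y * dL/dy, which is exactly the
# elementwise product of tmp_merge_normalized and tmp_merge_grad loaded before the atomic adds.
# (Dropping the gradient through the saved max is safe because its contributions through the exp and
# through the added normalizer cancel in the final log, up to the 1e-9 smoothing term.)
# A toy autograd check of that rule:
import torch

x = torch.randn(8, requires_grad=True)
y = torch.exp(x - x.max().detach())   # mirrors one cell of tmp_merge_normalized
y.backward(torch.ones_like(y))        # upstream gradient dL/dy = 1
assert torch.allclose(x.grad, y.detach())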
sustcsonglin/TN-LCFRS
parser/lcfrs_triton/merge_discontinuous.py
https://github.com/sustcsonglin/TN-LCFRS/blob/596de7771ce3069f0d13851a4ccb4c08aa8e3db9/parser/lcfrs_triton/merge_discontinuous.py
import pdb import statistics import torch import triton import triton.language as tl from torch.utils.checkpoint import checkpoint as ckp def checkpoint(func): def wrapper(*args, **kwargs): return ckp(func, *args, **kwargs) return wrapper @triton.jit def logaddexp(a, b): tmp = a - b return tl.where(tmp > 0, tl.log(tl.exp(b - a) + 1) + a, tl.log(tl.exp(a-b) + 1) + b) @triton.jit def _kernel_inside_merge_discontinuous_v1( alpha_c, tmp_merge, tmp_merge_normalizer, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, stride_normalizer1, stride_normalizer2, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. # for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) l_ptr = alpha_c + b_idx * stride_alpha_c1 + start * stride_alpha_c2 + gap_start * stride_alpha_c3 + 2*r1 + r2 + tl.arange(0, BLOCK_R3) r_ptr = alpha_c + b_idx * stride_alpha_c1 + gap_end * stride_alpha_c2 + end * stride_alpha_c3 + 2*r1 + r2 + r3 + tl.arange(0, BLOCK_R3) mask = tl.arange(0, BLOCK_R3) < r3 child_l = tl.load(l_ptr, mask=mask, other=-1e9) child_r = tl.load(r_ptr, mask=mask, other=-1e9) acc1 = child_l + child_r acc_max = tl.max(acc1, 0) tl.store(tmp_merge_normalizer + b_idx * stride_normalizer1 + tl.program_id(1) * stride_normalizer2 + tl.program_id(2), acc_max) acc = tl.exp(acc1 - acc_max) tl.store(tmp_merge + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), acc, mask=mask) @triton.jit def _kernel_bwd_merge_discontinuous_v1( alpha_c, tmp_merge_normalized, tmp_merge_grad, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, BLOCK_R4: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. # for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). 
tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) # acc3 = tl.zeros((BLOCK_R4,), dtype=tl.float32) - 1e9 # acc4 = tl.zeros((BLOCK_R4,),dtype=tl.float32) - 1e9 # acc5 = tl.zeros((BLOCK_R4,),dtype=tl.float32) - 1e9 ## discontinuous parent nodes with two continuous child nodes # [i, j], [m, n] -> [i, j, m, n] l_bwd_ptr = alpha_c + b_idx * stride_alpha_c1 + gap_start * stride_alpha_c2 + start * stride_alpha_c3 + 2*r1 + r2 + tl.arange(0, BLOCK_R3) r_bwd_ptr = alpha_c + b_idx * stride_alpha_c1 + end * stride_alpha_c2 + gap_end * stride_alpha_c3 + 2*r1 + r2 + r3 + tl.arange(0, BLOCK_R3) mask = tl.arange(0, BLOCK_R3) < r3 do = tl.load( tmp_merge_normalized + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), mask=mask, other=0 ) do *= tl.load( tmp_merge_grad + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), mask = mask, other=0 ) tl.atomic_add(l_bwd_ptr, do, mask=mask) tl.atomic_add(r_bwd_ptr, do, mask=mask) @triton.jit def _kernel_inside_merge_discontinuous_v2( alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, tmp_normalizer, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_alpha_d1, stride_alpha_d2, stride_alpha_d3, stride_alpha_d4, stride_alpha_d5, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, stride_tmp_normalizer1, stride_tmp_normalizer2, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, BLOCK_R4: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. # for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). 
tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) acc2 = tl.zeros((BLOCK_R4,), dtype=tl.float32) - 1e9 ## discontinuous parent nodes with one continuous child node and another discontinuous child node alpha_c_ptr = alpha_c + b_idx * stride_alpha_c1 + 2*r1 + r2 + 2 * r3 + tl.arange(0, BLOCK_R4) alpha_d_ptr = alpha_d + b_idx * stride_alpha_d1 + r2 + tl.arange(0, BLOCK_R4) mask = tl.arange(0, BLOCK_R4) < r4 for split in range(start+1, gap_start): #### continuous [i, j], discontinuous [j, k, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + start * stride_alpha_c2 + split * stride_alpha_c3 d_ptr = alpha_d_ptr + split * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + gap_start * stride_alpha_c3 + r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + split * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) for split in range(gap_end+1, end): #### continuous [m, j], discontinuous [i, k, j, n] -> discontinuous [i, k, m, n]. c_ptr = alpha_c_ptr + gap_end * stride_alpha_c2 + split * stride_alpha_c3 + 2* r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + split * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + end * stride_alpha_c3 + 3 * r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + split * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) # acc = tl.cat(acc1, acc2, acc3, acc4, acc5) # acc_max = tl.max(acc1, 0) acc_max = tl.max(acc2, 0) # acc_max = tl.maximum(acc_max, tl.max(acc3, 0)) # acc_max = tl.maximum(acc_max, tl.max(acc4, 0)) # acc_max = tl.maximum(acc_max, tl.max(acc5, 0)) tl.store(tmp_normalizer + b_idx * stride_tmp_normalizer1 + tl.program_id(1) * stride_tmp_normalizer2 + tl.program_id(2), acc_max) ptr = b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R4) out = tl.exp(acc2 - acc_max) tl.store(tmp_merge + ptr , acc2, mask=mask) tl.store(tmp_merge_normalized + ptr, out, mask=mask) @triton.jit def _kernel_bwd_merge_discontinuous_v2( alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, tmp_merge_grad, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_alpha_d1, stride_alpha_d2, stride_alpha_d3, stride_alpha_d4, stride_alpha_d5, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, BLOCK_R4: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. 
# for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) ptr = b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R4) ## discontinuous parent nodes with one continuous child node and another discontinuous child node alpha_c_ptr = alpha_c + b_idx * stride_alpha_c1 + 2*r1 + r2 + 2 * r3 + tl.arange(0, BLOCK_R4) alpha_d_ptr = alpha_d + b_idx * stride_alpha_d1 + r2 + tl.arange(0, BLOCK_R4) mask = tl.arange(0, BLOCK_R4) < r4 parent_score = tl.load(tmp_merge + ptr, mask=mask, other=0) do = tl.load(tmp_merge_normalized + ptr, mask=mask, other=0) * tl.load(tmp_merge_grad + ptr, mask=mask, other=0) for split in range(start+1, gap_start): #### continuous [i, j], discontinuous [j, k, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + start * stride_alpha_c2 + split * stride_alpha_c3 d_ptr = alpha_d_ptr + split * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask, other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + split * stride_alpha_c2 + start * stride_alpha_c3 d_bwd_ptr = alpha_d_ptr + gap_start * stride_alpha_d2 + split * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + gap_start * stride_alpha_c3 + r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + split * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask,other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + gap_start * stride_alpha_c2 + split * stride_alpha_c3 + r4 d_bwd_ptr = alpha_d_ptr + split * stride_alpha_d2 + start * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) for split in range(gap_end+1, end): #### continuous [m, j], discontinuous [i, k, j, n] -> discontinuous [i, k, m, n]. 
c_ptr = alpha_c_ptr + gap_end * stride_alpha_c2 + split * stride_alpha_c3 + 2* r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + split * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask,other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + split * stride_alpha_c2 + gap_end * stride_alpha_c3 + 2* r4 d_bwd_ptr = alpha_d_ptr + gap_start * stride_alpha_d2 + start * stride_alpha_d3 + split * stride_alpha_d4 + end * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + end * stride_alpha_c3 + 3 * r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + split * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask,other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + end * stride_alpha_c2 + split * stride_alpha_c3 + 3 * r4 d_bwd_ptr = alpha_d_ptr + gap_start * stride_alpha_d2 + start * stride_alpha_d3 + gap_end * stride_alpha_d4 + split * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) ### The reason why not save tmp_merge is that it could be recomputed very easily w/o overhead ### while saving ``tmp_merge'' wastes lots of memory class MERGE_D1(torch.autograd.Function): @staticmethod def forward(ctx, alpha_c, dimension_info): B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) tmp_merge_normalized = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2), r3).fill_(0) tmp_normalizer = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2)).fill_(-1e9) grid = (triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_inside_merge_discontinuous_v1[grid](alpha_c, tmp_merge_normalized, tmp_normalizer, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), tmp_merge_normalized.stride(0), tmp_merge_normalized.stride(1), tmp_merge_normalized.stride(2), tmp_normalizer.stride(0), tmp_normalizer.stride(1), r1, r2, r3, r4, BLOCK_R3=triton.next_power_of_2(r3) ) ctx.save_for_backward(tmp_merge_normalized, alpha_c, dimension_info) return tmp_merge_normalized, tmp_normalizer @staticmethod def backward(ctx, do, do2): tmp_merge_normalized, alpha_c, dimension_info = ctx.saved_tensors B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) grid = (triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_bwd_merge_discontinuous_v1[grid]( alpha_c, tmp_merge_normalized, do, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), tmp_merge_normalized.stride(0), tmp_merge_normalized.stride(1), tmp_merge_normalized.stride(2), r1, r2, r3, r4, BLOCK_R3= triton.next_power_of_2(r3), BLOCK_R4= triton.next_power_of_2(r4) ) return alpha_c, None class MERGE_D2(torch.autograd.Function): @staticmethod def forward(ctx, alpha_c, alpha_d, dimension_info): B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) tmp_merge = alpha_c.new_zeros(B, w-1, 
int((N-w)*(N-w+1)/2), r4).fill_(-1e9) tmp_merge_normalized = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2), r4) tmp_normalizer = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2)).fill_(-1e9) grid = ( triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_inside_merge_discontinuous_v2[grid](alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, tmp_normalizer, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), alpha_d.stride(0), alpha_d.stride(1), alpha_d.stride(2), alpha_d.stride(3), alpha_d.stride(4), tmp_merge.stride(0), tmp_merge.stride(1), tmp_merge.stride(2), tmp_normalizer.stride(0), tmp_normalizer.stride(1), r1, r2, r3, r4, BLOCK_R3 =triton.next_power_of_2(r3), BLOCK_R4 = triton.next_power_of_2(r4) ) ctx.save_for_backward(tmp_merge, tmp_merge_normalized, alpha_c, alpha_d, dimension_info) return tmp_merge_normalized, tmp_normalizer @staticmethod def backward(ctx, do, do2): tmp_merge, tmp_merge_normalized, alpha_c, alpha_d, dimension_info = ctx.saved_tensors B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) # tmp_normalizer = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2)).fill_(-1e9) grid = (triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_bwd_merge_discontinuous_v2[grid](alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, do, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), alpha_d.stride(0), alpha_d.stride(1), alpha_d.stride(2), alpha_d.stride(3), alpha_d.stride(4), tmp_merge.stride(0), tmp_merge.stride(1), tmp_merge.stride(2), r1, r2, r3, r4, BLOCK_R3 = triton.next_power_of_2(r3), BLOCK_R4 = triton.next_power_of_2(r4) ) return alpha_c, alpha_d, None _merge_discontinuous_v1 = MERGE_D1.apply _merge_discontinuous_v2 = MERGE_D2.apply def merge_discontinuous_v1( alpha_c, f_d1, dimension_info, ): out, normalizer = _merge_discontinuous_v1(alpha_c, dimension_info) return ((out @ f_d1) + 1e-9).log() + normalizer[..., None] def merge_discontinuous_v2( alpha_c, alpha_d, f_d2, dimension_info, ): out, normalizer = _merge_discontinuous_v2(alpha_c, alpha_d, dimension_info) return ((out @ f_d2) + 1e-9).log() + normalizer[..., None] # else: # return # @checkpoint def _merge_discontinuous( alpha_c, alpha_d, f_d1, f_d2, dimension_info ): out1 = merge_discontinuous_v1(alpha_c, f_d1, dimension_info) out2 = merge_discontinuous_v2(alpha_c, alpha_d, f_d2, dimension_info) return torch.logaddexp(out1, out2)
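Note: the index decoding used by every kernel above turns tl.program_id(2) into a (start, gap-length) pair by peeling off groups of size (L-w-start), which is why the grid axis has (L-w)*(L-w+1)/2 entries. A host-side sketch of the same arithmetic (plain Python, hypothetical helper name decode_tid), useful for sanity-checking the layout:

def decode_tid(tid, L, w, span_length_left):
    # Mirror of the in-kernel decode: peel off groups of size (L - w - start).
    start = 0
    while tid >= (L - w - start):
        tid -= (L - w - start)
        start += 1
    gap_start = start + span_length_left
    gap_end = gap_start + (tid + 1)
    end = gap_end + (w - span_length_left)
    return start, gap_start, gap_end, end

L, w, span_length_left = 12, 4, 1
n_tids = (L - w) * (L - w + 1) // 2            # size of the tl.program_id(2) axis
spans = {decode_tid(t, L, w, span_length_left) for t in range(n_tids)}
assert len(spans) == n_tids                    # every tid maps to a distinct span
assert all(end <= L for (_, _, _, end) in spans)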
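The logaddexp helper used in the loops above stays numerically stable by only ever exponentiating the non-positive difference. A minimal verification sketch against torch.logaddexp (PyTorch, for checking only, not part of the kernel):

import torch

def logaddexp_ref(a, b):
    # Same branch structure as the Triton helper: exponentiate (smaller - larger) only.
    tmp = a - b
    return torch.where(tmp > 0,
                       torch.log(torch.exp(b - a) + 1) + a,
                       torch.log(torch.exp(a - b) + 1) + b)

a = torch.randn(8) * 50
b = torch.randn(8) * 50
assert torch.allclose(logaddexp_ref(a, b), torch.logaddexp(a, b), atol=1e-5)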
sustcsonglin/TN-LCFRS
parser/lcfrs_triton/merge_discontinuous.py
https://github.com/sustcsonglin/TN-LCFRS/blob/596de7771ce3069f0d13851a4ccb4c08aa8e3db9/parser/lcfrs_triton/merge_discontinuous.py
import pdb import statistics import torch import triton import triton.language as tl from torch.utils.checkpoint import checkpoint as ckp def checkpoint(func): def wrapper(*args, **kwargs): return ckp(func, *args, **kwargs) return wrapper @triton.jit def logaddexp(a, b): tmp = a - b return tl.where(tmp > 0, tl.log(tl.exp(b - a) + 1) + a, tl.log(tl.exp(a-b) + 1) + b) @triton.jit def _kernel_inside_merge_discontinuous_v1( alpha_c, tmp_merge, tmp_merge_normalizer, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, stride_normalizer1, stride_normalizer2, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. # for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) l_ptr = alpha_c + b_idx * stride_alpha_c1 + start * stride_alpha_c2 + gap_start * stride_alpha_c3 + 2*r1 + r2 + tl.arange(0, BLOCK_R3) r_ptr = alpha_c + b_idx * stride_alpha_c1 + gap_end * stride_alpha_c2 + end * stride_alpha_c3 + 2*r1 + r2 + r3 + tl.arange(0, BLOCK_R3) mask = tl.arange(0, BLOCK_R3) < r3 child_l = tl.load(l_ptr, mask=mask, other=-1e9) child_r = tl.load(r_ptr, mask=mask, other=-1e9) acc1 = child_l + child_r acc_max = tl.max(acc1, 0) tl.store(tmp_merge_normalizer + b_idx * stride_normalizer1 + tl.program_id(1) * stride_normalizer2 + tl.program_id(2), acc_max) acc = tl.exp(acc1 - acc_max) tl.store(tmp_merge + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), acc, mask=mask) @triton.jit def _kernel_bwd_merge_discontinuous_v1( alpha_c, tmp_merge_normalized, tmp_merge_grad, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, BLOCK_R4: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. # for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). 
tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) # acc3 = tl.zeros((BLOCK_R4,), dtype=tl.float32) - 1e9 # acc4 = tl.zeros((BLOCK_R4,),dtype=tl.float32) - 1e9 # acc5 = tl.zeros((BLOCK_R4,),dtype=tl.float32) - 1e9 ## discontinuous parent nodes with two continuous child nodes # [i, j], [m, n] -> [i, j, m, n] l_bwd_ptr = alpha_c + b_idx * stride_alpha_c1 + gap_start * stride_alpha_c2 + start * stride_alpha_c3 + 2*r1 + r2 + tl.arange(0, BLOCK_R3) r_bwd_ptr = alpha_c + b_idx * stride_alpha_c1 + end * stride_alpha_c2 + gap_end * stride_alpha_c3 + 2*r1 + r2 + r3 + tl.arange(0, BLOCK_R3) mask = tl.arange(0, BLOCK_R3) < r3 do = tl.load( tmp_merge_normalized + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), mask=mask, other=0 ) do *= tl.load( tmp_merge_grad + b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R3), mask = mask, other=0 ) tl.atomic_add(l_bwd_ptr, do, mask=mask) tl.atomic_add(r_bwd_ptr, do, mask=mask) @triton.jit def _kernel_inside_merge_discontinuous_v2( alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, tmp_normalizer, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_alpha_d1, stride_alpha_d2, stride_alpha_d3, stride_alpha_d4, stride_alpha_d5, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, stride_tmp_normalizer1, stride_tmp_normalizer2, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, BLOCK_R4: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. # for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). 
tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) acc2 = tl.zeros((BLOCK_R4,), dtype=tl.float32) - 1e9 ## discontinuous parent nodes with one continuous child node and another discontinuous child node alpha_c_ptr = alpha_c + b_idx * stride_alpha_c1 + 2*r1 + r2 + 2 * r3 + tl.arange(0, BLOCK_R4) alpha_d_ptr = alpha_d + b_idx * stride_alpha_d1 + r2 + tl.arange(0, BLOCK_R4) mask = tl.arange(0, BLOCK_R4) < r4 for split in range(start+1, gap_start): #### continuous [i, j], discontinuous [j, k, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + start * stride_alpha_c2 + split * stride_alpha_c3 d_ptr = alpha_d_ptr + split * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + gap_start * stride_alpha_c3 + r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + split * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) for split in range(gap_end+1, end): #### continuous [m, j], discontinuous [i, k, j, n] -> discontinuous [i, k, m, n]. c_ptr = alpha_c_ptr + gap_end * stride_alpha_c2 + split * stride_alpha_c3 + 2* r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + split * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + end * stride_alpha_c3 + 3 * r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + split * stride_alpha_d5 child_c = tl.load(c_ptr, mask, other=-1e9) child_d = tl.load(d_ptr, mask, other=-1e9) acc2 = logaddexp(acc2, child_c + child_d) # acc = tl.cat(acc1, acc2, acc3, acc4, acc5) # acc_max = tl.max(acc1, 0) acc_max = tl.max(acc2, 0) # acc_max = tl.maximum(acc_max, tl.max(acc3, 0)) # acc_max = tl.maximum(acc_max, tl.max(acc4, 0)) # acc_max = tl.maximum(acc_max, tl.max(acc5, 0)) tl.store(tmp_normalizer + b_idx * stride_tmp_normalizer1 + tl.program_id(1) * stride_tmp_normalizer2 + tl.program_id(2), acc_max) ptr = b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R4) out = tl.exp(acc2 - acc_max) tl.store(tmp_merge + ptr , acc2, mask=mask) tl.store(tmp_merge_normalized + ptr, out, mask=mask) @triton.jit def _kernel_bwd_merge_discontinuous_v2( alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, tmp_merge_grad, w, batch, L, stride_alpha_c1, stride_alpha_c2, stride_alpha_c3, stride_alpha_d1, stride_alpha_d2, stride_alpha_d3, stride_alpha_d4, stride_alpha_d5, stride_tmp_merge1, stride_tmp_merge2, stride_tmp_merge3, r1, r2, r3, r4, BLOCK_R3: tl.constexpr, BLOCK_R4: tl.constexpr, ): ## Find index. tl.program_id(1) is of size w-1, each indicates the length of the left continuous subspan given a discontinuous parent. 
# for each tl.program_id(1), the number of possible discontinuous spans is the same: len(tl.program_id(2)) := (L-w) + (L-w-1) + (L-w-2) + ... + 1 = (L-w)*(L-w+1)/2. ## (L-w-i) parts means that the start position is $i$, and each j \in [0, L-w-i] means the gap length (gap end - gap start) ## To avoid the waste of half amount of computation, I manually compute the index in the following way b_idx = tl.program_id(0) if b_idx >= batch: return span_length_left = tl.program_id(1) + 1 tid = tl.program_id(2) start = 0 # To find the group (L-w-start). tid is the gap length then while tid >= (L-w-start): tid -= (L-w-start) start += 1 gap_start = start + span_length_left gap_end = gap_start + (tid + 1) end = gap_end + (w - span_length_left) ptr = b_idx * stride_tmp_merge1 + tl.program_id(1) * stride_tmp_merge2 + tl.program_id(2) * stride_tmp_merge3 + tl.arange(0, BLOCK_R4) ## discontinuous parent nodes with one continuous child node and another discontinuous child node alpha_c_ptr = alpha_c + b_idx * stride_alpha_c1 + 2*r1 + r2 + 2 * r3 + tl.arange(0, BLOCK_R4) alpha_d_ptr = alpha_d + b_idx * stride_alpha_d1 + r2 + tl.arange(0, BLOCK_R4) mask = tl.arange(0, BLOCK_R4) < r4 parent_score = tl.load(tmp_merge + ptr, mask=mask, other=0) do = tl.load(tmp_merge_normalized + ptr, mask=mask, other=0) * tl.load(tmp_merge_grad + ptr, mask=mask, other=0) for split in range(start+1, gap_start): #### continuous [i, j], discontinuous [j, k, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + start * stride_alpha_c2 + split * stride_alpha_c3 d_ptr = alpha_d_ptr + split * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask, other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + split * stride_alpha_c2 + start * stride_alpha_c3 d_bwd_ptr = alpha_d_ptr + gap_start * stride_alpha_d2 + split * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + gap_start * stride_alpha_c3 + r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + split * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask,other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + gap_start * stride_alpha_c2 + split * stride_alpha_c3 + r4 d_bwd_ptr = alpha_d_ptr + split * stride_alpha_d2 + start * stride_alpha_d3 + gap_end * stride_alpha_d4 + end * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) for split in range(gap_end+1, end): #### continuous [m, j], discontinuous [i, k, j, n] -> discontinuous [i, k, m, n]. 
c_ptr = alpha_c_ptr + gap_end * stride_alpha_c2 + split * stride_alpha_c3 + 2* r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + split * stride_alpha_d4 + end * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask,other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + split * stride_alpha_c2 + gap_end * stride_alpha_c3 + 2* r4 d_bwd_ptr = alpha_d_ptr + gap_start * stride_alpha_d2 + start * stride_alpha_d3 + split * stride_alpha_d4 + end * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) #### continuous [j, k], discontinuous [i, j, m, n] -> discontinuous [i, k, m, n] c_ptr = alpha_c_ptr + split * stride_alpha_c2 + end * stride_alpha_c3 + 3 * r4 d_ptr = alpha_d_ptr + start * stride_alpha_d2 + gap_start * stride_alpha_d3 + gap_end * stride_alpha_d4 + split * stride_alpha_d5 child_c = tl.load(c_ptr,mask=mask,other=0) child_d = tl.load(d_ptr,mask=mask,other=0) new_grad = tl.exp(child_c + child_d - parent_score) * do c_bwd_ptr = alpha_c_ptr + end * stride_alpha_c2 + split * stride_alpha_c3 + 3 * r4 d_bwd_ptr = alpha_d_ptr + gap_start * stride_alpha_d2 + start * stride_alpha_d3 + gap_end * stride_alpha_d4 + split * stride_alpha_d5 tl.atomic_add(c_bwd_ptr, new_grad) tl.atomic_add(d_bwd_ptr, new_grad) ### The reason why not save tmp_merge is that it could be recomputed very easily w/o overhead ### while saving ``tmp_merge'' wastes lots of memory class MERGE_D1(torch.autograd.Function): @staticmethod def forward(ctx, alpha_c, dimension_info): B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) tmp_merge_normalized = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2), r3).fill_(0) tmp_normalizer = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2)).fill_(-1e9) grid = (triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_inside_merge_discontinuous_v1[grid](alpha_c, tmp_merge_normalized, tmp_normalizer, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), tmp_merge_normalized.stride(0), tmp_merge_normalized.stride(1), tmp_merge_normalized.stride(2), tmp_normalizer.stride(0), tmp_normalizer.stride(1), r1, r2, r3, r4, BLOCK_R3=triton.next_power_of_2(r3) ) ctx.save_for_backward(tmp_merge_normalized, alpha_c, dimension_info) return tmp_merge_normalized, tmp_normalizer @staticmethod def backward(ctx, do, do2): tmp_merge_normalized, alpha_c, dimension_info = ctx.saved_tensors B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) grid = (triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_bwd_merge_discontinuous_v1[grid]( alpha_c, tmp_merge_normalized, do, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), tmp_merge_normalized.stride(0), tmp_merge_normalized.stride(1), tmp_merge_normalized.stride(2), r1, r2, r3, r4, BLOCK_R3= triton.next_power_of_2(r3), BLOCK_R4= triton.next_power_of_2(r4) ) return alpha_c, None class MERGE_D2(torch.autograd.Function): @staticmethod def forward(ctx, alpha_c, alpha_d, dimension_info): B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) tmp_merge = alpha_c.new_zeros(B, w-1, 
int((N-w)*(N-w+1)/2), r4).fill_(-1e9) tmp_merge_normalized = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2), r4) tmp_normalizer = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2)).fill_(-1e9) grid = ( triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_inside_merge_discontinuous_v2[grid](alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, tmp_normalizer, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), alpha_d.stride(0), alpha_d.stride(1), alpha_d.stride(2), alpha_d.stride(3), alpha_d.stride(4), tmp_merge.stride(0), tmp_merge.stride(1), tmp_merge.stride(2), tmp_normalizer.stride(0), tmp_normalizer.stride(1), r1, r2, r3, r4, BLOCK_R3 =triton.next_power_of_2(r3), BLOCK_R4 = triton.next_power_of_2(r4) ) ctx.save_for_backward(tmp_merge, tmp_merge_normalized, alpha_c, alpha_d, dimension_info) return tmp_merge_normalized, tmp_normalizer @staticmethod def backward(ctx, do, do2): tmp_merge, tmp_merge_normalized, alpha_c, alpha_d, dimension_info = ctx.saved_tensors B = alpha_c.shape[0] N = alpha_c.shape[1] - 1 w = int(dimension_info[0]) n = N - w r1 = int(dimension_info[1]) r2 = int(dimension_info[2]) r3 = int(dimension_info[3]) r4 = int(dimension_info[4]) # tmp_normalizer = alpha_c.new_zeros(B, w-1, int((N-w)*(N-w+1)/2)).fill_(-1e9) grid = (triton.next_power_of_2(B), (w-1), int((N-w)*(N-w+1)/2)) _kernel_bwd_merge_discontinuous_v2[grid](alpha_c, alpha_d, tmp_merge, tmp_merge_normalized, do, w, B, N, alpha_c.stride(0), alpha_c.stride(1), alpha_c.stride(2), alpha_d.stride(0), alpha_d.stride(1), alpha_d.stride(2), alpha_d.stride(3), alpha_d.stride(4), tmp_merge.stride(0), tmp_merge.stride(1), tmp_merge.stride(2), r1, r2, r3, r4, BLOCK_R3 = triton.next_power_of_2(r3), BLOCK_R4 = triton.next_power_of_2(r4) ) return alpha_c, alpha_d, None _merge_discontinuous_v1 = MERGE_D1.apply _merge_discontinuous_v2 = MERGE_D2.apply def merge_discontinuous_v1( alpha_c, f_d1, dimension_info, ): out, normalizer = _merge_discontinuous_v1(alpha_c, dimension_info) return ((out @ f_d1) + 1e-9).log() + normalizer[..., None] def merge_discontinuous_v2( alpha_c, alpha_d, f_d2, dimension_info, ): out, normalizer = _merge_discontinuous_v2(alpha_c, alpha_d, dimension_info) return ((out @ f_d2) + 1e-9).log() + normalizer[..., None] # else: # return # @checkpoint def _merge_discontinuous( alpha_c, alpha_d, f_d1, f_d2, dimension_info ): out1 = merge_discontinuous_v1(alpha_c, f_d1, dimension_info) out2 = merge_discontinuous_v2(alpha_c, alpha_d, f_d2, dimension_info) return torch.logaddexp(out1, out2)
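The merge_discontinuous_v1/v2 wrappers rely on the kernels returning max-normalized scores: tmp_merge_normalized holds exp(acc - max) and tmp_normalizer holds the max, so the contraction with f_d1/f_d2 can run as an ordinary matmul before moving back to log space. A small PyTorch sketch of that trick, with hypothetical shapes and assuming (as the +1e-9 and .log() suggest) that the factor matrix is non-negative:

import torch

R, C = 16, 8
acc = torch.randn(R) * 5            # log-space scores, one per rank r
f = torch.rand(R, C) + 0.1          # assumed non-negative factors (stand-in for f_d1/f_d2)

m = acc.max()                       # normalizer stored by the kernel
out = torch.exp(acc - m)            # max-normalized scores, safe to matmul in float
merged = ((out @ f) + 1e-9).log() + m

reference = torch.logsumexp(acc[:, None] + f.log(), dim=0)
assert torch.allclose(merged, reference, atol=1e-4)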
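The backward kernels above recompute each split's weight as exp(child_c + child_d - parent_score) * do and scatter it onto both children, which is the standard identity that the gradient of a log-sum-exp is a softmax over the summed terms. A tiny check of that core identity (PyTorch autograd as the reference; the kernel additionally folds in the stored normalizer):

import torch

scores = torch.randn(10, requires_grad=True)   # child_c + child_d for every split point
parent = torch.logsumexp(scores, dim=0)
do = torch.tensor(2.5)                          # upstream gradient on the parent score
parent.backward(do)

manual = torch.exp(scores.detach() - parent.detach()) * do   # what the kernel accumulates
assert torch.allclose(scores.grad, manual, atol=1e-6)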
iscaas/AFOSR-HAR-2021-2025
DVFL-Net/apex/apex/contrib/openfold_triton/_mha_kernel.py
https://github.com/iscaas/AFOSR-HAR-2021-2025/blob/4499edfcc1eadb88934cc3026ef4f771284296d6/DVFL-Net/apex/apex/contrib/openfold_triton/_mha_kernel.py
# © 2023 NVIDIA CORPORATION & AFFILIATES import triton import triton.language as tl def init_to_zero(name): return lambda nargs: nargs[name].zero_() def get_configs_fwd(): configs = [] for num_stages in [0, 1, 2, 3, 4]: for block_m in [32, 64, 128]: for block_n in [16, 32, 64, 128]: if block_n > block_m: continue for num_warps in [1, 2, 4, 8]: if 32 * num_warps * 32 > block_m * block_n: continue configs.append( triton.Config( {"BLOCK_M": block_m, "BLOCK_N": block_n}, num_stages=num_stages, num_warps=num_warps, ) ) return configs """ @triton.autotune( configs=get_configs_fwd(), key=['Z', 'H', 'N_CTX', 'H_DIM', 'IS_TRAINING'], ) """ @triton.heuristics( { "EVEN_M": lambda args: args["N_CTX"] % args["BLOCK_M"] == 0, "EVEN_N": lambda args: args["N_CTX"] % args["BLOCK_N"] == 0, "EVEN_HEADDIM": lambda args: args["H_DIM"] == args["BLOCK_DMODEL"], } ) @triton.jit def _attention_core( Q, K, V, Mask, Bias, sm_scale, L, M, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, stride_bz, stride_bh, stride_bm, stride_bn, stride_mz, stride_mh, stride_mm, stride_mn, Z, H, N_CTX, H_DIM, BATCH, # 256 8 128 32 1 inf: tl.constexpr, IS_TRAINING: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_DMODEL: tl.constexpr, use_mask: tl.constexpr, use_bias: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, ): start_m = tl.program_id(0) off_hz = tl.program_id(1) off_b = off_hz // H off_h = off_hz % H # initialize offsets offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) offs_d = tl.arange(0, BLOCK_DMODEL) off_q = ( off_b * stride_qz + off_h * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk ) off_k = ( off_b * stride_kz + off_h * stride_kh + offs_n[None, :] * stride_kn + offs_d[:, None] * stride_kk ) off_v = ( off_b * stride_vz + off_h * stride_vh + offs_n[:, None] * stride_vk + offs_d[None, :] * stride_vn ) # Initialize pointers to Q, K, V q_ptrs = Q + off_q k_ptrs = K + off_k v_ptrs = V + off_v # Initialize pointers to bias, mask if use_bias: batch_2 = Z // BATCH off_hz_bias = (off_hz // (batch_2 * H) * H) + (off_hz % H) offs_base_bias = ( off_hz_bias * (N_CTX * N_CTX) + offs_m[:, None] * N_CTX + offs_n[None, :] ) """ off_b = off_hz // H off_h = off_hz % H bias_ptrs = Bias + off_b * stride_bz + off_h * stride_bh + (offs_m[:, None] * stride_bm + offs_n[None, :] * stride_bn) """ if use_mask: # off_hz_mask = (off_hz // H) # offs_base_mask = off_hz_mask * N_CTX off_b = off_hz // H off_h = off_hz % H mask_ptrs = ( Mask + off_b * stride_mz + off_h * stride_mh + (offs_m[:, None] * stride_mm + offs_n[None, :] * stride_mn) ) # initialize pointer to m and l m_prev = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") l_prev = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) # load q: it will stay in SRAM throughout if EVEN_M & EVEN_N: if EVEN_HEADDIM: q = tl.load(q_ptrs) else: q = tl.load(q_ptrs, mask=offs_d[None, :] < H_DIM, other=0.0) else: if EVEN_HEADDIM: q = tl.load(q_ptrs, mask=offs_m[:, None] < N_CTX, other=0.0) else: q = tl.load( q_ptrs, mask=(offs_m[:, None] < N_CTX) & (offs_d[None, :] < H_DIM), other=0.0, ) # loop over k, v and update accumulator # (start_m + 1) * BLOCK_M for start_n in range(0, N_CTX, BLOCK_N): start_n = tl.multiple_of(start_n, BLOCK_N) # -- compute qk ---- if ( EVEN_N & EVEN_M ): # If we just do "if EVEN_N", there seems to 
be some race condition if EVEN_HEADDIM: k = tl.load(k_ptrs) else: k = tl.load(k_ptrs, mask=offs_d[:, None] < H_DIM, other=0.0) else: if EVEN_HEADDIM: k = tl.load(k_ptrs, mask=(start_n + offs_n)[None, :] < N_CTX, other=0.0) else: k = tl.load( k_ptrs, mask=((start_n + offs_n)[None, :] < N_CTX) & (offs_d[:, None] < H_DIM), other=0.0, ) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) # qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf")) if use_bias: qk += tl.dot(q * sm_scale.to(tl.bfloat16), k).to(tl.bfloat16) qk += tl.where((start_n + offs_n)[None, :] < N_CTX, 0, -inf).to(tl.bfloat16) if EVEN_M & EVEN_N: bias_data = tl.load(Bias + offs_base_bias + start_n) else: bias_load_mask = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) bias_load_mask = tl.where(offs_m[:, None] >= N_CTX, 1.0, bias_load_mask) bias_load_mask = tl.where( (start_n + offs_n)[None, :] >= N_CTX, 1.0, bias_load_mask ) bias_data = tl.load( Bias + offs_base_bias + start_n, mask=(bias_load_mask == 0.0), other=0.0, ) qk = qk + bias_data else: qk += tl.dot(q, k) qk += tl.where((start_n + offs_n)[None, :] < N_CTX, 0, -inf) qk = qk.to(tl.bfloat16) if use_mask: if EVEN_M & EVEN_N: mask_data = tl.load(mask_ptrs + start_n).to(tl.int32) else: mask_data = tl.load( mask_ptrs + start_n, mask=(offs_m[:, None] < N_CTX) & ((start_n + offs_n)[None, :] < N_CTX), other=0, ).to(tl.int32) qk += tl.where(mask_data == 0, -inf, 0.0) if use_bias: # compute new m m_curr = tl.maximum(tl.max(qk, 1), m_prev) # correct old l l_prev *= tl.exp(m_prev - m_curr) # attention weights p = tl.exp(qk - m_curr[:, None]) else: m_curr = tl.maximum(tl.max(qk, 1) * sm_scale, m_prev) l_prev *= tl.exp(m_prev - m_curr) p = tl.exp(qk * sm_scale - m_curr[:, None]) l_curr = tl.sum(p, 1) + l_prev # rescale operands of matmuls l_rcp = 1.0 / l_curr p *= l_rcp[:, None] acc *= (l_prev * l_rcp)[:, None] # update acc p = p.to(Q.dtype.element_ty) if ( EVEN_N & EVEN_M ): # If we just do "if EVEN_N", there seems to be some race condition if EVEN_HEADDIM: v = tl.load(v_ptrs) else: v = tl.load(v_ptrs, mask=offs_d[None, :] < H_DIM, other=0.0) else: if EVEN_HEADDIM: v = tl.load(v_ptrs, mask=(start_n + offs_n)[:, None] < N_CTX, other=0.0) else: v = tl.load( v_ptrs, mask=((start_n + offs_n)[:, None] < N_CTX) & (offs_d[None, :] < H_DIM), other=0.0, ) acc += tl.dot(p, v) # update m_i and l_i l_prev = l_curr m_prev = m_curr # update pointers k_ptrs += BLOCK_N * stride_kn v_ptrs += BLOCK_N * stride_vk # rematerialize offsets to save registers start_m = tl.program_id(0) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) # write back l and m if IS_TRAINING: l_ptrs = L + off_hz * N_CTX + offs_m m_ptrs = M + off_hz * N_CTX + offs_m tl.store(l_ptrs, l_prev) tl.store(m_ptrs, m_prev) # initialize pointers to output offs_n = tl.arange(0, BLOCK_DMODEL) off_o = ( off_b * stride_oz + off_h * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on ) out_ptrs = Out + off_o if EVEN_M: if EVEN_HEADDIM: tl.store(out_ptrs, acc.to(Q.dtype.element_ty)) else: tl.store(out_ptrs, acc.to(Q.dtype.element_ty), mask=offs_n[None, :] < H_DIM) else: if EVEN_HEADDIM: tl.store(out_ptrs, acc.to(Q.dtype.element_ty), mask=offs_m[:, None] < N_CTX) else: tl.store( out_ptrs, acc.to(Q.dtype.element_ty), mask=(offs_m[:, None] < N_CTX) & (offs_n[None, :] < H_DIM), ) # tl.store(out_ptrs, acc.to(Q.dtype.element_ty), mask=out_store_mask) @triton.jit def _bwd_preprocess( Out, DO, L, NewDO, Delta, stride_ob, stride_oh, stride_om, stride_ok, stride_dob, stride_doh, stride_dom, stride_dok, BLOCK_M: 
tl.constexpr, D_HEAD: tl.constexpr, ): off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M) off_n = tl.arange(0, D_HEAD) # load o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32) do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32) denom = tl.load(L + off_m).to(tl.float32) # compute do = do / denom[:, None] delta = tl.sum(o * do, axis=1) # write-back tl.store(NewDO + off_m[:, None] * D_HEAD + off_n[None, :], do) tl.store(Delta + off_m, delta) def get_configs_bwd(): configs = [] for num_stages in [0, 1, 2, 3, 4]: for block_m in [32, 64, 128]: for block_n in [16, 32, 64, 128]: if block_n > block_m: continue for num_warps in [1, 2, 4, 8]: if 32 * num_warps * 32 > block_m * block_n: continue configs.append( triton.Config( {"BLOCK_M": block_m, "BLOCK_N": block_n}, num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero("DQ"), ) ) return configs """ @triton.autotune( configs=get_configs_bwd(), key=['Z', 'H', 'N_CTX', 'H_DIM'], ) """ @triton.heuristics( { "EVEN_M": lambda args: args["N_CTX"] % args["BLOCK_M"] == 0, "EVEN_N": lambda args: args["N_CTX"] % args["BLOCK_N"] == 0, "EVEN_HEADDIM": lambda args: args["H_DIM"] == args["BLOCK_DMODEL"], } ) @triton.jit def _bwd_kernel( Q, K, V, Mask, Bias, sm_scale, Out, DO, DQ, DK, DV, DP, L, M, D, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_mz, stride_mh, stride_mm, stride_mn, stride_bz, stride_bh, stride_bm, stride_bn, stride_dpz, stride_dph, stride_dpm, stride_dpn, stride_dob, stride_doh, stride_dom, stride_dok, stride_dqb, stride_dqh, stride_dqm, stride_dqk, stride_dkb, stride_dkh, stride_dkn, stride_dkk, stride_dvb, stride_dvh, stride_dvn, stride_dvk, Z, H, N_CTX, H_DIM, # num_block, inf: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, use_mask: tl.constexpr, use_bias: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, SEQUENCE_PARALLEL: tl.constexpr, ): off_hz = tl.program_id(0) off_b = off_hz // H off_h = off_hz % H # offset pointers for batch/head Q += off_b * stride_qz + off_h * stride_qh K += off_b * stride_kz + off_h * stride_kh V += off_b * stride_vz + off_h * stride_vh DO += off_b * stride_dob + off_h * stride_doh DQ += off_b * stride_dqb + off_h * stride_dqh DK += off_b * stride_dkb + off_h * stride_dkh DV += off_b * stride_dvb + off_h * stride_dvh DP += off_b * stride_dpz + off_h * stride_dph if use_bias: Bias += off_b * stride_bz + off_h * stride_bh if use_mask: # offs_base_mask = off_b * N_CTX Mask += off_b * stride_mz + off_h * stride_mh num_block_n = tl.cdiv(N_CTX, BLOCK_N) for start_n in range(0, num_block_n): # lo = start_n * BLOCK_M lo = 0 # initialize row/col offsets offs_qm = lo + tl.arange(0, BLOCK_M) offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N) # BLOCK_M offs_m = tl.arange(0, BLOCK_M) # BLOCK_N offs_k = tl.arange(0, BLOCK_DMODEL) # initialize pointers to value-like data q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk) k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) v_ptrs = V + (offs_n[:, None] * stride_vk + offs_k[None, :] * stride_vn) do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_k[None, :] * stride_dok) dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_k[None, :] * stride_dqk) dp_ptrs = DP + (offs_qm[:, None] * stride_dpm + offs_n[None, :] * stride_dpn) if use_bias: b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :] * 
stride_bn) if use_mask: mask_ptrs = Mask + ( offs_qm[:, None] * stride_mm + offs_n[None, :] * stride_mn ) # pointer to row-wise quantities in value-like data D_ptrs = D + off_hz * N_CTX m_ptrs = M + off_hz * N_CTX # initialize dv amd dk dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32) # BLOCK_M dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32) # BLOCK_M # k and v stay in SRAM throughout if EVEN_N & EVEN_M: if EVEN_HEADDIM: k = tl.load(k_ptrs) v = tl.load(v_ptrs) else: k = tl.load(k_ptrs, mask=offs_k[None, :] < H_DIM, other=0.0) v = tl.load(v_ptrs, mask=offs_k[None, :] < H_DIM, other=0.0) else: if EVEN_HEADDIM: k = tl.load(k_ptrs, mask=offs_n[:, None] < N_CTX, other=0.0) v = tl.load(v_ptrs, mask=offs_n[:, None] < N_CTX, other=0.0) else: k = tl.load( k_ptrs, mask=(offs_n[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ) v = tl.load( v_ptrs, mask=(offs_n[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ) # loop over rows num_block_m = tl.cdiv(N_CTX, BLOCK_M) for start_m in range(lo, num_block_m * BLOCK_M, BLOCK_M): start_m = tl.multiple_of(start_m, BLOCK_M) offs_m_curr = start_m + offs_m # load q, k, v, do on-chip if EVEN_M & EVEN_HEADDIM: q = tl.load(q_ptrs) else: if EVEN_HEADDIM: q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < N_CTX, other=0.0) else: q = tl.load( q_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ) # recompute p = softmax(qk, dim=-1).T # NOTE: `do` is pre-divided by `l`; no normalization here qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) qk += tl.dot(q, tl.trans(k)) if use_bias: tl.debug_barrier() # Race condition otherwise if EVEN_M & EVEN_N: bias = tl.load(b_ptrs).to(tl.float32) else: bias = tl.load( b_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_n[None, :] < N_CTX), other=0.0, ).to(tl.float32) qk = qk * sm_scale + bias if use_mask: # tl.debug_barrier() # Race condition otherwise # qk = tl.where(offs_m_curr[:, None] >= N_CTX, float("-1e20"), qk) # qk = tl.where(offs_n[None, :] >= N_CTX, float("-1e20"), qk) # mask_data = tl.load(Mask + offs_base_mask + offs_n) # qk = tl.where(mask_data[None, :] == 0., float("-1e20"), qk) if EVEN_M & EVEN_N: mask_data = tl.load(mask_ptrs).to(tl.float32) else: mask_data = tl.load( mask_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_n[None, :] < N_CTX), other=0.0, ).to(tl.float32) qk += tl.where(mask_data == 0.0, -inf, 0.0) # qk = tl.where(mask_data == 0., -inf, qk) m = tl.load(m_ptrs + offs_m_curr) if use_bias: p = tl.exp(qk - m[:, None]) else: p = tl.exp(qk * sm_scale - m[:, None]) # compute dv if EVEN_M & EVEN_HEADDIM: do = tl.load(do_ptrs) # .to(tl.float32) else: do = tl.load( do_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ) dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do) # compute dp = dot(v, do) Di = tl.load(D_ptrs + offs_m_curr) dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None] dp += tl.dot(do, tl.trans(v)) # compute ds = p * (dp - delta[:, None]) ds = p * dp if use_bias: tl.store(dp_ptrs, ds) ds = ds * sm_scale # compute dk = dot(ds.T, q) dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q) # compute dq # can we remove .to(tl.float32) if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M dq = tl.load(dq_ptrs).to(tl.float32) dq += tl.dot(ds.to(Q.dtype.element_ty), k) tl.store(dq_ptrs, dq) else: if EVEN_HEADDIM: dq = tl.load( dq_ptrs, mask=offs_m_curr[:, None] < N_CTX, other=0.0 ).to(tl.float32) dq += tl.dot(ds.to(Q.dtype.element_ty), k) tl.store(dq_ptrs, dq, mask=offs_m_curr[:, None] < 
N_CTX) else: dq = tl.load( dq_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ).to(tl.float32) dq += tl.dot(ds.to(Q.dtype.element_ty), k) tl.store( dq_ptrs, dq, mask=(offs_m_curr[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), ) # increment pointers dq_ptrs += BLOCK_M * stride_dqm q_ptrs += BLOCK_M * stride_qm do_ptrs += BLOCK_M * stride_dom dp_ptrs += BLOCK_M * stride_dpm if use_bias: b_ptrs += BLOCK_M * stride_bm if use_mask: mask_ptrs += BLOCK_M * stride_mm # write-back dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_k[None, :] * stride_dvk) dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk) if EVEN_N & EVEN_M: if EVEN_HEADDIM: tl.store(dv_ptrs, dv) tl.store(dk_ptrs, dk) else: tl.store(dv_ptrs, dv, mask=offs_k[None, :] < H_DIM) tl.store(dk_ptrs, dk, mask=offs_k[None, :] < H_DIM) else: if EVEN_HEADDIM: tl.store(dv_ptrs, dv, mask=offs_n[:, None] < N_CTX) tl.store(dk_ptrs, dk, mask=offs_n[:, None] < N_CTX) else: tl.store( dv_ptrs, dv, mask=(offs_n[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), ) tl.store( dk_ptrs, dk, mask=(offs_n[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), )
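The forward kernel above follows the usual online-softmax recurrence: per block of keys it updates a running row-max m and running denominator l, and rescales the partial output so the full attention matrix is never materialized. A compact host-side reference of that recurrence (plain PyTorch, no mask/bias, hypothetical sizes), checked against ordinary softmax attention:

import torch

torch.manual_seed(0)
N, D, BLOCK = 128, 32, 16
q, k, v = torch.randn(N, D), torch.randn(N, D), torch.randn(N, D)
sm_scale = D ** -0.5

m = torch.full((N,), float("-inf"))   # running row max (m_prev)
l = torch.zeros(N)                    # running denominator (l_prev)
acc = torch.zeros(N, D)
for s in range(0, N, BLOCK):
    qk = (q @ k[s:s + BLOCK].T) * sm_scale          # scores for this key block
    m_new = torch.maximum(m, qk.max(dim=1).values)
    l = l * torch.exp(m - m_new)                    # rescale old denominator
    p = torch.exp(qk - m_new[:, None])
    l_new = l + p.sum(dim=1)
    acc = acc * (l / l_new)[:, None] + (p / l_new[:, None]) @ v[s:s + BLOCK]
    m, l = m_new, l_new

ref = torch.softmax((q @ k.T) * sm_scale, dim=-1) @ v
assert torch.allclose(acc, ref, atol=1e-4)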
@triton.jit def _attention_core( Q, K, V, Mask, Bias, sm_scale, L, M, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, stride_bz, stride_bh, stride_bm, stride_bn, stride_mz, stride_mh, stride_mm, stride_mn, Z, H, N_CTX, H_DIM, BATCH, # 256 8 128 32 1 inf: tl.constexpr, IS_TRAINING: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_DMODEL: tl.constexpr, use_mask: tl.constexpr, use_bias: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, ): start_m = tl.program_id(0) off_hz = tl.program_id(1) off_b = off_hz // H off_h = off_hz % H # initialize offsets offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) offs_d = tl.arange(0, BLOCK_DMODEL) off_q = ( off_b * stride_qz + off_h * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk ) off_k = ( off_b * stride_kz + off_h * stride_kh + offs_n[None, :] * stride_kn + offs_d[:, None] * stride_kk ) off_v = ( off_b * stride_vz + off_h * stride_vh + offs_n[:, None] * stride_vk + offs_d[None, :] * stride_vn ) # Initialize pointers to Q, K, V q_ptrs = Q + off_q k_ptrs = K + off_k v_ptrs = V + off_v # Initialize pointers to bias, mask if use_bias: batch_2 = Z // BATCH off_hz_bias = (off_hz // (batch_2 * H) * H) + (off_hz % H) offs_base_bias = ( off_hz_bias * (N_CTX * N_CTX) + offs_m[:, None] * N_CTX + offs_n[None, :] ) """ off_b = off_hz // H off_h = off_hz % H bias_ptrs = Bias + off_b * stride_bz + off_h * stride_bh + (offs_m[:, None] * stride_bm + offs_n[None, :] * stride_bn) """ if use_mask: # off_hz_mask = (off_hz // H) # offs_base_mask = off_hz_mask * N_CTX off_b = off_hz // H off_h = off_hz % H mask_ptrs = ( Mask + off_b * stride_mz + off_h * stride_mh + (offs_m[:, None] * stride_mm + offs_n[None, :] * stride_mn) ) # initialize pointer to m and l m_prev = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") l_prev = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) # load q: it will stay in SRAM throughout if EVEN_M & EVEN_N: if EVEN_HEADDIM: q = tl.load(q_ptrs) else: q = tl.load(q_ptrs, mask=offs_d[None, :] < H_DIM, other=0.0) else: if EVEN_HEADDIM: q = tl.load(q_ptrs, mask=offs_m[:, None] < N_CTX, other=0.0) else: q = tl.load( q_ptrs, mask=(offs_m[:, None] < N_CTX) & (offs_d[None, :] < H_DIM), other=0.0, ) # loop over k, v and update accumulator # (start_m + 1) * BLOCK_M for start_n in range(0, N_CTX, BLOCK_N): start_n = tl.multiple_of(start_n, BLOCK_N) # -- compute qk ---- if ( EVEN_N & EVEN_M ): # If we just do "if EVEN_N", there seems to be some race condition if EVEN_HEADDIM: k = tl.load(k_ptrs) else: k = tl.load(k_ptrs, mask=offs_d[:, None] < H_DIM, other=0.0) else: if EVEN_HEADDIM: k = tl.load(k_ptrs, mask=(start_n + offs_n)[None, :] < N_CTX, other=0.0) else: k = tl.load( k_ptrs, mask=((start_n + offs_n)[None, :] < N_CTX) & (offs_d[:, None] < H_DIM), other=0.0, ) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) # qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf")) if use_bias: qk += tl.dot(q * sm_scale.to(tl.bfloat16), k).to(tl.bfloat16) qk += tl.where((start_n + offs_n)[None, :] < N_CTX, 0, -inf).to(tl.bfloat16) if EVEN_M & EVEN_N: bias_data = tl.load(Bias + offs_base_bias + start_n) else: bias_load_mask = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) bias_load_mask = tl.where(offs_m[:, None] >= N_CTX, 1.0, bias_load_mask) bias_load_mask = 
tl.where( (start_n + offs_n)[None, :] >= N_CTX, 1.0, bias_load_mask ) bias_data = tl.load( Bias + offs_base_bias + start_n, mask=(bias_load_mask == 0.0), other=0.0, ) qk = qk + bias_data else: qk += tl.dot(q, k) qk += tl.where((start_n + offs_n)[None, :] < N_CTX, 0, -inf) qk = qk.to(tl.bfloat16) if use_mask: if EVEN_M & EVEN_N: mask_data = tl.load(mask_ptrs + start_n).to(tl.int32) else: mask_data = tl.load( mask_ptrs + start_n, mask=(offs_m[:, None] < N_CTX) & ((start_n + offs_n)[None, :] < N_CTX), other=0, ).to(tl.int32) qk += tl.where(mask_data == 0, -inf, 0.0) if use_bias: # compute new m m_curr = tl.maximum(tl.max(qk, 1), m_prev) # correct old l l_prev *= tl.exp(m_prev - m_curr) # attention weights p = tl.exp(qk - m_curr[:, None]) else: m_curr = tl.maximum(tl.max(qk, 1) * sm_scale, m_prev) l_prev *= tl.exp(m_prev - m_curr) p = tl.exp(qk * sm_scale - m_curr[:, None]) l_curr = tl.sum(p, 1) + l_prev # rescale operands of matmuls l_rcp = 1.0 / l_curr p *= l_rcp[:, None] acc *= (l_prev * l_rcp)[:, None] # update acc p = p.to(Q.dtype.element_ty) if ( EVEN_N & EVEN_M ): # If we just do "if EVEN_N", there seems to be some race condition if EVEN_HEADDIM: v = tl.load(v_ptrs) else: v = tl.load(v_ptrs, mask=offs_d[None, :] < H_DIM, other=0.0) else: if EVEN_HEADDIM: v = tl.load(v_ptrs, mask=(start_n + offs_n)[:, None] < N_CTX, other=0.0) else: v = tl.load( v_ptrs, mask=((start_n + offs_n)[:, None] < N_CTX) & (offs_d[None, :] < H_DIM), other=0.0, ) acc += tl.dot(p, v) # update m_i and l_i l_prev = l_curr m_prev = m_curr # update pointers k_ptrs += BLOCK_N * stride_kn v_ptrs += BLOCK_N * stride_vk # rematerialize offsets to save registers start_m = tl.program_id(0) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) # write back l and m if IS_TRAINING: l_ptrs = L + off_hz * N_CTX + offs_m m_ptrs = M + off_hz * N_CTX + offs_m tl.store(l_ptrs, l_prev) tl.store(m_ptrs, m_prev) # initialize pointers to output offs_n = tl.arange(0, BLOCK_DMODEL) off_o = ( off_b * stride_oz + off_h * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on ) out_ptrs = Out + off_o if EVEN_M: if EVEN_HEADDIM: tl.store(out_ptrs, acc.to(Q.dtype.element_ty)) else: tl.store(out_ptrs, acc.to(Q.dtype.element_ty), mask=offs_n[None, :] < H_DIM) else: if EVEN_HEADDIM: tl.store(out_ptrs, acc.to(Q.dtype.element_ty), mask=offs_m[:, None] < N_CTX) else: tl.store( out_ptrs, acc.to(Q.dtype.element_ty), mask=(offs_m[:, None] < N_CTX) & (offs_n[None, :] < H_DIM), ) # tl.store(out_ptrs, acc.to(Q.dtype.element_ty), mask=out_store_mask)
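The loop above is the online-softmax (flash-attention) recurrence: m_prev and l_prev hold the running row maximum and normalizer, and acc is rescaled every key block so that, after the last block, acc equals softmax(qk) @ v. The NumPy sketch below replays that recurrence outside Triton purely for illustration; the function name, block size, and test shapes are made up here, and the sm_scale/bias/mask handling is omitted.

import numpy as np

def streaming_softmax_matmul(qk, v, block_n=4):
    # qk: (M, N) attention logits, v: (N, D) values; keys are processed block by block
    m_rows, n_keys = qk.shape
    m_prev = np.full(m_rows, -np.inf)      # running row max
    l_prev = np.zeros(m_rows)              # running normalizer
    acc = np.zeros((m_rows, v.shape[1]))   # running, already-normalized output
    for start in range(0, n_keys, block_n):
        qk_blk = qk[:, start:start + block_n]
        v_blk = v[start:start + block_n]
        m_curr = np.maximum(qk_blk.max(axis=1), m_prev)
        l_prev = l_prev * np.exp(m_prev - m_curr)        # correct old l
        p = np.exp(qk_blk - m_curr[:, None])             # attention weights
        l_curr = p.sum(axis=1) + l_prev
        l_rcp = 1.0 / l_curr                             # rescale operands of matmuls
        acc = acc * (l_prev * l_rcp)[:, None] + (p * l_rcp[:, None]) @ v_blk
        l_prev, m_prev = l_curr, m_curr
    return acc

qk = np.random.randn(3, 8)
v = np.random.randn(8, 5)
ref = np.exp(qk - qk.max(axis=1, keepdims=True))
ref = (ref / ref.sum(axis=1, keepdims=True)) @ v
assert np.allclose(streaming_softmax_matmul(qk, v), ref)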
iscaas/AFOSR-HAR-2021-2025
DVFL-Net/apex/apex/contrib/openfold_triton/_mha_kernel.py
https://github.com/iscaas/AFOSR-HAR-2021-2025/blob/4499edfcc1eadb88934cc3026ef4f771284296d6/DVFL-Net/apex/apex/contrib/openfold_triton/_mha_kernel.py
# © 2023 NVIDIA CORPORATION & AFFILIATES import triton import triton.language as tl def init_to_zero(name): return lambda nargs: nargs[name].zero_() def get_configs_fwd(): configs = [] for num_stages in [0, 1, 2, 3, 4]: for block_m in [32, 64, 128]: for block_n in [16, 32, 64, 128]: if block_n > block_m: continue for num_warps in [1, 2, 4, 8]: if 32 * num_warps * 32 > block_m * block_n: continue configs.append( triton.Config( {"BLOCK_M": block_m, "BLOCK_N": block_n}, num_stages=num_stages, num_warps=num_warps, ) ) return configs """ @triton.autotune( configs=get_configs_fwd(), key=['Z', 'H', 'N_CTX', 'H_DIM', 'IS_TRAINING'], ) """ @triton.heuristics( { "EVEN_M": lambda args: args["N_CTX"] % args["BLOCK_M"] == 0, "EVEN_N": lambda args: args["N_CTX"] % args["BLOCK_N"] == 0, "EVEN_HEADDIM": lambda args: args["H_DIM"] == args["BLOCK_DMODEL"], } ) @triton.jit def _attention_core( Q, K, V, Mask, Bias, sm_scale, L, M, Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_oz, stride_oh, stride_om, stride_on, stride_bz, stride_bh, stride_bm, stride_bn, stride_mz, stride_mh, stride_mm, stride_mn, Z, H, N_CTX, H_DIM, BATCH, # 256 8 128 32 1 inf: tl.constexpr, IS_TRAINING: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_DMODEL: tl.constexpr, use_mask: tl.constexpr, use_bias: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, ): start_m = tl.program_id(0) off_hz = tl.program_id(1) off_b = off_hz // H off_h = off_hz % H # initialize offsets offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) offs_n = tl.arange(0, BLOCK_N) offs_d = tl.arange(0, BLOCK_DMODEL) off_q = ( off_b * stride_qz + off_h * stride_qh + offs_m[:, None] * stride_qm + offs_d[None, :] * stride_qk ) off_k = ( off_b * stride_kz + off_h * stride_kh + offs_n[None, :] * stride_kn + offs_d[:, None] * stride_kk ) off_v = ( off_b * stride_vz + off_h * stride_vh + offs_n[:, None] * stride_vk + offs_d[None, :] * stride_vn ) # Initialize pointers to Q, K, V q_ptrs = Q + off_q k_ptrs = K + off_k v_ptrs = V + off_v # Initialize pointers to bias, mask if use_bias: batch_2 = Z // BATCH off_hz_bias = (off_hz // (batch_2 * H) * H) + (off_hz % H) offs_base_bias = ( off_hz_bias * (N_CTX * N_CTX) + offs_m[:, None] * N_CTX + offs_n[None, :] ) """ off_b = off_hz // H off_h = off_hz % H bias_ptrs = Bias + off_b * stride_bz + off_h * stride_bh + (offs_m[:, None] * stride_bm + offs_n[None, :] * stride_bn) """ if use_mask: # off_hz_mask = (off_hz // H) # offs_base_mask = off_hz_mask * N_CTX off_b = off_hz // H off_h = off_hz % H mask_ptrs = ( Mask + off_b * stride_mz + off_h * stride_mh + (offs_m[:, None] * stride_mm + offs_n[None, :] * stride_mn) ) # initialize pointer to m and l m_prev = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") l_prev = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) # load q: it will stay in SRAM throughout if EVEN_M & EVEN_N: if EVEN_HEADDIM: q = tl.load(q_ptrs) else: q = tl.load(q_ptrs, mask=offs_d[None, :] < H_DIM, other=0.0) else: if EVEN_HEADDIM: q = tl.load(q_ptrs, mask=offs_m[:, None] < N_CTX, other=0.0) else: q = tl.load( q_ptrs, mask=(offs_m[:, None] < N_CTX) & (offs_d[None, :] < H_DIM), other=0.0, ) # loop over k, v and update accumulator # (start_m + 1) * BLOCK_M for start_n in range(0, N_CTX, BLOCK_N): start_n = tl.multiple_of(start_n, BLOCK_N) # -- compute qk ---- if ( EVEN_N & EVEN_M ): # If we just do "if EVEN_N", there seems to 
be some race condition if EVEN_HEADDIM: k = tl.load(k_ptrs) else: k = tl.load(k_ptrs, mask=offs_d[:, None] < H_DIM, other=0.0) else: if EVEN_HEADDIM: k = tl.load(k_ptrs, mask=(start_n + offs_n)[None, :] < N_CTX, other=0.0) else: k = tl.load( k_ptrs, mask=((start_n + offs_n)[None, :] < N_CTX) & (offs_d[:, None] < H_DIM), other=0.0, ) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) # qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf")) if use_bias: qk += tl.dot(q * sm_scale.to(tl.bfloat16), k).to(tl.bfloat16) qk += tl.where((start_n + offs_n)[None, :] < N_CTX, 0, -inf).to(tl.bfloat16) if EVEN_M & EVEN_N: bias_data = tl.load(Bias + offs_base_bias + start_n) else: bias_load_mask = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) bias_load_mask = tl.where(offs_m[:, None] >= N_CTX, 1.0, bias_load_mask) bias_load_mask = tl.where( (start_n + offs_n)[None, :] >= N_CTX, 1.0, bias_load_mask ) bias_data = tl.load( Bias + offs_base_bias + start_n, mask=(bias_load_mask == 0.0), other=0.0, ) qk = qk + bias_data else: qk += tl.dot(q, k) qk += tl.where((start_n + offs_n)[None, :] < N_CTX, 0, -inf) qk = qk.to(tl.bfloat16) if use_mask: if EVEN_M & EVEN_N: mask_data = tl.load(mask_ptrs + start_n).to(tl.int32) else: mask_data = tl.load( mask_ptrs + start_n, mask=(offs_m[:, None] < N_CTX) & ((start_n + offs_n)[None, :] < N_CTX), other=0, ).to(tl.int32) qk += tl.where(mask_data == 0, -inf, 0.0) if use_bias: # compute new m m_curr = tl.maximum(tl.max(qk, 1), m_prev) # correct old l l_prev *= tl.exp(m_prev - m_curr) # attention weights p = tl.exp(qk - m_curr[:, None]) else: m_curr = tl.maximum(tl.max(qk, 1) * sm_scale, m_prev) l_prev *= tl.exp(m_prev - m_curr) p = tl.exp(qk * sm_scale - m_curr[:, None]) l_curr = tl.sum(p, 1) + l_prev # rescale operands of matmuls l_rcp = 1.0 / l_curr p *= l_rcp[:, None] acc *= (l_prev * l_rcp)[:, None] # update acc p = p.to(Q.dtype.element_ty) if ( EVEN_N & EVEN_M ): # If we just do "if EVEN_N", there seems to be some race condition if EVEN_HEADDIM: v = tl.load(v_ptrs) else: v = tl.load(v_ptrs, mask=offs_d[None, :] < H_DIM, other=0.0) else: if EVEN_HEADDIM: v = tl.load(v_ptrs, mask=(start_n + offs_n)[:, None] < N_CTX, other=0.0) else: v = tl.load( v_ptrs, mask=((start_n + offs_n)[:, None] < N_CTX) & (offs_d[None, :] < H_DIM), other=0.0, ) acc += tl.dot(p, v) # update m_i and l_i l_prev = l_curr m_prev = m_curr # update pointers k_ptrs += BLOCK_N * stride_kn v_ptrs += BLOCK_N * stride_vk # rematerialize offsets to save registers start_m = tl.program_id(0) offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M) # write back l and m if IS_TRAINING: l_ptrs = L + off_hz * N_CTX + offs_m m_ptrs = M + off_hz * N_CTX + offs_m tl.store(l_ptrs, l_prev) tl.store(m_ptrs, m_prev) # initialize pointers to output offs_n = tl.arange(0, BLOCK_DMODEL) off_o = ( off_b * stride_oz + off_h * stride_oh + offs_m[:, None] * stride_om + offs_n[None, :] * stride_on ) out_ptrs = Out + off_o if EVEN_M: if EVEN_HEADDIM: tl.store(out_ptrs, acc.to(Q.dtype.element_ty)) else: tl.store(out_ptrs, acc.to(Q.dtype.element_ty), mask=offs_n[None, :] < H_DIM) else: if EVEN_HEADDIM: tl.store(out_ptrs, acc.to(Q.dtype.element_ty), mask=offs_m[:, None] < N_CTX) else: tl.store( out_ptrs, acc.to(Q.dtype.element_ty), mask=(offs_m[:, None] < N_CTX) & (offs_n[None, :] < H_DIM), ) # tl.store(out_ptrs, acc.to(Q.dtype.element_ty), mask=out_store_mask) @triton.jit def _bwd_preprocess( Out, DO, L, NewDO, Delta, stride_ob, stride_oh, stride_om, stride_ok, stride_dob, stride_doh, stride_dom, stride_dok, BLOCK_M: 
tl.constexpr, D_HEAD: tl.constexpr, ): off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M) off_n = tl.arange(0, D_HEAD) # load o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32) do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32) denom = tl.load(L + off_m).to(tl.float32) # compute do = do / denom[:, None] delta = tl.sum(o * do, axis=1) # write-back tl.store(NewDO + off_m[:, None] * D_HEAD + off_n[None, :], do) tl.store(Delta + off_m, delta) def get_configs_bwd(): configs = [] for num_stages in [0, 1, 2, 3, 4]: for block_m in [32, 64, 128]: for block_n in [16, 32, 64, 128]: if block_n > block_m: continue for num_warps in [1, 2, 4, 8]: if 32 * num_warps * 32 > block_m * block_n: continue configs.append( triton.Config( {"BLOCK_M": block_m, "BLOCK_N": block_n}, num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero("DQ"), ) ) return configs """ @triton.autotune( configs=get_configs_bwd(), key=['Z', 'H', 'N_CTX', 'H_DIM'], ) """ @triton.heuristics( { "EVEN_M": lambda args: args["N_CTX"] % args["BLOCK_M"] == 0, "EVEN_N": lambda args: args["N_CTX"] % args["BLOCK_N"] == 0, "EVEN_HEADDIM": lambda args: args["H_DIM"] == args["BLOCK_DMODEL"], } ) @triton.jit def _bwd_kernel( Q, K, V, Mask, Bias, sm_scale, Out, DO, DQ, DK, DV, DP, L, M, D, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_mz, stride_mh, stride_mm, stride_mn, stride_bz, stride_bh, stride_bm, stride_bn, stride_dpz, stride_dph, stride_dpm, stride_dpn, stride_dob, stride_doh, stride_dom, stride_dok, stride_dqb, stride_dqh, stride_dqm, stride_dqk, stride_dkb, stride_dkh, stride_dkn, stride_dkk, stride_dvb, stride_dvh, stride_dvn, stride_dvk, Z, H, N_CTX, H_DIM, # num_block, inf: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, use_mask: tl.constexpr, use_bias: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, SEQUENCE_PARALLEL: tl.constexpr, ): off_hz = tl.program_id(0) off_b = off_hz // H off_h = off_hz % H # offset pointers for batch/head Q += off_b * stride_qz + off_h * stride_qh K += off_b * stride_kz + off_h * stride_kh V += off_b * stride_vz + off_h * stride_vh DO += off_b * stride_dob + off_h * stride_doh DQ += off_b * stride_dqb + off_h * stride_dqh DK += off_b * stride_dkb + off_h * stride_dkh DV += off_b * stride_dvb + off_h * stride_dvh DP += off_b * stride_dpz + off_h * stride_dph if use_bias: Bias += off_b * stride_bz + off_h * stride_bh if use_mask: # offs_base_mask = off_b * N_CTX Mask += off_b * stride_mz + off_h * stride_mh num_block_n = tl.cdiv(N_CTX, BLOCK_N) for start_n in range(0, num_block_n): # lo = start_n * BLOCK_M lo = 0 # initialize row/col offsets offs_qm = lo + tl.arange(0, BLOCK_M) offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N) # BLOCK_M offs_m = tl.arange(0, BLOCK_M) # BLOCK_N offs_k = tl.arange(0, BLOCK_DMODEL) # initialize pointers to value-like data q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk) k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) v_ptrs = V + (offs_n[:, None] * stride_vk + offs_k[None, :] * stride_vn) do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_k[None, :] * stride_dok) dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_k[None, :] * stride_dqk) dp_ptrs = DP + (offs_qm[:, None] * stride_dpm + offs_n[None, :] * stride_dpn) if use_bias: b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :] * 
stride_bn) if use_mask: mask_ptrs = Mask + ( offs_qm[:, None] * stride_mm + offs_n[None, :] * stride_mn ) # pointer to row-wise quantities in value-like data D_ptrs = D + off_hz * N_CTX m_ptrs = M + off_hz * N_CTX # initialize dv amd dk dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32) # BLOCK_M dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32) # BLOCK_M # k and v stay in SRAM throughout if EVEN_N & EVEN_M: if EVEN_HEADDIM: k = tl.load(k_ptrs) v = tl.load(v_ptrs) else: k = tl.load(k_ptrs, mask=offs_k[None, :] < H_DIM, other=0.0) v = tl.load(v_ptrs, mask=offs_k[None, :] < H_DIM, other=0.0) else: if EVEN_HEADDIM: k = tl.load(k_ptrs, mask=offs_n[:, None] < N_CTX, other=0.0) v = tl.load(v_ptrs, mask=offs_n[:, None] < N_CTX, other=0.0) else: k = tl.load( k_ptrs, mask=(offs_n[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ) v = tl.load( v_ptrs, mask=(offs_n[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ) # loop over rows num_block_m = tl.cdiv(N_CTX, BLOCK_M) for start_m in range(lo, num_block_m * BLOCK_M, BLOCK_M): start_m = tl.multiple_of(start_m, BLOCK_M) offs_m_curr = start_m + offs_m # load q, k, v, do on-chip if EVEN_M & EVEN_HEADDIM: q = tl.load(q_ptrs) else: if EVEN_HEADDIM: q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < N_CTX, other=0.0) else: q = tl.load( q_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ) # recompute p = softmax(qk, dim=-1).T # NOTE: `do` is pre-divided by `l`; no normalization here qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) qk += tl.dot(q, tl.trans(k)) if use_bias: tl.debug_barrier() # Race condition otherwise if EVEN_M & EVEN_N: bias = tl.load(b_ptrs).to(tl.float32) else: bias = tl.load( b_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_n[None, :] < N_CTX), other=0.0, ).to(tl.float32) qk = qk * sm_scale + bias if use_mask: # tl.debug_barrier() # Race condition otherwise # qk = tl.where(offs_m_curr[:, None] >= N_CTX, float("-1e20"), qk) # qk = tl.where(offs_n[None, :] >= N_CTX, float("-1e20"), qk) # mask_data = tl.load(Mask + offs_base_mask + offs_n) # qk = tl.where(mask_data[None, :] == 0., float("-1e20"), qk) if EVEN_M & EVEN_N: mask_data = tl.load(mask_ptrs).to(tl.float32) else: mask_data = tl.load( mask_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_n[None, :] < N_CTX), other=0.0, ).to(tl.float32) qk += tl.where(mask_data == 0.0, -inf, 0.0) # qk = tl.where(mask_data == 0., -inf, qk) m = tl.load(m_ptrs + offs_m_curr) if use_bias: p = tl.exp(qk - m[:, None]) else: p = tl.exp(qk * sm_scale - m[:, None]) # compute dv if EVEN_M & EVEN_HEADDIM: do = tl.load(do_ptrs) # .to(tl.float32) else: do = tl.load( do_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ) dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do) # compute dp = dot(v, do) Di = tl.load(D_ptrs + offs_m_curr) dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None] dp += tl.dot(do, tl.trans(v)) # compute ds = p * (dp - delta[:, None]) ds = p * dp if use_bias: tl.store(dp_ptrs, ds) ds = ds * sm_scale # compute dk = dot(ds.T, q) dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q) # compute dq # can we remove .to(tl.float32) if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M dq = tl.load(dq_ptrs).to(tl.float32) dq += tl.dot(ds.to(Q.dtype.element_ty), k) tl.store(dq_ptrs, dq) else: if EVEN_HEADDIM: dq = tl.load( dq_ptrs, mask=offs_m_curr[:, None] < N_CTX, other=0.0 ).to(tl.float32) dq += tl.dot(ds.to(Q.dtype.element_ty), k) tl.store(dq_ptrs, dq, mask=offs_m_curr[:, None] < 
N_CTX) else: dq = tl.load( dq_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ).to(tl.float32) dq += tl.dot(ds.to(Q.dtype.element_ty), k) tl.store( dq_ptrs, dq, mask=(offs_m_curr[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), ) # increment pointers dq_ptrs += BLOCK_M * stride_dqm q_ptrs += BLOCK_M * stride_qm do_ptrs += BLOCK_M * stride_dom dp_ptrs += BLOCK_M * stride_dpm if use_bias: b_ptrs += BLOCK_M * stride_bm if use_mask: mask_ptrs += BLOCK_M * stride_mm # write-back dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_k[None, :] * stride_dvk) dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk) if EVEN_N & EVEN_M: if EVEN_HEADDIM: tl.store(dv_ptrs, dv) tl.store(dk_ptrs, dk) else: tl.store(dv_ptrs, dv, mask=offs_k[None, :] < H_DIM) tl.store(dk_ptrs, dk, mask=offs_k[None, :] < H_DIM) else: if EVEN_HEADDIM: tl.store(dv_ptrs, dv, mask=offs_n[:, None] < N_CTX) tl.store(dk_ptrs, dk, mask=offs_n[:, None] < N_CTX) else: tl.store( dv_ptrs, dv, mask=(offs_n[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), ) tl.store( dk_ptrs, dk, mask=(offs_n[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), )
@triton.jit
def _bwd_preprocess(
    Out, DO, L,
    NewDO, Delta,
    stride_ob, stride_oh, stride_om, stride_ok,
    stride_dob, stride_doh, stride_dom, stride_dok,
    BLOCK_M: tl.constexpr,
    D_HEAD: tl.constexpr,
):
    off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
    off_n = tl.arange(0, D_HEAD)
    # load
    o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
    do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
    denom = tl.load(L + off_m).to(tl.float32)
    # compute
    do = do / denom[:, None]
    delta = tl.sum(o * do, axis=1)
    # write-back
    tl.store(NewDO + off_m[:, None] * D_HEAD + off_n[None, :], do)
    tl.store(Delta + off_m, delta)


def get_configs_bwd():
    configs = []
    for num_stages in [0, 1, 2, 3, 4]:
        for block_m in [32, 64, 128]:
            for block_n in [16, 32, 64, 128]:
                if block_n > block_m:
                    continue
                for num_warps in [1, 2, 4, 8]:
                    if 32 * num_warps * 32 > block_m * block_n:
                        continue
                    configs.append(
                        triton.Config(
                            {"BLOCK_M": block_m, "BLOCK_N": block_n},
                            num_stages=num_stages,
                            num_warps=num_warps,
                            pre_hook=init_to_zero("DQ"),
                        )
                    )
    return configs


"""
@triton.autotune(
    configs=get_configs_bwd(),
    key=['Z', 'H', 'N_CTX', 'H_DIM'],
)
"""
@triton.heuristics(
    {
        "EVEN_M": lambda args: args["N_CTX"] % args["BLOCK_M"] == 0,
        "EVEN_N": lambda args: args["N_CTX"] % args["BLOCK_N"] == 0,
        "EVEN_HEADDIM": lambda args: args["H_DIM"] == args["BLOCK_DMODEL"],
    }
)
@triton.jit def _bwd_kernel( Q, K, V, Mask, Bias, sm_scale, Out, DO, DQ, DK, DV, DP, L, M, D, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, stride_vz, stride_vh, stride_vk, stride_vn, stride_mz, stride_mh, stride_mm, stride_mn, stride_bz, stride_bh, stride_bm, stride_bn, stride_dpz, stride_dph, stride_dpm, stride_dpn, stride_dob, stride_doh, stride_dom, stride_dok, stride_dqb, stride_dqh, stride_dqm, stride_dqk, stride_dkb, stride_dkh, stride_dkn, stride_dkk, stride_dvb, stride_dvh, stride_dvn, stride_dvk, Z, H, N_CTX, H_DIM, # num_block, inf: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr, BLOCK_N: tl.constexpr, use_mask: tl.constexpr, use_bias: tl.constexpr, EVEN_M: tl.constexpr, EVEN_N: tl.constexpr, EVEN_HEADDIM: tl.constexpr, SEQUENCE_PARALLEL: tl.constexpr, ): off_hz = tl.program_id(0) off_b = off_hz // H off_h = off_hz % H # offset pointers for batch/head Q += off_b * stride_qz + off_h * stride_qh K += off_b * stride_kz + off_h * stride_kh V += off_b * stride_vz + off_h * stride_vh DO += off_b * stride_dob + off_h * stride_doh DQ += off_b * stride_dqb + off_h * stride_dqh DK += off_b * stride_dkb + off_h * stride_dkh DV += off_b * stride_dvb + off_h * stride_dvh DP += off_b * stride_dpz + off_h * stride_dph if use_bias: Bias += off_b * stride_bz + off_h * stride_bh if use_mask: # offs_base_mask = off_b * N_CTX Mask += off_b * stride_mz + off_h * stride_mh num_block_n = tl.cdiv(N_CTX, BLOCK_N) for start_n in range(0, num_block_n): # lo = start_n * BLOCK_M lo = 0 # initialize row/col offsets offs_qm = lo + tl.arange(0, BLOCK_M) offs_n = start_n * BLOCK_N + tl.arange(0, BLOCK_N) # BLOCK_M offs_m = tl.arange(0, BLOCK_M) # BLOCK_N offs_k = tl.arange(0, BLOCK_DMODEL) # initialize pointers to value-like data q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk) k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk) v_ptrs = V + (offs_n[:, None] * stride_vk + offs_k[None, :] * stride_vn) do_ptrs = DO + (offs_qm[:, None] * stride_dom + offs_k[None, :] * stride_dok) dq_ptrs = DQ + (offs_qm[:, None] * stride_dqm + offs_k[None, :] * stride_dqk) dp_ptrs = DP + (offs_qm[:, None] * stride_dpm + offs_n[None, :] * stride_dpn) if use_bias: b_ptrs = Bias + (offs_qm[:, None] * stride_bm + offs_n[None, :] * stride_bn) if use_mask: mask_ptrs = Mask + ( offs_qm[:, None] * stride_mm + offs_n[None, :] * stride_mn ) # pointer to row-wise quantities in value-like data D_ptrs = D + off_hz * N_CTX m_ptrs = M + off_hz * N_CTX # initialize dv amd dk dv = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32) # BLOCK_M dk = tl.zeros([BLOCK_N, BLOCK_DMODEL], dtype=tl.float32) # BLOCK_M # k and v stay in SRAM throughout if EVEN_N & EVEN_M: if EVEN_HEADDIM: k = tl.load(k_ptrs) v = tl.load(v_ptrs) else: k = tl.load(k_ptrs, mask=offs_k[None, :] < H_DIM, other=0.0) v = tl.load(v_ptrs, mask=offs_k[None, :] < H_DIM, other=0.0) else: if EVEN_HEADDIM: k = tl.load(k_ptrs, mask=offs_n[:, None] < N_CTX, other=0.0) v = tl.load(v_ptrs, mask=offs_n[:, None] < N_CTX, other=0.0) else: k = tl.load( k_ptrs, mask=(offs_n[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ) v = tl.load( v_ptrs, mask=(offs_n[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ) # loop over rows num_block_m = tl.cdiv(N_CTX, BLOCK_M) for start_m in range(lo, num_block_m * BLOCK_M, BLOCK_M): start_m = tl.multiple_of(start_m, BLOCK_M) offs_m_curr = start_m + offs_m # load q, k, v, do on-chip if EVEN_M & EVEN_HEADDIM: q = tl.load(q_ptrs) else: if 
EVEN_HEADDIM: q = tl.load(q_ptrs, mask=offs_m_curr[:, None] < N_CTX, other=0.0) else: q = tl.load( q_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ) # recompute p = softmax(qk, dim=-1).T # NOTE: `do` is pre-divided by `l`; no normalization here qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) qk += tl.dot(q, tl.trans(k)) if use_bias: tl.debug_barrier() # Race condition otherwise if EVEN_M & EVEN_N: bias = tl.load(b_ptrs).to(tl.float32) else: bias = tl.load( b_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_n[None, :] < N_CTX), other=0.0, ).to(tl.float32) qk = qk * sm_scale + bias if use_mask: # tl.debug_barrier() # Race condition otherwise # qk = tl.where(offs_m_curr[:, None] >= N_CTX, float("-1e20"), qk) # qk = tl.where(offs_n[None, :] >= N_CTX, float("-1e20"), qk) # mask_data = tl.load(Mask + offs_base_mask + offs_n) # qk = tl.where(mask_data[None, :] == 0., float("-1e20"), qk) if EVEN_M & EVEN_N: mask_data = tl.load(mask_ptrs).to(tl.float32) else: mask_data = tl.load( mask_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_n[None, :] < N_CTX), other=0.0, ).to(tl.float32) qk += tl.where(mask_data == 0.0, -inf, 0.0) # qk = tl.where(mask_data == 0., -inf, qk) m = tl.load(m_ptrs + offs_m_curr) if use_bias: p = tl.exp(qk - m[:, None]) else: p = tl.exp(qk * sm_scale - m[:, None]) # compute dv if EVEN_M & EVEN_HEADDIM: do = tl.load(do_ptrs) # .to(tl.float32) else: do = tl.load( do_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ) dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do) # compute dp = dot(v, do) Di = tl.load(D_ptrs + offs_m_curr) dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None] dp += tl.dot(do, tl.trans(v)) # compute ds = p * (dp - delta[:, None]) ds = p * dp if use_bias: tl.store(dp_ptrs, ds) ds = ds * sm_scale # compute dk = dot(ds.T, q) dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q) # compute dq # can we remove .to(tl.float32) if EVEN_M & EVEN_HEADDIM: # Race condition if we just do EVEN_M dq = tl.load(dq_ptrs).to(tl.float32) dq += tl.dot(ds.to(Q.dtype.element_ty), k) tl.store(dq_ptrs, dq) else: if EVEN_HEADDIM: dq = tl.load( dq_ptrs, mask=offs_m_curr[:, None] < N_CTX, other=0.0 ).to(tl.float32) dq += tl.dot(ds.to(Q.dtype.element_ty), k) tl.store(dq_ptrs, dq, mask=offs_m_curr[:, None] < N_CTX) else: dq = tl.load( dq_ptrs, mask=(offs_m_curr[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), other=0.0, ).to(tl.float32) dq += tl.dot(ds.to(Q.dtype.element_ty), k) tl.store( dq_ptrs, dq, mask=(offs_m_curr[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), ) # increment pointers dq_ptrs += BLOCK_M * stride_dqm q_ptrs += BLOCK_M * stride_qm do_ptrs += BLOCK_M * stride_dom dp_ptrs += BLOCK_M * stride_dpm if use_bias: b_ptrs += BLOCK_M * stride_bm if use_mask: mask_ptrs += BLOCK_M * stride_mm # write-back dv_ptrs = DV + (offs_n[:, None] * stride_dvn + offs_k[None, :] * stride_dvk) dk_ptrs = DK + (offs_n[:, None] * stride_dkn + offs_k[None, :] * stride_dkk) if EVEN_N & EVEN_M: if EVEN_HEADDIM: tl.store(dv_ptrs, dv) tl.store(dk_ptrs, dk) else: tl.store(dv_ptrs, dv, mask=offs_k[None, :] < H_DIM) tl.store(dk_ptrs, dk, mask=offs_k[None, :] < H_DIM) else: if EVEN_HEADDIM: tl.store(dv_ptrs, dv, mask=offs_n[:, None] < N_CTX) tl.store(dk_ptrs, dk, mask=offs_n[:, None] < N_CTX) else: tl.store( dv_ptrs, dv, mask=(offs_n[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), ) tl.store( dk_ptrs, dk, mask=(offs_n[:, None] < N_CTX) & (offs_k[None, :] < H_DIM), )
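The backward chunk above relies on the standard softmax-gradient identity: with delta_i = sum_k out_ik * do_ik (exactly what _bwd_preprocess stores in Delta after pre-dividing dO by the saved denominator L), the score gradient is p * (dp - delta), which the kernel writes as dp = do @ v^T - Di followed by ds = p * dp. The PyTorch snippet below checks that identity in isolation; it ignores sm_scale, bias, and mask handling and is illustrative only, not repository code.

import torch

torch.manual_seed(0)
qk = torch.randn(4, 6, dtype=torch.float64, requires_grad=True)
v = torch.randn(6, 3, dtype=torch.float64)
do = torch.randn(4, 3, dtype=torch.float64)

# autograd reference: out = softmax(qk) @ v with upstream gradient do
out = torch.softmax(qk, dim=-1) @ v
out.backward(do)

# closed form used by the kernel
p = torch.softmax(qk.detach(), dim=-1)
delta = (out.detach() * do).sum(dim=-1, keepdim=True)   # the Delta / Di term
ds = p * (do @ v.T - delta)                             # dp = do @ v^T - Di; ds = p * dp
assert torch.allclose(ds, qk.grad)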
usnistgov/atomgpt
atomgpt/inverse_models/kernels/rms_layernorm.py
https://github.com/usnistgov/atomgpt/blob/f95233523a2f50f1bc6dd28430fdcee626f2142b/atomgpt/inverse_models/kernels/rms_layernorm.py
import triton import triton.language as tl import torch from atomgpt.inverse_models.kernels.utils import calculate_settings @triton.jit def _rms_layernorm_forward( Y, Y_row_stride, X, X_row_stride, W, W_row_stride, r, r_row_stride, n_cols, eps, BLOCK_SIZE : tl.constexpr ): """ Fast RMS Layernorm kernel Inspiration from a Triton tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html """ row_idx = tl.program_id(0) col_offsets = tl.arange(0, BLOCK_SIZE) mask = col_offsets < n_cols Y += row_idx * Y_row_stride X += row_idx * X_row_stride r += row_idx * r_row_stride X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) W_row = tl.load(W + col_offsets, mask = mask, other = 0)#.to(tl.float32) row_var = tl.sum(X_row * X_row, axis = 0) / n_cols inv_var = tl.math.rsqrt(row_var + eps) tl.store(r, inv_var) normed = X_row * inv_var normed = normed.to(W_row.dtype) # Exact copy from HF output = normed * W_row tl.store(Y + col_offsets, output, mask = mask) pass @triton.heuristics({"GEMMA": lambda args: args["GEMMA"],}) @triton.jit def _rms_layernorm_backward( dY, dY_row_stride, X, X_row_stride, W, W_row_stride, r, r_row_stride, dW, dW_row_stride, n_cols, eps, GEMMA : tl.constexpr, BLOCK_SIZE : tl.constexpr, ): """ Fast RMS Layernorm kernel for the backward pass Inspiration from a Triton tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html """ row_idx = tl.program_id(0) col_offsets = tl.arange(0, BLOCK_SIZE) mask = col_offsets < n_cols dY += row_idx * dY_row_stride X += row_idx * X_row_stride r += row_idx * r_row_stride dY_row = tl.load(dY + col_offsets, mask = mask, other = 0).to(tl.float32) X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) # Get saved row variance inv_var = tl.load(r).to(tl.float32) normed = X_row * inv_var if GEMMA: dY_W = dY_row * (W_row + 1.0) else: dY_W = dY_row * W_row rowsum_dY_normed = tl.sum(dY_W * normed, axis = 0) output = inv_var/n_cols * (n_cols*dY_W - normed*rowsum_dY_normed) tl.store(dY + col_offsets, output, mask = mask) pass @triton.jit def _gemma_rms_layernorm_forward( Y, Y_row_stride, X, X_row_stride, W, W_row_stride, r, r_row_stride, n_cols, eps, BLOCK_SIZE : tl.constexpr, ): # Copies https://github.com/google-deepmind/gemma/blob/main/gemma/layers.py#L31 # and https://github.com/keras-team/keras-nlp/blob/v0.8.2/keras_nlp/models/gemma/rms_normalization.py#L33 # exactly. Essentially all in float32! 
row_idx = tl.program_id(0) col_offsets = tl.arange(0, BLOCK_SIZE) mask = col_offsets < n_cols Y += row_idx * Y_row_stride X += row_idx * X_row_stride r += row_idx * r_row_stride X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) row_var = tl.sum(X_row * X_row, axis = 0) / n_cols inv_var = 1.0 / tl.sqrt(row_var + eps) # Must be 1/sqrt to match Deepmind's impl tl.store(r, inv_var) normed = X_row * inv_var output = normed * (W_row + 1.0) tl.store(Y + col_offsets, output, mask = mask) pass class Fast_RMS_Layernorm(torch.autograd.Function): @staticmethod def forward(ctx, X, W, eps, gemma = False): shape = X.shape dim = shape[-1] X = X.view(-1, dim) n_rows, n_cols = X.shape BLOCK_SIZE, num_warps = calculate_settings(n_cols) Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda") r = torch.empty(n_rows, dtype = torch.float32, device = "cuda") fx = _gemma_rms_layernorm_forward if gemma else _rms_layernorm_forward fx[(n_rows,)]( Y, Y.stride(0), X, X.stride(0), W, W.stride(0), r, r.stride(0), n_cols, eps, BLOCK_SIZE = BLOCK_SIZE, num_warps = num_warps, ) ctx.eps = eps ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps ctx.GEMMA = gemma ctx.save_for_backward(X, W, r) return Y.view(*shape) pass @staticmethod def backward(ctx, dY): shape = dY.shape dim = shape[-1] dY = dY.view(-1, dim) X, W, r = ctx.saved_tensors n_rows, n_cols = dY.shape dW = X _rms_layernorm_backward[(n_rows,)]( dY, dY.stride(0), X, X .stride(0), W, W .stride(0), r, r .stride(0), dW, dW.stride(0), n_cols, ctx.eps, GEMMA = ctx.GEMMA, BLOCK_SIZE = ctx.BLOCK_SIZE, num_warps = ctx.num_warps, ) dX = dY.view(*shape) return dX, None, None, None pass pass def fast_rms_layernorm(layernorm, X, gemma = False): W = layernorm.weight eps = layernorm.variance_epsilon out = Fast_RMS_Layernorm.apply(X, W, eps, gemma) return out pass
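Both forward kernels in this file implement RMS normalization, y = x / sqrt(mean(x**2) + eps) * w, with the Gemma variant scaling by (w + 1) and staying in float32 throughout. A compact PyTorch reference for sanity-checking outputs follows; it is illustrative only and not part of the repository.

import torch

def rms_layernorm_reference(x, w, eps, gemma=False):
    # accumulate in float32 like the kernels, then apply the weight
    x_f32 = x.float()
    inv_var = torch.rsqrt(x_f32.pow(2).mean(dim=-1, keepdim=True) + eps)
    normed = x_f32 * inv_var
    if gemma:
        return (normed * (w.float() + 1.0)).to(x.dtype)
    return (normed.to(w.dtype) * w).to(x.dtype)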
@triton.jit
def _rms_layernorm_forward(
    Y, Y_row_stride,
    X, X_row_stride,
    W, W_row_stride,
    r, r_row_stride,
    n_cols, eps,
    BLOCK_SIZE : tl.constexpr
):
    """
    Fast RMS Layernorm kernel
    Inspiration from a Triton tutorial:
    https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html
    """
    row_idx = tl.program_id(0)
    col_offsets = tl.arange(0, BLOCK_SIZE)
    mask = col_offsets < n_cols

    Y += row_idx * Y_row_stride
    X += row_idx * X_row_stride
    r += row_idx * r_row_stride

    X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32)
    W_row = tl.load(W + col_offsets, mask = mask, other = 0) #.to(tl.float32)

    row_var = tl.sum(X_row * X_row, axis = 0) / n_cols
    inv_var = tl.math.rsqrt(row_var + eps)
    tl.store(r, inv_var)
    normed = X_row * inv_var
    normed = normed.to(W_row.dtype)  # Exact copy from HF
    output = normed * W_row
    tl.store(Y + col_offsets, output, mask = mask)
pass


@triton.heuristics({"GEMMA": lambda args: args["GEMMA"],})
@triton.jit
def _rms_layernorm_backward(
    dY, dY_row_stride,
    X,  X_row_stride,
    W,  W_row_stride,
    r,  r_row_stride,
    dW, dW_row_stride,
    n_cols, eps,
    GEMMA      : tl.constexpr,
    BLOCK_SIZE : tl.constexpr,
):
    """
    Fast RMS Layernorm kernel for the backward pass
    Inspiration from a Triton tutorial:
    https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html
    """
    row_idx = tl.program_id(0)
    col_offsets = tl.arange(0, BLOCK_SIZE)
    mask = col_offsets < n_cols

    dY += row_idx * dY_row_stride
    X  += row_idx * X_row_stride
    r  += row_idx * r_row_stride

    dY_row = tl.load(dY + col_offsets, mask = mask, other = 0).to(tl.float32)
    X_row  = tl.load(X  + col_offsets, mask = mask, other = 0).to(tl.float32)
    W_row  = tl.load(W  + col_offsets, mask = mask, other = 0).to(tl.float32)

    # Get saved row variance
    inv_var = tl.load(r).to(tl.float32)
    normed = X_row * inv_var

    if GEMMA:
        dY_W = dY_row * (W_row + 1.0)
    else:
        dY_W = dY_row * W_row

    rowsum_dY_normed = tl.sum(dY_W * normed, axis = 0)
    output = inv_var / n_cols * (n_cols * dY_W - normed * rowsum_dY_normed)
    tl.store(dY + col_offsets, output, mask = mask)
pass
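The write-back above is the analytic RMSNorm gradient, dX = inv_var / n_cols * (n_cols * dY_W - normed * sum(dY_W * normed)) with dY_W = dY * W (or dY * (W + 1) for Gemma); note it is stored in place into dY, and no weight gradient is produced by this kernel. A quick autograd check of the formula in float64, illustrative only:

import torch

torch.manual_seed(0)
n_cols, eps = 8, 1e-6
x = torch.randn(n_cols, dtype=torch.float64, requires_grad=True)
w = torch.randn(n_cols, dtype=torch.float64)
dy = torch.randn(n_cols, dtype=torch.float64)

inv_var = torch.rsqrt(x.pow(2).mean() + eps)
(x * inv_var * w).backward(dy)

# the kernel's closed form
normed = x.detach() * inv_var.detach()
dy_w = dy * w
dx = inv_var.detach() / n_cols * (n_cols * dy_w - normed * (dy_w * normed).sum())
assert torch.allclose(dx, x.grad)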
@triton.jit def _gemma_rms_layernorm_forward( Y, Y_row_stride, X, X_row_stride, W, W_row_stride, r, r_row_stride, n_cols, eps, BLOCK_SIZE : tl.constexpr, ): # Copies https://github.com/google-deepmind/gemma/blob/main/gemma/layers.py#L31 # and https://github.com/keras-team/keras-nlp/blob/v0.8.2/keras_nlp/models/gemma/rms_normalization.py#L33 # exactly. Essentially all in float32! row_idx = tl.program_id(0) col_offsets = tl.arange(0, BLOCK_SIZE) mask = col_offsets < n_cols Y += row_idx * Y_row_stride X += row_idx * X_row_stride r += row_idx * r_row_stride X_row = tl.load(X + col_offsets, mask = mask, other = 0).to(tl.float32) W_row = tl.load(W + col_offsets, mask = mask, other = 0).to(tl.float32) row_var = tl.sum(X_row * X_row, axis = 0) / n_cols inv_var = 1.0 / tl.sqrt(row_var + eps) # Must be 1/sqrt to match Deepmind's impl tl.store(r, inv_var) normed = X_row * inv_var output = normed * (W_row + 1.0) tl.store(Y + col_offsets, output, mask = mask) pass class Fast_RMS_Layernorm(torch.autograd.Function): @staticmethod def forward(ctx, X, W, eps, gemma = False): shape = X.shape dim = shape[-1] X = X.view(-1, dim) n_rows, n_cols = X.shape BLOCK_SIZE, num_warps = calculate_settings(n_cols) Y = torch.empty((n_rows, n_cols), dtype = X.dtype, device = "cuda") r = torch.empty(n_rows, dtype = torch.float32, device = "cuda") fx = _gemma_rms_layernorm_forward if gemma else _rms_layernorm_forward fx[(n_rows,)]( Y, Y.stride(0), X, X.stride(0), W, W.stride(0), r, r.stride(0), n_cols, eps, BLOCK_SIZE = BLOCK_SIZE, num_warps = num_warps, ) ctx.eps = eps ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps ctx.GEMMA = gemma ctx.save_for_backward(X, W, r) return Y.view(*shape) pass @staticmethod def backward(ctx, dY): shape = dY.shape dim = shape[-1] dY = dY.view(-1, dim) X, W, r = ctx.saved_tensors n_rows, n_cols = dY.shape dW = X _rms_layernorm_backward[(n_rows,)]( dY, dY.stride(0), X, X .stride(0), W, W .stride(0), r, r .stride(0), dW, dW.stride(0), n_cols, ctx.eps, GEMMA = ctx.GEMMA, BLOCK_SIZE = ctx.BLOCK_SIZE, num_warps = ctx.num_warps, ) dX = dY.view(*shape) return dX, None, None, None pass pass def fast_rms_layernorm(layernorm, X, gemma = False): W = layernorm.weight eps = layernorm.variance_epsilon out = Fast_RMS_Layernorm.apply(X, W, eps, gemma) return out pass
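A minimal eager-mode reference for sanity-checking the RMS layernorm kernels above. It assumes the same inputs that fast_rms_layernorm sees (a weight tensor and a variance_epsilon); the function name and the comparison tolerance are illustrative, not part of the source file.

import torch

def rms_layernorm_reference(X, W, eps, gemma=False):
    # Mirror the kernel: accumulate in float32 and normalize by the root mean square.
    X32 = X.to(torch.float32)
    inv_var = torch.rsqrt(X32.pow(2).mean(dim=-1, keepdim=True) + eps)
    normed = X32 * inv_var
    if gemma:
        # Gemma path: stay in float32 and fold the +1 into the weight.
        return (normed * (W.to(torch.float32) + 1.0)).to(X.dtype)
    # Default path: cast the normalized activations to the weight dtype before scaling.
    return (normed.to(W.dtype) * W).to(X.dtype)

On random inputs, Fast_RMS_Layernorm.apply(X, W, eps, gemma) should agree with this reference to within the rounding error of the activation dtype.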
liyucheng09/longvila
llava/model/coat/activation/real_quantization/silu_bwd_legacy.py
https://github.com/liyucheng09/longvila/blob/d2478383b3088f2e14be2793dfaff9e9a5d1c062/llava/model/coat/activation/real_quantization/silu_bwd_legacy.py
# Copyright 2024 NVIDIA CORPORATION & AFFILIATES # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # SPDX-License-Identifier: Apache-2.0 import torch # 4 block import triton import triton.language as tl from triton.language.extra.cuda import libdevice from .common import FP8_MAX_VALUE, SCALE_MIN_THRES, get_configs_io_block """SiLU Activation Backward""" """Input uses 1 * 16 group quantization""" """Grad uses 1 * 16 group quantization""" """Output uses per-tensor quantization, but should be quantized outside this function""" """The input can be 2D or 3D, but the calculation is performed in 2D""" @triton.autotune( configs=[] + get_configs_io_block(), key=[ "N", ], ) @triton.heuristics( { "BLOCK_SN": lambda args: args["BLOCK_N"] // args["QB"], } ) @triton.jit def _fp8_silu_backward_legacy_kernel( output_ptr, output_scale_ptr, # output input_ptr, input_scale_ptr, # input grad_ptr, grad_scale_ptr, # input M, N, SN, QB: tl.constexpr, fp8_max, # shape input_stride_0, input_stride_1, # input stride s_input_stride_0, s_input_stride_1, # scale of input stride grad_stride_0, grad_stride_1, # input stride s_grad_stride_0, s_grad_stride_1, # scale of input stride output_stride_0, output_stride_1, # output stride s_output_stride_0, s_output_stride_1, # scale of output stride SCALE_MIN_THRES: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_SN: tl.constexpr, ): # CUDA block size # Block PID pid = tl.program_id(0) NUM_BLOCK_N = tl.cdiv(N, BLOCK_N) pid_dim0 = pid // NUM_BLOCK_N pid_dim1 = pid % NUM_BLOCK_N # pointers input_block_ptr = tl.make_block_ptr( base=input_ptr, shape=(M, N), strides=(input_stride_0, input_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0), ) # input ptr scale_input_ptr = tl.make_block_ptr( base=input_scale_ptr, shape=(M, SN), strides=(s_input_stride_0, s_input_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_SN), block_shape=(BLOCK_M, BLOCK_SN), order=(1, 0), ) input = tl.load(input_block_ptr) scale_input = tl.load(scale_input_ptr) input = input.to(tl.float32) scale_input = scale_input.to(tl.float32) # Dequantize and silu calculation scale_input = tl.reshape(scale_input, (BLOCK_M, BLOCK_SN, 1)) input = tl.reshape(input, (BLOCK_M, BLOCK_SN, QB)) input = input * scale_input # pointers of gradient grad_block_ptr = tl.make_block_ptr( base=grad_ptr, shape=(M, N), strides=(grad_stride_0, grad_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0), ) # grad ptr scale_grad_ptr = tl.make_block_ptr( base=grad_scale_ptr, shape=(M, SN), strides=(s_grad_stride_0, s_grad_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_SN), block_shape=(BLOCK_M, BLOCK_SN), order=(1, 0), ) grad = tl.load(grad_block_ptr) scale_grad = tl.load(scale_grad_ptr) grad = grad.to(tl.float32) scale_grad = scale_grad.to(tl.float32) # Dequantize and silu calculation scale_grad = tl.reshape(scale_grad, (BLOCK_M, BLOCK_SN, 1)) grad = tl.reshape(grad, (BLOCK_M, BLOCK_SN, QB)) grad = grad * scale_grad # 
Actual Calculation of SiLU's backward sigmoid = 1 / (1.0 + libdevice.exp(-input)) silu_output = sigmoid + input * sigmoid * (1 - sigmoid) silu_output = silu_output * grad # Quantize Scale calculation abs_output = tl.abs(silu_output) max_val = tl.max(abs_output, axis=2) + SCALE_MIN_THRES scale_output = max_val / fp8_max scale_output = tl.reshape(scale_output, (BLOCK_M, BLOCK_SN, 1)) # Quantize # silu_output = tl.fdiv(silu_output, scale_output) silu_output = silu_output.to(output_ptr.type.element_ty) scale_output = scale_output.to(output_scale_ptr.type.element_ty) scale_output = tl.reshape(scale_output, (BLOCK_M, BLOCK_SN)) silu_output = tl.reshape(silu_output, (BLOCK_M, BLOCK_N)) # debug # silu_output = input # scale_output = scale_input # pointers output_block_ptr = tl.make_block_ptr( base=output_ptr, shape=(M, N), strides=(output_stride_0, output_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0), ) scale_output_ptr = tl.make_block_ptr( base=output_scale_ptr, shape=(M, SN), strides=(s_output_stride_0, s_output_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_SN), block_shape=(BLOCK_M, BLOCK_SN), order=(1, 0), ) tl.store(output_block_ptr, silu_output, boundary_check=(0, 1)) tl.store(scale_output_ptr, scale_output, boundary_check=(0, 1)) def fp8_silu_backward_legacy(x, s_x, g, s_g, QB, stochastic=False): # Stochastic Rounding is left outside this function # Change batched 3D input to 2D batched = False if len(x.shape) == 3: assert len(s_x.shape) == 3 batched = True BS = x.shape[0] x = x.reshape(-1, x.shape[-1]) s_x = s_x.reshape(-1, s_x.shape[-1]) g = g.reshape(-1, g.shape[-1]) s_g = s_g.reshape(-1, s_g.shape[-1]) # defining the input and output tensor M, N = x.shape _, SN = s_x.shape # assume the shape of quantization block size is always 1 * G y = torch.empty_like(g, dtype=torch.bfloat16) s_y = torch.empty_like(s_g, dtype=s_g.dtype) fp8MaxValue = FP8_MAX_VALUE[g.dtype] # E4M3 and E5M2 have different max value grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),) _fp8_silu_backward_legacy_kernel[grid]( y, s_y, x, s_x, g, s_g, M, N, SN, QB, fp8MaxValue, x.stride(0), x.stride(1), s_x.stride(0), s_x.stride(1), g.stride(0), g.stride(1), s_g.stride(0), s_g.stride(1), y.stride(0), y.stride(1), s_y.stride(0), s_y.stride(1), SCALE_MIN_THRES=SCALE_MIN_THRES, ) # Recover 2D to 3D if batched: y = y.reshape(BS, -1, y.shape[-1]) s_y = s_y.reshape(BS, -1, s_y.shape[-1]) return y, s_y
@triton.jit def _fp8_silu_backward_legacy_kernel( output_ptr, output_scale_ptr, # output input_ptr, input_scale_ptr, # input grad_ptr, grad_scale_ptr, # input M, N, SN, QB: tl.constexpr, fp8_max, # shape input_stride_0, input_stride_1, # input stride s_input_stride_0, s_input_stride_1, # scale of input stride grad_stride_0, grad_stride_1, # input stride s_grad_stride_0, s_grad_stride_1, # scale of input stride output_stride_0, output_stride_1, # output stride s_output_stride_0, s_output_stride_1, # scale of output stride SCALE_MIN_THRES: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, BLOCK_SN: tl.constexpr, ): # CUDA block size # Block PID pid = tl.program_id(0) NUM_BLOCK_N = tl.cdiv(N, BLOCK_N) pid_dim0 = pid // NUM_BLOCK_N pid_dim1 = pid % NUM_BLOCK_N # pointers input_block_ptr = tl.make_block_ptr( base=input_ptr, shape=(M, N), strides=(input_stride_0, input_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0), ) # input ptr scale_input_ptr = tl.make_block_ptr( base=input_scale_ptr, shape=(M, SN), strides=(s_input_stride_0, s_input_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_SN), block_shape=(BLOCK_M, BLOCK_SN), order=(1, 0), ) input = tl.load(input_block_ptr) scale_input = tl.load(scale_input_ptr) input = input.to(tl.float32) scale_input = scale_input.to(tl.float32) # Dequantize and silu calculation scale_input = tl.reshape(scale_input, (BLOCK_M, BLOCK_SN, 1)) input = tl.reshape(input, (BLOCK_M, BLOCK_SN, QB)) input = input * scale_input # pointers of gradient grad_block_ptr = tl.make_block_ptr( base=grad_ptr, shape=(M, N), strides=(grad_stride_0, grad_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0), ) # grad ptr scale_grad_ptr = tl.make_block_ptr( base=grad_scale_ptr, shape=(M, SN), strides=(s_grad_stride_0, s_grad_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_SN), block_shape=(BLOCK_M, BLOCK_SN), order=(1, 0), ) grad = tl.load(grad_block_ptr) scale_grad = tl.load(scale_grad_ptr) grad = grad.to(tl.float32) scale_grad = scale_grad.to(tl.float32) # Dequantize and silu calculation scale_grad = tl.reshape(scale_grad, (BLOCK_M, BLOCK_SN, 1)) grad = tl.reshape(grad, (BLOCK_M, BLOCK_SN, QB)) grad = grad * scale_grad # Actual Calculation of SiLU's backward sigmoid = 1 / (1.0 + libdevice.exp(-input)) silu_output = sigmoid + input * sigmoid * (1 - sigmoid) silu_output = silu_output * grad # Quantize Scale calculation abs_output = tl.abs(silu_output) max_val = tl.max(abs_output, axis=2) + SCALE_MIN_THRES scale_output = max_val / fp8_max scale_output = tl.reshape(scale_output, (BLOCK_M, BLOCK_SN, 1)) # Quantize # silu_output = tl.fdiv(silu_output, scale_output) silu_output = silu_output.to(output_ptr.type.element_ty) scale_output = scale_output.to(output_scale_ptr.type.element_ty) scale_output = tl.reshape(scale_output, (BLOCK_M, BLOCK_SN)) silu_output = tl.reshape(silu_output, (BLOCK_M, BLOCK_N)) # debug # silu_output = input # scale_output = scale_input # pointers output_block_ptr = tl.make_block_ptr( base=output_ptr, shape=(M, N), strides=(output_stride_0, output_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_N), block_shape=(BLOCK_M, BLOCK_N), order=(1, 0), ) scale_output_ptr = tl.make_block_ptr( base=output_scale_ptr, shape=(M, SN), strides=(s_output_stride_0, s_output_stride_1), offsets=(pid_dim0 * BLOCK_M, pid_dim1 * BLOCK_SN), block_shape=(BLOCK_M, BLOCK_SN), order=(1, 0), ) tl.store(output_block_ptr, silu_output, boundary_check=(0, 
1)) tl.store(scale_output_ptr, scale_output, boundary_check=(0, 1)) def fp8_silu_backward_legacy(x, s_x, g, s_g, QB, stochastic=False): # Stochastic Rounding is left outside this function # Change batched 3D input to 2D batched = False if len(x.shape) == 3: assert len(s_x.shape) == 3 batched = True BS = x.shape[0] x = x.reshape(-1, x.shape[-1]) s_x = s_x.reshape(-1, s_x.shape[-1]) g = g.reshape(-1, g.shape[-1]) s_g = s_g.reshape(-1, s_g.shape[-1]) # defining the input and output tensor M, N = x.shape _, SN = s_x.shape # assume the shape of quantization block size is always 1 * G y = torch.empty_like(g, dtype=torch.bfloat16) s_y = torch.empty_like(s_g, dtype=s_g.dtype) fp8MaxValue = FP8_MAX_VALUE[g.dtype] # E4M3 and E5M2 have different max value grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),) _fp8_silu_backward_legacy_kernel[grid]( y, s_y, x, s_x, g, s_g, M, N, SN, QB, fp8MaxValue, x.stride(0), x.stride(1), s_x.stride(0), s_x.stride(1), g.stride(0), g.stride(1), s_g.stride(0), s_g.stride(1), y.stride(0), y.stride(1), s_y.stride(0), s_y.stride(1), SCALE_MIN_THRES=SCALE_MIN_THRES, ) # Recover 2D to 3D if batched: y = y.reshape(BS, -1, y.shape[-1]) s_y = s_y.reshape(BS, -1, s_y.shape[-1]) return y, s_y
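Setting the FP8 group quantization aside, the math in _fp8_silu_backward_legacy_kernel is the ordinary SiLU derivative times the upstream gradient, plus a per-group scale of (max|y| + SCALE_MIN_THRES) / fp8_max that is stored but not applied (the tl.fdiv is commented out, matching the docstring's note that quantization happens outside). A plain-precision sketch of both pieces; the function names, and the assumption that the last dimension is divisible by the group size, are mine rather than the library's.

import torch

def silu_backward_reference(x, g):
    # d/dx [x * sigmoid(x)] = sigmoid(x) + x * sigmoid(x) * (1 - sigmoid(x))
    s = torch.sigmoid(x.float())
    return (g.float() * (s + x.float() * s * (1 - s))).to(g.dtype)

def group_scales(y, group_size, fp8_max, scale_min_thres=0.0):
    # Per 1 x group_size block: scale = (max |y| + scale_min_thres) / fp8_max.
    # Assumes y.shape[-1] % group_size == 0.
    blocks = y.float().reshape(*y.shape[:-1], -1, group_size)
    return (blocks.abs().amax(dim=-1) + scale_min_thres) / fp8_max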
yynil/RWKVinLLAMA
rwkv/fla/ops/hgrn/chunk.py
https://github.com/yynil/RWKVinLLAMA/blob/6fa0a05a76b513dc6f0e11a32aaf1d89b8678376/rwkv/fla/ops/hgrn/chunk.py
# -*- coding: utf-8 -*- # Copyright (c) 2024, Yu Zhang, Songlin Yang # this function implements the chunkwise form of HGRN, inspired by # [Volodymyr Kyrylov in his blog post](https://proger.github.io/posts/scan/chunk.html) # also refer to the `accelerated-scan` lib: https://github.com/proger/accelerated-scan # from tests on H800, with B, H, D = 16, 4, 128, we see that the chunk can be greatly faster than the recurrent: # # Performance: # seq_len chunk recurrent chunk_bwd recurrent_bwd # 0 128.0 0.039360 0.061056 0.312160 0.205008 # 1 256.0 0.045824 0.123712 0.308784 0.297696 # 2 512.0 0.058688 0.241952 0.310720 0.626528 # 3 1024.0 0.088288 0.476992 0.313184 1.333152 # 4 2048.0 0.169472 0.943264 0.452464 2.724864 # 5 4096.0 0.329920 1.886144 0.881600 5.551520 # 6 8192.0 0.647872 3.755040 1.740496 11.117184 # 7 16384.0 1.272064 7.520576 3.446608 22.362528 from typing import Tuple import torch import triton import triton.language as tl from fla.utils import contiguous @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_fwd_kernel_h( x, g, gc, o, h0, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D p_x = x + i_bh * T * D + i_t * BT * D + o_d p_g = g + i_bh * T * D + i_t * BT * D + o_d p_gc = gc + i_bh * T * D + i_t * BT * D + o_d p_o = o + i_bh * T * D + i_t * BT * D + o_d b_h = tl.zeros([BD], dtype=tl.float32) b_gc = tl.zeros([BD], dtype=tl.float32) if USE_INITIAL_STATE: if i_t == 0: b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32) for i in range(0, BT): mask_t = mask & ((i_t * BT + i) < T) b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32) b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32) b_h = tl.exp(b_g) * b_h + b_x b_gc = b_gc + b_g tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t) tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t) p_x += D p_g += D p_gc += D p_o += D @triton.jit def chunk_hgrn_fwd_kernel_o( gc, o, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(1, tl.cdiv(T, BT)): p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32) # [BT, BD] b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_o = b_o + tl.exp(b_gc) * b_h0[None, :] tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, 
num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] ) @triton.jit def chunk_hgrn_bwd_kernel_h( g, gc, dx, do, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D BC = min(BT, T - i_t * BT) NT = tl.num_programs(1) p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d if i_t == NT - 1: b_gc = tl.zeros([BD], dtype=tl.float32) else: b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32) b_dh = tl.zeros([BD], dtype=tl.float32) for _ in range(BC - 1, -1, -1): tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask) b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32) b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32) b_gc = b_gc + b_g b_dh = b_dh + b_do b_dx = b_dh b_dh = b_dh * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask) p_g -= D p_gc -= D p_dx -= D p_do -= D @triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32) # [BT, BD] b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1)) class ChunkHGRNFunction(torch.autograd.Function): @staticmethod @contiguous def forward(ctx, x, g, initial_state=None, output_final_state=False): B, H, T, D = x.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) o = torch.empty_like(x, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_fwd_kernel_h[grid]( x, g, gc, o, initial_state, T=T, D=D, BT=BT, USE_INITIAL_STATE=initial_state is not None ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_fwd_kernel_o[grid]( gc, o, o.stride(1), o.stride(2), o.stride(3), T=T, D=D, BT=BT, BD=BD, num_warps=num_warps ) final_state = None if 
output_final_state: final_state = o[:, :, -1].clone() o = o.to(x.dtype) ctx.save_for_backward(g, o, initial_state) return o, final_state @staticmethod @contiguous def backward(ctx, do, dht=None): g, o, initial_state = ctx.saved_tensors B, H, T, D = do.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) dx = torch.empty_like(o, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_bwd_kernel_h[grid]( g, gc, dx, do, T=T, D=D, BT=BT ) dg = torch.empty_like(g, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_bwd_kernel_o[grid]( g, gc, o, dx, dg, o.stride(1), o.stride(2), o.stride(3), T=T, D=D, BT=BT, BD=BD, num_warps=num_warps ) if initial_state is not None: dg[:, :, 0] = (initial_state * dx[:, :, 0] * g[:, :, 0].float().exp()).to(dg.dtype) return dx.to(o.dtype), dg, None, None def chunk_hgrn( x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None, output_final_state: bool = False ) -> Tuple[torch.Tensor, torch.Tensor]: return ChunkHGRNFunction.apply(x, g, initial_state, output_final_state)
@triton.jit def chunk_hgrn_fwd_kernel_h( x, g, gc, o, h0, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr, USE_INITIAL_STATE: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D p_x = x + i_bh * T * D + i_t * BT * D + o_d p_g = g + i_bh * T * D + i_t * BT * D + o_d p_gc = gc + i_bh * T * D + i_t * BT * D + o_d p_o = o + i_bh * T * D + i_t * BT * D + o_d b_h = tl.zeros([BD], dtype=tl.float32) b_gc = tl.zeros([BD], dtype=tl.float32) if USE_INITIAL_STATE: if i_t == 0: b_h += tl.load(h0 + i_bh * D + o_d, mask=mask, other=0).to(tl.float32) for i in range(0, BT): mask_t = mask & ((i_t * BT + i) < T) b_x = tl.load(p_x, mask=mask_t, other=0).to(tl.float32) b_g = tl.load(p_g, mask=mask_t, other=0).to(tl.float32) b_h = tl.exp(b_g) * b_h + b_x b_gc = b_gc + b_g tl.store(p_gc, b_gc.to(p_o.dtype.element_ty), mask=mask_t) tl.store(p_o, b_h.to(p_o.dtype.element_ty), mask=mask_t) p_x += D p_g += D p_gc += D p_o += D
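For reference, chunk_hgrn_fwd_kernel_h runs the gated recurrence h_t = exp(g_t) * h_{t-1} + x_t inside each BT-sized chunk and records the running gate sum in gc. A naive sequential version over the whole sequence, assuming [B, H, T, D] inputs as in ChunkHGRNFunction; it is a correctness oracle, not a fast path.

import torch

def hgrn_recurrent_reference(x, g, initial_state=None):
    # x, g: [B, H, T, D]; recurrence h_t = exp(g_t) * h_{t-1} + x_t, accumulated in float32.
    B, H, T, D = x.shape
    h = torch.zeros(B, H, D, dtype=torch.float32, device=x.device) \
        if initial_state is None else initial_state.to(torch.float32)
    outs = []
    for t in range(T):
        h = g[:, :, t].float().exp() * h + x[:, :, t].float()
        outs.append(h)
    return torch.stack(outs, dim=2).to(x.dtype)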
yynil/RWKVinLLAMA
rwkv/fla/ops/hgrn/chunk.py
https://github.com/yynil/RWKVinLLAMA/blob/6fa0a05a76b513dc6f0e11a32aaf1d89b8678376/rwkv/fla/ops/hgrn/chunk.py
@triton.jit def chunk_hgrn_fwd_kernel_o( gc, o, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(1, tl.cdiv(T, BT)): p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] b_h0 = tl.load(o + i_bh * T * D + i_t * BT * D - D + o_d, mask=mask, other=0).to(tl.float32) # [BT, BD] b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_o = b_o + tl.exp(b_gc) * b_h0[None, :] tl.store(p_o, b_o.to(p_o.dtype.element_ty), boundary_check=(0, 1)) @triton.autotune( configs=[ triton.Config({'BD': 32}, num_warps=1), triton.Config({'BD': 32}, num_warps=2), triton.Config({'BD': 32}, num_warps=4), triton.Config({'BD': 32}, num_warps=8), triton.Config({'BD': 64}, num_warps=1), triton.Config({'BD': 64}, num_warps=2), triton.Config({'BD': 64}, num_warps=4), triton.Config({'BD': 64}, num_warps=8), triton.Config({'BD': 128}, num_warps=1), triton.Config({'BD': 128}, num_warps=2), triton.Config({'BD': 128}, num_warps=4), triton.Config({'BD': 128}, num_warps=8), ], key=['D'] )
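chunk_hgrn_fwd_kernel_o then stitches the chunks together: for every chunk after the first it adds exp(cumulative gate) times the last state of the previous chunk, walking the chunks in order so the carry propagates. The same correction in plain PyTorch, with shapes and names assumed, intended for intuition rather than as a drop-in:

import torch

def stitch_chunks(o, gc, BT):
    # o:  [B, H, T, D] per-chunk states from chunk_hgrn_fwd_kernel_h
    # gc: [B, H, T, D] per-chunk cumulative gate sums
    T = o.shape[2]
    o = o.clone()
    for start in range(BT, T, BT):
        h0 = o[:, :, start - 1]                 # carry: last state of the previous chunk
        end = min(start + BT, T)
        o[:, :, start:end] += gc[:, :, start:end].exp() * h0.unsqueeze(2)
    return o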
yynil/RWKVinLLAMA
rwkv/fla/ops/hgrn/chunk.py
https://github.com/yynil/RWKVinLLAMA/blob/6fa0a05a76b513dc6f0e11a32aaf1d89b8678376/rwkv/fla/ops/hgrn/chunk.py
@triton.jit def chunk_hgrn_bwd_kernel_h( g, gc, dx, do, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_t, i_bh = tl.program_id(0), tl.program_id(1), tl.program_id(2) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D BC = min(BT, T - i_t * BT) NT = tl.num_programs(1) p_g = g + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_gc = gc + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_dx = dx + (i_bh * T + i_t * BT + BC - 1) * D + o_d p_do = do + (i_bh * T + i_t * BT + BC - 1) * D + o_d if i_t == NT - 1: b_gc = tl.zeros([BD], dtype=tl.float32) else: b_gc = tl.load(g + (i_bh * T + i_t * BT + BT) * D + o_d, mask=mask, other=0).to(tl.float32) b_dh = tl.zeros([BD], dtype=tl.float32) for _ in range(BC - 1, -1, -1): tl.store(p_gc, b_gc.to(p_gc.dtype.element_ty), mask=mask) b_g = tl.load(p_g, mask=mask, other=0).to(tl.float32) b_do = tl.load(p_do, mask=mask, other=0).to(tl.float32) b_gc = b_gc + b_g b_dh = b_dh + b_do b_dx = b_dh b_dh = b_dh * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), mask=mask) p_g -= D p_gc -= D p_dx -= D p_do -= D
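chunk_hgrn_bwd_kernel_h runs the same scan in reverse inside each chunk, dx_t = do_t + exp(g_{t+1}) * dx_{t+1}, with the cross-chunk carry applied afterwards by chunk_hgrn_bwd_kernel_o. A sequential reference for the combined effect of the two backward kernels; the name and the float32 accumulation mirror the forward sketch and are assumptions, not part of this file.

import torch

def hgrn_dx_reference(g, do):
    # Reverse scan over the full sequence: dx_t = do_t + exp(g_{t+1}) * dx_{t+1}.
    B, H, T, D = do.shape
    dx = torch.zeros(B, H, T, D, dtype=torch.float32, device=do.device)
    carry = torch.zeros(B, H, D, dtype=torch.float32, device=do.device)
    for t in range(T - 1, -1, -1):
        carry = do[:, :, t].float() + carry     # entering carry equals exp(g_{t+1}) * dx_{t+1}
        dx[:, :, t] = carry
        carry = carry * g[:, :, t].float().exp()
    return dx.to(do.dtype)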
yynil/RWKVinLLAMA
rwkv/fla/ops/hgrn/chunk.py
https://github.com/yynil/RWKVinLLAMA/blob/6fa0a05a76b513dc6f0e11a32aaf1d89b8678376/rwkv/fla/ops/hgrn/chunk.py
@triton.jit def chunk_hgrn_bwd_kernel_o( g, gc, o, dx, dg, s_h, s_t, s_d, T: tl.constexpr, D: tl.constexpr, BT: tl.constexpr, BD: tl.constexpr ): i_d, i_bh = tl.program_id(0), tl.program_id(1) o_d = i_d * BD + tl.arange(0, BD) mask = o_d < D for i_t in range(tl.cdiv(T, BT) - 1, -1, -1): p_g = tl.make_block_ptr(g + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_gc = tl.make_block_ptr(gc + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_o = tl.make_block_ptr(o + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT - 1, i_d * BD), (BT, BD), (1, 0)) p_dx = tl.make_block_ptr(dx + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) p_dg = tl.make_block_ptr(dg + i_bh * s_h, (T, D), (s_t, s_d), (i_t * BT, i_d * BD), (BT, BD), (1, 0)) # [BD,] mask_t = mask & ((i_t + 1) * BT < T) b_ht = tl.load(dx + i_bh * T * D + (i_t + 1) * BT * D + o_d, mask=mask_t, other=0).to(tl.float32) # [BT, BD] b_g = tl.load(p_g, boundary_check=(0, 1)).to(tl.float32) b_gc = tl.load(p_gc, boundary_check=(0, 1)).to(tl.float32) b_o = tl.load(p_o, boundary_check=(0, 1)).to(tl.float32) b_dx = tl.load(p_dx, boundary_check=(0, 1)).to(tl.float32) b_dx = b_dx + tl.exp(b_gc) * b_ht[None, :] b_dg = b_o * b_dx * tl.exp(b_g) tl.store(p_dx, b_dx.to(p_dx.dtype.element_ty), boundary_check=(0, 1)) tl.store(p_dg, b_dg.to(p_dg.dtype.element_ty), boundary_check=(0, 1)) class ChunkHGRNFunction(torch.autograd.Function): @staticmethod @contiguous def forward(ctx, x, g, initial_state=None, output_final_state=False): B, H, T, D = x.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) o = torch.empty_like(x, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_fwd_kernel_h[grid]( x, g, gc, o, initial_state, T=T, D=D, BT=BT, USE_INITIAL_STATE=initial_state is not None ) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_fwd_kernel_o[grid]( gc, o, o.stride(1), o.stride(2), o.stride(3), T=T, D=D, BT=BT, BD=BD, num_warps=num_warps ) final_state = None if output_final_state: final_state = o[:, :, -1].clone() o = o.to(x.dtype) ctx.save_for_backward(g, o, initial_state) return o, final_state @staticmethod @contiguous def backward(ctx, do, dht=None): g, o, initial_state = ctx.saved_tensors B, H, T, D = do.shape BT, BD = 128, min(64, triton.next_power_of_2(D)) num_warps = 8 if BD == 64 else 4 gc = torch.empty_like(g, dtype=torch.float) dx = torch.empty_like(o, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), triton.cdiv(T, meta['BT']), B * H) chunk_hgrn_bwd_kernel_h[grid]( g, gc, dx, do, T=T, D=D, BT=BT ) dg = torch.empty_like(g, dtype=torch.float) def grid(meta): return (triton.cdiv(D, meta['BD']), B * H) chunk_hgrn_bwd_kernel_o[grid]( g, gc, o, dx, dg, o.stride(1), o.stride(2), o.stride(3), T=T, D=D, BT=BT, BD=BD, num_warps=num_warps ) if initial_state is not None: dg[:, :, 0] = (initial_state * dx[:, :, 0] * g[:, :, 0].float().exp()).to(dg.dtype) return dx.to(o.dtype), dg, None, None def chunk_hgrn( x: torch.Tensor, g: torch.Tensor, initial_state: torch.Tensor = None, output_final_state: bool = False ) -> Tuple[torch.Tensor, torch.Tensor]: return ChunkHGRNFunction.apply(x, g, initial_state, output_final_state)
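A short usage sketch for chunk_hgrn as exposed at the bottom of this file. It assumes a CUDA device with Triton available, contiguous [B, H, T, D] float32 tensors, and log-gates g in (-inf, 0] (produced here with logsigmoid); the shapes are illustrative.

import torch
import torch.nn.functional as F
# from fla.ops.hgrn.chunk import chunk_hgrn  # import path as used by this package

B, H, T, D = 2, 4, 512, 128
x = torch.randn(B, H, T, D, device='cuda', requires_grad=True)
g = F.logsigmoid(torch.randn(B, H, T, D, device='cuda')).detach().requires_grad_()  # exp(g) in (0, 1)
h0 = torch.zeros(B, H, D, device='cuda')

o, final_state = chunk_hgrn(x, g, initial_state=h0, output_final_state=True)
print(o.shape, final_state.shape)  # torch.Size([2, 4, 512, 128]) torch.Size([2, 4, 128])

o.sum().backward()                 # gradients flow through the custom autograd.Function
print(x.grad.shape, g.grad.shape)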
zinccat/TritonTrace
dynamic/level2/1_Conv2D_ReLU_BiasAdd/triton_poi_fused_threshold_backward_0.py
https://github.com/zinccat/TritonTrace/blob/a24eba759122f6f75bf349cd44ef329d9cd3e476/dynamic/level2/1_Conv2D_ReLU_BiasAdd/triton_poi_fused_threshold_backward_0.py
# From: 1_Conv2D_ReLU_BiasAdd import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties triton_helpers.set_driver_to_gpu() @triton_heuristics.pointwise( size_hints={'x': 2097152}, filename=__file__, triton_meta={'signature': {'in_ptr0': '*i1', 'in_ptr1': '*fp32', 'out_ptr0': '*fp32', 'xnumel': 'i32'}, 'device': DeviceProperties(type='cuda', index=0, multi_processor_count=82, cc=86, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=1536, warp_size=32), 'constants': {}, 'configs': [AttrsDescriptor.from_dict({'arg_properties': {'tt.divisibility': (0, 1, 2, 3), 'tt.equal_to': ()}, 'cls': 'AttrsDescriptor'})]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_threshold_backward_0', 'mutated_arg_names': [], 'optimize_mem': True, 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': '5A06A9183D03767BDAB0FC92F89F8279B36CCC7C4B95A264F6D3CCE126D2D3A0', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = 0.0 tmp3 = tl.where(tmp0, tmp2, tmp1) tl.store(out_ptr0 + (x0), tmp3, xmask)
@triton.jit def triton_poi_fused_threshold_backward_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask).to(tl.int1) tmp1 = tl.load(in_ptr1 + (x0), xmask) tmp2 = 0.0 tmp3 = tl.where(tmp0, tmp2, tmp1) tl.store(out_ptr0 + (x0), tmp3, xmask)
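The generated triton_poi_fused_threshold_backward_0 is the ReLU backward that inductor emits for this Conv2D + ReLU model: in_ptr0 is a boolean mask marking positions the forward ReLU clamped, in_ptr1 is the incoming gradient, and the output zeroes the gradient wherever the mask is set. An eager-mode equivalent, with argument names of my choosing:

import torch

def threshold_backward_reference(clamped_mask, grad_out):
    # Matches tl.where(tmp0, 0.0, tmp1); clamped_mask is typically (relu_output <= 0).
    return torch.where(clamped_mask, torch.zeros_like(grad_out), grad_out)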
thuml/learn_torch.compile
timm/convit_base_training_cuda/__compiled_fn_6 kernel 27.py
https://github.com/thuml/learn_torch.compile/blob/b1a5e6dfa7a14996962dc22bff78b41cffac430d/timm/convit_base_training_cuda/__compiled_fn_6%20kernel%2027.py
import triton import triton.language as tl from torch._inductor.ir import ReductionHint from torch._inductor.ir import TileHint from torch._inductor.triton_heuristics import AutotuneHint, reduction from torch._inductor.utils import instance_descriptor from torch._inductor import triton_helpers @reduction( size_hints=[1024, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32', 11: 'i32'}, 'device': 0, 'device_type': 'cuda', 'constants': {}, 'configs': [instance_descriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=(), ids_of_folded_args=(), divisible_by_8=(10, 11))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_div_mul_sum_26', 'mutated_arg_names': []} ) @triton.jit def triton_(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 608 rnumel = 8088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 38 x1 = (xindex // 38) _tmp13 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex _tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = r2 + (8088*x0) tmp1 = tl.full([1, 1], 307328, tl.int32) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + ((38416*x1) + (614656*(((r2 + (8088*x0)) // 38416) % 8)) + ((r2 + (8088*x0)) % 38416)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr1 + ((196*x1) + (3136*(((r2 + (8088*x0)) // 38416) % 8)) + (((r2 + (8088*x0)) // 196) % 196)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tmp3 / tmp4 tmp6 = tl.load(in_ptr2 + ((196*x1) + (3136*(((r2 + (8088*x0)) // 38416) % 8)) + (((r2 + (8088*x0)) // 196) % 196)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr3 + ((38416*x1) + (614656*(((r2 + (8088*x0)) // 38416) % 8)) + ((r2 + (8088*x0)) % 38416)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 * tmp8 tmp10 = tl.full(tmp9.shape, 0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = _tmp13 + tmp12 _tmp13 = tl.where(rmask & xmask, tmp14, _tmp13) tmp15 = tl.load(in_ptr4 + ((38416*x1) + (614656*(((r2 + (8088*x0)) // 38416) % 8)) + ((r2 + (8088*x0)) % 38416)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp7 * tmp15 tmp17 = tl.full(tmp16.shape, 0, tmp16.dtype) tmp18 = tl.where(tmp2, tmp16, tmp17) tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = _tmp20 + tmp19 _tmp20 = tl.where(rmask & xmask, tmp21, _tmp20) tmp22 = tl.load(in_ptr5 + ((38416*x1) + (614656*(((r2 + (8088*x0)) // 38416) % 8)) + ((r2 + (8088*x0)) % 38416)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tl.load(in_ptr6 + ((196*x1) + (3136*(((r2 + (8088*x0)) // 38416) % 8)) + (((r2 + (8088*x0)) // 196) % 196)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp8 * tmp23 tmp25 = tmp22 - tmp24 tmp26 = tl.full(tmp25.shape, 0, tmp25.dtype) tmp27 = tl.where(tmp2, tmp25, tmp26) tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = _tmp29 + tmp28 _tmp29 = 
tl.where(rmask & xmask, tmp30, _tmp29) tmp13 = tl.sum(_tmp13, 1)[:, None] tl.store(out_ptr0 + (x3), tmp13, xmask) tmp20 = tl.sum(_tmp20, 1)[:, None] tl.store(out_ptr1 + (x3), tmp20, xmask) tmp29 = tl.sum(_tmp29, 1)[:, None] tl.store(out_ptr2 + (x3), tmp29, xmask)
@triton.jit def triton_(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 608 rnumel = 8088 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 38 x1 = (xindex // 38) _tmp13 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex _tmp20 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) _tmp29 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = r2 + (8088*x0) tmp1 = tl.full([1, 1], 307328, tl.int32) tmp2 = tmp0 < tmp1 tmp3 = tl.load(in_ptr0 + ((38416*x1) + (614656*(((r2 + (8088*x0)) // 38416) % 8)) + ((r2 + (8088*x0)) % 38416)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr1 + ((196*x1) + (3136*(((r2 + (8088*x0)) // 38416) % 8)) + (((r2 + (8088*x0)) // 196) % 196)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp5 = tmp3 / tmp4 tmp6 = tl.load(in_ptr2 + ((196*x1) + (3136*(((r2 + (8088*x0)) // 38416) % 8)) + (((r2 + (8088*x0)) // 196) % 196)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp7 = tmp5 + tmp6 tmp8 = tl.load(in_ptr3 + ((38416*x1) + (614656*(((r2 + (8088*x0)) // 38416) % 8)) + ((r2 + (8088*x0)) % 38416)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp9 = tmp7 * tmp8 tmp10 = tl.full(tmp9.shape, 0, tmp9.dtype) tmp11 = tl.where(tmp2, tmp9, tmp10) tmp12 = tl.broadcast_to(tmp11, [XBLOCK, RBLOCK]) tmp14 = _tmp13 + tmp12 _tmp13 = tl.where(rmask & xmask, tmp14, _tmp13) tmp15 = tl.load(in_ptr4 + ((38416*x1) + (614656*(((r2 + (8088*x0)) // 38416) % 8)) + ((r2 + (8088*x0)) % 38416)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp16 = tmp7 * tmp15 tmp17 = tl.full(tmp16.shape, 0, tmp16.dtype) tmp18 = tl.where(tmp2, tmp16, tmp17) tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK]) tmp21 = _tmp20 + tmp19 _tmp20 = tl.where(rmask & xmask, tmp21, _tmp20) tmp22 = tl.load(in_ptr5 + ((38416*x1) + (614656*(((r2 + (8088*x0)) // 38416) % 8)) + ((r2 + (8088*x0)) % 38416)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp23 = tl.load(in_ptr6 + ((196*x1) + (3136*(((r2 + (8088*x0)) // 38416) % 8)) + (((r2 + (8088*x0)) // 196) % 196)), rmask & tmp2 & xmask, eviction_policy='evict_last', other=0.0) tmp24 = tmp8 * tmp23 tmp25 = tmp22 - tmp24 tmp26 = tl.full(tmp25.shape, 0, tmp25.dtype) tmp27 = tl.where(tmp2, tmp25, tmp26) tmp28 = tl.broadcast_to(tmp27, [XBLOCK, RBLOCK]) tmp30 = _tmp29 + tmp28 _tmp29 = tl.where(rmask & xmask, tmp30, _tmp29) tmp13 = tl.sum(_tmp13, 1)[:, None] tl.store(out_ptr0 + (x3), tmp13, xmask) tmp20 = tl.sum(_tmp20, 1)[:, None] tl.store(out_ptr1 + (x3), tmp20, xmask) tmp29 = tl.sum(_tmp29, 1)[:, None] tl.store(out_ptr2 + (x3), tmp29, xmask)
zinccat/TritonTrace
dynamic/level3/11_VGG16/triton_poi_fused_threshold_backward_19.py
https://github.com/zinccat/TritonTrace/blob/a24eba759122f6f75bf349cd44ef329d9cd3e476/dynamic/level3/11_VGG16/triton_poi_fused_threshold_backward_19.py
# From: 11_VGG16 import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[33554432], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=86, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=1536, multi_processor_count=82), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_threshold_backward_19', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': '712B1D69F892A891D8FFA5075DCAB47CFF4E132D88BFC66744701CEAE226F127', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tmp4 = tl.where(tmp2, tmp1, tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask)
@triton.jit def triton_(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = tl.load(in_ptr0 + (x0), xmask) tmp3 = tl.load(in_out_ptr0 + (x0), xmask) tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tmp4 = tl.where(tmp2, tmp1, tmp3) tl.store(in_out_ptr0 + (x0), tmp4, xmask)
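The chunk above is Inductor's fused threshold_backward for VGG16's ReLU layers: it zeroes the incoming gradient wherever the saved forward activation was non-positive and writes the result back in place (hence mutated_arg_names=['in_out_ptr0']). For orientation, here is a minimal hand-written sketch of the same pattern with a plain @triton.jit kernel and an explicit launch; the names relu_backward_kernel / relu_backward_ and the block size are illustrative choices, not part of the traced code.

import torch
import triton
import triton.language as tl

@triton.jit
def relu_backward_kernel(grad_ptr, act_ptr, numel, BLOCK: tl.constexpr):
    offsets = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offsets < numel
    act = tl.load(act_ptr + offsets, mask=mask)
    grad = tl.load(grad_ptr + offsets, mask=mask)
    # Same predicate as the traced kernel: the gradient passes only where the activation was > 0.
    grad = tl.where(act <= 0.0, 0.0, grad)
    tl.store(grad_ptr + offsets, grad, mask=mask)

def relu_backward_(grad, act, block=1024):
    # In-place on grad, mirroring the in_out_ptr0 argument of the generated kernel.
    numel = grad.numel()
    relu_backward_kernel[(triton.cdiv(numel, block),)](grad, act, numel, BLOCK=block)
    return grad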
zinccat/TritonTrace
dynamic/level3/16_DenseNet201/triton_poi_fused_avg_pool2d_backward_convolution_backward_283.py
https://github.com/zinccat/TritonTrace/blob/a24eba759122f6f75bf349cd44ef329d9cd3e476/dynamic/level3/16_DenseNet201/triton_poi_fused_avg_pool2d_backward_convolution_backward_283.py
# From: 16_DenseNet201 import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties @triton_heuristics.pointwise( size_hints=[4194304], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32', 4: 'i32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=86, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=1536, multi_processor_count=82), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 6), equal_to_1=())]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_backward_convolution_backward_283', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': '712B1D69F892A891D8FFA5075DCAB47CFF4E132D88BFC66744701CEAE226F127', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_(in_ptr0, out_ptr0, ks0, ks1, ks2, ks3, xnumel, XBLOCK : tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % ks0 x1 = (xindex // ks0) % ks0 x2 = (xindex // ks1) x4 = xindex tmp0 = tl.load(in_ptr0 + (x2 + (x2*((triton_helpers.div_floor_integer((-1) + (triton_helpers.div_floor_integer((-1) + ks3, 4)), 2))*(triton_helpers.div_floor_integer((-1) + (triton_helpers.div_floor_integer((-1) + ks3, 4)), 2)))) + ((triton_helpers.div_floor_integer((-1) + (triton_helpers.div_floor_integer((-1) + ks3, 4)), 2))*((((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0)))) * ((((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0)))) <= ((-1) + ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2))))) + ((-1) + ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2)))) * (((-1) + ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2)))) < (((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0))))))) + (2*x2*(triton_helpers.div_floor_integer((-1) + (triton_helpers.div_floor_integer((-1) + ks3, 4)), 2))) + ((((0) * ((0) >= ((x0 // 2))) + ((x0 // 2)) * (((x0 // 2)) > (0)))) * ((((0) * ((0) >= ((x0 // 2))) + ((x0 // 2)) * (((x0 // 2)) > (0)))) <= ((-1) + ((ks2) * ((ks2) <= (1 + (x0 // 2))) + (1 + (x0 // 2)) * ((1 + (x0 // 2)) < (ks2))))) + ((-1) + ((ks2) * ((ks2) <= (1 + (x0 // 2))) + (1 + (x0 // 2)) * ((1 + (x0 // 2)) < (ks2)))) * (((-1) + ((ks2) * ((ks2) <= (1 + (x0 // 2))) + (1 + (x0 // 2)) * ((1 + (x0 // 2)) < (ks2)))) < (((0) * ((0) >= ((x0 // 2))) + ((x0 // 2)) * (((x0 // 2)) > (0)))))) + ((((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0)))) * ((((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0)))) <= ((-1) + ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2))))) + ((-1) + ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2)))) * (((-1) + ((ks2) * ((ks2) <= (1 + (x1 // 
2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2)))) < (((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0))))))), xmask, eviction_policy='evict_last') tmp1 = tmp0 / 4 tmp2 = ((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0))) tmp3 = ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2))) tmp4 = tmp2 < tmp3 tmp5 = ((0) * ((0) >= ((x0 // 2))) + ((x0 // 2)) * (((x0 // 2)) > (0))) tmp6 = ((ks2) * ((ks2) <= (1 + (x0 // 2))) + (1 + (x0 // 2)) * ((1 + (x0 // 2)) < (ks2))) tmp7 = tmp5 < tmp6 tmp8 = tmp4 & tmp7 tmp9 = 0.0 tmp10 = tl.where(tmp8, tmp1, tmp9) tl.store(out_ptr0 + (x4), tmp10, xmask)
@triton.jit def triton_(in_ptr0, out_ptr0, ks0, ks1, ks2, ks3, xnumel, XBLOCK : tl.constexpr): xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex % ks0 x1 = (xindex // ks0) % ks0 x2 = (xindex // ks1) x4 = xindex tmp0 = tl.load(in_ptr0 + (x2 + (x2*((triton_helpers.div_floor_integer((-1) + (triton_helpers.div_floor_integer((-1) + ks3, 4)), 2))*(triton_helpers.div_floor_integer((-1) + (triton_helpers.div_floor_integer((-1) + ks3, 4)), 2)))) + ((triton_helpers.div_floor_integer((-1) + (triton_helpers.div_floor_integer((-1) + ks3, 4)), 2))*((((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0)))) * ((((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0)))) <= ((-1) + ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2))))) + ((-1) + ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2)))) * (((-1) + ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2)))) < (((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0))))))) + (2*x2*(triton_helpers.div_floor_integer((-1) + (triton_helpers.div_floor_integer((-1) + ks3, 4)), 2))) + ((((0) * ((0) >= ((x0 // 2))) + ((x0 // 2)) * (((x0 // 2)) > (0)))) * ((((0) * ((0) >= ((x0 // 2))) + ((x0 // 2)) * (((x0 // 2)) > (0)))) <= ((-1) + ((ks2) * ((ks2) <= (1 + (x0 // 2))) + (1 + (x0 // 2)) * ((1 + (x0 // 2)) < (ks2))))) + ((-1) + ((ks2) * ((ks2) <= (1 + (x0 // 2))) + (1 + (x0 // 2)) * ((1 + (x0 // 2)) < (ks2)))) * (((-1) + ((ks2) * ((ks2) <= (1 + (x0 // 2))) + (1 + (x0 // 2)) * ((1 + (x0 // 2)) < (ks2)))) < (((0) * ((0) >= ((x0 // 2))) + ((x0 // 2)) * (((x0 // 2)) > (0)))))) + ((((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0)))) * ((((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0)))) <= ((-1) + ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2))))) + ((-1) + ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2)))) * (((-1) + ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2)))) < (((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0))))))), xmask, eviction_policy='evict_last') tmp1 = tmp0 / 4 tmp2 = ((0) * ((0) >= ((x1 // 2))) + ((x1 // 2)) * (((x1 // 2)) > (0))) tmp3 = ((ks2) * ((ks2) <= (1 + (x1 // 2))) + (1 + (x1 // 2)) * ((1 + (x1 // 2)) < (ks2))) tmp4 = tmp2 < tmp3 tmp5 = ((0) * ((0) >= ((x0 // 2))) + ((x0 // 2)) * (((x0 // 2)) > (0))) tmp6 = ((ks2) * ((ks2) <= (1 + (x0 // 2))) + (1 + (x0 // 2)) * ((1 + (x0 // 2)) < (ks2))) tmp7 = tmp5 < tmp6 tmp8 = tmp4 & tmp7 tmp9 = 0.0 tmp10 = tl.where(tmp8, tmp1, tmp9) tl.store(out_ptr0 + (x4), tmp10, xmask)
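This dynamic-shape kernel back-propagates DenseNet's 2x2 average pooling: each input location receives its window's gradient divided by 4, and the min/max index arithmetic only clamps the window bounds at the edges. A small eager check of that rule, with made-up shapes and a non-overlapping 2x2 pool assumed:

import torch
import torch.nn.functional as F

x = torch.randn(1, 1, 8, 8, device="cuda", requires_grad=True)
y = F.avg_pool2d(x, kernel_size=2, stride=2)
y.backward(torch.ones_like(y))
# Every input element belongs to exactly one 2x2 window, so it receives grad / 4,
# i.e. the `tmp1 = tmp0 / 4` line in the kernel above.
assert torch.allclose(x.grad, torch.full_like(x, 0.25))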
thuml/learn_torch.compile
timm/tf_mixnet_l_training_cuda/__compiled_fn_6 kernel 59.py
https://github.com/thuml/learn_torch.compile/blob/b1a5e6dfa7a14996962dc22bff78b41cffac430d/timm/tf_mixnet_l_training_cuda/__compiled_fn_6%20kernel%2059.py
import triton import triton.language as tl from torch._inductor.ir import ReductionHint from torch._inductor.ir import TileHint from torch._inductor.triton_heuristics import AutotuneHint, pointwise from torch._inductor.utils import instance_descriptor from torch._inductor import triton_helpers @pointwise( size_hints=[2048, 256], tile_hint=TileHint.DEFAULT, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': 0, 'device_type': 'cuda', 'constants': {}, 'configs': [instance_descriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=(), ids_of_folded_args=(), divisible_by_8=(9,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_cat_native_batch_norm_backward_58', 'mutated_arg_names': ['in_out_ptr0']}, min_elem_per_thread=0 ) @triton.jit def triton_(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1280 xnumel = 196 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 160 y1 = (yindex // 160) tmp0 = tl.load(in_out_ptr0 + (x2 + (196*y3)), xmask & ymask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr2 + (y0 + (160*x2) + (31360*y1)), xmask & ymask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr3 + (y0), ymask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr4 + (y0), ymask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr5 + (y0), ymask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr6 + (y0), ymask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr7 + (y0), ymask, eviction_policy='evict_last') tmp1 = y0 tmp2 = tl.full([1, 1], 0, tl.int64) tmp3 = tmp1 >= tmp2 tmp4 = tl.full([1, 1], 80, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr0 + (x2 + (196*y0) + (15680*y1)), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp5, tmp6, tmp7) tmp9 = tmp1 >= tmp4 tmp10 = tl.full([1, 1], 160, tl.int64) tmp11 = tmp1 < tmp10 tmp12 = tl.load(in_ptr1 + ((-15680) + x2 + (196*y0) + (15680*y1)), tmp9 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tl.where(tmp5, tmp8, tmp14) tmp16 = tmp0 + tmp15 tmp19 = tmp17 - tmp18 tmp21 = 0.0006377551020408163 tmp22 = tmp20 * tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 * tmp24 tmp26 = tmp19 * tmp25 tmp27 = tmp16 - tmp26 tmp29 = tmp28 * tmp21 tmp30 = tmp27 - tmp29 tmp32 = tmp23 * tmp31 tmp33 = tmp30 * tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + (x2 + (196*y3)), tmp33, xmask & ymask)
@triton.jit def triton_(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 1280 xnumel = 196 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 160 y1 = (yindex // 160) tmp0 = tl.load(in_out_ptr0 + (x2 + (196*y3)), xmask & ymask, eviction_policy='evict_last') tmp17 = tl.load(in_ptr2 + (y0 + (160*x2) + (31360*y1)), xmask & ymask, eviction_policy='evict_last') tmp18 = tl.load(in_ptr3 + (y0), ymask, eviction_policy='evict_last') tmp20 = tl.load(in_ptr4 + (y0), ymask, eviction_policy='evict_last') tmp23 = tl.load(in_ptr5 + (y0), ymask, eviction_policy='evict_last') tmp28 = tl.load(in_ptr6 + (y0), ymask, eviction_policy='evict_last') tmp31 = tl.load(in_ptr7 + (y0), ymask, eviction_policy='evict_last') tmp1 = y0 tmp2 = tl.full([1, 1], 0, tl.int64) tmp3 = tmp1 >= tmp2 tmp4 = tl.full([1, 1], 80, tl.int64) tmp5 = tmp1 < tmp4 tmp6 = tl.load(in_ptr0 + (x2 + (196*y0) + (15680*y1)), tmp5 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp7 = tl.full(tmp6.shape, 0.0, tmp6.dtype) tmp8 = tl.where(tmp5, tmp6, tmp7) tmp9 = tmp1 >= tmp4 tmp10 = tl.full([1, 1], 160, tl.int64) tmp11 = tmp1 < tmp10 tmp12 = tl.load(in_ptr1 + ((-15680) + x2 + (196*y0) + (15680*y1)), tmp9 & xmask & ymask, eviction_policy='evict_last', other=0.0) tmp13 = tl.full(tmp12.shape, 0.0, tmp12.dtype) tmp14 = tl.where(tmp9, tmp12, tmp13) tmp15 = tl.where(tmp5, tmp8, tmp14) tmp16 = tmp0 + tmp15 tmp19 = tmp17 - tmp18 tmp21 = 0.0006377551020408163 tmp22 = tmp20 * tmp21 tmp24 = tmp23 * tmp23 tmp25 = tmp22 * tmp24 tmp26 = tmp19 * tmp25 tmp27 = tmp16 - tmp26 tmp29 = tmp28 * tmp21 tmp30 = tmp27 - tmp29 tmp32 = tmp23 * tmp31 tmp33 = tmp30 * tmp32 tl.debug_barrier() tl.store(in_out_ptr0 + (x2 + (196*y3)), tmp33, xmask & ymask)
zinccat/TritonTrace
labeled/level3/16_DenseNet201/triton_poi_fused_cat_227.py
https://github.com/zinccat/TritonTrace/blob/a24eba759122f6f75bf349cd44ef329d9cd3e476/labeled/level3/16_DenseNet201/triton_poi_fused_cat_227.py
# From: 16_DenseNet201 import triton import triton.language as tl from torch._inductor.runtime import triton_helpers triton_helpers.set_driver_to_gpu() @triton.jit def triton_poi_fused_cat_227poi_fused_cat_227(input_ptr, output_ptr0, output_ptr1, output_ptr2, output_ptr3, num_elements, BLOCK_SIZE : tl.constexpr): num_elements = 15680 block_offset = tl.program_id(0) * BLOCK_SIZE block_indices = block_offset + tl.arange(0, BLOCK_SIZE)[:] valid_mask = block_indices < num_elements global_indices = block_indices local_indices = global_indices % 1568 batch_indices = global_indices // 1568 temp_data = tl.load(input_ptr + (global_indices), valid_mask) tl.store(output_ptr0 + (local_indices + 62720 * batch_indices), temp_data, valid_mask) tl.store(output_ptr1 + (local_indices + 64288 * batch_indices), temp_data, valid_mask) tl.store(output_ptr2 + (local_indices + 65856 * batch_indices), temp_data, valid_mask) tl.store(output_ptr3 + (local_indices + 67424 * batch_indices), temp_data, valid_mask)
@triton.jit def triton_poi_fused_cat_227poi_fused_cat_227(input_ptr, output_ptr0, output_ptr1, output_ptr2, output_ptr3, num_elements, BLOCK_SIZE : tl.constexpr): num_elements = 15680 block_offset = tl.program_id(0) * BLOCK_SIZE block_indices = block_offset + tl.arange(0, BLOCK_SIZE)[:] valid_mask = block_indices < num_elements global_indices = block_indices local_indices = global_indices % 1568 batch_indices = global_indices // 1568 temp_data = tl.load(input_ptr + (global_indices), valid_mask) tl.store(output_ptr0 + (local_indices + 62720 * batch_indices), temp_data, valid_mask) tl.store(output_ptr1 + (local_indices + 64288 * batch_indices), temp_data, valid_mask) tl.store(output_ptr2 + (local_indices + 65856 * batch_indices), temp_data, valid_mask) tl.store(output_ptr3 + (local_indices + 67424 * batch_indices), temp_data, valid_mask)
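The strides in this fused cat (62720, 64288, 65856, 67424 = 49 * {1280, 1312, 1344, 1376}) show one 32-channel, 7x7 feature block being copied into four progressively wider DenseNet concat buffers in a single pass. An eager sketch of that data movement, with assumed sizes and a hypothetical channel offset c0 (the generated kernel receives already-offset output pointers):

import torch

N, g = 10, 32                      # 15680 elements = 10 batches * 32 channels * 7 * 7
x = torch.randn(N, g, 7, 7, device="cuda")
c0 = 0                             # hypothetical channel offset of this block in each buffer
for c_total in (1280, 1312, 1344, 1376):
    buf = torch.empty(N, c_total, 7, 7, device="cuda")
    buf[:, c0:c0 + g] = x          # same source data, a different row stride per destination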
tth37/sp-gated-mlp-kernels
sparsemm_kernels/utils.py
https://github.com/tth37/sp-gated-mlp-kernels/blob/ff9e5061dc72ec40a70f6b1747a82f3f3d7da66d/sparsemm_kernels/utils.py
import triton import triton.language as tl import torch @triton.jit def act_fn(x, act_type: tl.constexpr): if act_type == "relu": return tl.maximum(x, 0.0) elif act_type == "fatrelu": threshold = 0.01 return tl.where(x > threshold.to(tl.float16), x, 0.0) else: return x # Default case: no activation @triton.jit def get_m_n(pid, M, N, GROUP_SIZE_M): num_groups_m = (M + GROUP_SIZE_M - 1) // GROUP_SIZE_M # Ceiling division max_group_index = num_groups_m - 1 # Maximum valid group index group_pids_capacity = N * GROUP_SIZE_M g = min(pid // group_pids_capacity, max_group_index) cumulative_pids_before_group_g = group_pids_capacity * g group_start_row = GROUP_SIZE_M * g group_size_m = min(GROUP_SIZE_M, M - group_start_row) pid_in_group = pid - cumulative_pids_before_group_g n = pid_in_group // group_size_m delta_m = pid_in_group % group_size_m m = group_start_row + delta_m return (m, n) # equiv to blockIdx.m, blockIdx.n def idx_to_mask(IDX, Q, HIDDEN_DIM): assert IDX.shape[1] == Q, "IDX must have the same number of columns as Q" IDX_int64 = IDX.to(torch.int64) MASK = torch.zeros((IDX.shape[0], HIDDEN_DIM), dtype=torch.int32, device=IDX.device) for i in range(IDX.shape[0]): MASK[i].scatter_(0, IDX_int64[i], 1) return MASK def mask_to_idx(MASK, HIDDEN_DIM): IDX = torch.nonzero(MASK, as_tuple=False) return IDX[:, 1]
@triton.jit def act_fn(x, act_type: tl.constexpr): if act_type == "relu": return tl.maximum(x, 0.0) elif act_type == "fatrelu": threshold = 0.01 return tl.where(x > threshold.to(tl.float16), x, 0.0) else: return x # Default case: no activation
tth37/sp-gated-mlp-kernels
sparsemm_kernels/utils.py
https://github.com/tth37/sp-gated-mlp-kernels/blob/ff9e5061dc72ec40a70f6b1747a82f3f3d7da66d/sparsemm_kernels/utils.py
import triton import triton.language as tl import torch @triton.jit def act_fn(x, act_type: tl.constexpr): if act_type == "relu": return tl.maximum(x, 0.0) elif act_type == "fatrelu": threshold = 0.01 return tl.where(x > threshold.to(tl.float16), x, 0.0) else: return x # Default case: no activation @triton.jit def get_m_n(pid, M, N, GROUP_SIZE_M): num_groups_m = (M + GROUP_SIZE_M - 1) // GROUP_SIZE_M # Ceiling division max_group_index = num_groups_m - 1 # Maximum valid group index group_pids_capacity = N * GROUP_SIZE_M g = min(pid // group_pids_capacity, max_group_index) cumulative_pids_before_group_g = group_pids_capacity * g group_start_row = GROUP_SIZE_M * g group_size_m = min(GROUP_SIZE_M, M - group_start_row) pid_in_group = pid - cumulative_pids_before_group_g n = pid_in_group // group_size_m delta_m = pid_in_group % group_size_m m = group_start_row + delta_m return (m, n) # equiv to blockIdx.m, blockIdx.n def idx_to_mask(IDX, Q, HIDDEN_DIM): assert IDX.shape[1] == Q, "IDX must have the same number of columns as Q" IDX_int64 = IDX.to(torch.int64) MASK = torch.zeros((IDX.shape[0], HIDDEN_DIM), dtype=torch.int32, device=IDX.device) for i in range(IDX.shape[0]): MASK[i].scatter_(0, IDX_int64[i], 1) return MASK def mask_to_idx(MASK, HIDDEN_DIM): IDX = torch.nonzero(MASK, as_tuple=False) return IDX[:, 1]
@triton.jit def get_m_n(pid, M, N, GROUP_SIZE_M): num_groups_m = (M + GROUP_SIZE_M - 1) // GROUP_SIZE_M # Ceiling division max_group_index = num_groups_m - 1 # Maximum valid group index group_pids_capacity = N * GROUP_SIZE_M g = min(pid // group_pids_capacity, max_group_index) cumulative_pids_before_group_g = group_pids_capacity * g group_start_row = GROUP_SIZE_M * g group_size_m = min(GROUP_SIZE_M, M - group_start_row) pid_in_group = pid - cumulative_pids_before_group_g n = pid_in_group // group_size_m delta_m = pid_in_group % group_size_m m = group_start_row + delta_m return (m, n) # equiv to blockIdx.m, blockIdx.n def idx_to_mask(IDX, Q, HIDDEN_DIM): assert IDX.shape[1] == Q, "IDX must have the same number of columns as Q" IDX_int64 = IDX.to(torch.int64) MASK = torch.zeros((IDX.shape[0], HIDDEN_DIM), dtype=torch.int32, device=IDX.device) for i in range(IDX.shape[0]): MASK[i].scatter_(0, IDX_int64[i], 1) return MASK def mask_to_idx(MASK, HIDDEN_DIM): IDX = torch.nonzero(MASK, as_tuple=False) return IDX[:, 1]
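get_m_n maps a flat program id to a (tile-row, tile-column) pair using grouped ordering, the usual L2-friendly swizzle that walks GROUP_SIZE_M tile rows at a time. Since the jitted version cannot be called from Python, here is a host-side mirror of the same arithmetic for convincing yourself the mapping is a bijection over the M x N tile grid; the function name and the toy sizes below are mine:

def get_m_n_host(pid, M, N, GROUP_SIZE_M):
    # Same arithmetic as the @triton.jit get_m_n above, runnable on the host.
    num_groups_m = (M + GROUP_SIZE_M - 1) // GROUP_SIZE_M
    g = min(pid // (N * GROUP_SIZE_M), num_groups_m - 1)
    group_start_row = GROUP_SIZE_M * g
    group_size_m = min(GROUP_SIZE_M, M - group_start_row)
    pid_in_group = pid - N * GROUP_SIZE_M * g
    return group_start_row + pid_in_group % group_size_m, pid_in_group // group_size_m

if __name__ == "__main__":
    M, N, GROUP_SIZE_M = 7, 4, 3   # deliberately not a multiple of the group size
    mapping = [get_m_n_host(pid, M, N, GROUP_SIZE_M) for pid in range(M * N)]
    assert sorted(mapping) == [(m, n) for m in range(M) for n in range(N)]  # covers every tile once
    print(mapping)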
thuml/learn_torch.compile
timm/tf_efficientnet_b0_training_cuda/__compiled_fn_3 kernel 85.py
https://github.com/thuml/learn_torch.compile/blob/b1a5e6dfa7a14996962dc22bff78b41cffac430d/timm/tf_efficientnet_b0_training_cuda/__compiled_fn_3%20kernel%2085.py
import triton import triton.language as tl from torch._inductor.ir import ReductionHint from torch._inductor.ir import TileHint from torch._inductor.triton_heuristics import AutotuneHint, pointwise from torch._inductor.utils import instance_descriptor from torch._inductor import triton_helpers @pointwise( size_hints=[1024, 256], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': 0, 'device_type': 'cuda', 'constants': {}, 'configs': [instance_descriptor(divisible_by_16=(0, 1, 2), equal_to_1=(), ids_of_folded_args=(), divisible_by_8=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_84', 'mutated_arg_names': []}, min_elem_per_thread=0 ) @triton.jit def triton_(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 640 xnumel = 196 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 80 y1 = (yindex // 80) tmp0 = tl.load(in_ptr0 + (x2 + (196*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (80*x2) + (15680*y1)), tmp0, xmask & ymask)
@triton.jit def triton_(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 640 xnumel = 196 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 80 y1 = (yindex // 80) tmp0 = tl.load(in_ptr0 + (x2 + (196*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (80*x2) + (15680*y1)), tmp0, xmask & ymask)
thuml/learn_torch.compile
torchbench/hf_Bert_training_cuda/__compiled_fn_6 kernel 22.py
https://github.com/thuml/learn_torch.compile/blob/b1a5e6dfa7a14996962dc22bff78b41cffac430d/torchbench/hf_Bert_training_cuda/__compiled_fn_6%20kernel%2022.py
import triton import triton.language as tl from torch._inductor.ir import ReductionHint from torch._inductor.ir import TileHint from torch._inductor.triton_heuristics import AutotuneHint, pointwise from torch._inductor.utils import instance_descriptor from torch._inductor import triton_helpers @pointwise( size_hints=[2048], filename=__file__, triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': 0, 'device_type': 'cuda', 'constants': {}, 'configs': [instance_descriptor(divisible_by_16=(0, 1), equal_to_1=(), ids_of_folded_args=(), divisible_by_8=(1,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_dense_backward_21', 'mutated_arg_names': []}, min_elem_per_thread=0 ) @triton.jit def triton_(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask)
@triton.jit def triton_(out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1536 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = xindex tmp0 = 0.0 tl.store(out_ptr0 + (x0), tmp0, xmask)
thuml/learn_torch.compile
timm/swin_base_patch4_window7_224_training_cuda/__compiled_fn_6 kernel 83.py
https://github.com/thuml/learn_torch.compile/blob/b1a5e6dfa7a14996962dc22bff78b41cffac430d/timm/swin_base_patch4_window7_224_training_cuda/__compiled_fn_6%20kernel%2083.py
import triton import triton.language as tl from torch._inductor.ir import ReductionHint from torch._inductor.ir import TileHint from torch._inductor.triton_heuristics import AutotuneHint, persistent_reduction from torch._inductor.utils import instance_descriptor from torch._inductor import triton_helpers @persistent_reduction( size_hints=[1024, 16], reduction_hint=ReductionHint.OUTER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': 0, 'device_type': 'cuda', 'constants': {}, 'configs': [instance_descriptor(divisible_by_16=(0, 1, 2), equal_to_1=(), ids_of_folded_args=(), divisible_by_8=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_layer_norm_backward_82', 'mutated_arg_names': []} ) @triton.jit def triton_(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1024 rnumel = 13 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (1024*r1)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (x0), tmp4, xmask)
@triton.jit def triton_(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr): xnumel = 1024 rnumel = 13 RBLOCK: tl.constexpr = 16 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rindex = tl.arange(0, RBLOCK)[None, :] rmask = rindex < rnumel r1 = rindex x0 = xindex tmp0 = tl.load(in_ptr0 + (x0 + (1024*r1)), rmask & xmask, other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = tl.where(rmask & xmask, tmp1, 0) tmp4 = tl.sum(tmp3, 1)[:, None] tl.store(out_ptr0 + (x0), tmp4, xmask)
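With rnumel = 13 and RBLOCK fixed at 16, this persistent reduction loads the whole reduction extent at once (there is no roffset loop) and sums it; the x0 + 1024*r1 indexing means it is simply a column-wise sum of a row-major (13, 1024) buffer of per-chunk partials. An eager equivalent, shapes inferred from the constants:

import torch

partials = torch.randn(13, 1024, device="cuda")
out = partials.sum(dim=0)   # one value per x0, matching tl.sum(..., 1) over the r axis
assert out.shape == (1024,)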
thuml/learn_torch.compile
torchbench/resnet18_training_cuda/__compiled_fn_6 kernel 33.py
https://github.com/thuml/learn_torch.compile/blob/b1a5e6dfa7a14996962dc22bff78b41cffac430d/torchbench/resnet18_training_cuda/__compiled_fn_6%20kernel%2033.py
import triton import triton.language as tl from torch._inductor.ir import ReductionHint from torch._inductor.ir import TileHint from torch._inductor.triton_heuristics import AutotuneHint, reduction from torch._inductor.utils import instance_descriptor from torch._inductor import triton_helpers @reduction( size_hints=[128, 8192], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': 0, 'device_type': 'cuda', 'constants': {}, 'configs': [instance_descriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=(), ids_of_folded_args=(), divisible_by_8=(4, 5))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_add_native_batch_norm_backward_threshold_backward_32', 'mutated_arg_names': []} ) @triton.jit def triton_(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 6272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 64 x1 = (xindex // 64) _tmp8 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + (64*r2) + (401408*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + ((3136*x0) + (200704*(r2 // 3136)) + (401408*x1) + (r2 % 3136)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr2 + ((3136*x0) + (200704*(r2 // 3136)) + (401408*x1) + (r2 % 3136)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.where(tmp2, tmp1, tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = _tmp8 + tmp7 _tmp8 = tl.where(rmask & xmask, tmp9, _tmp8) tmp8 = tl.sum(_tmp8, 1)[:, None] tl.store(out_ptr0 + (x3), tmp8, xmask)
@triton.jit def triton_(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 128 rnumel = 6272 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex % 64 x1 = (xindex // 64) _tmp8 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) x3 = xindex for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r2 = rindex tmp0 = tl.load(in_ptr0 + (x0 + (64*r2) + (401408*x1)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp3 = tl.load(in_ptr1 + ((3136*x0) + (200704*(r2 // 3136)) + (401408*x1) + (r2 % 3136)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp4 = tl.load(in_ptr2 + ((3136*x0) + (200704*(r2 // 3136)) + (401408*x1) + (r2 % 3136)), rmask & xmask, eviction_policy='evict_last', other=0.0) tmp1 = 0.0 tmp2 = tmp0 <= tmp1 tmp5 = tmp3 + tmp4 tmp6 = tl.where(tmp2, tmp1, tmp5) tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK]) tmp9 = _tmp8 + tmp7 _tmp8 = tl.where(rmask & xmask, tmp9, _tmp8) tmp8 = tl.sum(_tmp8, 1)[:, None] tl.store(out_ptr0 + (x3), tmp8, xmask)
zinccat/TritonTrace
original/level3/29_SwinMLP/triton_poi_fused_clone_16.py
https://github.com/zinccat/TritonTrace/blob/a24eba759122f6f75bf349cd44ef329d9cd3e476/original/level3/29_SwinMLP/triton_poi_fused_clone_16.py
# From: 29_SwinMLP import triton import triton.language as tl from triton.compiler.compiler import AttrsDescriptor from torch._inductor.runtime import triton_helpers, triton_heuristics from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, DeviceProperties triton_helpers.set_driver_to_gpu() @triton_heuristics.pointwise( size_hints={'x': 2097152}, filename=__file__, triton_meta={'signature': {'in_ptr0': '*fp32', 'in_ptr1': '*fp32', 'in_ptr2': '*fp32', 'in_ptr3': '*fp32', 'in_ptr4': '*fp32', 'out_ptr0': '*fp32', 'xnumel': 'i32'}, 'device': DeviceProperties(type='cuda', index=0, multi_processor_count=82, cc=86, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=1536, warp_size=32), 'constants': {}, 'configs': [AttrsDescriptor.from_dict({'arg_properties': {'tt.divisibility': (0, 1, 2, 3, 4, 5, 6), 'tt.equal_to': ()}, 'cls': 'AttrsDescriptor'})]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_16', 'mutated_arg_names': [], 'optimize_mem': False, 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': '5A06A9183D03767BDAB0FC92F89F8279B36CCC7C4B95A264F6D3CCE126D2D3A0', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}, min_elem_per_thread=0 ) @triton.jit def triton_poi_fused_clone_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1505280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = (xindex % 32) x1 = ((xindex // 32) % 49) x2 = ((xindex // 1568) % 6) x3 = xindex // 9408 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 32*x2 + 192*((x1 % 7)) + 1344*((x3 % 4)) + 5376*(x1 // 7) + 37632*(x3 // 4)), xmask) tmp1 = tl.load(in_ptr1 + (7*((x3 % 4)) + 28*(x1 // 7) + 196*(x3 // 4) + ((x1 % 7))), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (7*((x3 % 4)) + 28*(x1 // 7) + 196*(x3 // 4) + ((x1 % 7))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0 + 32*x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0 + 32*x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x4), tmp8, xmask)
@triton.jit def triton_poi_fused_clone_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr): xnumel = 1505280 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:] xmask = xindex < xnumel x0 = (xindex % 32) x1 = ((xindex // 32) % 49) x2 = ((xindex // 1568) % 6) x3 = xindex // 9408 x4 = xindex tmp0 = tl.load(in_ptr0 + (x0 + 32*x2 + 192*((x1 % 7)) + 1344*((x3 % 4)) + 5376*(x1 // 7) + 37632*(x3 // 4)), xmask) tmp1 = tl.load(in_ptr1 + (7*((x3 % 4)) + 28*(x1 // 7) + 196*(x3 // 4) + ((x1 % 7))), xmask, eviction_policy='evict_last') tmp3 = tl.load(in_ptr2 + (7*((x3 % 4)) + 28*(x1 // 7) + 196*(x3 // 4) + ((x1 % 7))), xmask, eviction_policy='evict_last') tmp5 = tl.load(in_ptr3 + (x0 + 32*x2), xmask, eviction_policy='evict_last') tmp7 = tl.load(in_ptr4 + (x0 + 32*x2), xmask, eviction_policy='evict_last') tmp2 = tmp0 - tmp1 tmp4 = tmp2 * tmp3 tmp6 = tmp4 * tmp5 tmp8 = tmp6 + tmp7 tl.store(out_ptr0 + (x4), tmp8, xmask)
thuml/learn_torch.compile
timm/visformer_small_training_cuda/__compiled_fn_6 kernel 34.py
https://github.com/thuml/learn_torch.compile/blob/b1a5e6dfa7a14996962dc22bff78b41cffac430d/timm/visformer_small_training_cuda/__compiled_fn_6%20kernel%2034.py
import triton import triton.language as tl from torch._inductor.ir import ReductionHint from torch._inductor.ir import TileHint from torch._inductor.triton_heuristics import AutotuneHint, reduction from torch._inductor.utils import instance_descriptor from torch._inductor import triton_helpers @reduction( size_hints=[512, 2048], reduction_hint=ReductionHint.INNER, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': 0, 'device_type': 'cuda', 'constants': {}, 'configs': [instance_descriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=(), ids_of_folded_args=(), divisible_by_8=(7, 8))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_native_batch_norm_backward_33', 'mutated_arg_names': []} ) @triton.jit def triton_(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 384 rnumel = 1568 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp2 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) tmp5 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex % 196 r2 = (rindex // 196) tmp0 = tl.load(in_ptr0 + (r1 + (196*x0) + (75264*r2)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp4 = tl.load(in_ptr1 + (r1 + (196*x0) + (75264*r2)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = _tmp2 + tmp1 _tmp2 = tl.where(rmask & xmask, tmp3, _tmp2) tmp6 = tmp4 - tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp2 = tl.sum(_tmp2, 1)[:, None] tl.store(out_ptr0 + (x0), tmp2, xmask) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr1 + (x0), tmp9, xmask) tmp11 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp12 = tmp9 * tmp11 tl.store(out_ptr2 + (x0), tmp12, xmask)
@triton.jit def triton_(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr): xnumel = 384 rnumel = 1568 xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel rbase = tl.arange(0, RBLOCK)[None, :] x0 = xindex _tmp2 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) tmp5 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last') _tmp9 = tl.full([XBLOCK, RBLOCK], 0, tl.float32) for roffset in range(0, rnumel, RBLOCK): rindex = roffset + rbase rmask = rindex < rnumel r1 = rindex % 196 r2 = (rindex // 196) tmp0 = tl.load(in_ptr0 + (r1 + (196*x0) + (75264*r2)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp4 = tl.load(in_ptr1 + (r1 + (196*x0) + (75264*r2)), rmask & xmask, eviction_policy='evict_first', other=0.0) tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK]) tmp3 = _tmp2 + tmp1 _tmp2 = tl.where(rmask & xmask, tmp3, _tmp2) tmp6 = tmp4 - tmp5 tmp7 = tmp0 * tmp6 tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK]) tmp10 = _tmp9 + tmp8 _tmp9 = tl.where(rmask & xmask, tmp10, _tmp9) tmp2 = tl.sum(_tmp2, 1)[:, None] tl.store(out_ptr0 + (x0), tmp2, xmask) tmp9 = tl.sum(_tmp9, 1)[:, None] tl.store(out_ptr1 + (x0), tmp9, xmask) tmp11 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last') tmp12 = tmp9 * tmp11 tl.store(out_ptr2 + (x0), tmp12, xmask)
thuml/learn_torch.compile
timm/mobilenetv3_large_100_training_cuda/__compiled_fn_3 kernel 88.py
https://github.com/thuml/learn_torch.compile/blob/b1a5e6dfa7a14996962dc22bff78b41cffac430d/timm/mobilenetv3_large_100_training_cuda/__compiled_fn_3%20kernel%2088.py
import triton import triton.language as tl from torch._inductor.ir import ReductionHint from torch._inductor.ir import TileHint from torch._inductor.triton_heuristics import AutotuneHint, pointwise from torch._inductor.utils import instance_descriptor from torch._inductor import triton_helpers @pointwise( size_hints=[8192, 256], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': 0, 'device_type': 'cuda', 'constants': {}, 'configs': [instance_descriptor(divisible_by_16=(0, 1, 2), equal_to_1=(), ids_of_folded_args=(), divisible_by_8=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_87', 'mutated_arg_names': []}, min_elem_per_thread=0 ) @triton.jit def triton_(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 5376 xnumel = 196 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 672 y1 = (yindex // 672) tmp0 = tl.load(in_ptr0 + (x2 + (196*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (672*x2) + (131712*y1)), tmp0, xmask & ymask)
@triton.jit def triton_(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 5376 xnumel = 196 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 672 y1 = (yindex // 672) tmp0 = tl.load(in_ptr0 + (x2 + (196*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (672*x2) + (131712*y1)), tmp0, xmask & ymask)
thuml/learn_torch.compile
timm/pnasnet5large_training_cuda/__compiled_fn_3 kernel 14.py
https://github.com/thuml/learn_torch.compile/blob/b1a5e6dfa7a14996962dc22bff78b41cffac430d/timm/pnasnet5large_training_cuda/__compiled_fn_3%20kernel%2014.py
import triton import triton.language as tl from torch._inductor.ir import ReductionHint from torch._inductor.ir import TileHint from torch._inductor.triton_heuristics import AutotuneHint, pointwise from torch._inductor.utils import instance_descriptor from torch._inductor import triton_helpers @pointwise( size_hints=[1024, 8192], tile_hint=TileHint.SQUARE, filename=__file__, triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': 0, 'device_type': 'cuda', 'constants': {}, 'configs': [instance_descriptor(divisible_by_16=(0, 1, 2), equal_to_1=(), ids_of_folded_args=(), divisible_by_8=(2,))]}, inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_13', 'mutated_arg_names': []}, min_elem_per_thread=0 ) @triton.jit def triton_(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 768 xnumel = 6889 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 96 y1 = (yindex // 96) tmp0 = tl.load(in_ptr0 + (x2 + (6889*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (96*x2) + (661344*y1)), tmp0, xmask & ymask)
@triton.jit def triton_(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr): ynumel = 768 xnumel = 6889 yoffset = tl.program_id(1) * YBLOCK yindex = yoffset + tl.arange(0, YBLOCK)[None, :] ymask = yindex < ynumel xoffset = tl.program_id(0) * XBLOCK xindex = xoffset + tl.arange(0, XBLOCK)[:, None] xmask = xindex < xnumel x2 = xindex y3 = yindex y0 = yindex % 96 y1 = (yindex // 96) tmp0 = tl.load(in_ptr0 + (x2 + (6889*y3)), xmask & ymask, eviction_policy='evict_last') tl.store(out_ptr0 + (y0 + (96*x2) + (661344*y1)), tmp0, xmask & ymask)
zinccat/TritonTrace
labeled/level3/15_DenseNet121/triton_poi_fused__native_batch_norm_legit_functional_relu_172.py
https://github.com/zinccat/TritonTrace/blob/a24eba759122f6f75bf349cd44ef329d9cd3e476/labeled/level3/15_DenseNet121/triton_poi_fused__native_batch_norm_legit_functional_relu_172.py
# From: 15_DenseNet121 import triton import triton.language as tl from torch._inductor.runtime import triton_helpers triton_helpers.set_driver_to_gpu() @triton.jit def triton_poi_fused__native_batch_norm_legit_functional_relu_172poi_fused__native_batch_norm_legit_functional_relu_172(input_ptr_mean, input_ptr_var, input_ptr_scale, input_ptr_shift, input_ptr_input, output_ptr, num_elements, BLOCK_SIZE : tl.constexpr): num_elements = 376320 block_offset = tl.program_id(0) * BLOCK_SIZE block_indices = block_offset + tl.arange(0, BLOCK_SIZE)[:] valid_mask = block_indices < num_elements input_indices = block_indices channel_indices = (block_indices // 49) % 768 mean = tl.load(input_ptr_mean + (input_indices), valid_mask) variance = tl.load(input_ptr_var + (channel_indices), valid_mask, eviction_policy='evict_last') scale = tl.load(input_ptr_scale + (channel_indices), valid_mask, eviction_policy='evict_last') shift = tl.load(input_ptr_shift + (channel_indices), valid_mask, eviction_policy='evict_last') input_data = tl.load(input_ptr_input + (input_indices), valid_mask) normalized_data = input_data - mean variance_scale = 490.0 normalized_variance = variance / variance_scale epsilon = 1e-05 adjusted_variance = normalized_variance + epsilon inv_sqrt_variance = tl.extra.cuda.libdevice.rsqrt(adjusted_variance) scaled_data = normalized_data * inv_sqrt_variance scaled_and_shifted_data = scaled_data * scale output_data = scaled_and_shifted_data + shift zero_tensor = tl.full([1], 0, tl.int32) relu_output = triton_helpers.maximum(zero_tensor, output_data) tl.store(output_ptr + (input_indices), relu_output, valid_mask)
@triton.jit def triton_poi_fused__native_batch_norm_legit_functional_relu_172poi_fused__native_batch_norm_legit_functional_relu_172(input_ptr_mean, input_ptr_var, input_ptr_scale, input_ptr_shift, input_ptr_input, output_ptr, num_elements, BLOCK_SIZE : tl.constexpr): num_elements = 376320 block_offset = tl.program_id(0) * BLOCK_SIZE block_indices = block_offset + tl.arange(0, BLOCK_SIZE)[:] valid_mask = block_indices < num_elements input_indices = block_indices channel_indices = (block_indices // 49) % 768 mean = tl.load(input_ptr_mean + (input_indices), valid_mask) variance = tl.load(input_ptr_var + (channel_indices), valid_mask, eviction_policy='evict_last') scale = tl.load(input_ptr_scale + (channel_indices), valid_mask, eviction_policy='evict_last') shift = tl.load(input_ptr_shift + (channel_indices), valid_mask, eviction_policy='evict_last') input_data = tl.load(input_ptr_input + (input_indices), valid_mask) normalized_data = input_data - mean variance_scale = 490.0 normalized_variance = variance / variance_scale epsilon = 1e-05 adjusted_variance = normalized_variance + epsilon inv_sqrt_variance = tl.extra.cuda.libdevice.rsqrt(adjusted_variance) scaled_data = normalized_data * inv_sqrt_variance scaled_and_shifted_data = scaled_data * scale output_data = scaled_and_shifted_data + shift zero_tensor = tl.full([1], 0, tl.int32) relu_output = triton_helpers.maximum(zero_tensor, output_data) tl.store(output_ptr + (input_indices), relu_output, valid_mask)
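The pattern fused here is training-style batch normalization (the division by 490.0 matches averaging over 10 * 7 * 7 = 490 elements per channel) followed by the affine scale/shift and a ReLU. A hedged eager reference for that fusion, with assumed shapes, checked against F.batch_norm:

import torch
import torch.nn.functional as F

x = torch.randn(10, 768, 7, 7, device="cuda")
weight = torch.randn(768, device="cuda")
bias = torch.randn(768, device="cuda")

mean = x.mean(dim=(0, 2, 3), keepdim=True)
var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True)   # sum of squared deviations / 490
y_manual = torch.relu(
    (x - mean) * torch.rsqrt(var + 1e-5) * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)
)

y_ref = F.relu(F.batch_norm(x, None, None, weight, bias, training=True, eps=1e-5))
assert torch.allclose(y_manual, y_ref, atol=1e-4)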