#include <ATen/cuda/CUDAContext.h>
#include <torch/all.h>
#include <c10/cuda/CUDAGuard.h>

#include <cmath>

#include "cuda_compat.h"
#include "dispatch_utils.h"

namespace vllm {

// Activation and gating kernel template.
template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&)>
__global__ void act_and_mul_kernel(
    scalar_t* __restrict__ out,          // [..., d]
    const scalar_t* __restrict__ input,  // [..., 2, d]
    const int d) {
  const int64_t token_idx = blockIdx.x;
  for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) {
    const scalar_t x = VLLM_LDG(&input[token_idx * 2 * d + idx]);
    const scalar_t y = VLLM_LDG(&input[token_idx * 2 * d + d + idx]);
    out[token_idx * d + idx] = ACT_FN(x) * y;
  }
}

template <typename T>
__device__ __forceinline__ T silu_kernel(const T& x) {
  // x * sigmoid(x)
  return (T)(((float)x) / (1.0f + expf((float)-x)));
}

template <typename T>
__device__ __forceinline__ T gelu_kernel(const T& x) {
  // Equivalent to PyTorch GELU with 'none' approximation.
  // Refer to:
  // https://github.com/pytorch/pytorch/blob/8ac9b20d4b090c213799e81acf48a55ea8d437d6/aten/src/ATen/native/cuda/ActivationGeluKernel.cu#L36-L38
  const float f = (float)x;
  constexpr float ALPHA = M_SQRT1_2;
  return (T)(f * 0.5f * (1.0f + ::erf(f * ALPHA)));
}

template <typename T>
__device__ __forceinline__ T gelu_tanh_kernel(const T& x) {
  // Equivalent to PyTorch GELU with 'tanh' approximation.
  // Refer to:
  // https://github.com/pytorch/pytorch/blob/8ac9b20d4b090c213799e81acf48a55ea8d437d6/aten/src/ATen/native/cuda/ActivationGeluKernel.cu#L25-L30
  const float f = (float)x;
  constexpr float BETA = M_SQRT2 * M_2_SQRTPI * 0.5f;
  constexpr float KAPPA = 0.044715;
  float x_cube = f * f * f;
  float inner = BETA * (f + KAPPA * x_cube);
  return (T)(0.5f * f * (1.0f + ::tanhf(inner)));
}

}  // namespace vllm

// Launch activation and gating kernel.
#define LAUNCH_ACTIVATION_GATE_KERNEL(KERNEL)                            \
  int d = input.size(-1) / 2;                                            \
  int64_t num_tokens = input.numel() / input.size(-1);                   \
  dim3 grid(num_tokens);                                                 \
  dim3 block(std::min(d, 1024));                                         \
  const at::cuda::OptionalCUDAGuard device_guard(device_of(input));      \
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();          \
  VLLM_DISPATCH_FLOATING_TYPES(                                          \
      input.scalar_type(), "act_and_mul_kernel", [&] {                   \
        vllm::act_and_mul_kernel<scalar_t, KERNEL<scalar_t>>             \
            <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(),       \
                                         input.data_ptr<scalar_t>(), d); \
      });

void silu_and_mul(torch::Tensor& out,    // [..., d]
                  torch::Tensor& input)  // [..., 2 * d]
{
  LAUNCH_ACTIVATION_GATE_KERNEL(vllm::silu_kernel);
}

// void gelu_and_mul(torch::Tensor& out,    // [..., d]
//                   torch::Tensor& input)  // [..., 2 * d]
// {
//   LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_kernel);
// }

// void gelu_tanh_and_mul(torch::Tensor& out,    // [..., d]
//                        torch::Tensor& input)  // [..., 2 * d]
// {
//   LAUNCH_ACTIVATION_GATE_KERNEL(vllm::gelu_tanh_kernel);
// }
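// Illustrative usage sketch (added for clarity, not part of the original
// kernels): a minimal host-side helper showing how silu_and_mul is typically
// driven, assuming `gate_up` is a CUDA tensor holding the fused gate/up
// projection with shape [..., 2 * d]. The helper name "silu_and_mul_example"
// is hypothetical.
[[maybe_unused]] static torch::Tensor silu_and_mul_example(
    torch::Tensor& gate_up) {  // [..., 2 * d]
  // The output keeps all leading dims and halves the last one.
  auto out_sizes = gate_up.sizes().vec();
  out_sizes.back() /= 2;
  torch::Tensor out = torch::empty(out_sizes, gate_up.options());
  // out[..., i] = silu(gate_up[..., i]) * gate_up[..., d + i]
  silu_and_mul(out, gate_up);
  return out;
}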
namespace vllm {

template <typename T>
__device__ __forceinline__ T fatrelu_kernel(const T& x,
                                             const float threshold) {
  const float f = (float)x;
  return (T)(f > threshold ? f : 0.0f);
}

template <typename scalar_t,
          scalar_t (*ACT_FN)(const scalar_t&, const float)>
__global__ void act_and_mul_kernel_with_param(
    scalar_t* __restrict__ out, const scalar_t* __restrict__ input,
    const int d, const float param) {
  const int64_t token_idx = blockIdx.x;
  for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) {
    const scalar_t x = VLLM_LDG(&input[token_idx * 2 * d + idx]);
    const scalar_t y = VLLM_LDG(&input[token_idx * 2 * d + d + idx]);
    out[token_idx * d + idx] = ACT_FN(x, param) * y;
  }
}

}  // namespace vllm

#define LAUNCH_ACTIVATION_GATE_KERNEL_WITH_PARAM(KERNEL, PARAM)         \
  int d = input.size(-1) / 2;                                           \
  int64_t num_tokens = input.numel() / input.size(-1);                  \
  dim3 grid(num_tokens);                                                \
  dim3 block(std::min(d, 1024));                                        \
  const at::cuda::OptionalCUDAGuard device_guard(device_of(input));     \
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();         \
  VLLM_DISPATCH_FLOATING_TYPES(                                         \
      input.scalar_type(), "act_and_mul_kernel_with_param", [&] {       \
        vllm::act_and_mul_kernel_with_param<scalar_t, KERNEL<scalar_t>> \
            <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(),      \
                                         input.data_ptr<scalar_t>(), d, \
                                         PARAM);                        \
      });

// void fatrelu_and_mul(torch::Tensor& out,    // [..., d]
//                      torch::Tensor& input,  // [..., 2 * d]
//                      double threshold) {
//   LAUNCH_ACTIVATION_GATE_KERNEL_WITH_PARAM(vllm::fatrelu_kernel, threshold);
// }

namespace vllm {

// Element-wise activation kernel template.
template <typename scalar_t, scalar_t (*ACT_FN)(const scalar_t&)>
__global__ void activation_kernel(
    scalar_t* __restrict__ out,          // [..., d]
    const scalar_t* __restrict__ input,  // [..., d]
    const int d) {
  const int64_t token_idx = blockIdx.x;
  for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) {
    const scalar_t x = VLLM_LDG(&input[token_idx * d + idx]);
    out[token_idx * d + idx] = ACT_FN(x);
  }
}

}  // namespace vllm

// Launch element-wise activation kernel.
#define LAUNCH_ACTIVATION_KERNEL(KERNEL)                                       \
  int d = input.size(-1);                                                      \
  int64_t num_tokens = input.numel() / d;                                      \
  dim3 grid(num_tokens);                                                       \
  dim3 block(std::min(d, 1024));                                               \
  const at::cuda::OptionalCUDAGuard device_guard(device_of(input));            \
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();                \
  VLLM_DISPATCH_FLOATING_TYPES(input.scalar_type(), "activation_kernel", [&] { \
    vllm::activation_kernel<scalar_t, KERNEL<scalar_t>>                        \
        <<<grid, block, 0, stream>>>(out.data_ptr<scalar_t>(),                 \
                                     input.data_ptr<scalar_t>(), d);           \
  });

namespace vllm {

template <typename T>
__device__ __forceinline__ T gelu_new_kernel(const T& x) {
  const float x3 = (float)(x * x * x);
  const T t = (T)tanhf((T)(0.79788456f * (float)(x + (T)(0.044715f * x3))));
  return ((T)0.5) * x * (((T)1.0) + t);
}

template <typename T>
__device__ __forceinline__ T gelu_fast_kernel(const T& x) {
  const float f = (float)x;
  const T t =
      (T)tanhf(((T)(f * 0.79788456f)) * (((T)1.0) + (T)(0.044715f * f) * x));
  return ((T)0.5) * x * (((T)1.0) + t);
}

template <typename T>
__device__ __forceinline__ T gelu_quick_kernel(const T& x) {
  // x * sigmoid(1.702 * x)
  return (T)(((float)x) / (1.0f + expf(-1.702f * (float)x)));
}

}  // namespace vllm

// void gelu_new(torch::Tensor& out,    // [..., d]
//               torch::Tensor& input)  // [..., d]
// {
//   LAUNCH_ACTIVATION_KERNEL(vllm::gelu_new_kernel);
// }

// void gelu_fast(torch::Tensor& out,    // [..., d]
//                torch::Tensor& input)  // [..., d]
// {
//   LAUNCH_ACTIVATION_KERNEL(vllm::gelu_fast_kernel);
// }

// void gelu_quick(torch::Tensor& out,    // [..., d]
//                 torch::Tensor& input)  // [..., d]
// {
//   LAUNCH_ACTIVATION_KERNEL(vllm::gelu_quick_kernel);
// }
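// Illustrative registration sketch (added for clarity, not part of the
// original kernels): one way a wrapper such as silu_and_mul can be exposed to
// Python as a custom op. The real project registers its ops in a separate
// bindings file; the library name "_activation_demo" below is hypothetical.
#include <torch/library.h>

TORCH_LIBRARY(_activation_demo, m) {
  // Schema: mutates `out` in place and returns nothing.
  m.def("silu_and_mul(Tensor! out, Tensor input) -> ()");
  // Bind the CUDA implementation defined above.
  m.impl("silu_and_mul", torch::kCUDA, &silu_and_mul);
}
// From Python, after loading the compiled extension, this would be callable
// as: torch.ops._activation_demo.silu_and_mul(out, x)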