hip | cuda
---|---
// !!! This is a file automatically generated by hipify!!!
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
namespace at { namespace hip {
/**
Computes ceil(a / b)
*/
template <typename T>
__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {
return (a + b - 1) / b;
}
namespace {
// Threads per block for our apply kernel
// FIXME: use occupancy calculator instead
constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
template <int step = 1>
inline bool getApplyGrid(uint64_t totalElements, dim3& grid, int64_t curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
if (curDevice == -1) return false;
uint64_t numel_per_thread = static_cast<uint64_t>(max_threads_per_block) * static_cast<uint64_t>(step);
uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread);
uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0];
if (numBlocks > maxGridX)
numBlocks = maxGridX;
grid = dim3(numBlocks);
return true;
}
constexpr int getApplyBlocksPerSM() {
return AT_APPLY_BLOCKS_PER_SM;
}
constexpr int getApplyBlockSize() {
return AT_APPLY_THREADS_PER_BLOCK;
}
inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
return dim3(max_threads_per_block);
}
}
}} // namespace at::hip
### |
#include <ATen/cuda/CUDAContext.h>
#include <cuda_runtime.h>
namespace at { namespace cuda {
/**
Computes ceil(a / b)
*/
template <typename T>
__host__ __device__ __forceinline__ T ATenCeilDiv(T a, T b) {
return (a + b - 1) / b;
}
namespace {
// Threads per block for our apply kernel
// FIXME: use occupancy calculator instead
constexpr uint32_t AT_APPLY_THREADS_PER_BLOCK = 512;
constexpr uint32_t AT_APPLY_BLOCKS_PER_SM = 4;
template <int step = 1>
inline bool getApplyGrid(uint64_t totalElements, dim3& grid, int64_t curDevice, int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
if (curDevice == -1) return false;
uint64_t numel_per_thread = static_cast<uint64_t>(max_threads_per_block) * static_cast<uint64_t>(step);
uint64_t numBlocks = ATenCeilDiv(totalElements, numel_per_thread);
uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0];
if (numBlocks > maxGridX)
numBlocks = maxGridX;
grid = dim3(numBlocks);
return true;
}
constexpr int getApplyBlocksPerSM() {
return AT_APPLY_BLOCKS_PER_SM;
}
constexpr int getApplyBlockSize() {
return AT_APPLY_THREADS_PER_BLOCK;
}
inline dim3 getApplyBlock(int max_threads_per_block=AT_APPLY_THREADS_PER_BLOCK) {
return dim3(max_threads_per_block);
}
}
}} // namespace at::cuda
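// Illustrative usage sketch (not part of the original header): how the helpers
// above might drive an element-wise launch. `myElementwiseKernel` and
// `totalElements` are hypothetical placeholders, and the launch-check macro
// assumes <c10/cuda/CUDAException.h> is available.
//
//   uint64_t totalElements = /* number of elements to process */;
//   const dim3 block = getApplyBlock();   // 512 threads per block by default
//   dim3 grid;
//   if (!getApplyGrid(totalElements, grid, at::cuda::current_device())) {
//     return;  // no valid device
//   }
//   myElementwiseKernel<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(/* args */);
//   C10_CUDA_KERNEL_LAUNCH_CHECK();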
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/native/Repeat.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/repeat_interleave_native.h>
#endif
template <typename index_t>
__global__ static void compute_hip_kernel(
index_t* repeat_ptr,
int64_t* cumsum_ptr,
index_t* result_ptr,
int64_t size,
int64_t result_size) {
CUDA_KERNEL_ASSERT(result_size == cumsum_ptr[size - 1]);
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = (blockDim.x * gridDim.x) / C10_WARP_SIZE;
int warp_id = idx / C10_WARP_SIZE;
int tid_in_warp = idx % C10_WARP_SIZE;
for (int64_t i = warp_id; i < size; i += stride) {
int64_t end = cumsum_ptr[i];
index_t repeat = repeat_ptr[i];
CUDA_KERNEL_ASSERT(repeat >= 0);
int64_t start = end - repeat;
for (int64_t j = start + tid_in_warp; j < end; j += C10_WARP_SIZE) {
result_ptr[j] = i;
}
}
}
template <typename index_t>
static void compute_hip(
index_t* repeat_ptr,
int64_t* cumsum_ptr,
index_t* result_ptr,
int64_t size,
int64_t result_size) {
int64_t block = 512;
int64_t warps_per_block = block / at::cuda::warp_size();
int64_t grid =
std::min<int64_t>((size + warps_per_block - 1) / warps_per_block, 2048L);
hipLaunchKernelGGL(( compute_hip_kernel), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStream(),
repeat_ptr, cumsum_ptr, result_ptr, size, result_size);
C10_HIP_KERNEL_LAUNCH_CHECK();
}
namespace at::native {
Tensor repeat_interleave_hip(
const Tensor& repeat,
c10::optional<int64_t> output_size) {
Tensor output;
AT_DISPATCH_INDEX_TYPES(
repeat.scalar_type(), "repeat_interleave_hip", [&]() {
output = repeat_interleave_common<index_t, compute_hip<index_t>>(
repeat, output_size);
});
return output;
}
} // namespace at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/native/Repeat.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/repeat_interleave_native.h>
#endif
template <typename index_t>
__global__ static void compute_cuda_kernel(
index_t* repeat_ptr,
int64_t* cumsum_ptr,
index_t* result_ptr,
int64_t size,
int64_t result_size) {
CUDA_KERNEL_ASSERT(result_size == cumsum_ptr[size - 1]);
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t stride = (blockDim.x * gridDim.x) / C10_WARP_SIZE;
int warp_id = idx / C10_WARP_SIZE;
int tid_in_warp = idx % C10_WARP_SIZE;
for (int64_t i = warp_id; i < size; i += stride) {
int64_t end = cumsum_ptr[i];
index_t repeat = repeat_ptr[i];
CUDA_KERNEL_ASSERT(repeat >= 0);
int64_t start = end - repeat;
for (int64_t j = start + tid_in_warp; j < end; j += C10_WARP_SIZE) {
result_ptr[j] = i;
}
}
}
template <typename index_t>
static void compute_cuda(
index_t* repeat_ptr,
int64_t* cumsum_ptr,
index_t* result_ptr,
int64_t size,
int64_t result_size) {
int64_t block = 512;
int64_t warps_per_block = block / at::cuda::warp_size();
int64_t grid =
std::min<int64_t>((size + warps_per_block - 1) / warps_per_block, 2048L);
compute_cuda_kernel<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
repeat_ptr, cumsum_ptr, result_ptr, size, result_size);
C10_CUDA_KERNEL_LAUNCH_CHECK();
}
namespace at::native {
Tensor repeat_interleave_cuda(
const Tensor& repeat,
c10::optional<int64_t> output_size) {
Tensor output;
AT_DISPATCH_INDEX_TYPES(
repeat.scalar_type(), "repeat_interleave_cuda", [&]() {
output = repeat_interleave_common<index_t, compute_cuda<index_t>>(
repeat, output_size);
});
return output;
}
} // namespace at::native
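// Worked example (illustrative, not from the original source): for
// repeats = [2, 0, 3] the caller supplies cumsum_ptr = [2, 2, 5] and
// result_size = 5. Each row i is handled by one warp: start = cumsum[i] -
// repeats[i], end = cumsum[i], and the warp's lanes write result[start..end) = i,
// producing result = [0, 0, 2, 2, 2].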
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char scaled_modified_bessel_k0_name[] = "scaled_modified_bessel_k0_forward";
void scaled_modified_bessel_k0_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_hip", [&]() {
jitted_gpu_kernel<scaled_modified_bessel_k0_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_hip", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return scaled_modified_bessel_k0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_scaled_modified_bessel_k0_stub, &scaled_modified_bessel_k0_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char scaled_modified_bessel_k0_name[] = "scaled_modified_bessel_k0_forward";
void scaled_modified_bessel_k0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() {
jitted_gpu_kernel<scaled_modified_bessel_k0_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return scaled_modified_bessel_k0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_scaled_modified_bessel_k0_stub, &scaled_modified_bessel_k0_kernel_cuda);
} // namespace at::native
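// AT_USE_JITERATOR() selects between two implementations: when enabled,
// jitted_gpu_kernel compiles the stringified scalar function
// (scaled_modified_bessel_k0_string, expected to come from Math.cuh) at
// runtime via NVRTC; otherwise the statically compiled gpu_kernel fallback
// calls scaled_modified_bessel_k0_forward directly. The same pattern is used
// by the other special-function kernels below.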
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char scaled_modified_bessel_k1_name[] = "scaled_modified_bessel_k1_forward";
void scaled_modified_bessel_k1_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k1_hip", [&]() {
jitted_gpu_kernel<scaled_modified_bessel_k1_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k1_hip", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return scaled_modified_bessel_k1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_scaled_modified_bessel_k1_stub, &scaled_modified_bessel_k1_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char scaled_modified_bessel_k1_name[] = "scaled_modified_bessel_k1_forward";
void scaled_modified_bessel_k1_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k1_cuda", [&]() {
jitted_gpu_kernel<scaled_modified_bessel_k1_name, scalar_t, scalar_t, 1>(iterator, scaled_modified_bessel_k1_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "scaled_modified_bessel_k1_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return scaled_modified_bessel_k1_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_scaled_modified_bessel_k1_stub, &scaled_modified_bessel_k1_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char shifted_chebyshev_polynomial_t_name[] = "shifted_chebyshev_polynomial_t_forward";
void shifted_chebyshev_polynomial_t_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_t_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_t_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_t_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_t_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_t_stub, &shifted_chebyshev_polynomial_t_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_t_name[] = "shifted_chebyshev_polynomial_t_forward";
void shifted_chebyshev_polynomial_t_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_t_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_t_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_t_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_t_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_t_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_t_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_t_stub, &shifted_chebyshev_polynomial_t_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char shifted_chebyshev_polynomial_u_name[] = "shifted_chebyshev_polynomial_u_forward";
void shifted_chebyshev_polynomial_u_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_u_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_u_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_u_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_u_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_u_stub, &shifted_chebyshev_polynomial_u_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_u_name[] = "shifted_chebyshev_polynomial_u_forward";
void shifted_chebyshev_polynomial_u_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_u_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_u_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_u_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_u_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_u_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_u_stub, &shifted_chebyshev_polynomial_u_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char shifted_chebyshev_polynomial_v_name[] = "shifted_chebyshev_polynomial_v_forward";
void shifted_chebyshev_polynomial_v_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_v_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_v_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_v_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_v_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_v_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_v_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_v_stub, &shifted_chebyshev_polynomial_v_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_v_name[] = "shifted_chebyshev_polynomial_v_forward";
void shifted_chebyshev_polynomial_v_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_v_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_v_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_v_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_v_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_v_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_v_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_v_stub, &shifted_chebyshev_polynomial_v_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char shifted_chebyshev_polynomial_w_name[] = "shifted_chebyshev_polynomial_w_forward";
void shifted_chebyshev_polynomial_w_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_w_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_w_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_w_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_w_hip", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_w_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_w_kernel_hip
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_w_stub, &shifted_chebyshev_polynomial_w_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char shifted_chebyshev_polynomial_w_name[] = "shifted_chebyshev_polynomial_w_forward";
void shifted_chebyshev_polynomial_w_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_w_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars<shifted_chebyshev_polynomial_w_name, scalar_t, scalar_t>(iterator, shifted_chebyshev_polynomial_w_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "shifted_chebyshev_polynomial_w_cuda", [&]() {
gpu_kernel_with_scalars(iterator, []GPU_LAMBDA(scalar_t x, scalar_t n) -> scalar_t {
return shifted_chebyshev_polynomial_w_forward<scalar_t, true>(x, n);
});
});
#endif
} // shifted_chebyshev_polynomial_w_kernel_cuda
} // namespace (anonymous)
REGISTER_DISPATCH(shifted_chebyshev_polynomial_w_stub, &shifted_chebyshev_polynomial_w_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace at::native {
std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim) {
int64_t ndim = self.dim();
// Sort the strides in descending order by value, keeping dim last.
std::vector<int64_t> strides = self.strides().vec();
strides[dim] = -1;
std::vector<int64_t> original_dim(ndim);
for (int64_t i = 0; i < ndim; i++) {
original_dim[i] = i;
}
thrust::stable_sort_by_key(
thrust::host, strides.data(), strides.data() + ndim, original_dim.data(),
thrust::greater<int64_t>()
);
// generate contiguous strides on permuted dims
std::vector<int64_t> new_strides(ndim);
std::vector<int64_t> new_strides_unsort(ndim);
int64_t cumprod = 1;
for (int64_t i = 0; i < ndim; i++) {
new_strides[ndim - 1 - i] = cumprod;
cumprod *= self.sizes()[original_dim[ndim - 1 - i]];
}
// unsort new strides
for (int64_t i = 0; i < ndim; i++) {
new_strides_unsort[original_dim[i]] = new_strides[i];
}
return new_strides_unsort;
}
} // namespace at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <thrust/execution_policy.h>
#include <thrust/sort.h>
namespace at::native {
std::vector<int64_t> infer_dense_strides_dim_last(const Tensor & self, int64_t dim) {
int64_t ndim = self.dim();
// Sort the strides in descending order by value, keeping dim last.
std::vector<int64_t> strides = self.strides().vec();
strides[dim] = -1;
std::vector<int64_t> original_dim(ndim);
for (int64_t i = 0; i < ndim; i++) {
original_dim[i] = i;
}
thrust::stable_sort_by_key(
thrust::host, strides.data(), strides.data() + ndim, original_dim.data(),
thrust::greater<int64_t>()
);
// generate contiguous strides on permuted dims
std::vector<int64_t> new_strides(ndim);
std::vector<int64_t> new_strides_unsort(ndim);
int64_t cumprod = 1;
for (int64_t i = 0; i < ndim; i++) {
new_strides[ndim - 1 - i] = cumprod;
cumprod *= self.sizes()[original_dim[ndim - 1 - i]];
}
// unsort new strides
for (int64_t i = 0; i < ndim; i++) {
new_strides_unsort[original_dim[i]] = new_strides[i];
}
return new_strides_unsort;
}
} // namespace at::native
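// Worked example (illustrative, not from the original source): for a tensor
// with sizes [3, 4, 5], strides [20, 5, 1] and dim = 1, strides[1] is set to
// -1 so the sort order becomes dims (0, 2, 1). Contiguous strides generated
// in that order are (20, 4, 1), and unsorting back to the original dim order
// yields [20, 1, 4]: dim 1 is now the dense, fastest-moving dimension.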
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/sspaddmm_native.h>
#endif
namespace at::native {
// sparse, sparse, sparse, dense, real, real -> sparse
Tensor& _sspaddmm_out_only_sparse_hip(const Tensor& self,
const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
AT_ERROR("tensor.sspaddmm(...) can only be called on sparse tensors");
}
Tensor& _sspaddmm_out_hip(const Tensor& self,
const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
AT_ERROR("NYI: HIP sspaddmm is not implemented");
}
} // namespace at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/sspaddmm_native.h>
#endif
namespace at::native {
// sparse, sparse, sparse, dense, real, real -> sparse
Tensor& _sspaddmm_out_only_sparse_cuda(const Tensor& self,
const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
AT_ERROR("tensor.sspaddmm(...) can only be called on sparse tensors");
}
Tensor& _sspaddmm_out_cuda(const Tensor& self,
const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
AT_ERROR("NYI: CUDA sspaddmm is not implemented");
}
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/native/hip/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_HIP char spherical_bessel_j0_name[] = "spherical_bessel_j0_forward";
void spherical_bessel_j0_kernel_hip(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "spherical_bessel_j0_hip", [&]() {
jitted_gpu_kernel<spherical_bessel_j0_name, scalar_t, scalar_t, 1>(iterator, spherical_bessel_j0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "spherical_bessel_j0_hip", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return spherical_bessel_j0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_spherical_bessel_j0_stub, &spherical_bessel_j0_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
#include <ATen/NumericUtils.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <c10/util/complex.h>
namespace at::native {
namespace {
CONSTEXPR_EXCEPT_WIN_CUDA char spherical_bessel_j0_name[] = "spherical_bessel_j0_forward";
void spherical_bessel_j0_kernel_cuda(TensorIteratorBase& iterator) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "spherical_bessel_j0_cuda", [&]() {
jitted_gpu_kernel<spherical_bessel_j0_name, scalar_t, scalar_t, 1>(iterator, spherical_bessel_j0_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "spherical_bessel_j0_cuda", [&]() {
gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return spherical_bessel_j0_forward(a);
});
});
#endif // AT_USE_JITERATOR()
}
}
REGISTER_DISPATCH(special_spherical_bessel_j0_stub, &spherical_bessel_j0_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/core/TensorBase.h>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/native/CanUse32BitIndexMath.h>
namespace at {
namespace hip {
namespace detail {
TORCH_HIP_CU_API bool maybeOverlappingIndices(const at::TensorBase &t);
using at::native::canUse32BitIndexMath;
template <typename scalar, typename IndexType>
TensorInfo<scalar, IndexType>
getTensorInfo(const at::TensorBase &t) {
IndexType sz[MAX_TENSORINFO_DIMS];
IndexType st[MAX_TENSORINFO_DIMS];
int dims = t.dim();
for (int i = 0; i < dims; ++i) {
sz[i] = t.size(i);
st[i] = t.stride(i);
}
return TensorInfo<scalar, IndexType>(
t.data_ptr<scalar>(), dims, sz, st);
}
} // detail
} // hip
} // at
### |
#pragma once
#include <ATen/core/TensorBase.h>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/native/CanUse32BitIndexMath.h>
namespace at {
namespace cuda {
namespace detail {
TORCH_CUDA_CU_API bool maybeOverlappingIndices(const at::TensorBase &t);
using at::native::canUse32BitIndexMath;
template <typename scalar, typename IndexType>
TensorInfo<scalar, IndexType>
getTensorInfo(const at::TensorBase &t) {
IndexType sz[MAX_TENSORINFO_DIMS];
IndexType st[MAX_TENSORINFO_DIMS];
int dims = t.dim();
for (int i = 0; i < dims; ++i) {
sz[i] = t.size(i);
st[i] = t.stride(i);
}
return TensorInfo<scalar, IndexType>(
t.data_ptr<scalar>(), dims, sz, st);
}
} // detail
} // cuda
} // at
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <c10/util/BFloat16-math.h>
// NOTE: HIP on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void nextafter_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(kBFloat16, iter.common_dtype(), "nextafter_hip", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return std::nextafter(a, b);
});
});
}
void heaviside_kernel_hip(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, kBFloat16, iter.dtype(), "heaviside_hip", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a == 0 ? b : static_cast<scalar_t>(a > 0);
});
});
}
REGISTER_DISPATCH(nextafter_stub, &nextafter_kernel_hip);
REGISTER_DISPATCH(heaviside_stub, &heaviside_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <c10/util/BFloat16-math.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at::native {
void nextafter_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND(kBFloat16, iter.common_dtype(), "nextafter_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return std::nextafter(a, b);
});
});
}
void heaviside_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBool, kBFloat16, iter.dtype(), "heaviside_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a == 0 ? b : static_cast<scalar_t>(a > 0);
});
});
}
REGISTER_DISPATCH(nextafter_stub, &nextafter_kernel_cuda);
REGISTER_DISPATCH(heaviside_stub, &heaviside_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char acosh_name[] = "acosh_impl";
#endif
void acosh_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if(at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto acosh_string = jiterator_stringify(
template <typename T>
T acosh_impl(T a) {
return std::acosh(a);
}
);
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
jitted_gpu_kernel<
/*name=*/ acosh_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, acosh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::acosh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
common_dtype, "acosh_hip",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acosh(a);
});
});
}
}
REGISTER_DISPATCH(acosh_stub, &acosh_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char acosh_name[] = "acosh_impl";
#endif
void acosh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if(at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto acosh_string = jiterator_stringify(
template <typename T>
T acosh_impl(T a) {
return std::acosh(a);
}
);
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
jitted_gpu_kernel<
/*name=*/ acosh_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, acosh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "acosh_name", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::acosh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
common_dtype, "acosh_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acosh(a);
});
});
}
}
REGISTER_DISPATCH(acosh_stub, &acosh_kernel_cuda);
} // namespace at::native
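// Note on the opmath pattern above (descriptive, not from the original
// source): at::opmath_type<T> widens low-precision types for the actual
// computation, e.g. at::opmath_type<c10::complex<at::Half>> is
// c10::complex<float>, so ::acosh(static_cast<opmath_t>(a)) evaluates in the
// wider type and the result narrows back to scalar_t on return. The same
// pattern appears in the other complex unary kernels below.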
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char acos_name[] = "acos_impl";
#endif
void acos_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto acos_string = jiterator_stringify(
template <typename T> T acos_impl(T a) { return std::acos(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "acos_name", [&]() {
jitted_gpu_kernel<
/*name=*/acos_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, acos_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "acos_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::acos(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"acos_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acos(a);
});
});
}
}
REGISTER_DISPATCH(acos_stub, &acos_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char acos_name[] = "acos_impl";
#endif
void acos_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto acos_string = jiterator_stringify(
template <typename T> T acos_impl(T a) { return std::acos(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "acos_name", [&]() {
jitted_gpu_kernel<
/*name=*/acos_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, acos_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "acos_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::acos(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"acos_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::acos(a);
});
});
}
}
REGISTER_DISPATCH(acos_stub, &acos_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char asinh_name[] = "asinh_impl";
#endif
void asinh_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto asinh_string = jiterator_stringify(
template <typename T> T asinh_impl(T a) { return std::asinh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asinh_name", [&]() {
jitted_gpu_kernel<
/*name=*/asinh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, asinh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asinh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::asinh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"asinh_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asinh(a);
});
});
}
}
REGISTER_DISPATCH(asinh_stub, &asinh_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char asinh_name[] = "asinh_impl";
#endif
void asinh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto asinh_string = jiterator_stringify(
template <typename T> T asinh_impl(T a) { return std::asinh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asinh_name", [&]() {
jitted_gpu_kernel<
/*name=*/asinh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, asinh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asinh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::asinh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"asinh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asinh(a);
});
});
}
}
REGISTER_DISPATCH(asinh_stub, &asinh_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char asin_name[] = "asin_impl";
#endif
void asin_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto asin_string = jiterator_stringify(
template <typename T> T asin_impl(T a) { return std::asin(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asin_name", [&]() {
jitted_gpu_kernel<
/*name=*/asin_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, asin_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asin_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::asin(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, common_dtype, "asin_hip", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asin(a);
});
});
}
}
REGISTER_DISPATCH(asin_stub, &asin_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char asin_name[] = "asin_impl";
#endif
void asin_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto asin_string = jiterator_stringify(
template <typename T> T asin_impl(T a) { return std::asin(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asin_name", [&]() {
jitted_gpu_kernel<
/*name=*/asin_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, asin_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "asin_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::asin(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
kHalf, kBFloat16, common_dtype, "asin_cuda", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::asin(a);
});
});
}
}
REGISTER_DISPATCH(asin_stub, &asin_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char atanh_name[] = "atanh_impl";
#endif
void atanh_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto atanh_string = jiterator_stringify(
template <typename T> T atanh_impl(T a) { return std::atanh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "atanh_name", [&]() {
jitted_gpu_kernel<
/*name=*/atanh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, atanh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "atanh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::atanh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"atanh_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atanh(a);
});
});
}
}
REGISTER_DISPATCH(atanh_stub, &atanh_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char atanh_name[] = "atanh_impl";
#endif
void atanh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto atanh_string = jiterator_stringify(
template <typename T> T atanh_impl(T a) { return std::atanh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "atanh_name", [&]() {
jitted_gpu_kernel<
/*name=*/atanh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, atanh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "atanh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::atanh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"atanh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atanh(a);
});
});
}
}
REGISTER_DISPATCH(atanh_stub, &atanh_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char atan_name[] = "atan_impl";
#endif
void atan_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto atan_string = jiterator_stringify(
template <typename T>
T atan_impl(T a) {
return std::atan(a);
}
);
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
jitted_gpu_kernel<
/*name=*/ atan_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, atan_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::atan(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
common_dtype, "atan_hip",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atan(a);
});
});
}
}
REGISTER_DISPATCH(atan_stub, &atan_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char atan_name[] = "atan_impl";
#endif
void atan_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto atan_string = jiterator_stringify(
template <typename T>
T atan_impl(T a) {
return std::atan(a);
}
);
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
jitted_gpu_kernel<
/*name=*/ atan_name,
/*return_dtype=*/ scalar_t,
/*common_dtype=*/ scalar_t,
/*arity=*/ 1>(iter, atan_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(kComplexHalf, common_dtype, "atan_name", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::atan(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
common_dtype, "atan_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::atan(a);
});
});
}
}
REGISTER_DISPATCH(atan_stub, &atan_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char cosh_name[] = "cosh_impl";
#endif
void cosh_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto cosh_string = jiterator_stringify(
template <typename T> T cosh_impl(T a) { return std::cosh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cosh_name", [&]() {
jitted_gpu_kernel<
/*name=*/cosh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, cosh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cosh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::cosh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"cosh_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::cosh(a);
});
});
}
}
REGISTER_DISPATCH(cosh_stub, &cosh_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char cosh_name[] = "cosh_impl";
#endif
void cosh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto cosh_string = jiterator_stringify(
template <typename T> T cosh_impl(T a) { return std::cosh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cosh_name", [&]() {
jitted_gpu_kernel<
/*name=*/cosh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, cosh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cosh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::cosh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"cosh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::cosh(a);
});
});
}
}
REGISTER_DISPATCH(cosh_stub, &cosh_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/JitLoops.cuh>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char cos_name[] = "cos_impl";
#endif // AT_USE_JITERATOR()
void cos_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto cos_string = jiterator_stringify(
template <typename T> T cos_impl(T a) { return std::cos(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cos_name", [&]() {
jitted_gpu_kernel<
/*name=*/cos_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, cos_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cos_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::cos(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"cos_hip",
[&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::cos(a); });
});
}
}
REGISTER_DISPATCH(cos_stub, &cos_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char cos_name[] = "cos_impl";
#endif // AT_USE_JITERATOR()
void cos_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto cos_string = jiterator_stringify(
template <typename T> T cos_impl(T a) { return std::cos(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cos_name", [&]() {
jitted_gpu_kernel<
/*name=*/cos_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, cos_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "cos_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::cos(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"cos_cuda",
[&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::cos(a); });
});
}
}
REGISTER_DISPATCH(cos_stub, &cos_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/hip\Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char sinh_name[] = "sinh_impl";
#endif
void sinh_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto sinh_string = jiterator_stringify(
template <typename T> T sinh_impl(T a) { return std::sinh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sinh_name", [&]() {
jitted_gpu_kernel<
/*name=*/sinh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, sinh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sinh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::sinh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"sinh_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sinh(a);
});
});
}
}
REGISTER_DISPATCH(sinh_stub, &sinh_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char sinh_name[] = "sinh_impl";
#endif
void sinh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto sinh_string = jiterator_stringify(
template <typename T> T sinh_impl(T a) { return std::sinh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sinh_name", [&]() {
jitted_gpu_kernel<
/*name=*/sinh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, sinh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sinh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::sinh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"sinh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::sinh(a);
});
});
}
}
REGISTER_DISPATCH(sinh_stub, &sinh_kernel_cuda);
} // namespace at::native
### |
#pragma once
#include <assert.h>
#if defined(__HIP_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
#include <hip/hip_runtime.h>
#endif
namespace at {
namespace hip {
namespace detail {
template <typename Value>
struct DivMod {
Value div, mod;
C10_HOST_DEVICE DivMod(Value div, Value mod) : div(div), mod(mod) { }
};
template <typename Value>
struct IntDivider {
IntDivider() = default;
IntDivider(Value d) : divisor(d) { }
C10_HOST_DEVICE inline Value div(Value n) const { return n / divisor; }
C10_HOST_DEVICE inline Value mod(Value n) const { return n % divisor; }
C10_HOST_DEVICE inline DivMod<Value> divmod(Value n) const {
return DivMod<Value>(n / divisor, n % divisor);
}
Value divisor;
};
template <>
struct IntDivider<unsigned int> {
static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int.");
IntDivider() = default;
IntDivider(unsigned int d) : divisor(d) {
assert(divisor >= 1 && divisor <= INT32_MAX);
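    // Precompute a shift and a magic multiplier so that div() can evaluate
    // n / divisor with a multiply-high and a shift instead of a hardware
    // divide (unsigned magic-number division).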
for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;
uint64_t one = 1;
uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;
m1 = magic;
assert(m1 > 0 && m1 == magic);
}
C10_HOST_DEVICE inline unsigned int div(unsigned int n) const {
#if defined(__HIP_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
unsigned int t = __umulhi(n, m1);
return (t + n) >> shift;
#else
uint64_t t = ((uint64_t) n * m1) >> 32;
return (t + n) >> shift;
#endif
}
C10_HOST_DEVICE inline unsigned int mod(unsigned int n) const {
return n - div(n) * divisor;
}
C10_HOST_DEVICE inline DivMod<unsigned int> divmod(unsigned int n) const {
unsigned int q = div(n);
return DivMod<unsigned int>(q, n - q * divisor);
}
unsigned int divisor;
unsigned int m1;
unsigned int shift;
};
}}} ### |
#pragma once
#include <assert.h>
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
#include <cuda_runtime.h>
#endif
namespace at {
namespace cuda {
namespace detail {
template <typename Value>
struct DivMod {
Value div, mod;
C10_HOST_DEVICE DivMod(Value div, Value mod) : div(div), mod(mod) { }
};
template <typename Value>
struct IntDivider {
IntDivider() = default;
IntDivider(Value d) : divisor(d) { }
C10_HOST_DEVICE inline Value div(Value n) const { return n / divisor; }
C10_HOST_DEVICE inline Value mod(Value n) const { return n % divisor; }
C10_HOST_DEVICE inline DivMod<Value> divmod(Value n) const {
return DivMod<Value>(n / divisor, n % divisor);
}
Value divisor;
};
template <>
struct IntDivider<unsigned int> {
static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int.");
IntDivider() = default;
IntDivider(unsigned int d) : divisor(d) {
assert(divisor >= 1 && divisor <= INT32_MAX);
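    // Precompute a shift and a magic multiplier so that div() can evaluate
    // n / divisor with a multiply-high and a shift instead of a hardware
    // divide (unsigned magic-number division).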
for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break;
uint64_t one = 1;
uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1;
m1 = magic;
assert(m1 > 0 && m1 == magic);
}
C10_HOST_DEVICE inline unsigned int div(unsigned int n) const {
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__)
unsigned int t = __umulhi(n, m1);
return (t + n) >> shift;
#else
uint64_t t = ((uint64_t) n * m1) >> 32;
return (t + n) >> shift;
#endif
}
C10_HOST_DEVICE inline unsigned int mod(unsigned int n) const {
return n - div(n) * divisor;
}
C10_HOST_DEVICE inline DivMod<unsigned int> divmod(unsigned int n) const {
unsigned int q = div(n);
return DivMod<unsigned int>(q, n - q * divisor);
}
unsigned int divisor;
unsigned int m1;
unsigned int shift;
};
}}}
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/hip\Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char sin_name[] = "sin_impl";
#endif
void sin_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto sin_string = jiterator_stringify(
template <typename T> T sin_impl(T a) { return std::sin(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sin_name", [&]() {
jitted_gpu_kernel<
/*name=*/sin_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, sin_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sin_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::sin(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"sin_hip",
[&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sin(a); });
});
}
}
REGISTER_DISPATCH(sin_stub, &sin_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char sin_name[] = "sin_impl";
#endif
void sin_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR()
static const auto sin_string = jiterator_stringify(
template <typename T> T sin_impl(T a) { return std::sin(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sin_name", [&]() {
jitted_gpu_kernel<
/*name=*/sin_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, sin_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "sin_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::sin(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"sin_cuda",
[&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::sin(a); });
});
}
}
REGISTER_DISPATCH(sin_stub, &sin_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/hip\Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char tanh_name[] = "tanh_impl";
#endif
void tanh_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto tanh_string = jiterator_stringify(
template <typename T> T tanh_impl(T a) { return std::tanh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
jitted_gpu_kernel<
/*name=*/tanh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, tanh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::tanh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"tanh_hip",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tanh(a);
});
});
}
}
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char tanh_name[] = "tanh_impl";
#endif
void tanh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto tanh_string = jiterator_stringify(
template <typename T> T tanh_impl(T a) { return std::tanh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
jitted_gpu_kernel<
/*name=*/tanh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, tanh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::tanh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"tanh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tanh(a);
});
});
}
}
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/hip\Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_HIP char tan_name[] = "tan_impl";
#endif
void tan_kernel_hip(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto tan_string = jiterator_stringify(
template <typename T> T tan_impl(T a) { return std::tan(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tan_name", [&]() {
jitted_gpu_kernel<
/*name=*/tan_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, tan_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tan_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::tan(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"tan_hip",
[&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::tan(a); });
});
}
}
REGISTER_DISPATCH(tan_stub, &tan_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
#if 0 && AT_USE_JITERATOR()
CONSTEXPR_EXCEPT_WIN_CUDA char tan_name[] = "tan_impl";
#endif
void tan_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
// Disabled due to accuracy issues
#if 0 && AT_USE_JITERATOR()
static const auto tan_string = jiterator_stringify(
template <typename T> T tan_impl(T a) { return std::tan(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tan_name", [&]() {
jitted_gpu_kernel<
/*name=*/tan_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, tan_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tan_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::tan(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"tan_cuda",
[&]() {
gpu_kernel(
iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t { return ::tan(a); });
});
}
}
REGISTER_DISPATCH(tan_stub, &tan_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include <ATen/core/Tensor.h>
namespace at {
namespace native {
namespace internal {
template <typename scalar_t>
std::tuple<Tensor, Tensor, Tensor> unique_hip_template(
const Tensor& self,
const bool consecutive,
const bool return_inverse,
const bool return_counts);
} // namespace internal
} // namespace native
} // namespace at
### |
#include <ATen/core/Tensor.h>
namespace at {
namespace native {
namespace internal {
template <typename scalar_t>
std::tuple<Tensor, Tensor, Tensor> unique_cuda_template(
const Tensor& self,
const bool consecutive,
const bool return_inverse,
const bool return_counts);
} // namespace internal
} // namespace native
} // namespace at
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/sparse/ValidateCompressedIndicesCommon.h>
#include <ATen/native/hip\Loops.cuh>
namespace at::native {
namespace {
template <typename func_t>
struct HIPKernelLauncher {
static void launch(TensorIteratorBase& iter, const func_t& f) {
gpu_kernel(iter, f);
}
};
}
void _validate_compressed_sparse_indices_hip(
const bool is_crow,
const Tensor& cidx,
const Tensor& idx,
const int64_t cdim,
const int64_t dim,
const int64_t nnz) {
validate_compressed_sparse_indices_kernel<HIPKernelLauncher>(
is_crow, cidx, idx, cdim, dim, nnz);
}
} // namespace at::native
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/sparse/ValidateCompressedIndicesCommon.h>
#include <ATen/native/cuda/Loops.cuh>
namespace at::native {
namespace {
template <typename func_t>
struct CUDAKernelLauncher {
static void launch(TensorIteratorBase& iter, const func_t& f) {
gpu_kernel(iter, f);
}
};
}
void _validate_compressed_sparse_indices_cuda(
const bool is_crow,
const Tensor& cidx,
const Tensor& idx,
const int64_t cdim,
const int64_t dim,
const int64_t nnz) {
validate_compressed_sparse_indices_kernel<CUDAKernelLauncher>(
is_crow, cidx, idx, cdim, dim, nnz);
}
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/hip\JitLoops.cuh>
#include <ATen/native/hip\Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/hip\Math.cuh>
#include <ATen/native/hip\jit_utils.h>
namespace at::native {
namespace {
/*
* This function is derived from the implementation of the zeta function in the Cephes Math Library.
* See note [3-Clause BSD License for the Cephes Math Library].
*/
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_HIP char zeta_name[] = "zeta";
void zeta_kernel_hip(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_hip", [&]() {
opmath_jitted_gpu_kernel_with_scalars</*name=*/zeta_name,
/*return_dtype=*/ scalar_t,
/*f_inputs_dtype=*/ scalar_t>(iter, zeta_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_hip", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t q) -> scalar_t {
return zeta<scalar_t, /*is_hip=*/true>(x, q);
});
});
#endif //jiterator
}
} // namespace (anonymous)
REGISTER_DISPATCH(zeta_stub, &zeta_kernel_hip);
} // namespace at::native
### |
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/JitLoops.cuh>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/native/cuda/jit_utils.h>
namespace at::native {
namespace {
/*
* This function is derived from the implementation of the zeta function in the Cephes Math Library.
* See note [3-Clause BSD License for the Cephes Math Library].
*/
// See note [Jiterator]
CONSTEXPR_EXCEPT_WIN_CUDA char zeta_name[] = "zeta";
void zeta_kernel_cuda(TensorIteratorBase& iter) {
#if AT_USE_JITERATOR()
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_cuda", [&]() {
opmath_jitted_gpu_kernel_with_scalars</*name=*/zeta_name,
/*return_dtype=*/ scalar_t,
/*f_inputs_dtype=*/ scalar_t>(iter, zeta_string);
});
#else
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t q) -> scalar_t {
return zeta<scalar_t, /*is_cuda=*/true>(x, q);
});
});
#endif //jiterator
}
} // namespace (anonymous)
REGISTER_DISPATCH(zeta_stub, &zeta_kernel_cuda);
} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip\Loops.cuh>
namespace at {
namespace native {
Tensor& relu_quantized_hip_(Tensor& self) {
const auto zero_point = self.q_zero_point();
AT_DISPATCH_QINT_TYPES(
self.scalar_type(), "qrelu_hip", [&]() {
auto iter = TensorIterator::unary_op(self, self);
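        // Quantized ReLU: clamp the underlying integer at the zero point,
        // which is the quantized representation of 0.0.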
gpu_kernel(iter, [zero_point] GPU_LAMBDA(scalar_t value) -> scalar_t {
return scalar_t(std::max<underlying_t>(value.val_, zero_point));
});
});
return self;
}
} // namespace native
} // namespace at
### |
#include <ATen/ATen.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
namespace at {
namespace native {
Tensor& relu_quantized_cuda_(Tensor& self) {
const auto zero_point = self.q_zero_point();
AT_DISPATCH_QINT_TYPES(
self.scalar_type(), "qrelu_cuda", [&]() {
auto iter = TensorIterator::unary_op(self, self);
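        // Quantized ReLU: clamp the underlying integer at the zero point,
        // which is the quantized representation of 0.0.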
gpu_kernel(iter, [zero_point] GPU_LAMBDA(scalar_t value) -> scalar_t {
return scalar_t(std::max<underlying_t>(value.val_, zero_point));
});
});
return self;
}
} // namespace native
} // namespace at
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/hip\Loops.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/int_repr_native.h>
#endif
namespace at {
namespace native {
Tensor int_repr_quantized_hip(const Tensor& self) {
Tensor dst;
AT_DISPATCH_QINT_TYPES(self.scalar_type(), "int_repr_quantized_hip", [&]() {
dst = at::empty(
self.sizes(),
self.options().dtype(UNDERLYING_TYPE),
self.suggest_memory_format());
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(dst)
.add_input(self)
.build();
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t value) -> underlying_t {
return value.val_;
});
});
return dst;
}
} // namespace native
} // namespace at
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/int_repr_native.h>
#endif
namespace at {
namespace native {
Tensor int_repr_quantized_cuda(const Tensor& self) {
Tensor dst;
AT_DISPATCH_QINT_TYPES(self.scalar_type(), "int_repr_quantized_cuda", [&]() {
dst = at::empty(
self.sizes(),
self.options().dtype(UNDERLYING_TYPE),
self.suggest_memory_format());
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(dst)
.add_input(self)
.build();
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t value) -> underlying_t {
return value.val_;
});
});
return dst;
}
} // namespace native
} // namespace at
### |
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/hip\Loops.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_empty_per_channel_affine_quantized.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
#include <ATen/ops/empty.h>
#endif
namespace at {
namespace native {
void assign_quantized_tensor_hip(
const Tensor& self, Tensor& dst) {
AT_DISPATCH_QINT_TYPES(
dst.scalar_type(), "assign_quantized_tensor_hip", [&]() {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(dst)
.add_input(self)
.build();
gpu_kernel(iter, [] GPU_LAMBDA(underlying_t value) -> scalar_t {
return scalar_t(value);
});
});
}
Tensor make_per_tensor_quantized_tensor_hip(
const Tensor& self,
double scale,
int64_t zero_point) {
Tensor dst = at::_empty_affine_quantized(
self.sizes(),
self.options().dtype(toQIntType(self.scalar_type())),
scale,
zero_point);
assign_quantized_tensor_hip(self, dst);
return dst;
}
Tensor make_per_channel_quantized_tensor_hip(
const Tensor& self,
const Tensor& scales,
const Tensor& zero_points,
int64_t axis) {
Tensor dst = at::_empty_per_channel_affine_quantized(
self.sizes(),
scales,
zero_points,
axis,
self.options().dtype(toQIntType(self.scalar_type())));
assign_quantized_tensor_hip(self, dst);
return dst;
}
} // namespace native
} // namespace at
### |
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_empty_affine_quantized.h>
#include <ATen/ops/_empty_per_channel_affine_quantized.h>
#include <ATen/ops/_make_per_channel_quantized_tensor_native.h>
#include <ATen/ops/_make_per_tensor_quantized_tensor_native.h>
#include <ATen/ops/empty.h>
#endif
namespace at {
namespace native {
void assign_quantized_tensor_cuda(
const Tensor& self, Tensor& dst) {
AT_DISPATCH_QINT_TYPES(
dst.scalar_type(), "assign_quantized_tensor_cuda", [&]() {
auto iter = TensorIteratorConfig()
.check_all_same_dtype(false)
.add_output(dst)
.add_input(self)
.build();
gpu_kernel(iter, [] GPU_LAMBDA(underlying_t value) -> scalar_t {
return scalar_t(value);
});
});
}
Tensor make_per_tensor_quantized_tensor_cuda(
const Tensor& self,
double scale,
int64_t zero_point) {
Tensor dst = at::_empty_affine_quantized(
self.sizes(),
self.options().dtype(toQIntType(self.scalar_type())),
scale,
zero_point);
assign_quantized_tensor_cuda(self, dst);
return dst;
}
Tensor make_per_channel_quantized_tensor_cuda(
const Tensor& self,
const Tensor& scales,
const Tensor& zero_points,
int64_t axis) {
Tensor dst = at::_empty_per_channel_affine_quantized(
self.sizes(),
scales,
zero_points,
axis,
self.options().dtype(toQIntType(self.scalar_type())));
assign_quantized_tensor_cuda(self, dst);
return dst;
}
} // namespace native
} // namespace at
### |
// !!! This is a file automatically generated by hipify!!!
#pragma once
#include <ATen/hip/detail\TensorInfo.cuh>
#include <c10/macros/Macros.h>
namespace at {
class Tensor;
}
namespace c10 {
class Scalar;
}
namespace at { namespace native {
void s_addmm_out_sparse_dense_hip_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense);
}} // namespace at::native
### |
#pragma once
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <c10/macros/Macros.h>
namespace at {
class Tensor;
}
namespace c10 {
class Scalar;
}
namespace at { namespace native {
void s_addmm_out_sparse_dense_cuda_worker(int64_t nnz, int64_t m, int64_t n, int64_t k, Tensor& r_, const Scalar& beta, const Tensor& t, const Scalar& alpha, Tensor& indices, Tensor& values, const Tensor& dense);
}} // namespace at::native
### |
// !!! This is a file automatically generated by hipify!!!
// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
// Eager mode clients should not include this file directly; instead,
// they should #include <ATen/hip\HIPGeneratorImpl.h>, which has a #pragma once.
// Stores RNG state values. Passed as a kernel argument.
// See Note [HIP Graph-safe RNG states].
//
// The raw definition lives in its own file so jit codegen can easily copy it.
namespace at {
struct PhiloxHipState {
PhiloxHipState() = default;
// Called if graph capture is not underway
PhiloxHipState(uint64_t seed,
uint64_t offset) {
seed_.val = seed;
offset_.val = offset;
}
// Called if graph capture is underway
PhiloxHipState(int64_t* seed,
int64_t* offset_extragraph,
uint32_t offset_intragraph) {
seed_.ptr = seed;
offset_.ptr = offset_extragraph;
offset_intragraph_ = offset_intragraph;
captured_ = true;
}
// Public members, directly accessible by at::cuda::philox::unpack.
// If we made them private with getters/setters, the getters/setters
// would have to be __device__, and we can't declare __device__ in ATen.
union Payload {
uint64_t val;
int64_t* ptr;
};
Payload seed_;
Payload offset_;
uint32_t offset_intragraph_ = 0;
bool captured_ = false;
};
} // namespace at
### |
// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
// Eager mode clients should not include this file directly; instead,
// they should #include <ATen/cuda/CUDAGeneratorImpl.h>, which has a #pragma once.
// Stores RNG state values. Passed as a kernel argument.
// See Note [CUDA Graph-safe RNG states].
//
// The raw definition lives in its own file so jit codegen can easily copy it.
namespace at {
struct PhiloxCudaState {
PhiloxCudaState() = default;
// Called if graph capture is not underway
PhiloxCudaState(uint64_t seed,
uint64_t offset) {
seed_.val = seed;
offset_.val = offset;
}
// Called if graph capture is underway
PhiloxCudaState(int64_t* seed,
int64_t* offset_extragraph,
uint32_t offset_intragraph) {
seed_.ptr = seed;
offset_.ptr = offset_extragraph;
offset_intragraph_ = offset_intragraph;
captured_ = true;
}
// Public members, directly accessible by at::cuda::philox::unpack.
// If we made them private with getters/setters, the getters/setters
// would have to be __device__, and we can't declare __device__ in ATen.
union Payload {
uint64_t val;
int64_t* ptr;
};
Payload seed_;
Payload offset_;
uint32_t offset_intragraph_ = 0;
bool captured_ = false;
};
} // namespace at
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim128(FMHA_dgrad_params ¶ms, hipStream_t stream, const bool configure) {
FP16_SWITCH(params.is_bf16, ([&] {
using Kernel_traits = FMHA_kernel_traits<128, 128, 16, 1, 8, 0x100u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
}));
}### |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim128(FMHA_dgrad_params ¶ms, cudaStream_t stream, const bool configure) {
FP16_SWITCH(params.is_bf16, ([&] {
using Kernel_traits = FMHA_kernel_traits<128, 128, 16, 1, 8, 0x100u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
}));
}
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim32(FMHA_dgrad_params ¶ms, hipStream_t stream, const bool configure) {
FP16_SWITCH(params.is_bf16, ([&] {
if (params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
}
}));
}### |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim32(FMHA_dgrad_params ¶ms, cudaStream_t stream, const bool configure) {
FP16_SWITCH(params.is_bf16, ([&] {
if (params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
}
}));
}
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim64(FMHA_dgrad_params ¶ms, hipStream_t stream, const bool configure) {
FP16_SWITCH(params.is_bf16, ([&] {
auto dprops = at::cuda::getCurrentDeviceProperties();
if (params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (params.seqlen_k >= 256) {
if ((dprops->major == 8 && dprops->minor == 0) ||
(dprops->major == 9 && dprops->minor == 0)) {
// Don't share smem for K & V, and don't keep V in registers
// This speeds things up by 2-3% by avoiding register spills, but it
// uses more shared memory, which is fine on A100 and H100 but not other
// GPUs. For other GPUs, we keep V in registers.
using Kernel_traits =
FMHA_kernel_traits<256, 64, 16, 1, 8, 0x100u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (dprops->major == 8 && dprops->minor > 0) {
using Kernel_traits =
FMHA_kernel_traits<256, 64, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (dprops->major == 7 && dprops->minor == 5) {
using Kernel_traits =
FMHA_kernel_traits<128, 64, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
}
}
}));
}### |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_bwd_launch_template.h>
void run_fmha_bwd_hdim64(FMHA_dgrad_params ¶ms, cudaStream_t stream, const bool configure) {
FP16_SWITCH(params.is_bf16, ([&] {
auto dprops = at::cuda::getCurrentDeviceProperties();
if (params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (params.seqlen_k >= 256) {
if ((dprops->major == 8 && dprops->minor == 0) ||
(dprops->major == 9 && dprops->minor == 0)) {
// Don't share smem for K & V, and don't keep V in registers
// This speeds things up by 2-3% by avoiding register spills, but it
// uses more shared memory, which is fine on A100 and H100 but not other
// GPUs. For other GPUs, we keep V in registers.
using Kernel_traits =
FMHA_kernel_traits<256, 64, 16, 1, 8, 0x100u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (dprops->major == 8 && dprops->minor > 0) {
using Kernel_traits =
FMHA_kernel_traits<256, 64, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
} else if (dprops->major == 7 && dprops->minor == 5) {
using Kernel_traits =
FMHA_kernel_traits<128, 64, 16, 1, 8, 0x08u, elem_type>;
run_fmha_bwd_loop<Kernel_traits>(params, stream, configure);
}
}
}));
}
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim128(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
using Kernel_traits = FMHA_kernel_traits<128, 128, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}));
}### |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim128(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
using Kernel_traits = FMHA_kernel_traits<128, 128, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}));
}
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim32(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
if (launch_params.params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
} else if (launch_params.params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}
}));
}### |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim32(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
if (launch_params.params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 32, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
} else if (launch_params.params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 32, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}
}));
}
### |
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/hip/flash_attn\fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim64(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
if (launch_params.params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
} else if (launch_params.params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 64, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}
}));
}
### |
// Copyright (c) 2022, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
#include <ATen/native/transformers/cuda/flash_attn/fmha_fwd_launch_template.h>
void run_fmha_fwd_hdim64(Launch_params<FMHA_fprop_params> &launch_params) {
FP16_SWITCH(launch_params.params.is_bf16, ([&] {
if (launch_params.params.seqlen_k == 128) {
using Kernel_traits = FMHA_kernel_traits<128, 64, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
} else if (launch_params.params.seqlen_k >= 256) {
using Kernel_traits = FMHA_kernel_traits<256, 64, 16, 1, 4, 0x08u, elem_type>;
run_fmha_fwd_loop<Kernel_traits>(launch_params);
}
}));
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 128, 128, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x128_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k128_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k32_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k32_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k32_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k32_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k32_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k32_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k32_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64, true>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_seqaligned_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#pragma once
#include <ATen/CollapseDims.h>
namespace at {
namespace hip {
namespace detail {
#define MAX_TENSORINFO_DIMS 25
template <typename T, typename IndexType>
struct TensorInfo {
TensorInfo();
TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]);
void reduceDim(int dim);
int collapseDims(const int excludeDim = -1);
__host__ __device__ inline bool isContiguous() const {
return (dims == 1 && strides[0] == 1);
}
T* data;
IndexType sizes[MAX_TENSORINFO_DIMS];
IndexType strides[MAX_TENSORINFO_DIMS];
int dims;
};
template <typename T, typename IndexType>
TensorInfo<T, IndexType>::TensorInfo() {
data = nullptr;
dims = 0;
}
template <typename T, typename IndexType>
TensorInfo<T, IndexType>::TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]) {
data = p;
dims = dim;
TORCH_CHECK(dims < MAX_TENSORINFO_DIMS, "HIP Tensors cannot have more than 25 dimensions");
for (int i = 0; i < dim; ++i) {
sizes[i] = sz[i];
strides[i] = st[i];
}
}
template <typename T, typename IndexType>
void
TensorInfo<T, IndexType>::reduceDim(int dim) {
TORCH_CHECK(dim < dims && dim >= 0, "expected dim between 0 and dims - 1");
sizes[dim] = 1;
}
template <typename T, typename IndexType>
int
TensorInfo<T, IndexType>::collapseDims(const int excludeDim) {
auto result = at::collapse_dims(sizes, strides, dims, excludeDim);
dims = std::get<1>(result);
return std::get<0>(result);
}
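// Maps a flattened linear index to a strided element offset using the
// tensor's sizes and strides, peeling dimensions from the last
// (fastest-varying) one. Dims is a compile-time dimension count; the -1
// specialization below uses the runtime dimension count instead.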
template <typename T, typename IndexType, int Dims>
struct IndexToOffset {
static __host__ __device__ IndexType get(
IndexType linearId, const TensorInfo<T, IndexType>& info) {
IndexType offset = 0;
for (int i = Dims - 1; i > 0; --i) {
IndexType curDimIndex = linearId % info.sizes[i];
IndexType curDimOffset = curDimIndex * info.strides[i];
offset += curDimOffset;
linearId /= info.sizes[i];
}
return offset + linearId * info.strides[0];
}
};
template <typename T, typename IndexType>
struct IndexToOffset<T, IndexType, -1> {
static inline __host__ __device__ IndexType get(
IndexType linearId, const TensorInfo<T, IndexType>& info) {
IndexType offset = 0;
for (int i = info.dims - 1; i > 0; --i) {
IndexType curDimIndex = linearId % info.sizes[i];
IndexType curDimOffset = curDimIndex * info.strides[i];
offset += curDimOffset;
linearId /= info.sizes[i];
}
return offset + linearId * info.strides[0];
}
};
}
}
} ### |
#pragma once
#include <ATen/CollapseDims.h>
namespace at {
namespace cuda {
namespace detail {
#define MAX_TENSORINFO_DIMS 25
template <typename T, typename IndexType>
struct TensorInfo {
TensorInfo();
TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]);
void reduceDim(int dim);
int collapseDims(const int excludeDim = -1);
__host__ __device__ inline bool isContiguous() const {
return (dims == 1 && strides[0] == 1);
}
T* data;
IndexType sizes[MAX_TENSORINFO_DIMS];
IndexType strides[MAX_TENSORINFO_DIMS];
int dims;
};
template <typename T, typename IndexType>
TensorInfo<T, IndexType>::TensorInfo() {
data = nullptr;
dims = 0;
}
template <typename T, typename IndexType>
TensorInfo<T, IndexType>::TensorInfo(T* p, int dim, IndexType sz[MAX_TENSORINFO_DIMS], IndexType st[MAX_TENSORINFO_DIMS]) {
data = p;
dims = dim;
TORCH_CHECK(dims < MAX_TENSORINFO_DIMS, "CUDA Tensors cannot have more than 25 dimensions");
for (int i = 0; i < dim; ++i) {
sizes[i] = sz[i];
strides[i] = st[i];
}
}
template <typename T, typename IndexType>
void
TensorInfo<T, IndexType>::reduceDim(int dim) {
TORCH_CHECK(dim < dims && dim >= 0, "expected dim between 0 and dims - 1");
sizes[dim] = 1;
}
template <typename T, typename IndexType>
int
TensorInfo<T, IndexType>::collapseDims(const int excludeDim) {
auto result = at::collapse_dims(sizes, strides, dims, excludeDim);
dims = std::get<1>(result);
return std::get<0>(result);
}
template <typename T, typename IndexType, int Dims>
struct IndexToOffset {
static __host__ __device__ IndexType get(
IndexType linearId, const TensorInfo<T, IndexType>& info) {
IndexType offset = 0;
for (int i = Dims - 1; i > 0; --i) {
IndexType curDimIndex = linearId % info.sizes[i];
IndexType curDimOffset = curDimIndex * info.strides[i];
offset += curDimOffset;
linearId /= info.sizes[i];
}
return offset + linearId * info.strides[0];
}
};
template <typename T, typename IndexType>
struct IndexToOffset<T, IndexType, -1> {
static inline __host__ __device__ IndexType get(
IndexType linearId, const TensorInfo<T, IndexType>& info) {
IndexType offset = 0;
for (int i = info.dims - 1; i > 0; --i) {
IndexType curDimIndex = linearId % info.sizes[i];
IndexType curDimOffset = curDimIndex * info.strides[i];
offset += curDimOffset;
linearId /= info.sizes[i];
}
return offset + linearId * info.strides[0];
}
};
}
}
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, true, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k64_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k65536_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k65536_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k65536_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k65536_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 128, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, true, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_64x64_k65536_dropout_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::kMinBlocksPerSm)
fmha_cutlassB_bf16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::bfloat16_t, true, false, true, 128, 64, 96>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_bf16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 800
#if __HIP_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__HIP_ARCH__ + 0) / 10);
#endif
}
### |
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// This file is auto-generated. See "generate_kernels.py"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::kNumThreads,
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::kMinBlocksPerSm)
fmha_cutlassB_f16_aligned_128x64_k96_sm80(typename AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 800
#if __CUDA_ARCH__ < 1000
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm80, cutlass::half_t, true, false, true, 128, 64, 96>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f16_aligned_128x64_k96_sm80` is for sm80-sm100, but was built for sm%d\n",
int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 128>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k128_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 32>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k32_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
// Eager mode clients should not include this file directly, instead,
// they should #include <ATen/hip\HIPGraphsUtils.cuh>, which has a #pragma once.
namespace at {
namespace hip {
namespace philox {
// In-kernel call to retrieve philox seed and offset from a PhiloxHipState instance whether
// that instance was created with graph capture underway or not.
// See Note [HIP Graph-safe RNG states].
//
// We can't write a __device__ function in HIPGeneratorImpl.h, because it's in ATen.
// Also, whatever call unpacks PhiloxHipState in consumer kernels must be inlineable.
// Easiest thing that comes to mind is, define a __device__ unpack helper here, in ATen/cuda.
//
// The raw definition lives in its own file so jit codegen can easily copy it.
__device__ __forceinline__ std::tuple<uint64_t, uint64_t>
unpack(at::PhiloxHipState arg) {
if (arg.captured_) {
// static_cast avoids "warning: invalid narrowing conversion from "long" to "unsigned long".
// *(arg.offset_.ptr) is a broadcast load of a single int64_t to the entire kernel.
// For most threads' reads it will hit in cache, so it shouldn't hurt performance.
return std::make_tuple(static_cast<uint64_t>(*arg.seed_.ptr), static_cast<uint64_t>(*(arg.offset_.ptr) + arg.offset_intragraph_));
} else {
return std::make_tuple(arg.seed_.val, arg.offset_.val);
}
}
} // namespace philox
} // namespace hip
} // namespace at
### |
// No "#pragma once" because this is a raw definition that can be copied by jit codegen.
// Eager mode clients should not include this file directly, instead,
// they should #include <ATen/cuda/CUDAGraphsUtils.cuh>, which has a #pragma once.
namespace at {
namespace cuda {
namespace philox {
// In-kernel call to retrieve philox seed and offset from a PhiloxCudaState instance whether
// that instance was created with graph capture underway or not.
// See Note [CUDA Graph-safe RNG states].
//
// We can't write a __device__ function in CUDAGeneratorImpl.h, because it's in ATen.
// Also, whatever call unpacks PhiloxCudaState in consumer kernels must be inlineable.
// Easiest thing that comes to mind is, define a __device__ unpack helper here, in ATen/cuda.
//
// The raw definition lives in its own file so jit codegen can easily copy it.
__device__ __forceinline__ std::tuple<uint64_t, uint64_t>
unpack(at::PhiloxCudaState arg) {
if (arg.captured_) {
// static_cast avoids "warning: invalid narrowing conversion from "long" to "unsigned long".
// *(arg.offset_.ptr) is a broadcast load of a single int64_t to the entire kernel.
// For most threads' reads it will hit in cache, so it shouldn't hurt performance.
return std::make_tuple(static_cast<uint64_t>(*arg.seed_.ptr), static_cast<uint64_t>(*(arg.offset_.ptr) + arg.offset_intragraph_));
} else {
return std::make_tuple(arg.seed_.val, arg.offset_.val);
}
}
} // namespace philox
} // namespace cuda
} // namespace at
### |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, true, false, 64, 64, 64>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k64_dropout_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
#include "hip/hip_runtime.h"
#include <ATen/native/transformers/hip/mem_eff_attention\kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 500
#if __HIP_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm50` is for sm50-sm70, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 700
#if __HIP_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm70` is for sm70-sm75, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::Params p) {
#ifdef __HIP_ARCH__
#if __HIP_ARCH__ >= 750
#if __HIP_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm75` is for sm75-sm80, but was built for sm%d\n", int(__HIP_ARCH__ + 0) / 10);
#endif
}### |
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_sm50(typename AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 500
#if __CUDA_ARCH__ < 700
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm50, float, false, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_sm70(typename AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 700
#if __CUDA_ARCH__ < 750
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm70, float, false, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
__global__ void __launch_bounds__(
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::kNumThreads, AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::kMinBlocksPerSm)
fmha_cutlassB_f32_notaligned_64x64_k65536_sm75(typename AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::Params p) {
#ifdef __CUDA_ARCH__
#if __CUDA_ARCH__ >= 750
#if __CUDA_ARCH__ < 800
if (!p.advance_to_block()) {
return;
}
AttentionBackwardKernel<cutlass::arch::Sm75, float, false, false, false, 64, 64, 65536>::attention_kernel(p);
return;
#endif
#endif
printf(
"FATAL: kernel `fmha_cutlassB_f32_notaligned_64x64_k65536_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10);
#endif
}
### |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include <c10/hip/HIPException.h>
int safeDeviceCount() {
int count;
hipError_t err = hipGetDeviceCount(&count);
if (err == hipErrorInsufficientDriver || err == hipErrorNoDevice) {
return 0;
}
return count;
}
#define SKIP_IF_NO_GPU() \
do { \
if (safeDeviceCount() == 0) { \
return; \
} \
} while(0)
#define C10_ASSERT_NEAR(a, b, tol) assert(abs(a - b) < tol)
#define C10_DEFINE_TEST(a, b) \
__global__ void HIP##a##b(); \
TEST(a##Device, b) { \
SKIP_IF_NO_GPU(); \
hipDeviceSynchronize(); \
hipLaunchKernelGGL(( HIP##a##b), dim3(1), dim3(1), 0, 0, ); \
C10_HIP_KERNEL_LAUNCH_CHECK(); \
hipDeviceSynchronize(); \
ASSERT_EQ(hipGetLastError(), hipSuccess); \
} \
__global__ void HIP##a##b()
#include <c10/test/util/complex_math_test_common.h>
#undef C10_DEFINE_TEST
#undef C10_ASSERT_NEAR
#define C10_DEFINE_TEST(a, b) TEST(a##Host, b)
#define C10_ASSERT_NEAR(a, b, tol) ASSERT_NEAR(a, b, tol)
#include <c10/test/util/complex_math_test_common.h>
### |
#include <gtest/gtest.h>
#include <c10/cuda/CUDAException.h>
int safeDeviceCount() {
int count;
cudaError_t err = cudaGetDeviceCount(&count);
if (err == cudaErrorInsufficientDriver || err == cudaErrorNoDevice) {
return 0;
}
return count;
}
#define SKIP_IF_NO_GPU() \
do { \
if (safeDeviceCount() == 0) { \
return; \
} \
} while(0)
#define C10_ASSERT_NEAR(a, b, tol) assert(abs(a - b) < tol)
#define C10_DEFINE_TEST(a, b) \
__global__ void CUDA##a##b(); \
TEST(a##Device, b) { \
SKIP_IF_NO_GPU(); \
cudaDeviceSynchronize(); \
CUDA##a##b<<<1, 1>>>(); \
C10_CUDA_KERNEL_LAUNCH_CHECK(); \
cudaDeviceSynchronize(); \
ASSERT_EQ(cudaGetLastError(), cudaSuccess); \
} \
__global__ void CUDA##a##b()
#include <c10/test/util/complex_math_test_common.h>
#undef C10_DEFINE_TEST
#undef C10_ASSERT_NEAR
#define C10_DEFINE_TEST(a, b) TEST(a##Host, b)
#define C10_ASSERT_NEAR(a, b, tol) ASSERT_NEAR(a, b, tol)
#include <c10/test/util/complex_math_test_common.h>
### |
// !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/hip\HIPContext.h>
#include <c10/util/Optional.h>
#include <assert.h>
using namespace at;
// optional in cuda files
TEST(OptionalTest, OptionalTestHIP) {
if (!at::cuda::is_available()) return;
c10::optional<int64_t> trivially_destructible;
c10::optional<std::vector<int64_t>> non_trivially_destructible;
ASSERT_FALSE(trivially_destructible.has_value());
ASSERT_FALSE(non_trivially_destructible.has_value());
trivially_destructible = {5};
non_trivially_destructible = std::vector<int64_t>{5, 10};
ASSERT_TRUE(trivially_destructible.has_value());
ASSERT_TRUE(non_trivially_destructible.has_value());
}
### |
#include <gtest/gtest.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Optional.h>
#include <assert.h>
using namespace at;
// optional in cuda files
TEST(OptionalTest, OptionalTestCUDA) {
if (!at::cuda::is_available()) return;
c10::optional<int64_t> trivially_destructible;
c10::optional<std::vector<int64_t>> non_trivially_destructible;
ASSERT_FALSE(trivially_destructible.has_value());
ASSERT_FALSE(non_trivially_destructible.has_value());
trivially_destructible = {5};
non_trivially_destructible = std::vector<int64_t>{5, 10};
ASSERT_TRUE(trivially_destructible.has_value());
ASSERT_TRUE(non_trivially_destructible.has_value());
}
### |