[general]
version = "0.0.1"

[torch]
name = "quantization"
src = [
  "core/registration.h",
  "ext-torch/torch_binding.cpp",
  "ext-torch/torch_binding.h"
]
include = [ "." ]
pysrc = [ "ext-torch/__init__.py" ]

[kernel.cutlass_w8a8]
capabilities = [ "7.5", "8.0", "8.6", "8.7", "8.9", "9.0", "9.0a" ]
src = [
  "cutlass_w8a8/common.hpp",
  "cutlass_w8a8/scaled_mm_c2x.cu",
  "cutlass_w8a8/scaled_mm_c2x.cuh",
  "cutlass_w8a8/scaled_mm_c2x_sm75_dispatch.cuh",
  "cutlass_w8a8/scaled_mm_c2x_sm80_dispatch.cuh",
  "cutlass_w8a8/scaled_mm_c2x_sm89_fp8_dispatch.cuh",
  "cutlass_w8a8/scaled_mm_c2x_sm89_int8_dispatch.cuh",
  "cutlass_w8a8/scaled_mm_entry.cu",
  "cutlass_extensions/epilogue/scaled_mm_epilogues_c2x.hpp",
  "cutlass_extensions/epilogue/broadcast_load_epilogue_c2x.hpp",
]
include = [ "." ]
depends = [ "cutlass", "torch" ]

[kernel.cutlass_w8a8_hopper]
capabilities = [ "9.0", "9.0a" ]
src = [
  "cutlass_w8a8/common.hpp",
  "cutlass_w8a8/scaled_mm_c3x.cu",
  "cutlass_extensions/epilogue/scaled_mm_epilogues_c3x.hpp",
  "cutlass_extensions/epilogue/broadcast_load_epilogue_c3x.hpp",
]
include = [ "." ]
depends = [ "cutlass", "torch" ]

[kernel.fp8_common]
capabilities = [ "7.5", "8.0", "8.6", "8.7", "8.9", "9.0", "9.0a" ]
src = [
  "fp8/common.cu",
  "fp8/common.cuh",
  "dispatch_utils.h"
]
include = [ "." ]
depends = [ "torch" ]

[kernel.fp8_marlin]
capabilities = [ "8.0", "8.6", "8.7", "8.9", "9.0", "9.0a" ]
src = [
  "fp8/fp8_marlin.cu",
  "gptq_marlin/marlin.cuh",
  "gptq_marlin/marlin_dtypes.cuh",
]
#include = [ "." ]
depends = [ "torch" ]

[kernel.int8_common]
capabilities = [ "7.5", "8.0", "8.6", "8.7", "8.9", "9.0", "9.0a" ]
src = [
  "compressed_tensors/int8_quant_kernels.cu",
  "dispatch_utils.h"
]
include = [ "." ]
depends = [ "torch" ]