#!/usr/bin/env python
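# Build configuration script for NNPACK, written against the confu build system.
# It selects a SIMD backend (x86_64 PeachPy kernels, ARM NEON, portable psimd, or
# plain scalar C) based on the target platform, and can trim the build to
# inference-only and/or convolution-only code paths.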
import confu
parser = confu.standard_parser()
parser.add_argument("--backend", dest="backend", default="auto",
choices=["auto", "psimd", "scalar"])
parser.add_argument("--inference-only", dest="inference_only", default=False,
action="store_true",
help="Build only inference/forward pass functions to reduce library size")
parser.add_argument("--convolution-only", dest="convolution_only", default=False,
action="store_true",
help="Build only convolution functions to reduce library size")
def main(args):
options = parser.parse_args(args)
backend = options.backend
if backend == "auto":
if options.target.is_x86_64:
backend = "x86_64"
elif options.target.is_arm or options.target.is_arm64:
backend = "arm"
elif options.target.is_emscripten:
backend = "scalar"
else:
backend = "psimd"
build = confu.Build.from_options(options)
macros = dict()
if backend == "psimd":
macros["NNP_BACKEND_PSIMD"] = 1
if backend == "scalar":
macros["NNP_BACKEND_SCALAR"] = 1
export_macros = dict()
export_macros["NNP_CONVOLUTION_ONLY"] = int(options.convolution_only)
export_macros["NNP_INFERENCE_ONLY"] = int(options.inference_only)
macros.update(export_macros)
build.export_cpath("include", ["nnpack.h"])
with build.options(source_dir="src", macros=macros,
deps={
(build.deps.pthreadpool, build.deps.cpuinfo, build.deps.fxdiv, build.deps.fp16): any,
build.deps.psimd: backend == "psimd" or backend == "arm",
},
extra_include_dirs={
("src", "src/ref"): any,
"src/x86_64-fma": options.target.is_x86_64
}):
nnpack_objects = [
build.cc("init.c"),
build.cc("convolution-inference.c"),
]
if not options.convolution_only:
# Fully-connected, pooling, Softmax, ReLU layers
nnpack_objects += [
build.cc("fully-connected-inference.c"),
build.cc("pooling-output.c"),
build.cc("softmax-output.c"),
build.cc("relu-output.c"),
]
if not options.inference_only:
# Training functions for fully-connected and ReLU layers
nnpack_objects += [
build.cc("fully-connected-output.c"),
build.cc("relu-input-gradient.c"),
]
if not options.inference_only:
# Training functions for convolutional layer
nnpack_objects += [
build.cc("convolution-output.c"),
build.cc("convolution-input-gradient.c"),
build.cc("convolution-kernel-gradient.c"),
]
if backend == "x86_64":
arch_nnpack_objects = [
# Transformations
build.peachpy("x86_64-fma/2d-fourier-8x8.py"),
build.peachpy("x86_64-fma/2d-fourier-16x16.py"),
build.peachpy("x86_64-fma/2d-winograd-8x8-3x3.py"),
# Tuple GEMM
build.peachpy("x86_64-fma/blas/s8gemm.py"),
build.peachpy("x86_64-fma/blas/c8gemm.py"),
build.peachpy("x86_64-fma/blas/s4c6gemm.py"),
# Direct convolution
build.peachpy("x86_64-fma/blas/conv1x1.py"),
# BLAS microkernels
build.peachpy("x86_64-fma/blas/sgemm.py"),
]
if not options.convolution_only:
arch_nnpack_objects += [
# Activations
build.peachpy("x86_64-fma/softmax.py"),
build.cc("x86_64-fma/softmax.c"),
build.peachpy("x86_64-fma/relu.py"),
# Pooling
build.peachpy("x86_64-fma/max-pooling.py"),
# BLAS microkernels
build.peachpy("x86_64-fma/blas/sdotxf.py"),
build.peachpy("x86_64-fma/blas/shdotxf.py"),
]
elif backend == "scalar":
arch_nnpack_objects = [
# Transformations
build.cc("scalar/2d-fourier-8x8.c"),
build.cc("scalar/2d-fourier-16x16.c"),
build.cc("scalar/2d-winograd-8x8-3x3.c"),
# Tuple GEMM
build.cc("scalar/blas/s2gemm.c"),
build.cc("scalar/blas/cgemm-conjb.c"),
# Direct convolution
build.cc("scalar/blas/conv1x1.c"),
# BLAS microkernels
build.cc("scalar/blas/sgemm.c"),
]
if not options.inference_only:
arch_nnpack_objects += [
# Tuple GEMM
build.cc("scalar/blas/s2gemm-transc.c"),
build.cc("scalar/blas/cgemm.c"),
build.cc("scalar/blas/cgemm-conjb-transc.c"),
]
if not options.convolution_only:
arch_nnpack_objects += [
# Activations
build.cc("scalar/relu.c"),
build.cc("scalar/softmax.c"),
# BLAS microkernels
build.cc("scalar/blas/sdotxf.c"),
build.cc("scalar/blas/shdotxf.c"),
]
elif backend == "arm":
from confu import arm
with build.options(isa=arm.neon+arm.fp16 if options.target.is_arm else None):
arch_nnpack_objects = [
# Transformations
build.cc("psimd/2d-fourier-8x8.c"),
build.cc("psimd/2d-fourier-16x16.c"),
build.cc("neon/2d-winograd-8x8-3x3.c"),
build.cc("neon/2d-winograd-8x8-3x3-fp16.c"),
# Tuple GEMM
build.cc("neon/blas/h4gemm.c"),
build.cc("neon/blas/s4gemm.c"),
build.cc("neon/blas/c4gemm-conjb.c"),
build.cc("neon/blas/s4c2gemm-conjb.c"),
# Direct convolution
build.cc("neon/blas/conv1x1.c"),
# BLAS microkernels
build.cc("neon/blas/sgemm.c"),
]
if not options.inference_only:
arch_nnpack_objects += [
# Transformations
build.cc("psimd/2d-winograd-8x8-3x3.c"),
# Tuple GEMM
build.cc("neon/blas/c4gemm.c"),
build.cc("neon/blas/s4c2gemm.c"),
build.cc("neon/blas/c4gemm-conjb-transc.c"),
build.cc("neon/blas/s4c2gemm-conjb-transc.c"),
]
if not options.convolution_only:
arch_nnpack_objects += [
# ReLU and Softmax
build.cc("neon/relu.c"),
build.cc("psimd/softmax.c"),
# BLAS microkernels
build.cc("neon/blas/sdotxf.c"),
build.cc("psimd/blas/shdotxf.c"),
]
if options.target.is_arm:
# Functions implemented in assembly
arch_nnpack_objects += [
build.cc("neon/blas/h4gemm-aarch32.S"),
build.cc("neon/blas/s4gemm-aarch32.S"),
build.cc("neon/blas/sgemm-aarch32.S"),
]
elif backend == "psimd":
arch_nnpack_objects = [
# Transformations
build.cc("psimd/2d-fourier-8x8.c"),
build.cc("psimd/2d-fourier-16x16.c"),
build.cc("psimd/2d-winograd-8x8-3x3.c"),
# Tuple GEMM
build.cc("psimd/blas/s4gemm.c"),
build.cc("psimd/blas/c4gemm-conjb.c"),
build.cc("psimd/blas/s4c2gemm-conjb.c"),
# Direct convolution
build.cc("psimd/blas/conv1x1.c"),
# BLAS microkernels
build.cc("psimd/blas/sgemm.c"),
]
if not options.inference_only:
arch_nnpack_objects += [
# Tuple GEMM
build.cc("psimd/blas/c4gemm.c"),
build.cc("psimd/blas/s4c2gemm.c"),
build.cc("psimd/blas/c4gemm-conjb-transc.c"),
build.cc("psimd/blas/s4c2gemm-conjb-transc.c"),
]
if not options.convolution_only:
arch_nnpack_objects += [
# Activations
build.cc("psimd/relu.c"),
build.cc("psimd/softmax.c"),
# BLAS microkernels
build.cc("psimd/blas/sdotxf.c"),
build.cc("psimd/blas/shdotxf.c"),
]
reference_layer_objects = [
build.cc("ref/convolution-output.c"),
build.cc("ref/convolution-input-gradient.c"),
build.cc("ref/convolution-kernel.c"),
build.cc("ref/fully-connected-output.c"),
build.cc("ref/max-pooling-output.c"),
build.cc("ref/softmax-output.c"),
build.cc("ref/relu-output.c"),
build.cc("ref/relu-input-gradient.c"),
]
reference_fft_objects = [
build.cc("ref/fft/aos.c"),
build.cc("ref/fft/soa.c"),
build.cc("ref/fft/forward-real.c"),
build.cc("ref/fft/forward-dualreal.c"),
build.cc("ref/fft/inverse-real.c"),
build.cc("ref/fft/inverse-dualreal.c"),
]
if backend == "x86_64":
arch_fft_stub_objects = [
build.peachpy("x86_64-fma/fft-soa.py"),
build.peachpy("x86_64-fma/fft-aos.py"),
build.peachpy("x86_64-fma/fft-dualreal.py"),
build.peachpy("x86_64-fma/ifft-dualreal.py"),
build.peachpy("x86_64-fma/fft-real.py"),
build.peachpy("x86_64-fma/ifft-real.py"),
]
arch_winograd_stub_objects = [
build.peachpy("x86_64-fma/winograd-f6k3.py"),
]
arch_math_stub_objects = [
]
elif backend == "scalar":
arch_fft_stub_objects = [
build.cc("scalar/fft-aos.c"),
build.cc("scalar/fft-soa.c"),
build.cc("scalar/fft-real.c"),
build.cc("scalar/fft-dualreal.c"),
]
arch_winograd_stub_objects = [
build.cc("scalar/winograd-f6k3.c"),
]
elif backend == "psimd" or backend == "arm":
arch_fft_stub_objects = [
build.cc("psimd/fft-aos.c"),
build.cc("psimd/fft-soa.c"),
build.cc("psimd/fft-real.c"),
build.cc("psimd/fft-dualreal.c"),
]
if backend == "psimd":
arch_winograd_stub_objects = [
build.cc("psimd/winograd-f6k3.c"),
]
else:
# ARM NEON Winograd transform optionally uses FP16 storage
with build.options(isa=arm.neon+arm.fp16 if options.target.is_arm else None):
arch_winograd_stub_objects = [
build.cc("neon/winograd-f6k3.c"),
]
arch_math_stub_objects = [
build.cc("psimd/exp.c"),
]
fft_objects = reference_fft_objects + arch_fft_stub_objects
nnpack_objects = nnpack_objects + arch_nnpack_objects
build.static_library("nnpack", nnpack_objects)
# Build tests for micro-kernels. Link against the micro-kernel implementations
with build.options(source_dir="test", extra_include_dirs="test",
deps={
(build.deps.googletest, build.deps.cpuinfo, build.deps.clog, build.deps.fp16): any,
"log": build.target.is_android}):
build.unittest("fourier-reference-test",
reference_fft_objects + [build.cxx("fourier/reference.cc")])
if backend == "x86_64":
build.smoketest("fourier-test",
reference_fft_objects + arch_fft_stub_objects + [build.cxx("fourier/x86_64-avx2.cc")])
build.smoketest("winograd-test",
arch_winograd_stub_objects + arch_nnpack_objects + [build.cxx("winograd/x86_64-fma3.cc")])
build.smoketest("sgemm-test",
arch_nnpack_objects + [build.cxx("sgemm/x86_64-fma3.cc")])
elif backend == "psimd":
build.smoketest("fourier-test",
reference_fft_objects + arch_fft_stub_objects + [build.cxx("fourier/psimd.cc")])
build.smoketest("winograd-test",
arch_winograd_stub_objects + arch_nnpack_objects + [build.cxx("winograd/psimd.cc")])
build.smoketest("sgemm-test",
arch_nnpack_objects + [build.cxx("sgemm/psimd.cc")])
elif backend == "arm":
# No ARM-specific Fourier implementation; use PSIMD
build.smoketest("fourier-test",
reference_fft_objects + arch_fft_stub_objects + [build.cxx("fourier/psimd.cc")])
build.smoketest("winograd-test",
arch_winograd_stub_objects + arch_nnpack_objects + [build.cxx("winograd/neon.cc")])
build.smoketest("sgemm-test",
arch_nnpack_objects + [build.cxx("sgemm/neon.cc")])
build.smoketest("sxgemm-test",
arch_nnpack_objects + [build.cxx("sxgemm/neon.cc")])
build.smoketest("hxgemm-test",
arch_nnpack_objects + [build.cxx("hxgemm/neon.cc")])
elif backend == "scalar":
build.smoketest("fourier-test",
reference_fft_objects + arch_fft_stub_objects + [build.cxx("fourier/scalar.cc")])
build.smoketest("winograd-test",
arch_winograd_stub_objects + arch_nnpack_objects + [build.cxx("winograd/scalar.cc")])
build.smoketest("sgemm-test",
arch_nnpack_objects + [build.cxx("sgemm/scalar.cc")])
# Build tests for layers. Link against the library.
with build.options(source_dir="test", include_dirs="test", deps={
(build, build.deps.pthreadpool, build.deps.cpuinfo, build.deps.clog, build.deps.googletest.core, build.deps.fp16): any,
"rt": build.target.is_linux,
"log": build.target.is_android,
}):
if not options.inference_only:
build.smoketest("convolution-output-smoketest",
reference_layer_objects + [build.cxx("convolution-output/smoke.cc")])
build.unittest("convolution-output-alexnet-test",
reference_layer_objects + [build.cxx("convolution-output/alexnet.cc")])
build.unittest("convolution-output-vgg-a-test",
reference_layer_objects + [build.cxx("convolution-output/vgg-a.cc")])
build.unittest("convolution-output-overfeat-fast-test",
reference_layer_objects + [build.cxx("convolution-output/overfeat-fast.cc")])
build.smoketest("convolution-input-gradient-smoketest",
reference_layer_objects + [build.cxx("convolution-input-gradient/smoke.cc")])
build.unittest("convolution-input-gradient-alexnet-test",
reference_layer_objects + [build.cxx("convolution-input-gradient/alexnet.cc")])
build.unittest("convolution-input-gradient-vgg-a-test",
reference_layer_objects + [build.cxx("convolution-input-gradient/vgg-a.cc")])
build.unittest("convolution-input-gradient-overfeat-fast-test",
reference_layer_objects + [build.cxx("convolution-input-gradient/overfeat-fast.cc")])
build.smoketest("convolution-kernel-gradient-smoketest",
reference_layer_objects + [build.cxx("convolution-kernel-gradient/smoke.cc")])
build.unittest("convolution-kernel-gradient-alexnet-test",
reference_layer_objects + [build.cxx("convolution-kernel-gradient/alexnet.cc")])
build.unittest("convolution-kernel-gradient-vgg-a-test",
reference_layer_objects + [build.cxx("convolution-kernel-gradient/vgg-a.cc")])
build.unittest("convolution-kernel-gradient-overfeat-fast-test",
reference_layer_objects + [build.cxx("convolution-kernel-gradient/overfeat-fast.cc")])
build.smoketest("convolution-inference-smoketest",
reference_layer_objects + [build.cxx("convolution-inference/smoke.cc")])
build.unittest("convolution-inference-alexnet-test",
reference_layer_objects + [build.cxx("convolution-inference/alexnet.cc")])
build.unittest("convolution-inference-vgg-a-test",
reference_layer_objects + [build.cxx("convolution-inference/vgg-a.cc")])
build.unittest("convolution-inference-overfeat-fast-test",
reference_layer_objects + [build.cxx("convolution-inference/overfeat-fast.cc")])
if not options.convolution_only:
build.unittest("fully-connected-inference-alexnet-test",
reference_layer_objects + [build.cxx("fully-connected-inference/alexnet.cc")])
build.unittest("fully-connected-inference-vgg-a-test",
reference_layer_objects + [build.cxx("fully-connected-inference/vgg-a.cc")])
build.unittest("fully-connected-inference-overfeat-fast-test",
reference_layer_objects + [build.cxx("fully-connected-inference/overfeat-fast.cc")])
if not options.inference_only:
build.smoketest("fully-connected-output-smoketest",
reference_layer_objects + [build.cxx("fully-connected-output/smoke.cc")])
build.unittest("fully-connected-output-alexnet-test",
reference_layer_objects + [build.cxx("fully-connected-output/alexnet.cc")])
build.unittest("fully-connected-output-vgg-a-test",
reference_layer_objects + [build.cxx("fully-connected-output/vgg-a.cc")])
build.unittest("fully-connected-output-overfeat-fast-test",
reference_layer_objects + [build.cxx("fully-connected-output/overfeat-fast.cc")])
build.smoketest("max-pooling-output-smoketest",
reference_layer_objects + [build.cxx("max-pooling-output/smoke.cc")])
build.unittest("max-pooling-output-vgg-a-test",
reference_layer_objects + [build.cxx("max-pooling-output/vgg-a.cc")])
build.unittest("max-pooling-output-overfeat-fast",
reference_layer_objects + [build.cxx("max-pooling-output/overfeat-fast.cc")])
build.unittest("relu-output-alexnet-test",
reference_layer_objects + [build.cxx("relu-output/alexnet.cc")])
build.unittest("relu-output-vgg-a-test",
reference_layer_objects + [build.cxx("relu-output/vgg-a.cc")])
build.unittest("relu-output-overfeat-fast-test",
reference_layer_objects + [build.cxx("relu-output/overfeat-fast.cc")])
if not options.inference_only:
build.unittest("relu-input-gradient-alexnet-test",
reference_layer_objects + [build.cxx("relu-input-gradient/alexnet.cc")])
build.unittest("relu-input-gradient-vgg-a-test",
reference_layer_objects + [build.cxx("relu-input-gradient/vgg-a.cc")])
build.unittest("relu-input-gradient-overfeat-fast-test",
reference_layer_objects + [build.cxx("relu-input-gradient/overfeat-fast.cc")])
build.smoketest("softmax-output-smoketest",
reference_layer_objects + [build.cxx("softmax-output/smoke.cc")])
build.unittest("softmax-output-imagenet-test",
reference_layer_objects + [build.cxx("softmax-output/imagenet.cc")])
# Build automated benchmarks (Google Benchmark based)
with build.options(source_dir="bench", extra_include_dirs=["bench", "test"], macros=macros, deps={
(build, build.deps.pthreadpool, build.deps.cpuinfo, build.deps.clog, build.deps.fp16, build.deps.googlebenchmark): all,
"rt": build.target.is_linux,
"log": build.target.is_android}):
build.benchmark("convolution-inference-bench", build.cxx("convolution-inference.cc"))
build.benchmark("sgemm-bench", build.cxx("sgemm.cc"))
build.benchmark("sxgemm-bench", build.cxx("sxgemm.cc"))
build.benchmark("hxgemm-bench", build.cxx("hxgemm.cc"))
build.benchmark("conv1x1-bench", build.cxx("conv1x1.cc"))
build.benchmark("winograd-bench", build.cxx("winograd.cc"))
# Build benchmarking utilities
if not options.inference_only and not build.target.is_android:
with build.options(source_dir="bench", extra_include_dirs="bench", macros=macros, deps={
(build, build.deps.pthreadpool, build.deps.cpuinfo, build.deps.clog): all,
"rt": build.target.is_linux,
"log": build.target.is_android}):
support_objects = [build.cc("median.c")]
if build.target.is_x86_64:
support_objects += [build.peachpy("memread.py")]
else:
support_objects += [build.cc("memread.c")]
if build.target.is_linux and build.target.is_x86_64:
support_objects += [build.cc("perf_counter.c")]
build.executable("transform-benchmark",
[build.cc("transform.c")] + support_objects)
build.executable("convolution-benchmark",
[build.cc("convolution.c")] + support_objects)
if not options.convolution_only:
build.executable("fully-connected-benchmark",
[build.cc("fully-connected.c")] + support_objects)
build.executable("pooling-benchmark",
[build.cc("pooling.c")] + support_objects)
build.executable("relu-benchmark",
[build.cc("relu.c")] + support_objects)
return build
if __name__ == "__main__":
import sys
main(sys.argv[1:]).generate()
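# Typical usage (a hedged sketch; the exact workflow depends on how confu and
# ninja are set up for this checkout):
#   python ./configure.py --backend=auto --inference-only
#   ninja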
|
#!/usr/bin/env python
from __future__ import print_function
def extract_time(line, prefix):
if line.startswith(prefix):
line = line[len(prefix):].lstrip()
line = line[:line.index(" ms")].rstrip()
return line
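# Hypothetical examples of the expected benchmark output format:
#   extract_time("Time: 12.345 ms", "Time:") -> "12.345"
#   extract_time("Output transform: 0.871 ms", "Time:") -> None (prefix mismatch)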
def convolution(mode, batch_size, input_channels, output_channels, image_size, kernel_size, padding, algorithm, transform_strategy=None, threads=None, verbose=False, use_selldr=False):
import subprocess
if use_selldr:
import os
import sys
nacl_sdk_dir = os.getenv("NACL_SDK_ROOT")
if nacl_sdk_dir is None:
print("Error: can not find Native Client SDK: set NACL_SDK_ROOT envorinment variable and try again", file=sys.stderr)
sys.exit(1)
benchmark_args = [os.path.join(nacl_sdk_dir, "tools", "sel_ldr.py"), "--",
"bin/convolution-benchmark"]
else:
benchmark_args = ["bin/convolution-benchmark"]
benchmark_args += [
"-m", mode,
"-b", str(batch_size),
"-ic", str(input_channels),
"-oc", str(output_channels),
"-is", str(image_size[0]), str(image_size[1]),
"-ip", str(padding),
"-ks", str(kernel_size[0]), str(kernel_size[1]),
"-a", algorithm
]
if mode == "inference" and transform_strategy is not None:
benchmark_args += ["-ts", transform_strategy]
if threads is not None:
benchmark_args += ["-t", str(threads)]
benchmark = subprocess.Popen(benchmark_args, stdout=subprocess.PIPE)
benchmark_stdout, _ = benchmark.communicate()
if benchmark.returncode == 0:
output_lines = [line for line in benchmark_stdout.splitlines() if len(line)]
total, input_transform, kernel_transform, output_transform, block_multiplication, overhead = None, None, None, None, None, None
for output_line in output_lines:
total = total or extract_time(output_line, "Time:")
input_transform = input_transform or extract_time(output_line, "Input transform:")
kernel_transform = kernel_transform or extract_time(output_line, "Kernel transform:")
output_transform = output_transform or extract_time(output_line, "Output transform:")
block_multiplication = block_multiplication or extract_time(output_line, "Block multiplication:")
overhead = overhead or extract_time(output_line, "Overhead:")
if verbose:
return (total, input_transform, kernel_transform, output_transform, block_multiplication, overhead)
else:
return (total,)
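# A hedged usage sketch (the layer shape below is only illustrative):
#   total, = convolution("inference", 1, 64, 128, (56, 56), (3, 3), 1, "wt8x8",
#                        transform_strategy="compute", threads=4)
# With verbose=True the per-stage breakdown (input/kernel/output transform,
# block multiplication, overhead) is returned as well.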
def fully_connected(mode, batch_size, input_channels, output_channels, threads=None, verbose=False, use_selldr=False):
import subprocess
if use_selldr:
import os
import sys
nacl_sdk_dir = os.getenv("NACL_SDK_ROOT")
if nacl_sdk_dir is None:
print("Error: can not find Native Client SDK: set NACL_SDK_ROOT envorinment variable and try again", file=sys.stderr)
sys.exit(1)
benchmark_args = [os.path.join(nacl_sdk_dir, "tools", "sel_ldr.py"), "--",
"bin/fully-connected-benchmark"]
else:
benchmark_args = ["bin/fully-connected-benchmark"]
benchmark_args += [
"-m", mode,
"-b", str(batch_size),
"-ic", str(input_channels),
"-oc", str(output_channels)
]
if threads is not None:
benchmark_args += ["-t", str(threads)]
benchmark = subprocess.Popen(benchmark_args, stdout=subprocess.PIPE)
benchmark_stdout, _ = benchmark.communicate()
if benchmark.returncode == 0:
output_lines = [line for line in benchmark_stdout.splitlines() if len(line)]
total, input_transform, kernel_transform, block_multiplication, overhead = None, None, None, None, None
for output_line in output_lines:
total = total or extract_time(output_line, "Time:")
input_transform = input_transform or extract_time(output_line, "Input packing:")
kernel_transform = kernel_transform or extract_time(output_line, "Kernel packing:")
block_multiplication = block_multiplication or extract_time(output_line, "Block multiplication:")
overhead = overhead or extract_time(output_line, "Overhead:")
if verbose:
return (total, input_transform, kernel_transform, block_multiplication, overhead)
else:
return (total,)
overfeat_fast_layers = [
("conv2", 96, 256, (24, 24), (5, 5), 0),
("conv3", 256, 512, (12, 12), (3, 3), 1),
("conv4", 512, 1024, (12, 12), (3, 3), 1),
("conv5", 1024, 1024, (12, 12), (3, 3), 1),
("fc6", 36864, 3072),
("fc7", 3072, 4096),
("fc8", 4096, 1000),
]
alexnet_layers = [
("conv2", 64, 192, (27, 27), (5, 5), 2),
("conv3", 192, 384, (13, 13), (3, 3), 1),
("conv4", 384, 256, (13, 13), (3, 3), 1),
("conv5", 256, 256, (13, 13), (3, 3), 1),
("fc6", 12544, 4096),
("fc7", 4096, 4096),
("fc8", 4096, 1000),
]
vgg_a_layers = [
("conv1", 3, 64, (224, 224), (3, 3), 1),
("conv2", 64, 128, (112, 112), (3, 3), 1),
("conv3.1", 128, 256, (56, 56), (3, 3), 1),
("conv3.2", 256, 256, (56, 56), (3, 3), 1),
("conv4.1", 256, 512, (28, 28), (3, 3), 1),
("conv4.2", 512, 512, (28, 28), (3, 3), 1),
("conv5", 512, 512, (14, 14), (3, 3), 1),
("fc6", 25088, 4096),
("fc7", 4096, 4096),
("fc8", 4096, 1000),
]
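# Layer descriptors above follow two shapes:
#   convolutional:   (name, input_channels, output_channels, image_size, kernel_size, padding)
#   fully-connected: (name, input_channels, output_channels)
# The unpacking in the benchmarking loops below relies on these layouts.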
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="NNPACK benchmarking script")
parser.add_argument("--enable-selldr", dest="use_selldr", action="store_true")
parser.add_argument("-l", "--layer", dest="layer", required=True, choices=["convolution", "fully-connected", "pooling"])
parser.add_argument("-n", "--network", dest="network", required=True, choices=["vgg-a", "alexnet", "overfeat-fast"])
parser.add_argument("-m", "--mode", dest="mode", required=True, choices=["inference", "output", "input-gradient", "kernel-gradient"])
parser.add_argument("--transform-strategy", dest="transform_strategy", default="compute", choices=["compute", "precompute"])
parser.add_argument("-b", "--batch", dest="batch", type=int)
parser.add_argument("-t", "--threads", dest="threads")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", default=False)
options = parser.parse_args()
network_layers, default_batch = {
"vgg-a": (vgg_a_layers, 64),
"alexnet": (alexnet_layers, 128),
"overfeat-fast": (overfeat_fast_layers, 128)
}[options.network]
layer_prefix = {
"convolution": "conv",
"fully-connected": "fc",
"pooling": "pool"
}[options.layer]
network_layers = [layer for layer in network_layers if layer[0].startswith(layer_prefix)]
batch = default_batch
if options.batch is not None:
batch = options.batch
if batch != 1 and options.mode == "inference":
raise ValueError("Non-unit batch {batch} is not allowed in inference mode".format(batch=batch))
elif options.mode == "inference":
batch = 1
if options.transform_strategy is not None:
if options.layer != "convolution":
raise ValueError("Transform strategy {transform_strategy} is meaningless for non-convolutional layers".format(transform_strategy=transform_strategy))
elif options.mode != "inference":
raise ValueError("Transform strategy {transform_strategy} is meaningless in non-inference mode".format(transform_strategy=transform_strategy))
if options.layer == "convolution":
for name, input_channels, output_channels, image_size, kernel_size, padding in network_layers:
measurements = [name]
for algorithm in ["implicit-gemm", "ft8x8", "ft16x16", "wt8x8"]:
if algorithm.startswith("wt") and kernel_size != (3, 3):
continue
measurements += list(convolution(options.mode, batch, input_channels, output_channels,
image_size, kernel_size, padding, algorithm,
transform_strategy=options.transform_strategy,
threads=options.threads, verbose=options.verbose, use_selldr=options.use_selldr))
print("\t".join(map(str, measurements)))
elif options.layer == "fully-connected":
for name, input_channels, output_channels in network_layers:
measurements = fully_connected(options.mode, batch, input_channels, output_channels,
threads=options.threads, verbose=options.verbose, use_selldr=options.use_selldr)
print("{name}\t{measurements}".format(name=name, measurements="\t".join(measurements)))
|
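# PeachPy kernel used by the benchmarking utilities (built via build.peachpy("memread.py")
# in the configuration script above). read_memory() walks the buffer in 64-byte steps,
# touching one aligned 16-byte chunk per step, presumably to bring the data into cache
# before timed runs. The PeachPy DSL names (Argument, Function, LOAD, ...) come from the
# PeachPy toolchain rather than explicit imports in this snippet.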
arg_mem = Argument(ptr(), "mem")
arg_len = Argument(size_t, "n")
with Function("read_memory", (arg_mem, arg_len)):
reg_mem = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_mem, arg_mem)
reg_len = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_len, arg_len)
main_loop = Loop()
SUB(reg_len, 64)
JB(main_loop.end)
with main_loop:
MOVAPS(xmm0, [reg_mem])
ADD(reg_mem, 64)
SUB(reg_len, 64)
JAE(main_loop.begin)
RETURN()
|
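# AVX2/FMA3 PeachPy kernels for 1D FFTs over structure-of-arrays (SoA) data:
# nnp_fft8_soa__avx2 / nnp_ifft8_soa__avx2 operate on one YMM register of real parts and
# one of imaginary parts; nnp_fft16_soa__avx2 / nnp_ifft16_soa__avx2 use two of each.
# The actual butterflies live in fft.complex_soa; these wrappers only load, transform,
# and store.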
import fft.complex_soa
arg_t = Argument(ptr(const_float_), name="t")
arg_f = Argument(ptr(float_), name="f")
with Function("nnp_fft16_soa__avx2",
(arg_t, arg_f),
target=uarch.default + isa.fma3 + isa.avx2):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_real = YMMRegister(), YMMRegister()
ymm_imag = YMMRegister(), YMMRegister()
for i, ymm_data in enumerate(ymm_real + ymm_imag):
VMOVUPS(ymm_data, [reg_t + i * YMMRegister.size])
fft.complex_soa.fft16_within_rows(ymm_real, ymm_imag)
for i, ymm_data in enumerate(ymm_real + ymm_imag):
VMOVUPS([reg_f + i * YMMRegister.size], ymm_data)
RETURN()
with Function("nnp_fft8_soa__avx2",
(arg_t, arg_f),
target=uarch.default + isa.fma3 + isa.avx2):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_real, ymm_imag = YMMRegister(), YMMRegister()
VMOVUPS(ymm_real, [reg_t])
VMOVUPS(ymm_imag, [reg_t + YMMRegister.size])
fft.complex_soa.fft8_within_rows(ymm_real, ymm_imag)
VMOVUPS([reg_f], ymm_real)
VMOVUPS([reg_f + YMMRegister.size], ymm_imag)
RETURN()
with Function("nnp_ifft8_soa__avx2",
(arg_t, arg_f),
target=uarch.default + isa.fma3 + isa.avx2):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_real, ymm_imag = YMMRegister(), YMMRegister()
VMOVUPS(ymm_real, [reg_t])
VMOVUPS(ymm_imag, [reg_t + YMMRegister.size])
fft.complex_soa.fft8_within_rows(ymm_real, ymm_imag, transformation="inverse")
VMOVUPS([reg_f], ymm_real)
VMOVUPS([reg_f + YMMRegister.size], ymm_imag)
RETURN()
with Function("nnp_ifft16_soa__avx2",
(arg_f, arg_t),
target=uarch.default + isa.fma3 + isa.avx2):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
ymm_real = YMMRegister(), YMMRegister()
ymm_imag = YMMRegister(), YMMRegister()
for i, ymm_data in enumerate(ymm_real + ymm_imag):
VMOVUPS(ymm_data, [reg_f + i * YMMRegister.size])
fft.complex_soa.ifft16_within_rows(ymm_real, ymm_imag)
for i, ymm_data in enumerate(ymm_real + ymm_imag):
VMOVUPS([reg_t + i * YMMRegister.size], ymm_data)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
import fft16x16
import fft.complex_soa
import fft.two_real_to_two_complex_soa_perm_planar
import fft.two_complex_soa_perm_to_two_real_planar
arg_t_pointer = Argument(ptr(const_float_), name="t")
arg_f_pointer = Argument(ptr(float_), name="f")
arg_x_pointer = Argument(ptr(const_float_), name="x")
arg_t_stride = Argument(size_t, name="stride_t")
arg_f_stride = Argument(size_t, name="stride_f")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
for post_operation in ["stream", "store"]:
fft16x16_arguments = (arg_t_pointer, arg_f_pointer, arg_t_stride, arg_f_stride, arg_row_count, arg_column_count, arg_row_offset, arg_column_offset)
with Function("nnp_fft16x16_with_offset_and_{post_operation}__avx2".format(post_operation=post_operation),
fft16x16_arguments, target=uarch.default + isa.fma3 + isa.avx2):
reg_t0 = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t0, arg_t_pointer)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f_pointer)
reg_t_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t_stride, arg_t_stride)
reg_f_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f_stride, arg_f_stride)
reg_row_end = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_end, arg_row_count)
reg_column_end = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_end, arg_column_count)
reg_row_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_start, arg_row_offset)
ADD(reg_row_end, reg_row_start)
reg_column_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_start, arg_column_offset)
ADD(reg_column_end, reg_column_start)
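# Build per-lane column masks: lane i should be loaded iff column_start <= i < column_end.
# VPCMPGTD yields (column_start > i) and (column_end > i); VPANDN(dst, a, b) computes
# (~a & b), i.e. NOT(column_start > i) AND (column_end > i).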
ymm_column_start, ymm_column_end = YMMRegister(), YMMRegister()
VMOVD(ymm_column_start.as_xmm, reg_column_start.as_dword)
VMOVD(ymm_column_end.as_xmm, reg_column_end.as_dword)
VPBROADCASTD(ymm_column_start, ymm_column_start.as_xmm)
VPBROADCASTD(ymm_column_end, ymm_column_end.as_xmm)
ymm_column_01234567 = YMMRegister()
VMOVDQA(ymm_column_01234567, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
ymm_column_start_gt_01234567, ymm_column_end_gt_01234567 = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_column_start_gt_01234567, ymm_column_start, ymm_column_01234567)
VPCMPGTD(ymm_column_end_gt_01234567, ymm_column_end, ymm_column_01234567)
ymm_column_89ABCDEF = YMMRegister()
VMOVDQA(ymm_column_89ABCDEF, Constant.uint32x8(8, 9, 10, 11, 12, 13, 14, 15))
ymm_column_start_gt_89ABCDEF, ymm_column_end_gt_89ABCDEF = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_column_start_gt_89ABCDEF, ymm_column_start, ymm_column_89ABCDEF)
VPCMPGTD(ymm_column_end_gt_89ABCDEF, ymm_column_end, ymm_column_89ABCDEF)
ymm_load_mask_columns_0_to_8 = YMMRegister()
VPANDN(ymm_load_mask_columns_0_to_8, ymm_column_start_gt_01234567, ymm_column_end_gt_01234567)
ymm_load_mask_columns_8_to_16 = YMMRegister()
VPANDN(ymm_load_mask_columns_8_to_16, ymm_column_start_gt_89ABCDEF, ymm_column_end_gt_89ABCDEF)
load_mask_columns_8_to_16 = LocalVariable(ymm_load_mask_columns_8_to_16)
VMOVDQA(load_mask_columns_8_to_16, ymm_load_mask_columns_8_to_16)
# The data pointer (reg_t0) refers to the first loaded element, which belongs in lane `reg_column_start`.
# However, VMASKMOVPS expects a pointer to lane 0, even if that lane is masked out.
# Adjust the pointer by subtracting column_offset, converted to bytes.
SHL(reg_column_start, 2)
SUB(reg_t0, reg_column_start.as_qword)
# Multiply stride by sizeof(float) to convert from elements to bytes
SHL(reg_t_stride, 2)
# t8_offset = stride * (8 - row_start)
reg_t8_offset = GeneralPurposeRegister64()
MOV(reg_t8_offset.as_dword, 8)
SUB(reg_t8_offset.as_dword, reg_row_start)
IMUL(reg_t8_offset, reg_t_stride)
reg_t8 = GeneralPurposeRegister64()
LEA(reg_t8, [reg_t0 + reg_t8_offset * 1])
CMP(reg_row_start, 8)
CMOVAE(reg_t8, reg_t0)
reg_t0_column_8, reg_t8_column_8 = GeneralPurposeRegister64(), GeneralPurposeRegister64()
LEA(reg_t0_column_8, [reg_t0 + YMMRegister.size])
LEA(reg_t8_column_8, [reg_t8 + YMMRegister.size])
vfft_columns_0_to_8 = [LocalVariable(YMMRegister.size) for _ in range(16)]
vfft_columns_8_to_16 = [YMMRegister() if i < 4 else LocalVariable(YMMRegister.size) for i in range(16)]
fft16x16.forward_vfft(reg_t0, reg_t8, reg_t_stride, data_out=vfft_columns_0_to_8,
reg_row_start=reg_row_start, reg_row_end=reg_row_end, ymm_load_mask=ymm_load_mask_columns_0_to_8)
ymm_load_mask_columns_8_to_16 = YMMRegister()
VMOVDQA(ymm_load_mask_columns_8_to_16, load_mask_columns_8_to_16)
fft16x16.forward_vfft(reg_t0_column_8, reg_t8_column_8, reg_t_stride, data_out=vfft_columns_8_to_16,
reg_row_start=reg_row_start, reg_row_end=reg_row_end, ymm_load_mask=ymm_load_mask_columns_8_to_16)
for row_batch_start, row_batch_end in [(0, 2), (2, 5), (5, 8)]:
ymm_wr_list = [(YMMRegister(), YMMRegister()) for _ in range(row_batch_start, row_batch_end)]
ymm_wi_list = [(YMMRegister(), YMMRegister()) for _ in range(row_batch_start, row_batch_end)]
for row_offset, (ymm_wr, ymm_wi) in enumerate(zip(ymm_wr_list, ymm_wi_list)):
row = row_batch_start + row_offset
VMOVAPS(ymm_wr[0], vfft_columns_0_to_8[row*2+0])
VMOVAPS(ymm_wr[1], vfft_columns_8_to_16[row*2+0])
VMOVAPS(ymm_wi[0], vfft_columns_0_to_8[row*2+1])
VMOVAPS(ymm_wi[1], vfft_columns_8_to_16[row*2+1])
fft.complex_soa.fft16_within_rows(ymm_wr_list, ymm_wi_list, bit_reversal=False)
if row_batch_start == 0:
fft.two_real_to_two_complex_soa_perm_planar.fft16_within_rows_postprocess(ymm_wr_list[0], ymm_wi_list[0], bit_reversal=True)
VSTOREPS = {"store": VMOVAPS, "stream": VMOVNTPS}[post_operation]
for row_batch_offset, (ymm_wr, ymm_wi) in enumerate(zip(ymm_wr_list, ymm_wi_list)):
row = row_batch_start + row_batch_offset
for column in range(2):
VSTOREPS([reg_f], ymm_wr[column])
VSTOREPS([reg_f + YMMRegister.size], ymm_wi[column])
if row + 1 != 8 or column + 1 != 2:
ADD(reg_f, reg_f_stride)
RETURN()
arg_f_pointer = Argument(ptr(const_float_), name="f_pointer")
arg_t_pointer = Argument(ptr(float_), name="t_pointer")
arg_bias = Argument(ptr(const_float_), name="bias_pointer")
arg_f_stride = Argument(size_t, name="f_stride")
arg_t_stride = Argument(size_t, name="t_stride")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
for with_offset, with_bias, with_relu in [(True, False, False), (False, True, False), (False, True, True)]:
if with_bias:
ifft16x16_arguments = (arg_f_pointer, arg_t_pointer, arg_bias, arg_f_stride, arg_t_stride, arg_row_count, arg_column_count)
else:
ifft16x16_arguments = (arg_f_pointer, arg_t_pointer, arg_f_stride, arg_t_stride, arg_row_count, arg_column_count)
if with_offset:
ifft16x16_arguments += (arg_row_offset, arg_column_offset)
with Function("nnp_ifft16x16{with_offset}{with_bias}{with_relu}__avx2".format(
with_offset="_with_offset" if with_offset else "",
with_bias="_with_bias" if with_bias else "",
with_relu="_with_relu" if with_relu else ""),
ifft16x16_arguments, target=uarch.default + isa.fma3 + isa.avx2):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f_pointer)
reg_t0 = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t0, arg_t_pointer)
if with_bias:
reg_bias = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_bias, arg_bias)
reg_f_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f_stride, arg_f_stride)
reg_t_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t_stride, arg_t_stride)
reg_row_end = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_end, arg_row_count)
reg_column_end = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_end, arg_column_count)
if with_offset:
reg_row_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_start, arg_row_offset)
ADD(reg_row_end, reg_row_start)
reg_column_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_start, arg_column_offset)
ADD(reg_column_end, reg_column_start)
else:
reg_row_start = None
if with_offset:
ymm_column_start, ymm_column_end = YMMRegister(), YMMRegister()
VMOVD(ymm_column_start.as_xmm, reg_column_start.as_dword)
VMOVD(ymm_column_end.as_xmm, reg_column_end.as_dword)
VPBROADCASTD(ymm_column_start, ymm_column_start.as_xmm)
VPBROADCASTD(ymm_column_end, ymm_column_end.as_xmm)
ymm_column_01234567 = YMMRegister()
VMOVDQA(ymm_column_01234567, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
ymm_column_start_gt_01234567, ymm_column_end_gt_01234567 = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_column_start_gt_01234567, ymm_column_start, ymm_column_01234567)
VPCMPGTD(ymm_column_end_gt_01234567, ymm_column_end, ymm_column_01234567)
ymm_column_89ABCDEF = YMMRegister()
VMOVDQA(ymm_column_89ABCDEF, Constant.uint32x8(8, 9, 10, 11, 12, 13, 14, 15))
ymm_column_start_gt_89ABCDEF, ymm_column_end_gt_89ABCDEF = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_column_start_gt_89ABCDEF, ymm_column_start, ymm_column_89ABCDEF)
VPCMPGTD(ymm_column_end_gt_89ABCDEF, ymm_column_end, ymm_column_89ABCDEF)
ymm_store_mask_columns_0_to_8 = YMMRegister()
VPANDN(ymm_store_mask_columns_0_to_8, ymm_column_start_gt_01234567, ymm_column_end_gt_01234567)
store_mask_columns_0_to_8 = LocalVariable(ymm_store_mask_columns_0_to_8)
VMOVDQA(store_mask_columns_0_to_8, ymm_store_mask_columns_0_to_8)
ymm_store_mask_columns_8_to_16 = YMMRegister()
VPANDN(ymm_store_mask_columns_8_to_16, ymm_column_start_gt_89ABCDEF, ymm_column_end_gt_89ABCDEF)
store_mask_columns_8_to_16 = LocalVariable(ymm_store_mask_columns_8_to_16)
VMOVDQA(store_mask_columns_8_to_16, ymm_store_mask_columns_8_to_16)
SHL(reg_column_start, 2)
SUB(reg_t0, reg_column_start.as_qword)
else:
ymm_column_end = YMMRegister()
VMOVD(ymm_column_end.as_xmm, reg_column_end.as_dword)
VPBROADCASTD(ymm_column_end, ymm_column_end.as_xmm)
ymm_store_mask_columns_0_to_8, ymm_store_mask_columns_8_to_16 = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_store_mask_columns_0_to_8, ymm_column_end, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
VPCMPGTD(ymm_store_mask_columns_8_to_16, ymm_column_end, Constant.uint32x8(8, 9, 10, 11, 12, 13, 14, 15))
store_mask_columns_0_to_8 = LocalVariable(ymm_store_mask_columns_0_to_8)
VMOVDQA(store_mask_columns_0_to_8, ymm_store_mask_columns_0_to_8)
store_mask_columns_8_to_16 = LocalVariable(ymm_store_mask_columns_8_to_16)
VMOVDQA(store_mask_columns_8_to_16, ymm_store_mask_columns_8_to_16)
# Multiply stride by sizeof(float) to convert from elements to bytes
SHL(reg_t_stride, 2)
vfft_columns_0_to_8 = [YMMRegister() if i > 10 else LocalVariable(YMMRegister.size) for i in range(16)]
vfft_columns_8_to_16 = [LocalVariable(YMMRegister.size) for _ in range(16)]
for row_batch_start, row_batch_end in [(0, 2), (2, 5), (5, 8)]:
ymm_wr_list = [(YMMRegister(), YMMRegister()) for _ in range(row_batch_start, row_batch_end)]
ymm_wi_list = [(YMMRegister(), YMMRegister()) for _ in range(row_batch_start, row_batch_end)]
for row_offset, (ymm_wr, ymm_wi) in enumerate(zip(ymm_wr_list, ymm_wi_list)):
row = row_batch_start + row_offset
VMOVAPS(ymm_wr[0], [reg_f])
VMOVAPS(ymm_wi[0], [reg_f + YMMRegister.size])
ADD(reg_f, reg_f_stride)
if with_bias and row == 0:
ymm_bias = YMMRegister()
VMOVSS(ymm_bias.as_xmm, [reg_bias])
VFMADD231PS(ymm_wr[0], ymm_bias, Constant.float32x8(256.0))
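# Note: VMOVSS loaded the bias into lane 0 only, so bias * 256 is added solely to the DC
# coefficient. The 256.0 factor presumably compensates for the 1/256 normalization of the
# 16x16 inverse transform, leaving each output pixel offset by exactly `bias`.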
VMOVAPS(ymm_wr[1], [reg_f])
VMOVAPS(ymm_wi[1], [reg_f + YMMRegister.size])
if row + 1 != 8:
ADD(reg_f, reg_f_stride)
if row_batch_start == 0:
fft.two_complex_soa_perm_to_two_real_planar.ifft16_within_rows_preprocess(ymm_wr_list[0], ymm_wi_list[0], bit_reversal=True)
fft.complex_soa.ifft16_within_rows(ymm_wr_list, ymm_wi_list, bit_reversal=False)
for row_offset, (ymm_wr, ymm_wi) in enumerate(zip(ymm_wr_list, ymm_wi_list)):
row = row_batch_start + row_offset
VMOVAPS(vfft_columns_0_to_8[row*2+0], ymm_wr[0])
VMOVAPS(vfft_columns_8_to_16[row*2+0], ymm_wr[1])
VMOVAPS(vfft_columns_0_to_8[row*2+1], ymm_wi[0])
VMOVAPS(vfft_columns_8_to_16[row*2+1], ymm_wi[1])
if reg_row_start is not None:
# t8_offset = stride * (8 - row_start)
reg_t8_offset = GeneralPurposeRegister64()
MOV(reg_t8_offset.as_dword, 8)
SUB(reg_t8_offset.as_dword, reg_row_start)
IMUL(reg_t8_offset, reg_t_stride)
reg_t8 = GeneralPurposeRegister64()
LEA(reg_t8, [reg_t0 + reg_t8_offset * 1])
CMP(reg_row_start, 8)
CMOVAE(reg_t8, reg_t0)
else:
reg_t8 = GeneralPurposeRegister64()
LEA(reg_t8, [reg_t0 + reg_t_stride * 8])
reg_t0_column_8, reg_t8_column_8 = GeneralPurposeRegister64(), GeneralPurposeRegister64()
LEA(reg_t0_column_8, [reg_t0 + YMMRegister.size])
LEA(reg_t8_column_8, [reg_t8 + YMMRegister.size])
fft16x16.inverse_vfft(reg_t0, reg_t8, reg_t_stride, data_in=vfft_columns_0_to_8,
reg_row_start=reg_row_start, reg_row_end=reg_row_end, store_mask=store_mask_columns_0_to_8, relu=with_relu)
with Block() as store_columns_8_to_16:
CMP(reg_column_end, 8)
JB(store_columns_8_to_16.end)
fft16x16.inverse_vfft(reg_t0_column_8, reg_t8_column_8, reg_t_stride, data_in=vfft_columns_8_to_16, \
reg_row_start=reg_row_start, reg_row_end=reg_row_end, store_mask=store_mask_columns_8_to_16, relu=with_relu)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
from common import butterfly, sqrt2_over_2, cos_npi_over_8, interleave
def fft8_bitreverse(n):
return int(format(n, "03b")[::-1], 2)
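# load_ymm_variable: return the value of `variable` in a YMM register, copying it out of a
# stack-allocated LocalVariable if necessary.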
def load_ymm_variable(variable):
assert isinstance(variable, (YMMRegister, LocalVariable))
ymm_variable = variable
if isinstance(variable, LocalVariable):
assert variable.size == YMMRegister.size
ymm_variable = YMMRegister()
VMOVAPS(ymm_variable, variable)
return ymm_variable
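# store_ymm_result: move `result` into `variable`, swapping virtual register names when the
# destination is itself a YMM register (no data copy) and spilling to memory when the
# destination is a LocalVariable.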
def store_ymm_result(variable, result):
assert isinstance(result, YMMRegister)
if isinstance(variable, YMMRegister):
SWAP.REGISTERS(variable, result)
else:
VMOVAPS(variable, result)
def forward_vfft(reg_t0, reg_t8, reg_t_stride, data_out, reg_row_start=None, reg_row_end=None, ymm_load_mask=None):
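# data_out is a 16-element list interleaving the real and imaginary outputs of the 8
# frequency rows (data_out[0::2] = real, data_out[1::2] = imag); entries may be YMM
# registers or stack LocalVariables. reg_row_start/reg_row_end bound the valid input rows
# (rows outside the range are treated as zero), and ymm_load_mask masks partial columns.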
assert isinstance(reg_t0, GeneralPurposeRegister64)
assert isinstance(reg_t8, GeneralPurposeRegister64)
assert isinstance(reg_t_stride, GeneralPurposeRegister64)
assert isinstance(data_out, list) and len(data_out) == 16
assert ymm_load_mask is None or isinstance(ymm_load_mask, YMMRegister)
out_real, out_imag = data_out[0::2], data_out[1::2]
real, imag = [YMMRegister() for _ in range(8)], [YMMRegister() for _ in range(8)]
imag[0] = LocalVariable(YMMRegister.size)
imag[4] = LocalVariable(YMMRegister.size)
data = interleave(real, imag)
for i, (data_lo, data_hi) in enumerate(zip(data[0:8], data[8:16])):
row_lo = i
row_hi = row_lo + 8
ymm_data_lo, ymm_data_hi = data_lo, data_hi
if isinstance(data_lo, LocalVariable):
ymm_data_lo = YMMRegister()
if isinstance(data_hi, LocalVariable):
ymm_data_hi = YMMRegister()
VXORPS(ymm_data_lo, ymm_data_lo, ymm_data_lo)
skip_data_lo = Label()
if reg_row_start:
CMP(reg_row_start, row_lo)
JA(skip_data_lo)
if reg_row_end:
CMP(reg_row_end, row_lo)
JBE(skip_data_lo)
if ymm_load_mask is None:
VMOVUPS(ymm_data_lo, [reg_t0])
else:
VMASKMOVPS(ymm_data_lo, ymm_load_mask, [reg_t0])
if i + 1 != 8:
ADD(reg_t0, reg_t_stride)
LABEL(skip_data_lo)
VMOVAPS(ymm_data_hi, ymm_data_lo)
skip_data_hi = Label()
if reg_row_start:
CMP(reg_row_start, row_hi)
JA(skip_data_hi)
if reg_row_end:
CMP(reg_row_end, row_hi)
JBE(skip_data_hi)
if ymm_load_mask is None:
VMOVUPS(ymm_data_hi, [reg_t8])
butterfly(ymm_data_lo, ymm_data_hi)
else:
ymm_temp_hi = YMMRegister()
VMASKMOVPS(ymm_temp_hi, ymm_load_mask, [reg_t8])
VSUBPS(ymm_data_hi, ymm_data_lo, ymm_temp_hi)
VADDPS(ymm_data_lo, ymm_data_lo, ymm_temp_hi)
if i + 1 != 8:
ADD(reg_t8, reg_t_stride)
LABEL(skip_data_hi)
if isinstance(data_lo, LocalVariable):
VMOVAPS(data_lo, ymm_data_lo)
if isinstance(data_hi, LocalVariable):
VMOVAPS(data_hi, ymm_data_hi)
# FFT8: multiplication by twiddle factors
fft4_scale_b, fft4_negate_b = {}, {}
fft2_scale_b, fft2_negate_b = {}, {}
# w6.re, w6.im = w6.im, -w6.re
SWAP.REGISTERS(real[6], imag[6])
fft4_negate_b[id(imag[6])] = True
# w5.re, w5.im = SQRT2_OVER_2 * (w5.re + w5.im), SQRT2_OVER_2 * (w5.im - w5.re)
butterfly(imag[5], real[5])
SWAP.REGISTERS(real[5], imag[5])
# w7.re, w7.im = -SQRT2_OVER_2 * (w7.re - w7.im), -SQRT2_OVER_2 * (w7.re + w7.im)
butterfly(real[7], imag[7], negate_b=True)
fft4_negate_b[id(real[7])] = True
fft4_negate_b[id(imag[7])] = True
# Propagate the multiplication by sqrt2_over_2 to the last butterfly of the FFT2 stage
ymm_sqrt2_over_2 = YMMRegister()
fft2_scale_b[id(real[5])] = ymm_sqrt2_over_2
fft2_scale_b[id(imag[5])] = ymm_sqrt2_over_2
fft2_scale_b[id(real[7])] = ymm_sqrt2_over_2
fft2_scale_b[id(imag[7])] = ymm_sqrt2_over_2
# 2x FFT4: butterfly
for data_lo, data_hi in zip(data[0:4] + data[8:12], data[4:8] + data[12:16]):
butterfly(data_lo, data_hi, negate_b=fft4_negate_b.get(id(data_hi), False), scale_b=fft4_scale_b.get(id(data_hi)))
# 2x FFT4: multiplication by twiddle factors
# w3.re, w3.im = w3.im, -w3.re
# w7.re, w7.im = w7.im, -w7.re
SWAP.REGISTERS(real[3], imag[3])
SWAP.REGISTERS(real[7], imag[7])
fft2_negate_b[id(imag[3])] = True
fft2_negate_b[id(imag[7])] = True
# 4x FFT2: butterfly
# Process the first two elements separately
ymm_real0, ymm_real1 = butterfly(real[0], real[1], writeback=False)
store_ymm_result(out_real[4], ymm_real1) # bit-reversal: 1->4
ymm_imag0, ymm_imag1 = butterfly(imag[0], imag[1], negate_out_b=True, writeback=False)
store_ymm_result(out_imag[4], ymm_imag1) # bit-reversal: 1->4
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
for i, (data_lo, data_hi) in enumerate(zip(data[4:6] + data[8:10] + data[12:14], data[6:8] + data[10:12] + data[14:16])):
butterfly(data_lo, data_hi,
negate_b=fft2_negate_b.get(id(data_hi), False), scale_b=fft2_scale_b.get(id(data_hi)))
butterfly(ymm_real0, ymm_imag0)
store_ymm_result(out_real[0], ymm_real0)
store_ymm_result(out_imag[0], ymm_imag0)
# Bit reversal
for i in range(8):
new_i = fft8_bitreverse(i)
if new_i > i:
real[i], real[new_i] = real[new_i], real[i]
imag[i], imag[new_i] = imag[new_i], imag[i]
data = interleave(real, imag)
ymm_two_g2_real, ymm_two_g2_imag = YMMRegister(), YMMRegister()
ymm_two_h2_real, ymm_two_h2_imag = YMMRegister(), YMMRegister()
VADDPS(ymm_two_g2_real, real[2], real[6])
VSUBPS(ymm_two_h2_imag, real[6], real[2])
VSUBPS(ymm_two_g2_imag, imag[2], imag[6])
VADDPS(ymm_two_h2_real, imag[2], imag[6])
ymm_two_g1_real, ymm_two_g1_imag = YMMRegister(), YMMRegister()
ymm_two_h1_real, ymm_two_h1_imag = YMMRegister(), YMMRegister()
ymm_real1 = load_ymm_variable(real[1])
VADDPS(ymm_two_g1_real, ymm_real1, real[7])
VSUBPS(ymm_two_h1_imag, real[7], ymm_real1)
ymm_imag1 = load_ymm_variable(imag[1])
VSUBPS(ymm_two_g1_imag, ymm_imag1, imag[7])
VADDPS(ymm_two_h1_real, ymm_imag1, imag[7])
ymm_two_h2_add, ymm_two_h2_sub = YMMRegister(), YMMRegister()
VADDPS(ymm_two_h2_add, ymm_two_h2_real, ymm_two_h2_imag)
VSUBPS(ymm_two_h2_sub, ymm_two_h2_imag, ymm_two_h2_real)
ymm_two_g3_real, ymm_two_g3_imag = YMMRegister(), YMMRegister()
ymm_two_h3_real, ymm_two_h3_imag = YMMRegister(), YMMRegister()
VADDPS(ymm_two_g3_real, real[3], real[5])
VSUBPS(ymm_two_h3_imag, real[5], real[3])
VSUBPS(ymm_two_g3_imag, imag[3], imag[5])
VADDPS(ymm_two_h3_real, imag[3], imag[5])
# const float two_w2_real = two_g2_real + SQRT2_OVER_2 * (two_h2_real + two_h2_imag);
# const float two_w2_imag = two_g2_imag + SQRT2_OVER_2 * (two_h2_imag - two_h2_real);
# const float two_w6_real = two_g2_real - SQRT2_OVER_2 * (two_h2_real + two_h2_imag);
# const float two_w6_imag = -two_g2_imag + SQRT2_OVER_2 * (two_h2_imag - two_h2_real);
ymm_sqrt2_over_2 = YMMRegister()
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
ymm_two_w2_real, ymm_two_w6_real = YMMRegister(), ymm_two_g2_real
VMOVAPS(ymm_two_w2_real, ymm_two_g2_real)
VFMADD231PS(ymm_two_w2_real, ymm_two_h2_add, ymm_sqrt2_over_2)
VFNMADD231PS(ymm_two_w6_real, ymm_two_h2_add, ymm_sqrt2_over_2)
ymm_two_w2_imag, ymm_two_w6_imag = YMMRegister(), ymm_two_g2_imag
VMOVAPS(ymm_two_w2_imag, ymm_two_g2_imag)
VFMADD231PS(ymm_two_w2_imag, ymm_two_h2_sub, ymm_sqrt2_over_2)
VFMSUB231PS(ymm_two_w6_imag, ymm_two_h2_sub, ymm_sqrt2_over_2)
ymm_half = YMMRegister()
VMOVAPS(ymm_half, Constant.float32x8(0.5))
VMULPS(ymm_two_w2_real, ymm_two_w2_real, ymm_half)
store_ymm_result(out_real[2], ymm_two_w2_real)
VMULPS(ymm_two_w6_real, ymm_two_w6_real, ymm_half)
store_ymm_result(out_real[6], ymm_two_w6_real)
VMULPS(ymm_two_w2_imag, ymm_two_w2_imag, ymm_half)
store_ymm_result(out_imag[2], ymm_two_w2_imag)
VMULPS(ymm_two_w6_imag, ymm_two_w6_imag, ymm_half)
store_ymm_result(out_imag[6], ymm_two_w6_imag)
# const float two_w1_real = two_g1_real + two_h1_real * COS_1PI_OVER_8 + two_h1_imag * COS_3PI_OVER_8;
# const float two_w1_imag = two_g1_imag + two_h1_imag * COS_1PI_OVER_8 - two_h1_real * COS_3PI_OVER_8;
# const float two_w7_real = two_g1_real - two_h1_real * COS_1PI_OVER_8 - two_h1_imag * COS_3PI_OVER_8;
# const float two_w7_imag = -two_g1_imag + two_h1_imag * COS_1PI_OVER_8 - two_h1_real * COS_3PI_OVER_8;
# const float two_w3_real = two_g3_real + two_h3_real * COS_3PI_OVER_8 + two_h3_imag * COS_1PI_OVER_8;
# const float two_w3_imag = two_g3_imag + two_h3_imag * COS_3PI_OVER_8 - two_h3_real * COS_1PI_OVER_8;
# const float two_w5_real = two_g3_real - two_h3_real * COS_3PI_OVER_8 - two_h3_imag * COS_1PI_OVER_8;
# const float two_w5_imag = -two_g3_imag + two_h3_imag * COS_3PI_OVER_8 - two_h3_real * COS_1PI_OVER_8;
ymm_cos_1pi_over_8 = YMMRegister()
VMOVAPS(ymm_cos_1pi_over_8, Constant.float32x8(cos_npi_over_8[1]))
ymm_two_w1_real, ymm_two_w7_real = YMMRegister(), ymm_two_g1_real
VMOVAPS(ymm_two_w1_real, ymm_two_g1_real)
VFMADD231PS(ymm_two_w1_real, ymm_two_h1_real, ymm_cos_1pi_over_8)
VFNMADD231PS(ymm_two_w7_real, ymm_two_h1_real, ymm_cos_1pi_over_8)
ymm_two_w1_imag, ymm_two_w7_imag = YMMRegister(), ymm_two_g1_imag
VMOVAPS(ymm_two_w1_imag, ymm_two_g1_imag)
VFMADD231PS(ymm_two_w1_imag, ymm_two_h1_imag, ymm_cos_1pi_over_8)
VFMSUB231PS(ymm_two_w7_imag, ymm_two_h1_imag, ymm_cos_1pi_over_8)
ymm_two_w3_real, ymm_two_w5_real = YMMRegister(), ymm_two_g3_real
VMOVAPS(ymm_two_w3_real, ymm_two_g3_real)
VFMADD231PS(ymm_two_w3_real, ymm_two_h3_imag, ymm_cos_1pi_over_8)
VFNMADD231PS(ymm_two_w5_real, ymm_two_h3_imag, ymm_cos_1pi_over_8)
ymm_two_w3_imag, ymm_two_w5_imag = YMMRegister(), ymm_two_g3_imag
VMOVAPS(ymm_two_w3_imag, ymm_two_g3_imag)
VFNMADD231PS(ymm_two_w3_imag, ymm_two_h3_real, ymm_cos_1pi_over_8)
VFNMSUB231PS(ymm_two_w5_imag, ymm_two_h3_real, ymm_cos_1pi_over_8)
ymm_cos_3pi_over_8 = YMMRegister()
VMOVAPS(ymm_cos_3pi_over_8, Constant.float32x8(cos_npi_over_8[3]))
VFMADD231PS(ymm_two_w1_real, ymm_two_h1_imag, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_two_w7_real, ymm_two_h1_imag, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_two_w1_imag, ymm_two_h1_real, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_two_w7_imag, ymm_two_h1_real, ymm_cos_3pi_over_8)
VFMADD231PS(ymm_two_w3_real, ymm_two_h3_real, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_two_w5_real, ymm_two_h3_real, ymm_cos_3pi_over_8)
VFMADD231PS(ymm_two_w3_imag, ymm_two_h3_imag, ymm_cos_3pi_over_8)
VFMADD231PS(ymm_two_w5_imag, ymm_two_h3_imag, ymm_cos_3pi_over_8)
ymm_half = YMMRegister()
VMOVAPS(ymm_half, Constant.float32x8(0.5))
VMULPS(ymm_two_w1_real, ymm_two_w1_real, ymm_half)
store_ymm_result(out_real[1], ymm_two_w1_real)
VMULPS(ymm_two_w7_real, ymm_two_w7_real, ymm_half)
store_ymm_result(out_real[7], ymm_two_w7_real)
VMULPS(ymm_two_w1_imag, ymm_two_w1_imag, ymm_half)
store_ymm_result(out_imag[1], ymm_two_w1_imag)
VMULPS(ymm_two_w7_imag, ymm_two_w7_imag, ymm_half)
store_ymm_result(out_imag[7], ymm_two_w7_imag)
VMULPS(ymm_two_w3_real, ymm_two_w3_real, ymm_half)
store_ymm_result(out_real[3], ymm_two_w3_real)
VMULPS(ymm_two_w5_real, ymm_two_w5_real, ymm_half)
store_ymm_result(out_real[5], ymm_two_w5_real)
VMULPS(ymm_two_w3_imag, ymm_two_w3_imag, ymm_half)
store_ymm_result(out_imag[3], ymm_two_w3_imag)
VMULPS(ymm_two_w5_imag, ymm_two_w5_imag, ymm_half)
store_ymm_result(out_imag[5], ymm_two_w5_imag)
def inverse_vfft(reg_t0, reg_t8, reg_t_stride, data_in, reg_row_start=None, reg_row_end=None, store_mask=None, relu=False):
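# Inverse counterpart of forward_vfft: data_in holds the 16 interleaved real/imag column
# vectors, reg_row_start/reg_row_end restrict which output rows are written back,
# store_mask (a LocalVariable) masks partial columns via VMASKMOVPS, and relu=True clamps
# the results to be non-negative before storing.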
assert isinstance(reg_t0, GeneralPurposeRegister64)
assert isinstance(reg_t8, GeneralPurposeRegister64)
assert isinstance(reg_t_stride, GeneralPurposeRegister64)
assert isinstance(data_in, list) and len(data_in) == 16
assert reg_row_end is None or isinstance(reg_row_end, GeneralPurposeRegister32)
assert store_mask is None or isinstance(store_mask, LocalVariable) and store_mask.size == YMMRegister.size
in_real, in_imag = data_in[0::2], data_in[1::2]
ymm_scale_factor = YMMRegister()
VMOVAPS(ymm_scale_factor, Constant.float32x8(0.0625))
ymm_W1_real, ymm_W1_imag = YMMRegister(), YMMRegister()
VMULPS(ymm_W1_real, ymm_scale_factor, in_real[1])
VMULPS(ymm_W1_imag, ymm_scale_factor, in_imag[1])
ymm_W2_real, ymm_W2_imag = YMMRegister(), YMMRegister()
VMULPS(ymm_W2_real, ymm_scale_factor, in_real[2])
VMULPS(ymm_W2_imag, ymm_scale_factor, in_imag[2])
ymm_W3_real, ymm_W3_imag = YMMRegister(), YMMRegister()
VMULPS(ymm_W3_real, ymm_scale_factor, in_real[3])
VMULPS(ymm_W3_imag, ymm_scale_factor, in_imag[3])
# G[n].real, H[n].real = W[n].real + W[8-n].real, W[n].real - W[8-n].real
# G[n].imag, H[n].imag = W[n].imag - W[8-n].imag, W[n].imag + W[8-n].imag
ymm_W7_real, ymm_W7_imag = YMMRegister(), YMMRegister()
VMOVUPS(ymm_W7_real, in_real[7])
ymm_G1_real, ymm_H1_real = butterfly(ymm_W1_real, ymm_W7_real, scale_b=ymm_scale_factor)
VMOVUPS(ymm_W7_imag, in_imag[7])
ymm_G1_imag, ymm_H1_imag = butterfly(ymm_W1_imag, ymm_W7_imag, scale_b=ymm_scale_factor, negate_b=True)
ymm_W6_real, ymm_W6_imag = YMMRegister(), YMMRegister()
VMOVUPS(ymm_W6_real, in_real[6])
ymm_G2_real, ymm_H2_real = butterfly(ymm_W2_real, ymm_W6_real, scale_b=ymm_scale_factor)
VMOVUPS(ymm_W6_imag, in_imag[6])
ymm_G2_imag, ymm_H2_imag = butterfly(ymm_W2_imag, ymm_W6_imag, scale_b=ymm_scale_factor, negate_b=True)
ymm_W5_real, ymm_W5_imag = YMMRegister(), YMMRegister()
VMOVUPS(ymm_W5_real, in_real[5])
ymm_G3_real, ymm_H3_real = butterfly(ymm_W3_real, ymm_W5_real, scale_b=ymm_scale_factor)
VMOVUPS(ymm_W5_imag, in_imag[5])
ymm_G3_imag, ymm_H3_imag = butterfly(ymm_W3_imag, ymm_W5_imag, scale_b=ymm_scale_factor, negate_b=True)
# H[2]+, H[2]- = H[2].real + H[2].imag, H[2].real - H[2].imag
ymm_H2_add, ymm_H2_sub = butterfly(ymm_H2_real, ymm_H2_imag)
# w[ n].real = G[ n].real - H[ n].real * cos((N-n)*pi/2N) - H[ n].imag * cos(n*pi/2N)
# w[2N-n].real = G[ n].real + H[ n].real * cos((N-n)*pi/2N) + H[ n].imag * cos(n*pi/2N)
# w[ n].imag = G[ n].imag + H[ n].real * cos(n*pi/2N) - H[ n].imag * cos((N-n)*pi/2N)
# w[2N-n].imag = -G[ n].imag + H[ n].real * cos(n*pi/2N) - H[ n].imag * cos((N-n)*pi/2N)
# w[ N-n].real = G[N-n].real - H[N-n].real * cos(n*pi/2N) - H[N-n].imag * cos((N-n)*pi/2N)
# w[ N+n].real = G[N-n].real + H[N-n].real * cos(n*pi/2N) + H[N-n].imag * cos((N-n)*pi/2N)
# w[ N-n].imag = G[N-n].imag + H[N-n].real * cos((N-n)*pi/2N) - H[N-n].imag * cos(n*pi/2N)
# w[ N+n].imag = -G[N-n].imag + H[N-n].real * cos((N-n)*pi/2N) - H[N-n].imag * cos(n*pi/2N)
ymm_cos_1pi_over_8, ymm_cos_3pi_over_8 = YMMRegister(), YMMRegister()
VMOVAPS(ymm_cos_3pi_over_8, Constant.float32x8(cos_npi_over_8[3]))
VMOVAPS(ymm_cos_1pi_over_8, Constant.float32x8(cos_npi_over_8[1]))
ymm_w1_real, ymm_w7_real = YMMRegister(), ymm_G1_real
VMOVAPS(ymm_w1_real, ymm_G1_real)
VFNMADD231PS(ymm_w1_real, ymm_H1_real, ymm_cos_3pi_over_8)
VFMADD231PS(ymm_w7_real, ymm_H1_real, ymm_cos_3pi_over_8)
ymm_w1_imag, ymm_w7_imag = YMMRegister(), ymm_G1_imag
VMOVAPS(ymm_w1_imag, ymm_G1_imag)
VFMADD231PS(ymm_w1_imag, ymm_H1_real, ymm_cos_1pi_over_8)
VFMSUB231PS(ymm_w7_imag, ymm_H1_real, ymm_cos_1pi_over_8)
ymm_w3_real, ymm_w5_real = YMMRegister(), ymm_G3_real
VMOVAPS(ymm_w3_real, ymm_G3_real)
VFNMADD231PS(ymm_w3_real, ymm_H3_real, ymm_cos_1pi_over_8)
VFMADD231PS(ymm_w5_real, ymm_H3_real, ymm_cos_1pi_over_8)
ymm_w3_imag, ymm_w5_imag = YMMRegister(), ymm_G3_imag
VMOVAPS(ymm_w3_imag, ymm_G3_imag)
VFMADD231PS(ymm_w3_imag, ymm_H3_real, ymm_cos_3pi_over_8)
VFMSUB231PS(ymm_w5_imag, ymm_H3_real, ymm_cos_3pi_over_8)
ymm_sqrt2_over_2 = YMMRegister()
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
# w[ N/2].real = G[N/2].real - H[N/2]+ * sqrt(2)/2
# w[ N/2].imag = G[N/2].imag + H[N/2]- * sqrt(2)/2
# w[3N/2].real = G[N/2].real + H[N/2]+ * sqrt(2)/2
# w[3N/2].imag = -G[N/2].imag + H[N/2]- * sqrt(2)/2
ymm_w2_real, ymm_w6_real = YMMRegister(), ymm_G2_real
VMOVAPS(ymm_w2_real, ymm_G2_real)
VFNMADD231PS(ymm_w2_real, ymm_H2_add, ymm_sqrt2_over_2)
VFMADD231PS(ymm_w6_real, ymm_H2_add, ymm_sqrt2_over_2)
ymm_w2_imag, ymm_w6_imag = YMMRegister(), ymm_G2_imag
VMOVAPS(ymm_w2_imag, ymm_G2_imag)
VFMADD231PS(ymm_w2_imag, ymm_H2_sub, ymm_sqrt2_over_2)
VFMSUB231PS(ymm_w6_imag, ymm_H2_sub, ymm_sqrt2_over_2)
# w[ n].real = G[ n].real - H[ n].real * cos((N-n)*pi/2N) - H[ n].imag * cos(n*pi/2N)
# w[2N-n].real = G[ n].real + H[ n].real * cos((N-n)*pi/2N) + H[ n].imag * cos(n*pi/2N)
# w[ n].imag = G[ n].imag + H[ n].real * cos(n*pi/2N) - H[ n].imag * cos((N-n)*pi/2N)
# w[2N-n].imag = -G[ n].imag + H[ n].real * cos(n*pi/2N) - H[ n].imag * cos((N-n)*pi/2N)
# w[ N-n].real = G[N-n].real - H[N-n].real * cos(n*pi/2N) - H[N-n].imag * cos((N-n)*pi/2N)
# w[ N+n].real = G[N-n].real + H[N-n].real * cos(n*pi/2N) + H[N-n].imag * cos((N-n)*pi/2N)
# w[ N-n].imag = G[N-n].imag + H[N-n].real * cos((N-n)*pi/2N) - H[N-n].imag * cos(n*pi/2N)
# w[ N+n].imag = -G[N-n].imag + H[N-n].real * cos((N-n)*pi/2N) - H[N-n].imag * cos(n*pi/2N)
ymm_cos_1pi_over_8, ymm_cos_3pi_over_8 = YMMRegister(), YMMRegister()
VMOVAPS(ymm_cos_1pi_over_8, Constant.float32x8(cos_npi_over_8[1]))
VMOVAPS(ymm_cos_3pi_over_8, Constant.float32x8(cos_npi_over_8[3]))
VFNMADD231PS(ymm_w1_real, ymm_H1_imag, ymm_cos_1pi_over_8)
VFMADD231PS(ymm_w7_real, ymm_H1_imag, ymm_cos_1pi_over_8)
VFNMADD231PS(ymm_w1_imag, ymm_H1_imag, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_w7_imag, ymm_H1_imag, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_w3_real, ymm_H3_imag, ymm_cos_3pi_over_8)
VFMADD231PS(ymm_w5_real, ymm_H3_imag, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_w3_imag, ymm_H3_imag, ymm_cos_1pi_over_8)
VFNMADD231PS(ymm_w5_imag, ymm_H3_imag, ymm_cos_1pi_over_8)
data = [
LocalVariable(YMMRegister.size), YMMRegister(),
ymm_w1_real, ymm_w1_imag,
ymm_w2_real, ymm_w2_imag,
ymm_w3_real, ymm_w3_imag,
LocalVariable(YMMRegister.size), LocalVariable(YMMRegister.size),
ymm_w5_real, ymm_w5_imag,
ymm_w6_real, ymm_w6_imag,
ymm_w7_real, ymm_w7_imag
]
real, imag = data[0::2], data[1::2]
# TODO: optimize
ymm_w0_real, ymm_w0_imag = YMMRegister(), imag[0]
VMOVUPS(ymm_w0_real, in_real[0])
VMOVUPS(ymm_w0_imag, in_imag[0])
VMULPS(ymm_w0_real, ymm_w0_real, Constant.float32x8(0.0625))
butterfly(ymm_w0_real, ymm_w0_imag, scale_b=Constant.float32x8(0.0625))
VMOVAPS(real[0], ymm_w0_real)
# TODO: optimize
ymm_w4_real, ymm_w4_imag = YMMRegister(), YMMRegister()
VMOVUPS(ymm_w4_real, in_real[4])
VMOVUPS(ymm_w4_imag, in_imag[4])
VMULPS(ymm_w4_real, ymm_w4_real, Constant.float32x8(0.125))
VMULPS(ymm_w4_imag, ymm_w4_imag, Constant.float32x8(-0.125))
VMOVAPS(real[4], ymm_w4_real)
VMOVAPS(imag[4], ymm_w4_imag)
# Bit reversal
for i in range(8):
new_i = fft8_bitreverse(i)
if new_i > i:
real[i], real[new_i] = real[new_i], real[i]
imag[i], imag[new_i] = imag[new_i], imag[i]
data = interleave(real, imag)
# 4x FFT2: butterfly
for i, (data_lo, data_hi) in enumerate(zip(data[0:2] + data[4:6] + data[8:10] + data[12:14], data[2:4] + data[6:8] + data[10:12] + data[14:16])):
butterfly(data_lo, data_hi)
# 2x FFT4: multiplication by twiddle factors
fft4_scale_b, fft4_negate_b = {}, {}
fft8_scale_b, fft8_negate_b = {}, {}
# w3.re, w3.im = -w3.im, w3.re
# w7.re, w7.im = -w7.im, w7.re
SWAP.REGISTERS(real[3], imag[3])
fft4_negate_b[id(real[3])] = True
SWAP.REGISTERS(real[7], imag[7])
fft4_negate_b[id(real[7])] = True
# 2x FFT4: butterfly
for data_lo, data_hi in zip(data[0:4] + data[8:12], data[4:8] + data[12:16]):
butterfly(data_lo, data_hi, negate_b=fft4_negate_b.get(id(data_hi), False))
# FFT8: multiplication by twiddle factors
# w6.re, w6.im = -w6.im, w6.re
SWAP.REGISTERS(real[6], imag[6])
fft8_negate_b[id(real[6])] = True
# w5.re, w5.im = SQRT2_OVER_2 * (w5.re - w5.im), SQRT2_OVER_2 * (w5.re + w5.im)
butterfly(real[5], imag[5], negate_b=True)
fft8_scale_b[id(real[5])] = Constant.float32x8(sqrt2_over_2)
fft8_scale_b[id(imag[5])] = Constant.float32x8(sqrt2_over_2)
# w7.re, w7.im = -SQRT2_OVER_2 * (w7.re + w7.im), SQRT2_OVER_2 * (w7.re - w7.im)
butterfly(real[7], imag[7])
fft8_scale_b[id(real[7])] = Constant.float32x8(sqrt2_over_2)
fft8_negate_b[id(real[7])] = True
fft8_scale_b[id(imag[7])] = Constant.float32x8(sqrt2_over_2)
ymm_store_mask = YMMRegister()
if store_mask:
VMOVAPS(ymm_store_mask, store_mask)
# FFT8: butterfly
with Block() as store_data:
for i, (data_lo, data_hi) in enumerate(zip(data[0:8], data[8:16])):
row_lo = i
row_hi = row_lo + 8
ymm_data_lo, ymm_data_hi = \
butterfly(data_lo, data_hi,
scale_b=fft8_scale_b.get(id(data_hi)),
negate_b=fft8_negate_b.get(id(data_hi), False),
writeback=False)
if relu:
ymm_zero = YMMRegister()
VMOVAPS(ymm_zero, Constant.float32x8(-0.0))
with Block() as store_data_lo:
if reg_row_start:
CMP(reg_row_start, row_lo)
JA(store_data_lo.end)
if reg_row_end:
CMP(reg_row_end, row_lo)
JBE(store_data_lo.end)
elif reg_row_end:
CMP(reg_row_end, row_lo)
JBE(store_data.end)
if relu:
VMAXPS(ymm_data_lo, ymm_zero, ymm_data_lo)
if store_mask:
VMASKMOVPS([reg_t0], ymm_store_mask, ymm_data_lo)
else:
VMOVUPS([reg_t0], ymm_data_lo)
if i + 1 != 8:
ADD(reg_t0, reg_t_stride)
with Block() as store_data_hi:
if reg_row_start:
CMP(reg_row_start, row_hi)
JA(store_data_hi.end)
if reg_row_end:
CMP(reg_row_end, row_hi)
JBE(store_data_hi.end)
if relu:
VMAXPS(ymm_data_hi, ymm_zero, ymm_data_hi)
if store_mask:
VMASKMOVPS([reg_t8], ymm_store_mask, ymm_data_hi)
else:
VMOVUPS([reg_t8], ymm_data_hi)
if i + 1 != 8:
ADD(reg_t8, reg_t_stride)
|
from __future__ import absolute_import
from __future__ import division
import fft.complex_soa
arg_t = Argument(ptr(const_float_), name="t")
arg_f = Argument(ptr(float_), name="f")
with Function("nnp_fft4_8aos__fma3",
(arg_t, arg_f),
target=uarch.default + isa.fma3):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_data = [YMMRegister() for _ in range(8)]
ymm_real, ymm_imag = ymm_data[0::2], ymm_data[1::2]
for i, ymm_i in enumerate(ymm_data):
VMOVUPS(ymm_i, [reg_t + i * YMMRegister.size])
fft.complex_soa.fft4_across_rows(ymm_real, ymm_imag)
for i, ymm_i in enumerate(ymm_data):
VMOVUPS([reg_f + i * YMMRegister.size], ymm_i)
RETURN()
from common import butterfly, sqrt2_over_2
def fft8_bitreverse(n):
return int(format(n, "03b")[::-1], 2)
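# Illustration (assumption, not part of the kernel): the 3-bit reversal above maps the natural
# order 0..7 to [0, 4, 2, 6, 1, 5, 3, 7], i.e. the order in which a decimation-in-frequency
# radix-2 FFT8 emits its outputs:
#   [fft8_bitreverse(i) for i in range(8)] == [0, 4, 2, 6, 1, 5, 3, 7]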
with Function("nnp_fft8_8aos__fma3",
(arg_t, arg_f),
target=uarch.default + isa.fma3):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
data = [YMMRegister() for _ in range(16)]
data[0] = LocalVariable(data[0])
data[8] = LocalVariable(data[8])
real, imag = data[0::2], data[1::2]
for i, (data_lo, data_hi) in enumerate(zip(data[0:8], data[8:16])):
ymm_data_lo, ymm_data_hi = data_lo, data_hi
if isinstance(data_lo, LocalVariable):
ymm_data_lo = YMMRegister()
if isinstance(data_hi, LocalVariable):
ymm_data_hi = YMMRegister()
VMOVUPS(ymm_data_lo, [reg_t + i * YMMRegister.size])
VMOVUPS(ymm_data_hi, [reg_t + (i + 8) * YMMRegister.size])
butterfly(ymm_data_lo, ymm_data_hi)
if isinstance(data_lo, LocalVariable):
VMOVAPS(data_lo, ymm_data_lo)
if isinstance(data_hi, LocalVariable):
VMOVAPS(data_hi, ymm_data_hi)
# FFT8: multiplication by twiddle factors
fft4_scale_b, fft4_negate_b = {}, {}
fft2_scale_b, fft2_negate_b = {}, {}
# w6.re, w6.im = w6.im, -w6.re
SWAP.REGISTERS(real[6], imag[6])
fft4_negate_b[id(imag[6])] = True
# w5.re, w5.im = SQRT2_OVER_2 * (w5.re + w5.im), SQRT2_OVER_2 * (w5.im - w5.re)
butterfly(imag[5], real[5])
SWAP.REGISTERS(real[5], imag[5])
# w7.re, w7.im = -SQRT2_OVER_2 * (w7.re - w7.im), -SQRT2_OVER_2 * (w7.re + w7.im)
butterfly(real[7], imag[7], negate_b=True)
fft4_negate_b[id(real[7])] = True
fft4_negate_b[id(imag[7])] = True
# Propagate multiplication by sqrt2_over_2 until the last butterfly in FFT2
ymm_sqrt2_over_2 = YMMRegister()
fft2_scale_b[id(real[5])] = ymm_sqrt2_over_2
fft2_scale_b[id(imag[5])] = ymm_sqrt2_over_2
fft2_scale_b[id(real[7])] = ymm_sqrt2_over_2
fft2_scale_b[id(imag[7])] = ymm_sqrt2_over_2
# 2x FFT4: butterfly
for data_lo, data_hi in zip(data[0:4] + data[8:12], data[4:8] + data[12:16]):
butterfly(data_lo, data_hi, negate_b=fft4_negate_b.get(id(data_hi), False), scale_b=fft4_scale_b.get(id(data_hi)))
# 2x FFT4: multiplication by twiddle factors
# w3.re, w3.im = w3.im, -w3.re
# w7.re, w7.im = w7.im, -w7.re
SWAP.REGISTERS(real[3], imag[3])
SWAP.REGISTERS(real[7], imag[7])
fft2_negate_b[id(imag[3])] = True
fft2_negate_b[id(imag[7])] = True
# 4x FFT2: butterfly
for i, (data_lo, data_hi) in enumerate(zip(data[0:2] + data[4:6] + data[8:10] + data[12:14], data[2:4] + data[6:8] + data[10:12] + data[14:16])):
ymm_data_lo, ymm_data_hi = \
butterfly(data_lo, data_hi,
negate_b=fft2_negate_b.get(id(data_hi), False), scale_b=fft2_scale_b.get(id(data_hi)),
writeback=False)
index_lo = (i // 2) * 2
index_hi = index_lo + 1
VMOVUPS([reg_f + (fft8_bitreverse(index_lo) * 2 + i % 2) * YMMRegister.size], ymm_data_lo)
VMOVUPS([reg_f + (fft8_bitreverse(index_hi) * 2 + i % 2) * YMMRegister.size], ymm_data_hi)
if i == 0:
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
RETURN()
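# Hedged reference (assumption, for documentation only): per SIMD lane, the kernel above appears to
# follow the classic radix-2 decimation-in-frequency FFT8 dataflow -- butterflies first, twiddle
# rotations folded into later stages, outputs written in bit-reversed order. The NumPy helper below
# is an illustrative sketch of that dataflow, not NNPACK code; numpy is imported locally so this
# module gains no hard dependency.
def dif_fft8_ref(x):
    import numpy as np
    x = np.asarray(x, dtype=complex).copy()
    span = 4
    while span >= 1:
        w = np.exp(-2j * np.pi * np.arange(span) / (2 * span))       # stage twiddle factors
        for start in range(0, 8, 2 * span):
            a = x[start:start + span].copy()
            b = x[start + span:start + 2 * span].copy()
            x[start:start + span] = a + b                             # butterfly: sum
            x[start + span:start + 2 * span] = (a - b) * w            # butterfly: difference * twiddle
        span //= 2
    # Output is bit-reversed: dif_fft8_ref(x)[i] == np.fft.fft(x)[fft8_bitreverse(i)]
    return x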
with Function("nnp_ifft8_8aos__fma3",
(arg_t, arg_f),
target=uarch.default + isa.fma3):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
data = [YMMRegister() for _ in range(16)]
data[0] = LocalVariable(data[0])
data[8] = LocalVariable(data[8])
real, imag = data[0::2], data[1::2]
for i, (data_lo, data_hi) in enumerate(zip(data[0:8], data[8:16])):
ymm_data_lo, ymm_data_hi = data_lo, data_hi
if isinstance(data_lo, LocalVariable):
ymm_data_lo = YMMRegister()
if isinstance(data_hi, LocalVariable):
ymm_data_hi = YMMRegister()
VMOVUPS(ymm_data_lo, [reg_t + i * YMMRegister.size])
VMOVUPS(ymm_data_hi, [reg_t + (i + 8) * YMMRegister.size])
butterfly(ymm_data_lo, ymm_data_hi)
if isinstance(data_lo, LocalVariable):
VMOVAPS(data_lo, ymm_data_lo)
if isinstance(data_hi, LocalVariable):
VMOVAPS(data_hi, ymm_data_hi)
# FFT8: multiplication by twiddle factors
fft4_scale_b, fft4_negate_b = {}, {}
fft2_scale_b, fft2_negate_b = {}, {}
# w6.re, w6.im = -w6.im, w6.re
SWAP.REGISTERS(real[6], imag[6])
fft4_negate_b[id(real[6])] = True
# w5.re, w5.im = SQRT2_OVER_2 * (w5.re - w5.im), SQRT2_OVER_2 * (w5.re + w5.im)
butterfly(real[5], imag[5], negate_b=True)
# w7.re, w7.im = -SQRT2_OVER_2 * (w7.re + w7.im), SQRT2_OVER_2 * (w7.re - w7.im)
butterfly(real[7], imag[7])
fft4_negate_b[id(real[7])] = True
# Propagate multiplication by sqrt2_over_2 until the last butterfly in FFT2
fft2_scale_b[id(real[5])] = Constant.float32x8(sqrt2_over_2)
fft2_scale_b[id(imag[5])] = Constant.float32x8(sqrt2_over_2)
fft2_scale_b[id(real[7])] = Constant.float32x8(sqrt2_over_2)
fft2_scale_b[id(imag[7])] = Constant.float32x8(sqrt2_over_2)
# 2x FFT4: butterfly
for data_lo, data_hi in zip(data[0:4] + data[8:12], data[4:8] + data[12:16]):
butterfly(data_lo, data_hi, negate_b=fft4_negate_b.get(id(data_hi), False), scale_b=fft4_scale_b.get(id(data_hi)))
# 2x FFT4: multiplication by twiddle factors
# w3.re, w3.im = -w3.im, w3.re
# w7.re, w7.im = -w7.im, w7.re
SWAP.REGISTERS(real[3], imag[3])
SWAP.REGISTERS(real[7], imag[7])
fft2_negate_b[id(real[3])] = True
fft2_negate_b[id(real[7])] = True
# 4x FFT2: butterfly
for i, (data_lo, data_hi) in enumerate(zip(data[0:2] + data[4:6] + data[8:10] + data[12:14], data[2:4] + data[6:8] + data[10:12] + data[14:16])):
ymm_data_lo, ymm_data_hi = \
butterfly(data_lo, data_hi,
negate_b=fft2_negate_b.get(id(data_hi), False), scale_b=fft2_scale_b.get(id(data_hi)),
writeback=False)
index_lo = (i // 2) * 2
index_hi = index_lo + 1
VMULPS(ymm_data_lo, ymm_data_lo, Constant.float32x8(0.125))
VMULPS(ymm_data_hi, ymm_data_hi, Constant.float32x8(0.125))
VMOVUPS([reg_f + (fft8_bitreverse(index_lo) * 2 + i % 2) * YMMRegister.size], ymm_data_lo)
VMOVUPS([reg_f + (fft8_bitreverse(index_hi) * 2 + i % 2) * YMMRegister.size], ymm_data_hi)
RETURN()
|
import fft.complex_soa
import fft.two_real_to_two_complex_soa_perm_planar
arg_t = Argument(ptr(const_float_), name="t")
arg_f = Argument(ptr(float_), name="f")
with Function("nnp_fft8_dualreal__avx2",
(arg_t, arg_f),
target=uarch.default + isa.fma3 + isa.avx2):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_seq_a, ymm_seq_b = YMMRegister(), YMMRegister()
VMOVUPS(ymm_seq_a, [reg_t])
VMOVUPS(ymm_seq_b, [reg_t + YMMRegister.size])
fft.complex_soa.fft8_within_rows(ymm_seq_a, ymm_seq_b)
ymm_wr, ymm_wi = ymm_seq_a, ymm_seq_b
fft.two_real_to_two_complex_soa_perm_planar.fft8_within_rows_postprocess(ymm_wr, ymm_wi)
ymm_xhr, ymm_xhi = ymm_wr, ymm_wi
VMOVUPS([reg_f], ymm_xhr)
VMOVUPS([reg_f + YMMRegister.size], ymm_xhi)
RETURN()
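# Hedged reference (assumption): the "dualreal" kernels pack two real length-8 rows as the real and
# imaginary parts of a single complex FFT and then separate the two spectra via conjugate symmetry,
# which is what the two_real_to_two_complex_* postprocess name suggests. The sketch below shows the
# underlying identity only; it does not reproduce the packed register layout.
def dual_real_fft8_ref(x, y):
    import numpy as np
    z = np.fft.fft(np.asarray(x, dtype=float) + 1j * np.asarray(y, dtype=float))
    z_rev_conj = np.conj(z[(-np.arange(8)) % 8])        # conj(Z[-k mod 8])
    x_spectrum = (z + z_rev_conj) / 2                   # FFT of x
    y_spectrum = (z - z_rev_conj) / 2j                  # FFT of y
    return x_spectrum, y_spectrum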
with Function("nnp_fft16_dualreal__avx2",
(arg_t, arg_f),
target=uarch.default + isa.fma3 + isa.avx2):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_seq_a = YMMRegister(), YMMRegister()
ymm_seq_b = YMMRegister(), YMMRegister()
for i, ymm_a in enumerate(ymm_seq_a + ymm_seq_b):
VMOVUPS(ymm_a, [reg_t + i * YMMRegister.size])
fft.complex_soa.fft16_within_rows(ymm_seq_a, ymm_seq_b)
ymm_wr, ymm_wi = ymm_seq_a, ymm_seq_b
fft.two_real_to_two_complex_soa_perm_planar.fft16_within_rows_postprocess(ymm_wr, ymm_wi)
for i, ymm_w in enumerate(ymm_wr + ymm_wi):
VMOVUPS([reg_f + i * YMMRegister.size], ymm_w)
RETURN()
|
import fft.complex_soa_perm_to_real
from common import butterfly, cos_npi_over_8, sqrt2_over_2
def fft8_bitreverse(n):
return int(format(n, "03b")[::-1], 2)
arg_f = Argument(ptr(const_float_), name="f")
arg_t = Argument(ptr(float_), name="t")
with Function("nnp_ifft8_8real__fma3",
(arg_f, arg_t),
target=uarch.default + isa.fma3):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
ymm_data = [YMMRegister() for _ in range(8)]
ymm_real, ymm_imag = ymm_data[0::2], ymm_data[1::2]
for i, ymm_i in enumerate(ymm_data):
VMOVUPS(ymm_i, [reg_f + i * YMMRegister.size])
fft.complex_soa_perm_to_real.ifft8_across_rows(ymm_data)
for i, ymm_i in enumerate(ymm_data):
VMOVUPS([reg_t + i * YMMRegister.size], ymm_i)
RETURN()
import fft16x16
with Function("nnp_ifft16_8real__fma3",
(arg_f, arg_t),
target=uarch.default + isa.fma3):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
reg_t0 = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t0, arg_t)
reg_stride = GeneralPurposeRegister64()
MOV(reg_stride, YMMRegister.size)
reg_t8 = GeneralPurposeRegister64()
LEA(reg_t8, [reg_t0 + 8 * YMMRegister.size])
fft16x16.inverse_vfft(reg_t0, reg_t8, reg_stride,
data_in=[yword[reg_f + YMMRegister.size * i] for i in range(16)])
RETURN()
|
from __future__ import absolute_import
from __future__ import division
import winograd.o6x6k3x3
import block8x8
from common import _MM_SHUFFLE
for post_operation in ["store", "stream"]:
arg_d_pointer = Argument(ptr(const_float_), name="d_pointer")
arg_wd_pointer = Argument(ptr(float_), name="wd_pointer")
arg_d_stride = Argument(size_t, name="d_stride")
arg_wd_stride = Argument(size_t, name="wd_stride")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
with Function("nnp_iwt8x8_3x3_with_offset_and_{post_operation}__avx2".format(post_operation=post_operation),
(arg_d_pointer, arg_wd_pointer, arg_d_stride, arg_wd_stride, arg_row_count, arg_column_count, arg_row_offset, arg_column_offset),
target=uarch.default + isa.fma3 + isa.avx2):
reg_d = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_d, arg_d_pointer)
reg_wd = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_wd, arg_wd_pointer)
reg_stride_d = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_d, arg_d_stride)
reg_stride_wd = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_wd, arg_wd_stride)
reg_row_cnt = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_cnt, arg_row_count)
reg_col_cnt = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_col_cnt, arg_column_count)
reg_row_off = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_off, arg_row_offset)
reg_col_off = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_col_off, arg_column_offset)
ymm_data = [YMMRegister() for _ in range(8)]
block8x8.load_with_padding(ymm_data, reg_d, reg_stride_d, reg_row_off, reg_row_cnt, reg_col_off, reg_col_cnt)
ymm_data = winograd.o6x6k3x3.input_transform(ymm_data)
winograd.o6x6k3x3.transpose8x8(ymm_data)
ymm_data = winograd.o6x6k3x3.input_transform(ymm_data)
VSTOREPS = {"store": VMOVAPS, "stream": VMOVNTPS}[post_operation]
for ymm_row in ymm_data:
VSTOREPS([reg_wd], ymm_row)
if ymm_row is not ymm_data[-1]:
ADD(reg_wd, reg_stride_wd)
RETURN()
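# Illustration (assumption): applying winograd.o6x6k3x3.input_transform along one axis, transposing
# the 8x8 tile, and applying it again realizes the 2-D Winograd F(6x6, 3x3) input transform
# B^T d B one dimension at a time, the standard way to build the 2-D transform from the 1-D one.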
for reverse_kernel in [False, True]:
for post_operation in ["store", "stream"]:
arg_g_pointer = Argument(ptr(const_float_), name="d_pointer")
arg_wg_pointer = Argument(ptr(float_), name="wd_pointer")
arg_g_stride = Argument(size_t, name="d_stride")
arg_wg_stride = Argument(size_t, name="wd_stride")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
kwt_arguments = (arg_g_pointer, arg_wg_pointer, arg_g_stride, arg_wg_stride, arg_row_count, arg_column_count, arg_row_offset, arg_column_offset)
with Function("nnp_kwt8x8_3{reverse}x3{reverse}_and_{post_operation}__avx2".format(
reverse="R" if reverse_kernel else "", post_operation=post_operation),
kwt_arguments, target=uarch.default + isa.fma3 + isa.avx2):
reg_g = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_g, arg_g_pointer)
reg_wg = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_wg, arg_wg_pointer)
reg_stride_g = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_g, arg_g_stride)
reg_stride_wg = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_wg, arg_wg_stride)
# stride is in elements; multiply by sizeof(float) to get stride in bytes
SHL(reg_stride_g, 2)
xmm_load_mask = XMMRegister()
VMOVAPS(xmm_load_mask.as_ymm, Constant.float32x8(-0.0, -0.0, -0.0, +0.0, +0.0, +0.0, +0.0, +0.0))
xmm_g = [XMMRegister() for _ in range(3)]
for xmm in xmm_g:
VMASKMOVPS(xmm, xmm_load_mask, [reg_g])
if xmm is not xmm_g[-1]:
ADD(reg_g, reg_stride_g)
if reverse_kernel:
xmm_g = xmm_g[::-1]
ymm_wg_rows = winograd.o6x6k3x3.kernel_transform([xmm.as_ymm for xmm in xmm_g], rescale_coefficients=False)
ymm_g_rows = winograd.o6x6k3x3.transpose8x3([ymm.as_xmm for ymm in ymm_wg_rows])
if reverse_kernel:
ymm_g_rows = ymm_g_rows[::-1]
ymm_wg_rows = winograd.o6x6k3x3.kernel_transform(ymm_g_rows, rescale_coefficients=False)
rcp_9 = float.fromhex("0x1.C71C72p-4")
rcp_81 = float.fromhex("0x1.948B10p-7")
rcp_90 = float.fromhex("0x1.6C16C2p-7")
rcp_180 = float.fromhex("0x1.6C16C2p-8")
rcp_810 = float.fromhex("0x1.43A274p-10")
rcp_1620 = float.fromhex("0x1.43A274p-11")
rcp_8100 = float.fromhex("0x1.02E85Cp-13")
rcp_16200 = float.fromhex("0x1.02E85Cp-14")
rcp_32400 = float.fromhex("0x1.02E85Cp-15")
ymm_edge_scale = YMMRegister()
VMOVAPS(ymm_edge_scale, Constant.float32x8( 1.0, -2.0 * rcp_9, -2.0 * rcp_9, rcp_90, rcp_90, rcp_180, rcp_180, 1.0))
VMULPS(ymm_wg_rows[0], ymm_wg_rows[0], ymm_edge_scale)
VMULPS(ymm_wg_rows[7], ymm_wg_rows[7], ymm_edge_scale)
ymm_row12_scale = YMMRegister()
VMOVAPS(ymm_row12_scale, Constant.float32x8(-2.0 * rcp_9, 4.0 * rcp_81, 4.0 * rcp_81, -2.0 * rcp_810, -2.0 * rcp_810, -2.0 * rcp_1620, -2.0 * rcp_1620, -2.0 * rcp_9))
VMULPS(ymm_wg_rows[1], ymm_wg_rows[1], ymm_row12_scale)
VMULPS(ymm_wg_rows[2], ymm_wg_rows[2], ymm_row12_scale)
ymm_row34_scale = YMMRegister()
VMOVAPS(ymm_row34_scale, Constant.float32x8( rcp_90, -2.0 * rcp_810, -2.0 * rcp_810, rcp_8100, rcp_8100, rcp_16200, rcp_16200, rcp_90))
VMULPS(ymm_wg_rows[3], ymm_wg_rows[3], ymm_row34_scale)
VMULPS(ymm_wg_rows[4], ymm_wg_rows[4], ymm_row34_scale)
ymm_row56_scale = YMMRegister()
VMOVAPS(ymm_row56_scale, Constant.float32x8( rcp_180, -2.0 * rcp_1620, -2.0 * rcp_1620, rcp_16200, rcp_16200, rcp_32400, rcp_32400, rcp_180))
VMULPS(ymm_wg_rows[5], ymm_wg_rows[5], ymm_row56_scale)
VMULPS(ymm_wg_rows[6], ymm_wg_rows[6], ymm_row56_scale)
# Write output with stride
VSTOREPS = {"store": VMOVAPS, "stream": VMOVNTPS}[post_operation]
for ymm_wg_row in ymm_wg_rows:
VSTOREPS([reg_wg], ymm_wg_row)
if ymm_wg_row is not ymm_wg_rows[-1]:
ADD(reg_wg, reg_stride_wg)
RETURN()
arg_m_pointer = Argument(ptr(const_float_), name="m_pointer")
arg_s_pointer = Argument(ptr(float_), name="s_pointer")
arg_bias = Argument(ptr(const_float_), name="bias_pointer")
arg_m_stride = Argument(size_t, name="m_stride")
arg_s_stride = Argument(size_t, name="s_stride")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
for with_offset, with_bias, with_relu in [(True, False, False), (False, True, False), (False, True, True)]:
if with_bias:
owt8x8_arguments = (arg_m_pointer, arg_s_pointer, arg_bias, arg_m_stride, arg_s_stride, arg_row_count, arg_column_count)
else:
owt8x8_arguments = (arg_m_pointer, arg_s_pointer, arg_m_stride, arg_s_stride, arg_row_count, arg_column_count)
if with_offset:
# Note: the version with offset has offset arguments, but they are never used (assumed 0).
owt8x8_arguments += (arg_row_offset, arg_column_offset)
with Function("nnp_owt8x8_3x3{with_bias}{with_relu}__avx2".format(
with_bias="_with_bias" if with_bias else "",
with_relu="_with_relu" if with_relu else ""),
owt8x8_arguments, target=uarch.default + isa.fma3 + isa.avx2):
reg_m = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_m, arg_m_pointer)
reg_s = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_s, arg_s_pointer)
if with_bias:
reg_bias = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_bias, arg_bias)
xmm_bias = XMMRegister()
VINSERTPS(xmm_bias, xmm_bias, [reg_bias], 0b1101 | 1<<4)
reg_m_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_m_stride, arg_m_stride)
reg_s_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_s_stride, arg_s_stride)
reg_row_count = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_count, arg_row_count)
reg_column_count = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_count, arg_column_count)
ymm_m = [YMMRegister() for _ in range(8)]
for ymm in ymm_m:
if with_bias and ymm is ymm_m[1]:
VADDPS(ymm, xmm_bias.as_ymm, [reg_m])
else:
VMOVAPS(ymm, [reg_m])
if ymm is not ymm_m[-1]:
ADD(reg_m, reg_m_stride)
ymm_t = winograd.o6x6k3x3.output_transform(ymm_m)
ymm_tt = winograd.o6x6k3x3.transpose6x8(ymm_t)
ymm_s = winograd.o6x6k3x3.output_transform(ymm_tt)
block8x8.store_packed(ymm_s, reg_s, reg_s_stride, reg_row_count, reg_column_count, None, None, with_relu)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
import block8x8
import fft.complex_soa
import fft.real_to_complex_soa_perm
import fft.complex_soa_perm_to_real
import fft.two_real_to_two_complex_soa_perm_planar
import fft.two_complex_soa_perm_to_two_real_planar
arg_t_pointer = Argument(ptr(const_float_), name="t_pointer")
arg_f_pointer = Argument(ptr(float_), name="f_pointer")
arg_x_pointer = Argument(ptr(const_float_), name="x_pointer")
arg_t_stride = Argument(size_t, name="t_stride")
arg_f_stride = Argument(size_t, name="f_stride")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_offset = Argument(uint32_t, name="column_offset")
arg_column_count = Argument(uint32_t, name="column_count")
for post_operation in ["stream", "store"]:
fft8x8_arguments = (arg_t_pointer, arg_f_pointer, arg_t_stride, arg_f_stride, arg_row_count, arg_column_count, arg_row_offset, arg_column_offset)
with Function("nnp_fft8x8_with_offset_and_{post_operation}__avx2".format(post_operation=post_operation),
fft8x8_arguments, target=uarch.default + isa.fma3 + isa.avx2):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t_pointer)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f_pointer)
reg_inct = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_inct, arg_t_stride)
reg_incf = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_incf, arg_f_stride)
reg_row_cnt = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_cnt, arg_row_count)
reg_col_cnt = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_col_cnt, arg_column_count)
reg_row_off = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_off, arg_row_offset)
reg_col_off = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_col_off, arg_column_offset)
ymm_data = [YMMRegister(i) for i in range(8)]
ymm_real, ymm_imag = ymm_data[0::2], ymm_data[1::2]
block8x8.load_with_padding(ymm_data, reg_t, reg_inct, reg_row_off, reg_row_cnt, reg_col_off, reg_col_cnt)
fft.real_to_complex_soa_perm.fft8_across_rows(ymm_data)
fft.complex_soa.fft8_within_rows(ymm_real, ymm_imag)
fft.two_real_to_two_complex_soa_perm_planar.fft8_within_rows_postprocess(ymm_real[0], ymm_imag[0])
VSTOREPS = {"store": VMOVAPS, "stream": VMOVNTPS}[post_operation]
for ymm_re, ymm_im in zip(ymm_real, ymm_imag):
VSTOREPS([reg_f], ymm_re)
VSTOREPS([reg_f + YMMRegister.size], ymm_im)
if ymm_re is not ymm_real[-1]:
ADD(reg_f, reg_incf)
RETURN()
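# Illustration (assumption): the pipeline above -- real FFT8 across rows, complex FFT8 within rows,
# then the two-real-to-two-complex fix-up -- amounts to a 2-D FFT of the zero-padded 8x8 input
# tile, stored in NNPACK's packed frequency-domain layout.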
arg_f_pointer = Argument(ptr(const_float_), name="f_pointer")
arg_t_pointer = Argument(ptr(float_), name="t_pointer")
arg_bias = Argument(ptr(const_float_), name="bias_pointer")
arg_f_stride = Argument(size_t, name="f_stride")
arg_t_stride = Argument(size_t, name="t_stride")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_offset = Argument(uint32_t, name="column_offset")
arg_column_count = Argument(uint32_t, name="column_count")
for with_offset, with_bias, with_relu in [(True, False, False), (False, True, False), (False, True, True)]:
if with_bias:
ifft8x8_arguments = (arg_f_pointer, arg_t_pointer, arg_bias, arg_f_stride, arg_t_stride, arg_row_count, arg_column_count)
else:
ifft8x8_arguments = (arg_f_pointer, arg_t_pointer, arg_f_stride, arg_t_stride, arg_row_count, arg_column_count)
if with_offset:
ifft8x8_arguments += arg_row_offset, arg_column_offset
with Function("nnp_ifft8x8{with_offset}{with_bias}{with_relu}__avx2".format(
with_offset="_with_offset" if with_offset else "",
with_bias="_with_bias" if with_bias else "",
with_relu="_with_relu" if with_relu else ""),
ifft8x8_arguments,
target=uarch.default + isa.fma3 + isa.avx2):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f_pointer)
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t_pointer)
if with_bias:
reg_bias = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_bias, arg_bias)
reg_f_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f_stride, arg_f_stride)
reg_t_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t_stride, arg_t_stride)
reg_row_count = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_count, arg_row_count)
reg_column_end = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_end, arg_column_count)
if with_offset:
reg_row_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_start, arg_row_offset)
reg_column_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_start, arg_column_offset)
ADD(reg_column_end, reg_column_start)
else:
reg_row_start = None
reg_column_start = None
ymm_data = [YMMRegister(i) for i in range(8)]
ymm_real, ymm_imag = ymm_data[0::2], ymm_data[1::2]
if with_bias:
ymm_bias = YMMRegister()
VMOVSS(ymm_bias.as_xmm, [reg_bias])
for ymm_re, ymm_im in zip(ymm_real, ymm_imag):
VMOVAPS(ymm_re, [reg_f])
VMOVAPS(ymm_im, [reg_f + YMMRegister.size])
if with_bias and ymm_re is ymm_real[0]:
VFMADD231PS(ymm_re, ymm_bias, Constant.float32x8(64.0))
if ymm_im is not ymm_imag[-1]:
ADD(reg_f, reg_f_stride)
fft.two_complex_soa_perm_to_two_real_planar.ifft8_within_rows_preprocess(ymm_real[0], ymm_imag[0])
fft.complex_soa.fft8_within_rows(ymm_real, ymm_imag, transformation="inverse")
fft.complex_soa_perm_to_real.ifft8_across_rows(ymm_data)
block8x8.store_packed(ymm_data, reg_t, reg_t_stride, reg_row_count, reg_column_end, reg_row_start, reg_column_start, with_relu)
RETURN()
|
import fft.complex_soa
import fft.two_complex_soa_perm_to_two_real_planar
arg_f = Argument(ptr(const_float_), name="f")
arg_t = Argument(ptr(float_), name="t")
with Function("nnp_ifft8_dualreal__avx2",
(arg_f, arg_t),
target=uarch.default + isa.fma3 + isa.avx2):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
ymm_xhr, ymm_xhi = YMMRegister(), YMMRegister()
VMOVUPS(ymm_xhr, [reg_f])
VMOVUPS(ymm_xhi, [reg_f + YMMRegister.size])
fft.two_complex_soa_perm_to_two_real_planar.ifft8_within_rows_preprocess(ymm_xhr, ymm_xhi)
ymm_wr, ymm_wi = ymm_xhr, ymm_xhi
fft.complex_soa.fft8_within_rows(ymm_wr, ymm_wi, transformation="inverse")
ymm_seq_a, ymm_seq_b = ymm_wr, ymm_wi
VMOVUPS([reg_t], ymm_seq_a)
VMOVUPS([reg_t + YMMRegister.size], ymm_seq_b)
RETURN()
with Function("nnp_ifft16_dualreal__avx2",
(arg_f, arg_t),
target=uarch.default + isa.fma3 + isa.avx2):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
ymm_wr = YMMRegister(), YMMRegister()
ymm_wi = YMMRegister(), YMMRegister()
for i, ymm_w in enumerate(ymm_wr + ymm_wi):
VMOVUPS(ymm_w, [reg_f + i * YMMRegister.size])
fft.two_complex_soa_perm_to_two_real_planar.ifft16_within_rows_preprocess(ymm_wr, ymm_wi)
fft.complex_soa.ifft16_within_rows(ymm_wr, ymm_wi)
for i, ymm_w in enumerate(ymm_wr + ymm_wi):
VMOVUPS([reg_t + i * YMMRegister.size], ymm_w)
RETURN()
|
import fft.real_to_complex_soa_perm
arg_t = Argument(ptr(const_float_), name="t")
arg_f = Argument(ptr(float_), name="f")
with Function("nnp_fft8_8real__fma3",
(arg_t, arg_f),
target=uarch.default + isa.fma3):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_data = [YMMRegister() for _ in range(8)]
for i, ymm_i in enumerate(ymm_data):
VMOVUPS(ymm_i, [reg_t + i * YMMRegister.size])
fft.real_to_complex_soa_perm.fft8_across_rows(ymm_data)
for i, ymm_i in enumerate(ymm_data):
VMOVUPS([reg_f + i * YMMRegister.size], ymm_i)
RETURN()
import fft16x16
with Function("nnp_fft16_8real__fma3",
(arg_t, arg_f),
target=uarch.default + isa.fma3):
reg_t0 = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t0, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
reg_stride = GeneralPurposeRegister64()
MOV(reg_stride, YMMRegister.size)
reg_t8 = GeneralPurposeRegister64()
LEA(reg_t8, [reg_t0 + 8 * YMMRegister.size])
fft16x16.forward_vfft(reg_t0, reg_t8, reg_stride,
data_out=[yword[reg_f + YMMRegister.size * i] for i in range(16)])
RETURN()
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
def load_with_padding(ymm_data, reg_data, reg_stride, reg_row_offset, reg_row_count, reg_column_offset, reg_column_count):
assert isinstance(ymm_data, list) and all(isinstance(ymm_row, YMMRegister) for ymm_row in ymm_data)
assert isinstance(reg_data, GeneralPurposeRegister64)
assert isinstance(reg_stride, GeneralPurposeRegister64)
assert isinstance(reg_row_offset, GeneralPurposeRegister32)
assert isinstance(reg_row_count, GeneralPurposeRegister32)
assert isinstance(reg_column_offset, GeneralPurposeRegister32)
assert isinstance(reg_column_count, GeneralPurposeRegister32)
reg_column_end = GeneralPurposeRegister64()
LEA(reg_column_end, [reg_column_offset.as_qword + reg_column_count.as_qword * 1])
ymm_before_column_end_mask = YMMRegister()
VMOVD(ymm_before_column_end_mask.as_xmm, reg_column_end.as_dword)
ymm_before_column_start_mask = YMMRegister()
VMOVD(ymm_before_column_start_mask.as_xmm, reg_column_offset.as_dword)
ymm_column_index_mask = YMMRegister()
VMOVDQA(ymm_column_index_mask, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
VPBROADCASTD(ymm_before_column_end_mask, ymm_before_column_end_mask.as_xmm)
VPCMPGTD(ymm_before_column_end_mask, ymm_before_column_end_mask, ymm_column_index_mask)
VPBROADCASTD(ymm_before_column_start_mask, ymm_before_column_start_mask.as_xmm)
VPCMPGTD(ymm_before_column_start_mask, ymm_before_column_start_mask, ymm_column_index_mask)
ymm_load_mask = YMMRegister()
VPANDN(ymm_load_mask, ymm_before_column_start_mask, ymm_before_column_end_mask)
# Multiply by sizeof(float) to get offset in bytes
SHL(reg_column_offset, 2)
# data points to the first element, which is loaded into lane `reg_column_offset`
# However, VMASKMOVPS expects a pointer to the first lane, even if that lane is not loaded.
# Adjust the pointer by subtracting column_offset, in bytes
SUB(reg_data, reg_column_offset.as_qword)
# stride is in elements; multiply by sizeof(float) to get stride in bytes
SHL(reg_stride, 2)
# Zero all elements. Rows which are not loaded are initialized here.
for ymm_row in ymm_data:
VXORPS(ymm_row, ymm_row, ymm_row)
with Block() as load_rows:
for i, ymm_row in enumerate(ymm_data):
with Block() as load_row:
CMP(reg_row_offset, i)
JA(load_row.end)
VMASKMOVPS(ymm_row, ymm_load_mask, [reg_data])
if i + 1 != len(ymm_data):
ADD(reg_data, reg_stride)
SUB(reg_row_count, 1)
JZ(load_rows.end)
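# Hedged NumPy model (assumption, for documentation only) of the padding semantics above: a zero
# 8x8 tile is filled with a row_count x column_count window starting at (row_offset, column_offset);
# rows before row_offset, rows after the window, and columns outside the mask stay zero.
def load_with_padding_ref(src_window, row_offset, row_count, column_offset, column_count):
    import numpy as np
    tile = np.zeros((8, 8), dtype=np.float32)
    tile[row_offset:row_offset + row_count,
         column_offset:column_offset + column_count] = src_window[:row_count, :column_count]
    return tile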
def store_packed(ymm_data, reg_data, reg_stride, reg_row_count, reg_column_end, reg_row_offset=None, reg_column_start=None, relu=False):
assert isinstance(ymm_data, list) and all(isinstance(ymm_row, YMMRegister) for ymm_row in ymm_data)
assert isinstance(reg_data, GeneralPurposeRegister64)
assert isinstance(reg_stride, GeneralPurposeRegister64)
assert isinstance(reg_row_count, GeneralPurposeRegister32)
assert isinstance(reg_column_end, GeneralPurposeRegister32)
assert reg_row_offset is None or isinstance(reg_row_offset, GeneralPurposeRegister32)
assert reg_column_start is None or isinstance(reg_column_start, GeneralPurposeRegister32)
if reg_column_start is None:
ymm_store_mask = YMMRegister()
VMOVD(ymm_store_mask.as_xmm, reg_column_end)
VPBROADCASTD(ymm_store_mask, ymm_store_mask.as_xmm)
VPCMPGTD(ymm_store_mask, ymm_store_mask, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
else:
ymm_before_column_end_mask = YMMRegister()
VMOVD(ymm_before_column_end_mask.as_xmm, reg_column_end)
ymm_before_column_start_mask = YMMRegister()
VMOVD(ymm_before_column_start_mask.as_xmm, reg_column_start)
SHL(reg_column_start, 2)
SUB(reg_data, reg_column_start.as_qword)
ymm_column_index_mask = YMMRegister()
VMOVDQA(ymm_column_index_mask, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
VPBROADCASTD(ymm_before_column_end_mask, ymm_before_column_end_mask.as_xmm)
VPCMPGTD(ymm_before_column_end_mask, ymm_before_column_end_mask, ymm_column_index_mask)
VPBROADCASTD(ymm_before_column_start_mask, ymm_before_column_start_mask.as_xmm)
VPCMPGTD(ymm_before_column_start_mask, ymm_before_column_start_mask, ymm_column_index_mask)
ymm_store_mask = YMMRegister()
VPANDN(ymm_store_mask, ymm_before_column_start_mask, ymm_before_column_end_mask)
# stride is in elements; multiply by sizeof(float) to get stride in bytes
SHL(reg_stride, 2)
if relu:
ymm_zero = YMMRegister()
VMOVAPS(ymm_zero, Constant.float32x8(-0.0))
with Block() as store_rows:
for i, ymm_row in enumerate(ymm_data):
with Block() as store_row:
if reg_row_offset is not None:
CMP(reg_row_offset, i)
JA(store_row.end)
if relu:
VMAXPS(ymm_row, ymm_zero, ymm_row)
VMASKMOVPS([reg_data], ymm_store_mask, ymm_row)
if ymm_row is not ymm_data[-1]:
ADD(reg_data, reg_stride)
SUB(reg_row_count, 1)
JZ(store_rows.end)
|
import winograd.o6x6k3x3
arg_d_pointer = Argument(ptr(const_float_), name="d")
arg_w_pointer = Argument(ptr(float_), name="w")
with Function("nnp_iwt_f6k3__fma3", (arg_d_pointer, arg_w_pointer),
target=uarch.default + isa.fma3):
reg_d = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_d, arg_d_pointer)
reg_w = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_w, arg_w_pointer)
ymm_data = [YMMRegister() for _ in range(8)]
for i, ymm_row in enumerate(ymm_data):
VMOVUPS(ymm_row, [reg_d + i * YMMRegister.size])
ymm_data = winograd.o6x6k3x3.input_transform(ymm_data)
for i, ymm_row in enumerate(ymm_data):
VMOVUPS([reg_w + i * YMMRegister.size], ymm_row)
RETURN()
arg_g_pointer = Argument(ptr(const_float_), name="g")
arg_w_pointer = Argument(ptr(float_), name="w")
with Function("nnp_kwt_f6k3__fma3", (arg_g_pointer, arg_w_pointer),
target=uarch.default + isa.fma3):
reg_g = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_g, arg_g_pointer)
reg_w = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_w, arg_w_pointer)
ymm_data = [YMMRegister() for _ in range(3)]
for i, ymm_row in enumerate(ymm_data):
VMOVUPS(ymm_row, [reg_g + i * YMMRegister.size])
ymm_data = winograd.o6x6k3x3.kernel_transform(ymm_data)
for i, ymm_row in enumerate(ymm_data):
VMOVUPS([reg_w + i * YMMRegister.size], ymm_row)
RETURN()
arg_m_pointer = Argument(ptr(const_float_), name="m")
arg_s_pointer = Argument(ptr(float_), name="s")
with Function("nnp_owt_f6k3__fma3", (arg_m_pointer, arg_s_pointer),
target=uarch.default + isa.fma3):
reg_m = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_m, arg_m_pointer)
reg_s = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_s, arg_s_pointer)
ymm_m = [YMMRegister() for _ in range(8)]
for i, ymm_row in enumerate(ymm_m):
VMOVUPS(ymm_row, [reg_m + i * YMMRegister.size])
ymm_s = winograd.o6x6k3x3.output_transform(ymm_m)
for i, ymm_row in enumerate(ymm_s):
VMOVUPS([reg_s + i * YMMRegister.size], ymm_row)
RETURN()
|
from common import _MM_SHUFFLE
arg_src_pointer = Argument(ptr(const_float_), name="src_pointer")
arg_dst_pointer = Argument(ptr(float_), name="dst_pointer")
arg_src_stride = Argument(size_t, name="src_stride")
arg_src_row_offset = Argument(uint32_t, name="src_row_offset")
arg_src_row_count = Argument(uint32_t, name="src_row_count")
arg_src_column_offset = Argument(uint32_t, name="src_column_offset")
arg_src_column_count = Argument(uint32_t, name="src_column_count")
arg_dst_column_count = Argument(uint32_t, name="dst_column_count")
with Function("nnp_maxpool_2x2_2x2__avx2",
(arg_src_pointer, arg_dst_pointer, arg_src_stride,
arg_src_row_offset, arg_src_row_count, arg_src_column_offset, arg_src_column_count,
arg_dst_column_count),
target=uarch.default + isa.fma3 + isa.avx2):
reg_src_ptr = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_src_ptr, arg_src_pointer)
reg_dst_ptr = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_dst_ptr, arg_dst_pointer)
reg_src_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_src_stride, arg_src_stride)
reg_src_row_index = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_src_row_index, arg_src_row_offset)
reg_src_row_count = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_src_row_count, arg_src_row_count)
reg_src_column_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_src_column_start, arg_src_column_offset)
reg_src_column_end = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_src_column_end, arg_src_column_count)
ADD(reg_src_column_end, reg_src_column_start)
reg_dst_column_count = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_dst_column_count, arg_dst_column_count)
ymm_src_column_start, ymm_src_column_end, ymm_dst_column_count = YMMRegister(), YMMRegister(), YMMRegister()
VMOVD(ymm_src_column_start.as_xmm, reg_src_column_start)
VMOVD(ymm_src_column_end.as_xmm, reg_src_column_end)
VMOVD(ymm_dst_column_count.as_xmm, reg_dst_column_count)
VPBROADCASTD(ymm_src_column_start, ymm_src_column_start.as_xmm)
VPBROADCASTD(ymm_src_column_end, ymm_src_column_end.as_xmm)
VPBROADCASTD(ymm_dst_column_count, ymm_dst_column_count.as_xmm)
ymm_column_01234567, ymm_column_89ABCDEF = YMMRegister(), YMMRegister()
VMOVDQA(ymm_column_01234567, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
VMOVDQA(ymm_column_89ABCDEF, Constant.uint32x8(8, 9, 10, 11, 12, 13, 14, 15))
ymm_src_column_start_gt_01234567, ymm_src_column_end_gt_01234567 = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_src_column_start_gt_01234567, ymm_src_column_start, ymm_column_01234567)
VPCMPGTD(ymm_src_column_end_gt_01234567, ymm_src_column_end, ymm_column_01234567)
ymm_src_column_start_gt_89ABCDEF, ymm_src_column_end_gt_89ABCDEF = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_src_column_start_gt_89ABCDEF, ymm_src_column_start, ymm_column_89ABCDEF)
VPCMPGTD(ymm_src_column_end_gt_89ABCDEF, ymm_src_column_end, ymm_column_89ABCDEF)
ymm_src_mask_columns_0_to_8, ymm_src_mask_columns_8_to_16 = YMMRegister(), YMMRegister()
VPANDN(ymm_src_mask_columns_0_to_8, ymm_src_column_start_gt_01234567, ymm_src_column_end_gt_01234567)
VPANDN(ymm_src_mask_columns_8_to_16, ymm_src_column_start_gt_89ABCDEF, ymm_src_column_end_gt_89ABCDEF)
ymm_dst_mask_columns_0_to_8 = YMMRegister()
VPCMPGTD(ymm_dst_mask_columns_0_to_8, ymm_dst_column_count, ymm_column_01234567)
# data points to the first element, which is loaded into lane `reg_src_column_start`
# However, VMASKMOVPS expects a pointer to the first lane, even if that lane is not loaded.
# Adjust the pointer by subtracting column_offset, in bytes
SHL(reg_src_column_start, 2)
SUB(reg_src_ptr, reg_src_column_start.as_qword)
# Multiply stride by sizeof(float) to convert from elements to bytes
SHL(reg_src_stride, 2)
ymm_row0 = YMMRegister(), YMMRegister()
ymm_row1 = YMMRegister(), YMMRegister()
ymm_minus_inf = YMMRegister()
VMOVAPS(ymm_minus_inf, Constant.float32x8(-float("inf")))
VMOVAPS(ymm_row0[0], ymm_minus_inf)
VMOVAPS(ymm_row0[1], ymm_minus_inf)
VMOVAPS(ymm_row1[0], ymm_minus_inf)
VMOVAPS(ymm_row1[1], ymm_minus_inf)
NEG(reg_src_row_index)
with Block() as load_row0:
CMP(reg_src_row_index, reg_src_row_count)
JAE(load_row0.end)
VMASKMOVPS(ymm_row0[0], ymm_src_mask_columns_0_to_8, [reg_src_ptr])
VBLENDVPS(ymm_row0[0], ymm_minus_inf, ymm_row0[0], ymm_src_mask_columns_0_to_8)
VMASKMOVPS(ymm_row0[1], ymm_src_mask_columns_8_to_16, [reg_src_ptr + YMMRegister.size])
VBLENDVPS(ymm_row0[1], ymm_minus_inf, ymm_row0[1], ymm_src_mask_columns_8_to_16)
ADD(reg_src_ptr, reg_src_stride)
with Block() as load_row1:
INC(reg_src_row_index)
CMP(reg_src_row_index, reg_src_row_count)
JAE(load_row1.end)
VMASKMOVPS(ymm_row1[0], ymm_src_mask_columns_0_to_8, [reg_src_ptr])
VBLENDVPS(ymm_row1[0], ymm_minus_inf, ymm_row1[0], ymm_src_mask_columns_0_to_8)
VMASKMOVPS(ymm_row1[1], ymm_src_mask_columns_8_to_16, [reg_src_ptr + YMMRegister.size])
VBLENDVPS(ymm_row1[1], ymm_minus_inf, ymm_row1[1], ymm_src_mask_columns_8_to_16)
# ymm_row[0] = ( x7 x6 x5 x4 x3 x2 x1 x0 )
# ymm_row[1] = ( x15 x14 x13 x12 x11 x10 x9 x8 )
ymm_row = YMMRegister(), YMMRegister()
VMAXPS(ymm_row[0], ymm_row0[0], ymm_row1[0])
VMAXPS(ymm_row[1], ymm_row0[1], ymm_row1[1])
# ymm_row[0] = ( x14 x12 x6 x4 x10 x8 x2 x0 )
# ymm_row[1] = ( x15 x13 x7 x5 x11 x9 x3 x1 )
ymm_tmp = YMMRegister()
VSHUFPS(ymm_tmp, ymm_row[0], ymm_row[1], _MM_SHUFFLE(2, 0, 2, 0))
VSHUFPS(ymm_row[1], ymm_row[0], ymm_row[1], _MM_SHUFFLE(3, 1, 3, 1))
SWAP.REGISTERS(ymm_row[0], ymm_tmp)
# ymm_out = ( y7 y6 y3 y2 y5 y4 y1 y0 )
ymm_out = YMMRegister()
VMAXPS(ymm_out, ymm_row[0], ymm_row[1])
VPERMPD(ymm_out, ymm_out, _MM_SHUFFLE(3, 1, 2, 0))
VMASKMOVPS([reg_dst_ptr], ymm_dst_mask_columns_0_to_8, ymm_out)
RETURN()
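# Hedged NumPy model (assumption) of the micro-kernel above: one output row of 2x2-window,
# 2x2-stride max pooling over a 2x16 source patch. Lanes outside the valid column window behave as
# -inf so padding never wins the max, and only the first dst_column_count outputs are kept. The
# sketch ignores the row-offset handling and assumes the valid rows start at row 0.
def maxpool_2x2_2x2_ref(src_window, row_count, column_offset, column_count, dst_column_count):
    import numpy as np
    patch = np.full((2, 16), -np.inf, dtype=np.float32)
    patch[:row_count, column_offset:column_offset + column_count] = src_window[:row_count, :column_count]
    vertical = np.maximum(patch[0], patch[1])             # max over the two input rows
    pooled = np.maximum(vertical[0::2], vertical[1::2])   # max over adjacent column pairs
    return pooled[:dst_column_count]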
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
sqrt2_over_2 = float.fromhex("0x1.6A09E6p-1")
cos_1pi_over_8 = float.fromhex("0x1.D906BCp-1")
cos_3pi_over_8 = float.fromhex("0x1.87DE2Ap-2")
tan_1pi_over_8 = float.fromhex("0x1.A8279Ap-2")
tan_3pi_over_8 = float.fromhex("0x1.3504F4p+1")
cos_npi_over_8 = [
1.0,
cos_1pi_over_8,
sqrt2_over_2,
cos_3pi_over_8,
0.0,
-cos_3pi_over_8,
-sqrt2_over_2,
-cos_1pi_over_8,
]
cos_npi_over_4 = [
1.0,
sqrt2_over_2,
0.0,
-sqrt2_over_2
]
sin_npi_over_8 = [
0.0,
cos_3pi_over_8,
sqrt2_over_2,
cos_1pi_over_8,
1.0,
cos_1pi_over_8,
sqrt2_over_2,
cos_3pi_over_8
]
sin_npi_over_4 = [
0.0,
sqrt2_over_2,
1.0,
sqrt2_over_2
]
def _MM_SHUFFLE(z, y, x, w):
assert z & ~0b11 == 0
assert y & ~0b11 == 0
assert x & ~0b11 == 0
assert w & ~0b11 == 0
return (z << 6) | (y << 4) | (x << 2) | w
def _MM_SHUFFLE2(x, y):
assert x & ~1 == 0
assert y & ~1 == 0
return (x << 1) | y
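# Example (illustration): _MM_SHUFFLE(3, 1, 2, 0) == 0xD8 selects source lanes 0, 2, 1, 3 (from low
# to high) when used as a VPERMPD/VSHUFPS-style immediate, and _MM_SHUFFLE2(1, 0) == 0b10.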
def butterfly(a, b, negate_a=False, negate_b=False, scale_a=None, scale_b=None, negate_out_b=False, writeback=True):
assert isinstance(a, YMMRegister) or isinstance(a, LocalVariable) and a.size == YMMRegister.size
assert isinstance(b, YMMRegister) or isinstance(b, LocalVariable) and b.size == YMMRegister.size
assert isinstance(negate_a, bool)
assert isinstance(negate_b, bool)
assert isinstance(negate_out_b, bool)
assert scale_b is None or \
isinstance(scale_b, YMMRegister) or \
isinstance(scale_b, (LocalVariable, Constant)) and scale_b.size == YMMRegister.size
assert scale_a is None or \
isinstance(scale_a, YMMRegister) or \
isinstance(scale_a, (LocalVariable, Constant)) and scale_a.size == YMMRegister.size
assert scale_a is None or scale_b is None
assert isinstance(writeback, bool)
assert not negate_out_b or not negate_a and not negate_b and scale_a is None and scale_b is None
ymm_a, ymm_b = a, b
if isinstance(a, LocalVariable):
ymm_a = YMMRegister()
VMOVAPS(ymm_a, a)
if isinstance(b, LocalVariable):
ymm_b = YMMRegister()
VMOVAPS(ymm_b, b)
if scale_b is None and scale_a is None:
assert not negate_a, "Negation of a is supported only in combination with scaling"
ymm_new_a = YMMRegister()
VADDPS(ymm_new_a, ymm_a, ymm_b)
ymm_new_b = YMMRegister()
if not negate_out_b:
VSUBPS(ymm_new_b, ymm_a, ymm_b)
else:
VSUBPS(ymm_new_b, ymm_b, ymm_a)
if not negate_b:
SWAP.REGISTERS(ymm_new_a, ymm_a)
SWAP.REGISTERS(ymm_new_b, ymm_b)
else:
SWAP.REGISTERS(ymm_new_a, ymm_b)
SWAP.REGISTERS(ymm_new_b, ymm_a)
elif scale_a is not None:
ymm_a_copy = YMMRegister()
VMOVAPS(ymm_a_copy, ymm_a)
if not negate_a and not negate_b:
VFMADD132PS(ymm_a, ymm_b, scale_a)
VFMSUB132PS(ymm_a_copy, ymm_b, scale_a)
elif not negate_a and negate_b:
VFMSUB132PS(ymm_a, ymm_b, scale_a)
VFMADD132PS(ymm_a_copy, ymm_b, scale_a)
elif negate_a and not negate_b:
VFNMADD132PS(ymm_a, ymm_b, scale_a)
VFNMSUB132PS(ymm_a_copy, ymm_b, scale_a)
elif negate_a and negate_b:
VFNMSUB132PS(ymm_a, ymm_b, scale_a)
VFNMADD132PS(ymm_a_copy, ymm_b, scale_a)
SWAP.REGISTERS(ymm_b, ymm_a_copy)
elif scale_b is not None:
ymm_a_copy = YMMRegister()
VMOVAPS(ymm_a_copy, ymm_a)
if not negate_a and not negate_b:
VFMADD231PS(ymm_a, ymm_b, scale_b)
VFNMADD231PS(ymm_a_copy, ymm_b, scale_b)
elif not negate_a and negate_b:
VFNMADD231PS(ymm_a, ymm_b, scale_b)
VFMADD231PS(ymm_a_copy, ymm_b, scale_b)
elif negate_a and not negate_b:
VFMSUB231PS(ymm_a, ymm_b, scale_b)
VFNMSUB231PS(ymm_a_copy, ymm_b, scale_b)
elif negate_a and negate_b:
VFNMSUB231PS(ymm_a, ymm_b, scale_b)
VFMSUB231PS(ymm_a_copy, ymm_b, scale_b)
SWAP.REGISTERS(ymm_b, ymm_a_copy)
if writeback and isinstance(a, LocalVariable):
VMOVAPS(a, ymm_a)
if writeback and isinstance(b, LocalVariable):
VMOVAPS(b, ymm_b)
return ymm_a, ymm_b
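# Hedged NumPy reference (assumption, for documentation only) of the butterfly above: it rewrites
# the pair (a, b) as (a + b, a - b); negate_b pre-negates b, so the outputs become (a - b, a + b);
# negate_out_b flips the sign of the second output; scale_b folds a per-lane multiplier into b
# before the butterfly (the scale_a/negate_a variants are analogous and omitted here).
def butterfly_ref(a, b, negate_b=False, negate_out_b=False, scale_b=None):
    import numpy as np
    a = np.asarray(a, dtype=np.float32)
    b = np.asarray(b, dtype=np.float32)
    if scale_b is not None:
        b = b * np.asarray(scale_b, dtype=np.float32)
    new_a = a + b
    new_b = (b - a) if negate_out_b else (a - b)
    return (new_b, new_a) if negate_b else (new_a, new_b)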
def transpose2x2x128(ymm_a, ymm_b, use_blend=True):
# ymm_a = (a.lo, a.hi)
# ymm_b = (b.lo, b.hi)
if use_blend:
# ymm_ab = (a.hi, b.lo)
ymm_ab = YMMRegister()
VPERM2F128(ymm_ab, ymm_a, ymm_b, 0x21)
# ymm_a = (a.lo, b.lo)
VBLENDPS(ymm_a, ymm_a, ymm_ab, 0xF0)
# ymm_b = (a.hi, b.hi)
VBLENDPS(ymm_b, ymm_b, ymm_ab, 0x0F)
else:
# ymm_new_a = (a.lo, b.lo)
ymm_new_a = YMMRegister()
VINSERTF128(ymm_new_a, ymm_a, ymm_b.as_xmm, 1)
# ymm_new_b = (a.hi, b.hi)
ymm_new_b = YMMRegister()
VPERM2F128(ymm_new_b, ymm_a, ymm_b, 0x31)
SWAP.REGISTERS(ymm_a, ymm_new_a)
SWAP.REGISTERS(ymm_b, ymm_new_b)
def transpose2x2x2x64(ymm_a, ymm_b, use_blend=True):
# ymm_a = (a0, a1, a2, a3)
# ymm_b = (b0, b1, b2, b3)
if use_blend:
# ymm_ab = (a1, b0, a3, b2)
ymm_ab = YMMRegister()
VSHUFPD(ymm_ab, ymm_a, ymm_b, 0b0101)
# ymm_a = (a0, b0, a2, b2)
VBLENDPS(ymm_a, ymm_a, ymm_ab, 0b11001100)
# ymm_b = (a1, b1, a3, b3)
VBLENDPS(ymm_b, ymm_b, ymm_ab, 0b00110011)
else:
# ymm_new_a = (a0, b0, a2, b2)
ymm_new_a = YMMRegister()
VUNPCKLPD(ymm_new_a, ymm_a, ymm_b)
# ymm_new_b = (a1, b1, a3, b3)
ymm_new_b = YMMRegister()
VUNPCKHPD(ymm_new_b, ymm_a, ymm_b)
SWAP.REGISTERS(ymm_a, ymm_new_a)
SWAP.REGISTERS(ymm_b, ymm_new_b)
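# Illustration (assumption): viewing each YMM register as four 64-bit lanes (a0 a1 a2 a3) and
# (b0 b1 b2 b3), transpose2x2x128 leaves (a0 a1 b0 b1) and (a2 a3 b2 b3) -- a 2x2 transpose of the
# 128-bit halves -- while transpose2x2x2x64 leaves (a0 b0 a2 b2) and (a1 b1 a3 b3) -- a 2x2
# transpose of the 64-bit lanes inside each half, matching the comments in both helpers.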
def compute_masks(masks, reg_column_offset, reg_column_count):
assert isinstance(masks, list) and all(isinstance(mask, (YMMRegister, LocalVariable)) for mask in masks)
assert isinstance(reg_column_offset, GeneralPurposeRegister64)
assert isinstance(reg_column_count, GeneralPurposeRegister64)
def interleave(sequence_a, sequence_b):
assert isinstance(sequence_a, list) and isinstance(sequence_b, list) or isinstance(sequence_a, tuple) and isinstance(sequence_b, tuple)
if isinstance(sequence_a, list):
return list(sum(zip(sequence_a, sequence_b), ()))
else:
return sum(zip(sequence_a, sequence_b), ())
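# Example (illustration): interleave([r0, r1, r2], [i0, i1, i2]) == [r0, i0, r1, i1, r2, i2];
# tuples are interleaved into a tuple. This is how separate real/imaginary register lists are
# zipped back into the data[0::2] / data[1::2] layout used by the FFT kernels.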
|
arg_input = Argument(ptr(const_float_), "input")
arg_output = Argument(ptr(float_), "output")
arg_length = Argument(size_t, "length")
arg_negative_slope = Argument(float_, "negative_slope")
with Function("nnp_relu__avx2",
(arg_input, arg_output, arg_length, arg_negative_slope),
target=uarch.default + isa.avx2):
reg_input = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_input, arg_input)
reg_output = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_output, arg_output)
reg_length = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_length, arg_length)
ymm_negative_slope = YMMRegister()
LOAD.ARGUMENT(ymm_negative_slope.as_xmm, arg_negative_slope)
VBROADCASTSS(ymm_negative_slope, ymm_negative_slope.as_xmm)
loop = Loop()
TEST(reg_length, reg_length)
JZ(loop.end)
with loop:
# Load (unaligned!) data and update input pointer
ymm_data = YMMRegister()
VMOVUPS(ymm_data, [reg_input])
ADD(reg_input, YMMRegister.size)
# Scale data with negative slope (for negative inputs)
ymm_scaled_data = YMMRegister()
VMULPS(ymm_scaled_data, ymm_data, ymm_negative_slope)
# Select scaled data if input is negative
VBLENDVPS(ymm_data, ymm_data, ymm_scaled_data, ymm_data)
# Stream (aligned!) data to memory and update output pointer
VMOVNTPS([reg_output], ymm_data)
ADD(reg_output, YMMRegister.size)
SUB(reg_length, YMMRegister.size // float_.size)
JNZ(loop.begin)
RETURN()
arg_data = Argument(ptr(float_), "data")
arg_length = Argument(size_t, "length")
arg_negative_slope = Argument(float_, "negative_slope")
with Function("nnp_inplace_relu__avx2",
(arg_data, arg_length, arg_negative_slope),
target=uarch.default + isa.avx2):
reg_data = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_data, arg_data)
reg_length = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_length, arg_length)
ymm_negative_slope = YMMRegister()
LOAD.ARGUMENT(ymm_negative_slope.as_xmm, arg_negative_slope)
VBROADCASTSS(ymm_negative_slope, ymm_negative_slope.as_xmm)
loop = Loop()
TEST(reg_length, reg_length)
JZ(loop.end)
with loop:
# Load data
ymm_data = YMMRegister()
VMOVAPS(ymm_data, [reg_data])
# Scale data with negative slope (for negative inputs)
ymm_scaled_data = YMMRegister()
VMULPS(ymm_scaled_data, ymm_data, ymm_negative_slope)
# Select scaled data if input is negative
VBLENDVPS(ymm_data, ymm_data, ymm_scaled_data, ymm_data)
# Store data back to the same location and update pointer
VMOVAPS([reg_data], ymm_data)
ADD(reg_data, YMMRegister.size)
SUB(reg_length, YMMRegister.size // float_.size)
JNZ(loop.begin)
RETURN()
arg_output_gradient = Argument(ptr(const_float_), "output_gradient")
arg_input = Argument(ptr(const_float_), "input")
arg_input_gradient = Argument(ptr(float_), "input_gradient")
arg_length = Argument(size_t, "length")
arg_negative_slope = Argument(float_, "negative_slope")
with Function("nnp_grad_relu__avx2",
(arg_output_gradient, arg_input, arg_input_gradient, arg_length, arg_negative_slope),
target=uarch.default + isa.avx2):
reg_output_gradient = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_output_gradient, arg_output_gradient)
reg_input = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_input, arg_input)
reg_input_gradient = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_input_gradient, arg_input_gradient)
reg_length = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_length, arg_length)
ymm_negative_slope = YMMRegister()
LOAD.ARGUMENT(ymm_negative_slope.as_xmm, arg_negative_slope)
VBROADCASTSS(ymm_negative_slope, ymm_negative_slope.as_xmm)
loop = Loop()
TEST(reg_length, reg_length)
JZ(loop.end)
with loop:
# Load (unaligned!) gradient and update output gradient pointer
ymm_gradient = YMMRegister()
VMOVUPS(ymm_gradient, [reg_output_gradient])
ADD(reg_output_gradient, YMMRegister.size)
# Load (unaligned!) data and update input pointer
ymm_data = YMMRegister()
VMOVUPS(ymm_data, [reg_input])
ADD(reg_input, YMMRegister.size)
# Scale gradient with negative slope (for negative inputs)
ymm_scaled_gradient = YMMRegister()
VMULPS(ymm_scaled_gradient, ymm_gradient, ymm_negative_slope)
# Select scaled gradient if input is negative
VBLENDVPS(ymm_gradient, ymm_gradient, ymm_scaled_gradient, ymm_data)
# Store (aligned!) gradient to memory and update input gradient pointer
VMOVAPS([reg_input_gradient], ymm_gradient)
ADD(reg_input_gradient, YMMRegister.size)
SUB(reg_length, YMMRegister.size // float_.size)
JNZ(loop.begin)
RETURN()
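# Hedged NumPy reference (assumption; names are illustrative only) for the three leaky-ReLU kernels
# above: the forward passes keep positive lanes and scale negative lanes by negative_slope, and the
# gradient kernel scales the incoming gradient wherever the *input* was negative, matching the
# sign-based VBLENDVPS selects.
def relu_ref(x, negative_slope):
    import numpy as np
    x = np.asarray(x, dtype=np.float32)
    return np.where(x < 0, x * np.float32(negative_slope), x)

def grad_relu_ref(output_gradient, x, negative_slope):
    import numpy as np
    output_gradient = np.asarray(output_gradient, dtype=np.float32)
    x = np.asarray(x, dtype=np.float32)
    return np.where(x < 0, output_gradient * np.float32(negative_slope), output_gradient)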
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
log2e = float.fromhex("+0x1.715476p+3")
magic_bias = float.fromhex("+0x1.800000p+23")
zero_cutoff = float.fromhex("-0x1.9FE368p+6")
inf_cutoff = float.fromhex("+0x1.62E42Ep+6")
minus_ln2_hi = float.fromhex("-0x1.62E430p-4")
minus_ln2_lo = float.fromhex("+0x1.05C610p-32")
plus_inf = float("inf")
c2 = float.fromhex("0x1.00088Ap-1")
c3 = float.fromhex("0x1.555A86p-3")
t0 = float.fromhex("0x1.000000p+0")
t1 = float.fromhex("0x1.172B84p+0")
t2 = float.fromhex("0x1.306FE0p+0")
t3 = float.fromhex("0x1.4BFDAEp+0")
t4 = float.fromhex("0x1.6A09E6p+0")
t5 = float.fromhex("0x1.8ACE54p+0")
t6 = float.fromhex("0x1.AE89FAp+0")
t7 = float.fromhex("0x1.D5818Ep+0")
min_exponent = (-126 << 23) & 0xFFFFFFFF
max_exponent = 127 << 23
default_exponent = 0x3F800000
mantissa_mask = 0x007FFFF8
x_arg = Argument(m256, "x")
with Function("_mm256_exp_ps", (x_arg,), m256,
target=uarch.default + isa.fma3 + isa.avx2):
ymm_x = YMMRegister()
LOAD.ARGUMENT(ymm_x, x_arg)
ymm_magic_bias = YMMRegister()
VMOVAPS(ymm_magic_bias, Constant.float32x8(magic_bias))
ymm_t = YMMRegister()
VMOVAPS(ymm_t, ymm_x)
VFMADD132PS(ymm_t, ymm_magic_bias, Constant.float32x8(log2e))
ymm_e1, ymm_e2 = YMMRegister(), YMMRegister()
VPAND(ymm_e2, ymm_t, Constant.uint32x8(mantissa_mask))
VPSLLD(ymm_e2, ymm_e2, 20)
ymm_tf = YMMRegister()
VPERMPS(ymm_tf, ymm_t, Constant.float32x8(t0, t1, t2, t3, t4, t5, t6, t7))
VSUBPS(ymm_t, ymm_t, ymm_magic_bias)
# rx = fma(t, minus_ln2_lo, fma(t, minus_ln2_hi, x))
# rx := t * minus_ln2_hi + x
# rx := t * minus_ln2_lo + rx
ymm_rx = YMMRegister()
VMOVAPS(ymm_rx, ymm_x)
VFMADD231PS(ymm_rx, ymm_t, Constant.float32x8(minus_ln2_hi))
VFMADD231PS(ymm_rx, ymm_t, Constant.float32x8(minus_ln2_lo))
VPMAXSD(ymm_e1, ymm_e2, Constant.uint32x8(min_exponent))
VPMINSD(ymm_e1, ymm_e1, Constant.uint32x8(max_exponent))
ymm_default_exponent = YMMRegister()
VMOVDQA(ymm_default_exponent, Constant.uint32x8(default_exponent))
VPSUBD(ymm_e2, ymm_e2, ymm_e1)
VPADDD(ymm_e1, ymm_e1, ymm_default_exponent)
VPADDD(ymm_e2, ymm_e2, ymm_default_exponent)
# rf = fma(rx, rx * fma(rx, c3, c2), rx)
# rf := rx * c3 + c2
# rf := rx * rf
# rf := rx * rf + rx
ymm_rf = YMMRegister()
VMOVAPS(ymm_rf, Constant.float32x8(c2))
VFMADD231PS(ymm_rf, ymm_rx, Constant.float32x8(c3))
VMULPS(ymm_rf, ymm_rf, ymm_rx)
VFMADD213PS(ymm_rf, ymm_rx, ymm_rx)
# f = fma(tf, rf, tf)
VFMADD231PS(ymm_tf, ymm_tf, ymm_rf)
ymm_f = ymm_tf
VMULPS(ymm_f, ymm_f, ymm_e1)
VMULPS(ymm_f, ymm_f, ymm_e2)
RETURN(ymm_f)
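# Hedged NumPy model (assumption, not bit-exact) of the range reduction used above: t rounds
# x / (ln2/8) to an integer, its low 3 bits select a 2**(j/8) table entry (t0..t7), the remaining
# bits give a power of two, and a cubic polynomial in the reduced argument r approximates expm1(r).
# The zero/inf cutoffs and the split-exponent clamping are omitted here; numpy is imported locally.
def ref_exp(x):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    table = np.array([t0, t1, t2, t3, t4, t5, t6, t7])
    t = np.round(x * (8.0 / np.log(2.0)))               # x / (ln2/8), rounded to nearest
    j = t.astype(np.int64) & 7                          # table index: 2**(j/8)
    n = (t.astype(np.int64) - j) // 8                   # remaining integer exponent
    r = x - t * (np.log(2.0) / 8.0)                     # reduced argument, |r| <= ln2/16
    p = r + r * r * (c2 + c3 * r)                       # cubic approximation of expm1(r)
    return np.ldexp(table[j] * (1.0 + p), n)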
|
from common import _MM_SHUFFLE
from vecmath.exp import simd_exp
arg_n = Argument(size_t, "n")
arg_v = Argument(ptr(const_float_), "v")
with Function("max__avx", (arg_n, arg_v), float_,
target=uarch.default + isa.avx):
reg_n = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_n, arg_n)
reg_v = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_v, arg_v)
unroll_loop = Loop()
vector_loop = Loop()
final_block = Block()
simd_width = YMMRegister.size // float_.size
unroll_factor = 4
# Initialize reduction registers with the first element (v[0])
ymm_ms = [YMMRegister() for _ in range(unroll_factor)]
VBROADCASTSS(ymm_ms[0], [reg_v])
for ymm_m in ymm_ms[1:]:
VMOVAPS(ymm_m, ymm_ms[0])
# Unrolled vectorized loop
SUB(reg_n, simd_width * unroll_factor)
JB(unroll_loop.end)
with unroll_loop:
for i, ymm_m in enumerate(ymm_ms):
VMAXPS(ymm_m, ymm_m, [reg_v + i * YMMRegister.size])
SUB(reg_v, -unroll_factor * YMMRegister.size)
SUB(reg_n, simd_width * unroll_factor)
JAE(unroll_loop.begin)
VMAXPS(ymm_ms[0], ymm_ms[0], ymm_ms[1])
VMAXPS(ymm_ms[2], ymm_ms[2], ymm_ms[3])
VMAXPS(ymm_ms[0], ymm_ms[0], ymm_ms[2])
ymm_m = ymm_ms[0]
ADD(reg_n, simd_width * unroll_factor)
JZ(final_block.end)
# Vectorized loop without unrolling
SUB(reg_n, simd_width)
JB(vector_loop.end)
with vector_loop:
VMAXPS(ymm_m, ymm_m, [reg_v])
ADD(reg_v, YMMRegister.size)
SUB(reg_n, simd_width)
JAE(vector_loop.begin)
ADD(reg_n, simd_width)
JZ(final_block.end)
# Process remainder: 0 < reg_n < simd_width
with final_block:
reg_mask = GeneralPurposeRegister64()
LEA(reg_mask, Constant.uint32x16(*([0xFFFFFFFF] * 8 + [0x00000000] * 8)))
NEG(reg_n)
LEA(reg_mask, [reg_mask + reg_n * 4 + 32])  # 8-dword window with the first reg_n lanes set, rest zero
ymm_mask = YMMRegister()
VMOVUPS(ymm_mask, [reg_mask])
ymm_temp = YMMRegister()
VMASKMOVPS(ymm_temp, ymm_mask, [reg_v])
VBLENDVPS(ymm_temp, ymm_m, ymm_temp, ymm_mask)  # masked-out lanes fall back to the running maximum
VMAXPS(ymm_m, ymm_m, ymm_temp)
ymm_temp = YMMRegister()
VPERM2F128(ymm_temp, ymm_m, ymm_m, 0x01)
VMAXPS(ymm_m, ymm_m, ymm_temp)
VPERMILPS(ymm_temp, ymm_m, _MM_SHUFFLE(1, 0, 3, 2))
VMAXPS(ymm_m, ymm_m, ymm_temp)
VPERMILPS(ymm_temp, ymm_m, _MM_SHUFFLE(2, 3, 0, 1))
VMAXPS(ymm_m, ymm_m, ymm_temp)
RETURN(ymm_m.as_xmm)
arg_n = Argument(size_t, "n")
arg_v = Argument(ptr(const_float_), "v")
arg_c = Argument(float_, "c")
with Function("sum_exp_minus_c__avx2", (arg_n, arg_v, arg_c), float_,
target=uarch.default + isa.avx2):
reg_n = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_n, arg_n)
reg_v = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_v, arg_v)
ymm_c = YMMRegister()
LOAD.ARGUMENT(ymm_c.as_xmm, arg_c)
VBROADCASTSS(ymm_c, ymm_c.as_xmm)
unroll_loop = Loop()
vector_loop = Loop()
final_block = Block()
simd_width = YMMRegister.size // float_.size
unroll_factor = 3
# Clear reduction registers
ymm_sums = [YMMRegister() for _ in range(unroll_factor)]
for ymm_sum in ymm_sums:
VXORPS(ymm_sum.as_xmm, ymm_sum.as_xmm, ymm_sum.as_xmm)
# Unrolled vectorized loop
SUB(reg_n, simd_width * unroll_factor)
JB(unroll_loop.end)
with unroll_loop:
ymm_xs = [YMMRegister() for _ in ymm_sums]
for i, ymm_x in enumerate(ymm_xs):
VMOVUPS(ymm_x, [reg_v + i * YMMRegister.size])
VSUBPS(ymm_x, ymm_x, ymm_c)
ymm_ys = simd_exp(ymm_xs)
for ymm_sum, ymm_y in zip(ymm_sums, ymm_ys):
VADDPS(ymm_sum, ymm_sum, ymm_y)
SUB(reg_v, -unroll_factor * YMMRegister.size)
SUB(reg_n, simd_width * unroll_factor)
JAE(unroll_loop.begin)
VADDPS(ymm_sums[0], ymm_sums[0], ymm_sums[1])
VADDPS(ymm_sums[0], ymm_sums[0], ymm_sums[2])
ymm_sum = ymm_sums[0]
ADD(reg_n, simd_width * unroll_factor)
JZ(final_block.end)
# Vectorized loop without unrolling
SUB(reg_n, simd_width)
JB(vector_loop.end)
with vector_loop:
ymm_x = YMMRegister()
VMOVUPS(ymm_x, [reg_v])
VSUBPS(ymm_x, ymm_x, ymm_c)
ymm_y = simd_exp([ymm_x])[0]
VADDPS(ymm_sum, ymm_sum, ymm_y)
ADD(reg_v, YMMRegister.size)
SUB(reg_n, simd_width)
JAE(vector_loop.begin)
ADD(reg_n, simd_width)
JZ(final_block.end)
# Process remainder: 0 < reg_n < simd_width
with final_block:
ymm_mask = YMMRegister()
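# Build a tail mask: lane i is all-ones when i < n (signed compare is safe because 0 < n < 8)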
VMOVD(ymm_mask.as_xmm, reg_n.as_dword)
VPBROADCASTD(ymm_mask, ymm_mask.as_xmm)
VPCMPGTD(ymm_mask, ymm_mask, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
ymm_x = YMMRegister()
VMASKMOVPS(ymm_x, ymm_mask, [reg_v])
VSUBPS(ymm_x, ymm_x, ymm_c)
ymm_y = simd_exp([ymm_x])[0]
VANDPS(ymm_y, ymm_y, ymm_mask)
VADDPS(ymm_sum, ymm_sum, ymm_y)
ymm_temp = YMMRegister()
VPERM2F128(ymm_temp, ymm_sum, ymm_sum, 0x01)
VADDPS(ymm_sum, ymm_sum, ymm_temp)
VPERMILPS(ymm_temp, ymm_sum, _MM_SHUFFLE(1, 0, 3, 2))
VADDPS(ymm_sum, ymm_sum, ymm_temp)
VPERMILPS(ymm_temp, ymm_sum, _MM_SHUFFLE(2, 3, 0, 1))
VADDPS(ymm_sum, ymm_sum, ymm_temp)
RETURN(ymm_sum.as_xmm)
def scaled_exp_minus_c(reg_n, reg_x, reg_y, ymm_scale, ymm_c):
unroll_loop = Loop()
vector_loop = Loop()
final_block = Block()
simd_width = YMMRegister.size // float_.size
unroll_factor = 3
# Unrolled vectorized loop
SUB(reg_n, simd_width * unroll_factor)
JB(unroll_loop.end)
with unroll_loop:
ymm_xs = [YMMRegister() for _ in range(unroll_factor)]
for i, ymm_x in enumerate(ymm_xs):
VMOVUPS(ymm_x, [reg_x + i * YMMRegister.size])
VSUBPS(ymm_x, ymm_x, ymm_c)
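# For the out-of-place variant advance x here; in the in-place variant reg_x aliases reg_y, which is advanced after the store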
if reg_x != reg_y:
SUB(reg_x, -unroll_factor * YMMRegister.size)
ymm_ys = simd_exp(ymm_xs)
for i, ymm_y in enumerate(ymm_ys):
VMULPS(ymm_y, ymm_y, ymm_scale)
VMOVUPS([reg_y + i * YMMRegister.size], ymm_y)
SUB(reg_y, -unroll_factor * YMMRegister.size)
SUB(reg_n, simd_width * unroll_factor)
JAE(unroll_loop.begin)
ADD(reg_n, simd_width * unroll_factor)
JZ(final_block.end)
# Vectorized loop without unrolling
SUB(reg_n, simd_width)
JB(vector_loop.end)
with vector_loop:
ymm_x = YMMRegister()
VMOVUPS(ymm_x, [reg_x])
if reg_x != reg_y:
ADD(reg_x, YMMRegister.size)
VSUBPS(ymm_x, ymm_x, ymm_c)
ymm_y = simd_exp([ymm_x])[0]
VMULPS(ymm_y, ymm_y, ymm_scale)
VMOVUPS([reg_y], ymm_y)
ADD(reg_y, YMMRegister.size)
SUB(reg_n, simd_width)
JAE(vector_loop.begin)
ADD(reg_n, simd_width)
JZ(final_block.end)
# Process remainder: 0 < reg_n < simd_width
with final_block:
ymm_mask = YMMRegister()
VMOVD(ymm_mask.as_xmm, reg_n.as_dword)
VPBROADCASTD(ymm_mask, ymm_mask.as_xmm)
VPCMPGTD(ymm_mask, ymm_mask, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
ymm_x = YMMRegister()
VMASKMOVPS(ymm_x, ymm_mask, [reg_x])
VSUBPS(ymm_x, ymm_x, ymm_c)
ymm_y = simd_exp([ymm_x])[0]
VMULPS(ymm_y, ymm_y, ymm_scale)
VMASKMOVPS([reg_y], ymm_mask, ymm_y)
arg_n = Argument(size_t, "n")
arg_v = Argument(ptr(const_float_), "v")
arg_scale = Argument(float_, "scale")
arg_c = Argument(float_, "c")
with Function("inplace_scaled_exp_minus_c__avx2", (arg_n, arg_v, arg_scale, arg_c),
target=uarch.default + isa.avx2):
reg_n = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_n, arg_n)
reg_v = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_v, arg_v)
ymm_scale = YMMRegister()
LOAD.ARGUMENT(ymm_scale.as_xmm, arg_scale)
VBROADCASTSS(ymm_scale, ymm_scale.as_xmm)
ymm_c = YMMRegister()
LOAD.ARGUMENT(ymm_c.as_xmm, arg_c)
VBROADCASTSS(ymm_c, ymm_c.as_xmm)
scaled_exp_minus_c(reg_n, reg_v, reg_v, ymm_scale, ymm_c)
RETURN()
arg_n = Argument(size_t, "n")
arg_x = Argument(ptr(const_float_), "x")
arg_y = Argument(ptr(float_), "y")
arg_scale = Argument(float_, "scale")
arg_c = Argument(float_, "c")
with Function("scaled_exp_minus_c__avx2", (arg_n, arg_x, arg_y, arg_scale, arg_c),
target=uarch.default + isa.avx2):
reg_n = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_n, arg_n)
reg_x = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_x, arg_x)
reg_y = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_y, arg_y)
ymm_scale = YMMRegister()
LOAD.ARGUMENT(ymm_scale.as_xmm, arg_scale)
VBROADCASTSS(ymm_scale, ymm_scale.as_xmm)
ymm_c = YMMRegister()
LOAD.ARGUMENT(ymm_c.as_xmm, arg_c)
VBROADCASTSS(ymm_c, ymm_c.as_xmm)
scaled_exp_minus_c(reg_n, reg_x, reg_y, ymm_scale, ymm_c)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
mr, nr = 3, 4
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_s8gemm_only_{mr}x{nr}__fma3".format(mr=mr, nr=nr),
(arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
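# Convert the row stride of C from elements to bytes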
SHL(reg_row_stride, 2)
with Block() as prefetch_c:
for m in range(mr):
PREFETCHT0([reg_c])
if m + 1 != mr:
ADD(reg_c, reg_row_stride)
ymm_c = [[YMMRegister() for n in range(nr)] for m in range(mr)]
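# VZEROALL clears all YMM registers, zero-initializing the accumulators declared above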
VZEROALL()
ymm_a = [YMMRegister() for m in range(mr)]
ymm_b_n = YMMRegister()
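# Sketch of one k-iteration of the loop below (element-wise tuple GEMM; a, b, c denote the packed 8-float tuples):
#   for m in 0..2:
#     for n in 0..3:
#       c[m][n][0:8] += a[m][0:8] * b[n][0:8]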
with Loop() as loop:
for m in range(mr):
VMOVAPS(ymm_a[m], [reg_a + m * YMMRegister.size])
SUB(reg_a, -mr * YMMRegister.size)
for n in range(nr):
VMOVAPS(ymm_b_n, [reg_b + n * YMMRegister.size])
for m in range(mr):
VFMADD231PS(ymm_c[m][n], ymm_a[m], ymm_b_n)
SUB(reg_b, -nr * YMMRegister.size)
DEC(reg_k)
JNZ(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
with Block() as update_c:
for m in reversed(range(mr)):
for n in range(nr):
VADDPS(ymm_c[m][n], ymm_c[m][n], [reg_c + n * YMMRegister.size])
VMOVAPS([reg_c + n * YMMRegister.size], ymm_c[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in reversed(range(mr)):
for n in range(nr):
VMOVAPS([reg_c + n * YMMRegister.size], ymm_c[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
arg_mr = Argument(uint32_t, "mr")
arg_nr = Argument(uint32_t, "nr")
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_s8gemm_upto_{mr}x{nr}__fma3".format(mr=mr, nr=nr),
(arg_mr, arg_nr, arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_mr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_mr, arg_mr)
reg_nr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_nr, arg_nr)
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
ymm_c = [[YMMRegister() for n in range(nr)] for m in range(mr)]
VZEROALL()
ymm_a = [YMMRegister() for m in range(mr)]
ymm_b_n = YMMRegister()
with Loop() as loop:
with Block() as load_a:
for m in range(mr):
VMOVAPS(ymm_a[m], [reg_a])
ADD(reg_a, YMMRegister.size)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(load_a.end)
with Block() as load_b:
for n in range(nr):
VMOVAPS(ymm_b_n, [reg_b])
ADD(reg_b, YMMRegister.size)
for m in range(mr):
VFMADD231PS(ymm_c[m][n], ymm_a[m], ymm_b_n)
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(load_b.end)
DEC(reg_k)
JNE(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
with Block() as update_c:
for m in range(mr):
with Block() as update_c_row:
for n in range(nr):
VADDPS(ymm_c[m][n], ymm_c[m][n], [reg_c + n * YMMRegister.size])
VMOVAPS([reg_c + n * YMMRegister.size], ymm_c[m][n])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(update_c_row.end)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(update_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in range(mr):
with Block() as store_c_row:
for n in range(nr):
VMOVAPS([reg_c + n * YMMRegister.size], ymm_c[m][n])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(store_c_row.end)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(store_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
mr, nr = 2, 4
arg_input_channels = Argument(size_t, "input_channels")
arg_image_size = Argument(size_t, "image_size")
arg_input = Argument(ptr(const_float_), "input")
arg_kernel = Argument(ptr(const_float_), "kernel")
arg_output = Argument(ptr(float_), "output")
with Function("nnp_conv1x1_only_{mr}x{nr}__fma3".format(mr=mr, nr=nr),
(arg_input_channels, arg_image_size, arg_input, arg_kernel, arg_output),
target=uarch.default + isa.fma3):
reg_input_channels = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_input_channels, arg_input_channels)
SHL(reg_input_channels, 2)
reg_image_size = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_image_size, arg_image_size)
SHL(reg_image_size, 2)
reg_inputs = [GeneralPurposeRegister64() for m in range(mr)]
LOAD.ARGUMENT(reg_inputs[0], arg_input)
for m in range(1, mr):
LEA(reg_inputs[m], [reg_inputs[m - 1] + reg_image_size * 1])
reg_kernel = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_kernel, arg_kernel)
reg_outputs = [GeneralPurposeRegister64() for n in range(nr)]
LOAD.ARGUMENT(reg_outputs[0], arg_output)
for n in range(1, nr):
LEA(reg_outputs[n], [reg_outputs[n - 1] + reg_image_size * 1])
ymm_kernel = [[YMMRegister() for n in range(nr)] for m in range(mr)]
for n in range(nr):
for m in range(mr):
VBROADCASTSS(ymm_kernel[m][n], [reg_kernel + m * float_.size])
if n + 1 != nr:
ADD(reg_kernel, reg_input_channels)
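# The kernel is packed as kernel[output_channel][input_channel]; ymm_kernel[m][n] holds kernel[n][m] broadcast to all lanes
# Each pixel of output channel n then accumulates kernel[n][m] * input[m][pixel] over the mr input channels handled here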
main_loop = Loop()
final_block = Block()
SUB(reg_image_size, YMMRegister.size)
JB(main_loop.end)
with main_loop:
# Load vectors from different channels of the output image
ymm_outputs = [YMMRegister() for n in range(nr)]
for reg_output, ymm_output in zip(reg_outputs, ymm_outputs):
VMOVUPS(ymm_output, [reg_output])
for m, reg_input in enumerate(reg_inputs):
# Load vector for a channel of the input image
ymm_input = YMMRegister()
VMOVUPS(ymm_input, [reg_input])
ADD(reg_input, YMMRegister.size)
# Update all outputs using the input and corresponding kernel elements
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VFMADD231PS(ymm_output, ymm_kernel[m][n], ymm_input)
if reg_input is reg_inputs[-1]:
VMOVUPS([reg_output], ymm_output)
ADD(reg_output, YMMRegister.size)
SUB(reg_image_size, YMMRegister.size)
JAE(main_loop.begin)
ADD(reg_image_size, YMMRegister.size)
JZ(final_block.end)
with final_block:
reg_mask, ymm_mask = GeneralPurposeRegister64(), YMMRegister()
LEA(reg_mask, Constant.uint32x16(*([0xFFFFFFFF] * 8 + [0x00000000] * 8)))
SUB(reg_mask, reg_image_size)
VMOVDQU(ymm_mask, [reg_mask + YMMRegister.size])
# Load vectors from different channels of the output image
ymm_outputs = [YMMRegister() for n in range(nr)]
for reg_output, ymm_output in zip(reg_outputs, ymm_outputs):
VMASKMOVPS(ymm_output, ymm_mask, [reg_output])
for m, reg_input in enumerate(reg_inputs):
# Load vector for a channel of the input image
ymm_input = YMMRegister()
VMASKMOVPS(ymm_input, ymm_mask, [reg_input])
# Update all outputs using the input and corresponding kernel elements
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VFMADD231PS(ymm_output, ymm_kernel[m][n], ymm_input)
if reg_input is reg_inputs[-1]:
VMASKMOVPS([reg_output], ymm_mask, ymm_output)
RETURN()
arg_mr = Argument(uint32_t, "mr")
arg_nr = Argument(uint32_t, "nr")
arg_input_channels = Argument(size_t, "input_channels")
arg_image_size = Argument(size_t, "image_size")
arg_input = Argument(ptr(const_float_), "input")
arg_kernel = Argument(ptr(const_float_), "kernel")
arg_output = Argument(ptr(float_), "output")
with Function("nnp_conv1x1_upto_{mr}x{nr}__fma3".format(mr=mr, nr=nr),
(arg_mr, arg_nr, arg_input_channels, arg_image_size, arg_input, arg_kernel, arg_output),
target=uarch.default + isa.fma3):
reg_mr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_mr, arg_mr)
reg_nr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_nr, arg_nr)
reg_input_channels = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_input_channels, arg_input_channels)
SHL(reg_input_channels, 2)
reg_image_size = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_image_size, arg_image_size)
SHL(reg_image_size, 2)
reg_inputs = [GeneralPurposeRegister64() for m in range(mr)]
LOAD.ARGUMENT(reg_inputs[0], arg_input)
for m in range(1, mr):
LEA(reg_inputs[m], [reg_inputs[m - 1] + reg_image_size * 1])
reg_kernel = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_kernel, arg_kernel)
reg_outputs = [GeneralPurposeRegister64() for n in range(nr)]
LOAD.ARGUMENT(reg_outputs[0], arg_output)
for n in range(1, nr):
LEA(reg_outputs[n], [reg_outputs[n - 1] + reg_image_size * 1])
VZEROALL()
ymm_inputs = [YMMRegister() for m in range(mr)]
ymm_kernel = [[YMMRegister() for n in range(nr)] for m in range(mr)]
with Block() as load_kernels:
for n in range(nr):
with Block() as load_kernels_row:
for m in range(mr):
VBROADCASTSS(ymm_kernel[m][n], [reg_kernel + m * float_.size])
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(load_kernels_row.end)
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(load_kernels.end)
ADD(reg_kernel, reg_input_channels)
main_loop = Loop()
final_block = Block()
SUB(reg_image_size, YMMRegister.size)
JB(main_loop.end)
with main_loop:
# Load vectors from different channels of the output image
ymm_outputs = [YMMRegister() for n in range(nr)]
with Block() as load_outputs:
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VMOVUPS(ymm_output, [reg_output])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(load_outputs.end)
with Block() as load_inputs:
for m, (ymm_input, reg_input) in enumerate(zip(ymm_inputs, reg_inputs)):
# Load vector for a channel of the input image
VMOVUPS(ymm_input, [reg_input])
ADD(reg_input, YMMRegister.size)
# Update all outputs using the input and corresponding kernel elements
for n, ymm_output in enumerate(ymm_outputs):
VFMADD231PS(ymm_output, ymm_kernel[m][n], ymm_input)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(load_inputs.end)
# Store vectors to different channels of the output image
with Block() as store_outputs:
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VMOVUPS([reg_output], ymm_output)
ADD(reg_output, YMMRegister.size)
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(store_outputs.end)
SUB(reg_image_size, YMMRegister.size)
JAE(main_loop.begin)
ADD(reg_image_size, YMMRegister.size)
JZ(final_block.end)
with final_block:
reg_mask, ymm_mask = GeneralPurposeRegister64(), YMMRegister()
LEA(reg_mask, Constant.uint32x16(*([0xFFFFFFFF] * 8 + [0x00000000] * 8)))
SUB(reg_mask, reg_image_size)
VMOVDQU(ymm_mask, [reg_mask + YMMRegister.size])
# Load vectors from different channels of the output image
ymm_outputs = [YMMRegister() for n in range(nr)]
with Block() as load_outputs:
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VMASKMOVPS(ymm_output, ymm_mask, [reg_output])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(load_outputs.end)
with Block() as load_inputs:
for m, (ymm_input, reg_input) in enumerate(zip(ymm_inputs, reg_inputs)):
# Load vector for a channel of the input image
VMASKMOVPS(ymm_inputs[m], ymm_mask, [reg_input])
# Update all outputs using the input and corresponding kernel elements
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VFMADD231PS(ymm_output, ymm_kernel[m][n], ymm_inputs[m])
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(load_inputs.end)
# Store vectors to different channels of the output image
with Block() as store_outputs:
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VMASKMOVPS([reg_output], ymm_mask, ymm_output)
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(store_outputs.end)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
simd_width = YMMRegister.size // float_.size
for fusion_factor in range(1, 8 + 1):
arg_x = Argument(ptr(const_float_), "x")
arg_y = Argument(ptr(const_float_), "y")
arg_stride_y = Argument(size_t, "stride_y")
arg_sum = Argument(ptr(float_), "sum")
arg_n = Argument(size_t, "n")
with Function("nnp_sdotxf{fusion_factor}__avx2".format(fusion_factor=fusion_factor),
(arg_x, arg_y, arg_stride_y, arg_sum, arg_n),
target=uarch.default + isa.fma3 + isa.avx2):
reg_x = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_x, arg_x)
reg_ys = [GeneralPurposeRegister64() for m in range(fusion_factor)]
LOAD.ARGUMENT(reg_ys[0], arg_y)
reg_stride_y = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_y, arg_stride_y)
SHL(reg_stride_y, 2)
reg_sum = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_sum, arg_sum)
reg_n = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_n, arg_n)
ymm_accs = [YMMRegister() for m in range(fusion_factor)]
VZEROALL()
for m in range(1, fusion_factor):
LEA(reg_ys[m], [reg_ys[m - 1] + reg_stride_y * 1])
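# Computes fusion_factor dot products against a shared x: sum[i] = dot(x, y + i * stride_y) for i = 0..fusion_factor-1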
main_loop = Loop()
end_block = Block()
SUB(reg_n, YMMRegister.size // float_.size)
JB(main_loop.end)
with main_loop:
ymm_x = YMMRegister()
VMOVUPS(ymm_x, [reg_x])
ADD(reg_x, YMMRegister.size)
for reg_y, ymm_acc in zip(reg_ys, ymm_accs):
VFMADD231PS(ymm_acc, ymm_x, [reg_y])
ADD(reg_y, YMMRegister.size)
SUB(reg_n, YMMRegister.size // float_.size)
JAE(main_loop.begin)
ADD(reg_n, YMMRegister.size // float_.size)
JE(end_block.end)
with end_block:
ymm_mask = YMMRegister()
VMOVD(ymm_mask.as_xmm, reg_n.as_dword)
VPBROADCASTD(ymm_mask, ymm_mask.as_xmm)
VPCMPGTD(ymm_mask, ymm_mask, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
ymm_x = YMMRegister()
VMASKMOVPS(ymm_x, ymm_mask, [reg_x])
for reg_y, ymm_acc in zip(reg_ys, ymm_accs):
ymm_y = YMMRegister()
VMASKMOVPS(ymm_y, ymm_mask, [reg_y])
VFMADD231PS(ymm_acc, ymm_x, ymm_y)
# Reduce each SIMD accumulator to a single element and store it to sum[i]
xmm_tmp = XMMRegister()
for i, ymm_acc in enumerate(ymm_accs):
VEXTRACTF128(xmm_tmp, ymm_acc, 1)
VADDPS(ymm_acc.as_xmm, ymm_acc.as_xmm, xmm_tmp)
VHADDPS(ymm_acc, ymm_acc, ymm_acc)
VHADDPS(ymm_acc, ymm_acc, ymm_acc)
VMOVSS([reg_sum + i * float_.size], ymm_acc.as_xmm)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
from fp16.avx import fp16_alt_xmm_to_fp32_xmm
from fp16.avx2 import fp16_alt_xmm_to_fp32_ymm
simd_width = YMMRegister.size // float_.size
for fusion_factor in range(1, 8 + 1):
arg_x = Argument(ptr(const_float_), "x")
arg_y = Argument(ptr(const_float_), "y")
arg_stride_y = Argument(size_t, "stride_y")
arg_sum = Argument(ptr(float_), "sum")
arg_n = Argument(size_t, "n")
with Function("nnp_shdotxf{fusion_factor}__avx2".format(fusion_factor=fusion_factor),
(arg_x, arg_y, arg_stride_y, arg_sum, arg_n),
target=uarch.default + isa.fma3 + isa.avx2):
reg_x = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_x, arg_x)
reg_ys = [GeneralPurposeRegister64() for m in range(fusion_factor)]
LOAD.ARGUMENT(reg_ys[0], arg_y)
reg_stride_y = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_y, arg_stride_y)
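# y holds 16-bit half-precision values, so doubling the stride converts it from elements to bytes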
ADD(reg_stride_y, reg_stride_y)
reg_sum = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_sum, arg_sum)
reg_n = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_n, arg_n)
ymm_accs = [YMMRegister() for m in range(fusion_factor)]
VZEROALL()
for m in range(1, fusion_factor):
LEA(reg_ys[m], [reg_ys[m - 1] + reg_stride_y * 1])
main_loop = Loop()
edge_loop = Loop()
SUB(reg_n, XMMRegister.size // uint16_t.size)
JB(main_loop.end)
with main_loop:
ymm_x = YMMRegister()
VMOVUPS(ymm_x, [reg_x])
ADD(reg_x, YMMRegister.size)
for reg_y, ymm_acc in zip(reg_ys, ymm_accs):
xmm_half = XMMRegister()
VMOVUPS(xmm_half, [reg_y])
ADD(reg_y, XMMRegister.size)
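# Widen 8 half-precision values (alternative fp16 format) to single precision before the FMA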
ymm_y = fp16_alt_xmm_to_fp32_ymm(xmm_half)
VFMADD231PS(ymm_acc, ymm_x, ymm_y)
SUB(reg_n, YMMRegister.size // float_.size)
JAE(main_loop.begin)
ADD(reg_n, XMMRegister.size // uint16_t.size)
JE(edge_loop.end)
with edge_loop:
xmm_x = XMMRegister()
VMOVSS(xmm_x, [reg_x])
ADD(reg_x, float_.size)
for reg_y, ymm_acc in zip(reg_ys, ymm_accs):
reg_half = GeneralPurposeRegister32()
MOVZX(reg_half, word[reg_y])
xmm_half = XMMRegister()
VMOVD(xmm_half, reg_half)
ADD(reg_y, uint16_t.size)
ymm_y = fp16_alt_xmm_to_fp32_ymm(xmm_half)
VFMADD231PS(ymm_acc, xmm_x.as_ymm, ymm_y)
SUB(reg_n, 1)
JNZ(edge_loop.begin)
# Reduce each SIMD accumulator to a single element and store it to sum[i]
xmm_tmp = XMMRegister()
for i, ymm_acc in enumerate(ymm_accs):
VEXTRACTF128(xmm_tmp, ymm_acc, 1)
VADDPS(ymm_acc.as_xmm, ymm_acc.as_xmm, xmm_tmp)
VHADDPS(ymm_acc, ymm_acc, ymm_acc)
VHADDPS(ymm_acc, ymm_acc, ymm_acc)
VMOVSS([reg_sum + i * float_.size], ymm_acc.as_xmm)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
from common import _MM_SHUFFLE
simd_width = YMMRegister.size // float_.size
mr = 4
nr = 3 * simd_width
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_sgemm_only_{mr}x{nr}__fma3".format(mr=mr, nr=nr),
(arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
with Block() as prefetch_c:
for m in range(mr):
PREFETCHT0([reg_c])
if m + 1 != mr:
ADD(reg_c, reg_row_stride)
ymm_c = [[YMMRegister() for n in range(0, nr, simd_width)] for m in range(mr)]
VZEROALL()
ymm_b = [YMMRegister() for n in range(0, nr, simd_width)]
ymm_a_m = YMMRegister()
with Loop() as loop:
for n in range(nr // simd_width):
VMOVAPS(ymm_b[n], [reg_b + n * YMMRegister.size])
ADD(reg_b, nr * float_.size)
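# Each element of the A column is broadcast and multiplied with the three cached B vectors, updating the full 4x24 tile of C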
for m in range(mr):
VBROADCASTSS(ymm_a_m, [reg_a + m * float_.size])
for n in range(nr // simd_width):
VFMADD231PS(ymm_c[m][n], ymm_a_m, ymm_b[n])
ADD(reg_a, mr * float_.size)
DEC(reg_k)
JNE(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
with Block() as load_and_store_c:
for m in reversed(range(mr)):
for n in range(nr // simd_width):
VADDPS(ymm_c[m][n], ymm_c[m][n], [reg_c + n * YMMRegister.size])
VMOVUPS([reg_c + n * YMMRegister.size], ymm_c[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in reversed(range(mr)):
for n in range(nr // simd_width):
VMOVUPS([reg_c + n * YMMRegister.size], ymm_c[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
arg_mr = Argument(uint32_t, "mr")
arg_nr = Argument(uint32_t, "nr")
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_sgemm_upto_{mr}x{nr}__fma3".format(mr=mr, nr=nr),
(arg_mr, arg_nr, arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_mr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_mr, arg_mr)
reg_nr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_nr, arg_nr)
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
ymm_c = [[YMMRegister() for n in range(0, nr, simd_width)] for m in range(mr)]
VZEROALL()
ymm_b = [YMMRegister() for n in range(0, nr, simd_width)]
ymm_a_m = YMMRegister()
with Loop() as loop:
with Block() as load_b:
for n in range(nr // simd_width):
VMOVAPS(ymm_b[n], [reg_b])
ADD(reg_b, YMMRegister.size)
if n + 1 != nr // simd_width:
CMP(reg_nr, (n + 1) * simd_width)
JBE(load_b.end)
with Block() as multiply_by_a:
for m in range(mr):
VBROADCASTSS(ymm_a_m, [reg_a])
ADD(reg_a, float_.size)
for n in range(nr // simd_width):
VFMADD231PS(ymm_c[m][n], ymm_a_m, ymm_b[n])
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(multiply_by_a.end)
DEC(reg_k)
JNE(loop.begin)
store_c = Block()
# Load mask
reg_mask_index = GeneralPurposeRegister32()
LEA(reg_mask_index, [reg_nr.as_qword - 1])
AND(reg_mask_index, simd_width - 1)
NEG(reg_mask_index.as_qword)
const_mask_table = Constant.uint32x16(*([0xFFFFFFFF] * 8 + [0x00000000] * 8))
reg_mask = GeneralPurposeRegister64()
LEA(reg_mask, const_mask_table)
LEA(reg_mask, [reg_mask + reg_mask_index.as_qword * 4 + 32 - 4])
ymm_mask = YMMRegister()
VMOVDQU(ymm_mask, [reg_mask])
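# The mask selects ((nr - 1) mod 8) + 1 lanes: the last, possibly partial, 8-column block of each row of C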
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
with Block() as update_c:
for m in range(mr):
reg_c_mn = GeneralPurposeRegister64()
MOV(reg_c_mn, reg_c)
ymm_c_mn = YMMRegister()
with Block() as update_c_full_registers:
for n in range(nr // simd_width):
# Copy the current accumulator register into a fixed register ymm_c_mn.
# The masked update at the end of this row expects the last, possibly partial, tuple there.
VMOVAPS(ymm_c_mn, ymm_c[m][n])
if n + 1 != nr // simd_width:
CMP(reg_nr, (n + 1) * simd_width)
JBE(update_c_full_registers.end)
VADDPS(ymm_c[m][n], ymm_c[m][n], [reg_c_mn])
VMOVUPS([reg_c_mn], ymm_c[m][n])
ADD(reg_c_mn, YMMRegister.size)
# Update (potentially) partial register
# Note: ymm_c_mn holds the accumulator contents, while reg_c_mn points to the corresponding tuple of C in memory
ymm_temp = YMMRegister()
VMASKMOVPS(ymm_temp, ymm_mask, [reg_c_mn])
VADDPS(ymm_c_mn, ymm_c_mn, ymm_temp)
VMASKMOVPS([reg_c_mn], ymm_mask, ymm_c_mn)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(update_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in range(mr):
reg_c_mn = GeneralPurposeRegister64()
MOV(reg_c_mn, reg_c)
ymm_c_mn = YMMRegister()
with Block() as store_c_full_registers:
for n in range(nr // simd_width):
# Copy the current accumulator register into a fixed register ymm_c_mn.
# The masked store at the end of this row expects the last, possibly partial, tuple there.
VMOVAPS(ymm_c_mn, ymm_c[m][n])
if n + 1 != nr // simd_width:
CMP(reg_nr, (n + 1) * simd_width)
JBE(store_c_full_registers.end)
VMOVUPS([reg_c_mn], ymm_c[m][n])
ADD(reg_c_mn, YMMRegister.size)
# Store (potentially) partial register
# Note: ymm_c_mn holds the accumulator contents, while reg_c_mn points to the corresponding tuple of C in memory
VMASKMOVPS([reg_c_mn], ymm_mask, ymm_c_mn)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(store_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
mr, nr = 2, 2
for conjugate_b, transpose_c in [(False, False), (True, False), (True, True)]:
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_c8gemm{conjb}{transc}_only_{mr}x{nr}__fma3".format(mr=mr, nr=nr,
conjb="_conjb" if conjugate_b else "",
transc="_transc" if transpose_c else ""),
(arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
with Block() as prefetch_c:
if not transpose_c:
for m in range(mr):
PREFETCHT0([reg_c])
if m + 1 != mr:
ADD(reg_c, reg_row_stride)
else:
for n in range(nr):
PREFETCHT0([reg_c])
if n + 1 != nr:
ADD(reg_c, reg_row_stride)
ymm_c_re = [[YMMRegister() for n in range(nr)] for m in range(mr)]
ymm_c_im = [[YMMRegister() for n in range(nr)] for m in range(mr)]
VZEROALL()
ymm_a = [YMMRegister() for m in range(2*mr)]
ymm_a_re, ymm_a_im = ymm_a[0::2], ymm_a[1::2]
ymm_b = [YMMRegister() for n in range(2*nr)]
ymm_b_re, ymm_b_im = ymm_b[0::2], ymm_b[1::2]
with Loop() as loop:
for i, ymm in enumerate(ymm_a):
VMOVAPS(ymm, [reg_a + i * YMMRegister.size])
SUB(reg_a, -YMMRegister.size * 2 * mr)
for j, ymm in enumerate(ymm_b):
VMOVAPS(ymm, [reg_b + j * YMMRegister.size])
SUB(reg_b, -YMMRegister.size * 2 * nr)
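# Complex multiply-accumulate: c_re += a_re*b_re - a_im*b_im, c_im += a_im*b_re + a_re*b_im; conjugating B flips the sign of the b_im terms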
for n in range(nr):
for m in range(mr):
VFMADD231PS(ymm_c_re[m][n], ymm_a_re[m], ymm_b_re[n])
VFMADD231PS(ymm_c_im[m][n], ymm_a_im[m], ymm_b_re[n])
for n in range(nr):
for m in range(mr):
if conjugate_b:
VFMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFNMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
else:
VFNMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
DEC(reg_k)
JNZ(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
if transpose_c:
mr, nr = nr, mr
ymm_c_re = [list(ymm_column) for ymm_column in zip(*ymm_c_re)]
ymm_c_im = [list(ymm_column) for ymm_column in zip(*ymm_c_im)]
with Block() as update_c:
for m in reversed(range(mr)):
for n in range(nr):
VADDPS(ymm_c_re[m][n], ymm_c_re[m][n], [reg_c + (2*n+0) * YMMRegister.size])
VADDPS(ymm_c_im[m][n], ymm_c_im[m][n], [reg_c + (2*n+1) * YMMRegister.size])
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in reversed(range(mr)):
for n in range(nr):
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
arg_mr = Argument(uint32_t, "mr")
arg_nr = Argument(uint32_t, "nr")
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_c8gemm{conjb}{transc}_upto_{mr}x{nr}__fma3".format(mr=mr, nr=nr,
conjb="_conjb" if conjugate_b else "",
transc="_transc" if transpose_c else ""),
(arg_mr, arg_nr, arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_mr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_mr, arg_mr)
reg_nr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_nr, arg_nr)
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
ymm_c_re, ymm_c_im = tuple([[YMMRegister() for n in range(nr)] for m in range(mr)] for c in range(2))
VZEROALL()
ymm_a_re, ymm_a_im = tuple([YMMRegister() for m in range(mr)] for c in range(2))
ymm_b_re, ymm_b_im = tuple([YMMRegister() for n in range(nr)] for c in range(2))
with Loop() as loop:
with Block() as load_a:
for m, (ymm_re, ymm_im) in enumerate(zip(ymm_a_re, ymm_a_im)):
VMOVAPS(ymm_re, [reg_a])
VMOVAPS(ymm_im, [reg_a + YMMRegister.size])
ADD(reg_a, 2 * YMMRegister.size)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(load_a.end)
with Block() as load_b:
for n, (ymm_re, ymm_im) in enumerate(zip(ymm_b_re, ymm_b_im)):
VMOVAPS(ymm_re, [reg_b])
VMOVAPS(ymm_im, [reg_b + YMMRegister.size])
ADD(reg_b, 2 * YMMRegister.size)
with Block() as multiply_by_bn:
for m in range(mr):
VFMADD231PS(ymm_c_re[m][n], ymm_a_re[m], ymm_b_re[n])
VFMADD231PS(ymm_c_im[m][n], ymm_a_im[m], ymm_b_re[n])
if conjugate_b:
VFMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFNMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
else:
VFNMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(multiply_by_bn.end)
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(load_b.end)
DEC(reg_k)
JNZ(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
if transpose_c:
mr, nr = nr, mr
reg_mr, reg_nr = reg_nr, reg_mr
ymm_c_re = [list(ymm_column) for ymm_column in zip(*ymm_c_re)]
ymm_c_im = [list(ymm_column) for ymm_column in zip(*ymm_c_im)]
with Block() as update_c:
for m in range(mr):
with Block() as update_c_row:
for n in range(nr):
VADDPS(ymm_c_re[m][n], ymm_c_re[m][n], [reg_c + (2*n+0) * YMMRegister.size])
VADDPS(ymm_c_im[m][n], ymm_c_im[m][n], [reg_c + (2*n+1) * YMMRegister.size])
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(update_c_row.end)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(update_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in range(mr):
with Block() as store_c_row:
for n in range(nr):
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(store_c_row.end)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(store_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
mr, nr = 2, 2
for conjugate_b, transpose_c in [(False, False), (True, False), (True, True)]:
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_s4c6gemm{conjb}{transc}_only_{mr}x{nr}__fma3".format(mr=mr, nr=nr,
conjb="_conjb" if conjugate_b else "",
transc="_transc" if transpose_c else ""),
(arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
with Block() as prefetch_c:
if not transpose_c:
for m in range(mr):
PREFETCHT0([reg_c])
if m + 1 != mr:
ADD(reg_c, reg_row_stride)
else:
for n in range(nr):
PREFETCHT0([reg_c])
if n + 1 != nr:
ADD(reg_c, reg_row_stride)
ymm_c_re, ymm_c_im = tuple([[YMMRegister() for n in range(nr)] for m in range(mr)] for c in range(2))
VZEROALL()
ymm_a_re, ymm_a_im = tuple([YMMRegister() for m in range(mr)] for c in range(2))
ymm_b_re, ymm_b_im = tuple([YMMRegister() for n in range(nr)] for c in range(2))
with Loop() as loop:
for m in range(mr):
VMOVAPS(ymm_a_re[m], [reg_a + (2*m+0) * YMMRegister.size])
VMOVAPS(ymm_a_im[m], [reg_a + (2*m+1) * YMMRegister.size])
SUB(reg_a, -YMMRegister.size * 2 * mr)
for n in range(nr):
VMOVAPS(ymm_b_re[n], [reg_b + (2*n+0) * YMMRegister.size])
for m in range(mr):
VFMADD231PS(ymm_c_re[m][n], ymm_a_re[m], ymm_b_re[n])
VMOVAPS(ymm_b_im[n], [reg_b + (2*n+1) * YMMRegister.size])
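# In columns 0 and 1 the re/im slots carry independent real coefficients rather than a complex pair:
# the blend moves them into b_re, and the mask below zeroes them in b_im, so those lanes are multiplied element-wise as reals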
VBLENDPS(ymm_b_re[n], ymm_b_re[n], ymm_b_im[n], 0b00000011)
for m in range(mr):
VFMADD231PS(ymm_c_im[m][n], ymm_a_im[m], ymm_b_re[n])
SUB(reg_b, -YMMRegister.size * 2 * nr)
ymm_zero_columns01_mask = YMMRegister()
VMOVAPS(ymm_zero_columns01_mask, Constant.uint32x8(0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF))
for n in range(nr):
VANDPS(ymm_b_im[n], ymm_b_im[n], ymm_zero_columns01_mask)
for m in range(mr):
if conjugate_b:
VFMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFNMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
else:
VFNMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
DEC(reg_k)
JNZ(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
if transpose_c:
mr, nr = nr, mr
ymm_c_re = [list(ymm_column) for ymm_column in zip(*ymm_c_re)]
ymm_c_im = [list(ymm_column) for ymm_column in zip(*ymm_c_im)]
with Block() as update_c:
for m in reversed(range(mr)):
for n in range(nr):
VADDPS(ymm_c_re[m][n], ymm_c_re[m][n], [reg_c + (2*n+0) * YMMRegister.size])
VADDPS(ymm_c_im[m][n], ymm_c_im[m][n], [reg_c + (2*n+1) * YMMRegister.size])
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in reversed(range(mr)):
for n in range(nr):
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
arg_mr = Argument(uint32_t, "mr")
arg_nr = Argument(uint32_t, "nr")
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_s4c6gemm{conjb}{transc}_upto_{mr}x{nr}__fma3".format(mr=mr, nr=nr,
conjb="_conjb" if conjugate_b else "",
transc="_transc" if transpose_c else ""),
(arg_mr, arg_nr, arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_mr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_mr, arg_mr)
reg_nr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_nr, arg_nr)
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
ymm_c_re, ymm_c_im = tuple([[YMMRegister() for n in range(nr)] for m in range(mr)] for c in range(2))
VZEROALL()
ymm_a_re, ymm_a_im = tuple([YMMRegister() for m in range(mr)] for c in range(2))
ymm_b_re, ymm_b_im = tuple([YMMRegister() for n in range(nr)] for c in range(2))
with Loop() as loop:
with Block() as load_a:
for m, (ymm_re, ymm_im) in enumerate(zip(ymm_a_re, ymm_a_im)):
VMOVAPS(ymm_re, [reg_a])
VMOVAPS(ymm_im, [reg_a + YMMRegister.size])
ADD(reg_a, 2 * YMMRegister.size)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(load_a.end)
with Block() as load_b:
for n in range(nr):
VMOVAPS(ymm_b_re[n], [reg_b])
for m in range(mr):
VFMADD231PS(ymm_c_re[m][n], ymm_a_re[m], ymm_b_re[n])
VMOVAPS(ymm_b_im[n], [reg_b + YMMRegister.size])
VBLENDPS(ymm_b_re[n], ymm_b_re[n], ymm_b_im[n], 0b00000011)
for m in range(mr):
VFMADD231PS(ymm_c_im[m][n], ymm_a_im[m], ymm_b_re[n])
ADD(reg_b, YMMRegister.size * 2)
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(load_b.end)
ymm_zero_columns01_mask = YMMRegister()
VMOVAPS(ymm_zero_columns01_mask, Constant.uint32x8(0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF))
for n in range(nr):
VANDPS(ymm_b_im[n], ymm_b_im[n], ymm_zero_columns01_mask)
for m in range(mr):
if conjugate_b:
VFMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFNMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
else:
VFNMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
DEC(reg_k)
JNZ(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
if transpose_c:
mr, nr = nr, mr
reg_mr, reg_nr = reg_nr, reg_mr
ymm_c_re = [list(ymm_column) for ymm_column in zip(*ymm_c_re)]
ymm_c_im = [list(ymm_column) for ymm_column in zip(*ymm_c_im)]
with Block() as update_c:
for m in range(mr):
with Block() as update_c_row:
for n in range(nr):
VADDPS(ymm_c_re[m][n], ymm_c_re[m][n], [reg_c + (2*n+0) * YMMRegister.size])
VADDPS(ymm_c_im[m][n], ymm_c_im[m][n], [reg_c + (2*n+1) * YMMRegister.size])
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(update_c_row.end)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(update_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in range(mr):
with Block() as store_c_row:
for n in range(nr):
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(store_c_row.end)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(store_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
from common import sqrt2_over_2
from common import butterfly
import fft.complex_soa
def fft8_across_rows(ymm_data):
assert isinstance(ymm_data, list) and len(ymm_data) == 8
ymm_real = ymm_data[0::2]
ymm_imag = ymm_data[1::2]
fft.complex_soa.fft4_across_rows(ymm_real, ymm_imag)
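# butterfly replaces (real0, imag0) with (real0 + imag0, real0 - imag0), which become fdata[0] and fdata[1] below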
butterfly(ymm_real[0], ymm_imag[0])
# const float two_gdata1_real = crealf(data1) + crealf(data3);
# const float two_gdata1_imag = cimagf(data1) - cimagf(data3);
ymm_two_gdata1_real, ymm_two_gdata1_imag = YMMRegister(), YMMRegister()
VADDPS(ymm_two_gdata1_real, ymm_real[1], ymm_real[3])
VSUBPS(ymm_two_gdata1_imag, ymm_imag[1], ymm_imag[3])
# const float two_hdata1_real = cimagf(data1) + cimagf(data3);
# const float two_hdata1_imag = crealf(data3) - crealf(data1);
ymm_two_hdata1_real, ymm_two_hdata1_imag = YMMRegister(), YMMRegister()
VADDPS(ymm_two_hdata1_real, ymm_imag[1], ymm_imag[3])
VSUBPS(ymm_two_hdata1_imag, ymm_real[3], ymm_real[1])
# const float two_hdata1_real_plus_imag = two_hdata1_real + two_hdata1_imag;
# const float two_hdata1_real_minus_imag = two_hdata1_real - two_hdata1_imag;
ymm_two_hdata1_plus, ymm_two_hdata1_minus = YMMRegister(), YMMRegister()
VADDPS(ymm_two_hdata1_plus, ymm_two_hdata1_real, ymm_two_hdata1_imag)
VSUBPS(ymm_two_hdata1_minus, ymm_two_hdata1_real, ymm_two_hdata1_imag)
ymm_sqrt2_over_2 = YMMRegister()
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
# const float two_data1_real = two_gdata1_real + SQRT2_OVER_2 * two_hdata1_real_plus_imag;
# const float two_data1_imag = two_gdata1_imag - SQRT2_OVER_2 * two_hdata1_real_minus_imag;
# const float two_data3_real = two_gdata1_real - SQRT2_OVER_2 * two_hdata1_real_plus_imag;
# const float two_data3_imag = -two_gdata1_imag - SQRT2_OVER_2 * two_hdata1_real_minus_imag;
ymm_two_data1_real, ymm_two_data1_imag = YMMRegister(), YMMRegister()
ymm_two_data3_real, ymm_two_data3_imag = YMMRegister(), YMMRegister()
VMOVAPS(ymm_two_data3_real, ymm_two_gdata1_real)
VMOVAPS(ymm_two_data3_imag, ymm_two_gdata1_imag)
VFMADD231PS(ymm_two_gdata1_real, ymm_two_hdata1_plus, ymm_sqrt2_over_2)
VFNMADD231PS(ymm_two_gdata1_imag, ymm_two_hdata1_minus, ymm_sqrt2_over_2)
SWAP.REGISTERS(ymm_two_data1_real, ymm_two_gdata1_real)
SWAP.REGISTERS(ymm_two_data1_imag, ymm_two_gdata1_imag)
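# SWAP.REGISTERS exchanges the virtual registers, so ymm_two_data1_real/imag now name the freshly accumulated values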
VFNMADD231PS(ymm_two_data3_real, ymm_two_hdata1_plus, ymm_sqrt2_over_2)
VFNMSUB231PS(ymm_two_data3_imag, ymm_two_hdata1_minus, ymm_sqrt2_over_2)
# /* Store outputs */
# fdata[0] = crealf(data0) + cimagf(data0);
# fdata[1] = crealf(data0) - cimagf(data0);
# fdata[2] = 0.5f * two_data1_real;
# fdata[3] = 0.5f * two_data1_imag;
# fdata[4] = crealf(data2);
# fdata[5] = -cimagf(data2);
# fdata[6] = 0.5f * two_data3_real;
# fdata[7] = 0.5f * two_data3_imag;
ymm_half = YMMRegister()
VMOVAPS(ymm_half, Constant.float32x8(0.5))
VMULPS(ymm_real[1], ymm_two_data1_real, ymm_half)
VMULPS(ymm_imag[1], ymm_two_data1_imag, ymm_half)
VXORPS(ymm_imag[2], ymm_imag[2], Constant.float32x8(-0.0))
VMULPS(ymm_real[3], ymm_two_data3_real, ymm_half)
VMULPS(ymm_imag[3], ymm_two_data3_imag, ymm_half)
|