#!/usr/bin/env python
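# confu-based build script for NNPACK. It selects a SIMD backend (x86-64 FMA via
# PeachPy, ARM NEON, generic psimd, or portable scalar code), compiles the core
# library objects into a static "nnpack" library, and registers unit tests,
# smoke tests, and benchmarks.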
import confu
parser = confu.standard_parser()
parser.add_argument("--backend", dest="backend", default="auto",
choices=["auto", "psimd", "scalar"])
parser.add_argument("--inference-only", dest="inference_only", default=False,
action="store_true",
help="Build only inference/forward pass functions to reduce library size")
parser.add_argument("--convolution-only", dest="convolution_only", default=False,
action="store_true",
help="Build only convolution functions to reduce library size")
def main(args):
options = parser.parse_args(args)
backend = options.backend
if backend == "auto":
if options.target.is_x86_64:
backend = "x86_64"
elif options.target.is_arm or options.target.is_arm64:
backend = "arm"
elif options.target.is_emscripten:
backend = "scalar"
else:
backend = "psimd"
build = confu.Build.from_options(options)
macros = dict()
if backend == "psimd":
macros["NNP_BACKEND_PSIMD"] = 1
if backend == "scalar":
macros["NNP_BACKEND_SCALAR"] = 1
export_macros = dict()
export_macros["NNP_CONVOLUTION_ONLY"] = int(options.convolution_only)
export_macros["NNP_INFERENCE_ONLY"] = int(options.inference_only)
macros.update(export_macros)
build.export_cpath("include", ["nnpack.h"])
with build.options(source_dir="src", macros=macros,
deps={
(build.deps.pthreadpool, build.deps.cpuinfo, build.deps.fxdiv, build.deps.fp16): any,
build.deps.psimd: backend == "psimd" or backend == "arm",
},
extra_include_dirs={
("src", "src/ref"): any,
"src/x86_64-fma": options.target.is_x86_64
}):
nnpack_objects = [
build.cc("init.c"),
build.cc("convolution-inference.c"),
]
if not options.convolution_only:
# Fully-connected, pooling, Softmax, ReLU layers
nnpack_objects += [
build.cc("fully-connected-inference.c"),
build.cc("pooling-output.c"),
build.cc("softmax-output.c"),
build.cc("relu-output.c"),
]
if not options.inference_only:
# Training functions for fully-connected and ReLU layers
nnpack_objects += [
build.cc("fully-connected-output.c"),
build.cc("relu-input-gradient.c"),
]
if not options.inference_only:
# Training functions for convolutional layer
nnpack_objects += [
build.cc("convolution-output.c"),
build.cc("convolution-input-gradient.c"),
build.cc("convolution-kernel-gradient.c"),
]
if backend == "x86_64":
arch_nnpack_objects = [
# Transformations
build.peachpy("x86_64-fma/2d-fourier-8x8.py"),
build.peachpy("x86_64-fma/2d-fourier-16x16.py"),
build.peachpy("x86_64-fma/2d-winograd-8x8-3x3.py"),
# Tuple GEMM
build.peachpy("x86_64-fma/blas/s8gemm.py"),
build.peachpy("x86_64-fma/blas/c8gemm.py"),
build.peachpy("x86_64-fma/blas/s4c6gemm.py"),
# Direct convolution
build.peachpy("x86_64-fma/blas/conv1x1.py"),
# BLAS microkernels
build.peachpy("x86_64-fma/blas/sgemm.py"),
]
if not options.convolution_only:
arch_nnpack_objects += [
# Activations
build.peachpy("x86_64-fma/softmax.py"),
build.cc("x86_64-fma/softmax.c"),
build.peachpy("x86_64-fma/relu.py"),
# Pooling
build.peachpy("x86_64-fma/max-pooling.py"),
# BLAS microkernels
build.peachpy("x86_64-fma/blas/sdotxf.py"),
build.peachpy("x86_64-fma/blas/shdotxf.py"),
]
elif backend == "scalar":
arch_nnpack_objects = [
# Transformations
build.cc("scalar/2d-fourier-8x8.c"),
build.cc("scalar/2d-fourier-16x16.c"),
build.cc("scalar/2d-winograd-8x8-3x3.c"),
# Tuple GEMM
build.cc("scalar/blas/s2gemm.c"),
build.cc("scalar/blas/cgemm-conjb.c"),
# Direct convolution
build.cc("scalar/blas/conv1x1.c"),
# BLAS microkernels
build.cc("scalar/blas/sgemm.c"),
]
if not options.inference_only:
arch_nnpack_objects += [
# Tuple GEMM
build.cc("scalar/blas/s2gemm-transc.c"),
build.cc("scalar/blas/cgemm.c"),
build.cc("scalar/blas/cgemm-conjb-transc.c"),
]
if not options.convolution_only:
arch_nnpack_objects += [
# Activations
build.cc("scalar/relu.c"),
build.cc("scalar/softmax.c"),
# BLAS microkernels
build.cc("scalar/blas/sdotxf.c"),
build.cc("scalar/blas/shdotxf.c"),
]
elif backend == "arm":
from confu import arm
with build.options(isa=arm.neon+arm.fp16 if options.target.is_arm else None):
arch_nnpack_objects = [
# Transformations
build.cc("psimd/2d-fourier-8x8.c"),
build.cc("psimd/2d-fourier-16x16.c"),
build.cc("neon/2d-winograd-8x8-3x3.c"),
build.cc("neon/2d-winograd-8x8-3x3-fp16.c"),
# Tuple GEMM
build.cc("neon/blas/h4gemm.c"),
build.cc("neon/blas/s4gemm.c"),
build.cc("neon/blas/c4gemm-conjb.c"),
build.cc("neon/blas/s4c2gemm-conjb.c"),
# Direct convolution
build.cc("neon/blas/conv1x1.c"),
# BLAS microkernels
build.cc("neon/blas/sgemm.c"),
]
if not options.inference_only:
arch_nnpack_objects += [
# Transformations
build.cc("psimd/2d-winograd-8x8-3x3.c"),
# Tuple GEMM
build.cc("neon/blas/c4gemm.c"),
build.cc("neon/blas/s4c2gemm.c"),
build.cc("neon/blas/c4gemm-conjb-transc.c"),
build.cc("neon/blas/s4c2gemm-conjb-transc.c"),
]
if not options.convolution_only:
arch_nnpack_objects += [
# ReLU and Softmax
build.cc("neon/relu.c"),
build.cc("psimd/softmax.c"),
# BLAS microkernels
build.cc("neon/blas/sdotxf.c"),
build.cc("psimd/blas/shdotxf.c"),
]
if options.target.is_arm:
# Functions implemented in assembly
arch_nnpack_objects += [
build.cc("neon/blas/h4gemm-aarch32.S"),
build.cc("neon/blas/s4gemm-aarch32.S"),
build.cc("neon/blas/sgemm-aarch32.S"),
]
elif backend == "psimd":
arch_nnpack_objects = [
# Transformations
build.cc("psimd/2d-fourier-8x8.c"),
build.cc("psimd/2d-fourier-16x16.c"),
build.cc("psimd/2d-winograd-8x8-3x3.c"),
# Tuple GEMM
build.cc("psimd/blas/s4gemm.c"),
build.cc("psimd/blas/c4gemm-conjb.c"),
build.cc("psimd/blas/s4c2gemm-conjb.c"),
# Direct convolution
build.cc("psimd/blas/conv1x1.c"),
# BLAS microkernels
build.cc("psimd/blas/sgemm.c"),
]
if not options.inference_only:
arch_nnpack_objects += [
# Tuple GEMM
build.cc("psimd/blas/c4gemm.c"),
build.cc("psimd/blas/s4c2gemm.c"),
build.cc("psimd/blas/c4gemm-conjb-transc.c"),
build.cc("psimd/blas/s4c2gemm-conjb-transc.c"),
]
if not options.convolution_only:
arch_nnpack_objects += [
# Activations
build.cc("psimd/relu.c"),
build.cc("psimd/softmax.c"),
# BLAS microkernels
build.cc("psimd/blas/sdotxf.c"),
build.cc("psimd/blas/shdotxf.c"),
]
reference_layer_objects = [
build.cc("ref/convolution-output.c"),
build.cc("ref/convolution-input-gradient.c"),
build.cc("ref/convolution-kernel.c"),
build.cc("ref/fully-connected-output.c"),
build.cc("ref/max-pooling-output.c"),
build.cc("ref/softmax-output.c"),
build.cc("ref/relu-output.c"),
build.cc("ref/relu-input-gradient.c"),
]
reference_fft_objects = [
build.cc("ref/fft/aos.c"),
build.cc("ref/fft/soa.c"),
build.cc("ref/fft/forward-real.c"),
build.cc("ref/fft/forward-dualreal.c"),
build.cc("ref/fft/inverse-real.c"),
build.cc("ref/fft/inverse-dualreal.c"),
]
if backend == "x86_64":
arch_fft_stub_objects = [
build.peachpy("x86_64-fma/fft-soa.py"),
build.peachpy("x86_64-fma/fft-aos.py"),
build.peachpy("x86_64-fma/fft-dualreal.py"),
build.peachpy("x86_64-fma/ifft-dualreal.py"),
build.peachpy("x86_64-fma/fft-real.py"),
build.peachpy("x86_64-fma/ifft-real.py"),
]
arch_winograd_stub_objects = [
build.peachpy("x86_64-fma/winograd-f6k3.py"),
]
arch_math_stub_objects = [
]
elif backend == "scalar":
arch_fft_stub_objects = [
build.cc("scalar/fft-aos.c"),
build.cc("scalar/fft-soa.c"),
build.cc("scalar/fft-real.c"),
build.cc("scalar/fft-dualreal.c"),
]
arch_winograd_stub_objects = [
build.cc("scalar/winograd-f6k3.c"),
]
elif backend == "psimd" or backend == "arm":
arch_fft_stub_objects = [
build.cc("psimd/fft-aos.c"),
build.cc("psimd/fft-soa.c"),
build.cc("psimd/fft-real.c"),
build.cc("psimd/fft-dualreal.c"),
]
if backend == "psimd":
arch_winograd_stub_objects = [
build.cc("psimd/winograd-f6k3.c"),
]
else:
# ARM NEON Winograd transform optionally uses FP16 storage
with build.options(isa=arm.neon+arm.fp16 if options.target.is_arm else None):
arch_winograd_stub_objects = [
build.cc("neon/winograd-f6k3.c"),
]
arch_math_stub_objects = [
build.cc("psimd/exp.c"),
]
fft_objects = reference_fft_objects + arch_fft_stub_objects
nnpack_objects = nnpack_objects + arch_nnpack_objects
build.static_library("nnpack", nnpack_objects)
# Build tests for micro-kernels. Link them against the micro-kernel implementations.
with build.options(source_dir="test", extra_include_dirs="test",
deps={
(build.deps.googletest, build.deps.cpuinfo, build.deps.clog, build.deps.fp16): any,
"log": build.target.is_android}):
build.unittest("fourier-reference-test",
reference_fft_objects + [build.cxx("fourier/reference.cc")])
if backend == "x86_64":
build.smoketest("fourier-test",
reference_fft_objects + arch_fft_stub_objects + [build.cxx("fourier/x86_64-avx2.cc")])
build.smoketest("winograd-test",
arch_winograd_stub_objects + arch_nnpack_objects + [build.cxx("winograd/x86_64-fma3.cc")])
build.smoketest("sgemm-test",
arch_nnpack_objects + [build.cxx("sgemm/x86_64-fma3.cc")])
elif backend == "psimd":
build.smoketest("fourier-test",
reference_fft_objects + arch_fft_stub_objects + [build.cxx("fourier/psimd.cc")])
build.smoketest("winograd-test",
arch_winograd_stub_objects + arch_nnpack_objects + [build.cxx("winograd/psimd.cc")])
build.smoketest("sgemm-test",
arch_nnpack_objects + [build.cxx("sgemm/psimd.cc")])
elif backend == "arm":
# No ARM-specific Fourier implementation; use PSIMD
build.smoketest("fourier-test",
reference_fft_objects + arch_fft_stub_objects + [build.cxx("fourier/psimd.cc")])
build.smoketest("winograd-test",
arch_winograd_stub_objects + arch_nnpack_objects + [build.cxx("winograd/neon.cc")])
build.smoketest("sgemm-test",
arch_nnpack_objects + [build.cxx("sgemm/neon.cc")])
build.smoketest("sxgemm-test",
arch_nnpack_objects + [build.cxx("sxgemm/neon.cc")])
build.smoketest("hxgemm-test",
arch_nnpack_objects + [build.cxx("hxgemm/neon.cc")])
elif backend == "scalar":
build.smoketest("fourier-test",
reference_fft_objects + arch_fft_stub_objects + [build.cxx("fourier/scalar.cc")])
build.smoketest("winograd-test",
arch_winograd_stub_objects + arch_nnpack_objects + [build.cxx("winograd/scalar.cc")])
build.smoketest("sgemm-test",
arch_nnpack_objects + [build.cxx("sgemm/scalar.cc")])
# Build tests for layers. Link them against the library.
with build.options(source_dir="test", include_dirs="test", deps={
(build, build.deps.pthreadpool, build.deps.cpuinfo, build.deps.clog, build.deps.googletest.core, build.deps.fp16): any,
"rt": build.target.is_linux,
"log": build.target.is_android,
}):
if not options.inference_only:
build.smoketest("convolution-output-smoketest",
reference_layer_objects + [build.cxx("convolution-output/smoke.cc")])
build.unittest("convolution-output-alexnet-test",
reference_layer_objects + [build.cxx("convolution-output/alexnet.cc")])
build.unittest("convolution-output-vgg-a-test",
reference_layer_objects + [build.cxx("convolution-output/vgg-a.cc")])
build.unittest("convolution-output-overfeat-fast-test",
reference_layer_objects + [build.cxx("convolution-output/overfeat-fast.cc")])
build.smoketest("convolution-input-gradient-smoketest",
reference_layer_objects + [build.cxx("convolution-input-gradient/smoke.cc")])
build.unittest("convolution-input-gradient-alexnet-test",
reference_layer_objects + [build.cxx("convolution-input-gradient/alexnet.cc")])
build.unittest("convolution-input-gradient-vgg-a-test",
reference_layer_objects + [build.cxx("convolution-input-gradient/vgg-a.cc")])
build.unittest("convolution-input-gradient-overfeat-fast-test",
reference_layer_objects + [build.cxx("convolution-input-gradient/overfeat-fast.cc")])
build.smoketest("convolution-kernel-gradient-smoketest",
reference_layer_objects + [build.cxx("convolution-kernel-gradient/smoke.cc")])
build.unittest("convolution-kernel-gradient-alexnet-test",
reference_layer_objects + [build.cxx("convolution-kernel-gradient/alexnet.cc")])
build.unittest("convolution-kernel-gradient-vgg-a-test",
reference_layer_objects + [build.cxx("convolution-kernel-gradient/vgg-a.cc")])
build.unittest("convolution-kernel-gradient-overfeat-fast-test",
reference_layer_objects + [build.cxx("convolution-kernel-gradient/overfeat-fast.cc")])
build.smoketest("convolution-inference-smoketest",
reference_layer_objects + [build.cxx("convolution-inference/smoke.cc")])
build.unittest("convolution-inference-alexnet-test",
reference_layer_objects + [build.cxx("convolution-inference/alexnet.cc")])
build.unittest("convolution-inference-vgg-a-test",
reference_layer_objects + [build.cxx("convolution-inference/vgg-a.cc")])
build.unittest("convolution-inference-overfeat-fast-test",
reference_layer_objects + [build.cxx("convolution-inference/overfeat-fast.cc")])
if not options.convolution_only:
build.unittest("fully-connected-inference-alexnet-test",
reference_layer_objects + [build.cxx("fully-connected-inference/alexnet.cc")])
build.unittest("fully-connected-inference-vgg-a-test",
reference_layer_objects + [build.cxx("fully-connected-inference/vgg-a.cc")])
build.unittest("fully-connected-inference-overfeat-fast-test",
reference_layer_objects + [build.cxx("fully-connected-inference/overfeat-fast.cc")])
if not options.inference_only:
build.smoketest("fully-connected-output-smoketest",
reference_layer_objects + [build.cxx("fully-connected-output/smoke.cc")])
build.unittest("fully-connected-output-alexnet-test",
reference_layer_objects + [build.cxx("fully-connected-output/alexnet.cc")])
build.unittest("fully-connected-output-vgg-a-test",
reference_layer_objects + [build.cxx("fully-connected-output/vgg-a.cc")])
build.unittest("fully-connected-output-overfeat-fast-test",
reference_layer_objects + [build.cxx("fully-connected-output/overfeat-fast.cc")])
build.smoketest("max-pooling-output-smoketest",
reference_layer_objects + [build.cxx("max-pooling-output/smoke.cc")])
build.unittest("max-pooling-output-vgg-a-test",
reference_layer_objects + [build.cxx("max-pooling-output/vgg-a.cc")])
build.unittest("max-pooling-output-overfeat-fast",
reference_layer_objects + [build.cxx("max-pooling-output/overfeat-fast.cc")])
build.unittest("relu-output-alexnet-test",
reference_layer_objects + [build.cxx("relu-output/alexnet.cc")])
build.unittest("relu-output-vgg-a-test",
reference_layer_objects + [build.cxx("relu-output/vgg-a.cc")])
build.unittest("relu-output-overfeat-fast-test",
reference_layer_objects + [build.cxx("relu-output/overfeat-fast.cc")])
if not options.inference_only:
build.unittest("relu-input-gradient-alexnet-test",
reference_layer_objects + [build.cxx("relu-input-gradient/alexnet.cc")])
build.unittest("relu-input-gradient-vgg-a-test",
reference_layer_objects + [build.cxx("relu-input-gradient/vgg-a.cc")])
build.unittest("relu-input-gradient-overfeat-fast-test",
reference_layer_objects + [build.cxx("relu-input-gradient/overfeat-fast.cc")])
build.smoketest("softmax-output-smoketest",
reference_layer_objects + [build.cxx("softmax-output/smoke.cc")])
build.unittest("softmax-output-imagenet-test",
reference_layer_objects + [build.cxx("softmax-output/imagenet.cc")])
# Build automatic benchmarks
with build.options(source_dir="bench", extra_include_dirs=["bench", "test"], macros=macros, deps={
(build, build.deps.pthreadpool, build.deps.cpuinfo, build.deps.clog, build.deps.fp16, build.deps.googlebenchmark): all,
"rt": build.target.is_linux,
"log": build.target.is_android}):
build.benchmark("convolution-inference-bench", build.cxx("convolution-inference.cc"))
build.benchmark("sgemm-bench", build.cxx("sgemm.cc"))
build.benchmark("sxgemm-bench", build.cxx("sxgemm.cc"))
build.benchmark("hxgemm-bench", build.cxx("hxgemm.cc"))
build.benchmark("conv1x1-bench", build.cxx("conv1x1.cc"))
build.benchmark("winograd-bench", build.cxx("winograd.cc"))
# Build benchmarking utilities
if not options.inference_only and not build.target.is_android:
with build.options(source_dir="bench", extra_include_dirs="bench", macros=macros, deps={
(build, build.deps.pthreadpool, build.deps.cpuinfo, build.deps.clog): all,
"rt": build.target.is_linux,
"log": build.target.is_android}):
support_objects = [build.cc("median.c")]
if build.target.is_x86_64:
support_objects += [build.peachpy("memread.py")]
else:
support_objects += [build.cc("memread.c")]
if build.target.is_linux and build.target.is_x86_64:
support_objects += [build.cc("perf_counter.c")]
build.executable("transform-benchmark",
[build.cc("transform.c")] + support_objects)
build.executable("convolution-benchmark",
[build.cc("convolution.c")] + support_objects)
if not options.convolution_only:
build.executable("fully-connected-benchmark",
[build.cc("fully-connected.c")] + support_objects)
build.executable("pooling-benchmark",
[build.cc("pooling.c")] + support_objects)
build.executable("relu-benchmark",
[build.cc("relu.c")] + support_objects)
return build
if __name__ == "__main__":
import sys
main(sys.argv[1:]).generate()
#!/usr/bin/env python
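# Benchmark driver: runs the convolution-benchmark and fully-connected-benchmark
# executables (built by the configure script above) over the layers of AlexNet,
# VGG-A, or OverFeat-Fast and prints tab-separated timings, one layer per row.
# Illustrative invocation (the script's file name is not given here, bench.py is assumed):
#   python bench.py -l convolution -n vgg-a -m inference -t 4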
from __future__ import print_function
def extract_time(line, prefix):
if line.startswith(prefix):
line = line[len(prefix):].lstrip()
line = line[:line.index(" ms")].rstrip()
return line
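# Illustrative: extract_time("Time: 12.345 ms", "Time:") returns "12.345";
# lines that do not start with the prefix fall through and return None.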
def convolution(mode, batch_size, input_channels, output_channels, image_size, kernel_size, padding, algorithm, transform_strategy=None, threads=None, verbose=False, use_selldr=False):
import subprocess
if use_selldr:
import os
import sys
nacl_sdk_dir = os.getenv("NACL_SDK_ROOT")
if nacl_sdk_dir is None:
print("Error: can not find Native Client SDK: set NACL_SDK_ROOT envorinment variable and try again", file=sys.stderr)
sys.exit(1)
benchmark_args = [os.path.join(nacl_sdk_dir, "tools", "sel_ldr.py"), "--",
"bin/convolution-benchmark"]
else:
benchmark_args = ["bin/convolution-benchmark"]
benchmark_args += [
"-m", mode,
"-b", str(batch_size),
"-ic", str(input_channels),
"-oc", str(output_channels),
"-is", str(image_size[0]), str(image_size[1]),
"-ip", str(padding),
"-ks", str(kernel_size[0]), str(kernel_size[1]),
"-a", algorithm
]
if mode == "inference" and transform_strategy is not None:
benchmark_args += ["-ts", transform_strategy]
if threads is not None:
benchmark_args += ["-t", str(threads)]
benchmark = subprocess.Popen(benchmark_args, stdout=subprocess.PIPE)
benchmark_stdout, _ = benchmark.communicate()
if benchmark.returncode == 0:
output_lines = [line for line in benchmark_stdout.splitlines() if len(line)]
total, input_transform, kernel_transform, output_transform, block_multiplication, overhead = None, None, None, None, None, None
for output_line in output_lines:
total = total or extract_time(output_line, "Time:")
input_transform = input_transform or extract_time(output_line, "Input transform:")
kernel_transform = kernel_transform or extract_time(output_line, "Kernel transform:")
output_transform = output_transform or extract_time(output_line, "Output transform:")
block_multiplication = block_multiplication or extract_time(output_line, "Block multiplication:")
overhead = overhead or extract_time(output_line, "Overhead:")
if verbose:
return (total, input_transform, kernel_transform, output_transform, block_multiplication, overhead)
else:
return (total,)
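# Illustrative call (values chosen arbitrarily):
#   convolution("inference", 1, 64, 128, (112, 112), (3, 3), 1, "ft8x8",
#               transform_strategy="compute", threads=4)
# returns a 1-tuple with the total time, or a 6-tuple of timings when verbose=True.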
def fully_connected(mode, batch_size, input_channels, output_channels, threads=None, verbose=False, use_selldr=False):
import subprocess
if use_selldr:
import os
import sys
nacl_sdk_dir = os.getenv("NACL_SDK_ROOT")
if nacl_sdk_dir is None:
print("Error: can not find Native Client SDK: set NACL_SDK_ROOT envorinment variable and try again", file=sys.stderr)
sys.exit(1)
benchmark_args = [os.path.join(nacl_sdk_dir, "tools", "sel_ldr.py"), "--",
"bin/fully-connected-benchmark"]
else:
benchmark_args = ["bin/fully-connected-benchmark"]
benchmark_args += [
"-m", mode,
"-b", str(batch_size),
"-ic", str(input_channels),
"-oc", str(output_channels)
]
if threads is not None:
benchmark_args += ["-t", str(threads)]
benchmark = subprocess.Popen(benchmark_args, stdout=subprocess.PIPE)
benchmark_stdout, _ = benchmark.communicate()
if benchmark.returncode == 0:
output_lines = [line for line in benchmark_stdout.splitlines() if len(line)]
total, input_transform, kernel_transform, block_multiplication, overhead = None, None, None, None, None
for output_line in output_lines:
total = total or extract_time(output_line, "Time:")
input_transform = input_transform or extract_time(output_line, "Input packing:")
kernel_transform = kernel_transform or extract_time(output_line, "Kernel packing:")
block_multiplication = block_multiplication or extract_time(output_line, "Block multiplication:")
overhead = overhead or extract_time(output_line, "Overhead:")
if verbose:
return (total, input_transform, kernel_transform, block_multiplication, overhead)
else:
return (total,)
overfeat_fast_layers = [
("conv2", 96, 256, (24, 24), (5, 5), 0),
("conv3", 256, 512, (12, 12), (3, 3), 1),
("conv4", 512, 1024, (12, 12), (3, 3), 1),
("conv5", 1024, 1024, (12, 12), (3, 3), 1),
("fc6", 36864, 3072),
("fc7", 3072, 4096),
("fc8", 4096, 1000),
]
alexnet_layers = [
("conv2", 64, 192, (27, 27), (5, 5), 2),
("conv3", 192, 384, (13, 13), (3, 3), 1),
("conv4", 384, 256, (13, 13), (3, 3), 1),
("conv5", 256, 256, (13, 13), (3, 3), 1),
("fc6", 12544, 4096),
("fc7", 4096, 4096),
("fc8", 4096, 1000),
]
vgg_a_layers = [
("conv1", 3, 64, (224, 224), (3, 3), 1),
("conv2", 64, 128, (112, 112), (3, 3), 1),
("conv3.1", 128, 256, (56, 56), (3, 3), 1),
("conv3.2", 256, 256, (56, 56), (3, 3), 1),
("conv4.1", 256, 512, (28, 28), (3, 3), 1),
("conv4.2", 512, 512, (28, 28), (3, 3), 1),
("conv5", 512, 512, (14, 14), (3, 3), 1),
("fc6", 25088, 4096),
("fc7", 4096, 4096),
("fc8", 4096, 1000),
]
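# Layer descriptors: convolutional entries are
# (name, input_channels, output_channels, image_size, kernel_size, padding);
# fully-connected entries are (name, input_channels, output_channels).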
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="NNPACK benchmarking script")
parser.add_argument("--enable-selldr", dest="use_selldr", action="store_true")
parser.add_argument("-l", "--layer", dest="layer", required=True, choices=["convolution", "fully-connected", "pooling"])
parser.add_argument("-n", "--network", dest="network", required=True, choices=["vgg-a", "alexnet", "overfeat-fast"])
parser.add_argument("-m", "--mode", dest="mode", required=True, choices=["inference", "output", "input-gradient", "kernel-gradient"])
parser.add_argument("--transform-strategy", dest="transform_strategy", default="compute", choices=["compute", "precompute"])
parser.add_argument("-b", "--batch", dest="batch", type=int)
parser.add_argument("-t", "--threads", dest="threads")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", default=False)
options = parser.parse_args()
network_layers, default_batch = {
"vgg-a": (vgg_a_layers, 64),
"alexnet": (alexnet_layers, 128),
"overfeat-fast": (overfeat_fast_layers, 128)
}[options.network]
layer_prefix = {
"convolution": "conv",
"fully-connected": "fc",
"pooling": "pool"
}[options.layer]
network_layers = [layer for layer in network_layers if layer[0].startswith(layer_prefix)]
batch = default_batch
if options.batch is not None:
batch = options.batch
if batch != 1 and options.mode == "inference":
raise ValueError("Non-unit batch {batch} is not allowed in inference mode".format(batch=batch))
elif options.mode == "inference":
batch = 1
if options.transform_strategy is not None:
if options.layer != "convolution":
raise ValueError("Transform strategy {transform_strategy} is meaningless for non-convolutional layers".format(transform_strategy=transform_strategy))
elif options.mode != "inference":
raise ValueError("Transform strategy {transform_strategy} is meaningless in non-inference mode".format(transform_strategy=transform_strategy))
if options.layer == "convolution":
for name, input_channels, output_channels, image_size, kernel_size, padding in network_layers:
measurements = [name]
for algorithm in ["implicit-gemm", "ft8x8", "ft16x16", "wt8x8"]:
if algorithm.startswith("wt") and kernel_size != (3, 3):
continue
measurements += list(convolution(options.mode, batch, input_channels, output_channels,
image_size, kernel_size, padding, algorithm,
transform_strategy=options.transform_strategy,
threads=options.threads, verbose=options.verbose, use_selldr=options.use_selldr))
print("\t".join(map(str, measurements)))
elif options.layer == "fully-connected":
for name, input_channels, output_channels in network_layers:
measurements = fully_connected(options.mode, batch, input_channels, output_channels,
threads=options.threads, verbose=options.verbose, use_selldr=options.use_selldr)
print("{name}\t{measurements}".format(name=name, measurements="\t".join(measurements)))
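
# PeachPy listing for the read_memory() benchmarking helper (built as memread.py
# on x86-64 by the configure script): it walks the buffer one 64-byte cache line
# per iteration, touching each line with a 16-byte SSE load.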
arg_mem = Argument(ptr(), "mem")
arg_len = Argument(size_t, "n")
with Function("read_memory", (arg_mem, arg_len)):
reg_mem = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_mem, arg_mem)
reg_len = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_len, arg_len)
main_loop = Loop()
SUB(reg_len, 64)
JB(main_loop.end)
with main_loop:
MOVAPS(xmm0, [reg_mem])
ADD(reg_mem, 64)
SUB(reg_len, 64)
JAE(main_loop.begin)
RETURN()
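
# PeachPy listing of the AVX2 split-format (structure-of-arrays) FFT stubs:
# nnp_fft16_soa__avx2, nnp_fft8_soa__avx2, nnp_ifft8_soa__avx2 and
# nnp_ifft16_soa__avx2, which load packed real/imaginary rows and apply the
# row-wise transforms from fft.complex_soa.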
import fft.complex_soa
arg_t = Argument(ptr(const_float_), name="t")
arg_f = Argument(ptr(float_), name="f")
with Function("nnp_fft16_soa__avx2",
(arg_t, arg_f),
target=uarch.default + isa.fma3 + isa.avx2):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_real = YMMRegister(), YMMRegister()
ymm_imag = YMMRegister(), YMMRegister()
for i, ymm_data in enumerate(ymm_real + ymm_imag):
VMOVUPS(ymm_data, [reg_t + i * YMMRegister.size])
fft.complex_soa.fft16_within_rows(ymm_real, ymm_imag)
for i, ymm_data in enumerate(ymm_real + ymm_imag):
VMOVUPS([reg_f + i * YMMRegister.size], ymm_data)
RETURN()
with Function("nnp_fft8_soa__avx2",
(arg_t, arg_f),
target=uarch.default + isa.fma3 + isa.avx2):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_real, ymm_imag = YMMRegister(), YMMRegister()
VMOVUPS(ymm_real, [reg_t])
VMOVUPS(ymm_imag, [reg_t + YMMRegister.size])
fft.complex_soa.fft8_within_rows(ymm_real, ymm_imag)
VMOVUPS([reg_f], ymm_real)
VMOVUPS([reg_f + YMMRegister.size], ymm_imag)
RETURN()
with Function("nnp_ifft8_soa__avx2",
(arg_t, arg_f),
target=uarch.default + isa.fma3 + isa.avx2):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_real, ymm_imag = YMMRegister(), YMMRegister()
VMOVUPS(ymm_real, [reg_t])
VMOVUPS(ymm_imag, [reg_t + YMMRegister.size])
fft.complex_soa.fft8_within_rows(ymm_real, ymm_imag, transformation="inverse")
VMOVUPS([reg_f], ymm_real)
VMOVUPS([reg_f + YMMRegister.size], ymm_imag)
RETURN()
with Function("nnp_ifft16_soa__avx2",
(arg_f, arg_t),
target=uarch.default + isa.fma3 + isa.avx2):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
ymm_real = YMMRegister(), YMMRegister()
ymm_imag = YMMRegister(), YMMRegister()
for i, ymm_data in enumerate(ymm_real + ymm_imag):
VMOVUPS(ymm_data, [reg_f + i * YMMRegister.size])
fft.complex_soa.ifft16_within_rows(ymm_real, ymm_imag)
for i, ymm_data in enumerate(ymm_real + ymm_imag):
VMOVUPS([reg_t + i * YMMRegister.size], ymm_data)
RETURN()
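
# PeachPy listing of the AVX2 16x16 two-dimensional FFT kernels: forward
# transforms with "store"/"stream" write variants and inverse transforms with
# optional offset, bias, and ReLU. Column masks computed from the row/column
# offsets and counts handle partially filled 16x16 tiles.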
from __future__ import absolute_import
from __future__ import division
import fft16x16
import fft.complex_soa
import fft.two_real_to_two_complex_soa_perm_planar
import fft.two_complex_soa_perm_to_two_real_planar
arg_t_pointer = Argument(ptr(const_float_), name="t")
arg_f_pointer = Argument(ptr(float_), name="f")
arg_x_pointer = Argument(ptr(const_float_), name="x")
arg_t_stride = Argument(size_t, name="stride_t")
arg_f_stride = Argument(size_t, name="stride_f")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
for post_operation in ["stream", "store"]:
fft16x16_arguments = (arg_t_pointer, arg_f_pointer, arg_t_stride, arg_f_stride, arg_row_count, arg_column_count, arg_row_offset, arg_column_offset)
with Function("nnp_fft16x16_with_offset_and_{post_operation}__avx2".format(post_operation=post_operation),
fft16x16_arguments, target=uarch.default + isa.fma3 + isa.avx2):
reg_t0 = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t0, arg_t_pointer)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f_pointer)
reg_t_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t_stride, arg_t_stride)
reg_f_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f_stride, arg_f_stride)
reg_row_end = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_end, arg_row_count)
reg_column_end = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_end, arg_column_count)
reg_row_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_start, arg_row_offset)
ADD(reg_row_end, reg_row_start)
reg_column_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_start, arg_column_offset)
ADD(reg_column_end, reg_column_start)
ymm_column_start, ymm_column_end = YMMRegister(), YMMRegister()
VMOVD(ymm_column_start.as_xmm, reg_column_start.as_dword)
VMOVD(ymm_column_end.as_xmm, reg_column_end.as_dword)
VPBROADCASTD(ymm_column_start, ymm_column_start.as_xmm)
VPBROADCASTD(ymm_column_end, ymm_column_end.as_xmm)
ymm_column_01234567 = YMMRegister()
VMOVDQA(ymm_column_01234567, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
ymm_column_start_gt_01234567, ymm_column_end_gt_01234567 = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_column_start_gt_01234567, ymm_column_start, ymm_column_01234567)
VPCMPGTD(ymm_column_end_gt_01234567, ymm_column_end, ymm_column_01234567)
ymm_column_89ABCDEF = YMMRegister()
VMOVDQA(ymm_column_89ABCDEF, Constant.uint32x8(8, 9, 10, 11, 12, 13, 14, 15))
ymm_column_start_gt_89ABCDEF, ymm_column_end_gt_89ABCDEF = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_column_start_gt_89ABCDEF, ymm_column_start, ymm_column_89ABCDEF)
VPCMPGTD(ymm_column_end_gt_89ABCDEF, ymm_column_end, ymm_column_89ABCDEF)
ymm_load_mask_columns_0_to_8 = YMMRegister()
VPANDN(ymm_load_mask_columns_0_to_8, ymm_column_start_gt_01234567, ymm_column_end_gt_01234567)
ymm_load_mask_columns_8_to_16 = YMMRegister()
VPANDN(ymm_load_mask_columns_8_to_16, ymm_column_start_gt_89ABCDEF, ymm_column_end_gt_89ABCDEF)
load_mask_columns_8_to_16 = LocalVariable(ymm_load_mask_columns_8_to_16)
VMOVDQA(load_mask_columns_8_to_16, ymm_load_mask_columns_8_to_16)
# The data pointer points to the first valid element, which is loaded into lane `reg_column_start`.
# However, VMASKMOVPS expects a pointer to lane 0, even if that lane is not loaded.
# Adjust the pointer by subtracting column_offset, converted to bytes.
SHL(reg_column_start, 2)
SUB(reg_t0, reg_column_start.as_qword)
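# Illustrative: with column_offset = 3, reg_column_start becomes 12 after the shift
# (3 floats * 4 bytes), and moving the base pointer back by 12 bytes makes lane 3
# of the masked load line up with the first valid element.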
# Multiply stride by sizeof(float) to convert from elements to bytes
SHL(reg_t_stride, 2)
# t8_offset = stride * (8 - row_start)
reg_t8_offset = GeneralPurposeRegister64()
MOV(reg_t8_offset.as_dword, 8)
SUB(reg_t8_offset.as_dword, reg_row_start)
IMUL(reg_t8_offset, reg_t_stride)
reg_t8 = GeneralPurposeRegister64()
LEA(reg_t8, [reg_t0 + reg_t8_offset * 1])
CMP(reg_row_start, 8)
CMOVAE(reg_t8, reg_t0)
reg_t0_column_8, reg_t8_column_8 = GeneralPurposeRegister64(), GeneralPurposeRegister64()
LEA(reg_t0_column_8, [reg_t0 + YMMRegister.size])
LEA(reg_t8_column_8, [reg_t8 + YMMRegister.size])
vfft_columns_0_to_8 = [LocalVariable(YMMRegister.size) for _ in range(16)]
vfft_columns_8_to_16 = [YMMRegister() if i < 4 else LocalVariable(YMMRegister.size) for i in range(16)]
fft16x16.forward_vfft(reg_t0, reg_t8, reg_t_stride, data_out=vfft_columns_0_to_8,
reg_row_start=reg_row_start, reg_row_end=reg_row_end, ymm_load_mask=ymm_load_mask_columns_0_to_8)
ymm_load_mask_columns_8_to_16 = YMMRegister()
VMOVDQA(ymm_load_mask_columns_8_to_16, load_mask_columns_8_to_16)
fft16x16.forward_vfft(reg_t0_column_8, reg_t8_column_8, reg_t_stride, data_out=vfft_columns_8_to_16,
reg_row_start=reg_row_start, reg_row_end=reg_row_end, ymm_load_mask=ymm_load_mask_columns_8_to_16)
for row_batch_start, row_batch_end in [(0, 2), (2, 5), (5, 8)]:
ymm_wr_list = [(YMMRegister(), YMMRegister()) for _ in range(row_batch_start, row_batch_end)]
ymm_wi_list = [(YMMRegister(), YMMRegister()) for _ in range(row_batch_start, row_batch_end)]
for row_offset, (ymm_wr, ymm_wi) in enumerate(zip(ymm_wr_list, ymm_wi_list)):
row = row_batch_start + row_offset
VMOVAPS(ymm_wr[0], vfft_columns_0_to_8[row*2+0])
VMOVAPS(ymm_wr[1], vfft_columns_8_to_16[row*2+0])
VMOVAPS(ymm_wi[0], vfft_columns_0_to_8[row*2+1])
VMOVAPS(ymm_wi[1], vfft_columns_8_to_16[row*2+1])
fft.complex_soa.fft16_within_rows(ymm_wr_list, ymm_wi_list, bit_reversal=False)
if row_batch_start == 0:
fft.two_real_to_two_complex_soa_perm_planar.fft16_within_rows_postprocess(ymm_wr_list[0], ymm_wi_list[0], bit_reversal=True)
VSTOREPS = {"store": VMOVAPS, "stream": VMOVNTPS}[post_operation]
for row_batch_offset, (ymm_wr, ymm_wi) in enumerate(zip(ymm_wr_list, ymm_wi_list)):
row = row_batch_start + row_batch_offset
for column in range(2):
VSTOREPS([reg_f], ymm_wr[column])
VSTOREPS([reg_f + YMMRegister.size], ymm_wi[column])
if row + 1 != 8 or column + 1 != 2:
ADD(reg_f, reg_f_stride)
RETURN()
arg_f_pointer = Argument(ptr(const_float_), name="f_pointer")
arg_t_pointer = Argument(ptr(float_), name="t_pointer")
arg_bias = Argument(ptr(const_float_), name="bias_pointer")
arg_f_stride = Argument(size_t, name="f_stride")
arg_t_stride = Argument(size_t, name="t_stride")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
for with_offset, with_bias, with_relu in [(True, False, False), (False, True, False), (False, True, True)]:
if with_bias:
ifft16x16_arguments = (arg_f_pointer, arg_t_pointer, arg_bias, arg_f_stride, arg_t_stride, arg_row_count, arg_column_count)
else:
ifft16x16_arguments = (arg_f_pointer, arg_t_pointer, arg_f_stride, arg_t_stride, arg_row_count, arg_column_count)
if with_offset:
ifft16x16_arguments += (arg_row_offset, arg_column_offset)
with Function("nnp_ifft16x16{with_offset}{with_bias}{with_relu}__avx2".format(
with_offset="_with_offset" if with_offset else "",
with_bias="_with_bias" if with_bias else "",
with_relu="_with_relu" if with_relu else ""),
ifft16x16_arguments, target=uarch.default + isa.fma3 + isa.avx2):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f_pointer)
reg_t0 = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t0, arg_t_pointer)
if with_bias:
reg_bias = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_bias, arg_bias)
reg_f_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f_stride, arg_f_stride)
reg_t_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t_stride, arg_t_stride)
reg_row_end = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_end, arg_row_count)
reg_column_end = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_end, arg_column_count)
if with_offset:
reg_row_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_start, arg_row_offset)
ADD(reg_row_end, reg_row_start)
reg_column_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_start, arg_column_offset)
ADD(reg_column_end, reg_column_start)
else:
reg_row_start = None
if with_offset:
ymm_column_start, ymm_column_end = YMMRegister(), YMMRegister()
VMOVD(ymm_column_start.as_xmm, reg_column_start.as_dword)
VMOVD(ymm_column_end.as_xmm, reg_column_end.as_dword)
VPBROADCASTD(ymm_column_start, ymm_column_start.as_xmm)
VPBROADCASTD(ymm_column_end, ymm_column_end.as_xmm)
ymm_column_01234567 = YMMRegister()
VMOVDQA(ymm_column_01234567, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
ymm_column_start_gt_01234567, ymm_column_end_gt_01234567 = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_column_start_gt_01234567, ymm_column_start, ymm_column_01234567)
VPCMPGTD(ymm_column_end_gt_01234567, ymm_column_end, ymm_column_01234567)
ymm_column_89ABCDEF = YMMRegister()
VMOVDQA(ymm_column_89ABCDEF, Constant.uint32x8(8, 9, 10, 11, 12, 13, 14, 15))
ymm_column_start_gt_89ABCDEF, ymm_column_end_gt_89ABCDEF = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_column_start_gt_89ABCDEF, ymm_column_start, ymm_column_89ABCDEF)
VPCMPGTD(ymm_column_end_gt_89ABCDEF, ymm_column_end, ymm_column_89ABCDEF)
ymm_store_mask_columns_0_to_8 = YMMRegister()
VPANDN(ymm_store_mask_columns_0_to_8, ymm_column_start_gt_01234567, ymm_column_end_gt_01234567)
store_mask_columns_0_to_8 = LocalVariable(ymm_store_mask_columns_0_to_8)
VMOVDQA(store_mask_columns_0_to_8, ymm_store_mask_columns_0_to_8)
ymm_store_mask_columns_8_to_16 = YMMRegister()
VPANDN(ymm_store_mask_columns_8_to_16, ymm_column_start_gt_89ABCDEF, ymm_column_end_gt_89ABCDEF)
store_mask_columns_8_to_16 = LocalVariable(ymm_store_mask_columns_8_to_16)
VMOVDQA(store_mask_columns_8_to_16, ymm_store_mask_columns_8_to_16)
SHL(reg_column_start, 2)
SUB(reg_t0, reg_column_start.as_qword)
else:
ymm_column_end = YMMRegister()
VMOVD(ymm_column_end.as_xmm, reg_column_end.as_dword)
VPBROADCASTD(ymm_column_end, ymm_column_end.as_xmm)
ymm_store_mask_columns_0_to_8, ymm_store_mask_columns_8_to_16 = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_store_mask_columns_0_to_8, ymm_column_end, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
VPCMPGTD(ymm_store_mask_columns_8_to_16, ymm_column_end, Constant.uint32x8(8, 9, 10, 11, 12, 13, 14, 15))
store_mask_columns_0_to_8 = LocalVariable(ymm_store_mask_columns_0_to_8)
VMOVDQA(store_mask_columns_0_to_8, ymm_store_mask_columns_0_to_8)
store_mask_columns_8_to_16 = LocalVariable(ymm_store_mask_columns_8_to_16)
VMOVDQA(store_mask_columns_8_to_16, ymm_store_mask_columns_8_to_16)
# Multiply stride by sizeof(float) to convert from elements to bytes
SHL(reg_t_stride, 2)
vfft_columns_0_to_8 = [YMMRegister() if i > 10 else LocalVariable(YMMRegister.size) for i in range(16)]
vfft_columns_8_to_16 = [LocalVariable(YMMRegister.size) for _ in range(16)]
for row_batch_start, row_batch_end in [(0, 2), (2, 5), (5, 8)]:
ymm_wr_list = [(YMMRegister(), YMMRegister()) for _ in range(row_batch_start, row_batch_end)]
ymm_wi_list = [(YMMRegister(), YMMRegister()) for _ in range(row_batch_start, row_batch_end)]
for row_offset, (ymm_wr, ymm_wi) in enumerate(zip(ymm_wr_list, ymm_wi_list)):
row = row_batch_start + row_offset
VMOVAPS(ymm_wr[0], [reg_f])
VMOVAPS(ymm_wi[0], [reg_f + YMMRegister.size])
ADD(reg_f, reg_f_stride)
if with_bias and row == 0:
ymm_bias = YMMRegister()
VMOVSS(ymm_bias.as_xmm, [reg_bias])
VFMADD231PS(ymm_wr[0], ymm_bias, Constant.float32x8(256.0))
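# Presumably the 2D inverse transform scales the DC coefficient by 1/(16*16) = 1/256
# (see the 1/16-per-pass factor in inverse_vfft), so the bias is pre-multiplied by 256
# here to come out as +bias per element in the spatial domain.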
VMOVAPS(ymm_wr[1], [reg_f])
VMOVAPS(ymm_wi[1], [reg_f + YMMRegister.size])
if row + 1 != 8:
ADD(reg_f, reg_f_stride)
if row_batch_start == 0:
fft.two_complex_soa_perm_to_two_real_planar.ifft16_within_rows_preprocess(ymm_wr_list[0], ymm_wi_list[0], bit_reversal=True)
fft.complex_soa.ifft16_within_rows(ymm_wr_list, ymm_wi_list, bit_reversal=False)
for row_offset, (ymm_wr, ymm_wi) in enumerate(zip(ymm_wr_list, ymm_wi_list)):
row = row_batch_start + row_offset
VMOVAPS(vfft_columns_0_to_8[row*2+0], ymm_wr[0])
VMOVAPS(vfft_columns_8_to_16[row*2+0], ymm_wr[1])
VMOVAPS(vfft_columns_0_to_8[row*2+1], ymm_wi[0])
VMOVAPS(vfft_columns_8_to_16[row*2+1], ymm_wi[1])
if reg_row_start is not None:
# t8_offset = stride * (8 - row_start)
reg_t8_offset = GeneralPurposeRegister64()
MOV(reg_t8_offset.as_dword, 8)
SUB(reg_t8_offset.as_dword, reg_row_start)
IMUL(reg_t8_offset, reg_t_stride)
reg_t8 = GeneralPurposeRegister64()
LEA(reg_t8, [reg_t0 + reg_t8_offset * 1])
CMP(reg_row_start, 8)
CMOVAE(reg_t8, reg_t0)
else:
reg_t8 = GeneralPurposeRegister64()
LEA(reg_t8, [reg_t0 + reg_t_stride * 8])
reg_t0_column_8, reg_t8_column_8 = GeneralPurposeRegister64(), GeneralPurposeRegister64()
LEA(reg_t0_column_8, [reg_t0 + YMMRegister.size])
LEA(reg_t8_column_8, [reg_t8 + YMMRegister.size])
fft16x16.inverse_vfft(reg_t0, reg_t8, reg_t_stride, data_in=vfft_columns_0_to_8,
reg_row_start=reg_row_start, reg_row_end=reg_row_end, store_mask=store_mask_columns_0_to_8, relu=with_relu)
with Block() as store_columns_8_to_16:
CMP(reg_column_end, 8)
JB(store_columns_8_to_16.end)
fft16x16.inverse_vfft(reg_t0_column_8, reg_t8_column_8, reg_t_stride, data_in=vfft_columns_8_to_16, \
reg_row_start=reg_row_start, reg_row_end=reg_row_end, store_mask=store_mask_columns_8_to_16, relu=with_relu)
RETURN()
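
# PeachPy module with the vertical 16-point real FFT helpers used by the 16x16
# kernels above: forward_vfft() reads up to 16 rows (with optional row bounds and
# a column load mask), runs the butterfly and twiddle stages with bit reversal,
# and emits the packed real-to-complex output; inverse_vfft() is its counterpart
# with an optional ReLU and a column store mask.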
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
from common import butterfly, sqrt2_over_2, cos_npi_over_8, interleave
def fft8_bitreverse(n):
return int(format(n, "03b")[::-1], 2)
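# Bit-reverses a 3-bit index, e.g. fft8_bitreverse(1) == 4 and fft8_bitreverse(3) == 6.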
def load_ymm_variable(variable):
assert isinstance(variable, (YMMRegister, LocalVariable))
ymm_variable = variable
if isinstance(variable, LocalVariable):
assert variable.size == YMMRegister.size
ymm_variable = YMMRegister()
VMOVAPS(ymm_variable, variable)
return ymm_variable
def store_ymm_result(variable, result):
assert isinstance(result, YMMRegister)
if isinstance(variable, YMMRegister):
SWAP.REGISTERS(variable, result)
else:
VMOVAPS(variable, result)
def forward_vfft(reg_t0, reg_t8, reg_t_stride, data_out, reg_row_start=None, reg_row_end=None, ymm_load_mask=None):
assert isinstance(reg_t0, GeneralPurposeRegister64)
assert isinstance(reg_t8, GeneralPurposeRegister64)
assert isinstance(reg_t_stride, GeneralPurposeRegister64)
assert isinstance(data_out, list) and len(data_out) == 16
assert ymm_load_mask is None or isinstance(ymm_load_mask, YMMRegister)
out_real, out_imag = data_out[0::2], data_out[1::2]
real, imag = [YMMRegister() for _ in range(8)], [YMMRegister() for _ in range(8)]
imag[0] = LocalVariable(YMMRegister.size)
imag[4] = LocalVariable(YMMRegister.size)
data = interleave(real, imag)
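# data interleaves the row accumulators as [re0, im0, re1, im1, ...]; imag[0] and
# imag[4] are spilled to stack LocalVariables, presumably to keep the working set
# within the 16 available YMM registers.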
for i, (data_lo, data_hi) in enumerate(zip(data[0:8], data[8:16])):
row_lo = i
row_hi = row_lo + 8
ymm_data_lo, ymm_data_hi = data_lo, data_hi
if isinstance(data_lo, LocalVariable):
ymm_data_lo = YMMRegister()
if isinstance(data_hi, LocalVariable):
ymm_data_hi = YMMRegister()
VXORPS(ymm_data_lo, ymm_data_lo, ymm_data_lo)
skip_data_lo = Label()
if reg_row_start:
CMP(reg_row_start, row_lo)
JA(skip_data_lo)
if reg_row_end:
CMP(reg_row_end, row_lo)
JBE(skip_data_lo)
if ymm_load_mask is None:
VMOVUPS(ymm_data_lo, [reg_t0])
else:
VMASKMOVPS(ymm_data_lo, ymm_load_mask, [reg_t0])
if i + 1 != 8:
ADD(reg_t0, reg_t_stride)
LABEL(skip_data_lo)
VMOVAPS(ymm_data_hi, ymm_data_lo)
skip_data_hi = Label()
if reg_row_start:
CMP(reg_row_start, row_hi)
JA(skip_data_hi)
if reg_row_end:
CMP(reg_row_end, row_hi)
JBE(skip_data_hi)
if ymm_load_mask is None:
VMOVUPS(ymm_data_hi, [reg_t8])
butterfly(ymm_data_lo, ymm_data_hi)
else:
ymm_temp_hi = YMMRegister()
VMASKMOVPS(ymm_temp_hi, ymm_load_mask, [reg_t8])
VSUBPS(ymm_data_hi, ymm_data_lo, ymm_temp_hi)
VADDPS(ymm_data_lo, ymm_data_lo, ymm_temp_hi)
if i + 1 != 8:
ADD(reg_t8, reg_t_stride)
LABEL(skip_data_hi)
if isinstance(data_lo, LocalVariable):
VMOVAPS(data_lo, ymm_data_lo)
if isinstance(data_hi, LocalVariable):
VMOVAPS(data_hi, ymm_data_hi)
# FFT8: multiplication by twiddle factors
fft4_scale_b, fft4_negate_b = {}, {}
fft2_scale_b, fft2_negate_b = {}, {}
# w6.re, w6.im = w6.im, -w6.re
SWAP.REGISTERS(real[6], imag[6])
fft4_negate_b[id(imag[6])] = True
# w5.re, w5.im = SQRT2_OVER_2 * (w5.re + w5.im), SQRT2_OVER_2 * (w5.im - w5.re)
butterfly(imag[5], real[5])
SWAP.REGISTERS(real[5], imag[5])
# w7.re, w7.im = -SQRT2_OVER_2 * (w7.re - w7.im), -SQRT2_OVER_2 * (w7.re + w7.im)
butterfly(real[7], imag[7], negate_b=True)
fft4_negate_b[id(real[7])] = True
fft4_negate_b[id(imag[7])] = True
# Propagate multiplication by sqrt2_over_2 until the last butterfly in FFT2
ymm_sqrt2_over_2 = YMMRegister()
fft2_scale_b[id(real[5])] = ymm_sqrt2_over_2
fft2_scale_b[id(imag[5])] = ymm_sqrt2_over_2
fft2_scale_b[id(real[7])] = ymm_sqrt2_over_2
fft2_scale_b[id(imag[7])] = ymm_sqrt2_over_2
# 2x FFT4: butterfly
for data_lo, data_hi in zip(data[0:4] + data[8:12], data[4:8] + data[12:16]):
butterfly(data_lo, data_hi, negate_b=fft4_negate_b.get(id(data_hi), False), scale_b=fft4_scale_b.get(id(data_hi)))
# 2x FFT4: multiplication by twiddle factors
# w3.re, w3.im = w3.im, -w3.re
# w7.re, w7.im = w7.im, -w7.re
SWAP.REGISTERS(real[3], imag[3])
SWAP.REGISTERS(real[7], imag[7])
fft2_negate_b[id(imag[3])] = True
fft2_negate_b[id(imag[7])] = True
# 4x FFT2: butterfly
# Process the first two elements separately
ymm_real0, ymm_real1 = butterfly(real[0], real[1], writeback=False)
store_ymm_result(out_real[4], ymm_real1) # bit-reversal: 1->4
ymm_imag0, ymm_imag1 = butterfly(imag[0], imag[1], negate_out_b=True, writeback=False)
store_ymm_result(out_imag[4], ymm_imag1) # bit-reversal: 1->4
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
for i, (data_lo, data_hi) in enumerate(zip(data[4:6] + data[8:10] + data[12:14], data[6:8] + data[10:12] + data[14:16])):
butterfly(data_lo, data_hi,
negate_b=fft2_negate_b.get(id(data_hi), False), scale_b=fft2_scale_b.get(id(data_hi)))
butterfly(ymm_real0, ymm_imag0)
store_ymm_result(out_real[0], ymm_real0)
store_ymm_result(out_imag[0], ymm_imag0)
# Bit reversal
for i in range(8):
new_i = fft8_bitreverse(i)
if new_i > i:
real[i], real[new_i] = real[new_i], real[i]
imag[i], imag[new_i] = imag[new_i], imag[i]
data = interleave(real, imag)
ymm_two_g2_real, ymm_two_g2_imag = YMMRegister(), YMMRegister()
ymm_two_h2_real, ymm_two_h2_imag = YMMRegister(), YMMRegister()
VADDPS(ymm_two_g2_real, real[2], real[6])
VSUBPS(ymm_two_h2_imag, real[6], real[2])
VSUBPS(ymm_two_g2_imag, imag[2], imag[6])
VADDPS(ymm_two_h2_real, imag[2], imag[6])
ymm_two_g1_real, ymm_two_g1_imag = YMMRegister(), YMMRegister()
ymm_two_h1_real, ymm_two_h1_imag = YMMRegister(), YMMRegister()
ymm_real1 = load_ymm_variable(real[1])
VADDPS(ymm_two_g1_real, ymm_real1, real[7])
VSUBPS(ymm_two_h1_imag, real[7], ymm_real1)
ymm_imag1 = load_ymm_variable(imag[1])
VSUBPS(ymm_two_g1_imag, ymm_imag1, imag[7])
VADDPS(ymm_two_h1_real, ymm_imag1, imag[7])
ymm_two_h2_add, ymm_two_h2_sub = YMMRegister(), YMMRegister()
VADDPS(ymm_two_h2_add, ymm_two_h2_real, ymm_two_h2_imag)
VSUBPS(ymm_two_h2_sub, ymm_two_h2_imag, ymm_two_h2_real)
ymm_two_g3_real, ymm_two_g3_imag = YMMRegister(), YMMRegister()
ymm_two_h3_real, ymm_two_h3_imag = YMMRegister(), YMMRegister()
VADDPS(ymm_two_g3_real, real[3], real[5])
VSUBPS(ymm_two_h3_imag, real[5], real[3])
VSUBPS(ymm_two_g3_imag, imag[3], imag[5])
VADDPS(ymm_two_h3_real, imag[3], imag[5])
# const float two_w2_real = two_g2_real + SQRT2_OVER_2 * (two_h2_real + two_h2_imag);
# const float two_w2_imag = two_g2_imag + SQRT2_OVER_2 * (two_h2_imag - two_h2_real);
# const float two_w6_real = two_g2_real - SQRT2_OVER_2 * (two_h2_real + two_h2_imag);
# const float two_w6_imag = -two_g2_imag + SQRT2_OVER_2 * (two_h2_imag - two_h2_real);
ymm_sqrt2_over_2 = YMMRegister()
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
ymm_two_w2_real, ymm_two_w6_real = YMMRegister(), ymm_two_g2_real
VMOVAPS(ymm_two_w2_real, ymm_two_g2_real)
VFMADD231PS(ymm_two_w2_real, ymm_two_h2_add, ymm_sqrt2_over_2)
VFNMADD231PS(ymm_two_w6_real, ymm_two_h2_add, ymm_sqrt2_over_2)
ymm_two_w2_imag, ymm_two_w6_imag = YMMRegister(), ymm_two_g2_imag
VMOVAPS(ymm_two_w2_imag, ymm_two_g2_imag)
VFMADD231PS(ymm_two_w2_imag, ymm_two_h2_sub, ymm_sqrt2_over_2)
VFMSUB231PS(ymm_two_w6_imag, ymm_two_h2_sub, ymm_sqrt2_over_2)
ymm_half = YMMRegister()
VMOVAPS(ymm_half, Constant.float32x8(0.5))
VMULPS(ymm_two_w2_real, ymm_two_w2_real, ymm_half)
store_ymm_result(out_real[2], ymm_two_w2_real)
VMULPS(ymm_two_w6_real, ymm_two_w6_real, ymm_half)
store_ymm_result(out_real[6], ymm_two_w6_real)
VMULPS(ymm_two_w2_imag, ymm_two_w2_imag, ymm_half)
store_ymm_result(out_imag[2], ymm_two_w2_imag)
VMULPS(ymm_two_w6_imag, ymm_two_w6_imag, ymm_half)
store_ymm_result(out_imag[6], ymm_two_w6_imag)
# const float two_w1_real = two_g1_real + two_h1_real * COS_1PI_OVER_8 + two_h1_imag * COS_3PI_OVER_8;
# const float two_w1_imag = two_g1_imag + two_h1_imag * COS_1PI_OVER_8 - two_h1_real * COS_3PI_OVER_8;
# const float two_w7_real = two_g1_real - two_h1_real * COS_1PI_OVER_8 - two_h1_imag * COS_3PI_OVER_8;
# const float two_w7_imag = -two_g1_imag + two_h1_imag * COS_1PI_OVER_8 - two_h1_real * COS_3PI_OVER_8;
# const float two_w3_real = two_g3_real + two_h3_real * COS_3PI_OVER_8 + two_h3_imag * COS_1PI_OVER_8;
# const float two_w3_imag = two_g3_imag + two_h3_imag * COS_3PI_OVER_8 - two_h3_real * COS_1PI_OVER_8;
# const float two_w5_real = two_g3_real - two_h3_real * COS_3PI_OVER_8 - two_h3_imag * COS_1PI_OVER_8;
# const float two_w5_imag = -two_g3_imag + two_h3_imag * COS_3PI_OVER_8 - two_h3_real * COS_1PI_OVER_8;
ymm_cos_1pi_over_8 = YMMRegister()
VMOVAPS(ymm_cos_1pi_over_8, Constant.float32x8(cos_npi_over_8[1]))
ymm_two_w1_real, ymm_two_w7_real = YMMRegister(), ymm_two_g1_real
VMOVAPS(ymm_two_w1_real, ymm_two_g1_real)
VFMADD231PS(ymm_two_w1_real, ymm_two_h1_real, ymm_cos_1pi_over_8)
VFNMADD231PS(ymm_two_w7_real, ymm_two_h1_real, ymm_cos_1pi_over_8)
ymm_two_w1_imag, ymm_two_w7_imag = YMMRegister(), ymm_two_g1_imag
VMOVAPS(ymm_two_w1_imag, ymm_two_g1_imag)
VFMADD231PS(ymm_two_w1_imag, ymm_two_h1_imag, ymm_cos_1pi_over_8)
VFMSUB231PS(ymm_two_w7_imag, ymm_two_h1_imag, ymm_cos_1pi_over_8)
ymm_two_w3_real, ymm_two_w5_real = YMMRegister(), ymm_two_g3_real
VMOVAPS(ymm_two_w3_real, ymm_two_g3_real)
VFMADD231PS(ymm_two_w3_real, ymm_two_h3_imag, ymm_cos_1pi_over_8)
VFNMADD231PS(ymm_two_w5_real, ymm_two_h3_imag, ymm_cos_1pi_over_8)
ymm_two_w3_imag, ymm_two_w5_imag = YMMRegister(), ymm_two_g3_imag
VMOVAPS(ymm_two_w3_imag, ymm_two_g3_imag)
VFNMADD231PS(ymm_two_w3_imag, ymm_two_h3_real, ymm_cos_1pi_over_8)
VFNMSUB231PS(ymm_two_w5_imag, ymm_two_h3_real, ymm_cos_1pi_over_8)
ymm_cos_3pi_over_8 = YMMRegister()
VMOVAPS(ymm_cos_3pi_over_8, Constant.float32x8(cos_npi_over_8[3]))
VFMADD231PS(ymm_two_w1_real, ymm_two_h1_imag, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_two_w7_real, ymm_two_h1_imag, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_two_w1_imag, ymm_two_h1_real, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_two_w7_imag, ymm_two_h1_real, ymm_cos_3pi_over_8)
VFMADD231PS(ymm_two_w3_real, ymm_two_h3_real, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_two_w5_real, ymm_two_h3_real, ymm_cos_3pi_over_8)
VFMADD231PS(ymm_two_w3_imag, ymm_two_h3_imag, ymm_cos_3pi_over_8)
VFMADD231PS(ymm_two_w5_imag, ymm_two_h3_imag, ymm_cos_3pi_over_8)
ymm_half = YMMRegister()
VMOVAPS(ymm_half, Constant.float32x8(0.5))
VMULPS(ymm_two_w1_real, ymm_two_w1_real, ymm_half)
store_ymm_result(out_real[1], ymm_two_w1_real)
VMULPS(ymm_two_w7_real, ymm_two_w7_real, ymm_half)
store_ymm_result(out_real[7], ymm_two_w7_real)
VMULPS(ymm_two_w1_imag, ymm_two_w1_imag, ymm_half)
store_ymm_result(out_imag[1], ymm_two_w1_imag)
VMULPS(ymm_two_w7_imag, ymm_two_w7_imag, ymm_half)
store_ymm_result(out_imag[7], ymm_two_w7_imag)
VMULPS(ymm_two_w3_real, ymm_two_w3_real, ymm_half)
store_ymm_result(out_real[3], ymm_two_w3_real)
VMULPS(ymm_two_w5_real, ymm_two_w5_real, ymm_half)
store_ymm_result(out_real[5], ymm_two_w5_real)
VMULPS(ymm_two_w3_imag, ymm_two_w3_imag, ymm_half)
store_ymm_result(out_imag[3], ymm_two_w3_imag)
VMULPS(ymm_two_w5_imag, ymm_two_w5_imag, ymm_half)
store_ymm_result(out_imag[5], ymm_two_w5_imag)
def inverse_vfft(reg_t0, reg_t8, reg_t_stride, data_in, reg_row_start=None, reg_row_end=None, store_mask=None, relu=False):
assert isinstance(reg_t0, GeneralPurposeRegister64)
assert isinstance(reg_t8, GeneralPurposeRegister64)
assert isinstance(reg_t_stride, GeneralPurposeRegister64)
assert isinstance(data_in, list) and len(data_in) == 16
assert reg_row_end is None or isinstance(reg_row_end, GeneralPurposeRegister32)
assert store_mask is None or isinstance(store_mask, LocalVariable) and store_mask.size == YMMRegister.size
in_real, in_imag = data_in[0::2], data_in[1::2]
ymm_scale_factor = YMMRegister()
VMOVAPS(ymm_scale_factor, Constant.float32x8(0.0625))
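# The 1/16 = 0.0625 normalization of the 16-point inverse row transform is folded
# into these first multiplications rather than applied as a separate scaling pass.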
ymm_W1_real, ymm_W1_imag = YMMRegister(), YMMRegister()
VMULPS(ymm_W1_real, ymm_scale_factor, in_real[1])
VMULPS(ymm_W1_imag, ymm_scale_factor, in_imag[1])
ymm_W2_real, ymm_W2_imag = YMMRegister(), YMMRegister()
VMULPS(ymm_W2_real, ymm_scale_factor, in_real[2])
VMULPS(ymm_W2_imag, ymm_scale_factor, in_imag[2])
ymm_W3_real, ymm_W3_imag = YMMRegister(), YMMRegister()
VMULPS(ymm_W3_real, ymm_scale_factor, in_real[3])
VMULPS(ymm_W3_imag, ymm_scale_factor, in_imag[3])
# G[n].real, H[n].real = W[n].real + W[8-n].real, W[n].real - W[8-n].real
# G[n].imag, H[n].imag = W[n].imag - W[8-n].imag, W[n].imag + W[8-n].imag
ymm_W7_real, ymm_W7_imag = YMMRegister(), YMMRegister()
VMOVUPS(ymm_W7_real, in_real[7])
ymm_G1_real, ymm_H1_real = butterfly(ymm_W1_real, ymm_W7_real, scale_b=ymm_scale_factor)
VMOVUPS(ymm_W7_imag, in_imag[7])
ymm_G1_imag, ymm_H1_imag = butterfly(ymm_W1_imag, ymm_W7_imag, scale_b=ymm_scale_factor, negate_b=True)
ymm_W6_real, ymm_W6_imag = YMMRegister(), YMMRegister()
VMOVUPS(ymm_W6_real, in_real[6])
ymm_G2_real, ymm_H2_real = butterfly(ymm_W2_real, ymm_W6_real, scale_b=ymm_scale_factor)
VMOVUPS(ymm_W6_imag, in_imag[6])
ymm_G2_imag, ymm_H2_imag = butterfly(ymm_W2_imag, ymm_W6_imag, scale_b=ymm_scale_factor, negate_b=True)
ymm_W5_real, ymm_W5_imag = YMMRegister(), YMMRegister()
VMOVUPS(ymm_W5_real, in_real[5])
ymm_G3_real, ymm_H3_real = butterfly(ymm_W3_real, ymm_W5_real, scale_b=ymm_scale_factor)
VMOVUPS(ymm_W5_imag, in_imag[5])
ymm_G3_imag, ymm_H3_imag = butterfly(ymm_W3_imag, ymm_W5_imag, scale_b=ymm_scale_factor, negate_b=True)
# H[2]+, H[2]- = H[2].real + H[2].imag, H[2].real - H[2].imag
ymm_H2_add, ymm_H2_sub = butterfly(ymm_H2_real, ymm_H2_imag)
# w[ n].real = G[ n].real - H[ n].real * cos((N-n)*pi/2N) - H[ n].imag * cos(n*pi/2N)
# w[2N-n].real = G[ n].real + H[ n].real * cos((N-n)*pi/2N) + H[ n].imag * cos(n*pi/2N)
# w[ n].imag = G[ n].imag + H[ n].real * cos(n*pi/2N) - H[ n].imag * cos((N-n)*pi/2N)
# w[2N-n].imag = -G[ n].imag + H[ n].real * cos(n*pi/2N) - H[ n].imag * cos((N-n)*pi/2N)
# w[ N-n].real = G[N-n].real - H[N-n].real * cos(n*pi/2N) - H[N-n].imag * cos((N-n)*pi/2N)
# w[ N+n].real = G[N-n].real + H[N-n].real * cos(n*pi/2N) + H[N-n].imag * cos((N-n)*pi/2N)
# w[ N-n].imag = G[N-n].imag + H[N-n].real * cos((N-n)*pi/2N) - H[N-n].imag * cos(n*pi/2N)
# w[ N+n].imag = -G[N-n].imag + H[N-n].real * cos((N-n)*pi/2N) - H[N-n].imag * cos(n*pi/2N)
ymm_cos_1pi_over_8, ymm_cos_3pi_over_8 = YMMRegister(), YMMRegister()
VMOVAPS(ymm_cos_3pi_over_8, Constant.float32x8(cos_npi_over_8[3]))
VMOVAPS(ymm_cos_1pi_over_8, Constant.float32x8(cos_npi_over_8[1]))
ymm_w1_real, ymm_w7_real = YMMRegister(), ymm_G1_real
VMOVAPS(ymm_w1_real, ymm_G1_real)
VFNMADD231PS(ymm_w1_real, ymm_H1_real, ymm_cos_3pi_over_8)
VFMADD231PS(ymm_w7_real, ymm_H1_real, ymm_cos_3pi_over_8)
ymm_w1_imag, ymm_w7_imag = YMMRegister(), ymm_G1_imag
VMOVAPS(ymm_w1_imag, ymm_G1_imag)
VFMADD231PS(ymm_w1_imag, ymm_H1_real, ymm_cos_1pi_over_8)
VFMSUB231PS(ymm_w7_imag, ymm_H1_real, ymm_cos_1pi_over_8)
ymm_w3_real, ymm_w5_real = YMMRegister(), ymm_G3_real
VMOVAPS(ymm_w3_real, ymm_G3_real)
VFNMADD231PS(ymm_w3_real, ymm_H3_real, ymm_cos_1pi_over_8)
VFMADD231PS(ymm_w5_real, ymm_H3_real, ymm_cos_1pi_over_8)
ymm_w3_imag, ymm_w5_imag = YMMRegister(), ymm_G3_imag
VMOVAPS(ymm_w3_imag, ymm_G3_imag)
VFMADD231PS(ymm_w3_imag, ymm_H3_real, ymm_cos_3pi_over_8)
VFMSUB231PS(ymm_w5_imag, ymm_H3_real, ymm_cos_3pi_over_8)
ymm_sqrt2_over_2 = YMMRegister()
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
# w[ N/2].real = G[N/2].real - H[N/2]+ * sqrt(2)/2
# w[ N/2].imag = G[N/2].imag + H[N/2]- * sqrt(2)/2
# w[3N/2].real = G[N/2].real + H[N/2]+ * sqrt(2)/2
# w[3N/2].imag = -G[N/2].imag + H[N/2]- * sqrt(2)/2
ymm_w2_real, ymm_w6_real = YMMRegister(), ymm_G2_real
VMOVAPS(ymm_w2_real, ymm_G2_real)
VFNMADD231PS(ymm_w2_real, ymm_H2_add, ymm_sqrt2_over_2)
VFMADD231PS(ymm_w6_real, ymm_H2_add, ymm_sqrt2_over_2)
ymm_w2_imag, ymm_w6_imag = YMMRegister(), ymm_G2_imag
VMOVAPS(ymm_w2_imag, ymm_G2_imag)
VFMADD231PS(ymm_w2_imag, ymm_H2_sub, ymm_sqrt2_over_2)
VFMSUB231PS(ymm_w6_imag, ymm_H2_sub, ymm_sqrt2_over_2)
# w[ n].real = G[ n].real - H[ n].real * cos((N-n)*pi/2N) - H[ n].imag * cos(n*pi/2N)
# w[2N-n].real = G[ n].real + H[ n].real * cos((N-n)*pi/2N) + H[ n].imag * cos(n*pi/2N)
# w[ n].imag = G[ n].imag + H[ n].real * cos(n*pi/2N) - H[ n].imag * cos((N-n)*pi/2N)
# w[2N-n].imag = -G[ n].imag + H[ n].real * cos(n*pi/2N) - H[ n].imag * cos((N-n)*pi/2N)
# w[ N-n].real = G[N-n].real - H[N-n].real * cos(n*pi/2N) - H[N-n].imag * cos((N-n)*pi/2N)
# w[ N+n].real = G[N-n].real + H[N-n].real * cos(n*pi/2N) + H[N-n].imag * cos((N-n)*pi/2N)
# w[ N-n].imag = G[N-n].imag + H[N-n].real * cos((N-n)*pi/2N) - H[N-n].imag * cos(n*pi/2N)
# w[ N+n].imag = -G[N-n].imag + H[N-n].real * cos((N-n)*pi/2N) - H[N-n].imag * cos(n*pi/2N)
ymm_cos_1pi_over_8, ymm_cos_3pi_over_8 = YMMRegister(), YMMRegister()
VMOVAPS(ymm_cos_1pi_over_8, Constant.float32x8(cos_npi_over_8[1]))
VMOVAPS(ymm_cos_3pi_over_8, Constant.float32x8(cos_npi_over_8[3]))
VFNMADD231PS(ymm_w1_real, ymm_H1_imag, ymm_cos_1pi_over_8)
VFMADD231PS(ymm_w7_real, ymm_H1_imag, ymm_cos_1pi_over_8)
VFNMADD231PS(ymm_w1_imag, ymm_H1_imag, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_w7_imag, ymm_H1_imag, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_w3_real, ymm_H3_imag, ymm_cos_3pi_over_8)
VFMADD231PS(ymm_w5_real, ymm_H3_imag, ymm_cos_3pi_over_8)
VFNMADD231PS(ymm_w3_imag, ymm_H3_imag, ymm_cos_1pi_over_8)
VFNMADD231PS(ymm_w5_imag, ymm_H3_imag, ymm_cos_1pi_over_8)
data = [
LocalVariable(YMMRegister.size), YMMRegister(),
ymm_w1_real, ymm_w1_imag,
ymm_w2_real, ymm_w2_imag,
ymm_w3_real, ymm_w3_imag,
LocalVariable(YMMRegister.size), LocalVariable(YMMRegister.size),
ymm_w5_real, ymm_w5_imag,
ymm_w6_real, ymm_w6_imag,
ymm_w7_real, ymm_w7_imag
]
real, imag = data[0::2], data[1::2]
# TODO: optimize
ymm_w0_real, ymm_w0_imag = YMMRegister(), imag[0]
VMOVUPS(ymm_w0_real, in_real[0])
VMOVUPS(ymm_w0_imag, in_imag[0])
VMULPS(ymm_w0_real, ymm_w0_real, Constant.float32x8(0.0625))
butterfly(ymm_w0_real, ymm_w0_imag, scale_b=Constant.float32x8(0.0625))
VMOVAPS(real[0], ymm_w0_real)
# TODO: optimize
ymm_w4_real, ymm_w4_imag = YMMRegister(), YMMRegister()
VMOVUPS(ymm_w4_real, in_real[4])
VMOVUPS(ymm_w4_imag, in_imag[4])
VMULPS(ymm_w4_real, ymm_w4_real, Constant.float32x8(0.125))
VMULPS(ymm_w4_imag, ymm_w4_imag, Constant.float32x8(-0.125))
VMOVAPS(real[4], ymm_w4_real)
VMOVAPS(imag[4], ymm_w4_imag)
# Bit reversal
for i in range(8):
new_i = fft8_bitreverse(i)
if new_i > i:
real[i], real[new_i] = real[new_i], real[i]
imag[i], imag[new_i] = imag[new_i], imag[i]
data = interleave(real, imag)
# 4x FFT2: butterfly
for i, (data_lo, data_hi) in enumerate(zip(data[0:2] + data[4:6] + data[8:10] + data[12:14], data[2:4] + data[6:8] + data[10:12] + data[14:16])):
butterfly(data_lo, data_hi)
# 2x FFT4: multiplication by twiddle factors
fft4_scale_b, fft4_negate_b = {}, {}
fft8_scale_b, fft8_negate_b = {}, {}
# w3.re, w3.im = -w3.im, w3.re
# w7.re, w7.im = -w7.im, w7.re
SWAP.REGISTERS(real[3], imag[3])
fft4_negate_b[id(real[3])] = True
SWAP.REGISTERS(real[7], imag[7])
fft4_negate_b[id(real[7])] = True
# 2x FFT4: butterfly
for data_lo, data_hi in zip(data[0:4] + data[8:12], data[4:8] + data[12:16]):
butterfly(data_lo, data_hi, negate_b=fft4_negate_b.get(id(data_hi), False))
# FFT8: multiplication by twiddle factors
# w6.re, w6.im = -w6.im, w6.re
SWAP.REGISTERS(real[6], imag[6])
fft8_negate_b[id(real[6])] = True
# w5.re, w5.im = SQRT2_OVER_2 * (w5.re - w5.im), SQRT2_OVER_2 * (w5.re + w5.im)
butterfly(real[5], imag[5], negate_b=True)
fft8_scale_b[id(real[5])] = Constant.float32x8(sqrt2_over_2)
fft8_scale_b[id(imag[5])] = Constant.float32x8(sqrt2_over_2)
# w7.re, w7.im = -SQRT2_OVER_2 * (w7.re + w7.im), SQRT2_OVER_2 * (w7.re - w7.im)
butterfly(real[7], imag[7])
fft8_scale_b[id(real[7])] = Constant.float32x8(sqrt2_over_2)
fft8_negate_b[id(real[7])] = True
fft8_scale_b[id(imag[7])] = Constant.float32x8(sqrt2_over_2)
ymm_store_mask = YMMRegister()
if store_mask:
VMOVAPS(ymm_store_mask, store_mask)
# FFT8: butterfly
with Block() as store_data:
for i, (data_lo, data_hi) in enumerate(zip(data[0:8], data[8:16])):
row_lo = i
row_hi = row_lo + 8
ymm_data_lo, ymm_data_hi = \
butterfly(data_lo, data_hi,
scale_b=fft8_scale_b.get(id(data_hi)),
negate_b=fft8_negate_b.get(id(data_hi), False),
writeback=False)
if relu:
ymm_zero = YMMRegister()
VMOVAPS(ymm_zero, Constant.float32x8(-0.0))
with Block() as store_data_lo:
if reg_row_start:
CMP(reg_row_start, row_lo)
JA(store_data_lo.end)
if reg_row_end:
CMP(reg_row_end, row_lo)
JBE(store_data_lo.end)
elif reg_row_end:
CMP(reg_row_end, row_lo)
JBE(store_data.end)
if relu:
VMAXPS(ymm_data_lo, ymm_zero, ymm_data_lo)
if store_mask:
VMASKMOVPS([reg_t0], ymm_store_mask, ymm_data_lo)
else:
VMOVUPS([reg_t0], ymm_data_lo)
if i + 1 != 8:
ADD(reg_t0, reg_t_stride)
with Block() as store_data_hi:
if reg_row_start:
CMP(reg_row_start, row_hi)
JA(store_data_hi.end)
if reg_row_end:
CMP(reg_row_end, row_hi)
JBE(store_data_hi.end)
if relu:
VMAXPS(ymm_data_hi, ymm_zero, ymm_data_hi)
if store_mask:
VMASKMOVPS([reg_t8], ymm_store_mask, ymm_data_hi)
else:
VMOVUPS([reg_t8], ymm_data_hi)
if i + 1 != 8:
ADD(reg_t8, reg_t_stride)
|
from __future__ import absolute_import
from __future__ import division
import fft.complex_soa
arg_t = Argument(ptr(const_float_), name="t")
arg_f = Argument(ptr(float_), name="f")
with Function("nnp_fft4_8aos__fma3",
(arg_t, arg_f),
target=uarch.default + isa.fma3):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_data = [YMMRegister() for _ in range(8)]
ymm_real, ymm_imag = ymm_data[0::2], ymm_data[1::2]
for i, ymm_i in enumerate(ymm_data):
VMOVUPS(ymm_i, [reg_t + i * YMMRegister.size])
fft.complex_soa.fft4_across_rows(ymm_real, ymm_imag)
for i, ymm_i in enumerate(ymm_data):
VMOVUPS([reg_f + i * YMMRegister.size], ymm_i)
RETURN()
from common import butterfly, sqrt2_over_2
def fft8_bitreverse(n):
return int(format(n, "03b")[::-1], 2)
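# Illustrative check (not part of the original kernels): fft8_bitreverse reverses the three
# index bits of an 8-point FFT, mapping the natural order 0..7 to bit-reversed order:
assert [fft8_bitreverse(i) for i in range(8)] == [0, 4, 2, 6, 1, 5, 3, 7]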
with Function("nnp_fft8_8aos__fma3",
(arg_t, arg_f),
target=uarch.default + isa.fma3):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
data = [YMMRegister() for _ in range(16)]
data[0] = LocalVariable(data[0])
data[8] = LocalVariable(data[8])
real, imag = data[0::2], data[1::2]
for i, (data_lo, data_hi) in enumerate(zip(data[0:8], data[8:16])):
ymm_data_lo, ymm_data_hi = data_lo, data_hi
if isinstance(data_lo, LocalVariable):
ymm_data_lo = YMMRegister()
if isinstance(data_hi, LocalVariable):
ymm_data_hi = YMMRegister()
VMOVUPS(ymm_data_lo, [reg_t + i * YMMRegister.size])
VMOVUPS(ymm_data_hi, [reg_t + (i + 8) * YMMRegister.size])
butterfly(ymm_data_lo, ymm_data_hi)
if isinstance(data_lo, LocalVariable):
VMOVAPS(data_lo, ymm_data_lo)
if isinstance(data_hi, LocalVariable):
VMOVAPS(data_hi, ymm_data_hi)
# FFT8: multiplication by twiddle factors
fft4_scale_b, fft4_negate_b = {}, {}
fft2_scale_b, fft2_negate_b = {}, {}
# w6.re, w6.im = w6.im, -w6.re
SWAP.REGISTERS(real[6], imag[6])
fft4_negate_b[id(imag[6])] = True
# w5.re, w5.im = SQRT2_OVER_2 * (w5.re + w5.im), SQRT2_OVER_2 * (w5.im - w5.re)
butterfly(imag[5], real[5])
SWAP.REGISTERS(real[5], imag[5])
# w7.re, w7.im = -SQRT2_OVER_2 * (w7.re - w7.im), -SQRT2_OVER_2 * (w7.re + w7.im)
butterfly(real[7], imag[7], negate_b=True)
fft4_negate_b[id(real[7])] = True
fft4_negate_b[id(imag[7])] = True
    # Propagate the multiplication by sqrt2_over_2 to the last butterfly in FFT2
ymm_sqrt2_over_2 = YMMRegister()
fft2_scale_b[id(real[5])] = ymm_sqrt2_over_2
fft2_scale_b[id(imag[5])] = ymm_sqrt2_over_2
fft2_scale_b[id(real[7])] = ymm_sqrt2_over_2
fft2_scale_b[id(imag[7])] = ymm_sqrt2_over_2
# 2x FFT4: butterfly
for data_lo, data_hi in zip(data[0:4] + data[8:12], data[4:8] + data[12:16]):
butterfly(data_lo, data_hi, negate_b=fft4_negate_b.get(id(data_hi), False), scale_b=fft4_scale_b.get(id(data_hi)))
# 2x FFT4: multiplication by twiddle factors
# w3.re, w3.im = w3.im, -w3.re
# w7.re, w7.im = w7.im, -w7.re
SWAP.REGISTERS(real[3], imag[3])
SWAP.REGISTERS(real[7], imag[7])
fft2_negate_b[id(imag[3])] = True
fft2_negate_b[id(imag[7])] = True
# 4x FFT2: butterfly
for i, (data_lo, data_hi) in enumerate(zip(data[0:2] + data[4:6] + data[8:10] + data[12:14], data[2:4] + data[6:8] + data[10:12] + data[14:16])):
ymm_data_lo, ymm_data_hi = \
butterfly(data_lo, data_hi,
negate_b=fft2_negate_b.get(id(data_hi), False), scale_b=fft2_scale_b.get(id(data_hi)),
writeback=False)
index_lo = (i // 2) * 2
index_hi = index_lo + 1
VMOVUPS([reg_f + (fft8_bitreverse(index_lo) * 2 + i % 2) * YMMRegister.size], ymm_data_lo)
VMOVUPS([reg_f + (fft8_bitreverse(index_hi) * 2 + i % 2) * YMMRegister.size], ymm_data_hi)
if i == 0:
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
RETURN()
with Function("nnp_ifft8_8aos__fma3",
(arg_t, arg_f),
target=uarch.default + isa.fma3):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
data = [YMMRegister() for _ in range(16)]
data[0] = LocalVariable(data[0])
data[8] = LocalVariable(data[8])
real, imag = data[0::2], data[1::2]
for i, (data_lo, data_hi) in enumerate(zip(data[0:8], data[8:16])):
ymm_data_lo, ymm_data_hi = data_lo, data_hi
if isinstance(data_lo, LocalVariable):
ymm_data_lo = YMMRegister()
if isinstance(data_hi, LocalVariable):
ymm_data_hi = YMMRegister()
VMOVUPS(ymm_data_lo, [reg_t + i * YMMRegister.size])
VMOVUPS(ymm_data_hi, [reg_t + (i + 8) * YMMRegister.size])
butterfly(ymm_data_lo, ymm_data_hi)
if isinstance(data_lo, LocalVariable):
VMOVAPS(data_lo, ymm_data_lo)
if isinstance(data_hi, LocalVariable):
VMOVAPS(data_hi, ymm_data_hi)
# FFT8: multiplication by twiddle factors
fft4_scale_b, fft4_negate_b = {}, {}
fft2_scale_b, fft2_negate_b = {}, {}
# w6.re, w6.im = -w6.im, w6.re
SWAP.REGISTERS(real[6], imag[6])
fft4_negate_b[id(real[6])] = True
# w5.re, w5.im = SQRT2_OVER_2 * (w5.re - w5.im), SQRT2_OVER_2 * (w5.re + w5.im)
butterfly(real[5], imag[5], negate_b=True)
# w7.re, w7.im = -SQRT2_OVER_2 * (w7.re + w7.im), SQRT2_OVER_2 * (w7.re - w7.im)
butterfly(real[7], imag[7])
fft4_negate_b[id(real[7])] = True
    # Propagate the multiplication by sqrt2_over_2 to the last butterfly in FFT2
fft2_scale_b[id(real[5])] = Constant.float32x8(sqrt2_over_2)
fft2_scale_b[id(imag[5])] = Constant.float32x8(sqrt2_over_2)
fft2_scale_b[id(real[7])] = Constant.float32x8(sqrt2_over_2)
fft2_scale_b[id(imag[7])] = Constant.float32x8(sqrt2_over_2)
# 2x FFT4: butterfly
for data_lo, data_hi in zip(data[0:4] + data[8:12], data[4:8] + data[12:16]):
butterfly(data_lo, data_hi, negate_b=fft4_negate_b.get(id(data_hi), False), scale_b=fft4_scale_b.get(id(data_hi)))
# 2x FFT4: multiplication by twiddle factors
# w3.re, w3.im = -w3.im, w3.re
# w7.re, w7.im = -w7.im, w7.re
SWAP.REGISTERS(real[3], imag[3])
SWAP.REGISTERS(real[7], imag[7])
fft2_negate_b[id(real[3])] = True
fft2_negate_b[id(real[7])] = True
# 4x FFT2: butterfly
for i, (data_lo, data_hi) in enumerate(zip(data[0:2] + data[4:6] + data[8:10] + data[12:14], data[2:4] + data[6:8] + data[10:12] + data[14:16])):
ymm_data_lo, ymm_data_hi = \
butterfly(data_lo, data_hi,
negate_b=fft2_negate_b.get(id(data_hi), False), scale_b=fft2_scale_b.get(id(data_hi)),
writeback=False)
index_lo = (i // 2) * 2
index_hi = index_lo + 1
VMULPS(ymm_data_lo, ymm_data_lo, Constant.float32x8(0.125))
VMULPS(ymm_data_hi, ymm_data_hi, Constant.float32x8(0.125))
VMOVUPS([reg_f + (fft8_bitreverse(index_lo) * 2 + i % 2) * YMMRegister.size], ymm_data_lo)
VMOVUPS([reg_f + (fft8_bitreverse(index_hi) * 2 + i % 2) * YMMRegister.size], ymm_data_hi)
RETURN()
|
import fft.complex_soa
import fft.two_real_to_two_complex_soa_perm_planar
arg_t = Argument(ptr(const_float_), name="t")
arg_f = Argument(ptr(float_), name="f")
with Function("nnp_fft8_dualreal__avx2",
(arg_t, arg_f),
target=uarch.default + isa.fma3 + isa.avx2):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_seq_a, ymm_seq_b = YMMRegister(), YMMRegister()
VMOVUPS(ymm_seq_a, [reg_t])
VMOVUPS(ymm_seq_b, [reg_t + YMMRegister.size])
fft.complex_soa.fft8_within_rows(ymm_seq_a, ymm_seq_b)
ymm_wr, ymm_wi = ymm_seq_a, ymm_seq_b
fft.two_real_to_two_complex_soa_perm_planar.fft8_within_rows_postprocess(ymm_wr, ymm_wi)
ymm_xhr, ymm_xhi = ymm_wr, ymm_wi
VMOVUPS([reg_f], ymm_xhr)
VMOVUPS([reg_f + YMMRegister.size], ymm_xhi)
RETURN()
with Function("nnp_fft16_dualreal__avx2",
(arg_t, arg_f),
target=uarch.default + isa.fma3 + isa.avx2):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_seq_a = YMMRegister(), YMMRegister()
ymm_seq_b = YMMRegister(), YMMRegister()
for i, ymm_a in enumerate(ymm_seq_a + ymm_seq_b):
VMOVUPS(ymm_a, [reg_t + i * YMMRegister.size])
fft.complex_soa.fft16_within_rows(ymm_seq_a, ymm_seq_b)
ymm_wr, ymm_wi = ymm_seq_a, ymm_seq_b
fft.two_real_to_two_complex_soa_perm_planar.fft16_within_rows_postprocess(ymm_wr, ymm_wi)
for i, ymm_w in enumerate(ymm_wr + ymm_wi):
VMOVUPS([reg_f + i * YMMRegister.size], ymm_w)
RETURN()
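# Illustrative sketch (assumption: reference only, ignoring NNPACK's packed/permuted output
# layout): the "dualreal" kernels above compute the FFTs of two real sequences x and h with a
# single complex FFT of x + i*h, then separate the two spectra via conjugate symmetry:
def _reference_fft_dualreal(x, h):
    import numpy
    w = numpy.fft.fft(numpy.asarray(x) + 1j * numpy.asarray(h))
    w_conj_rev = numpy.conj(numpy.roll(w[::-1], 1))   # conj(W[-k])
    return 0.5 * (w + w_conj_rev), -0.5j * (w - w_conj_rev)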
|
import fft.complex_soa_perm_to_real
from common import butterfly, cos_npi_over_8, sqrt2_over_2
def fft8_bitreverse(n):
return int(format(n, "03b")[::-1], 2)
arg_f = Argument(ptr(const_float_), name="f")
arg_t = Argument(ptr(float_), name="t")
with Function("nnp_ifft8_8real__fma3",
(arg_f, arg_t),
target=uarch.default + isa.fma3):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
ymm_data = [YMMRegister() for _ in range(8)]
ymm_real, ymm_imag = ymm_data[0::2], ymm_data[1::2]
for i, ymm_i in enumerate(ymm_data):
VMOVUPS(ymm_i, [reg_f + i * YMMRegister.size])
fft.complex_soa_perm_to_real.ifft8_across_rows(ymm_data)
for i, ymm_i in enumerate(ymm_data):
VMOVUPS([reg_t + i * YMMRegister.size], ymm_i)
RETURN()
import fft16x16
with Function("nnp_ifft16_8real__fma3",
(arg_f, arg_t),
target=uarch.default + isa.fma3):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
reg_t0 = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t0, arg_t)
reg_stride = GeneralPurposeRegister64()
MOV(reg_stride, YMMRegister.size)
reg_t8 = GeneralPurposeRegister64()
LEA(reg_t8, [reg_t0 + 8 * YMMRegister.size])
fft16x16.inverse_vfft(reg_t0, reg_t8, reg_stride,
data_in=[yword[reg_f + YMMRegister.size * i] for i in range(16)])
RETURN()
|
from __future__ import absolute_import
from __future__ import division
import winograd.o6x6k3x3
import block8x8
from common import _MM_SHUFFLE
for post_operation in ["store", "stream"]:
arg_d_pointer = Argument(ptr(const_float_), name="d_pointer")
arg_wd_pointer = Argument(ptr(float_), name="wd_pointer")
arg_d_stride = Argument(size_t, name="d_stride")
arg_wd_stride = Argument(size_t, name="wd_stride")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
with Function("nnp_iwt8x8_3x3_with_offset_and_{post_operation}__avx2".format(post_operation=post_operation),
(arg_d_pointer, arg_wd_pointer, arg_d_stride, arg_wd_stride, arg_row_count, arg_column_count, arg_row_offset, arg_column_offset),
target=uarch.default + isa.fma3 + isa.avx2):
reg_d = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_d, arg_d_pointer)
reg_wd = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_wd, arg_wd_pointer)
reg_stride_d = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_d, arg_d_stride)
reg_stride_wd = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_wd, arg_wd_stride)
reg_row_cnt = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_cnt, arg_row_count)
reg_col_cnt = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_col_cnt, arg_column_count)
reg_row_off = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_off, arg_row_offset)
reg_col_off = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_col_off, arg_column_offset)
ymm_data = [YMMRegister() for _ in range(8)]
block8x8.load_with_padding(ymm_data, reg_d, reg_stride_d, reg_row_off, reg_row_cnt, reg_col_off, reg_col_cnt)
ymm_data = winograd.o6x6k3x3.input_transform(ymm_data)
winograd.o6x6k3x3.transpose8x8(ymm_data)
ymm_data = winograd.o6x6k3x3.input_transform(ymm_data)
VSTOREPS = {"store": VMOVAPS, "stream": VMOVNTPS}[post_operation]
for ymm_row in ymm_data:
VSTOREPS([reg_wd], ymm_row)
if ymm_row is not ymm_data[-1]:
ADD(reg_wd, reg_stride_wd)
RETURN()
for reverse_kernel in [False, True]:
for post_operation in ["store", "stream"]:
        arg_g_pointer = Argument(ptr(const_float_), name="g_pointer")
        arg_wg_pointer = Argument(ptr(float_), name="wg_pointer")
        arg_g_stride = Argument(size_t, name="g_stride")
        arg_wg_stride = Argument(size_t, name="wg_stride")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
kwt_arguments = (arg_g_pointer, arg_wg_pointer, arg_g_stride, arg_wg_stride, arg_row_count, arg_column_count, arg_row_offset, arg_column_offset)
with Function("nnp_kwt8x8_3{reverse}x3{reverse}_and_{post_operation}__avx2".format(
reverse="R" if reverse_kernel else "", post_operation=post_operation),
kwt_arguments, target=uarch.default + isa.fma3 + isa.avx2):
reg_g = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_g, arg_g_pointer)
reg_wg = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_wg, arg_wg_pointer)
reg_stride_g = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_g, arg_g_stride)
reg_stride_wg = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_wg, arg_wg_stride)
# stride is in elements; multiply by sizeof(float) to get stride in bytes
SHL(reg_stride_g, 2)
xmm_load_mask = XMMRegister()
VMOVAPS(xmm_load_mask.as_ymm, Constant.float32x8(-0.0, -0.0, -0.0, +0.0, +0.0, +0.0, +0.0, +0.0))
xmm_g = [XMMRegister() for _ in range(3)]
for xmm in xmm_g:
VMASKMOVPS(xmm, xmm_load_mask, [reg_g])
if xmm is not xmm_g[-1]:
ADD(reg_g, reg_stride_g)
if reverse_kernel:
xmm_g = xmm_g[::-1]
ymm_wg_rows = winograd.o6x6k3x3.kernel_transform([xmm.as_ymm for xmm in xmm_g], rescale_coefficients=False)
ymm_g_rows = winograd.o6x6k3x3.transpose8x3([ymm.as_xmm for ymm in ymm_wg_rows])
if reverse_kernel:
ymm_g_rows = ymm_g_rows[::-1]
ymm_wg_rows = winograd.o6x6k3x3.kernel_transform(ymm_g_rows, rescale_coefficients=False)
rcp_9 = float.fromhex("0x1.C71C72p-4")
rcp_81 = float.fromhex("0x1.948B10p-7")
rcp_90 = float.fromhex("0x1.6C16C2p-7")
rcp_180 = float.fromhex("0x1.6C16C2p-8")
rcp_810 = float.fromhex("0x1.43A274p-10")
rcp_1620 = float.fromhex("0x1.43A274p-11")
rcp_8100 = float.fromhex("0x1.02E85Cp-13")
rcp_16200 = float.fromhex("0x1.02E85Cp-14")
rcp_32400 = float.fromhex("0x1.02E85Cp-15")
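            # Illustrative note (assumption: documentation only): the hex constants above are the
            # float32-rounded reciprocals 1/9, 1/81, 1/90, 1/180, 1/810, 1/1620, 1/8100, 1/16200 and
            # 1/32400, which fold the F(6x6, 3x3) kernel-transform normalization into the per-row
            # scaling vectors below.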
ymm_edge_scale = YMMRegister()
VMOVAPS(ymm_edge_scale, Constant.float32x8( 1.0, -2.0 * rcp_9, -2.0 * rcp_9, rcp_90, rcp_90, rcp_180, rcp_180, 1.0))
VMULPS(ymm_wg_rows[0], ymm_wg_rows[0], ymm_edge_scale)
VMULPS(ymm_wg_rows[7], ymm_wg_rows[7], ymm_edge_scale)
ymm_row12_scale = YMMRegister()
VMOVAPS(ymm_row12_scale, Constant.float32x8(-2.0 * rcp_9, 4.0 * rcp_81, 4.0 * rcp_81, -2.0 * rcp_810, -2.0 * rcp_810, -2.0 * rcp_1620, -2.0 * rcp_1620, -2.0 * rcp_9))
VMULPS(ymm_wg_rows[1], ymm_wg_rows[1], ymm_row12_scale)
VMULPS(ymm_wg_rows[2], ymm_wg_rows[2], ymm_row12_scale)
ymm_row34_scale = YMMRegister()
VMOVAPS(ymm_row34_scale, Constant.float32x8( rcp_90, -2.0 * rcp_810, -2.0 * rcp_810, rcp_8100, rcp_8100, rcp_16200, rcp_16200, rcp_90))
VMULPS(ymm_wg_rows[3], ymm_wg_rows[3], ymm_row34_scale)
VMULPS(ymm_wg_rows[4], ymm_wg_rows[4], ymm_row34_scale)
ymm_row56_scale = YMMRegister()
VMOVAPS(ymm_row56_scale, Constant.float32x8( rcp_180, -2.0 * rcp_1620, -2.0 * rcp_1620, rcp_16200, rcp_16200, rcp_32400, rcp_32400, rcp_180))
VMULPS(ymm_wg_rows[5], ymm_wg_rows[5], ymm_row56_scale)
VMULPS(ymm_wg_rows[6], ymm_wg_rows[6], ymm_row56_scale)
# Write output with stride
VSTOREPS = {"store": VMOVAPS, "stream": VMOVNTPS}[post_operation]
for ymm_wg_row in ymm_wg_rows:
VSTOREPS([reg_wg], ymm_wg_row)
if ymm_wg_row is not ymm_wg_rows[-1]:
ADD(reg_wg, reg_stride_wg)
RETURN()
arg_m_pointer = Argument(ptr(const_float_), name="m_pointer")
arg_s_pointer = Argument(ptr(float_), name="s_pointer")
arg_bias = Argument(ptr(const_float_), name="bias_pointer")
arg_m_stride = Argument(size_t, name="m_stride")
arg_s_stride = Argument(size_t, name="s_stride")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_count = Argument(uint32_t, name="column_count")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_column_offset = Argument(uint32_t, name="column_offset")
for with_offset, with_bias, with_relu in [(True, False, False), (False, True, False), (False, True, True)]:
if with_bias:
owt8x8_arguments = (arg_m_pointer, arg_s_pointer, arg_bias, arg_m_stride, arg_s_stride, arg_row_count, arg_column_count)
else:
owt8x8_arguments = (arg_m_pointer, arg_s_pointer, arg_m_stride, arg_s_stride, arg_row_count, arg_column_count)
if with_offset:
# Note: the version with offset has offset arguments, but they are never used (assumed 0).
owt8x8_arguments += (arg_row_offset, arg_column_offset)
with Function("nnp_owt8x8_3x3{with_bias}{with_relu}__avx2".format(
with_bias="_with_bias" if with_bias else "",
with_relu="_with_relu" if with_relu else ""),
owt8x8_arguments, target=uarch.default + isa.fma3 + isa.avx2):
reg_m = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_m, arg_m_pointer)
reg_s = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_s, arg_s_pointer)
if with_bias:
reg_bias = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_bias, arg_bias)
xmm_bias = XMMRegister()
VINSERTPS(xmm_bias, xmm_bias, [reg_bias], 0b1101 | 1<<4)
reg_m_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_m_stride, arg_m_stride)
reg_s_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_s_stride, arg_s_stride)
reg_row_count = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_count, arg_row_count)
reg_column_count = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_count, arg_column_count)
ymm_m = [YMMRegister() for _ in range(8)]
for ymm in ymm_m:
if with_bias and ymm is ymm_m[1]:
VADDPS(ymm, xmm_bias.as_ymm, [reg_m])
else:
VMOVAPS(ymm, [reg_m])
if ymm is not ymm_m[-1]:
ADD(reg_m, reg_m_stride)
ymm_t = winograd.o6x6k3x3.output_transform(ymm_m)
ymm_tt = winograd.o6x6k3x3.transpose6x8(ymm_t)
ymm_s = winograd.o6x6k3x3.output_transform(ymm_tt)
block8x8.store_packed(ymm_s, reg_s, reg_s_stride, reg_row_count, reg_column_count, None, None, with_relu)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
import block8x8
import fft.complex_soa
import fft.real_to_complex_soa_perm
import fft.complex_soa_perm_to_real
import fft.two_real_to_two_complex_soa_perm_planar
import fft.two_complex_soa_perm_to_two_real_planar
arg_t_pointer = Argument(ptr(const_float_), name="t_pointer")
arg_f_pointer = Argument(ptr(float_), name="f_pointer")
arg_x_pointer = Argument(ptr(const_float_), name="x_pointer")
arg_t_stride = Argument(size_t, name="t_stride")
arg_f_stride = Argument(size_t, name="f_stride")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_offset = Argument(uint32_t, name="column_offset")
arg_column_count = Argument(uint32_t, name="column_count")
for post_operation in ["stream", "store"]:
fft8x8_arguments = (arg_t_pointer, arg_f_pointer, arg_t_stride, arg_f_stride, arg_row_count, arg_column_count, arg_row_offset, arg_column_offset)
with Function("nnp_fft8x8_with_offset_and_{post_operation}__avx2".format(post_operation=post_operation),
fft8x8_arguments, target=uarch.default + isa.fma3 + isa.avx2):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t_pointer)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f_pointer)
reg_inct = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_inct, arg_t_stride)
reg_incf = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_incf, arg_f_stride)
reg_row_cnt = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_cnt, arg_row_count)
reg_col_cnt = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_col_cnt, arg_column_count)
reg_row_off = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_off, arg_row_offset)
reg_col_off = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_col_off, arg_column_offset)
ymm_data = [YMMRegister(i) for i in range(8)]
ymm_real, ymm_imag = ymm_data[0::2], ymm_data[1::2]
block8x8.load_with_padding(ymm_data, reg_t, reg_inct, reg_row_off, reg_row_cnt, reg_col_off, reg_col_cnt)
fft.real_to_complex_soa_perm.fft8_across_rows(ymm_data)
fft.complex_soa.fft8_within_rows(ymm_real, ymm_imag)
fft.two_real_to_two_complex_soa_perm_planar.fft8_within_rows_postprocess(ymm_real[0], ymm_imag[0])
VSTOREPS = {"store": VMOVAPS, "stream": VMOVNTPS}[post_operation]
for ymm_re, ymm_im in zip(ymm_real, ymm_imag):
VSTOREPS([reg_f], ymm_re)
VSTOREPS([reg_f + YMMRegister.size], ymm_im)
if ymm_re is not ymm_real[-1]:
ADD(reg_f, reg_incf)
RETURN()
arg_f_pointer = Argument(ptr(const_float_), name="f_pointer")
arg_t_pointer = Argument(ptr(float_), name="t_pointer")
arg_bias = Argument(ptr(const_float_), name="bias_pointer")
arg_f_stride = Argument(size_t, name="f_stride")
arg_t_stride = Argument(size_t, name="t_stride")
arg_row_offset = Argument(uint32_t, name="row_offset")
arg_row_count = Argument(uint32_t, name="row_count")
arg_column_offset = Argument(uint32_t, name="column_offset")
arg_column_count = Argument(uint32_t, name="column_count")
for with_offset, with_bias, with_relu in [(True, False, False), (False, True, False), (False, True, True)]:
if with_bias:
ifft8x8_arguments = (arg_f_pointer, arg_t_pointer, arg_bias, arg_f_stride, arg_t_stride, arg_row_count, arg_column_count)
else:
ifft8x8_arguments = (arg_f_pointer, arg_t_pointer, arg_f_stride, arg_t_stride, arg_row_count, arg_column_count)
if with_offset:
ifft8x8_arguments += arg_row_offset, arg_column_offset
with Function("nnp_ifft8x8{with_offset}{with_bias}{with_relu}__avx2".format(
with_offset="_with_offset" if with_offset else "",
with_bias="_with_bias" if with_bias else "",
with_relu="_with_relu" if with_relu else ""),
ifft8x8_arguments,
target=uarch.default + isa.fma3 + isa.avx2):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f_pointer)
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t_pointer)
if with_bias:
reg_bias = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_bias, arg_bias)
reg_f_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f_stride, arg_f_stride)
reg_t_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t_stride, arg_t_stride)
reg_row_count = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_count, arg_row_count)
reg_column_end = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_end, arg_column_count)
if with_offset:
reg_row_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_row_start, arg_row_offset)
reg_column_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_column_start, arg_column_offset)
ADD(reg_column_end, reg_column_start)
else:
reg_row_start = None
reg_column_start = None
ymm_data = [YMMRegister(i) for i in range(8)]
ymm_real, ymm_imag = ymm_data[0::2], ymm_data[1::2]
if with_bias:
ymm_bias = YMMRegister()
VMOVSS(ymm_bias.as_xmm, [reg_bias])
for ymm_re, ymm_im in zip(ymm_real, ymm_imag):
VMOVAPS(ymm_re, [reg_f])
VMOVAPS(ymm_im, [reg_f + YMMRegister.size])
if with_bias and ymm_re is ymm_real[0]:
VFMADD231PS(ymm_re, ymm_bias, Constant.float32x8(64.0))
if ymm_im is not ymm_imag[-1]:
ADD(reg_f, reg_f_stride)
fft.two_complex_soa_perm_to_two_real_planar.ifft8_within_rows_preprocess(ymm_real[0], ymm_imag[0])
fft.complex_soa.fft8_within_rows(ymm_real, ymm_imag, transformation="inverse")
fft.complex_soa_perm_to_real.ifft8_across_rows(ymm_data)
block8x8.store_packed(ymm_data, reg_t, reg_t_stride, reg_row_count, reg_column_end, reg_row_start, reg_column_start, with_relu)
RETURN()
|
import fft.complex_soa
import fft.two_complex_soa_perm_to_two_real_planar
arg_f = Argument(ptr(const_float_), name="f")
arg_t = Argument(ptr(float_), name="t")
with Function("nnp_ifft8_dualreal__avx2",
(arg_f, arg_t),
target=uarch.default + isa.fma3 + isa.avx2):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
ymm_xhr, ymm_xhi = YMMRegister(), YMMRegister()
VMOVUPS(ymm_xhr, [reg_f])
VMOVUPS(ymm_xhi, [reg_f + YMMRegister.size])
fft.two_complex_soa_perm_to_two_real_planar.ifft8_within_rows_preprocess(ymm_xhr, ymm_xhi)
ymm_wr, ymm_wi = ymm_xhr, ymm_xhi
fft.complex_soa.fft8_within_rows(ymm_wr, ymm_wi, transformation="inverse")
ymm_seq_a, ymm_seq_b = ymm_wr, ymm_wi
VMOVUPS([reg_t], ymm_seq_a)
VMOVUPS([reg_t + YMMRegister.size], ymm_seq_b)
RETURN()
with Function("nnp_ifft16_dualreal__avx2",
(arg_f, arg_t),
target=uarch.default + isa.fma3 + isa.avx2):
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
ymm_wr = YMMRegister(), YMMRegister()
ymm_wi = YMMRegister(), YMMRegister()
for i, ymm_w in enumerate(ymm_wr + ymm_wi):
VMOVUPS(ymm_w, [reg_f + i * YMMRegister.size])
fft.two_complex_soa_perm_to_two_real_planar.ifft16_within_rows_preprocess(ymm_wr, ymm_wi)
fft.complex_soa.ifft16_within_rows(ymm_wr, ymm_wi)
for i, ymm_w in enumerate(ymm_wr + ymm_wi):
VMOVUPS([reg_t + i * YMMRegister.size], ymm_w)
RETURN()
|
import fft.real_to_complex_soa_perm
arg_t = Argument(ptr(const_float_), name="t")
arg_f = Argument(ptr(float_), name="f")
with Function("nnp_fft8_8real__fma3",
(arg_t, arg_f),
target=uarch.default + isa.fma3):
reg_t = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
ymm_data = [YMMRegister() for _ in range(8)]
for i, ymm_i in enumerate(ymm_data):
VMOVUPS(ymm_i, [reg_t + i * YMMRegister.size])
fft.real_to_complex_soa_perm.fft8_across_rows(ymm_data)
for i, ymm_i in enumerate(ymm_data):
VMOVUPS([reg_f + i * YMMRegister.size], ymm_i)
RETURN()
import fft16x16
with Function("nnp_fft16_8real__fma3",
(arg_t, arg_f),
target=uarch.default + isa.fma3):
reg_t0 = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_t0, arg_t)
reg_f = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_f, arg_f)
reg_stride = GeneralPurposeRegister64()
MOV(reg_stride, YMMRegister.size)
reg_t8 = GeneralPurposeRegister64()
LEA(reg_t8, [reg_t0 + 8 * YMMRegister.size])
fft16x16.forward_vfft(reg_t0, reg_t8, reg_stride,
data_out=[yword[reg_f + YMMRegister.size * i] for i in range(16)])
RETURN()
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
def load_with_padding(ymm_data, reg_data, reg_stride, reg_row_offset, reg_row_count, reg_column_offset, reg_column_count):
assert isinstance(ymm_data, list) and all(isinstance(ymm_row, YMMRegister) for ymm_row in ymm_data)
assert isinstance(reg_data, GeneralPurposeRegister64)
assert isinstance(reg_stride, GeneralPurposeRegister64)
assert isinstance(reg_row_offset, GeneralPurposeRegister32)
assert isinstance(reg_row_count, GeneralPurposeRegister32)
assert isinstance(reg_column_offset, GeneralPurposeRegister32)
assert isinstance(reg_column_count, GeneralPurposeRegister32)
reg_column_end = GeneralPurposeRegister64()
LEA(reg_column_end, [reg_column_offset.as_qword + reg_column_count.as_qword * 1])
ymm_before_column_end_mask = YMMRegister()
VMOVD(ymm_before_column_end_mask.as_xmm, reg_column_end.as_dword)
ymm_before_column_start_mask = YMMRegister()
VMOVD(ymm_before_column_start_mask.as_xmm, reg_column_offset.as_dword)
ymm_column_index_mask = YMMRegister()
    VMOVDQA(ymm_column_index_mask, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
VPBROADCASTD(ymm_before_column_end_mask, ymm_before_column_end_mask.as_xmm)
VPCMPGTD(ymm_before_column_end_mask, ymm_before_column_end_mask, ymm_column_index_mask)
VPBROADCASTD(ymm_before_column_start_mask, ymm_before_column_start_mask.as_xmm)
VPCMPGTD(ymm_before_column_start_mask, ymm_before_column_start_mask, ymm_column_index_mask)
ymm_load_mask = YMMRegister()
VPANDN(ymm_load_mask, ymm_before_column_start_mask, ymm_before_column_end_mask)
# Multiply by sizeof(float) to get offset in bytes
SHL(reg_column_offset, 2)
    # data points to the first valid element, which is loaded into lane `reg_column_offset`.
    # However, VMASKMOVPS expects a pointer to lane 0, even if that lane is not loaded.
    # Adjust the pointer by subtracting column_offset (in bytes).
SUB(reg_data, reg_column_offset.as_qword)
# stride is in elements; multiply by sizeof(float) to get stride in bytes
SHL(reg_stride, 2)
# Zero all elements. Rows which are not loaded are initialized here.
for ymm_row in ymm_data:
VXORPS(ymm_row, ymm_row, ymm_row)
with Block() as load_rows:
for i, ymm_row in enumerate(ymm_data):
with Block() as load_row:
CMP(reg_row_offset, i)
JA(load_row.end)
VMASKMOVPS(ymm_row, ymm_load_mask, [reg_data])
if i + 1 != len(ymm_data):
ADD(reg_data, reg_stride)
SUB(reg_row_count, 1)
JZ(load_rows.end)
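# Illustrative sketch (assumption: reference semantics only): the tile produced by
# load_with_padding is zero outside rows [row_offset, row_offset + row_count) and columns
# [column_offset, column_offset + column_count); the mask of loaded lanes is:
def _reference_padding_mask(row_offset, row_count, column_offset, column_count):
    return [[int(row_offset <= i < row_offset + row_count and
                 column_offset <= j < column_offset + column_count)
             for j in range(8)] for i in range(8)]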
def store_packed(ymm_data, reg_data, reg_stride, reg_row_count, reg_column_end, reg_row_offset=None, reg_column_start=None, relu=False):
assert isinstance(ymm_data, list) and all(isinstance(ymm_row, YMMRegister) for ymm_row in ymm_data)
assert isinstance(reg_data, GeneralPurposeRegister64)
assert isinstance(reg_stride, GeneralPurposeRegister64)
assert isinstance(reg_row_count, GeneralPurposeRegister32)
assert isinstance(reg_column_end, GeneralPurposeRegister32)
assert reg_row_offset is None or isinstance(reg_row_offset, GeneralPurposeRegister32)
assert reg_column_start is None or isinstance(reg_column_start, GeneralPurposeRegister32)
if reg_column_start is None:
ymm_store_mask = YMMRegister()
VMOVD(ymm_store_mask.as_xmm, reg_column_end)
VPBROADCASTD(ymm_store_mask, ymm_store_mask.as_xmm)
VPCMPGTD(ymm_store_mask, ymm_store_mask, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
else:
ymm_before_column_end_mask = YMMRegister()
VMOVD(ymm_before_column_end_mask.as_xmm, reg_column_end)
ymm_before_column_start_mask = YMMRegister()
VMOVD(ymm_before_column_start_mask.as_xmm, reg_column_start)
SHL(reg_column_start, 2)
SUB(reg_data, reg_column_start.as_qword)
ymm_column_index_mask = YMMRegister()
VMOVDQA(ymm_column_index_mask, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
VPBROADCASTD(ymm_before_column_end_mask, ymm_before_column_end_mask.as_xmm)
VPCMPGTD(ymm_before_column_end_mask, ymm_before_column_end_mask, ymm_column_index_mask)
VPBROADCASTD(ymm_before_column_start_mask, ymm_before_column_start_mask.as_xmm)
VPCMPGTD(ymm_before_column_start_mask, ymm_before_column_start_mask, ymm_column_index_mask)
ymm_store_mask = YMMRegister()
VPANDN(ymm_store_mask, ymm_before_column_start_mask, ymm_before_column_end_mask)
# stride is in elements; multiply by sizeof(float) to get stride in bytes
SHL(reg_stride, 2)
if relu:
ymm_zero = YMMRegister()
VMOVAPS(ymm_zero, Constant.float32x8(-0.0))
with Block() as store_rows:
for i, ymm_row in enumerate(ymm_data):
with Block() as store_row:
if reg_row_offset is not None:
CMP(reg_row_offset, i)
JA(store_row.end)
if relu:
VMAXPS(ymm_row, ymm_zero, ymm_row)
VMASKMOVPS([reg_data], ymm_store_mask, ymm_row)
if ymm_row is not ymm_data[-1]:
ADD(reg_data, reg_stride)
SUB(reg_row_count, 1)
JZ(store_rows.end)
|
import winograd.o6x6k3x3
arg_d_pointer = Argument(ptr(const_float_), name="d")
arg_w_pointer = Argument(ptr(float_), name="w")
with Function("nnp_iwt_f6k3__fma3", (arg_d_pointer, arg_w_pointer),
target=uarch.default + isa.fma3):
reg_d = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_d, arg_d_pointer)
reg_w = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_w, arg_w_pointer)
ymm_data = [YMMRegister() for _ in range(8)]
for i, ymm_row in enumerate(ymm_data):
VMOVUPS(ymm_row, [reg_d + i * YMMRegister.size])
ymm_data = winograd.o6x6k3x3.input_transform(ymm_data)
for i, ymm_row in enumerate(ymm_data):
VMOVUPS([reg_w + i * YMMRegister.size], ymm_row)
RETURN()
arg_g_pointer = Argument(ptr(const_float_), name="g")
arg_w_pointer = Argument(ptr(float_), name="w")
with Function("nnp_kwt_f6k3__fma3", (arg_g_pointer, arg_w_pointer),
target=uarch.default + isa.fma3):
reg_g = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_g, arg_g_pointer)
reg_w = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_w, arg_w_pointer)
ymm_data = [YMMRegister() for _ in range(3)]
for i, ymm_row in enumerate(ymm_data):
VMOVUPS(ymm_row, [reg_g + i * YMMRegister.size])
ymm_data = winograd.o6x6k3x3.kernel_transform(ymm_data)
for i, ymm_row in enumerate(ymm_data):
VMOVUPS([reg_w + i * YMMRegister.size], ymm_row)
RETURN()
arg_m_pointer = Argument(ptr(const_float_), name="m")
arg_s_pointer = Argument(ptr(float_), name="s")
with Function("nnp_owt_f6k3__fma3", (arg_m_pointer, arg_s_pointer),
target=uarch.default + isa.fma3):
reg_m = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_m, arg_m_pointer)
reg_s = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_s, arg_s_pointer)
ymm_m = [YMMRegister() for _ in range(8)]
for i, ymm_row in enumerate(ymm_m):
VMOVUPS(ymm_row, [reg_m + i * YMMRegister.size])
ymm_s = winograd.o6x6k3x3.output_transform(ymm_m)
for i, ymm_row in enumerate(ymm_s):
VMOVUPS([reg_s + i * YMMRegister.size], ymm_row)
RETURN()
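# Illustrative note (assumption: documentation only): o6x6k3x3 denotes the F(6x6, 3x3)
# Winograd algorithm, so the 1D transforms above work on rows of 8 elements because
# tile size = output size + kernel size - 1 = 6 + 3 - 1 = 8.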
|
from common import _MM_SHUFFLE
arg_src_pointer = Argument(ptr(const_float_), name="src_pointer")
arg_dst_pointer = Argument(ptr(float_), name="dst_pointer")
arg_src_stride = Argument(size_t, name="src_stride")
arg_src_row_offset = Argument(uint32_t, name="src_row_offset")
arg_src_row_count = Argument(uint32_t, name="src_row_count")
arg_src_column_offset = Argument(uint32_t, name="src_column_offset")
arg_src_column_count = Argument(uint32_t, name="src_column_count")
arg_dst_column_count = Argument(uint32_t, name="dst_column_count")
with Function("nnp_maxpool_2x2_2x2__avx2",
(arg_src_pointer, arg_dst_pointer, arg_src_stride,
arg_src_row_offset, arg_src_row_count, arg_src_column_offset, arg_src_column_count,
arg_dst_column_count),
target=uarch.default + isa.fma3 + isa.avx2):
reg_src_ptr = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_src_ptr, arg_src_pointer)
reg_dst_ptr = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_dst_ptr, arg_dst_pointer)
reg_src_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_src_stride, arg_src_stride)
reg_src_row_index = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_src_row_index, arg_src_row_offset)
reg_src_row_count = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_src_row_count, arg_src_row_count)
reg_src_column_start = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_src_column_start, arg_src_column_offset)
reg_src_column_end = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_src_column_end, arg_src_column_count)
ADD(reg_src_column_end, reg_src_column_start)
reg_dst_column_count = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_dst_column_count, arg_dst_column_count)
ymm_src_column_start, ymm_src_column_end, ymm_dst_column_count = YMMRegister(), YMMRegister(), YMMRegister()
VMOVD(ymm_src_column_start.as_xmm, reg_src_column_start)
VMOVD(ymm_src_column_end.as_xmm, reg_src_column_end)
VMOVD(ymm_dst_column_count.as_xmm, reg_dst_column_count)
VPBROADCASTD(ymm_src_column_start, ymm_src_column_start.as_xmm)
VPBROADCASTD(ymm_src_column_end, ymm_src_column_end.as_xmm)
VPBROADCASTD(ymm_dst_column_count, ymm_dst_column_count.as_xmm)
ymm_column_01234567, ymm_column_89ABCDEF = YMMRegister(), YMMRegister()
VMOVDQA(ymm_column_01234567, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
VMOVDQA(ymm_column_89ABCDEF, Constant.uint32x8(8, 9, 10, 11, 12, 13, 14, 15))
ymm_src_column_start_gt_01234567, ymm_src_column_end_gt_01234567 = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_src_column_start_gt_01234567, ymm_src_column_start, ymm_column_01234567)
VPCMPGTD(ymm_src_column_end_gt_01234567, ymm_src_column_end, ymm_column_01234567)
ymm_src_column_start_gt_89ABCDEF, ymm_src_column_end_gt_89ABCDEF = YMMRegister(), YMMRegister()
VPCMPGTD(ymm_src_column_start_gt_89ABCDEF, ymm_src_column_start, ymm_column_89ABCDEF)
VPCMPGTD(ymm_src_column_end_gt_89ABCDEF, ymm_src_column_end, ymm_column_89ABCDEF)
ymm_src_mask_columns_0_to_8, ymm_src_mask_columns_8_to_16 = YMMRegister(), YMMRegister()
VPANDN(ymm_src_mask_columns_0_to_8, ymm_src_column_start_gt_01234567, ymm_src_column_end_gt_01234567)
VPANDN(ymm_src_mask_columns_8_to_16, ymm_src_column_start_gt_89ABCDEF, ymm_src_column_end_gt_89ABCDEF)
ymm_dst_mask_columns_0_to_8 = YMMRegister()
VPCMPGTD(ymm_dst_mask_columns_0_to_8, ymm_dst_column_count, ymm_column_01234567)
    # src points to the first valid element, which is loaded into lane `reg_src_column_start`.
    # However, VMASKMOVPS expects a pointer to lane 0, even if that lane is not loaded.
    # Adjust the pointer by subtracting column_start (in bytes).
SHL(reg_src_column_start, 2)
SUB(reg_src_ptr, reg_src_column_start.as_qword)
# Multiply stride by sizeof(float) to convert from elements to bytes
SHL(reg_src_stride, 2)
ymm_row0 = YMMRegister(), YMMRegister()
ymm_row1 = YMMRegister(), YMMRegister()
ymm_minus_inf = YMMRegister()
VMOVAPS(ymm_minus_inf, Constant.float32x8(-float("inf")))
VMOVAPS(ymm_row0[0], ymm_minus_inf)
VMOVAPS(ymm_row0[1], ymm_minus_inf)
VMOVAPS(ymm_row1[0], ymm_minus_inf)
VMOVAPS(ymm_row1[1], ymm_minus_inf)
NEG(reg_src_row_index)
with Block() as load_row0:
CMP(reg_src_row_index, reg_src_row_count)
JAE(load_row0.end)
VMASKMOVPS(ymm_row0[0], ymm_src_mask_columns_0_to_8, [reg_src_ptr])
VBLENDVPS(ymm_row0[0], ymm_minus_inf, ymm_row0[0], ymm_src_mask_columns_0_to_8)
VMASKMOVPS(ymm_row0[1], ymm_src_mask_columns_8_to_16, [reg_src_ptr + YMMRegister.size])
VBLENDVPS(ymm_row0[1], ymm_minus_inf, ymm_row0[1], ymm_src_mask_columns_8_to_16)
ADD(reg_src_ptr, reg_src_stride)
with Block() as load_row1:
INC(reg_src_row_index)
CMP(reg_src_row_index, reg_src_row_count)
JAE(load_row1.end)
VMASKMOVPS(ymm_row1[0], ymm_src_mask_columns_0_to_8, [reg_src_ptr])
VBLENDVPS(ymm_row1[0], ymm_minus_inf, ymm_row1[0], ymm_src_mask_columns_0_to_8)
VMASKMOVPS(ymm_row1[1], ymm_src_mask_columns_8_to_16, [reg_src_ptr + YMMRegister.size])
VBLENDVPS(ymm_row1[1], ymm_minus_inf, ymm_row1[1], ymm_src_mask_columns_8_to_16)
# ymm_row[0] = ( x7 x6 x5 x4 x3 x2 x1 x0 )
# ymm_row[1] = ( x15 x14 x13 x12 x11 x10 x9 x8 )
ymm_row = YMMRegister(), YMMRegister()
VMAXPS(ymm_row[0], ymm_row0[0], ymm_row1[0])
VMAXPS(ymm_row[1], ymm_row0[1], ymm_row1[1])
# ymm_row[0] = ( x14 x12 x6 x4 x10 x8 x2 x0 )
# ymm_row[1] = ( x15 x13 x7 x5 x11 x9 x3 x1 )
ymm_tmp = YMMRegister()
VSHUFPS(ymm_tmp, ymm_row[0], ymm_row[1], _MM_SHUFFLE(2, 0, 2, 0))
VSHUFPS(ymm_row[1], ymm_row[0], ymm_row[1], _MM_SHUFFLE(3, 1, 3, 1))
SWAP.REGISTERS(ymm_row[0], ymm_tmp)
# ymm_out = ( y7 y6 y3 y2 y5 y4 y1 y0 )
ymm_out = YMMRegister()
VMAXPS(ymm_out, ymm_row[0], ymm_row[1])
VPERMPD(ymm_out, ymm_out, _MM_SHUFFLE(3, 1, 2, 0))
VMASKMOVPS([reg_dst_ptr], ymm_dst_mask_columns_0_to_8, ymm_out)
RETURN()
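# Illustrative sketch (assumption: reference semantics only): the kernel above produces one
# output row of 2x2 max pooling with stride 2 from (up to) two input rows of 16 floats,
# treating out-of-bounds elements as -inf:
def _reference_maxpool_2x2_2x2(row0, row1):
    return [max(row0[2 * j], row0[2 * j + 1], row1[2 * j], row1[2 * j + 1]) for j in range(8)]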
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
sqrt2_over_2 = float.fromhex("0x1.6A09E6p-1")
cos_1pi_over_8 = float.fromhex("0x1.D906BCp-1")
cos_3pi_over_8 = float.fromhex("0x1.87DE2Ap-2")
tan_1pi_over_8 = float.fromhex("0x1.A8279Ap-2")
tan_3pi_over_8 = float.fromhex("0x1.3504F4p+1")
cos_npi_over_8 = [
1.0,
cos_1pi_over_8,
sqrt2_over_2,
cos_3pi_over_8,
0.0,
-cos_3pi_over_8,
-sqrt2_over_2,
-cos_1pi_over_8,
]
cos_npi_over_4 = [
1.0,
sqrt2_over_2,
0.0,
-sqrt2_over_2
]
sin_npi_over_8 = [
0.0,
cos_3pi_over_8,
sqrt2_over_2,
cos_1pi_over_8,
1.0,
cos_1pi_over_8,
sqrt2_over_2,
cos_3pi_over_8
]
sin_npi_over_4 = [
0.0,
sqrt2_over_2,
1.0,
sqrt2_over_2
]
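# Illustrative note (assumption: documentation only): the tables above hold float32-rounded
# samples cos(n*pi/8), cos(n*pi/4), sin(n*pi/8) and sin(n*pi/4) for n starting at 0, e.g.
# cos_npi_over_8[2] == sin_npi_over_8[2] == sqrt2_over_2.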
def _MM_SHUFFLE(z, y, x, w):
assert z & ~0b11 == 0
assert y & ~0b11 == 0
assert x & ~0b11 == 0
assert w & ~0b11 == 0
return (z << 6) | (y << 4) | (x << 2) | w
def _MM_SHUFFLE2(x, y):
assert x & ~1 == 0
assert y & ~1 == 0
return (x << 1) | y
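# Illustrative check (not part of the original code): _MM_SHUFFLE packs four 2-bit lane
# selectors into an immediate, highest lane first, matching the C intrinsics macro:
assert _MM_SHUFFLE(3, 1, 2, 0) == 0b11011000
assert _MM_SHUFFLE2(1, 0) == 0b10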
def butterfly(a, b, negate_a=False, negate_b=False, scale_a=None, scale_b=None, negate_out_b=False, writeback=True):
assert isinstance(a, YMMRegister) or isinstance(a, LocalVariable) and a.size == YMMRegister.size
assert isinstance(b, YMMRegister) or isinstance(b, LocalVariable) and b.size == YMMRegister.size
assert isinstance(negate_a, bool)
assert isinstance(negate_b, bool)
assert isinstance(negate_out_b, bool)
assert scale_b is None or \
isinstance(scale_b, YMMRegister) or \
isinstance(scale_b, (LocalVariable, Constant)) and scale_b.size == YMMRegister.size
assert scale_a is None or \
isinstance(scale_a, YMMRegister) or \
isinstance(scale_a, (LocalVariable, Constant)) and scale_a.size == YMMRegister.size
assert scale_a is None or scale_b is None
assert isinstance(writeback, bool)
assert not negate_out_b or not negate_a and not negate_b and scale_a is None and scale_b is None
ymm_a, ymm_b = a, b
if isinstance(a, LocalVariable):
ymm_a = YMMRegister()
VMOVAPS(ymm_a, a)
if isinstance(b, LocalVariable):
ymm_b = YMMRegister()
VMOVAPS(ymm_b, b)
if scale_b is None and scale_a is None:
assert not negate_a, "Negation of a is supported only in combination with scaling"
ymm_new_a = YMMRegister()
VADDPS(ymm_new_a, ymm_a, ymm_b)
ymm_new_b = YMMRegister()
if not negate_out_b:
VSUBPS(ymm_new_b, ymm_a, ymm_b)
else:
VSUBPS(ymm_new_b, ymm_b, ymm_a)
if not negate_b:
SWAP.REGISTERS(ymm_new_a, ymm_a)
SWAP.REGISTERS(ymm_new_b, ymm_b)
else:
SWAP.REGISTERS(ymm_new_a, ymm_b)
SWAP.REGISTERS(ymm_new_b, ymm_a)
elif scale_a is not None:
ymm_a_copy = YMMRegister()
VMOVAPS(ymm_a_copy, ymm_a)
if not negate_a and not negate_b:
VFMADD132PS(ymm_a, ymm_b, scale_a)
VFMSUB132PS(ymm_a_copy, ymm_b, scale_a)
elif not negate_a and negate_b:
VFMSUB132PS(ymm_a, ymm_b, scale_a)
VFMADD132PS(ymm_a_copy, ymm_b, scale_a)
elif negate_a and not negate_b:
            VFNMADD132PS(ymm_a, ymm_b, scale_a)
VFNMSUB132PS(ymm_a_copy, ymm_b, scale_a)
elif negate_a and negate_b:
VFNMSUB132PS(ymm_a, ymm_b, scale_a)
VFNMADD132PS(ymm_a_copy, ymm_b, scale_a)
SWAP.REGISTERS(ymm_b, ymm_a_copy)
elif scale_b is not None:
ymm_a_copy = YMMRegister()
VMOVAPS(ymm_a_copy, ymm_a)
if not negate_a and not negate_b:
VFMADD231PS(ymm_a, ymm_b, scale_b)
VFNMADD231PS(ymm_a_copy, ymm_b, scale_b)
elif not negate_a and negate_b:
VFNMADD231PS(ymm_a, ymm_b, scale_b)
VFMADD231PS(ymm_a_copy, ymm_b, scale_b)
elif negate_a and not negate_b:
VFMSUB231PS(ymm_a, ymm_b, scale_b)
VFNMSUB231PS(ymm_a_copy, ymm_b, scale_b)
elif negate_a and negate_b:
VFNMSUB231PS(ymm_a, ymm_b, scale_b)
VFMSUB231PS(ymm_a_copy, ymm_b, scale_b)
SWAP.REGISTERS(ymm_b, ymm_a_copy)
if writeback and isinstance(a, LocalVariable):
VMOVAPS(a, ymm_a)
if writeback and isinstance(b, LocalVariable):
VMOVAPS(b, ymm_b)
return ymm_a, ymm_b
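# Illustrative sketch (assumption: reference semantics only): with no scaling or negation
# options, butterfly(a, b) overwrites the pair with the radix-2 FFT step (a + b, a - b):
def _reference_butterfly(a, b):
    return a + b, a - b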
def transpose2x2x128(ymm_a, ymm_b, use_blend=True):
# ymm_a = (a.lo, a.hi)
# ymm_b = (b.lo, b.hi)
if use_blend:
# ymm_ab = (a.hi, b.lo)
ymm_ab = YMMRegister()
VPERM2F128(ymm_ab, ymm_a, ymm_b, 0x21)
# ymm_a = (a.lo, b.lo)
VBLENDPS(ymm_a, ymm_a, ymm_ab, 0xF0)
# ymm_b = (a.hi, b.hi)
VBLENDPS(ymm_b, ymm_b, ymm_ab, 0x0F)
else:
# ymm_new_a = (a.lo, b.lo)
ymm_new_a = YMMRegister()
VINSERTF128(ymm_new_a, ymm_a, ymm_b.as_xmm, 1)
# ymm_new_b = (a.hi, b.hi)
ymm_new_b = YMMRegister()
VPERM2F128(ymm_new_b, ymm_a, ymm_b, 0x31)
SWAP.REGISTERS(ymm_a, ymm_new_a)
SWAP.REGISTERS(ymm_b, ymm_new_b)
def transpose2x2x2x64(ymm_a, ymm_b, use_blend=True):
# ymm_a = (a0, a1, a2, a3)
    # ymm_b = (b0, b1, b2, b3)
if use_blend:
# ymm_ab = (a1, b0, a3, b2)
ymm_ab = YMMRegister()
VSHUFPD(ymm_ab, ymm_a, ymm_b, 0b0101)
# ymm_a = (a0, b0, a2, b2)
VBLENDPS(ymm_a, ymm_a, ymm_ab, 0b11001100)
# ymm_b = (a1, b1, a3, b3)
VBLENDPS(ymm_b, ymm_b, ymm_ab, 0b00110011)
else:
# ymm_new_a = (a0, b0, a2, b2)
ymm_new_a = YMMRegister()
VUNPCKLPD(ymm_new_a, ymm_a, ymm_b)
# ymm_new_b = (a1, b1, a3, b3)
ymm_new_b = YMMRegister()
VUNPCKHPD(ymm_new_b, ymm_a, ymm_b)
SWAP.REGISTERS(ymm_a, ymm_new_a)
SWAP.REGISTERS(ymm_b, ymm_new_b)
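# Illustrative sketch (assumption: reference semantics only): transpose2x2x128 turns
# (a.lo, a.hi), (b.lo, b.hi) into (a.lo, b.lo), (a.hi, b.hi); transpose2x2x2x64 applies the
# same 2x2 exchange to the 64-bit elements within each 128-bit lane:
def _reference_transpose2x2(a, b):
    (a_lo, a_hi), (b_lo, b_hi) = a, b
    return (a_lo, b_lo), (a_hi, b_hi)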
def compute_masks(masks, reg_column_offset, reg_column_count):
assert isinstance(masks, list) and all(isinstance(mask, (YMMRegister, LocalVariable)) for mask in masks)
assert isinstance(reg_column_offset, GeneralPurposeRegister64)
assert isinstance(reg_column_count, GeneralPurposeRegister64)
def interleave(sequence_a, sequence_b):
assert isinstance(sequence_a, list) and isinstance(sequence_b, list) or isinstance(sequence_a, tuple) and isinstance(sequence_b, tuple)
if isinstance(sequence_a, list):
return list(sum(zip(sequence_a, sequence_b), ()))
else:
return sum(zip(sequence_a, sequence_b), ())
|
arg_input = Argument(ptr(const_float_), "input")
arg_output = Argument(ptr(float_), "output")
arg_length = Argument(size_t, "length")
arg_negative_slope = Argument(float_, "negative_slope")
with Function("nnp_relu__avx2",
(arg_input, arg_output, arg_length, arg_negative_slope),
target=uarch.default + isa.avx2):
reg_input = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_input, arg_input)
reg_output = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_output, arg_output)
reg_length = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_length, arg_length)
ymm_negative_slope = YMMRegister()
LOAD.ARGUMENT(ymm_negative_slope.as_xmm, arg_negative_slope)
VBROADCASTSS(ymm_negative_slope, ymm_negative_slope.as_xmm)
loop = Loop()
TEST(reg_length, reg_length)
JZ(loop.end)
with loop:
# Load (unaligned!) data and update input pointer
ymm_data = YMMRegister()
VMOVUPS(ymm_data, [reg_input])
ADD(reg_input, YMMRegister.size)
# Scale data with negative slope (for negative inputs)
ymm_scaled_data = YMMRegister()
VMULPS(ymm_scaled_data, ymm_data, ymm_negative_slope)
# Select scaled data if input is negative
VBLENDVPS(ymm_data, ymm_data, ymm_scaled_data, ymm_data)
# Stream (aligned!) data to memory and update output pointer
VMOVNTPS([reg_output], ymm_data)
ADD(reg_output, YMMRegister.size)
SUB(reg_length, YMMRegister.size // float_.size)
JNZ(loop.begin)
RETURN()
arg_data = Argument(ptr(float_), "data")
arg_length = Argument(size_t, "length")
arg_negative_slope = Argument(float_, "negative_slope")
with Function("nnp_inplace_relu__avx2",
(arg_data, arg_length, arg_negative_slope),
target=uarch.default + isa.avx2):
reg_data = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_data, arg_data)
reg_length = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_length, arg_length)
ymm_negative_slope = YMMRegister()
LOAD.ARGUMENT(ymm_negative_slope.as_xmm, arg_negative_slope)
VBROADCASTSS(ymm_negative_slope, ymm_negative_slope.as_xmm)
loop = Loop()
TEST(reg_length, reg_length)
JZ(loop.end)
with loop:
# Load data
ymm_data = YMMRegister()
VMOVAPS(ymm_data, [reg_data])
# Scale data with negative slope (for negative inputs)
ymm_scaled_data = YMMRegister()
VMULPS(ymm_scaled_data, ymm_data, ymm_negative_slope)
# Select scaled data if input is negative
VBLENDVPS(ymm_data, ymm_data, ymm_scaled_data, ymm_data)
# Store data back to the same location and update pointer
VMOVAPS([reg_data], ymm_data)
ADD(reg_data, YMMRegister.size)
SUB(reg_length, YMMRegister.size // float_.size)
JNZ(loop.begin)
RETURN()
arg_output_gradient = Argument(ptr(const_float_), "output_gradient")
arg_input = Argument(ptr(const_float_), "input")
arg_input_gradient = Argument(ptr(float_), "input_gradient")
arg_length = Argument(size_t, "length")
arg_negative_slope = Argument(float_, "negative_slope")
with Function("nnp_grad_relu__avx2",
(arg_output_gradient, arg_input, arg_input_gradient, arg_length, arg_negative_slope),
target=uarch.default + isa.avx2):
reg_output_gradient = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_output_gradient, arg_output_gradient)
reg_input = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_input, arg_input)
reg_input_gradient = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_input_gradient, arg_input_gradient)
reg_length = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_length, arg_length)
ymm_negative_slope = YMMRegister()
LOAD.ARGUMENT(ymm_negative_slope.as_xmm, arg_negative_slope)
VBROADCASTSS(ymm_negative_slope, ymm_negative_slope.as_xmm)
loop = Loop()
TEST(reg_length, reg_length)
JZ(loop.end)
with loop:
# Load (unaligned!) gradient and update output gradient pointer
ymm_gradient = YMMRegister()
VMOVUPS(ymm_gradient, [reg_output_gradient])
ADD(reg_output_gradient, YMMRegister.size)
# Load (unaligned!) data and update input pointer
ymm_data = YMMRegister()
VMOVUPS(ymm_data, [reg_input])
ADD(reg_input, YMMRegister.size)
# Scale gradient with negative slope (for negative inputs)
ymm_scaled_gradient = YMMRegister()
VMULPS(ymm_scaled_gradient, ymm_gradient, ymm_negative_slope)
# Select scaled gradient if input is negative
VBLENDVPS(ymm_gradient, ymm_gradient, ymm_scaled_gradient, ymm_data)
        # Store (aligned!) gradient to memory and update input gradient pointer
VMOVAPS([reg_input_gradient], ymm_gradient)
ADD(reg_input_gradient, YMMRegister.size)
SUB(reg_length, YMMRegister.size // float_.size)
JNZ(loop.begin)
RETURN()
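# Illustrative sketch (assumption: reference semantics only, signed zeros aside): the kernels
# above implement a leaky ReLU and its backward pass, selecting the scaled value whenever the
# sign bit of the input is set:
def _reference_relu(x, negative_slope):
    return x if x >= 0.0 else x * negative_slope
def _reference_grad_relu(output_gradient, x, negative_slope):
    return output_gradient if x >= 0.0 else output_gradient * negative_slope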
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
log2e = float.fromhex("+0x1.715476p+3")  # 8 / ln(2): the exponent is pre-scaled by the 8-entry table below
magic_bias = float.fromhex("+0x1.800000p+23")  # 1.5 * 2**23: rounds x * 8/ln(2) to an integer in the low mantissa bits
zero_cutoff = float.fromhex("-0x1.9FE368p+6")
inf_cutoff = float.fromhex("+0x1.62E42Ep+6")
minus_ln2_hi = float.fromhex("-0x1.62E430p-4")
minus_ln2_lo = float.fromhex("+0x1.05C610p-32")
plus_inf = float("inf")
c2 = float.fromhex("0x1.00088Ap-1")
c3 = float.fromhex("0x1.555A86p-3")
t0 = float.fromhex("0x1.000000p+0")
t1 = float.fromhex("0x1.172B84p+0")
t2 = float.fromhex("0x1.306FE0p+0")
t3 = float.fromhex("0x1.4BFDAEp+0")
t4 = float.fromhex("0x1.6A09E6p+0")
t5 = float.fromhex("0x1.8ACE54p+0")
t6 = float.fromhex("0x1.AE89FAp+0")
t7 = float.fromhex("0x1.D5818Ep+0")
min_exponent = (-126 << 23) & 0xFFFFFFFF
max_exponent = 127 << 23
default_exponent = 0x3F800000
mantissa_mask = 0x007FFFF8
x_arg = Argument(m256, "x")
with Function("_mm256_exp_ps", (x_arg,), m256,
target=uarch.default + isa.fma3 + isa.avx2):
ymm_x = YMMRegister()
LOAD.ARGUMENT(ymm_x, x_arg)
ymm_magic_bias = YMMRegister()
VMOVAPS(ymm_magic_bias, Constant.float32x8(magic_bias))
ymm_t = YMMRegister()
VMOVAPS(ymm_t, ymm_x)
VFMADD132PS(ymm_t, ymm_magic_bias, Constant.float32x8(log2e))
ymm_e1, ymm_e2 = YMMRegister(), YMMRegister()
VPAND(ymm_e2, ymm_t, Constant.uint32x8(mantissa_mask))
VPSLLD(ymm_e2, ymm_e2, 20)
ymm_tf = YMMRegister()
VPERMPS(ymm_tf, ymm_t, Constant.float32x8(t0, t1, t2, t3, t4, t5, t6, t7))
VSUBPS(ymm_t, ymm_t, ymm_magic_bias)
# rx = fma(t, minus_ln2_lo, fma(t, minus_ln2_hi, x))
# rx := t * minus_ln2_hi + x
# rx := t * minus_ln2_lo + rx
ymm_rx = YMMRegister()
VMOVAPS(ymm_rx, ymm_x)
VFMADD231PS(ymm_rx, ymm_t, Constant.float32x8(minus_ln2_hi))
VFMADD231PS(ymm_rx, ymm_t, Constant.float32x8(minus_ln2_lo))
VPMAXSD(ymm_e1, ymm_e2, Constant.uint32x8(min_exponent))
VPMINSD(ymm_e1, ymm_e1, Constant.uint32x8(max_exponent))
ymm_default_exponent = YMMRegister()
VMOVDQA(ymm_default_exponent, Constant.uint32x8(default_exponent))
VPSUBD(ymm_e2, ymm_e2, ymm_e1)
VPADDD(ymm_e1, ymm_e1, ymm_default_exponent)
VPADDD(ymm_e2, ymm_e2, ymm_default_exponent)
# rf = fma(rx, rx * fma(rx, c3, c2), rx)
# rf := rx * c3 + c2
# rf := rx * rf
# rf := rx * rf + rx
ymm_rf = YMMRegister()
VMOVAPS(ymm_rf, Constant.float32x8(c2))
VFMADD231PS(ymm_rf, ymm_rx, Constant.float32x8(c3))
VMULPS(ymm_rf, ymm_rf, ymm_rx)
VFMADD213PS(ymm_rf, ymm_rx, ymm_rx)
# f = fma(tf, rf, tf)
VFMADD231PS(ymm_tf, ymm_tf, ymm_rf)
ymm_f = ymm_tf
VMULPS(ymm_f, ymm_f, ymm_e1)
VMULPS(ymm_f, ymm_f, ymm_e2)
RETURN(ymm_f)
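# Illustrative sketch (assumption: reference only; the exponent clamping done via e1/e2 above
# is omitted): the kernel evaluates exp(x) with the table-based decomposition
#   exp(x) = 2**(n // 8) * 2**((n % 8) / 8) * exp(r),   n = round(8 * x / ln 2),   r = x - n * ln(2) / 8,
# where exp(r) is approximated by the cubic polynomial 1 + r + r**2 * (c2 + c3 * r):
def _reference_exp(x):
    import math
    n = int(round(8.0 * x / math.log(2.0)))
    r = x - n * math.log(2.0) / 8.0
    poly = r + r * r * (c2 + c3 * r)  # ~ exp(r) - 1
    return math.ldexp(2.0 ** ((n % 8) / 8.0) * (1.0 + poly), n // 8)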
|
from common import _MM_SHUFFLE
from vecmath.exp import simd_exp
arg_n = Argument(size_t, "n")
arg_v = Argument(ptr(const_float_), "v")
with Function("max__avx", (arg_n, arg_v), float_,
target=uarch.default + isa.avx):
reg_n = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_n, arg_n)
reg_v = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_v, arg_v)
unroll_loop = Loop()
vector_loop = Loop()
final_block = Block()
simd_width = YMMRegister.size // float_.size
unroll_factor = 4
# Initialize reduction registers with the first element (v[0])
ymm_ms = [YMMRegister() for _ in range(unroll_factor)]
VBROADCASTSS(ymm_ms[0], [reg_v])
for ymm_m in ymm_ms[1:]:
VMOVAPS(ymm_m, ymm_ms[0])
# Unrolled vectorized loop
SUB(reg_n, simd_width * unroll_factor)
JB(unroll_loop.end)
with unroll_loop:
for i, ymm_m in enumerate(ymm_ms):
VMAXPS(ymm_m, ymm_m, [reg_v + i * YMMRegister.size])
SUB(reg_v, -unroll_factor * YMMRegister.size)
SUB(reg_n, simd_width * unroll_factor)
JAE(unroll_loop.begin)
VMAXPS(ymm_ms[0], ymm_ms[0], ymm_ms[1])
VMAXPS(ymm_ms[2], ymm_ms[2], ymm_ms[3])
VMAXPS(ymm_ms[0], ymm_ms[0], ymm_ms[2])
ymm_m = ymm_ms[0]
ADD(reg_n, simd_width * unroll_factor)
JZ(final_block.end)
# Vectorized loop without unrolling
SUB(reg_n, simd_width)
JB(vector_loop.end)
with vector_loop:
VMAXPS(ymm_m, ymm_m, [reg_v])
ADD(reg_v, YMMRegister.size)
SUB(reg_n, simd_width)
JAE(vector_loop.begin)
ADD(reg_n, simd_width)
JZ(final_block.end)
# Process remainder: 0 < reg_n < simd_width
with final_block:
reg_mask = GeneralPurposeRegister64()
LEA(reg_mask, Constant.uint32x16(*([0xFFFFFFFF] * 8 + [0x00000000] * 8)))
NEG(reg_n)
        LEA(reg_mask, [reg_mask + reg_n * 4 + 32])
ymm_mask = YMMRegister()
VMOVUPS(ymm_mask, [reg_mask])
ymm_temp = YMMRegister()
VMASKMOVPS(ymm_temp, ymm_mask, [reg_v])
        VBLENDVPS(ymm_temp, ymm_m, ymm_temp, ymm_mask)
VMAXPS(ymm_m, ymm_m, ymm_temp)
ymm_temp = YMMRegister()
VPERM2F128(ymm_temp, ymm_m, ymm_m, 0x01)
VMAXPS(ymm_m, ymm_m, ymm_temp)
VPERMILPS(ymm_temp, ymm_m, _MM_SHUFFLE(1, 0, 3, 2))
VMAXPS(ymm_m, ymm_m, ymm_temp)
VPERMILPS(ymm_temp, ymm_m, _MM_SHUFFLE(2, 3, 0, 1))
VMAXPS(ymm_m, ymm_m, ymm_temp)
RETURN(ymm_m.as_xmm)
arg_n = Argument(size_t, "n")
arg_v = Argument(ptr(const_float_), "v")
arg_c = Argument(float_, "c")
with Function("sum_exp_minus_c__avx2", (arg_n, arg_v, arg_c), float_,
target=uarch.default + isa.avx2):
reg_n = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_n, arg_n)
reg_v = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_v, arg_v)
ymm_c = YMMRegister()
LOAD.ARGUMENT(ymm_c.as_xmm, arg_c)
VBROADCASTSS(ymm_c, ymm_c.as_xmm)
unroll_loop = Loop()
vector_loop = Loop()
final_block = Block()
simd_width = YMMRegister.size // float_.size
unroll_factor = 3
# Clear reduction registers
ymm_sums = [YMMRegister() for _ in range(unroll_factor)]
for ymm_sum in ymm_sums:
VXORPS(ymm_sum.as_xmm, ymm_sum.as_xmm, ymm_sum.as_xmm)
# Unrolled vectorized loop
SUB(reg_n, simd_width * unroll_factor)
JB(unroll_loop.end)
with unroll_loop:
ymm_xs = [YMMRegister() for _ in ymm_sums]
for i, ymm_x in enumerate(ymm_xs):
VMOVUPS(ymm_x, [reg_v + i * YMMRegister.size])
VSUBPS(ymm_x, ymm_x, ymm_c)
ymm_ys = simd_exp(ymm_xs)
for ymm_sum, ymm_y in zip(ymm_sums, ymm_ys):
VADDPS(ymm_sum, ymm_sum, ymm_y)
SUB(reg_v, -unroll_factor * YMMRegister.size)
SUB(reg_n, simd_width * unroll_factor)
JAE(unroll_loop.begin)
VADDPS(ymm_sums[0], ymm_sums[0], ymm_sums[1])
VADDPS(ymm_sums[0], ymm_sums[0], ymm_sums[2])
ymm_sum = ymm_sums[0]
ADD(reg_n, simd_width * unroll_factor)
JZ(final_block.end)
# Vectorized loop without unrolling
SUB(reg_n, simd_width)
JB(vector_loop.end)
with vector_loop:
ymm_x = YMMRegister()
VMOVUPS(ymm_x, [reg_v])
VSUBPS(ymm_x, ymm_x, ymm_c)
ymm_y = simd_exp([ymm_x])[0]
VADDPS(ymm_sum, ymm_sum, ymm_y)
ADD(reg_v, YMMRegister.size)
SUB(reg_n, simd_width)
JAE(vector_loop.begin)
ADD(reg_n, simd_width)
JZ(final_block.end)
# Process remainder: 0 < reg_n < simd_width
with final_block:
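# Lane i of ymm_mask is set iff i < reg_n, so only the remaining elements are loaded and accumulated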
ymm_mask = YMMRegister()
VMOVD(ymm_mask.as_xmm, reg_n.as_dword)
VPBROADCASTD(ymm_mask, ymm_mask.as_xmm)
VPCMPGTD(ymm_mask, ymm_mask, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
ymm_x = YMMRegister()
VMASKMOVPS(ymm_x, ymm_mask, [reg_v])
VSUBPS(ymm_x, ymm_x, ymm_c)
ymm_y = simd_exp([ymm_x])[0]
VANDPS(ymm_y, ymm_y, ymm_mask)
VADDPS(ymm_sum, ymm_sum, ymm_y)
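# Horizontal reduction: fold the 8 lanes of ymm_sum down to a single sum (returned in the low lane)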
ymm_temp = YMMRegister()
VPERM2F128(ymm_temp, ymm_sum, ymm_sum, 0x01)
VADDPS(ymm_sum, ymm_sum, ymm_temp)
VPERMILPS(ymm_temp, ymm_sum, _MM_SHUFFLE(1, 0, 3, 2))
VADDPS(ymm_sum, ymm_sum, ymm_temp)
VPERMILPS(ymm_temp, ymm_sum, _MM_SHUFFLE(2, 3, 0, 1))
VADDPS(ymm_sum, ymm_sum, ymm_temp)
RETURN(ymm_sum.as_xmm)
def scaled_exp_minus_c(reg_n, reg_x, reg_y, ymm_scale, ymm_c):
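# Writes y[i] = scale * exp(x[i] - c) for i in [0, n); when reg_x and reg_y are the same register the transform is done in place.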
unroll_loop = Loop()
vector_loop = Loop()
final_block = Block()
simd_width = YMMRegister.size // float_.size
unroll_factor = 3
# Unrolled vectorized loop
SUB(reg_n, simd_width * unroll_factor)
JB(unroll_loop.end)
with unroll_loop:
ymm_xs = [YMMRegister() for _ in range(unroll_factor)]
for i, ymm_x in enumerate(ymm_xs):
VMOVUPS(ymm_x, [reg_x + i * YMMRegister.size])
VSUBPS(ymm_x, ymm_x, ymm_c)
if reg_x != reg_y:
SUB(reg_x, -unroll_factor * YMMRegister.size)
ymm_ys = simd_exp(ymm_xs)
for i, ymm_y in enumerate(ymm_ys):
VMULPS(ymm_y, ymm_y, ymm_scale)
VMOVUPS([reg_y + i * YMMRegister.size], ymm_y)
SUB(reg_y, -unroll_factor * YMMRegister.size)
SUB(reg_n, simd_width * unroll_factor)
JAE(unroll_loop.begin)
ADD(reg_n, simd_width * unroll_factor)
JZ(final_block.end)
# Vectorized loop without unrolling
SUB(reg_n, simd_width)
JB(vector_loop.end)
with vector_loop:
ymm_x = YMMRegister()
VMOVUPS(ymm_x, [reg_x])
if reg_x != reg_y:
ADD(reg_x, YMMRegister.size)
VSUBPS(ymm_x, ymm_x, ymm_c)
ymm_y = simd_exp([ymm_x])[0]
VMULPS(ymm_y, ymm_y, ymm_scale)
VMOVUPS([reg_y], ymm_y)
ADD(reg_y, YMMRegister.size)
SUB(reg_n, simd_width)
JAE(vector_loop.begin)
ADD(reg_n, simd_width)
JZ(final_block.end)
# Process remainder: 0 < reg_n < simd_width
with final_block:
ymm_mask = YMMRegister()
VMOVD(ymm_mask.as_xmm, reg_n.as_dword)
VPBROADCASTD(ymm_mask, ymm_mask.as_xmm)
VPCMPGTD(ymm_mask, ymm_mask, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
ymm_x = YMMRegister()
VMASKMOVPS(ymm_x, ymm_mask, [reg_x])
VSUBPS(ymm_x, ymm_x, ymm_c)
ymm_y = simd_exp([ymm_x])[0]
VMULPS(ymm_y, ymm_y, ymm_scale)
VMASKMOVPS([reg_y], ymm_mask, ymm_y)
arg_n = Argument(size_t, "n")
arg_v = Argument(ptr(const_float_), "v")
arg_scale = Argument(float_, "scale")
arg_c = Argument(float_, "c")
with Function("inplace_scaled_exp_minus_c__avx2", (arg_n, arg_v, arg_scale, arg_c),
target=uarch.default + isa.avx2):
reg_n = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_n, arg_n)
reg_v = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_v, arg_v)
ymm_scale = YMMRegister()
LOAD.ARGUMENT(ymm_scale.as_xmm, arg_scale)
VBROADCASTSS(ymm_scale, ymm_scale.as_xmm)
ymm_c = YMMRegister()
LOAD.ARGUMENT(ymm_c.as_xmm, arg_c)
VBROADCASTSS(ymm_c, ymm_c.as_xmm)
scaled_exp_minus_c(reg_n, reg_v, reg_v, ymm_scale, ymm_c)
RETURN()
arg_n = Argument(size_t, "n")
arg_x = Argument(ptr(const_float_), "x")
arg_y = Argument(ptr(float_), "y")
arg_scale = Argument(float_, "scale")
arg_c = Argument(float_, "c")
with Function("scaled_exp_minus_c__avx2", (arg_n, arg_x, arg_y, arg_scale, arg_c),
target=uarch.default + isa.avx2):
reg_n = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_n, arg_n)
reg_x = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_x, arg_x)
reg_y = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_y, arg_y)
ymm_scale = YMMRegister()
LOAD.ARGUMENT(ymm_scale.as_xmm, arg_scale)
VBROADCASTSS(ymm_scale, ymm_scale.as_xmm)
ymm_c = YMMRegister()
LOAD.ARGUMENT(ymm_c.as_xmm, arg_c)
VBROADCASTSS(ymm_c, ymm_c.as_xmm)
scaled_exp_minus_c(reg_n, reg_x, reg_y, ymm_scale, ymm_c)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
mr, nr = 3, 4
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_s8gemm_only_{mr}x{nr}__fma3".format(mr=mr, nr=nr),
(arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
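# Tuple GEMM microkernel: every matrix entry is an 8-float tuple, and each k iteration performs the elementwise FMA c[m][n] += a[m] * b[n] over the 3x4 tile of C kept in registers.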
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
with Block() as prefetch_c:
for m in range(mr):
PREFETCHT0([reg_c])
if m + 1 != mr:
ADD(reg_c, reg_row_stride)
ymm_c = [[YMMRegister() for n in range(nr)] for m in range(mr)]
VZEROALL()
ymm_a = [YMMRegister() for m in range(mr)]
ymm_b_n = YMMRegister()
with Loop() as loop:
for m in range(mr):
VMOVAPS(ymm_a[m], [reg_a + m * YMMRegister.size])
SUB(reg_a, -mr * YMMRegister.size)
for n in range(nr):
VMOVAPS(ymm_b_n, [reg_b + n * YMMRegister.size])
for m in range(mr):
VFMADD231PS(ymm_c[m][n], ymm_a[m], ymm_b_n)
SUB(reg_b, -nr * YMMRegister.size)
DEC(reg_k)
JNZ(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
with Block() as update_c:
for m in reversed(range(mr)):
for n in range(nr):
VADDPS(ymm_c[m][n], ymm_c[m][n], [reg_c + n * YMMRegister.size])
VMOVAPS([reg_c + n * YMMRegister.size], ymm_c[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in reversed(range(mr)):
for n in range(nr):
VMOVAPS([reg_c + n * YMMRegister.size], ymm_c[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
arg_mr = Argument(uint32_t, "mr")
arg_nr = Argument(uint32_t, "nr")
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_s8gemm_upto_{mr}x{nr}__fma3".format(mr=mr, nr=nr),
(arg_mr, arg_nr, arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_mr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_mr, arg_mr)
reg_nr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_nr, arg_nr)
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
ymm_c = [[YMMRegister() for n in range(nr)] for m in range(mr)]
VZEROALL()
ymm_a = [YMMRegister() for m in range(mr)]
ymm_b_n = YMMRegister()
with Loop() as loop:
with Block() as load_a:
for m in range(mr):
VMOVAPS(ymm_a[m], [reg_a])
ADD(reg_a, YMMRegister.size)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(load_a.end)
with Block() as load_b:
for n in range(nr):
VMOVAPS(ymm_b_n, [reg_b])
ADD(reg_b, YMMRegister.size)
for m in range(mr):
VFMADD231PS(ymm_c[m][n], ymm_a[m], ymm_b_n)
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(load_b.end)
DEC(reg_k)
JNE(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
with Block() as update_c:
for m in range(mr):
with Block() as update_c_row:
for n in range(nr):
VADDPS(ymm_c[m][n], ymm_c[m][n], [reg_c + n * YMMRegister.size])
VMOVAPS([reg_c + n * YMMRegister.size], ymm_c[m][n])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(update_c_row.end)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(update_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in range(mr):
with Block() as store_c_row:
for n in range(nr):
VMOVAPS([reg_c + n * YMMRegister.size], ymm_c[m][n])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(store_c_row.end)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(store_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
mr, nr = 2, 4
arg_input_channels = Argument(size_t, "input_channels")
arg_image_size = Argument(size_t, "image_size")
arg_input = Argument(ptr(const_float_), "input")
arg_kernel = Argument(ptr(const_float_), "kernel")
arg_output = Argument(ptr(float_), "output")
with Function("nnp_conv1x1_only_{mr}x{nr}__fma3".format(mr=mr, nr=nr),
(arg_input_channels, arg_image_size, arg_input, arg_kernel, arg_output),
target=uarch.default + isa.fma3):
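# Direct 1x1 convolution microkernel: accumulates nr output-channel planes from mr input-channel planes, 8 image pixels per iteration.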
reg_input_channels = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_input_channels, arg_input_channels)
SHL(reg_input_channels, 2)
reg_image_size = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_image_size, arg_image_size)
SHL(reg_image_size, 2)
reg_inputs = [GeneralPurposeRegister64() for m in range(mr)]
LOAD.ARGUMENT(reg_inputs[0], arg_input)
for m in range(1, mr):
LEA(reg_inputs[m], [reg_inputs[m - 1] + reg_image_size * 1])
reg_kernel = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_kernel, arg_kernel)
reg_outputs = [GeneralPurposeRegister64() for n in range(nr)]
LOAD.ARGUMENT(reg_outputs[0], arg_output)
for n in range(1, nr):
LEA(reg_outputs[n], [reg_outputs[n - 1] + reg_image_size * 1])
ymm_kernel = [[YMMRegister() for n in range(nr)] for m in range(mr)]
for n in range(nr):
for m in range(mr):
VBROADCASTSS(ymm_kernel[m][n], [reg_kernel + m * float_.size])
if n + 1 != nr:
ADD(reg_kernel, reg_input_channels)
main_loop = Loop()
final_block = Block()
SUB(reg_image_size, YMMRegister.size)
JB(main_loop.end)
with main_loop:
# Load vectors from different channels of the output image
ymm_outputs = [YMMRegister() for n in range(nr)]
for reg_output, ymm_output in zip(reg_outputs, ymm_outputs):
VMOVUPS(ymm_output, [reg_output])
for m, reg_input in enumerate(reg_inputs):
# Load vector for a channel of the input image
ymm_input = YMMRegister()
VMOVUPS(ymm_input, [reg_input])
ADD(reg_input, YMMRegister.size)
# Update all outputs using the input and corresponding kernel elements
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VFMADD231PS(ymm_output, ymm_kernel[m][n], ymm_input)
if reg_input is reg_inputs[-1]:
VMOVUPS([reg_output], ymm_output)
ADD(reg_output, YMMRegister.size)
SUB(reg_image_size, YMMRegister.size)
JAE(main_loop.begin)
ADD(reg_image_size, YMMRegister.size)
JZ(final_block.end)
with final_block:
reg_mask, ymm_mask = GeneralPurposeRegister64(), YMMRegister()
LEA(reg_mask, Constant.uint32x16(*([0xFFFFFFFF] * 8 + [0x00000000] * 8)))
SUB(reg_mask, reg_image_size)
VMOVDQU(ymm_mask, [reg_mask + YMMRegister.size])
# Load vectors from different channels of the output image
ymm_outputs = [YMMRegister() for n in range(nr)]
for reg_output, ymm_output in zip(reg_outputs, ymm_outputs):
VMASKMOVPS(ymm_output, ymm_mask, [reg_output])
for m, reg_input in enumerate(reg_inputs):
# Load vector for a channel of the input image
ymm_input = YMMRegister()
VMASKMOVPS(ymm_input, ymm_mask, [reg_input])
# Update all outputs using the input and corresponding kernel elements
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VFMADD231PS(ymm_output, ymm_kernel[m][n], ymm_input)
if reg_input is reg_inputs[-1]:
VMASKMOVPS([reg_output], ymm_mask, ymm_output)
RETURN()
arg_mr = Argument(uint32_t, "mr")
arg_nr = Argument(uint32_t, "nr")
arg_input_channels = Argument(size_t, "input_channels")
arg_image_size = Argument(size_t, "image_size")
arg_input = Argument(ptr(const_float_), "input")
arg_kernel = Argument(ptr(const_float_), "kernel")
arg_output = Argument(ptr(float_), "output")
with Function("nnp_conv1x1_upto_{mr}x{nr}__fma3".format(mr=mr, nr=nr),
(arg_mr, arg_nr, arg_input_channels, arg_image_size, arg_input, arg_kernel, arg_output),
target=uarch.default + isa.fma3):
reg_mr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_mr, arg_mr)
reg_nr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_nr, arg_nr)
reg_input_channels = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_input_channels, arg_input_channels)
SHL(reg_input_channels, 2)
reg_image_size = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_image_size, arg_image_size)
SHL(reg_image_size, 2)
reg_inputs = [GeneralPurposeRegister64() for m in range(mr)]
LOAD.ARGUMENT(reg_inputs[0], arg_input)
for m in range(1, mr):
LEA(reg_inputs[m], [reg_inputs[m - 1] + reg_image_size * 1])
reg_kernel = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_kernel, arg_kernel)
reg_outputs = [GeneralPurposeRegister64() for n in range(nr)]
LOAD.ARGUMENT(reg_outputs[0], arg_output)
for n in range(1, nr):
LEA(reg_outputs[n], [reg_outputs[n - 1] + reg_image_size * 1])
VZEROALL()
ymm_inputs = [YMMRegister() for m in range(mr)]
ymm_kernel = [[YMMRegister() for n in range(nr)] for m in range(mr)]
with Block() as load_kernels:
for n in range(nr):
with Block() as load_kernels_row:
for m in range(mr):
VBROADCASTSS(ymm_kernel[m][n], [reg_kernel + m * float_.size])
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(load_kernels_row.end)
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(load_kernels.end)
ADD(reg_kernel, reg_input_channels)
main_loop = Loop()
final_block = Block()
SUB(reg_image_size, YMMRegister.size)
JB(main_loop.end)
with main_loop:
# Load vectors from different channels of the output image
ymm_outputs = [YMMRegister() for n in range(nr)]
with Block() as load_outputs:
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VMOVUPS(ymm_output, [reg_output])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(load_outputs.end)
with Block() as load_inputs:
for m, (ymm_input, reg_input) in enumerate(zip(ymm_inputs, reg_inputs)):
# Load vector for a channel of the input image
VMOVUPS(ymm_input, [reg_input])
ADD(reg_input, YMMRegister.size)
# Update all outputs using the input and corresponding kernel elements
for n, ymm_output in enumerate(ymm_outputs):
VFMADD231PS(ymm_output, ymm_kernel[m][n], ymm_input)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(load_inputs.end)
# Store vectors to different channels of the output image
with Block() as store_outputs:
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VMOVUPS([reg_output], ymm_output)
ADD(reg_output, YMMRegister.size)
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(store_outputs.end)
SUB(reg_image_size, YMMRegister.size)
JAE(main_loop.begin)
ADD(reg_image_size, YMMRegister.size)
JZ(final_block.end)
with final_block:
reg_mask, ymm_mask = GeneralPurposeRegister64(), YMMRegister()
LEA(reg_mask, Constant.uint32x16(*([0xFFFFFFFF] * 8 + [0x00000000] * 8)))
SUB(reg_mask, reg_image_size)
VMOVDQU(ymm_mask, [reg_mask + YMMRegister.size])
# Load vectors from different channels of the output image
ymm_outputs = [YMMRegister() for n in range(nr)]
with Block() as load_outputs:
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VMASKMOVPS(ymm_output, ymm_mask, [reg_output])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(load_outputs.end)
with Block() as load_inputs:
for m, (ymm_input, reg_input) in enumerate(zip(ymm_inputs, reg_inputs)):
# Load vector for a channel of the input image
VMASKMOVPS(ymm_inputs[m], ymm_mask, [reg_input])
# Update all outputs using the input and corresponding kernel elements
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VFMADD231PS(ymm_output, ymm_kernel[m][n], ymm_inputs[m])
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(load_inputs.end)
# Store vectors to different channels of the output image
with Block() as store_outputs:
for n, (reg_output, ymm_output) in enumerate(zip(reg_outputs, ymm_outputs)):
VMASKMOVPS([reg_output], ymm_mask, ymm_output)
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(store_outputs.end)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
simd_width = YMMRegister.size // float_.size
for fusion_factor in range(1, 8 + 1):
arg_x = Argument(ptr(const_float_), "x")
arg_y = Argument(ptr(const_float_), "y")
arg_stride_y = Argument(size_t, "stride_y")
arg_sum = Argument(ptr(float_), "sum")
arg_n = Argument(size_t, "n")
with Function("nnp_sdotxf{fusion_factor}__avx2".format(fusion_factor=fusion_factor),
(arg_x, arg_y, arg_stride_y, arg_sum, arg_n),
target=uarch.default + isa.fma3 + isa.avx2):
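# Computes fusion_factor dot products that share the same x vector: sum[m] = dot(x, y + m * stride_y) for m in [0, fusion_factor).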
reg_x = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_x, arg_x)
reg_ys = [GeneralPurposeRegister64() for m in range(fusion_factor)]
LOAD.ARGUMENT(reg_ys[0], arg_y)
reg_stride_y = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_y, arg_stride_y)
SHL(reg_stride_y, 2)
reg_sum = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_sum, arg_sum)
reg_n = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_n, arg_n)
ymm_accs = [YMMRegister() for m in range(fusion_factor)]
VZEROALL()
for m in range(1, fusion_factor):
LEA(reg_ys[m], [reg_ys[m - 1] + reg_stride_y * 1])
main_loop = Loop()
end_block = Block()
SUB(reg_n, YMMRegister.size // float_.size)
JB(main_loop.end)
with main_loop:
ymm_x = YMMRegister()
VMOVUPS(ymm_x, [reg_x])
ADD(reg_x, YMMRegister.size)
for reg_y, ymm_acc in zip(reg_ys, ymm_accs):
VFMADD231PS(ymm_acc, ymm_x, [reg_y])
ADD(reg_y, YMMRegister.size)
SUB(reg_n, YMMRegister.size // float_.size)
JAE(main_loop.begin)
ADD(reg_n, YMMRegister.size // float_.size)
JE(end_block.end)
with end_block:
ymm_mask = YMMRegister()
VMOVD(ymm_mask.as_xmm, reg_n.as_dword)
VPBROADCASTD(ymm_mask, ymm_mask.as_xmm)
VPCMPGTD(ymm_mask, ymm_mask, Constant.uint32x8(0, 1, 2, 3, 4, 5, 6, 7))
ymm_x = YMMRegister()
VMASKMOVPS(ymm_x, ymm_mask, [reg_x])
for reg_y, ymm_acc in zip(reg_ys, ymm_accs):
ymm_y = YMMRegister()
VMASKMOVPS(ymm_y, ymm_mask, [reg_y])
VFMADD231PS(ymm_acc, ymm_x, ymm_y)
# Reduce each SIMD accumulator to a single element
xmm_tmp = XMMRegister()
for i, ymm_acc in enumerate(ymm_accs):
VEXTRACTF128(xmm_tmp, ymm_acc, 1)
VADDPS(ymm_acc.as_xmm, ymm_acc.as_xmm, xmm_tmp)
VHADDPS(ymm_acc, ymm_acc, ymm_acc)
VHADDPS(ymm_acc, ymm_acc, ymm_acc)
VMOVSS([reg_sum + i * float_.size], ymm_acc.as_xmm)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
from fp16.avx import fp16_alt_xmm_to_fp32_xmm
from fp16.avx2 import fp16_alt_xmm_to_fp32_ymm
simd_width = YMMRegister.size // float_.size
for fusion_factor in range(1, 8 + 1):
arg_x = Argument(ptr(const_float_), "x")
arg_y = Argument(ptr(const_float_), "y")
arg_stride_y = Argument(size_t, "stride_y")
arg_sum = Argument(ptr(float_), "sum")
arg_n = Argument(size_t, "n")
with Function("nnp_shdotxf{fusion_factor}__avx2".format(fusion_factor=fusion_factor),
(arg_x, arg_y, arg_stride_y, arg_sum, arg_n),
target=uarch.default + isa.fma3 + isa.avx2):
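# Variant of sdotxf where y holds half-precision values (the 'alt' fp16 layout); y values are converted to fp32 before each FMA.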
reg_x = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_x, arg_x)
reg_ys = [GeneralPurposeRegister64() for m in range(fusion_factor)]
LOAD.ARGUMENT(reg_ys[0], arg_y)
reg_stride_y = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_stride_y, arg_stride_y)
ADD(reg_stride_y, reg_stride_y)
reg_sum = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_sum, arg_sum)
reg_n = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_n, arg_n)
ymm_accs = [YMMRegister() for m in range(fusion_factor)]
VZEROALL()
for m in range(1, fusion_factor):
LEA(reg_ys[m], [reg_ys[m - 1] + reg_stride_y * 1])
main_loop = Loop()
edge_loop = Loop()
SUB(reg_n, XMMRegister.size // uint16_t.size)
JB(main_loop.end)
with main_loop:
ymm_x = YMMRegister()
VMOVUPS(ymm_x, [reg_x])
ADD(reg_x, YMMRegister.size)
for reg_y, ymm_acc in zip(reg_ys, ymm_accs):
xmm_half = XMMRegister()
VMOVUPS(xmm_half, [reg_y])
ADD(reg_y, XMMRegister.size)
ymm_y = fp16_alt_xmm_to_fp32_ymm(xmm_half)
VFMADD231PS(ymm_acc, ymm_x, ymm_y)
SUB(reg_n, YMMRegister.size // float_.size)
JAE(main_loop.begin)
ADD(reg_n, XMMRegister.size // uint16_t.size)
JE(edge_loop.end)
with edge_loop:
xmm_x = XMMRegister()
VMOVSS(xmm_x, [reg_x])
ADD(reg_x, float_.size)
for reg_y, ymm_acc in zip(reg_ys, ymm_accs):
reg_half = GeneralPurposeRegister32()
MOVZX(reg_half, word[reg_y])
xmm_half = XMMRegister()
VMOVD(xmm_half, reg_half)
ADD(reg_y, uint16_t.size)
ymm_y = fp16_alt_xmm_to_fp32_ymm(xmm_half)
VFMADD231PS(ymm_acc, xmm_x.as_ymm, ymm_y)
SUB(reg_n, 1)
JNZ(edge_loop.begin)
# Reduce each SIMD accumulator to a single element
xmm_tmp = XMMRegister()
for i, ymm_acc in enumerate(ymm_accs):
VEXTRACTF128(xmm_tmp, ymm_acc, 1)
VADDPS(ymm_acc.as_xmm, ymm_acc.as_xmm, xmm_tmp)
VHADDPS(ymm_acc, ymm_acc, ymm_acc)
VHADDPS(ymm_acc, ymm_acc, ymm_acc)
VMOVSS([reg_sum + i * float_.size], ymm_acc.as_xmm)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
from common import _MM_SHUFFLE
simd_width = YMMRegister.size // float_.size
mr = 4
nr = 3 * simd_width
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_sgemm_only_{mr}x{nr}__fma3".format(mr=mr, nr=nr),
(arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
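# Single-precision GEMM microkernel for a 4x24 tile of C (three YMM registers per row): each k iteration loads a 24-float row of B and broadcasts the four elements of the corresponding A column.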
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
with Block() as prefetch_c:
for m in range(mr):
PREFETCHT0([reg_c])
if m + 1 != mr:
ADD(reg_c, reg_row_stride)
ymm_c = [[YMMRegister() for n in range(0, nr, simd_width)] for m in range(mr)]
VZEROALL()
ymm_b = [YMMRegister() for n in range(0, nr, simd_width)]
ymm_a_m = YMMRegister()
with Loop() as loop:
for n in range(nr // simd_width):
VMOVAPS(ymm_b[n], [reg_b + n * YMMRegister.size])
ADD(reg_b, nr * float_.size)
for m in range(mr):
VBROADCASTSS(ymm_a_m, [reg_a + m * float_.size])
for n in range(nr // simd_width):
VFMADD231PS(ymm_c[m][n], ymm_a_m, ymm_b[n])
ADD(reg_a, mr * float_.size)
DEC(reg_k)
JNE(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
with Block() as load_and_store_c:
for m in reversed(range(mr)):
for n in range(nr // simd_width):
VADDPS(ymm_c[m][n], ymm_c[m][n], [reg_c + n * YMMRegister.size])
VMOVUPS([reg_c + n * YMMRegister.size], ymm_c[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in reversed(range(mr)):
for n in range(nr // simd_width):
VMOVUPS([reg_c + n * YMMRegister.size], ymm_c[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
arg_mr = Argument(uint32_t, "mr")
arg_nr = Argument(uint32_t, "nr")
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_sgemm_upto_{mr}x{nr}__fma3".format(mr=mr, nr=nr),
(arg_mr, arg_nr, arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_mr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_mr, arg_mr)
reg_nr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_nr, arg_nr)
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
ymm_c = [[YMMRegister() for n in range(0, nr, simd_width)] for m in range(mr)]
VZEROALL()
ymm_b = [YMMRegister() for n in range(0, nr, simd_width)]
ymm_a_m = YMMRegister()
with Loop() as loop:
with Block() as load_b:
for n in range(nr // simd_width):
VMOVAPS(ymm_b[n], [reg_b])
ADD(reg_b, YMMRegister.size)
if n + 1 != nr // simd_width:
CMP(reg_nr, (n + 1) * simd_width)
JBE(load_b.end)
with Block() as multiply_by_a:
for m in range(mr):
VBROADCASTSS(ymm_a_m, [reg_a])
ADD(reg_a, float_.size)
for n in range(nr // simd_width):
VFMADD231PS(ymm_c[m][n], ymm_a_m, ymm_b[n])
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(multiply_by_a.end)
DEC(reg_k)
JNE(loop.begin)
store_c = Block()
# Load mask
reg_mask_index = GeneralPurposeRegister32()
LEA(reg_mask_index, [reg_nr.as_qword - 1])
AND(reg_mask_index, simd_width - 1)
NEG(reg_mask_index.as_qword)
const_mask_table = Constant.uint32x16(*([0xFFFFFFFF] * 8 + [0x00000000] * 8))
reg_mask = GeneralPurposeRegister64()
LEA(reg_mask, const_mask_table)
LEA(reg_mask, [reg_mask + reg_mask_index.as_qword * 4 + 32 - 4])
ymm_mask = YMMRegister()
VMOVDQU(ymm_mask, [reg_mask])
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
with Block() as update_c:
for m in range(mr):
reg_c_mn = GeneralPurposeRegister64()
MOV(reg_c_mn, reg_c)
ymm_c_mn = YMMRegister()
with Block() as update_c_full_registers:
for n in range(nr // simd_width):
# Copy the current accumulator register into a fixed register ymm_c_mn.
# If a partial register is to be stored, the storing code would expect it there.
VMOVAPS(ymm_c_mn, ymm_c[m][n])
if n + 1 != nr // simd_width:
CMP(reg_nr, (n + 1) * simd_width)
JBE(update_c_full_registers.end)
VADDPS(ymm_c[m][n], ymm_c[m][n], [reg_c_mn])
VMOVUPS([reg_c_mn], ymm_c[m][n])
ADD(reg_c_mn, YMMRegister.size)
# Update (potentially) partial register
# Note: ymm_c_mn holds the last (possibly partial) tuple of this C row; reg_c_mn points to its location in memory
ymm_temp = YMMRegister()
VMASKMOVPS(ymm_temp, ymm_mask, [reg_c_mn])
VADDPS(ymm_c_mn, ymm_c_mn, ymm_temp)
VMASKMOVPS([reg_c_mn], ymm_mask, ymm_c_mn)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(update_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in range(mr):
reg_c_mn = GeneralPurposeRegister64()
MOV(reg_c_mn, reg_c)
ymm_c_mn = YMMRegister()
with Block() as store_c_full_registers:
for n in range(nr // simd_width):
# Copy the current accumulator register into a fixed register ymm_c_mn.
# If a partial register is to be stored, the storing code would expect it there.
VMOVAPS(ymm_c_mn, ymm_c[m][n])
if n + 1 != nr // simd_width:
CMP(reg_nr, (n + 1) * simd_width)
JBE(store_c_full_registers.end)
VMOVUPS([reg_c_mn], ymm_c[m][n])
ADD(reg_c_mn, YMMRegister.size)
# Store (potentially) partial register
# Note: ymm_c_mn holds the last (possibly partial) tuple of this C row; reg_c_mn points to its location in memory
VMASKMOVPS([reg_c_mn], ymm_mask, ymm_c_mn)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(store_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
mr, nr = 2, 2
for conjugate_b, transpose_c in [(False, False), (True, False), (True, True)]:
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_c8gemm{conjb}{transc}_only_{mr}x{nr}__fma3".format(mr=mr, nr=nr,
conjb="_conjb" if conjugate_b else "",
transc="_transc" if transpose_c else ""),
(arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
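# Complex tuple GEMM in split real/imaginary (SoA) form: every matrix entry is an 8-wide complex tuple; the generated variants optionally conjugate B and/or transpose the C tile.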
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
with Block() as prefetch_c:
if not transpose_c:
for m in range(mr):
PREFETCHT0([reg_c])
if m + 1 != mr:
ADD(reg_c, reg_row_stride)
else:
for n in range(nr):
PREFETCHT0([reg_c])
if n + 1 != nr:
ADD(reg_c, reg_row_stride)
ymm_c_re = [[YMMRegister() for n in range(nr)] for m in range(mr)]
ymm_c_im = [[YMMRegister() for n in range(nr)] for m in range(mr)]
VZEROALL()
ymm_a = [YMMRegister() for m in range(2*mr)]
ymm_a_re, ymm_a_im = ymm_a[0::2], ymm_a[1::2]
ymm_b = [YMMRegister() for n in range(2*nr)]
ymm_b_re, ymm_b_im = ymm_b[0::2], ymm_b[1::2]
with Loop() as loop:
for i, ymm in enumerate(ymm_a):
VMOVAPS(ymm, [reg_a + i * YMMRegister.size])
SUB(reg_a, -YMMRegister.size * 2 * mr)
for j, ymm in enumerate(ymm_b):
VMOVAPS(ymm, [reg_b + j * YMMRegister.size])
SUB(reg_b, -YMMRegister.size * 2 * nr)
for n in range(nr):
for m in range(mr):
VFMADD231PS(ymm_c_re[m][n], ymm_a_re[m], ymm_b_re[n])
VFMADD231PS(ymm_c_im[m][n], ymm_a_im[m], ymm_b_re[n])
for n in range(nr):
for m in range(mr):
if conjugate_b:
VFMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFNMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
else:
VFNMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
DEC(reg_k)
JNZ(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
if transpose_c:
mr, nr = nr, mr
ymm_c_re = [list(ymm_column) for ymm_column in zip(*ymm_c_re)]
ymm_c_im = [list(ymm_column) for ymm_column in zip(*ymm_c_im)]
with Block() as update_c:
for m in reversed(range(mr)):
for n in range(nr):
VADDPS(ymm_c_re[m][n], ymm_c_re[m][n], [reg_c + (2*n+0) * YMMRegister.size])
VADDPS(ymm_c_im[m][n], ymm_c_im[m][n], [reg_c + (2*n+1) * YMMRegister.size])
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in reversed(range(mr)):
for n in range(nr):
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
arg_mr = Argument(uint32_t, "mr")
arg_nr = Argument(uint32_t, "nr")
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_c8gemm{conjb}{transc}_upto_{mr}x{nr}__fma3".format(mr=mr, nr=nr,
conjb="_conjb" if conjugate_b else "",
transc="_transc" if transpose_c else ""),
(arg_mr, arg_nr, arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_mr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_mr, arg_mr)
reg_nr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_nr, arg_nr)
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
ymm_c_re, ymm_c_im = tuple([[YMMRegister() for n in range(nr)] for m in range(mr)] for c in range(2))
VZEROALL()
ymm_a_re, ymm_a_im = tuple([YMMRegister() for m in range(mr)] for c in range(2))
ymm_b_re, ymm_b_im = tuple([YMMRegister() for n in range(nr)] for c in range(2))
with Loop() as loop:
with Block() as load_a:
for m, (ymm_re, ymm_im) in enumerate(zip(ymm_a_re, ymm_a_im)):
VMOVAPS(ymm_re, [reg_a])
VMOVAPS(ymm_im, [reg_a + YMMRegister.size])
ADD(reg_a, 2 * YMMRegister.size)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(load_a.end)
with Block() as load_b:
for n, (ymm_re, ymm_im) in enumerate(zip(ymm_b_re, ymm_b_im)):
VMOVAPS(ymm_re, [reg_b])
VMOVAPS(ymm_im, [reg_b + YMMRegister.size])
ADD(reg_b, 2 * YMMRegister.size)
with Block() as multiply_by_bn:
for m in range(mr):
VFMADD231PS(ymm_c_re[m][n], ymm_a_re[m], ymm_b_re[n])
VFMADD231PS(ymm_c_im[m][n], ymm_a_im[m], ymm_b_re[n])
if conjugate_b:
VFMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFNMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
else:
VFNMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(multiply_by_bn.end)
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(load_b.end)
DEC(reg_k)
JNZ(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
if transpose_c:
mr, nr = nr, mr
reg_mr, reg_nr = reg_nr, reg_mr
ymm_c_re = [list(ymm_column) for ymm_column in zip(*ymm_c_re)]
ymm_c_im = [list(ymm_column) for ymm_column in zip(*ymm_c_im)]
with Block() as update_c:
for m in range(mr):
with Block() as update_c_row:
for n in range(nr):
VADDPS(ymm_c_re[m][n], ymm_c_re[m][n], [reg_c + (2*n+0) * YMMRegister.size])
VADDPS(ymm_c_im[m][n], ymm_c_im[m][n], [reg_c + (2*n+1) * YMMRegister.size])
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(update_c_row.end)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(update_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in range(mr):
with Block() as store_c_row:
for n in range(nr):
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(store_c_row.end)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(store_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
mr, nr = 2, 2
for conjugate_b, transpose_c in [(False, False), (True, False), (True, True)]:
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_s4c6gemm{conjb}{transc}_only_{mr}x{nr}__fma3".format(mr=mr, nr=nr,
conjb="_conjb" if conjugate_b else "",
transc="_transc" if transpose_c else ""),
(arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
with Block() as prefetch_c:
if not transpose_c:
for m in range(mr):
PREFETCHT0([reg_c])
if m + 1 != mr:
ADD(reg_c, reg_row_stride)
else:
for n in range(nr):
PREFETCHT0([reg_c])
if n + 1 != nr:
ADD(reg_c, reg_row_stride)
ymm_c_re, ymm_c_im = tuple([[YMMRegister() for n in range(nr)] for m in range(mr)] for c in range(2))
VZEROALL()
ymm_a_re, ymm_a_im = tuple([YMMRegister() for m in range(2*mr)] for c in range(2))
ymm_b_re, ymm_b_im = tuple([YMMRegister() for n in range(2*nr)] for c in range(2))
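# s4c6 layout: lanes 0-1 of each re/im register pair carry packed real-only values, lanes 2-7 carry complex values; the blend and mask below make lanes 0-1 behave as a purely real multiply.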
with Loop() as loop:
for m in range(mr):
VMOVAPS(ymm_a_re[m], [reg_a + (2*m+0) * YMMRegister.size])
VMOVAPS(ymm_a_im[m], [reg_a + (2*m+1) * YMMRegister.size])
SUB(reg_a, -YMMRegister.size * 2 * mr)
for n in range(nr):
VMOVAPS(ymm_b_re[n], [reg_b + (2*n+0) * YMMRegister.size])
for m in range(mr):
VFMADD231PS(ymm_c_re[m][n], ymm_a_re[m], ymm_b_re[n])
VMOVAPS(ymm_b_im[n], [reg_b + (2*n+1) * YMMRegister.size])
VBLENDPS(ymm_b_re[n], ymm_b_re[n], ymm_b_im[n], 0b00000011)
for m in range(mr):
VFMADD231PS(ymm_c_im[m][n], ymm_a_im[m], ymm_b_re[n])
SUB(reg_b, -YMMRegister.size * 2 * nr)
ymm_zero_columns01_mask = YMMRegister()
VMOVAPS(ymm_zero_columns01_mask, Constant.uint32x8(0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF))
for n in range(nr):
VANDPS(ymm_b_im[n], ymm_b_im[n], ymm_zero_columns01_mask)
for m in range(mr):
if conjugate_b:
VFMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFNMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
else:
VFNMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
DEC(reg_k)
JNZ(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
if transpose_c:
mr, nr = nr, mr
ymm_c_re = [list(ymm_column) for ymm_column in zip(*ymm_c_re)]
ymm_c_im = [list(ymm_column) for ymm_column in zip(*ymm_c_im)]
with Block() as update_c:
for m in reversed(range(mr)):
for n in range(nr):
VADDPS(ymm_c_re[m][n], ymm_c_re[m][n], [reg_c + (2*n+0) * YMMRegister.size])
VADDPS(ymm_c_im[m][n], ymm_c_im[m][n], [reg_c + (2*n+1) * YMMRegister.size])
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in reversed(range(mr)):
for n in range(nr):
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if m != 0:
SUB(reg_c, reg_row_stride)
RETURN()
arg_mr = Argument(uint32_t, "mr")
arg_nr = Argument(uint32_t, "nr")
arg_k = Argument(size_t, "k")
arg_update = Argument(size_t, "update")
arg_a = Argument(ptr(const_float_), "a")
arg_b = Argument(ptr(const_float_), "b")
arg_c = Argument(ptr(float_), "c")
arg_row_stride = Argument(size_t, "row_stride_c")
with Function("nnp_s4c6gemm{conjb}{transc}_upto_{mr}x{nr}__fma3".format(mr=mr, nr=nr,
conjb="_conjb" if conjugate_b else "",
transc="_transc" if transpose_c else ""),
(arg_mr, arg_nr, arg_k, arg_update, arg_a, arg_b, arg_c, arg_row_stride),
target=uarch.default + isa.fma3):
reg_mr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_mr, arg_mr)
reg_nr = GeneralPurposeRegister32()
LOAD.ARGUMENT(reg_nr, arg_nr)
reg_k = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_k, arg_k)
reg_update = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_update, arg_update)
reg_a = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_a, arg_a)
reg_b = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_b, arg_b)
reg_c = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_c, arg_c)
reg_row_stride = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_row_stride, arg_row_stride)
SHL(reg_row_stride, 2)
ymm_c_re, ymm_c_im = tuple([[YMMRegister() for n in range(nr)] for m in range(mr)] for c in range(2))
VZEROALL()
ymm_a_re, ymm_a_im = tuple([YMMRegister() for m in range(mr)] for c in range(2))
ymm_b_re, ymm_b_im = tuple([YMMRegister() for n in range(nr)] for c in range(2))
with Loop() as loop:
with Block() as load_a:
for m, (ymm_re, ymm_im) in enumerate(zip(ymm_a_re, ymm_a_im)):
VMOVAPS(ymm_re, [reg_a])
VMOVAPS(ymm_im, [reg_a + YMMRegister.size])
ADD(reg_a, 2 * YMMRegister.size)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(load_a.end)
with Block() as load_b:
for n in range(nr):
VMOVAPS(ymm_b_re[n], [reg_b])
for m in range(mr):
VFMADD231PS(ymm_c_re[m][n], ymm_a_re[m], ymm_b_re[n])
VMOVAPS(ymm_b_im[n], [reg_b + YMMRegister.size])
VBLENDPS(ymm_b_re[n], ymm_b_re[n], ymm_b_im[n], 0b00000011)
for m in range(mr):
VFMADD231PS(ymm_c_im[m][n], ymm_a_im[m], ymm_b_re[n])
ADD(reg_b, YMMRegister.size * 2)
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(load_b.end)
ymm_zero_columns01_mask = YMMRegister()
VMOVAPS(ymm_zero_columns01_mask, Constant.uint32x8(0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF))
for n in range(nr):
VANDPS(ymm_b_im[n], ymm_b_im[n], ymm_zero_columns01_mask)
for m in range(mr):
if conjugate_b:
VFMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFNMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
else:
VFNMADD231PS(ymm_c_re[m][n], ymm_a_im[m], ymm_b_im[n])
VFMADD231PS(ymm_c_im[m][n], ymm_a_re[m], ymm_b_im[n])
DEC(reg_k)
JNZ(loop.begin)
store_c = Block()
# Check if we need to update C or overwrite it
TEST(reg_update, reg_update)
JZ(store_c.begin)
if transpose_c:
mr, nr = nr, mr
reg_mr, reg_nr = reg_nr, reg_mr
ymm_c_re = [list(ymm_column) for ymm_column in zip(*ymm_c_re)]
ymm_c_im = [list(ymm_column) for ymm_column in zip(*ymm_c_im)]
with Block() as update_c:
for m in range(mr):
with Block() as update_c_row:
for n in range(nr):
VADDPS(ymm_c_re[m][n], ymm_c_re[m][n], [reg_c + (2*n+0) * YMMRegister.size])
VADDPS(ymm_c_im[m][n], ymm_c_im[m][n], [reg_c + (2*n+1) * YMMRegister.size])
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(update_c_row.end)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(update_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
with store_c:
for m in range(mr):
with Block() as store_c_row:
for n in range(nr):
VMOVAPS([reg_c + (2*n+0) * YMMRegister.size], ymm_c_re[m][n])
VMOVAPS([reg_c + (2*n+1) * YMMRegister.size], ymm_c_im[m][n])
if n + 1 != nr:
CMP(reg_nr, n + 1)
JE(store_c_row.end)
if m + 1 != mr:
CMP(reg_mr, m + 1)
JE(store_c.end)
ADD(reg_c, reg_row_stride)
RETURN()
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
from common import sqrt2_over_2
from common import butterfly
import fft.complex_soa
def fft8_across_rows(ymm_data):
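# Real-to-complex 8-point FFT computed across the 8 rows (each YMM register holds one row, so the transform combines matching lanes of different rows); the result is stored in the packed format described by the store comments below.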
assert isinstance(ymm_data, list) and len(ymm_data) == 8
ymm_real = ymm_data[0::2]
ymm_imag = ymm_data[1::2]
fft.complex_soa.fft4_across_rows(ymm_real, ymm_imag)
butterfly(ymm_real[0], ymm_imag[0])
# const float two_gdata1_real = crealf(data1) + crealf(data3);
# const float two_gdata1_imag = cimagf(data1) - cimagf(data3);
ymm_two_gdata1_real, ymm_two_gdata1_imag = YMMRegister(), YMMRegister()
VADDPS(ymm_two_gdata1_real, ymm_real[1], ymm_real[3])
VSUBPS(ymm_two_gdata1_imag, ymm_imag[1], ymm_imag[3])
# const float two_hdata1_real = cimagf(data1) + cimagf(data3);
# const float two_hdata1_imag = crealf(data3) - crealf(data1);
ymm_two_hdata1_real, ymm_two_hdata1_imag = YMMRegister(), YMMRegister()
VADDPS(ymm_two_hdata1_real, ymm_imag[1], ymm_imag[3])
VSUBPS(ymm_two_hdata1_imag, ymm_real[3], ymm_real[1])
# const float two_hdata1_real_plus_imag = two_hdata1_real + two_hdata1_imag;
# const float two_hdata1_real_minus_imag = two_hdata1_real - two_hdata1_imag;
ymm_two_hdata1_plus, ymm_two_hdata1_minus = YMMRegister(), YMMRegister()
VADDPS(ymm_two_hdata1_plus, ymm_two_hdata1_real, ymm_two_hdata1_imag)
VSUBPS(ymm_two_hdata1_minus, ymm_two_hdata1_real, ymm_two_hdata1_imag)
ymm_sqrt2_over_2 = YMMRegister()
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
# const float two_data1_real = two_gdata1_real + SQRT2_OVER_2 * two_hdata1_real_plus_imag;
# const float two_data1_imag = two_gdata1_imag - SQRT2_OVER_2 * two_hdata1_real_minus_imag;
# const float two_data3_real = two_gdata1_real - SQRT2_OVER_2 * two_hdata1_real_plus_imag;
# const float two_data3_imag = -two_gdata1_imag - SQRT2_OVER_2 * two_hdata1_real_minus_imag;
ymm_two_data1_real, ymm_two_data1_imag = YMMRegister(), YMMRegister()
ymm_two_data3_real, ymm_two_data3_imag = YMMRegister(), YMMRegister()
VMOVAPS(ymm_two_data3_real, ymm_two_gdata1_real)
VMOVAPS(ymm_two_data3_imag, ymm_two_gdata1_imag)
VFMADD231PS(ymm_two_gdata1_real, ymm_two_hdata1_plus, ymm_sqrt2_over_2)
VFNMADD231PS(ymm_two_gdata1_imag, ymm_two_hdata1_minus, ymm_sqrt2_over_2)
SWAP.REGISTERS(ymm_two_data1_real, ymm_two_gdata1_real)
SWAP.REGISTERS(ymm_two_data1_imag, ymm_two_gdata1_imag)
VFNMADD231PS(ymm_two_data3_real, ymm_two_hdata1_plus, ymm_sqrt2_over_2)
VFNMSUB231PS(ymm_two_data3_imag, ymm_two_hdata1_minus, ymm_sqrt2_over_2)
# /* Store outputs */
# fdata[0] = crealf(data0) + cimagf(data0);
# fdata[1] = crealf(data0) - cimagf(data0);
# fdata[2] = 0.5f * two_data1_real;
# fdata[3] = 0.5f * two_data1_imag;
# fdata[4] = crealf(data2);
# fdata[5] = -cimagf(data2);
# fdata[6] = 0.5f * two_data3_real;
# fdata[7] = 0.5f * two_data3_imag;
ymm_half = YMMRegister()
VMOVAPS(ymm_half, Constant.float32x8(0.5))
VMULPS(ymm_real[1], ymm_two_data1_real, ymm_half)
VMULPS(ymm_imag[1], ymm_two_data1_imag, ymm_half)
VXORPS(ymm_imag[2], ymm_imag[2], Constant.float32x8(-0.0))
VMULPS(ymm_real[3], ymm_two_data3_real, ymm_half)
VMULPS(ymm_imag[3], ymm_two_data3_imag, ymm_half)
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
from common import interleave
def ifft8_within_rows_preprocess(ymm_wr, ymm_wi):
assert isinstance(ymm_wr, YMMRegister)
assert isinstance(ymm_wi, YMMRegister)
# w0.re, w1.re, w2.re, w3.re, w4.re, w5.re, w6.re, w7.re = \
# = f0.re, f2.re - f3.im, f4.re - f5.im, f6.re - f7.im, f0.im, f6.re + f7.im, f4.re + f5.im, f2.re + f3.im
# w0.im, w1.im, w2.im, w3.im, w4.im, w5.im, w6.im, w7.im = \
# = f1.re, f3.re + f2.im, f5.re + f4.im, f7.re + f6.im, f1.im, f7.re - f6.im, f5.re - f4.im, f3.re - f2.im
ymm_shuffle_02461642 = YMMRegister()
VMOVDQA(ymm_shuffle_02461642, Constant.uint32x8(0, 2, 4, 6, 1, 6, 4, 2))
ymm_shuffle_13570753 = YMMRegister()
VMOVDQA(ymm_shuffle_13570753, Constant.uint32x8(1, 3, 5, 7, 0, 7, 5, 3))
ymm_wr_02461642 = YMMRegister()
VPERMPS(ymm_wr_02461642, ymm_shuffle_02461642, ymm_wr)
ymm_wr_13570753 = YMMRegister()
VPERMPS(ymm_wr_13570753, ymm_shuffle_13570753, ymm_wr)
ymm_wi_02461642 = YMMRegister()
VPERMPS(ymm_wi_02461642, ymm_shuffle_02461642, ymm_wi)
ymm_wi_13570753 = YMMRegister()
VPERMPS(ymm_wi_13570753, ymm_shuffle_13570753, ymm_wi)
# wr02461642 = f0.re, f2.re - f3.im, f4.re - f5.im, f6.re - f7.im, -, f6.re + f7.im, f4.re + f5.im, f2.re + f3.im
VFMADD231PS(ymm_wr_02461642, ymm_wi_13570753, Constant.float32x8(0.0, -1.0, -1.0, -1.0, 0.0, +1.0, +1.0, +1.0))
# wi13570753 = f1.re, f3.re + f2.im, f5.re + f4.im, f7.re + f6.im, -, f7.re - f6.im, f5.re - f4.im, f3.re - f2.im
VFMADD231PS(ymm_wr_13570753, ymm_wi_02461642, Constant.float32x8(0.0, +1.0, +1.0, +1.0, 0.0, -1.0, -1.0, -1.0))
VBLENDPS(ymm_wr_02461642, ymm_wr_02461642, ymm_wi_13570753, 0b00010000)
VBLENDPS(ymm_wr_13570753, ymm_wr_13570753, ymm_wi_02461642, 0b00010000)
SWAP.REGISTERS(ymm_wr_02461642, ymm_wr)
SWAP.REGISTERS(ymm_wr_13570753, ymm_wi)
def ifft16_within_rows_preprocess(ymm_wr, ymm_wi, bit_reversal=False):
assert isinstance(ymm_wr, (list, tuple)) and len(ymm_wr) == 2 and all(isinstance(reg, YMMRegister) for reg in ymm_wr)
assert isinstance(ymm_wi, (list, tuple)) and len(ymm_wi) == 2 and all(isinstance(reg, YMMRegister) for reg in ymm_wi)
# w0.re, w1.re, w2.re, w3.re, w4.re, w5.re, w6.re, w7.re = \
# = f0.re, f2.re - f3.im, f4.re - f5.im, f6.re - f7.im, f8.re - f9.im, f10.re - f11.im, f12.re - f13.im, f14.re - f15.im
# w8.re, w9.re, w10.re, w11.re, w12.re, w13.re, w14.re, w15.re = \
# = f0.im, f14.re + f15.im, f12.re + f13.im, f10.re + f11.im, f8.re + f9.im, f6.re + f7.im, f4.re + f5.im, f2.re + f3.im
#
# w0.im, w1.im, w2.im, w3.im, w4.im, w5.im, w6.im, w7.im = \
# = f1.re, f3.re + f2.im, f5.re + f4.im, f7.re + f6.im, f9.re + f8.im, f11.re + f10.im, f13.re + f12.im, f15.re + f14.im
# w8.im, w9.im, w10.im, w11.im, w12.im, w13.im, w14.im, w15.im = \
# = f1.im, f15.re - f14.im, f13.re - f12.im, f11.re - f10.im, f9.re - f8.im, f7.re - f6.im, f5.re - f4.im, f3.re - f2.im
# Step 1.A:
# w0.re, w1.re, w2.re, w3.re, -, w13.re, w14.re, w15.re = \
# = f0.re, f2.re - f3.im, f4.re - f5.im, f6.re - f7.im, -, f6.re + f7.im, f4.re + f5.im, f2.re + f3.im
# w0.im, w1.im, w2.im, w3.im, -, w13.im, w14.im, w15.im = \
# = f1.re, f3.re + f2.im, f5.re + f4.im, f7.re + f6.im, -, f7.re - f6.im, f5.re - f4.im, f3.re - f2.im
ymm_shuffle_02461642 = YMMRegister()
VMOVDQA(ymm_shuffle_02461642, Constant.uint32x8(0, 2, 4, 6, 1, 6, 4, 2))
ymm_shuffle_13570753 = YMMRegister()
VMOVDQA(ymm_shuffle_13570753, Constant.uint32x8(1, 3, 5, 7, 0, 7, 5, 3))
ymm_fr_02461642, ymm_fi_13570753 = YMMRegister(), YMMRegister()
VPERMPS(ymm_fr_02461642, ymm_shuffle_02461642, ymm_wr[0])
VPERMPS(ymm_fi_13570753, ymm_shuffle_13570753, ymm_wi[0])
VFMADD231PS(ymm_fr_02461642, ymm_fi_13570753, Constant.float32x8(0.0, -1.0, -1.0, -1.0, 0.0, +1.0, +1.0, +1.0))
ymm_fr_13570753, ymm_fi_02461642 = YMMRegister(), YMMRegister()
VPERMPS(ymm_fr_13570753, ymm_shuffle_13570753, ymm_wr[0])
VPERMPS(ymm_fi_02461642, ymm_shuffle_02461642, ymm_wi[0])
VFMADD231PS(ymm_fr_13570753, ymm_fi_02461642, Constant.float32x8(0.0, +1.0, +1.0, +1.0, 0.0, -1.0, -1.0, -1.0))
ymm_wr_0123xDEF, ymm_wi_0123xDEF = ymm_fr_02461642, ymm_fr_13570753
# Step 1.B:
# -, w9.re, w10.re, w11.re, w4.re, w5.re, w6.re, w7.re = \
# = -, f14.re + f15.im, f12.re + f13.im, f10.re + f11.im, f8.re - f9.im, f10.re - f11.im, f12.re - f13.im, f14.re - f15.im
# -, w9.im, w10.im, w11.im, w4.im, w5.im, w6.im, w7.im = \
# = -, f15.re - f14.im, f13.re - f12.im, f11.re - f10.im, f9.re + f8.im, f11.re + f10.im, f13.re + f12.im, f15.re + f14.im
ymm_shuffle_06420246 = YMMRegister()
VMOVDQA(ymm_shuffle_06420246, Constant.uint32x8(0, 6, 4, 2, 0, 2, 4, 6))
ymm_shuffle_17531357 = YMMRegister()
VMOVDQA(ymm_shuffle_17531357, Constant.uint32x8(1, 7, 5, 3, 1, 3, 5, 7))
ymm_wr_xxxxCxxx, ymm_wi_xxxxCxxx = YMMRegister(), YMMRegister()
ymm_wr_0123CDEF, ymm_wi_0123CDEF = YMMRegister(), YMMRegister()
ymm_fr_8ECA8ACE, ymm_fi_9FDB9BDF = YMMRegister(), YMMRegister()
VPERMPS(ymm_fr_8ECA8ACE, ymm_shuffle_06420246, ymm_wr[1])
VPERMPS(ymm_fi_9FDB9BDF, ymm_shuffle_17531357, ymm_wi[1])
VADDPS(ymm_wr_xxxxCxxx, ymm_fr_8ECA8ACE, ymm_fi_9FDB9BDF)
VFMADD231PS(ymm_fr_8ECA8ACE, ymm_fi_9FDB9BDF, Constant.float32x8(0.0, +1.0, +1.0, +1.0, -1.0, -1.0, -1.0, -1.0))
VBLENDPS(ymm_wr_0123CDEF, ymm_wr_0123xDEF, ymm_wr_xxxxCxxx, 0b00010000)
ymm_fr_9FDB9BDF, ymm_fi_8ECA8ACE = YMMRegister(), YMMRegister()
VPERMPS(ymm_fr_9FDB9BDF, ymm_shuffle_17531357, ymm_wr[1])
VPERMPS(ymm_fi_8ECA8ACE, ymm_shuffle_06420246, ymm_wi[1])
VSUBPS(ymm_wi_xxxxCxxx, ymm_fr_9FDB9BDF, ymm_fi_8ECA8ACE)
VFMADD231PS(ymm_fr_9FDB9BDF, ymm_fi_8ECA8ACE, Constant.float32x8(0.0, -1.0, -1.0, -1.0, +1.0, +1.0, +1.0, +1.0))
VBLENDPS(ymm_wi_0123CDEF, ymm_wi_0123xDEF, ymm_wi_xxxxCxxx, 0b00010000)
ymm_wr_x9AB4567, ymm_wi_x9AB4567 = ymm_fr_8ECA8ACE, ymm_fr_9FDB9BDF
ymm_wr_89AB4567, ymm_wi_89AB4567 = YMMRegister(), YMMRegister()
VBLENDPS(ymm_wr_89AB4567, ymm_wr_x9AB4567, ymm_fi_02461642, 0b00000001)
VBLENDPS(ymm_wi_89AB4567, ymm_wi_x9AB4567, ymm_fi_13570753, 0b00000001)
ymm_wr_01234567, ymm_wr_89ABCDEF = YMMRegister(), YMMRegister()
VBLENDPS(ymm_wr_01234567, ymm_wr_0123CDEF, ymm_wr_89AB4567, 0xF0)
VBLENDPS(ymm_wr_89ABCDEF, ymm_wr_0123CDEF, ymm_wr_89AB4567, 0x0F)
ymm_wi_01234567, ymm_wi_89ABCDEF = YMMRegister(), YMMRegister()
VBLENDPS(ymm_wi_01234567, ymm_wi_0123CDEF, ymm_wi_89AB4567, 0xF0)
VBLENDPS(ymm_wi_89ABCDEF, ymm_wi_0123CDEF, ymm_wi_89AB4567, 0x0F)
SWAP.REGISTERS(ymm_wr[0], ymm_wr_01234567)
SWAP.REGISTERS(ymm_wi[0], ymm_wi_01234567)
SWAP.REGISTERS(ymm_wr[1], ymm_wr_89ABCDEF)
SWAP.REGISTERS(ymm_wi[1], ymm_wi_89ABCDEF)
if bit_reversal:
# Bit reversal
# w[0] = x0 x8 x4 x12 x2 x10 x6 x14
# w[1] = x1 x9 x5 x13 x3 x11 x7 x15
ymm_bit_reversal_mask = YMMRegister()
VMOVDQA(ymm_bit_reversal_mask, Constant.uint32x8(0, 2, 4, 6, 1, 3, 5, 7))
for ymm in interleave(ymm_wr, ymm_wi):
VPERMPS(ymm, ymm_bit_reversal_mask, ymm)
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
from common import sqrt2_over_2
from common import butterfly
import fft.complex_soa
def ifft8_across_rows(ymm_data, bias=None):
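# Inverse of the across-rows real-to-complex FFT: recombines the packed rows, runs an inverse 4-point complex FFT, and applies 1/8 scaling, optionally fused with a bias addition.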
assert isinstance(ymm_data, list) and len(ymm_data) == 8
ymm_real = ymm_data[0::2]
ymm_imag = ymm_data[1::2]
if bias is None:
# Do 1/N scaling before IFFT
ymm_one_eighth = YMMRegister()
VMOVAPS(ymm_one_eighth, Constant.float32x8(0.125))
for ymm_row in ymm_data:
if ymm_row is ymm_real[2]:
VMULPS(ymm_row, ymm_row, Constant.float32x8(0.25))
elif ymm_row is ymm_imag[2]:
VMULPS(ymm_row, ymm_row, Constant.float32x8(-0.25))
else:
VMULPS(ymm_row, ymm_row, ymm_one_eighth)
else:
# Do 1/N scaling after IFFT (merged with bias addition)
VMULPS(ymm_real[2], ymm_real[2], Constant.float32x8(2.0))
VMULPS(ymm_imag[2], ymm_imag[2], Constant.float32x8(-2.0))
butterfly(ymm_real[0], ymm_imag[0])
# H1.real, H1.imag = W1.real - W3.real, W1.imag + W3.imag
ymm_h1_real, ymm_h1_imag = YMMRegister(), YMMRegister()
VSUBPS(ymm_h1_real, ymm_real[1], ymm_real[3])
VADDPS(ymm_h1_imag, ymm_imag[1], ymm_imag[3])
# G1.real, G1.imag = W1.real + W3.real, W1.imag - W3.imag
ymm_g1_real, ymm_g1_imag = YMMRegister(), YMMRegister()
VADDPS(ymm_g1_real, ymm_real[1], ymm_real[3])
VSUBPS(ymm_g1_imag, ymm_imag[1], ymm_imag[3])
# H1+, H1- = H1.real + H1.imag, H1.real - H1.imag
ymm_h1_plus, ymm_h1_minus = YMMRegister(), YMMRegister()
VADDPS(ymm_h1_plus, ymm_h1_real, ymm_h1_imag)
VSUBPS(ymm_h1_minus, ymm_h1_real, ymm_h1_imag)
ymm_sqrt2_over_2 = YMMRegister()
VMOVAPS(ymm_sqrt2_over_2, Constant.float32x8(sqrt2_over_2))
# w1.real = G1.real - SQRT2_OVER_2 * H1.plus;
# w3.real = G1.real + SQRT2_OVER_2 * H1.plus;
VMOVAPS(ymm_real[1], ymm_g1_real)
VFNMADD231PS(ymm_real[1], ymm_h1_plus, ymm_sqrt2_over_2)
VFMADD231PS(ymm_g1_real, ymm_h1_plus, ymm_sqrt2_over_2)
SWAP.REGISTERS(ymm_real[3], ymm_g1_real)
# w1.imag = G1.imag + SQRT2_OVER_2 * H1.minus;
# w3.imag = -G1.imag + SQRT2_OVER_2 * H1.minus;
VMOVAPS(ymm_imag[1], ymm_g1_imag)
VFMADD231PS(ymm_imag[1], ymm_h1_minus, ymm_sqrt2_over_2)
VFMSUB231PS(ymm_g1_imag, ymm_h1_minus, ymm_sqrt2_over_2)
SWAP.REGISTERS(ymm_imag[3], ymm_g1_imag)
fft.complex_soa.fft4_across_rows(ymm_real, ymm_imag, transformation="inverse")
if bias is not None:
ymm_bias = bias
if not isinstance(bias, YMMRegister):
ymm_bias = YMMRegister()
VMOVAPS(ymm_bias, bias)
ymm_one_eighth = YMMRegister()
VMOVAPS(ymm_one_eighth, Constant.float32x8(0.125))
# 1/N scaling
for ymm_row in ymm_data:
VFMADD132PS(ymm_row, ymm_bias, ymm_one_eighth)
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
def fft8_within_rows_postprocess(ymm_wr, ymm_wi):
assert isinstance(ymm_wr, YMMRegister)
assert isinstance(ymm_wi, YMMRegister)
ymm_shuffle_44112233 = YMMRegister()
VMOVDQA(ymm_shuffle_44112233, Constant.uint32x8(4, 4, 1, 1, 2, 2, 3, 3))
ymm_shuffle_00776655 = YMMRegister()
VMOVDQA(ymm_shuffle_00776655, Constant.uint32x8(0, 0, 7, 7, 6, 6, 5, 5))
ymm_wr_44112233 = YMMRegister()
VPERMPS(ymm_wr_44112233, ymm_shuffle_44112233, ymm_wr)
ymm_wr_00776655 = YMMRegister()
VPERMPS(ymm_wr_00776655, ymm_shuffle_00776655, ymm_wr)
ymm_wi_44112233 = YMMRegister()
VPERMPS(ymm_wi_44112233, ymm_shuffle_44112233, ymm_wi)
ymm_wi_00776655 = YMMRegister()
VPERMPS(ymm_wi_00776655, ymm_shuffle_00776655, ymm_wi)
# wr00776655 = wr0, -, wr7 + wr1, wr7 - wr1, wr6 + wr2, wr6 - wr2, wr5 + wr3, wr5 - wr3
VFMADD231PS(ymm_wr_00776655, ymm_wr_44112233, Constant.float32x8(0.0, 0.0, +1.0, -1.0, +1.0, -1.0, +1.0, -1.0))
# wi44112233 = _, wi4, wi1 - wi7, wi1 + wi7, wi2 - wi6, wi2 + wi6, wi3 - wi5, wi3 + wi5
VFMADD231PS(ymm_wi_44112233, ymm_wi_00776655, Constant.float32x8(0.0, 0.0, -1.0, +1.0, -1.0, +1.0, -1.0, +1.0))
# xhr = wr0, -, wr1 + wr7, wi1 + wi7, wr2 + wr6, wi2 + wi6, wr3 + wr5, wi3 + wi5
ymm_xhr = YMMRegister()
VBLENDPS(ymm_xhr, ymm_wr_00776655, ymm_wi_44112233, 0b10101010)
# xhI = -, wi4, wi1 - wi7, wr7 - wr1, wi2 - wi6, wr6 - wr2, wi3 - wi5, wr5 - wr3
ymm_xhi = YMMRegister()
VBLENDPS(ymm_xhi, ymm_wr_00776655, ymm_wi_44112233, 0b01010110)
# xhr = wr0, wi0, wr1 + wr7, wi1 + wi7, wr2 + wr6, wi2 + wi6, wr3 + wr5, wi3 + wi5
VBLENDPS(ymm_xhr, ymm_xhr, ymm_wi_00776655, 0b00000010)
# xhI = wr4, wi4, wi1 - wi7, wr7 - wr1, wi2 - wi6, wr6 - wr2, wi3 - wi5, wr5 - wr3
VBLENDPS(ymm_xhi, ymm_xhi, ymm_wr_44112233, 0b00000001)
ymm_scale_factor = YMMRegister()
VMOVAPS(ymm_scale_factor, Constant.float32x8(1.0, 1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5))
VMULPS(ymm_xhr, ymm_xhr, ymm_scale_factor)
VMULPS(ymm_xhi, ymm_xhi, ymm_scale_factor)
# wRe[0], wIm[0], (wRe[1] + wRe[7]) / 2, (wIm[1] + wIm[7]) / 2,
# (wRe[2] + wRe[6]) / 2, (wIm[2] + wIm[6]) / 2, (wRe[3] + wRe[5]) / 2, (wIm[3] + wIm[5]) / 2
# wRe[4], wIm[4], (wIm[1] - wIm[7]) / 2, (wRe[7] - wRe[1]) / 2,
# (wIm[2] - wIm[6]) / 2, (wRe[6] - wRe[2]) / 2, (wIm[3] - wIm[5]) / 2, (wRe[5] - wRe[3]) / 2
SWAP.REGISTERS(ymm_xhr, ymm_wr)
SWAP.REGISTERS(ymm_xhi, ymm_wi)
def fft16_within_rows_postprocess(ymm_wr, ymm_wi, bit_reversal=False):
assert isinstance(ymm_wr, (list, tuple)) and len(ymm_wr) == 2 and all(isinstance(reg, YMMRegister) for reg in ymm_wr)
assert isinstance(ymm_wi, (list, tuple)) and len(ymm_wi) == 2 and all(isinstance(reg, YMMRegister) for reg in ymm_wi)
if bit_reversal:
ymm_shuffle_00112233 = YMMRegister()
VMOVDQA(ymm_shuffle_00112233, Constant.uint32x8(0, 0, 4, 4, 1, 1, 5, 5))
ymm_shuffle_44556677 = YMMRegister()
VMOVDQA(ymm_shuffle_44556677, Constant.uint32x8(2, 2, 6, 6, 3, 3, 7, 7))
ymm_shuffle_44332211 = YMMRegister()
VMOVDQA(ymm_shuffle_44332211, Constant.uint32x8(2, 2, 5, 5, 1, 1, 4, 4))
ymm_shuffle_00776655 = YMMRegister()
VMOVDQA(ymm_shuffle_00776655, Constant.uint32x8(0, 0, 7, 7, 3, 3, 6, 6))
else:
ymm_shuffle_00112233 = YMMRegister()
VMOVDQA(ymm_shuffle_00112233, Constant.uint32x8(0, 0, 1, 1, 2, 2, 3, 3))
ymm_shuffle_44556677 = YMMRegister()
VMOVDQA(ymm_shuffle_44556677, Constant.uint32x8(4, 4, 5, 5, 6, 6, 7, 7))
ymm_shuffle_44332211 = YMMRegister()
VMOVDQA(ymm_shuffle_44332211, Constant.uint32x8(4, 4, 3, 3, 2, 2, 1, 1))
ymm_shuffle_00776655 = YMMRegister()
VMOVDQA(ymm_shuffle_00776655, Constant.uint32x8(0, 0, 7, 7, 6, 6, 5, 5))
ymm_wr_00112233, ymm_wr_44556677 = YMMRegister(), YMMRegister()
VPERMPS(ymm_wr_00112233, ymm_shuffle_00112233, ymm_wr[0])
VPERMPS(ymm_wr_44556677, ymm_shuffle_44556677, ymm_wr[0])
ymm_wr_CCBBAA99, ymm_wr_88FFEEDD = YMMRegister(), YMMRegister()
VPERMPS(ymm_wr_CCBBAA99, ymm_shuffle_44332211, ymm_wr[1])
VPERMPS(ymm_wr_88FFEEDD, ymm_shuffle_00776655, ymm_wr[1])
ymm_wi_00112233, ymm_wi_44556677 = YMMRegister(), YMMRegister()
VPERMPS(ymm_wi_00112233, ymm_shuffle_00112233, ymm_wi[0])
VPERMPS(ymm_wi_44556677, ymm_shuffle_44556677, ymm_wi[0])
ymm_wi_CCBBAA99, ymm_wi_88FFEEDD = YMMRegister(), YMMRegister()
VPERMPS(ymm_wi_CCBBAA99, ymm_shuffle_44332211, ymm_wi[1])
VPERMPS(ymm_wi_88FFEEDD, ymm_shuffle_00776655, ymm_wi[1])
# wr88FFEEDD = wr8, -, wr15 + wr1, wr15 - wr1, wr14 + wr2, wr14 - wr2, wr13 + wr3, wr13 - wr3
VFMADD231PS(ymm_wr_88FFEEDD, ymm_wr_00112233, Constant.float32x8(0.0, 0.0, +1.0, -1.0, +1.0, -1.0, +1.0, -1.0))
# wrCCBBAA99 = wr12 + wr4, wr12 - wr4, wr11 + wr5, wr11 - wr5, wr10 + wr6, wr10 - wr6, wr9 + wr7, wr9 - wr7
VFMADD231PS(ymm_wr_CCBBAA99, ymm_wr_44556677, Constant.float32x8(+1.0, -1.0, +1.0, -1.0, +1.0, -1.0, +1.0, -1.0))
# wi00112233 = _, wi0, wi1 - wi15, wi1 + wi15, wi2 - wi14, wi2 + wi14, wi3 - wi13, wi3 + wi13
VFMADD231PS(ymm_wi_00112233, ymm_wi_88FFEEDD, Constant.float32x8(0.0, 0.0, -1.0, +1.0, -1.0, +1.0, -1.0, +1.0))
# wi44556677 = wi4 - wi12, wi4 + wi12, wi5 - wi11, wi5 + wi11, wi6 - wi10, wi6 + wi10, wi7 - wi9, wi7 + wi9
VADDSUBPS(ymm_wi_44556677, ymm_wi_44556677, ymm_wi_CCBBAA99)
# xhr_lo = -, wi0, wr1 + wr15, wi1 + wi15, wr2 + wr14, wi2 + wi14, wr3 + wr13, wi3 + wi13
ymm_xhr_lo, ymm_xhr_hi = YMMRegister(), YMMRegister()
VBLENDPS(ymm_xhr_lo, ymm_wr_88FFEEDD, ymm_wi_00112233, 0b10101010)
VBLENDPS(ymm_xhr_hi, ymm_wr_CCBBAA99, ymm_wi_44556677, 0b10101010)
# xhi_lo = wr8, -, wi1 - wi15, wr15 - wr1, wi2 - wi14, wr14 - wr2, wi3 - wi13, wr13 - wr3
ymm_xhi_lo, ymm_xhi_hi = YMMRegister(), YMMRegister()
VBLENDPS(ymm_xhi_lo, ymm_wr_88FFEEDD, ymm_wi_00112233, 0b01010110)
VBLENDPS(ymm_xhi_hi, ymm_wr_CCBBAA99, ymm_wi_44556677, 0b01010101)
# xhr_lo = wr0, wi0, wr1 + wr15, wi1 + wi15, wr2 + wr14, wi2 + wi14, wr3 + wr13, wi3 + wi13
VBLENDPS(ymm_xhr_lo, ymm_xhr_lo, ymm_wr_00112233, 0b00000001)
# xhi_lo = wr8, wi8, wi1 - wi15, wr15 - wr1, wi2 - wi14, wr14 - wr2, wi3 - wi13, wr13 - wr3
VBLENDPS(ymm_xhi_lo, ymm_xhi_lo, ymm_wi_88FFEEDD, 0b00000010)
ymm_scale_factor_lo = YMMRegister()
VMOVAPS(ymm_scale_factor_lo, Constant.float32x8(1.0, 1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5))
VMULPS(ymm_xhr_lo, ymm_xhr_lo, ymm_scale_factor_lo)
VMULPS(ymm_xhi_lo, ymm_xhi_lo, ymm_scale_factor_lo)
ymm_scale_factor_hi = YMMRegister()
VMOVAPS(ymm_scale_factor_hi, Constant.float32x8(0.5))
VMULPS(ymm_xhr_hi, ymm_xhr_hi, ymm_scale_factor_hi)
VMULPS(ymm_xhi_hi, ymm_xhi_hi, ymm_scale_factor_hi)
# wRe[0], wIm[0], (wRe[1] + wRe[15]) / 2, (wIm[1] + wIm[15]) / 2,
# (wRe[2] + wRe[14]) / 2, (wIm[2] + wIm[14]) / 2, (wRe[3] + wRe[13]) / 2, (wIm[3] + wIm[13]) / 2
# (wRe[4] + wRe[12]) / 2, (wIm[4] + wIm[12]) / 2, (wRe[5] + wRe[11]) / 2, (wIm[5] + wIm[11]) / 2,
# (wRe[6] + wRe[10]) / 2, (wIm[6] + wIm[10]) / 2, (wRe[7] + wRe[9]) / 2, (wIm[7] + wIm[9]) / 2
# wRe[8], wIm[8], (wIm[1] - wIm[15]) / 2, (wRe[15] - wRe[1]) / 2,
# (wIm[2] - wIm[14]) / 2, (wRe[14] - wRe[2]) / 2, (wIm[3] - wIm[13]) / 2, (wRe[13] - wRe[3]) / 2
# (wIm[4] - wIm[12]) / 2, (wRe[12] - wRe[4]) / 2, (wIm[5] - wIm[11]) / 2, (wRe[11] - wRe[5]) / 2,
# (wIm[6] - wIm[10]) / 2, (wRe[10] - wRe[6]) / 2, (wIm[7] - wIm[9]) / 2, (wRe[9] - wRe[7]) / 2
SWAP.REGISTERS(ymm_xhr_lo, ymm_wr[0])
SWAP.REGISTERS(ymm_xhr_hi, ymm_wr[1])
SWAP.REGISTERS(ymm_xhi_lo, ymm_wi[0])
SWAP.REGISTERS(ymm_xhi_hi, ymm_wi[1])
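# Hedged reference sketch (editor addition): the two postprocess routines above
# appear to implement the classic "two real FFTs from one complex FFT" split.
# Two real sequences a and b (a pair of tile rows) are packed as w = a + i*b
# before the complex FFT; afterwards A[k] and B[k] are recovered from W[k] and
# conj(W[N-k]), which is where the (wRe[k] +/- wRe[N-k]) / 2 terms in the
# comments come from.  Lanes 0 and 1 need no halving because A[0] = Re(W[0]) and
# B[0] = Im(W[0]) (likewise for the N/2 bin).  This NumPy sketch shows the
# identity only, not the register-level lane layout; the helper name and the
# use of numpy.fft are editor assumptions.
def split_two_real_ffts_reference(a, b):
    import numpy as np
    w = np.fft.fft(np.asarray(a, dtype=np.float32) + 1j * np.asarray(b, dtype=np.float32))
    w_rev = np.conj(np.roll(w[::-1], 1))   # conj(W[N-k])
    fa = 0.5 * (w + w_rev)                 # FFT of the real sequence a
    fb = -0.5j * (w - w_rev)               # FFT of the real sequence b
    return fa, fb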
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
from common import cos_npi_over_8, sin_npi_over_8, cos_npi_over_4, sin_npi_over_4
from common import _MM_SHUFFLE
from common import butterfly, transpose2x2x128, transpose2x2x2x64, interleave
def fft8_within_rows(ymm_real_rows, ymm_imag_rows, transformation="forward"):
if isinstance(ymm_real_rows, YMMRegister) and isinstance(ymm_imag_rows, YMMRegister):
return fft8_within_rows([ymm_real_rows], [ymm_imag_rows], transformation)
assert isinstance(ymm_real_rows, list) and all(isinstance(ymm_real, YMMRegister) for ymm_real in ymm_real_rows)
assert isinstance(ymm_imag_rows, list) and all(isinstance(ymm_imag, YMMRegister) for ymm_imag in ymm_imag_rows)
assert transformation in {"forward", "inverse"}
ymm_fft8_butterfly_factor = YMMRegister()
VMOVAPS(ymm_fft8_butterfly_factor, Constant.float32x8(+1.0, +1.0, +1.0, +1.0, -1.0, -1.0, -1.0, -1.0))
# FFT8: Butterfly
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
ymm_real_flipped = YMMRegister()
VPERM2F128(ymm_real_flipped, ymm_real, ymm_real, 0x01)
VFMADD132PS(ymm_real, ymm_real_flipped, ymm_fft8_butterfly_factor)
ymm_imag_flipped = YMMRegister()
VPERM2F128(ymm_imag_flipped, ymm_imag, ymm_imag, 0x01)
VFMADD132PS(ymm_imag, ymm_imag_flipped, ymm_fft8_butterfly_factor)
# FFT8: Multiplication by twiddle factors
ymm_fft8_cos_twiddle_factor = YMMRegister()
VMOVAPS(ymm_fft8_cos_twiddle_factor, Constant.float32x8(1.0, 1.0, 1.0, 1.0, cos_npi_over_4[0], cos_npi_over_4[1], cos_npi_over_4[2], cos_npi_over_4[3]))
ymm_fft8_sin_twiddle_factor = YMMRegister()
VMOVAPS(ymm_fft8_sin_twiddle_factor, Constant.float32x8(0.0, 0.0, 0.0, 0.0, sin_npi_over_4[0], sin_npi_over_4[1], sin_npi_over_4[2], sin_npi_over_4[3]))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
ymm_new_real, ymm_new_imag = YMMRegister(), YMMRegister()
VMULPS(ymm_new_real, ymm_real, ymm_fft8_cos_twiddle_factor)
VMULPS(ymm_new_imag, ymm_imag, ymm_fft8_cos_twiddle_factor)
if transformation == "forward":
VFMADD231PS(ymm_new_real, ymm_imag, ymm_fft8_sin_twiddle_factor)
VFNMADD231PS(ymm_new_imag, ymm_real, ymm_fft8_sin_twiddle_factor)
else:
VFNMADD231PS(ymm_new_real, ymm_imag, ymm_fft8_sin_twiddle_factor)
VFMADD231PS(ymm_new_imag, ymm_real, ymm_fft8_sin_twiddle_factor)
SWAP.REGISTERS(ymm_real, ymm_new_real)
SWAP.REGISTERS(ymm_imag, ymm_new_imag)
# 2x FFT4: Butterfly
ymm_fft4_butterfly_factor = YMMRegister()
VMOVAPS(ymm_fft4_butterfly_factor, Constant.float32x8(+1.0, +1.0, -1.0, -1.0, +1.0, +1.0, -1.0, -1.0))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
ymm_real_flipped = YMMRegister()
VPERMILPS(ymm_real_flipped, ymm_real, _MM_SHUFFLE(1, 0, 3, 2))
VFMADD132PS(ymm_real, ymm_real_flipped, ymm_fft4_butterfly_factor)
ymm_imag_flipped = YMMRegister()
VPERMILPS(ymm_imag_flipped, ymm_imag, _MM_SHUFFLE(1, 0, 3, 2))
VFMADD132PS(ymm_imag, ymm_imag_flipped, ymm_fft4_butterfly_factor)
# 2x FFT4: Multiplication by twiddle factors
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
ymm_new_real, ymm_new_imag = YMMRegister(), YMMRegister()
VBLENDPS(ymm_new_real, ymm_real, ymm_imag, 0b10001000)
VBLENDPS(ymm_new_imag, ymm_imag, ymm_real, 0b10001000)
if transformation == "forward":
VXORPS(ymm_new_imag, ymm_new_imag, Constant.float32x8(+0.0, +0.0, +0.0, -0.0, +0.0, +0.0, +0.0, -0.0))
else:
VXORPS(ymm_new_real, ymm_new_real, Constant.float32x8(+0.0, +0.0, +0.0, -0.0, +0.0, +0.0, +0.0, -0.0))
SWAP.REGISTERS(ymm_real, ymm_new_real)
SWAP.REGISTERS(ymm_imag, ymm_new_imag)
# 4x FFT2: Butterfly
ymm_fft2_butterfly_factor = YMMRegister()
VMOVAPS(ymm_fft2_butterfly_factor, Constant.float32x8(+1.0, -1.0, +1.0, -1.0, +1.0, -1.0, +1.0, -1.0))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
ymm_real_flipped = YMMRegister()
VPERMILPS(ymm_real_flipped, ymm_real, _MM_SHUFFLE(2, 3, 0, 1))
VFMADD132PS(ymm_real, ymm_real_flipped, ymm_fft2_butterfly_factor)
ymm_imag_flipped = YMMRegister()
VPERMILPS(ymm_imag_flipped, ymm_imag, _MM_SHUFFLE(2, 3, 0, 1))
VFMADD132PS(ymm_imag, ymm_imag_flipped, ymm_fft2_butterfly_factor)
# Bit reversal
ymm_bit_reversal_mask = YMMRegister()
VMOVAPS(ymm_bit_reversal_mask, Constant.uint32x8(0, 4, 2, 6, 1, 5, 3, 7))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
VPERMPS(ymm_real, ymm_bit_reversal_mask, ymm_real)
VPERMPS(ymm_imag, ymm_bit_reversal_mask, ymm_imag)
# Scale
if transformation == "inverse":
ymm_scale_factor = YMMRegister()
VMOVAPS(ymm_scale_factor, Constant.float32x8(0.125))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
VMULPS(ymm_real, ymm_real, ymm_scale_factor)
VMULPS(ymm_imag, ymm_imag, ymm_scale_factor)
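# Hedged reference sketch (editor addition): fft8_within_rows above follows the
# radix-2 decimation-in-frequency pattern (butterfly across halves, twiddle
# multiply, smaller FFTs, final bit-reversal permute), carried out inside each
# 256-bit register.  A scalar NumPy sketch of the same recursion for any
# power-of-two length; only the forward direction is modeled, and none of the
# lane bookkeeping or the inverse 1/8 scaling.  The helper name is an editor
# assumption; for a power-of-two input it matches numpy.fft.fft(x).
def fft_dif_reference(x):
    import numpy as np
    x = np.asarray(x, dtype=complex)
    n = len(x)
    if n == 1:
        return x
    half = n // 2
    twiddle = np.exp(-2j * np.pi * np.arange(half) / n)
    even = fft_dif_reference(x[:half] + x[half:])             # butterfly sums -> even bins
    odd = fft_dif_reference((x[:half] - x[half:]) * twiddle)  # differences * twiddles -> odd bins
    out = np.empty(n, dtype=complex)
    out[0::2], out[1::2] = even, odd                          # interleaving realizes the bit reversal
    return out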
def fft16_within_rows(ymm_real_rows, ymm_imag_rows, bit_reversal=True):
if isinstance(ymm_real_rows, tuple) and isinstance(ymm_imag_rows, tuple):
return fft16_within_rows([ymm_real_rows], [ymm_imag_rows])
assert isinstance(ymm_real_rows, list) and all(isinstance(ymm_real, tuple) and all(isinstance(ymm, YMMRegister) for ymm in ymm_real) for ymm_real in ymm_real_rows)
assert isinstance(ymm_imag_rows, list) and all(isinstance(ymm_imag, tuple) and all(isinstance(ymm, YMMRegister) for ymm in ymm_imag) for ymm_imag in ymm_imag_rows)
# FFT16: Butterfly
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
butterfly(ymm_real[0], ymm_real[1])
butterfly(ymm_imag[0], ymm_imag[1])
# FFT16: Multiplication by twiddle factors
ymm_fft16_cos_twiddle_factor, ymm_fft16_sin_twiddle_factor = YMMRegister(), YMMRegister()
VMOVAPS(ymm_fft16_cos_twiddle_factor, Constant.float32x8(*cos_npi_over_8))
VMOVAPS(ymm_fft16_sin_twiddle_factor, Constant.float32x8(*sin_npi_over_8))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
ymm_new_real1, ymm_new_imag1 = YMMRegister(), YMMRegister()
VMULPS(ymm_new_real1, ymm_real[1], ymm_fft16_cos_twiddle_factor)
VMULPS(ymm_new_imag1, ymm_imag[1], ymm_fft16_cos_twiddle_factor)
VFMADD231PS(ymm_new_real1, ymm_imag[1], ymm_fft16_sin_twiddle_factor)
VFNMADD231PS(ymm_new_imag1, ymm_real[1], ymm_fft16_sin_twiddle_factor)
SWAP.REGISTERS(ymm_real[1], ymm_new_real1)
SWAP.REGISTERS(ymm_imag[1], ymm_new_imag1)
# 2x FFT8: Butterfly
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
transpose2x2x128(ymm_real[0], ymm_real[1])
transpose2x2x128(ymm_imag[0], ymm_imag[1])
# w[0] = x0 x1 x2 x3 x8 x9 x10 x11
# w[1] = x4 x5 x6 x7 x12 x13 x14 x15
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
butterfly(ymm_real[0], ymm_real[1])
butterfly(ymm_imag[0], ymm_imag[1])
# 2x FFT8: Multiplication by twiddle factors
ymm_fft8_cos_twiddle_factor, ymm_fft8_sin_twiddle_factor = YMMRegister(), YMMRegister()
VMOVAPS(ymm_fft8_cos_twiddle_factor, Constant.float32x8(*(cos_npi_over_4 * 2)))
VMOVAPS(ymm_fft8_sin_twiddle_factor, Constant.float32x8(*(sin_npi_over_4 * 2)))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
ymm_new_real1, ymm_new_imag1 = YMMRegister(), YMMRegister()
VMULPS(ymm_new_real1, ymm_real[1], ymm_fft8_cos_twiddle_factor)
VMULPS(ymm_new_imag1, ymm_imag[1], ymm_fft8_cos_twiddle_factor)
VFMADD231PS(ymm_new_real1, ymm_imag[1], ymm_fft8_sin_twiddle_factor)
VFNMADD231PS(ymm_new_imag1, ymm_real[1], ymm_fft8_sin_twiddle_factor)
SWAP.REGISTERS(ymm_real[1], ymm_new_real1)
SWAP.REGISTERS(ymm_imag[1], ymm_new_imag1)
# 4x FFT4: Butterfly
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
transpose2x2x2x64(ymm_real[0], ymm_real[1])
transpose2x2x2x64(ymm_imag[0], ymm_imag[1])
# w[0] = x0 x1 x4 x5 x8 x9 x12 x13
# w[1] = x2 x3 x6 x7 x10 x11 x14 x15
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
butterfly(ymm_real[0], ymm_real[1])
butterfly(ymm_imag[0], ymm_imag[1])
# 4x FFT4: Multiplication by twiddle factors and 8x FFT2: Butterfly
ymm_fft4_twiddle_factor = YMMRegister()
VMOVAPS(ymm_fft4_twiddle_factor, Constant.float32x8(+1.0, +1.0, -1.0, -1.0, +1.0, +1.0, -1.0, -1.0))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
ymm_new_real = YMMRegister(), YMMRegister()
VSHUFPS(ymm_new_real[0], ymm_real[0], ymm_real[1], _MM_SHUFFLE(2, 0, 2, 0))
VSHUFPS(ymm_new_real[1], ymm_real[0], ymm_imag[1], _MM_SHUFFLE(3, 1, 3, 1))
butterfly(ymm_new_real[0], ymm_new_real[1])
ymm_new_imag = YMMRegister(), YMMRegister()
VSHUFPS(ymm_new_imag[0], ymm_imag[0], ymm_imag[1], _MM_SHUFFLE(2, 0, 2, 0))
VSHUFPS(ymm_new_imag[1], ymm_imag[0], ymm_real[1], _MM_SHUFFLE(3, 1, 3, 1))
butterfly(ymm_new_imag[0], ymm_new_imag[1], scale_b=ymm_fft4_twiddle_factor)
SWAP.REGISTERS(ymm_real[0], ymm_new_real[0])
SWAP.REGISTERS(ymm_real[1], ymm_new_real[1])
SWAP.REGISTERS(ymm_imag[0], ymm_new_imag[0])
SWAP.REGISTERS(ymm_imag[1], ymm_new_imag[1])
# w[0] = x0 x4 x2 x6 x8 x12 x10 x14
# w[1] = x1 x5 x3 x7 x9 x11 x13 x15
if bit_reversal:
# Bit reversal
ymm_bit_reversal_mask = YMMRegister()
VMOVDQA(ymm_bit_reversal_mask, Constant.uint32x8(0, 4, 1, 5, 2, 6, 3, 7))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
for i in range(2):
VPERMPS(ymm_real[i], ymm_bit_reversal_mask, ymm_real[i])
VPERMPS(ymm_imag[i], ymm_bit_reversal_mask, ymm_imag[i])
def ifft16_within_rows(ymm_real_rows, ymm_imag_rows, bit_reversal=True):
if isinstance(ymm_real_rows, tuple) and isinstance(ymm_imag_rows, tuple):
return ifft16_within_rows([ymm_real_rows], [ymm_imag_rows])
assert isinstance(ymm_real_rows, list) and all(isinstance(ymm_real, tuple) and all(isinstance(ymm, YMMRegister) for ymm in ymm_real) for ymm_real in ymm_real_rows)
assert isinstance(ymm_imag_rows, list) and all(isinstance(ymm_imag, tuple) and all(isinstance(ymm, YMMRegister) for ymm in ymm_imag) for ymm_imag in ymm_imag_rows)
if bit_reversal:
# Bit reversal
# w[0] = x0 x8 x4 x12 x2 x10 x6 x14
# w[1] = x1 x9 x5 x13 x3 x11 x7 x15
ymm_bit_reversal_mask = YMMRegister()
VMOVDQA(ymm_bit_reversal_mask, Constant.uint32x8(0, 2, 4, 6, 1, 3, 5, 7))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
for i in range(2):
VPERMPS(ymm_real[i], ymm_bit_reversal_mask, ymm_real[i])
VPERMPS(ymm_imag[i], ymm_bit_reversal_mask, ymm_imag[i])
# 8x FFT2: Butterfly
# w[0] = x0 x4 x2 x6 x8 x12 x10 x14
# w[1] = x1 x5 x3 x7 x9 x13 x11 x15
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
butterfly(ymm_real[0], ymm_real[1])
butterfly(ymm_imag[0], ymm_imag[1])
ymm_new_real = YMMRegister(), YMMRegister()
VUNPCKLPS(ymm_new_real[0], ymm_real[0], ymm_real[1])
VUNPCKHPS(ymm_new_real[1], ymm_real[0], ymm_imag[1])
ymm_new_imag = YMMRegister(), YMMRegister()
VUNPCKLPS(ymm_new_imag[0], ymm_imag[0], ymm_imag[1])
VUNPCKHPS(ymm_new_imag[1], ymm_imag[0], ymm_real[1])
SWAP.REGISTERS(ymm_imag[0], ymm_new_imag[0])
SWAP.REGISTERS(ymm_imag[1], ymm_new_imag[1])
SWAP.REGISTERS(ymm_real[0], ymm_new_real[0])
SWAP.REGISTERS(ymm_real[1], ymm_new_real[1])
# w[0] = x0 x1 x4 x5 x8 x9 x12 x13
# w[1] = x2 x3 x6 x7 x10 x11 x14 x15
# 4x FFT4: Butterfly and multiplication by twiddle factors
ymm_fft4_twiddle_factor = YMMRegister()
VMOVAPS(ymm_fft4_twiddle_factor, Constant.float32x8(+1.0, -1.0, +1.0, -1.0, +1.0, -1.0, +1.0, -1.0))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
butterfly(ymm_real[0], ymm_real[1], scale_b=ymm_fft4_twiddle_factor)
butterfly(ymm_imag[0], ymm_imag[1])
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
transpose2x2x2x64(ymm_real[0], ymm_real[1])
transpose2x2x2x64(ymm_imag[0], ymm_imag[1])
# w[0] = x0 x1 x2 x3 x8 x9 x10 x11
# w[1] = x4 x5 x6 x7 x12 x13 x14 x15
# 2x FFT8: Multiplication by twiddle factors
ymm_fft8_cos_twiddle_factor, ymm_fft8_sin_twiddle_factor = YMMRegister(), YMMRegister()
VMOVAPS(ymm_fft8_cos_twiddle_factor, Constant.float32x8(*(cos_npi_over_4 * 2)))
VMOVAPS(ymm_fft8_sin_twiddle_factor, Constant.float32x8(*(sin_npi_over_4 * 2)))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
ymm_new_real1, ymm_new_imag1 = YMMRegister(), YMMRegister()
VMULPS(ymm_new_real1, ymm_real[1], ymm_fft8_cos_twiddle_factor)
VMULPS(ymm_new_imag1, ymm_imag[1], ymm_fft8_cos_twiddle_factor)
VFNMADD231PS(ymm_new_real1, ymm_imag[1], ymm_fft8_sin_twiddle_factor)
VFMADD231PS(ymm_new_imag1, ymm_real[1], ymm_fft8_sin_twiddle_factor)
SWAP.REGISTERS(ymm_real[1], ymm_new_real1)
SWAP.REGISTERS(ymm_imag[1], ymm_new_imag1)
# 2x FFT8: Butterfly
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
butterfly(ymm_real[0], ymm_real[1])
butterfly(ymm_imag[0], ymm_imag[1])
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
transpose2x2x128(ymm_real[0], ymm_real[1])
transpose2x2x128(ymm_imag[0], ymm_imag[1])
# w[0] = x0 x1 x2 x3 x4 x5 x6 x7
# w[1] = x8 x9 x10 x11 x12 x13 x14 x15
# FFT16: Multiplication by twiddle factors and scale
scale_factor = 0.0625
ymm_fft16_cos_scale_twiddle_factor, ymm_fft16_sin_scale_twiddle_factor = YMMRegister(), YMMRegister()
VMOVAPS(ymm_fft16_cos_scale_twiddle_factor, Constant.float32x8(*[cos * scale_factor for cos in cos_npi_over_8]))
VMOVAPS(ymm_fft16_sin_scale_twiddle_factor, Constant.float32x8(*[sin * scale_factor for sin in sin_npi_over_8]))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
ymm_new_real1, ymm_new_imag1 = YMMRegister(), YMMRegister()
VMULPS(ymm_new_real1, ymm_real[1], ymm_fft16_cos_scale_twiddle_factor)
VMULPS(ymm_new_imag1, ymm_imag[1], ymm_fft16_cos_scale_twiddle_factor)
VFNMADD231PS(ymm_new_real1, ymm_imag[1], ymm_fft16_sin_scale_twiddle_factor)
VFMADD231PS(ymm_new_imag1, ymm_real[1], ymm_fft16_sin_scale_twiddle_factor)
SWAP.REGISTERS(ymm_real[1], ymm_new_real1)
SWAP.REGISTERS(ymm_imag[1], ymm_new_imag1)
# FFT16: Butterfly and scale
ymm_scale_factor = YMMRegister()
VMOVAPS(ymm_scale_factor, Constant.float32x8(scale_factor))
for ymm_real, ymm_imag in zip(ymm_real_rows, ymm_imag_rows):
butterfly(ymm_real[0], ymm_real[1], scale_a=ymm_scale_factor)
butterfly(ymm_imag[0], ymm_imag[1], scale_a=ymm_scale_factor)
def fft4_across_rows(ymm_real, ymm_imag, transformation="forward"):
assert isinstance(ymm_real, list) and len(ymm_real) == 4
assert isinstance(ymm_imag, list) and len(ymm_imag) == 4
assert transformation in {"forward", "inverse"}
ymm_data = sum(zip(ymm_real, ymm_imag), ())
# FFT-4 Butterfly
for i in range(4):
butterfly(ymm_data[i], ymm_data[i + 4])
# Multiply by FFT-4 twiddle factors
SWAP.REGISTERS(ymm_real[3], ymm_imag[3])
# 2x FFT-2 Butterfly
butterfly(ymm_data[0], ymm_data[2])
butterfly(ymm_data[1], ymm_data[3])
if transformation == "forward":
butterfly(ymm_data[4], ymm_data[6])
butterfly(ymm_data[5], ymm_data[7], negate_b=True)
else:
butterfly(ymm_data[4], ymm_data[6], negate_b=True)
butterfly(ymm_data[5], ymm_data[7])
# Bit reversal: not needed
SWAP.REGISTERS(ymm_real[1], ymm_real[2])
SWAP.REGISTERS(ymm_imag[1], ymm_imag[2])
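# Hedged reference sketch (editor addition): fft4_across_rows above runs eight
# independent 4-point FFTs "vertically", one per SIMD lane, keeping real and
# imaginary parts in separate row registers (structure-of-arrays).  A NumPy
# model of the intent; the helper name, the (4, width) array layout and the use
# of numpy.fft are editor assumptions, and, as in the kernel, the inverse is
# left unnormalized (callers fold in the 1/N factor).
def fft4_across_rows_reference(real_rows, imag_rows, transformation="forward"):
    import numpy as np
    z = np.asarray(real_rows, dtype=np.float32) + 1j * np.asarray(imag_rows, dtype=np.float32)
    if transformation == "forward":
        f = np.fft.fft(z, axis=0)                # 4-point FFT down the rows, per column
    else:
        f = np.fft.ifft(z, axis=0) * z.shape[0]  # unnormalized inverse
    return np.real(f).astype(np.float32), np.imag(f).astype(np.float32)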
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
log2e = float.fromhex("+0x1.715476p+3")
magic_bias = float.fromhex("+0x1.800000p+23")
zero_cutoff = float.fromhex("-0x1.9FE368p+6")
inf_cutoff = float.fromhex("+0x1.62E42Ep+6")
minus_ln2_hi = float.fromhex("-0x1.62E430p-4")
minus_ln2_lo = float.fromhex("+0x1.05C610p-32")
plus_inf = float("inf")
c2 = float.fromhex("0x1.00088Ap-1")
c3 = float.fromhex("0x1.555A86p-3")
t0 = float.fromhex("0x1.000000p+0")
t1 = float.fromhex("0x1.172B84p+0")
t2 = float.fromhex("0x1.306FE0p+0")
t3 = float.fromhex("0x1.4BFDAEp+0")
t4 = float.fromhex("0x1.6A09E6p+0")
t5 = float.fromhex("0x1.8ACE54p+0")
t6 = float.fromhex("0x1.AE89FAp+0")
t7 = float.fromhex("0x1.D5818Ep+0")
min_exponent = (-126 << 23) & 0xFFFFFFFF
max_exponent = 127 << 23
default_exponent = 0x3F800000
mantissa_mask = 0x007FFFF8
def simd_exp(ymm_xs):
assert isinstance(ymm_xs, list) and all(isinstance(ymm_x, YMMRegister) for ymm_x in ymm_xs)
ymm_magic_bias = YMMRegister()
VMOVAPS(ymm_magic_bias, Constant.float32x8(magic_bias))
ymm_ys = [YMMRegister() for _ in ymm_xs]
var_e2s = [LocalVariable(YMMRegister.size) for _ in ymm_xs]
if len(ymm_xs) > 1:
const_log2e, const_mantissa_mask, const_lut = None, None, None
const_minus_ln2_hi, const_minus_ln2_lo = None, None
else:
const_lut = Constant.float32x8(t0, t1, t2, t3, t4, t5, t6, t7)
const_log2e = Constant.float32x8(log2e)
const_mantissa_mask = Constant.uint32x8(mantissa_mask)
const_minus_ln2_hi = Constant.float32x8(minus_ln2_hi)
const_minus_ln2_lo = Constant.float32x8(minus_ln2_lo)
for ymm_y, ymm_x, var_e2 in zip(ymm_ys, ymm_xs, var_e2s):
ymm_t = YMMRegister()
VMOVAPS(ymm_t, ymm_x)
if const_log2e is None:
const_log2e = YMMRegister()
VMOVAPS(const_log2e, Constant.float32x8(log2e))
VFMADD132PS(ymm_t, ymm_magic_bias, const_log2e)
if const_mantissa_mask is None:
const_mantissa_mask = YMMRegister()
VMOVDQA(const_mantissa_mask, Constant.uint32x8(mantissa_mask))
ymm_e2 = YMMRegister()
VPAND(ymm_e2, ymm_t, const_mantissa_mask)
VPSLLD(ymm_e2, ymm_e2, 20)
VMOVDQA(var_e2, ymm_e2)
if const_lut is None:
const_lut = YMMRegister()
VMOVAPS(const_lut, Constant.float32x8(t0, t1, t2, t3, t4, t5, t6, t7))
VPERMPS(ymm_y, ymm_t, const_lut)
VSUBPS(ymm_t, ymm_t, ymm_magic_bias)
# x = fma(t, minus_ln2_lo, fma(t, minus_ln2_hi, x))
# x := t * minus_ln2_hi + x
# x := t * minus_ln2_lo + x
if const_minus_ln2_hi is None:
const_minus_ln2_hi = YMMRegister()
VMOVAPS(const_minus_ln2_hi, Constant.float32x8(minus_ln2_hi))
VFMADD231PS(ymm_x, ymm_t, const_minus_ln2_hi)
if const_minus_ln2_lo is None:
const_minus_ln2_lo = YMMRegister()
VMOVAPS(const_minus_ln2_lo, Constant.float32x8(minus_ln2_lo))
VFMADD231PS(ymm_x, ymm_t, const_minus_ln2_lo)
if len(ymm_xs) > 1:
const_c3 = YMMRegister()
VMOVAPS(const_c3, Constant.float32x8(c3))
else:
const_c3 = Constant.float32x8(c3)
for ymm_x, ymm_y in zip(ymm_xs, ymm_ys):
# rf = fma(rx, rx * fma(rx, c3, c2), rx)
# rf := rx * c3 + c2
# rf := rx * rf
# rf := rx * rf + rx
ymm_rf = YMMRegister()
VMOVAPS(ymm_rf, Constant.float32x8(c2))
VFMADD231PS(ymm_rf, ymm_x, const_c3)
VMULPS(ymm_rf, ymm_rf, ymm_x)
VFMADD213PS(ymm_rf, ymm_x, ymm_x)
# y = fma(y, rf, y)
VFMADD231PS(ymm_y, ymm_y, ymm_rf)
if len(ymm_xs) > 1:
const_min_exponent, const_max_exponent = YMMRegister(), YMMRegister()
VMOVDQA(const_min_exponent, Constant.uint32x8(min_exponent))
VMOVDQA(const_max_exponent, Constant.uint32x8(max_exponent))
else:
const_min_exponent = Constant.uint32x8(min_exponent)
const_max_exponent = Constant.uint32x8(max_exponent)
ymm_default_exponent = YMMRegister()
VMOVDQA(ymm_default_exponent, Constant.uint32x8(default_exponent))
for ymm_x, ymm_y, var_e2 in zip(ymm_xs, ymm_ys, var_e2s):
ymm_e1, ymm_e2 = YMMRegister(), YMMRegister()
VMOVDQA(ymm_e2, var_e2)
VPMAXSD(ymm_e1, ymm_e2, const_min_exponent)
VPMINSD(ymm_e1, ymm_e1, const_max_exponent)
VPSUBD(ymm_e2, ymm_e2, ymm_e1)
VPADDD(ymm_e1, ymm_e1, ymm_default_exponent)
VPADDD(ymm_e2, ymm_e2, ymm_default_exponent)
VMULPS(ymm_y, ymm_y, ymm_e1)
VMULPS(ymm_y, ymm_y, ymm_e2)
return ymm_ys
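# Hedged reference sketch (editor addition): a scalar NumPy model of the vector
# exp above.  The kernel rounds x * 8/ln(2) to an integer n via the magic-bias
# trick, looks up 2^((n mod 8)/8) in the 8-entry table t0..t7, evaluates a
# degree-3 polynomial (c2, c3) for exp(r) - 1 on the reduced argument r, and
# rebuilds 2^(n/8) from raw exponent bits split into two factors so that
# extreme inputs degrade gracefully.  The helper name, the float64 arithmetic
# and np.ldexp are editor assumptions; the zero/inf cutoffs defined above are
# not modeled here.
def simd_exp_reference(x):
    import numpy as np
    x = np.asarray(x, dtype=np.float64)
    lut = 2.0 ** (np.arange(8) / 8.0)              # t0..t7
    n = np.rint(x * (8.0 / np.log(2.0)))           # (t - magic_bias) in the kernel
    j = n.astype(np.int64) & 7                     # low 3 bits -> LUT index
    e = (n.astype(np.int64) - j) >> 3              # remaining bits -> binary exponent
    r = x + n * (-np.log(2.0) / 8.0)               # reduced argument (hi/lo split omitted)
    poly = r + 0.5 * r * r + (1.0 / 6.0) * r ** 3  # ~ exp(r) - 1, like c2/c3 above
    y = lut[j] * (1.0 + poly)                      # y = t_j * exp(r)
    return np.ldexp(y, e.astype(np.int32))         # * 2^e (kernel splits this into e1 * e2)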
|
from __future__ import absolute_import
from __future__ import division
from peachpy import *
from peachpy.x86_64 import *
from common import _MM_SHUFFLE
from common import transpose2x2x128, transpose2x2x2x64, butterfly
def input_transform(ymm_d):
assert isinstance(ymm_d, list) and len(ymm_d) == 8 and all(isinstance(ymm, YMMRegister) for ymm in ymm_d)
ymm_wd = [YMMRegister() for _ in range(8)]
# wd0 = (d0 - d6) + 5.25 * (d4 - d2)
# wd1 = (d6 + d2 - 4.25 * d4) + (d1 + d5 - 4.25 * d3)
# wd2 = (d6 + d2 - 4.25 * d4) - (d1 + d5 - 4.25 * d3)
# wd3 = (d6 + 0.25 * d2 - 1.25 * d4) + 2.0 * (d5 + 0.25 * d1 - 1.25 * d3)
# wd4 = (d6 + 0.25 * d2 - 1.25 * d4) - 2.0 * (d5 + 0.25 * d1 - 1.25 * d3)
# wd5 = (d6 - 5.0 * d4 + 4.0 * d2) + 2.0 * (d1 + 0.25 * d5 - 1.25 * d3)
# wd6 = (d6 - 5.0 * d4 + 4.0 * d2) - 2.0 * (d1 + 0.25 * d5 - 1.25 * d3)
# wd7 = (d7 - d1) + 5.25 * (d3 - d5)
ymm_0_25 = YMMRegister()
VMOVAPS(ymm_0_25, Constant.float32x8(0.25))
# Compute wd0 := d0 - d6
VSUBPS(ymm_wd[0], ymm_d[0], ymm_d[6])
ymm_d4_sub_d2 = YMMRegister()
VSUBPS(ymm_d4_sub_d2, ymm_d[4], ymm_d[2])
# Compute wd7 := d7 - d1
VSUBPS(ymm_wd[7], ymm_d[7], ymm_d[1])
ymm_d3_sub_d5 = YMMRegister()
VSUBPS(ymm_d3_sub_d5, ymm_d[3], ymm_d[5])
# Compute wd1 := d2 + d6
VADDPS(ymm_wd[1], ymm_d[2], ymm_d[6])
# Compute wd2 := d1 + d5
VADDPS(ymm_wd[2], ymm_d[1], ymm_d[5])
# Compute wd4 := d5 + 0.25 * d1
VMOVAPS(ymm_wd[4], ymm_d[5])
VFMADD231PS(ymm_wd[4], ymm_d[1], ymm_0_25)
# Compute wd5 := d6 - 5.0 * d4
VMOVAPS(ymm_wd[5], Constant.float32x8(5.0))
VFNMADD132PS(ymm_wd[5], ymm_d[6], ymm_d[4])
# Compute wd3 := d6 + 0.25 * d2
VFMADD231PS(ymm_d[6], ymm_d[2], ymm_0_25)
SWAP.REGISTERS(ymm_wd[3], ymm_d[6])
# Compute wd6 := d1 + 0.25 * d5
VFMADD231PS(ymm_d[1], ymm_d[5], ymm_0_25)
SWAP.REGISTERS(ymm_wd[6], ymm_d[1])
ymm_5_25 = YMMRegister()
VMOVAPS(ymm_5_25, Constant.float32x8(5.25))
# Compute wd0 := (d0 - d6) + 5.25 * (d4 - d2)
VFMADD231PS(ymm_wd[0], ymm_d4_sub_d2, ymm_5_25)
# Compute wd7 := (d7 - d1) + 5.25 * (d3 - d5)
VFMADD231PS(ymm_wd[7], ymm_d3_sub_d5, ymm_5_25)
ymm_4_25 = YMMRegister()
VMOVAPS(ymm_4_25, Constant.float32x8(4.25))
# Compute
# wd1 := (d6 + d2) - 4.25 * d4
# wd2 := (d1 + d5) - 4.25 * d3
VFNMADD231PS(ymm_wd[1], ymm_d[4], ymm_4_25)
VFNMADD231PS(ymm_wd[2], ymm_d[3], ymm_4_25)
ymm_1_25 = YMMRegister()
VMOVAPS(ymm_1_25, Constant.float32x8(1.25))
# Compute
# wd3 := (d6 + 0.25 * d2) - 1.25 * d4
# wd4 := (d5 + 0.25 * d1) - 1.25 * d3
# wd6 := (d1 + 0.25 * d5) - 1.25 * d3
# wd5 := (d6 - 5.0 * d4) + 4.0 * d2
VFNMADD231PS(ymm_wd[3], ymm_d[4], ymm_1_25)
VFNMADD231PS(ymm_wd[4], ymm_d[3], ymm_1_25)
VFMADD231PS(ymm_wd[5], ymm_d[2], Constant.float32x8(4.0))
VFNMADD231PS(ymm_wd[6], ymm_d[3], ymm_1_25)
ymm_2 = YMMRegister()
VMOVAPS(ymm_2, Constant.float32x8(2.0))
butterfly(ymm_wd[1], ymm_wd[2])
butterfly(ymm_wd[3], ymm_wd[4], scale_b=ymm_2)
butterfly(ymm_wd[5], ymm_wd[6], scale_b=ymm_2)
return ymm_wd
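# Hedged reference sketch (editor addition): the same one-dimensional Winograd
# input transform (8-point tiles, 3x3 kernels), written directly from the
# wd0..wd7 formulas in the comments above and applied with NumPy; d is one
# 8-element slice (or an (8, ...) array of slices).  The helper name is an
# editor assumption; applying it along both axes of an 8x8 tile gives the full
# 2-D transform.
def winograd_input_transform_reference(d):
    import numpy as np
    d = np.asarray(d, dtype=np.float32)
    wd = np.empty_like(d)
    wd[0] = (d[0] - d[6]) + 5.25 * (d[4] - d[2])
    wd[1] = (d[6] + d[2] - 4.25 * d[4]) + (d[1] + d[5] - 4.25 * d[3])
    wd[2] = (d[6] + d[2] - 4.25 * d[4]) - (d[1] + d[5] - 4.25 * d[3])
    wd[3] = (d[6] + 0.25 * d[2] - 1.25 * d[4]) + 2.0 * (d[5] + 0.25 * d[1] - 1.25 * d[3])
    wd[4] = (d[6] + 0.25 * d[2] - 1.25 * d[4]) - 2.0 * (d[5] + 0.25 * d[1] - 1.25 * d[3])
    wd[5] = (d[6] - 5.0 * d[4] + 4.0 * d[2]) + 2.0 * (d[1] + 0.25 * d[5] - 1.25 * d[3])
    wd[6] = (d[6] - 5.0 * d[4] + 4.0 * d[2]) - 2.0 * (d[1] + 0.25 * d[5] - 1.25 * d[3])
    wd[7] = (d[7] - d[1]) + 5.25 * (d[3] - d[5])
    return wd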
def kernel_transform(g, rescale_coefficients=True):
assert isinstance(g, list) and len(g) == 3 and \
(all(isinstance(reg, XMMRegister) for reg in g) or all(isinstance(reg, YMMRegister) for reg in g))
minus_2_over_9 = float.fromhex("-0x1.C71C72p-3")
rcp_90 = float.fromhex( "0x1.6C16C2p-7")
rcp_180 = float.fromhex( "0x1.6C16C2p-8")
if isinstance(g[0], XMMRegister):
wg = [XMMRegister() for _ in range(8)]
const_2 = Constant.float32x4(2.0)
const_4 = Constant.float32x4(4.0)
const_minus_2_over_9 = Constant.float32x4(minus_2_over_9)
const_rcp_90 = Constant.float32x4(rcp_90)
const_rcp_180 = Constant.float32x4(rcp_180)
else:
wg = [YMMRegister() for _ in range(8)]
const_2 = Constant.float32x8(2.0)
const_4 = Constant.float32x8(4.0)
const_minus_2_over_9 = Constant.float32x8(minus_2_over_9)
const_rcp_90 = Constant.float32x8(rcp_90)
const_rcp_180 = Constant.float32x8(rcp_180)
# wg[0] = g0
# wg[1] = ((g0 + g2) + g1) * (-2.0 / 9)
# wg[2] = ((g0 + g2) - g1) * (-2.0 / 9)
# wg[3] = ((g0 + 4 * g2) + 2 * g1) * (1.0 / 90)
# wg[4] = ((g0 + 4 * g2) - 2 * g1) * (1.0 / 90)
# wg[5] = ((g2 + 4 * g0) + 2 * g1) * (1.0 / 180)
# wg[6] = ((g2 + 4 * g0) - 2 * g1) * (1.0 / 180)
# wg[7] = g2
# Compute wg[1] := g0 + g2
VADDPS(wg[1], g[0], g[2])
# Compute
# wg[3] := g0 + 4 * g2
# wg[5] := g2 + 4 * g0
VMOVAPS(wg[3], const_4)
VMOVAPS(wg[5], wg[3])
VFMADD132PS(wg[3], g[0], g[2])
VFMADD132PS(wg[5], g[2], g[0])
# Compute wg[1] and wg[2]
VSUBPS(wg[2], wg[1], g[1])
VADDPS(wg[1], wg[1], g[1])
var_2 = YMMRegister() if isinstance(g[0], YMMRegister) else XMMRegister()
VMOVAPS(var_2, const_2)
# Compute wg[3] and wg[4]
VMOVAPS(wg[4], wg[3])
VFNMADD231PS(wg[4], g[1], var_2)
VFMADD231PS(wg[3], g[1], var_2)
# Compute wg[5] and wg[6]
VMOVAPS(wg[6], wg[5])
VFNMADD231PS(wg[6], g[1], var_2)
VFMADD231PS(wg[5], g[1], var_2)
SWAP.REGISTERS(wg[0], g[0])
SWAP.REGISTERS(wg[7], g[2])
if rescale_coefficients:
VMULPS(wg[1], wg[1], const_minus_2_over_9)
VMULPS(wg[2], wg[2], const_minus_2_over_9)
VMULPS(wg[3], wg[3], const_rcp_90)
VMULPS(wg[4], wg[4], const_rcp_90)
VMULPS(wg[5], wg[5], const_rcp_180)
VMULPS(wg[6], wg[6], const_rcp_180)
return wg
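# Hedged reference sketch (editor addition): the kernel-side transform written
# out from the wg[0..7] formulas in the comments above; g is the 3-element
# kernel slice (or a (3, ...) array), and rescale_coefficients mirrors the flag
# of the generator.  The helper name is an editor assumption; applying it along
# both axes of a 3x3 kernel gives the full 2-D transform.
def winograd_kernel_transform_reference(g, rescale_coefficients=True):
    import numpy as np
    g = np.asarray(g, dtype=np.float32)
    wg = np.empty((8,) + g.shape[1:], dtype=np.float32)
    wg[0] = g[0]
    wg[1] = (g[0] + g[2]) + g[1]
    wg[2] = (g[0] + g[2]) - g[1]
    wg[3] = (g[0] + 4.0 * g[2]) + 2.0 * g[1]
    wg[4] = (g[0] + 4.0 * g[2]) - 2.0 * g[1]
    wg[5] = (g[2] + 4.0 * g[0]) + 2.0 * g[1]
    wg[6] = (g[2] + 4.0 * g[0]) - 2.0 * g[1]
    wg[7] = g[2]
    if rescale_coefficients:
        wg[1] *= -2.0 / 9.0
        wg[2] *= -2.0 / 9.0
        wg[3] *= 1.0 / 90.0
        wg[4] *= 1.0 / 90.0
        wg[5] *= 1.0 / 180.0
        wg[6] *= 1.0 / 180.0
    return wg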
def output_transform(ymm_m):
assert isinstance(ymm_m, list) and len(ymm_m) == 8 and all(isinstance(ymm, YMMRegister) for ymm in ymm_m)
ymm_s = [YMMRegister() for _ in range(6)]
# s0 = m0 + (m1 + m2) + (m3 + m4) + 32 * (m5 + m6)
# s1 = (m1 - m2) + 2 * (m3 - m4) + 16 * (m5 - m6)
# s2 = (m1 + m2) + 4 * (m3 + m4) + 8 * (m5 + m6)
# s3 = (m1 - m2) + 8 * (m3 - m4) + 4 * (m5 - m6)
# s4 = (m1 + m2) + 16 * (m3 + m4) + 2 * (m5 + m6)
# s5 = (m1 - m2) + 32 * (m3 - m4) + (m5 - m6) + m7
ymm_m1_add_m2, ymm_m1_sub_m2 = YMMRegister(), YMMRegister()
VADDPS(ymm_m1_add_m2, ymm_m[1], ymm_m[2])
VSUBPS(ymm_m1_sub_m2, ymm_m[1], ymm_m[2])
ymm_m3_add_m4, ymm_m3_sub_m4 = YMMRegister(), YMMRegister()
VADDPS(ymm_m3_add_m4, ymm_m[3], ymm_m[4])
VSUBPS(ymm_m3_sub_m4, ymm_m[3], ymm_m[4])
ymm_m5_add_m6, ymm_m5_sub_m6 = YMMRegister(), YMMRegister()
VADDPS(ymm_m5_add_m6, ymm_m[5], ymm_m[6])
VSUBPS(ymm_m5_sub_m6, ymm_m[5], ymm_m[6])
VADDPS(ymm_s[0], ymm_m[0], ymm_m1_add_m2)
VADDPS(ymm_s[5], ymm_m[7], ymm_m1_sub_m2)
ymm_16 = YMMRegister()
VMOVAPS(ymm_16, Constant.float32x8(16.0))
VMOVAPS(ymm_s[1], ymm_m1_sub_m2)
VFMADD231PS(ymm_s[1], ymm_m5_sub_m6, ymm_16)
VFMADD132PS(ymm_16, ymm_m1_add_m2, ymm_m3_add_m4)
SWAP.REGISTERS(ymm_s[4], ymm_16)
ymm_8 = YMMRegister()
VMOVAPS(ymm_8, Constant.float32x8(8.0))
VMOVAPS(ymm_s[2], ymm_m1_add_m2)
VFMADD231PS(ymm_s[2], ymm_m5_add_m6, ymm_8)
VFMADD132PS(ymm_8, ymm_m1_sub_m2, ymm_m3_sub_m4)
SWAP.REGISTERS(ymm_s[3], ymm_8)
ymm_32 = YMMRegister()
VMOVAPS(ymm_32, Constant.float32x8(32.0))
VFMADD231PS(ymm_s[0], ymm_m5_add_m6, ymm_32)
VFMADD231PS(ymm_s[5], ymm_m3_sub_m4, ymm_32)
ymm_2, ymm_4 = YMMRegister(), YMMRegister()
VMOVAPS(ymm_2, Constant.float32x8(2.0))
VADDPS(ymm_s[0], ymm_s[0], ymm_m3_add_m4)
VMOVAPS(ymm_4, Constant.float32x8(4.0))
VFMADD231PS(ymm_s[1], ymm_m3_sub_m4, ymm_2)
VFMADD231PS(ymm_s[4], ymm_m5_add_m6, ymm_2)
VFMADD231PS(ymm_s[2], ymm_m3_add_m4, ymm_4)
VFMADD231PS(ymm_s[3], ymm_m5_sub_m6, ymm_4)
VADDPS(ymm_s[5], ymm_s[5], ymm_m5_sub_m6)
return ymm_s
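# Hedged reference sketch (editor addition): the output transform written out
# from the s0..s5 formulas in the comments above; m is the 8-element transformed
# accumulator slice (or an (8, ...) array).  The helper name is an editor
# assumption; applied along both axes it turns an 8x8 tile of products into the
# 6x6 output tile.
def winograd_output_transform_reference(m):
    import numpy as np
    m = np.asarray(m, dtype=np.float32)
    s = np.empty((6,) + m.shape[1:], dtype=np.float32)
    s[0] = m[0] + (m[1] + m[2]) + (m[3] + m[4]) + 32.0 * (m[5] + m[6])
    s[1] = (m[1] - m[2]) + 2.0 * (m[3] - m[4]) + 16.0 * (m[5] - m[6])
    s[2] = (m[1] + m[2]) + 4.0 * (m[3] + m[4]) + 8.0 * (m[5] + m[6])
    s[3] = (m[1] - m[2]) + 8.0 * (m[3] - m[4]) + 4.0 * (m[5] - m[6])
    s[4] = (m[1] + m[2]) + 16.0 * (m[3] + m[4]) + 2.0 * (m[5] + m[6])
    s[5] = (m[1] - m[2]) + 32.0 * (m[3] - m[4]) + (m[5] - m[6]) + m[7]
    return s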
def transpose8x8(ymm_rows):
assert isinstance(ymm_rows, list) and len(ymm_rows) == 8 and all(isinstance(ymm_row, YMMRegister) for ymm_row in ymm_rows)
# ymm_rows[0] = ( g07, g06, g05, g04, g03, g02, g01, g00 )
# ymm_rows[1] = ( g17, g16, g15, g14, g13, g12, g11, g10 )
# ymm_rows[2] = ( g27, g26, g25, g24, g23, g22, g21, g20 )
# ymm_rows[3] = ( g37, g36, g35, g34, g33, g32, g31, g30 )
# ymm_rows[4] = ( g47, g46, g45, g44, g43, g42, g41, g40 )
# ymm_rows[5] = ( g57, g56, g55, g54, g53, g52, g51, g50 )
# ymm_rows[6] = ( g67, g66, g65, g64, g63, g62, g61, g60 )
# ymm_rows[7] = ( g77, g76, g75, g74, g73, g72, g71, g70 )
for ymm_even_row, ymm_odd_row in zip(ymm_rows[0::2], ymm_rows[1::2]):
ymm_temp = YMMRegister()
VUNPCKLPS(ymm_temp, ymm_even_row, ymm_odd_row)
VUNPCKHPS(ymm_odd_row, ymm_even_row, ymm_odd_row)
SWAP.REGISTERS(ymm_even_row, ymm_temp)
# ymm_rows[0] = ( g15, g05, g14, g04, g11, g01, g10, g00 )
# ymm_rows[1] = ( g17, g07, g16, g06, g13, g03, g12, g02 )
# ymm_rows[2] = ( g35, g25, g34, g24, g31, g21, g30, g20 )
# ymm_rows[3] = ( g37, g27, g36, g26, g33, g23, g32, g22 )
# ymm_rows[4] = ( g55, g45, g54, g44, g51, g41, g50, g40 )
# ymm_rows[5] = ( g57, g47, g56, g46, g53, g43, g52, g42 )
# ymm_rows[6] = ( g75, g65, g74, g64, g71, g61, g70, g60 )
# ymm_rows[7] = ( g77, g67, g76, g66, g73, g63, g72, g62 )
transpose2x2x2x64(ymm_rows[0], ymm_rows[2])
transpose2x2x2x64(ymm_rows[1], ymm_rows[3])
transpose2x2x2x64(ymm_rows[4], ymm_rows[6])
transpose2x2x2x64(ymm_rows[5], ymm_rows[7])
# ymm_rows[0] = ( g34, g24, g14, g04, g30, g20, g10, g00 )
# ymm_rows[1] = ( g36, g26, g16, g06, g32, g22, g12, g02 )
# ymm_rows[2] = ( g35, g25, g15, g05, g31, g21, g11, g01 )
# ymm_rows[3] = ( g37, g27, g17, g07, g33, g23, g13, g03 )
# ymm_rows[4] = ( g74, g64, g54, g44, g70, g60, g50, g40 )
# ymm_rows[5] = ( g76, g66, g56, g46, g72, g62, g52, g42 )
# ymm_rows[6] = ( g75, g65, g55, g45, g71, g61, g51, g41 )
# ymm_rows[7] = ( g77, g67, g57, g47, g73, g63, g53, g43 )
transpose2x2x128(ymm_rows[0], ymm_rows[4])
transpose2x2x128(ymm_rows[1], ymm_rows[5])
transpose2x2x128(ymm_rows[2], ymm_rows[6])
transpose2x2x128(ymm_rows[3], ymm_rows[7])
SWAP.REGISTERS(ymm_rows[1], ymm_rows[2])
SWAP.REGISTERS(ymm_rows[5], ymm_rows[6])
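# Hedged reference sketch (editor addition): the three in-register stages above
# (32-bit VUNPCKL/HPS pairs, 64-bit block transposes, then 128-bit lane swaps
# plus the final row swaps) compose to a plain 8x8 transpose, which the
# __main__ check at the bottom of this file also exercises.  The helper name is
# an editor assumption.
def transpose8x8_reference(rows):
    import numpy as np
    m = np.stack([np.asarray(row, dtype=np.float32) for row in rows])  # (8, 8), one register per row
    return list(m.T)                                                   # columns become rows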
def transpose6x8(ymm_rows):
assert isinstance(ymm_rows, list) and len(ymm_rows) == 6 and all(isinstance(ymm_row, YMMRegister) for ymm_row in ymm_rows)
# ymm_rows[0] = ( g07, g06, g05, g04, g03, g02, g01, g00 )
# ymm_rows[1] = ( g17, g16, g15, g14, g13, g12, g11, g10 )
# ymm_rows[2] = ( g27, g26, g25, g24, g23, g22, g21, g20 )
# ymm_rows[3] = ( g37, g36, g35, g34, g33, g32, g31, g30 )
# ymm_rows[4] = ( g47, g46, g45, g44, g43, g42, g41, g40 )
# ymm_rows[5] = ( g57, g56, g55, g54, g53, g52, g51, g50 )
for ymm_even_row, ymm_odd_row in zip(ymm_rows[0::2], ymm_rows[1::2]):
ymm_temp = YMMRegister()
VUNPCKLPS(ymm_temp, ymm_even_row, ymm_odd_row)
VUNPCKHPS(ymm_odd_row, ymm_even_row, ymm_odd_row)
SWAP.REGISTERS(ymm_even_row, ymm_temp)
# ymm_rows[0] = ( g15, g05, g14, g04, g11, g01, g10, g00 )
# ymm_rows[1] = ( g17, g07, g16, g06, g13, g03, g12, g02 )
# ymm_rows[2] = ( g35, g25, g34, g24, g31, g21, g30, g20 )
# ymm_rows[3] = ( g37, g27, g36, g26, g33, g23, g32, g22 )
# ymm_rows[4] = ( g55, g45, g54, g44, g51, g41, g50, g40 )
# ymm_rows[5] = ( g57, g47, g56, g46, g53, g43, g52, g42 )
ymm_zero_rows = [YMMRegister(), YMMRegister()]
for ymm_zero in ymm_zero_rows:
VXORPS(ymm_zero, ymm_zero, ymm_zero)
ymm_rows += ymm_zero_rows
# ymm_rows[6] = ( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 )
# ymm_rows[7] = ( 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 )
transpose2x2x2x64(ymm_rows[0], ymm_rows[2])
transpose2x2x2x64(ymm_rows[1], ymm_rows[3])
transpose2x2x2x64(ymm_rows[4], ymm_rows[6])
transpose2x2x2x64(ymm_rows[5], ymm_rows[7])
# ymm_rows[0] = ( g34, g24, g14, g04, g30, g20, g10, g00 )
# ymm_rows[1] = ( g36, g26, g16, g06, g32, g22, g12, g02 )
# ymm_rows[2] = ( g35, g25, g15, g05, g31, g21, g11, g01 )
# ymm_rows[3] = ( g37, g27, g17, g07, g33, g23, g13, g03 )
# ymm_rows[4] = ( 0.0, 0.0, g54, g44, 0.0, 0.0, g50, g40 )
# ymm_rows[5] = ( 0.0, 0.0, g56, g46, 0.0, 0.0, g52, g42 )
# ymm_rows[6] = ( 0.0, 0.0, g55, g45, 0.0, 0.0, g51, g41 )
# ymm_rows[7] = ( 0.0, 0.0, g57, g47, 0.0, 0.0, g53, g43 )
transpose2x2x128(ymm_rows[0], ymm_rows[4])
transpose2x2x128(ymm_rows[1], ymm_rows[5])
transpose2x2x128(ymm_rows[2], ymm_rows[6])
transpose2x2x128(ymm_rows[3], ymm_rows[7])
SWAP.REGISTERS(ymm_rows[1], ymm_rows[2])
SWAP.REGISTERS(ymm_rows[5], ymm_rows[6])
return ymm_rows
def transpose8x3(xmm_rows):
assert isinstance(xmm_rows, list) and len(xmm_rows) == 8 and all(isinstance(xmm_row, XMMRegister) for xmm_row in xmm_rows)
# xmm_rows[0] = ( 0.0, g02, g01, g00 )
# xmm_rows[1] = ( 0.0, g12, g11, g10 )
# xmm_rows[2] = ( 0.0, g22, g21, g20 )
# xmm_rows[3] = ( 0.0, g32, g31, g30 )
# xmm_rows[4] = ( 0.0, g42, g41, g40 )
# xmm_rows[5] = ( 0.0, g52, g51, g50 )
# xmm_rows[6] = ( 0.0, g62, g61, g60 )
# xmm_rows[7] = ( 0.0, g72, g71, g70 )
ymm_rows = [YMMRegister() for _ in range(4)]
VINSERTF128(ymm_rows[0], xmm_rows[0].as_ymm, xmm_rows[4], 1)
VINSERTF128(ymm_rows[1], xmm_rows[1].as_ymm, xmm_rows[5], 1)
VINSERTF128(ymm_rows[2], xmm_rows[2].as_ymm, xmm_rows[6], 1)
VINSERTF128(ymm_rows[3], xmm_rows[3].as_ymm, xmm_rows[7], 1)
# ymm_rows[0] = ( 0.0, g42, g41, g40, 0.0, g02, g01, g00 )
# ymm_rows[1] = ( 0.0, g52, g51, g50, 0.0, g12, g11, g10 )
# ymm_rows[2] = ( 0.0, g62, g61, g60, 0.0, g22, g21, g20 )
# ymm_rows[3] = ( 0.0, g72, g71, g70, 0.0, g32, g31, g30 )
ymm_new_rows = [YMMRegister() for _ in range(4)]
VUNPCKLPS(ymm_new_rows[0], ymm_rows[0], ymm_rows[1])
VUNPCKHPS(ymm_new_rows[1], ymm_rows[0], ymm_rows[1])
VUNPCKLPS(ymm_new_rows[2], ymm_rows[2], ymm_rows[3])
VUNPCKHPS(ymm_new_rows[3], ymm_rows[2], ymm_rows[3])
for ymm_row, ymm_new_row in zip(ymm_rows, ymm_new_rows):
SWAP.REGISTERS(ymm_row, ymm_new_row)
# ymm_rows[0] = ( g51, g41, g50, g40, g11, g01, g10, g00 )
# ymm_rows[1] = ( 0.0, 0.0, g52, g42, 0.0, 0.0, g12, g02 )
# ymm_rows[2] = ( g71, g61, g70, g60, g31, g21, g30, g20 )
# ymm_rows[3] = ( 0.0, 0.0, g72, g62, 0.0, 0.0, g32, g22 )
# ymm_rows[0] = ( g70, g60, g50, g40, g30, g20, g10, g00 )
# ymm_rows[2] = ( g71, g61, g51, g41, g31, g21, g11, g01 )
transpose2x2x2x64(ymm_rows[0], ymm_rows[2])
# ymm_rows[1] = ( g72, g62, g52, g42, g32, g22, g12, g02 )
VUNPCKLPD(ymm_rows[1], ymm_rows[1], ymm_rows[3])
SWAP.REGISTERS(ymm_rows[1], ymm_rows[2])
return ymm_rows[0:3]
if __name__ == "__main__":
import numpy
numpy.set_printoptions(linewidth=120)
import ctypes
arg_i = Argument(ptr(const_float_))
arg_o = Argument(ptr(float_))
with Function("transpose8x3", (arg_i, arg_o)) as transpose8x3_asm:
reg_i = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_i, arg_i)
reg_o = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_o, arg_o)
xmm_load_mask = XMMRegister()
VMOVAPS(xmm_load_mask, Constant.float32x4(-0.0, -0.0, -0.0, +0.0))
xmm_data = [XMMRegister() for i in range(8)]
for i, xmm in enumerate(xmm_data):
VMASKMOVPS(xmm, xmm_load_mask, [reg_i + i * 12])
ymm_data = transpose8x3(xmm_data)
for i, ymm in enumerate(ymm_data):
VMOVUPS([reg_o + i * 32], ymm)
RETURN()
with Function("transpose8x8", (arg_i, arg_o)) as transpose8x8_asm:
reg_i = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_i, arg_i)
reg_o = GeneralPurposeRegister64()
LOAD.ARGUMENT(reg_o, arg_o)
ymm_data = [YMMRegister() for i in range(8)]
for i, ymm in enumerate(ymm_data):
VMOVUPS(ymm, [reg_i + i * 32])
transpose8x8(ymm_data)
for i, ymm in enumerate(ymm_data):
VMOVUPS([reg_o + i * 32], ymm)
RETURN()
transpose8x3_fn = transpose8x3_asm.finalize(abi.detect()).encode().load()
transpose8x8_fn = transpose8x8_asm.finalize(abi.detect()).encode().load()
i = numpy.random.random(8 * 3).astype(numpy.float32)
o = numpy.empty(8 * 3, numpy.float32)
transpose8x3_fn(
i.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
o.ctypes.data_as(ctypes.POINTER(ctypes.c_float)))
# print(i.reshape([8, 3]))
# print(o.reshape([3, 8]).T)
i = numpy.random.random(8 * 8).astype(numpy.float32)
o = numpy.empty(8 * 8, numpy.float32)
transpose8x8_fn(
i.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
o.ctypes.data_as(ctypes.POINTER(ctypes.c_float)))
print(i.reshape([8, 8]))
print(o.reshape([8, 8]).T)
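# Hedged editor addition: the two matrices printed above should match; with the
# arrays created in this self-test the same check can be automated as
#   numpy.testing.assert_allclose(o.reshape([8, 8]).T, i.reshape([8, 8]))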
|
#!/usr/bin/env python
'''
This module contains some common routines used by other samples.
'''
import numpy as np
import cv2 as cv
# built-in modules
import os
import itertools as it
from contextlib import contextmanager
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
class Bunch(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def __str__(self):
return str(self.__dict__)
def splitfn(fn):
path, fn = os.path.split(fn)
name, ext = os.path.splitext(fn)
return path, name, ext
def anorm2(a):
return (a*a).sum(-1)
def anorm(a):
return np.sqrt( anorm2(a) )
def homotrans(H, x, y):
xs = H[0, 0]*x + H[0, 1]*y + H[0, 2]
ys = H[1, 0]*x + H[1, 1]*y + H[1, 2]
s = H[2, 0]*x + H[2, 1]*y + H[2, 2]
return xs/s, ys/s
def to_rect(a):
a = np.ravel(a)
if len(a) == 2:
a = (0, 0, a[0], a[1])
return np.array(a, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
src, dst = to_rect(src), to_rect(dst)
cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])
tx, ty = dst[0] - src[0] * (cx, cy)
M = np.float64([[ cx, 0, tx],
[ 0, cy, ty],
[ 0, 0, 1]])
return M
def lookat(eye, target, up = (0, 0, 1)):
fwd = np.asarray(target, np.float64) - eye
fwd /= anorm(fwd)
right = np.cross(fwd, up)
right /= anorm(right)
down = np.cross(fwd, right)
R = np.float64([right, down, fwd])
tvec = -np.dot(R, eye)
return R, tvec
def mtx2rvec(R):
w, u, vt = cv.SVDecomp(R - np.eye(3))
p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
c = np.dot(vt[0], p)
s = np.dot(vt[1], p)
axis = np.cross(vt[0], vt[1])
return axis * np.arctan2(s, c)
def draw_str(dst, (x, y), s):
cv.putText(dst, s, (x+1, y+1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv.LINE_AA)
cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)
class Sketcher:
def __init__(self, windowname, dests, colors_func):
self.prev_pt = None
self.windowname = windowname
self.dests = dests
self.colors_func = colors_func
self.dirty = False
self.show()
cv.setMouseCallback(self.windowname, self.on_mouse)
def show(self):
cv.imshow(self.windowname, self.dests[0])
def on_mouse(self, event, x, y, flags, param):
pt = (x, y)
if event == cv.EVENT_LBUTTONDOWN:
self.prev_pt = pt
elif event == cv.EVENT_LBUTTONUP:
self.prev_pt = None
if self.prev_pt and flags & cv.EVENT_FLAG_LBUTTON:
for dst, color in zip(self.dests, self.colors_func()):
cv.line(dst, self.prev_pt, pt, color, 5)
self.dirty = True
self.prev_pt = pt
self.show()
# palette data from matplotlib/_cm.py
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
cmap_data = { 'jet' : _jet_data }
def make_cmap(name, n=256):
data = cmap_data[name]
xs = np.linspace(0.0, 1.0, n)
channels = []
eps = 1e-6
for ch_name in ['blue', 'green', 'red']:
ch_data = data[ch_name]
xp, yp = [], []
for x, y1, y2 in ch_data:
xp += [x, x+eps]
yp += [y1, y2]
ch = np.interp(xs, xp, yp)
channels.append(ch)
return np.uint8(np.array(channels).T*255)
def nothing(*arg, **kw):
pass
def clock():
return cv.getTickCount() / cv.getTickFrequency()
@contextmanager
def Timer(msg):
print msg, '...',
start = clock()
try:
yield
finally:
print "%.2f ms" % ((clock()-start)*1000)
class StatValue:
def __init__(self, smooth_coef = 0.5):
self.value = None
self.smooth_coef = smooth_coef
def update(self, v):
if self.value is None:
self.value = v
else:
c = self.smooth_coef
self.value = c * self.value + (1.0-c) * v
class RectSelector:
def __init__(self, win, callback):
self.win = win
self.callback = callback
cv.setMouseCallback(win, self.onmouse)
self.drag_start = None
self.drag_rect = None
def onmouse(self, event, x, y, flags, param):
x, y = np.int16([x, y]) # BUG
if event == cv.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
if self.drag_start:
if flags & cv.EVENT_FLAG_LBUTTON:
xo, yo = self.drag_start
x0, y0 = np.minimum([xo, yo], [x, y])
x1, y1 = np.maximum([xo, yo], [x, y])
self.drag_rect = None
if x1-x0 > 0 and y1-y0 > 0:
self.drag_rect = (x0, y0, x1, y1)
else:
rect = self.drag_rect
self.drag_start = None
self.drag_rect = None
if rect:
self.callback(rect)
def draw(self, vis):
if not self.drag_rect:
return False
x0, y0, x1, y1 = self.drag_rect
cv.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
return True
@property
def dragging(self):
return self.drag_rect is not None
def grouper(n, iterable, fillvalue=None):
'''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
args = [iter(iterable)] * n
return it.izip_longest(fillvalue=fillvalue, *args)
def mosaic(w, imgs):
'''Make a grid from images.
w -- number of grid columns
imgs -- images (must have same size and format)
'''
imgs = iter(imgs)
img0 = imgs.next()
pad = np.zeros_like(img0)
imgs = it.chain([img0], imgs)
rows = grouper(w, imgs, pad)
return np.vstack(map(np.hstack, rows))
def getsize(img):
h, w = img.shape[:2]
return w, h
def mdot(*args):
return reduce(np.dot, args)
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
for kp in keypoints:
x, y = kp.pt
cv.circle(vis, (int(x), int(y)), 2, color)
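# Hedged usage sketch (editor addition): a minimal example of combining a few of
# the helpers above.  The window name, colours and frame sizes are arbitrary
# assumptions, and the function is not called anywhere in this module.
def _demo_mosaic():
    imgs = [np.full((120, 160, 3), c, np.uint8) for c in (64, 128, 192, 255)]
    grid = mosaic(2, imgs)                    # 2 columns -> a 2x2 grid of frames
    draw_str(grid, (10, 20), 'mosaic demo')   # shadowed label in the top-left corner
    cv.imshow('mosaic demo', grid)
    cv.waitKey(0)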
|
#!/usr/bin/env python
'''
This example shows the line extraction functionality of the LSDDetector class.
USAGE: lsd_lines_extraction.py [<path_to_input_image>]
'''
import sys
import cv2 as cv
if __name__ == '__main__':
print(__doc__)
if len(sys.argv) > 1:
fname = sys.argv[1]
else :
fname = '../data/corridor.jpg'
img = cv.imread(fname)
if img is None:
print('Failed to load image file:', fname)
sys.exit(1)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
lsd = cv.line_descriptor_LSDDetector.createLSDDetector()
lines = lsd.detect(gray, 2, 1)
for kl in lines:
if kl.octave == 0:
# cv.line only accepts integer coordinates
pt1 = (int(kl.startPointX), int(kl.startPointY))
pt2 = (int(kl.endPointX), int(kl.endPointY))
cv.line(img, pt1, pt2, [255, 0, 0], 2)
cv.imshow('output', img)
cv.waitKey(0)
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
This sample demonstrates SEEDS Superpixels segmentation
Use [space] to toggle output mode
Usage:
seeds.py [<video source>]
'''
import numpy as np
import cv2 as cv
# relative module
import video
# built-in module
import sys
if __name__ == '__main__':
print __doc__
try:
fn = sys.argv[1]
except:
fn = 0
def nothing(*arg):
pass
cv.namedWindow('SEEDS')
cv.createTrackbar('Number of Superpixels', 'SEEDS', 400, 1000, nothing)
cv.createTrackbar('Iterations', 'SEEDS', 4, 12, nothing)
seeds = None
display_mode = 0
num_superpixels = 400
prior = 2
num_levels = 4
num_histogram_bins = 5
cap = video.create_capture(fn)
while True:
flag, img = cap.read()
converted_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
height,width,channels = converted_img.shape
num_superpixels_new = cv.getTrackbarPos('Number of Superpixels', 'SEEDS')
num_iterations = cv.getTrackbarPos('Iterations', 'SEEDS')
if not seeds or num_superpixels_new != num_superpixels:
num_superpixels = num_superpixels_new
seeds = cv.ximgproc.createSuperpixelSEEDS(width, height, channels,
num_superpixels, num_levels, prior, num_histogram_bins)
color_img = np.zeros((height,width,3), np.uint8)
color_img[:] = (0, 0, 255)
seeds.iterate(converted_img, num_iterations)
# retrieve the segmentation result
labels = seeds.getLabels()
# labels output: use the last x bits to determine the color
num_label_bits = 2
labels &= (1<<num_label_bits)-1
labels *= 1<<(16-num_label_bits)
mask = seeds.getLabelContourMask(False)
# stitch foreground & background together
mask_inv = cv.bitwise_not(mask)
result_bg = cv.bitwise_and(img, img, mask=mask_inv)
result_fg = cv.bitwise_and(color_img, color_img, mask=mask)
result = cv.add(result_bg, result_fg)
if display_mode == 0:
cv.imshow('SEEDS', result)
elif display_mode == 1:
cv.imshow('SEEDS', mask)
else:
cv.imshow('SEEDS', labels)
ch = cv.waitKey(1)
if ch == 27:
break
elif ch & 0xff == ord(' '):
display_mode = (display_mode + 1) % 3
cv.destroyAllWindows()
|
#!/usr/bin/env python
'''
Video capture sample.
Sample shows how the VideoCapture class can be used to acquire video
frames from a camera or a movie file. Also the sample provides
an example of procedural video generation by an object, mimicking
the VideoCapture interface (see Chess class).
'create_capture' is a convenience function for capture creation,
falling back to procedural video in case of error.
Usage:
video.py [--shotdir <shot path>] [source0] [source1] ...
sourceN is an
- integer number for camera capture
- name of video file
- synth:<params> for procedural video
Synth examples:
synth:bg=../cpp/lena.jpg:noise=0.1
synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480
Keys:
ESC - exit
SPACE - save current frame to <shot path> directory
'''
import numpy as np
from numpy import pi, sin, cos
import cv2 as cv
# built-in modules
from time import clock
# local modules
import common
class VideoSynthBase(object):
def __init__(self, size=None, noise=0.0, bg = None, **params):
self.bg = None
self.frame_size = (640, 480)
if bg is not None:
self.bg = cv.imread(bg, 1)
h, w = self.bg.shape[:2]
self.frame_size = (w, h)
if size is not None:
w, h = map(int, size.split('x'))
self.frame_size = (w, h)
self.bg = cv.resize(self.bg, self.frame_size)
self.noise = float(noise)
def render(self, dst):
pass
def read(self, dst=None):
w, h = self.frame_size
if self.bg is None:
buf = np.zeros((h, w, 3), np.uint8)
else:
buf = self.bg.copy()
self.render(buf)
if self.noise > 0.0:
noise = np.zeros((h, w, 3), np.int8)
cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
buf = cv.add(buf, noise, dtype=cv.CV_8UC3)
return True, buf
def isOpened(self):
return True
class Chess(VideoSynthBase):
def __init__(self, **kw):
super(Chess, self).__init__(**kw)
w, h = self.frame_size
self.grid_size = sx, sy = 10, 7
white_quads = []
black_quads = []
for i, j in np.ndindex(sy, sx):
q = [[j, i, 0], [j+1, i, 0], [j+1, i+1, 0], [j, i+1, 0]]
[white_quads, black_quads][(i + j) % 2].append(q)
self.white_quads = np.float32(white_quads)
self.black_quads = np.float32(black_quads)
fx = 0.9
self.K = np.float64([[fx*w, 0, 0.5*(w-1)],
[0, fx*w, 0.5*(h-1)],
[0.0,0.0, 1.0]])
self.dist_coef = np.float64([-0.2, 0.1, 0, 0])
self.t = 0
def draw_quads(self, img, quads, color = (0, 255, 0)):
img_quads = cv.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0]
img_quads.shape = quads.shape[:2] + (2,)
for q in img_quads:
cv.fillConvexPoly(img, np.int32(q*4), color, cv.LINE_AA, shift=2)
def render(self, dst):
t = self.t
self.t += 1.0/30.0
sx, sy = self.grid_size
center = np.array([0.5*sx, 0.5*sy, 0.0])
phi = pi/3 + sin(t*3)*pi/8
c, s = cos(phi), sin(phi)
ofs = np.array([sin(1.2*t), cos(1.8*t), 0]) * sx * 0.2
eye_pos = center + np.array([cos(t)*c, sin(t)*c, s]) * 15.0 + ofs
target_pos = center + ofs
R, self.tvec = common.lookat(eye_pos, target_pos)
self.rvec = common.mtx2rvec(R)
self.draw_quads(dst, self.white_quads, (245, 245, 245))
self.draw_quads(dst, self.black_quads, (10, 10, 10))
classes = dict(chess=Chess)
presets = dict(
empty = 'synth:',
lena = 'synth:bg=../cpp/lena.jpg:noise=0.1',
chess = 'synth:class=chess:bg=../cpp/lena.jpg:noise=0.1:size=640x480'
)
def create_capture(source = 0, fallback = presets['chess']):
'''source: <int> or '<int>|<filename>|synth [:<param_name>=<value> [:...]]'
'''
source = str(source).strip()
chunks = source.split(':')
# handle drive letter ('c:', ...)
if len(chunks) > 1 and len(chunks[0]) == 1 and chunks[0].isalpha():
chunks[1] = chunks[0] + ':' + chunks[1]
del chunks[0]
source = chunks[0]
try: source = int(source)
except ValueError: pass
params = dict( s.split('=') for s in chunks[1:] )
cap = None
if source == 'synth':
Class = classes.get(params.get('class', None), VideoSynthBase)
try: cap = Class(**params)
except: pass
else:
cap = cv.VideoCapture(source)
if 'size' in params:
w, h = map(int, params['size'].split('x'))
cap.set(cv.CAP_PROP_FRAME_WIDTH, w)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, h)
if cap is None or not cap.isOpened():
print 'Warning: unable to open video source: ', source
if fallback is not None:
return create_capture(fallback, None)
return cap
if __name__ == '__main__':
import sys
import getopt
print __doc__
args, sources = getopt.getopt(sys.argv[1:], '', 'shotdir=')
args = dict(args)
shotdir = args.get('--shotdir', '.')
if len(sources) == 0:
sources = [ 0 ]
caps = map(create_capture, sources)
shot_idx = 0
while True:
imgs = []
for i, cap in enumerate(caps):
ret, img = cap.read()
imgs.append(img)
cv.imshow('capture %d' % i, img)
ch = 0xFF & cv.waitKey(1)
if ch == 27:
break
if ch == ord(' '):
for i, img in enumerate(imgs):
fn = '%s/shot_%d_%03d.bmp' % (shotdir, i, shot_idx)
cv.imwrite(fn, img)
print fn, 'saved'
shot_idx += 1
cv.destroyAllWindows()
|
import numpy as np
import cv2 as cv
# aruco config
adict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
cv.imshow("marker", cv.aruco.drawMarker(adict, 0, 400))
marker_len = 5
# rapid config
obj_points = np.float32([[-0.5, 0.5, 0], [0.5, 0.5, 0], [0.5, -0.5, 0], [-0.5, -0.5, 0]]) * marker_len
tris = np.int32([[0, 2, 1], [0, 3, 2]]) # note CCW order for culling
line_len = 10
# random calibration data. your mileage may vary.
imsize = (800, 600)
K = cv.getDefaultNewCameraMatrix(np.diag([800, 800, 1]), imsize, True)
# video capture
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, imsize[0])
cap.set(cv.CAP_PROP_FRAME_HEIGHT, imsize[1])
rot, trans = None, None
while cv.waitKey(1) != 27:
img = cap.read()[1]
# detection with aruco
if rot is None:
corners, ids = cv.aruco.detectMarkers(img, adict)[:2]
if ids is not None:
rvecs, tvecs = cv.aruco.estimatePoseSingleMarkers(corners, marker_len, K, None)[:2]
rot, trans = rvecs[0].ravel(), tvecs[0].ravel()
# tracking and refinement with rapid
if rot is not None:
for i in range(5): # multiple iterations
ratio, rot, trans = cv.rapid.rapid(img, 40, line_len, obj_points, tris, K, rot, trans)[:3]
if ratio < 0.8:
# bad quality, force re-detect
rot, trans = None, None
break
# drawing
cv.putText(img, "detecting" if rot is None else "tracking", (0, 20), cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255))
if rot is not None:
cv.drawFrameAxes(img, K, None, rot, trans, marker_len)
cv.imshow("tracking", img)
|
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import os, numpy
import cv2 as cv
from tests_common import NewOpenCVTests
class structured_light_test(NewOpenCVTests):
def test_unwrap(self):
paramsPsp = cv.structured_light_SinusoidalPattern_Params()
paramsFtp = cv.structured_light_SinusoidalPattern_Params()
paramsFaps = cv.structured_light_SinusoidalPattern_Params()
paramsPsp.methodId = cv.structured_light.PSP
paramsFtp.methodId = cv.structured_light.FTP
paramsFaps.methodId = cv.structured_light.FAPS
sinusPsp = cv.structured_light.SinusoidalPattern_create(paramsPsp)
sinusFtp = cv.structured_light.SinusoidalPattern_create(paramsFtp)
sinusFaps = cv.structured_light.SinusoidalPattern_create(paramsFaps)
captures = []
for i in range(0,3):
capture = self.get_sample('/cv/structured_light/data/capture_sin_%d.jpg'%i, cv.IMREAD_GRAYSCALE)
if capture is None:
raise unittest.SkipTest("Missing files with test data")
captures.append(capture)
rows,cols = captures[0].shape
unwrappedPhaseMapPspRef = self.get_sample('/cv/structured_light/data/unwrappedPspTest.jpg',
cv.IMREAD_GRAYSCALE)
unwrappedPhaseMapFtpRef = self.get_sample('/cv/structured_light/data/unwrappedFtpTest.jpg',
cv.IMREAD_GRAYSCALE)
unwrappedPhaseMapFapsRef = self.get_sample('/cv/structured_light/data/unwrappedFapsTest.jpg',
cv.IMREAD_GRAYSCALE)
wrappedPhaseMap, shadowMask = sinusPsp.computePhaseMap(captures)
unwrappedPhaseMap = sinusPsp.unwrapPhaseMap(wrappedPhaseMap, (cols, rows), shadowMask=shadowMask)
unwrappedPhaseMap8 = unwrappedPhaseMap*1 + 128
unwrappedPhaseMap8 = numpy.uint8(unwrappedPhaseMap8)
sumOfDiff = 0
count = 0
for i in range(rows):
for j in range(cols):
ref = int(unwrappedPhaseMapPspRef[i, j])
comp = int(unwrappedPhaseMap8[i, j])
sumOfDiff += (ref - comp)
count += 1
ratio = sumOfDiff/float(count)
self.assertLessEqual(ratio, 0.2)
wrappedPhaseMap, shadowMask = sinusFtp.computePhaseMap(captures)
unwrappedPhaseMap = sinusFtp.unwrapPhaseMap(wrappedPhaseMap, (cols, rows), shadowMask=shadowMask)
unwrappedPhaseMap8 = unwrappedPhaseMap*1 + 128
unwrappedPhaseMap8 = numpy.uint8(unwrappedPhaseMap8)
sumOfDiff = 0
count = 0
for i in range(rows):
for j in range(cols):
ref = int(unwrappedPhaseMapFtpRef[i, j])
comp = int(unwrappedPhaseMap8[i, j])
sumOfDiff += (ref - comp)
count += 1
ratio = sumOfDiff/float(count)
self.assertLessEqual(ratio, 0.2)
wrappedPhaseMap, shadowMask = sinusFaps.computePhaseMap(captures)
unwrappedPhaseMap = sinusFaps.unwrapPhaseMap(wrappedPhaseMap, (cols, rows), shadowMask=shadowMask)
unwrappedPhaseMap8 = unwrappedPhaseMap*1 + 128
unwrappedPhaseMap8 = numpy.uint8(unwrappedPhaseMap8)
sumOfDiff = 0
count = 0
for i in range(rows):
for j in range(cols):
ref = int(unwrappedPhaseMapFapsRef[i, j])
comp = int(unwrappedPhaseMap8[i, j])
sumOfDiff += (ref - comp)
count += 1
ratio = sumOfDiff/float(count)
self.assertLessEqual(ratio, 0.2)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
|
import cv2 as cv
N = 2
modelname = "parasaurolophus_6700"
scenename = "rs1_normals"
detector = cv.ppf_match_3d_PPF3DDetector(0.025, 0.05)
print('Loading model...')
pc = cv.ppf_match_3d.loadPLYSimple("data/%s.ply" % modelname, 1)
print('Training...')
detector.trainModel(pc)
print('Loading scene...')
pcTest = cv.ppf_match_3d.loadPLYSimple("data/%s.ply" % scenename, 1)
print('Matching...')
results = detector.match(pcTest, 1.0/40.0, 0.05)
print('Performing ICP...')
icp = cv.ppf_match_3d_ICP(100)
_, results = icp.registerModelToScene(pc, pcTest, results[:N])
print("Poses: ")
for i, result in enumerate(results):
#result.printPose()
print("\n-- Pose to Model Index %d: NumVotes = %d, Residual = %f\n%s\n" % (result.modelIndex, result.numVotes, result.residual, result.pose))
if i == 0:
pct = cv.ppf_match_3d.transformPCPose(pc, result.pose)
cv.ppf_match_3d.writePLY(pct, "%sPCTrans.ply" % modelname)
|
import cv2 as cv
import numpy as np
def rotation(theta):
tx, ty, tz = theta
Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])
Ry = np.array([[np.cos(ty), 0, -np.sin(ty)], [0, 1, 0], [np.sin(ty), 0, np.cos(ty)]])
Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])
return np.dot(Rx, np.dot(Ry, Rz))
width = 20
height = 10
max_deg = np.pi / 12
cloud, rotated_cloud = [None]*3, [None]*3
retval, residual, pose = [None]*3, [None]*3, [None]*3
noise = np.random.normal(0.0, 0.1, height * width * 3).reshape((-1, 3))
noise2 = np.random.normal(0.0, 1.0, height * width)
x, y = np.meshgrid(
range(-width//2, width//2),
range(-height//2, height//2),
sparse=False, indexing='xy'
)
z = np.zeros((height, width))
cloud[0] = np.dstack((x, y, z)).reshape((-1, 3)).astype(np.float32)
cloud[1] = noise.astype(np.float32) + cloud[0]
cloud[2] = cloud[1].copy()  # copy so the extra z noise below does not also modify cloud[1]
cloud[2][:, 2] += noise2.astype(np.float32)
R = rotation([
0, #np.random.uniform(-max_deg, max_deg),
np.random.uniform(-max_deg, max_deg),
0, #np.random.uniform(-max_deg, max_deg)
])
t = np.zeros((3, 1))
Rt = np.vstack((
np.hstack((R, t)),
np.array([0, 0, 0, 1])
)).astype(np.float32)
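# Rt is a 4x4 homogeneous transform with a random small rotation and zero
# translation; the Frobenius norm ||I - Rt|| below is the "unaligned error".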
icp = cv.ppf_match_3d_ICP(100)
I = np.eye(4)
print("Unaligned error:\t%.6f" % np.linalg.norm(I - Rt))
for i in range(3):
rotated_cloud[i] = np.matmul(Rt[0:3,0:3], cloud[i].T).T + Rt[:3,3].T
retval[i], residual[i], pose[i] = icp.registerModelToScene(rotated_cloud[i], cloud[i])
print("ICP error:\t\t%.6f" % np.linalg.norm(I - np.matmul(pose[0], Rt)))
|
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
class cudafilters_test(NewOpenCVTests):
def setUp(self):
super(cudafilters_test, self).setUp()
if not cv.cuda.getCudaEnabledDeviceCount():
self.skipTest("No CUDA-capable device is detected")
def test_existence(self):
#Test at least the existence of wrapped functions for now
_filter = cv.cuda.createBoxFilter(cv.CV_8UC1, -1, (3, 3))
_filter = cv.cuda.createLinearFilter(cv.CV_8UC4, -1, np.eye(3))
_filter = cv.cuda.createLaplacianFilter(cv.CV_16UC1, -1, ksize=3)
_filter = cv.cuda.createSeparableLinearFilter(cv.CV_8UC1, -1, np.eye(3), np.eye(3))
_filter = cv.cuda.createDerivFilter(cv.CV_8UC1, -1, 1, 1, 3)
_filter = cv.cuda.createSobelFilter(cv.CV_8UC1, -1, 1, 1)
_filter = cv.cuda.createScharrFilter(cv.CV_8UC1, -1, 1, 0)
_filter = cv.cuda.createGaussianFilter(cv.CV_8UC1, -1, (3, 3), 16)
_filter = cv.cuda.createMorphologyFilter(cv.MORPH_DILATE, cv.CV_32FC1, np.eye(3))
_filter = cv.cuda.createBoxMaxFilter(cv.CV_8UC1, (3, 3))
_filter = cv.cuda.createBoxMinFilter(cv.CV_8UC1, (3, 3))
_filter = cv.cuda.createRowSumFilter(cv.CV_8UC1, cv.CV_32FC1, 3)
_filter = cv.cuda.createColumnSumFilter(cv.CV_8UC1, cv.CV_32FC1, 3)
_filter = cv.cuda.createMedianFilter(cv.CV_8UC1, 3)
self.assertTrue(True)  # It is sufficient that no exception was raised
def test_laplacian(self):
npMat = (np.random.random((128, 128)) * 255).astype(np.uint16)
cuMat = cv.cuda_GpuMat()
cuMat.upload(npMat)
self.assertTrue(np.allclose(cv.cuda.createLaplacianFilter(cv.CV_16UC1, -1, ksize=3).apply(cuMat).download(),
cv.Laplacian(npMat, cv.CV_16UC1, ksize=3)))
if __name__ == '__main__':
NewOpenCVTests.bootstrap() |
#!/usr/bin/python
import cv2 as cv
import numpy as np
import sys
img1 = cv.imread(sys.argv[1])
img1 = img1.astype(np.float32)
shift = np.array([5., 5.])
mapTest = cv.reg_MapShift(shift)
img2 = mapTest.warp(img1)
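# Estimate the shift between img1 and the warped img2 with a gradient-based
# shift mapper evaluated over an image pyramid; the recovered shift should
# match the applied (5, 5) translation up to the mapper's sign convention.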
mapper = cv.reg_MapperGradShift()
mappPyr = cv.reg_MapperPyramid(mapper)
resMap = mappPyr.calculate(img1, img2)
mapShift = cv.reg.MapTypeCaster_toShift(resMap)
print(mapShift.getShift())
|
#!/usr/bin/env python
from __future__ import print_function
import os, sys, argparse, json
import numpy as np
import scipy.io
import cv2 as cv
import timeit
from learn_color_balance import load_ground_truth
def load_json(path):
f = open(path, "r")
data = json.load(f)
return data
def save_json(obj, path):
tmp_file = path + ".bak"
f = open(tmp_file, "w")
json.dump(obj, f, indent=2)
f.flush()
os.fsync(f.fileno())
f.close()
try:
os.rename(tmp_file, path)
except OSError:
os.remove(path)
os.rename(tmp_file, path)
def parse_sequence(input_str):
if len(input_str) == 0:
return []
else:
return [o.strip() for o in input_str.split(",") if o]
def stretch_to_8bit(arr, clip_percentile = 2.5):
arr = np.clip(arr * (255.0 / np.percentile(arr, 100 - clip_percentile)), 0, 255)
return arr.astype(np.uint8)
def evaluate(im, algo, gt_illuminant, i, range_thresh, bin_num, dst_folder, model_folder):
new_im = None
start_time = timeit.default_timer()
if algo=="grayworld":
inst = cv.xphoto.createGrayworldWB()
inst.setSaturationThreshold(0.95)
new_im = inst.balanceWhite(im)
elif algo=="nothing":
new_im = im
elif algo.split(":")[0]=="learning_based":
model_path = ""
if len(algo.split(":"))>1:
model_path = os.path.join(model_folder, algo.split(":")[1])
inst = cv.xphoto.createLearningBasedWB(model_path)
inst.setRangeMaxVal(range_thresh)
inst.setSaturationThreshold(0.98)
inst.setHistBinNum(bin_num)
new_im = inst.balanceWhite(im)
elif algo=="GT":
gains = gt_illuminant / min(gt_illuminant)
g1 = float(1.0 / gains[2])
g2 = float(1.0 / gains[1])
g3 = float(1.0 / gains[0])
new_im = cv.xphoto.applyChannelGains(im, g1, g2, g3)
time = 1000*(timeit.default_timer() - start_time) #time in ms
if len(dst_folder)>0:
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
im_name = ("%04d_" % i) + algo.replace(":","_") + ".jpg"
cv.imwrite(os.path.join(dst_folder, im_name), stretch_to_8bit(new_im))
#recover the illuminant from the color balancing result, assuming the standard model:
estimated_illuminant = [0, 0, 0]
eps = 0.01
estimated_illuminant[2] = np.percentile((im[:,:,0] + eps) / (new_im[:,:,0] + eps), 50)
estimated_illuminant[1] = np.percentile((im[:,:,1] + eps) / (new_im[:,:,1] + eps), 50)
estimated_illuminant[0] = np.percentile((im[:,:,2] + eps) / (new_im[:,:,2] + eps), 50)
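# Angular error between the ground-truth and estimated illuminants: arccos of
# the normalized dot product of the two RGB vectors (converted to degrees below).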
res = np.arccos(np.dot(gt_illuminant,estimated_illuminant)/
(np.linalg.norm(gt_illuminant) * np.linalg.norm(estimated_illuminant)))
return (time, (res / np.pi) * 180)
def build_html_table(out, state, stat_list, img_range):
stat_dict = {'mean': ('Mean error', lambda arr: np.mean(arr)),
'median': ('Median error',lambda arr: np.percentile(arr, 50)),
'p05': ('5<sup>th</sup> percentile',lambda arr: np.percentile(arr, 5)),
'p20': ('20<sup>th</sup> percentile',lambda arr: np.percentile(arr, 20)),
'p80': ('80<sup>th</sup> percentile',lambda arr: np.percentile(arr, 80)),
'p95': ('95<sup>th</sup> percentile',lambda arr: np.percentile(arr, 95))
}
html_out = ['<style type="text/css">\n',
' html, body {font-family: Lucida Console, Courier New, Courier;font-size: 16px;color:#3e4758;}\n',
' .tbl{background:none repeat scroll 0 0 #FFFFFF;border-collapse:collapse;font-family:"Lucida Sans Unicode","Lucida Grande",Sans-Serif;font-size:14px;margin:20px;text-align:left;width:480px;margin-left: auto;margin-right: auto;white-space:nowrap;}\n',
' .tbl span{display:block;white-space:nowrap;}\n',
' .tbl thead tr:last-child th {padding-bottom:5px;}\n',
' .tbl tbody tr:first-child td {border-top:3px solid #6678B1;}\n',
' .tbl th{border:none;color:#003399;font-size:16px;font-weight:normal;white-space:nowrap;padding:3px 10px;}\n',
' .tbl td{border:none;border-bottom:1px solid #CCCCCC;color:#666699;padding:6px 8px;white-space:nowrap;}\n',
' .tbl tbody tr:hover td{color:#000099;}\n',
' .tbl caption{font:italic 16px "Trebuchet MS",Verdana,Arial,Helvetica,sans-serif;padding:0 0 5px;text-align:right;white-space:normal;}\n',
' .firstingroup {border-top:2px solid #6678B1;}\n',
'</style>\n\n']
html_out += ['<table class="tbl">\n',
' <thead>\n',
' <tr>\n',
' <th align="center" valign="top"> Algorithm Name </th>\n',
' <th align="center" valign="top"> Average Time </th>\n']
for stat in stat_list:
if stat not in stat_dict.keys():
print("Error: unsupported statistic " + stat)
sys.exit(1)
html_out += [' <th align="center" valign="top"> ' +
stat_dict[stat][0] +
' </th>\n']
html_out += [' </tr>\n',
' </thead>\n',
' <tbody>\n']
for algorithm in state.keys():
arr = [state[algorithm][file]["angular_error"] for file in state[algorithm].keys() if file>=img_range[0] and file<=img_range[1]]
average_time = "%.2f ms" % np.mean([state[algorithm][file]["time"] for file in state[algorithm].keys()
if file>=img_range[0] and file<=img_range[1]])
html_out += [' <tr>\n',
' <td>' + algorithm + '</td>\n',
' <td>' + average_time + '</td>\n']
for stat in stat_list:
html_out += [' <td> ' +
"%.2f°" % stat_dict[stat][1](arr) +
' </td>\n']
html_out += [' </tr>\n']
html_out += [' </tbody>\n',
'</table>\n']
f = open(out, 'w')
f.writelines(html_out)
f.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=("A benchmarking script for color balance algorithms"),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"-a",
"--algorithms",
metavar="ALGORITHMS",
default="",
help=("Comma-separated list of color balance algorithms to evaluate. "
"Currently available: GT,learning_based,grayworld,nothing. "
"Use a colon to set a specific model for the learning-based "
"algorithm, e.g. learning_based:model1.yml,learning_based:model2.yml"))
parser.add_argument(
"-i",
"--input_folder",
metavar="INPUT_FOLDER",
default="",
help=("Folder containing input images to evaluate on. Assumes minimally "
"processed png images like in the Gehler-Shi (http://www.cs.sfu.ca/~colour/data/shi_gehler/) "
"or NUS 8-camera (http://www.comp.nus.edu.sg/~whitebal/illuminant/illuminant.html) datasets"))
parser.add_argument(
"-g",
"--ground_truth",
metavar="GROUND_TRUTH",
default="real_illum_568..mat",
help=("Path to the mat file containing ground truth illuminations. Currently "
"supports formats supplied by the Gehler-Shi and NUS 8-camera datasets."))
parser.add_argument(
"-o",
"--out",
metavar="OUT",
default="./white_balance_eval_result.html",
help="Path to the output html table")
parser.add_argument(
"-s",
"--state",
metavar="STATE_JSON",
default="./WB_evaluation_state.json",
help=("Path to a json file that stores the current evaluation state"))
parser.add_argument(
"-t",
"--stats",
metavar="STATS",
default="mean,median,p05,p20,p80,p95",
help=("Comma-separated list of error statistics to compute and list "
"in the output table. All the available ones are used by default"))
parser.add_argument(
"-b",
"--input_bit_depth",
metavar="INPUT_BIT_DEPTH",
default="",
help=("Assumed bit depth for input images. Should be specified in order to "
"use full bit depth for evaluation (for instance, -b 12 for 12 bit images). "
"Otherwise, input images are converted to 8 bit prior to the evaluation."))
parser.add_argument(
"-d",
"--dst_folder",
metavar="DST_FOLDER",
default="",
help=("If specified, this folder will be used to store the color correction results"))
parser.add_argument(
"-r",
"--range",
metavar="RANGE",
default="0,0",
help=("Comma-separated range of images from the dataset to evaluate on (for instance: 0,568). "
"All available images are used by default."))
parser.add_argument(
"-m",
"--model_folder",
metavar="MODEL_FOLDER",
default="",
help=("Path to the folder containing models for the learning-based color balance algorithm (optional)"))
args, other_args = parser.parse_known_args()
if not os.path.exists(args.input_folder):
print("Error: " + args.input_folder + (" does not exist. Please, correctly "
"specify the -i parameter"))
sys.exit(1)
if not os.path.exists(args.ground_truth):
print("Error: " + args.ground_truth + (" does not exist. Please, correctly "
"specify the -g parameter"))
sys.exit(1)
state = {}
if os.path.isfile(args.state):
state = load_json(args.state)
algorithm_list = parse_sequence(args.algorithms)
img_range = list(map(int, parse_sequence(args.range)))
if len(img_range)!=2:
print("Error: Please specify the -r parameter in form <first_image_index>,<last_image_index>")
sys.exit(1)
img_files = sorted(os.listdir(args.input_folder))
(gt_illuminants,black_levels) = load_ground_truth(args.ground_truth)
for algorithm in algorithm_list:
i = 0
if algorithm not in state.keys():
state[algorithm] = {}
sz = len(img_files)
for file in img_files:
if file not in state[algorithm].keys() and\
((i>=img_range[0] and i<img_range[1]) or img_range[0]==img_range[1]==0):
cur_path = os.path.join(args.input_folder, file)
im = cv.imread(cur_path, -1).astype(np.float32)
im -= black_levels[i]
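# If an input bit depth was given, keep the image in its native range
# (clipped to uint16); otherwise stretch it to 8 bit before evaluation.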
range_thresh = 255
if len(args.input_bit_depth)>0:
range_thresh = 2**int(args.input_bit_depth) - 1
im = np.clip(im, 0, range_thresh).astype(np.uint16)
else:
im = stretch_to_8bit(im)
(time,angular_err) = evaluate(im, algorithm, gt_illuminants[i], i, range_thresh,
256 if range_thresh > 255 else 64, args.dst_folder, args.model_folder)
state[algorithm][file] = {"angular_error": angular_err, "time": time}
sys.stdout.write("Algorithm: %-20s Done: [%3d/%3d]\r" % (algorithm, i, sz)),
sys.stdout.flush()
save_json(state, args.state)
i+=1
save_json(state, args.state)
build_html_table(args.out, state, parse_sequence(args.stats), [img_files[img_range[0]], img_files[img_range[1]-1]])
|
#!/usr/bin/env python
from __future__ import print_function
import os, sys, argparse
import numpy as np
import scipy.io
from sklearn.tree import DecisionTreeRegressor
import cv2 as cv
import random
def parse_sequence(input_str):
if len(input_str) == 0:
return []
else:
return [o.strip() for o in input_str.split(",") if o]
def convert_to_8bit(arr, clip_percentile = 2.5):
arr = np.clip(arr * (255.0 / np.percentile(arr, 100 - clip_percentile)), 0, 255)
return arr.astype(np.uint8)
def learn_regression_tree_ensemble(img_features, gt_illuminants, num_trees, max_tree_depth):
eps = 0.001
inst = [[img_features[i], gt_illuminants[i][0] / (sum(gt_illuminants[i]) + eps),
gt_illuminants[i][1] / (sum(gt_illuminants[i]) + eps)] for i in range(len(img_features))]
inst.sort(key = lambda obj: obj[1]) #sort by r chromaticity
stride = int(np.ceil(len(inst) / float(num_trees+1)))
sz = 2*stride
dst_model = []
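# Each tree in the ensemble is trained on the full data set, but the samples
# whose r chromaticity falls in its local window are replicated num_trees
# times, so every tree specializes on a different part of the gamut.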
for tree_idx in range(num_trees):
#local group in the training data is additionally weighted by num_trees
local_group_range = range(tree_idx*stride, min(tree_idx*stride+sz, len(inst)))
X = num_trees * [inst[i][0] for i in local_group_range]
y_r = num_trees * [inst[i][1] for i in local_group_range]
y_g = num_trees * [inst[i][2] for i in local_group_range]
#add the rest of the training data:
X = X + [inst[i][0] for i in range(len(inst)) if i not in local_group_range]
y_r = y_r + [inst[i][1] for i in range(len(inst)) if i not in local_group_range]
y_g = y_g + [inst[i][2] for i in range(len(inst)) if i not in local_group_range]
local_model = []
for feature_idx in range(len(X[0])):
tree_r = DecisionTreeRegressor(max_depth = max_tree_depth, random_state = 1234)
tree_r.fit([el[feature_idx][0] for el in X], y_r)
tree_g = DecisionTreeRegressor(max_depth = max_tree_depth, random_state = 1234)
tree_g.fit([el[feature_idx][0] for el in X], y_g)
local_model.append([tree_r, tree_g])
dst_model.append(local_model)
return dst_model
def get_tree_node_lists(tree, tree_depth):
dst_feature_idx = (2**tree_depth-1) * [0]
dst_thresh_vals = (2**tree_depth-1) * [.5]
dst_leaf_vals = (2**tree_depth) * [-1]
leaf_idx_offset = (2**tree_depth-1)
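# The learnt sklearn tree is unrolled into a complete binary tree of depth
# tree_depth stored as flat arrays: internal nodes keep (feature index,
# threshold) in breadth-first order, leaves keep the regressed value.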
left = tree.tree_.children_left
right = tree.tree_.children_right
threshold = tree.tree_.threshold
value = tree.tree_.value
feature = tree.tree_.feature
def recurse(left, right, threshold, feature, node, dst_idx, cur_depth):
if (threshold[node] != -2):
dst_feature_idx[dst_idx] = feature[node]
dst_thresh_vals[dst_idx] = threshold[node]
if left[node] != -1:
recurse (left, right, threshold, feature, left[node], 2*dst_idx+1, cur_depth + 1)
if right[node] != -1:
recurse (left, right, threshold, feature, right[node], 2*dst_idx+2, cur_depth + 1)
else:
range_start = 2**(tree_depth - cur_depth) * dst_idx + (2**(tree_depth - cur_depth) - 1) - leaf_idx_offset
range_end = 2**(tree_depth - cur_depth) * dst_idx + (2**(tree_depth - cur_depth+1) - 2) - leaf_idx_offset + 1
dst_leaf_vals[range_start:range_end] = (range_end - range_start) * [value[node][0][0]]
recurse(left, right, threshold, feature, 0, 0, 0)
return (dst_feature_idx, dst_thresh_vals, dst_leaf_vals)
def generate_code(model, input_params, use_YML, out_file):
feature_idx = []
thresh_vals = []
leaf_vals = []
depth = int(input_params["--max_tree_depth"])
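# The flattened trees are written either to a .yml FileStorage (which the
# learning-based WB implementation can load at run time) or as C arrays in a
# header for compiling the model directly into the library.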
for local_model in model:
for feature in local_model:
(local_feature_idx, local_thresh_vals, local_leaf_vals) = get_tree_node_lists(feature[0], depth)
feature_idx += local_feature_idx
thresh_vals += local_thresh_vals
leaf_vals += local_leaf_vals
(local_feature_idx, local_thresh_vals, local_leaf_vals) = get_tree_node_lists(feature[1], depth)
feature_idx += local_feature_idx
thresh_vals += local_thresh_vals
leaf_vals += local_leaf_vals
if use_YML:
fs = cv.FileStorage(out_file, 1)
fs.write("num_trees", len(model))
fs.write("num_tree_nodes", 2**depth)
fs.write("feature_idx", np.array(feature_idx).astype(np.uint8))
fs.write("thresh_vals", np.array(thresh_vals).astype(np.float32))
fs.write("leaf_vals", np.array(leaf_vals).astype(np.float32))
fs.release()
else:
res = "/* This file was automatically generated by learn_color_balance.py script\n" +\
" * using the following parameters:\n"
for key in input_params:
res += " " + key + " " + input_params[key]
res += "\n */\n"
res += "const int num_features = 4;\n"
res += "const int _num_trees = " + str(len(model)) + ";\n"
res += "const int _num_tree_nodes = " + str(2**depth) + ";\n"
res += "unsigned char _feature_idx[_num_trees*num_features*2*(_num_tree_nodes-1)] = {" + str(feature_idx[0])
for i in range(1,len(feature_idx)):
res += "," + str(feature_idx[i])
res += "};\n"
res += "float _thresh_vals[_num_trees*num_features*2*(_num_tree_nodes-1)] = {" + ("%.3ff" % thresh_vals[0])[1:]
for i in range(1,len(thresh_vals)):
res += "," + ("%.3ff" % thresh_vals[i])[1:]
res += "};\n"
res += "float _leaf_vals[_num_trees*num_features*2*_num_tree_nodes] = {" + ("%.3ff" % leaf_vals[0])[1:]
for i in range(1,len(leaf_vals)):
res += "," + ("%.3ff" % leaf_vals[i])[1:]
res += "};\n"
f = open(out_file,"w")
f.write(res)
f.close()
def load_ground_truth(gt_path):
gt = scipy.io.loadmat(gt_path)
base_gt_illuminants = []
black_levels = []
if "groundtruth_illuminants" in gt.keys() and "darkness_level" in gt.keys():
#NUS 8-camera dataset format
base_gt_illuminants = gt["groundtruth_illuminants"]
black_levels = len(base_gt_illuminants) * [gt["darkness_level"][0][0]]
elif "real_rgb" in gt.keys():
#Gehler-Shi dataset format
base_gt_illuminants = gt["real_rgb"]
black_levels = 87 * [0] + (len(base_gt_illuminants) - 87) * [129]
else:
print("Error: unknown ground-truth format, only formats of Gehler-Shi and NUS 8-camera datasets are supported")
sys.exit(1)
return (base_gt_illuminants, black_levels)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=("A tool for training the learning-based "
"color balance algorithm. Currently supports "
"training only on the Gehler-Shi and NUS 8-camera datasets."),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"-i",
"--input_folder",
metavar="INPUT_FOLDER",
default="",
help=("Folder containing the training dataset. Assumes minimally "
"processed png images like in the Gehler-Shi (http://www.cs.sfu.ca/~colour/data/shi_gehler/) "
"or NUS 8-camera (http://www.comp.nus.edu.sg/~whitebal/illuminant/illuminant.html) datasets"))
parser.add_argument(
"-g",
"--ground_truth",
metavar="GROUND_TRUTH",
default="real_illum_568..mat",
help=("Path to the mat file containing ground truth illuminations. Currently "
"supports formats supplied by the Gehler-Shi and NUS 8-camera datasets."))
parser.add_argument(
"-r",
"--range",
metavar="RANGE",
default="0,0",
help="Range of images from the input dataset to use for training")
parser.add_argument(
"-o",
"--out",
metavar="OUT",
default="color_balance_model.yml",
help="Path to the output learnt model. Either a .yml (for loading during runtime) "
"or .hpp (for compiling with the main code) file ")
parser.add_argument(
"--hist_bin_num",
metavar="HIST_BIN_NUM",
default="64",
help=("Size of one dimension of a three-dimensional RGB histogram employed in the "
"feature extraction step."))
parser.add_argument(
"--num_trees",
metavar="NUM_TREES",
default="20",
help=("Parameter to control the size of the regression tree ensemble"))
parser.add_argument(
"--max_tree_depth",
metavar="MAX_TREE_DEPTH",
default="4",
help=("Maxmimum depth of regression trees constructed during training."))
parser.add_argument(
"-a",
"--num_augmented",
metavar="NUM_AUGMENTED",
default="2",
help=("Number of augmented samples per one training image. Training set "
"augmentation tends to improve the learnt model robustness."))
args, other_args = parser.parse_known_args()
if not os.path.exists(args.input_folder):
print("Error: " + args.input_folder + (" does not exist. Please, correctly "
"specify the -i parameter"))
sys.exit(1)
if not os.path.exists(args.ground_truth):
print("Error: " + args.ground_truth + (" does not exist. Please, correctly "
"specify the -g parameter"))
sys.exit(1)
img_range = list(map(int,parse_sequence(args.range)))
if len(img_range)!=2:
print("Error: Please specify the -r parameter in form <first_image_index>,<last_image_index>")
sys.exit(1)
use_YML = None
if args.out.endswith(".yml"):
use_YML = True
elif args.out.endswith(".hpp"):
use_YML = False
else:
print("Error: Only .hpp and .yml are supported as output formats")
sys.exit(1)
hist_bin_num = int(args.hist_bin_num)
num_trees = int(args.num_trees)
max_tree_depth = int(args.max_tree_depth)
img_files = sorted(os.listdir(args.input_folder))
(base_gt_illuminants,black_levels) = load_ground_truth(args.ground_truth)
features = []
gt_illuminants = []
i=0
sz = len(img_files)
random.seed(1234)
inst = cv.xphoto.createLearningBasedWB()
inst.setRangeMaxVal(255)
inst.setSaturationThreshold(0.98)
inst.setHistBinNum(hist_bin_num)
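# For every selected image: extract the simple WB features, then add
# num_augmented re-colored copies with random per-channel gains and scale the
# corresponding ground-truth illuminant by the same gains.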
for file in img_files:
if (i>=img_range[0] and i<img_range[1]) or (img_range[0]==img_range[1]==0):
cur_path = os.path.join(args.input_folder,file)
im = cv.imread(cur_path, -1).astype(np.float32)
im -= black_levels[i]
im_8bit = convert_to_8bit(im)
cur_img_features = inst.extractSimpleFeatures(im_8bit, None)
features.append(cur_img_features.tolist())
gt_illuminants.append(base_gt_illuminants[i].tolist())
for iter in range(int(args.num_augmented)):
R_coef = random.uniform(0.2, 5.0)
G_coef = random.uniform(0.2, 5.0)
B_coef = random.uniform(0.2, 5.0)
# work on a copy so the random gains of one augmented sample do not
# accumulate in im across augmentation iterations
im_aug = im.copy()
im_aug[:,:,0] *= B_coef
im_aug[:,:,1] *= G_coef
im_aug[:,:,2] *= R_coef
im_8bit = convert_to_8bit(im_aug)
cur_img_features = inst.extractSimpleFeatures(im_8bit, None)
features.append(cur_img_features.tolist())
illum = base_gt_illuminants[i].copy()  # copy to avoid modifying the ground truth in place
illum[0] *= R_coef
illum[1] *= G_coef
illum[2] *= B_coef
gt_illuminants.append(illum.tolist())
sys.stdout.write("Computing features: [%3d/%3d]\r" % (i, sz)),
sys.stdout.flush()
i+=1
print("\nLearning the model...")
model = learn_regression_tree_ensemble(features, gt_illuminants, num_trees, max_tree_depth)
print("Writing the model...")
generate_code(model,{"-r":args.range, "--hist_bin_num": args.hist_bin_num, "--num_trees": args.num_trees,
"--max_tree_depth": args.max_tree_depth, "--num_augmented": args.num_augmented},
use_YML, args.out)
print("Done")
|
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2019, Josh Chien. All rights reserved.
from argparse import ArgumentParser
import numpy as np
from PIL import Image
import io
import warnings
import os
import cairo
from cairosvg import svg2png
import math
import tempfile
def SaveArucoDictBytesList(filePath = "arucoDictBytesList.npz"):
import numpy as np
# cv2 is optional dependency
try:
import cv2
from cv2 import aruco
# Name, Flag
dictInfo = \
[
("DICT_4X4_1000", aruco.DICT_4X4_1000),
("DICT_5X5_1000", aruco.DICT_5X5_1000),
("DICT_6X6_1000", aruco.DICT_6X6_1000),
("DICT_7X7_1000", aruco.DICT_7X7_1000),
("DICT_ARUCO_ORIGINAL", aruco.DICT_ARUCO_ORIGINAL),
("DICT_APRILTAG_16h5", aruco.DICT_APRILTAG_16h5),
("DICT_APRILTAG_25h9", aruco.DICT_APRILTAG_25h9),
("DICT_APRILTAG_36h10", aruco.DICT_APRILTAG_36h10),
("DICT_APRILTAG_36h11", aruco.DICT_APRILTAG_36h11),
]
arucoDictBytesList = {}
for name, flag in dictInfo:
arucoDict = aruco.Dictionary_get(flag)
arucoDictBytesList[name] = arucoDict.bytesList
np.savez_compressed(filePath, **arucoDictBytesList)
return arucoDictBytesList
except Exception as e:
warnings.warn(str(e))
return None
class MarkerPrinter:
debugMode = None # "LINE" "BLOCK"
# Static Vars
# SVG https://oreillymedia.github.io/Using_SVG/guide/units.html
# for PDF and SVG, 1 pixel = 1/72 inch, 1 cm = 1/2.54 inch, 1 pixel = 2.54/72 cm, 1 cm = 72/2.54 pixels
ptPerMeter = 72 / 2.54 * 100
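# 72 / 2.54 * 100 ~= 2834.6 pt per meter, e.g. a 0.05 m marker is ~141.7 pt wide.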
surface = {
".SVG": cairo.SVGSurface,
".PDF": cairo.PDFSurface,
".PS": cairo.PSSurface }
if (os.path.isfile("arucoDictBytesList.npz")):
arucoDictBytesList = np.load("arucoDictBytesList.npz")
else:
warnings.warn("Missing build-in arucoDictBytesList.npz, generate it again")
arucoDictBytesList = SaveArucoDictBytesList(filePath = "arucoDictBytesList.npz")
arucoDictMarkerSize = \
{
"DICT_4X4_1000": 4,
"DICT_5X5_1000": 5,
"DICT_6X6_1000": 6,
"DICT_7X7_1000": 7,
"DICT_ARUCO_ORIGINAL": 5,
"DICT_APRILTAG_16h5": 4,
"DICT_APRILTAG_25h9": 5,
"DICT_APRILTAG_36h10": 6,
"DICT_APRILTAG_36h11": 6,
}
def ArucoBits(dictionary, markerID):
bytesList = MarkerPrinter.arucoDictBytesList[dictionary][markerID].ravel()
markerSize = MarkerPrinter.arucoDictMarkerSize[dictionary]
arucoBits = np.zeros(shape = (markerSize, markerSize), dtype = bool)
base2List = np.array( [128, 64, 32, 16, 8, 4, 2, 1], dtype = np.uint8)
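# Unpack the dictionary's packed bytes MSB-first into a markerSize x markerSize
# boolean grid; base2List holds the bit weights within one byte.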
currentByteIdx = 0
currentByte = bytesList[currentByteIdx]
currentBit = 0
for row in range(markerSize):
for col in range(markerSize):
if(currentByte >= base2List[currentBit]):
arucoBits[row, col] = True
currentByte -= base2List[currentBit]
currentBit = currentBit + 1
if(currentBit == 8):
currentByteIdx = currentByteIdx + 1
currentByte = bytesList[currentByteIdx]
if(8 * (currentByteIdx + 1) > arucoBits.size):
currentBit = 8 * (currentByteIdx + 1) - arucoBits.size
else:
currentBit = 0
return arucoBits
def __DrawBlock(context,
dictionary = None, markerLength = None, borderBits = 1,
chessboardSize = (1, 1), squareLength = None, firstMarkerID = 0,
blockX = 0, blockY = 0, originX = 0, originY = 0, pageBorderX = 0, pageBorderY = 0,
mode = "CHESS" ):
if(squareLength is None):
squareLength = markerLength
if(markerLength is None):
markerLength = squareLength
if((squareLength is None) or (markerLength is None)):
raise ValueError("lenght is None")
drawMarkerBlock = False
if ((mode == "ARUCO") or (mode == "ARUCOGRID")):
drawMarkerBlock = True
elif(chessboardSize[1] % 2 == 0):
drawMarkerBlock = (( blockX % 2 == 0 ) == ( blockY % 2 == 0 ))
else:
drawMarkerBlock = (( blockX % 2 == 0 ) != ( blockY % 2 == 0 ))
if(drawMarkerBlock):
if (mode != "CHESS"):
if(dictionary is None):
raise ValueError("dictionary is None")
if (mode == "CHARUCO"):
originX = (blockX - originX) * squareLength + (squareLength - markerLength)*0.5 + pageBorderX
originY = (blockY - originY) * squareLength + (squareLength - markerLength)*0.5 + pageBorderY
else:
originX = (blockX - originX) * squareLength + pageBorderX
originY = (blockY - originY) * squareLength + pageBorderY
context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
context.rectangle(originX, originY, markerLength, markerLength)
context.fill()
# Generate marker
if (mode == "CHARUCO"):
markerID = firstMarkerID + (blockY * chessboardSize[0] + blockX) // 2
elif (mode == "ARUCO"):
markerID = firstMarkerID
elif (mode == "ARUCOGRID"):
markerID = firstMarkerID + (blockY * chessboardSize[0] + blockX)
marker = MarkerPrinter.ArucoBits(dictionary, markerID)
markerSize = marker.shape[0]
unitLength = markerLength / float(markerSize + borderBits * 2)
markerBitMap = np.zeros(shape = (markerSize+borderBits*2, markerSize+borderBits*2), dtype = bool)
markerBitMap[borderBits:-borderBits,borderBits:-borderBits] = marker
markerBitMap = np.swapaxes(markerBitMap, 0, 1)
# Compute edges
hEdges = np.zeros(shape = (markerSize+1,markerSize+1), dtype = bool)
vEdges = np.zeros(shape = (markerSize+1,markerSize+1), dtype = bool)
for mx in range(markerSize):
for my in range(markerSize+1):
if ( markerBitMap[mx + borderBits, my + borderBits - 1] ^ markerBitMap[mx + borderBits, my + borderBits]):
hEdges[mx, my] = True
for mx in range(markerSize+1):
for my in range(markerSize):
if ( markerBitMap[mx + borderBits - 1, my + borderBits] ^ markerBitMap[mx + borderBits, my + borderBits]):
vEdges[mx, my] = True
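# These edge maps drive the rendering below: in debug mode edges or cells are
# drawn directly, otherwise closed contours are traced along the edges with a
# wall-follower walk, presumably to avoid hairline seams between adjacent
# filled cells in the vector output.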
# Use for debug, check edge or position is correct or not
if(MarkerPrinter.debugMode is not None):
if(MarkerPrinter.debugMode.upper() == "LINE"):
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
context.set_line_width(unitLength * 0.1)
for mx in range(markerSize+1):
for my in range(markerSize+1):
if(hEdges[mx, my]):
context.move_to(originX + unitLength * (mx + borderBits ), originY + unitLength * (my + borderBits ))
context.line_to(originX + unitLength * (mx + borderBits + 1), originY + unitLength * (my + borderBits ))
context.stroke()
if(vEdges[mx, my]):
context.move_to(originX + unitLength * (mx + borderBits ), originY + unitLength * (my + borderBits ))
context.line_to(originX + unitLength * (mx + borderBits ), originY + unitLength * (my + borderBits + 1))
context.stroke()
elif(MarkerPrinter.debugMode.upper() == "BLOCK"):
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
for mx in range(markerSize):
for my in range(markerSize):
if(markerBitMap[mx + borderBits, my + borderBits]):
context.rectangle(
originX + unitLength * (mx + borderBits),
originY + unitLength * (my + borderBits),
unitLength, unitLength)
context.fill()
else:
while(True):
found = False
# Find start position
sx = 0
sy = 0
for my in range(markerSize):
for mx in range(markerSize):
if(hEdges[mx, my]):
found = True
sx = mx
sy = my
if(markerBitMap[sx + borderBits, sy + borderBits - 1]):
context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
else:
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
break
if(found):
break
context.move_to (originX + unitLength * (sx + borderBits), originY + unitLength * (sy + borderBits))
# Use wall follower maze solving algorithm to draw white part
cx = sx
cy = sy
cd = 3 # 0 right, 1 down, 2 left, 3 up
while(True):
nd = (cd + 1)%4
moved = False
if(nd == 0):
if(hEdges[cx, cy]):
hEdges[cx, cy] = False
cx = cx + 1
moved = True
elif(nd == 1):
if(vEdges[cx, cy]):
vEdges[cx, cy] = False
cy = cy + 1
moved = True
elif(nd == 2):
if(hEdges[cx - 1, cy]):
hEdges[cx - 1, cy] = False
cx = cx - 1
moved = True
elif(nd == 3):
if(vEdges[cx, cy - 1]):
vEdges[cx, cy - 1] = False
cy = cy - 1
moved = True
if((cx == sx) and (cy == sy)):
context.close_path ()
break
else:
if(moved):
context.line_to(originX + unitLength * (cx + borderBits), originY + unitLength * (cy + borderBits))
cd = nd
if (found):
context.fill()
else:
break
else:
originX = (blockX - originX) * squareLength + pageBorderX
originY = (blockY - originY) * squareLength + pageBorderY
context.set_source_rgba(0.0, 0.0, 0.0, 1.0)
context.rectangle(originX, originY, squareLength, squareLength)
context.fill()
def __CheckChessMarkerImage(chessboardSize, squareLength, subSize=None, pageBorder=(0,0)):
if(len(chessboardSize) != 2):
raise ValueError("len(chessboardSize) != 2")
else:
sizeX, sizeY = chessboardSize
if(len(pageBorder) != 2):
raise ValueError("len(pageBorder) != 2")
else:
pageBorderX, pageBorderY = pageBorder
if(sizeX <= 1):
raise ValueError("sizeX <= 1")
if(sizeY <= 1):
raise ValueError("sizeY <= 1")
if(squareLength <= 0):
raise ValueError("squareLength <= 0")
if(pageBorderX < 0):
raise ValueError("pageBorderX < 0")
if(pageBorderY < 0):
raise ValueError("pageBorderY < 0")
if(subSize is not None):
subSizeX, subSizeY = subSize
if(subSizeX < 0):
raise ValueError("subSizeX < 0")
if(subSizeY < 0):
raise ValueError("subSizeY < 0")
def PreviewChessMarkerImage(chessboardSize, squareLength, pageBorder=(0, 0), dpi=96):
MarkerPrinter.__CheckChessMarkerImage(chessboardSize, squareLength, pageBorder=pageBorder)
squareLength = squareLength * MarkerPrinter.ptPerMeter
pageBorder = (pageBorder[0] * MarkerPrinter.ptPerMeter, pageBorder[1] * MarkerPrinter.ptPerMeter)
prevImage = None
with tempfile.TemporaryDirectory() as tmpdirname:
with MarkerPrinter.surface[".SVG"] (
os.path.join(tmpdirname, "tempSVG.svg"),
chessboardSize[0] * squareLength + pageBorder[0] * 2,
chessboardSize[1] * squareLength + pageBorder[1] * 2) as surface:
context = cairo.Context(surface)
context.set_source_rgba(0.5, 0.5, 0.5, 1.0)
context.rectangle(0, 0,
chessboardSize[0] * squareLength + pageBorder[0] * 2,
chessboardSize[1] * squareLength + pageBorder[1] * 2)
context.fill()
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
context.rectangle(pageBorder[0], pageBorder[1],
chessboardSize[0] * squareLength,
chessboardSize[1] * squareLength)
context.fill()
for bx in range(chessboardSize[0]):
for by in range(chessboardSize[1]):
MarkerPrinter.__DrawBlock(
context = context,
chessboardSize = chessboardSize,
squareLength = squareLength,
blockX = bx,
blockY = by,
pageBorderX = pageBorder[0],
pageBorderY = pageBorder[1],
mode = "CHESS")
with open(os.path.join(tmpdirname, "tempSVG.svg")) as file:
prevImage = Image.open(io.BytesIO(svg2png(bytestring=file.read(), dpi=dpi)))
return prevImage
def GenChessMarkerImage(filePath, chessboardSize, squareLength, subSize=None, pageBorder=(0, 0)):
MarkerPrinter.__CheckChessMarkerImage(chessboardSize, squareLength, subSize=subSize, pageBorder=pageBorder)
squareLength = squareLength * MarkerPrinter.ptPerMeter
pageBorder = (pageBorder[0] * MarkerPrinter.ptPerMeter, pageBorder[1] * MarkerPrinter.ptPerMeter)
# Check
path, nameExt = os.path.split(filePath)
name, ext = os.path.splitext(nameExt)
if(len(path) > 0):
if not(os.path.isdir(path)):
os.makedirs(path)
if((ext.upper() != ".SVG") and (ext.upper() != ".PS") and (ext.upper() != ".PDF")):
raise ValueError("file extention is not supported, should be: svg, ps, pdf")
# Draw
with MarkerPrinter.surface[ext.upper()] (
filePath,
chessboardSize[0] * squareLength + pageBorder[0] * 2,
chessboardSize[1] * squareLength + pageBorder[1] * 2) as surface:
context = cairo.Context(surface)
context.set_source_rgba(0.5, 0.5, 0.5, 1.0)
context.rectangle(0, 0,
chessboardSize[0] * squareLength + pageBorder[0] * 2,
chessboardSize[1] * squareLength + pageBorder[1] * 2)
context.fill()
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
context.rectangle(pageBorder[0], pageBorder[1],
chessboardSize[0] * squareLength,
chessboardSize[1] * squareLength)
context.fill()
for bx in range(chessboardSize[0]):
for by in range(chessboardSize[1]):
MarkerPrinter.__DrawBlock(
context = context,
chessboardSize = chessboardSize,
squareLength = squareLength,
blockX = bx,
blockY = by,
pageBorderX = pageBorder[0],
pageBorderY = pageBorder[1],
mode = "CHESS" )
if(subSize is not None):
subDivide = (\
chessboardSize[0] // subSize[0] + int(chessboardSize[0] % subSize[0] > 0),
chessboardSize[1] // subSize[1] + int(chessboardSize[1] % subSize[1] > 0))
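# When subSize is given, the board is additionally split into
# ceil(chessboardSize / subSize) tiles and each tile is written to its own
# file, so a large board can be printed across several pages.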
subChessboardBlockX = np.clip ( np.arange(0, subSize[0] * subDivide[0] + 1, subSize[0]), 0, chessboardSize[0])
subChessboardBlockY = np.clip ( np.arange(0, subSize[1] * subDivide[1] + 1, subSize[1]), 0, chessboardSize[1])
subChessboardSliceX = subChessboardBlockX.astype(np.float64) * squareLength
subChessboardSliceY = subChessboardBlockY.astype(np.float64) * squareLength
for subXID in range(subDivide[0]):
for subYID in range(subDivide[1]):
subName = name + \
"_X" + str(subChessboardBlockX[subXID]) + "_" + str(subChessboardBlockX[subXID+1]) + \
"_Y" + str(subChessboardBlockY[subYID]) + "_" + str(subChessboardBlockY[subYID+1])
with MarkerPrinter.surface[ext.upper()](
os.path.join(path, subName + ext),
subChessboardSliceX[subXID+1] - subChessboardSliceX[subXID] + pageBorder[0] * 2,
subChessboardSliceY[subYID+1] - subChessboardSliceY[subYID] + pageBorder[1] * 2) as surface:
context = cairo.Context(surface)
context.set_source_rgba(0.5, 0.5, 0.5, 1.0)
context.rectangle(0, 0,
subChessboardSliceX[subXID+1] - subChessboardSliceX[subXID] + pageBorder[0] * 2,
subChessboardSliceY[subYID+1] - subChessboardSliceY[subYID] + pageBorder[1] * 2)
context.fill()
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
context.rectangle(pageBorder[0], pageBorder[1],
subChessboardSliceX[subXID+1] - subChessboardSliceX[subXID],
subChessboardSliceY[subYID+1] - subChessboardSliceY[subYID])
context.fill()
for bx in range(subChessboardBlockX[subXID+1] - subChessboardBlockX[subXID]):
for by in range(subChessboardBlockY[subYID+1] - subChessboardBlockY[subYID]):
MarkerPrinter.__DrawBlock(
context = context,
chessboardSize = chessboardSize,
squareLength = squareLength,
blockX = subChessboardBlockX[subXID] + bx,
blockY = subChessboardBlockY[subYID] + by,
originX = subChessboardBlockX[subXID],
originY = subChessboardBlockY[subYID],
pageBorderX = pageBorder[0],
pageBorderY = pageBorder[1],
mode = "CHESS" )
def __CheckArucoMarkerImage(dictionary, markerID, markerLength, borderBits=1, pageBorder=(0, 0)):
if(len(pageBorder) != 2):
raise ValueError("len(pageBorder) != 2")
else:
pageBorderX, pageBorderY = pageBorder
if not (dictionary in MarkerPrinter.arucoDictBytesList):
raise ValueError("dictionary is not support")
if(MarkerPrinter.arucoDictBytesList[dictionary].shape[0] <= markerID ):
raise ValueError("markerID is not in aruce dictionary")
if(markerID < 0):
raise ValueError("markerID < 0")
if(markerLength <= 0):
raise ValueError("markerLength <= 0")
if(borderBits <= 0):
raise ValueError("borderBits <= 0")
if(pageBorderX < 0):
raise ValueError("pageBorderX < 0")
if(pageBorderY < 0):
raise ValueError("pageBorderY < 0")
def PreviewArucoMarkerImage(dictionary, markerID, markerLength, borderBits=1, pageBorder=(0, 0), dpi=96):
MarkerPrinter.__CheckArucoMarkerImage(dictionary, markerID, markerLength, borderBits=borderBits, pageBorder=pageBorder)
markerLength = markerLength * MarkerPrinter.ptPerMeter
pageBorder = (pageBorder[0] * MarkerPrinter.ptPerMeter, pageBorder[1] * MarkerPrinter.ptPerMeter)
prevImage = None
with tempfile.TemporaryDirectory() as tmpdirname:
with MarkerPrinter.surface[".SVG"] (
os.path.join(tmpdirname, "tempSVG.svg"),
markerLength + pageBorder[0] * 2,
markerLength + pageBorder[1] * 2) as surface:
context = cairo.Context(surface)
context.set_source_rgba(0.5, 0.5, 0.5, 1.0)
context.rectangle(0, 0,
markerLength + pageBorder[0] * 2,
markerLength + pageBorder[1] * 2)
context.fill()
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
context.rectangle(pageBorder[0], pageBorder[1],
markerLength,
markerLength)
context.fill()
MarkerPrinter.__DrawBlock(
context = context,
dictionary = dictionary,
markerLength = markerLength,
borderBits = borderBits,
firstMarkerID = markerID,
pageBorderX = pageBorder[0],
pageBorderY = pageBorder[1],
mode = "ARUCO")
with open(os.path.join(tmpdirname, "tempSVG.svg")) as file:
prevImage = Image.open(io.BytesIO(svg2png(bytestring=file.read(), dpi=dpi)))
return prevImage
def GenArucoMarkerImage(filePath, dictionary, markerID, markerLength, borderBits=1, pageBorder=(0, 0)):
MarkerPrinter.__CheckArucoMarkerImage(dictionary, markerID, markerLength, borderBits=borderBits, pageBorder=pageBorder)
markerLength = markerLength * MarkerPrinter.ptPerMeter
pageBorder = (pageBorder[0] * MarkerPrinter.ptPerMeter, pageBorder[1] * MarkerPrinter.ptPerMeter)
# Check
path, nameExt = os.path.split(filePath)
name, ext = os.path.splitext(nameExt)
if(len(path) > 0):
if not(os.path.isdir(path)):
os.makedirs(path)
if((ext.upper() != ".SVG") and (ext.upper() != ".PS") and (ext.upper() != ".PDF")):
raise ValueError("file extention is not supported, should be: svg, ps, pdf")
# Draw
with MarkerPrinter.surface[ext.upper()] (
filePath,
markerLength + pageBorder[0] * 2,
markerLength + pageBorder[1] * 2) as surface:
context = cairo.Context(surface)
context.set_source_rgba(0.5, 0.5, 0.5, 1.0)
context.rectangle(0, 0,
markerLength + pageBorder[0] * 2,
markerLength + pageBorder[1] * 2)
context.fill()
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
context.rectangle(pageBorder[0], pageBorder[1],
markerLength,
markerLength)
context.fill()
MarkerPrinter.__DrawBlock(
context = context,
dictionary = dictionary,
markerLength = markerLength,
borderBits = borderBits,
firstMarkerID = markerID,
pageBorderX = pageBorder[0],
pageBorderY = pageBorder[1],
mode = "ARUCO")
def __CheckCharucoMarkerImage(dictionary, chessboardSize, squareLength, markerLength, borderBits=1, subSize=None, pageBorder=(0, 0)):
if(len(chessboardSize) != 2):
raise ValueError("len(chessboardSize) != 2")
else:
sizeX, sizeY = chessboardSize
if(len(pageBorder) != 2):
raise ValueError("len(pageBorder) != 2")
else:
pageBorderX, pageBorderY = pageBorder
if not (dictionary in MarkerPrinter.arucoDictBytesList):
raise ValueError("dictionary is not support")
if(MarkerPrinter.arucoDictBytesList[dictionary].shape[0] < (( sizeX * sizeY ) // 2)):
raise ValueError("aruce dictionary is not enough for your board size")
if(sizeX <= 1):
raise ValueError("sizeX <= 1")
if(sizeY <= 1):
raise ValueError("sizeY <= 1")
if(squareLength <= 0):
raise ValueError("squareLength <= 0")
if(markerLength <= 0):
raise ValueError("markerLength <= 0")
if(squareLength < markerLength):
raise ValueError("squareLength < markerLength")
if(borderBits <= 0):
raise ValueError("borderBits <= 0")
if(pageBorderX < 0):
raise ValueError("pageBorderX < 0")
if(pageBorderY < 0):
raise ValueError("pageBorderY < 0")
if(subSize is not None):
subSizeX, subSizeY = subSize
if(subSizeX < 0):
raise ValueError("subSizeX < 0")
if(subSizeY < 0):
raise ValueError("subSizeY < 0")
def PreviewCharucoMarkerImage(dictionary, chessboardSize, squareLength, markerLength, borderBits=1, pageBorder=(0, 0), dpi=96):
MarkerPrinter.__CheckCharucoMarkerImage(dictionary, chessboardSize, squareLength, markerLength, borderBits=borderBits, pageBorder=pageBorder)
squareLength = squareLength * MarkerPrinter.ptPerMeter
markerLength = markerLength * MarkerPrinter.ptPerMeter
pageBorder = (pageBorder[0] * MarkerPrinter.ptPerMeter, pageBorder[1] * MarkerPrinter.ptPerMeter)
prevImage = None
with tempfile.TemporaryDirectory() as tmpdirname:
with MarkerPrinter.surface[".SVG"] (
os.path.join(tmpdirname, "tempSVG.svg"),
chessboardSize[0] * squareLength + pageBorder[0] * 2,
chessboardSize[1] * squareLength + pageBorder[1] * 2) as surface:
context = cairo.Context(surface)
context.set_source_rgba(0.5, 0.5, 0.5, 1.0)
context.rectangle(0, 0,
chessboardSize[0] * squareLength + pageBorder[0] * 2,
chessboardSize[1] * squareLength + pageBorder[1] * 2)
context.fill()
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
context.rectangle(pageBorder[0], pageBorder[1],
chessboardSize[0] * squareLength,
chessboardSize[1] * squareLength)
context.fill()
for bx in range(chessboardSize[0]):
for by in range(chessboardSize[1]):
MarkerPrinter.__DrawBlock(
context = context,
dictionary = dictionary,
markerLength = markerLength,
borderBits = borderBits,
chessboardSize = chessboardSize,
squareLength = squareLength,
blockX = bx,
blockY = by,
pageBorderX = pageBorder[0],
pageBorderY = pageBorder[1],
mode = "CHARUCO")
with open(os.path.join(tmpdirname, "tempSVG.svg")) as file:
prevImage = Image.open(io.BytesIO(svg2png(bytestring=file.read(), dpi=dpi)))
return prevImage
def GenCharucoMarkerImage(filePath, dictionary, chessboardSize, squareLength, markerLength, borderBits=1, subSize=None, pageBorder=(0, 0)):
MarkerPrinter.__CheckCharucoMarkerImage(dictionary, chessboardSize, squareLength, markerLength, borderBits=borderBits, subSize=subSize, pageBorder=pageBorder)
squareLength = squareLength * MarkerPrinter.ptPerMeter
markerLength = markerLength * MarkerPrinter.ptPerMeter
pageBorder = (pageBorder[0] * MarkerPrinter.ptPerMeter, pageBorder[1] * MarkerPrinter.ptPerMeter)
# Check
path, nameExt = os.path.split(filePath)
name, ext = os.path.splitext(nameExt)
if(len(path) > 0):
if not(os.path.isdir(path)):
os.makedirs(path)
if((ext.upper() != ".SVG") and (ext.upper() != ".PS") and (ext.upper() != ".PDF")):
raise ValueError("file extention is not supported, should be: svg, ps, pdf")
# Draw
with MarkerPrinter.surface[ext.upper()] (
filePath,
chessboardSize[0] * squareLength + pageBorder[0] * 2,
chessboardSize[1] * squareLength + pageBorder[1] * 2) as surface:
context = cairo.Context(surface)
context.set_source_rgba(0.5, 0.5, 0.5, 1.0)
context.rectangle(0, 0,
chessboardSize[0] * squareLength + pageBorder[0] * 2,
chessboardSize[1] * squareLength + pageBorder[1] * 2)
context.fill()
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
context.rectangle(pageBorder[0], pageBorder[1],
chessboardSize[0] * squareLength,
chessboardSize[1] * squareLength)
context.fill()
for bx in range(chessboardSize[0]):
for by in range(chessboardSize[1]):
MarkerPrinter.__DrawBlock(
context = context,
dictionary = dictionary,
markerLength = markerLength,
borderBits = borderBits,
chessboardSize = chessboardSize,
squareLength = squareLength,
blockX = bx,
blockY = by,
pageBorderX = pageBorder[0],
pageBorderY = pageBorder[1],
mode = "CHARUCO")
if(subSize is not None):
subDivide = (\
chessboardSize[0] // subSize[0] + int(chessboardSize[0] % subSize[0] > 0),
chessboardSize[1] // subSize[1] + int(chessboardSize[1] % subSize[1] > 0))
subChessboardBlockX = np.clip ( np.arange(0, subSize[0] * subDivide[0] + 1, subSize[0]), 0, chessboardSize[0])
subChessboardBlockY = np.clip ( np.arange(0, subSize[1] * subDivide[1] + 1, subSize[1]), 0, chessboardSize[1])
subChessboardSliceX = subChessboardBlockX.astype(np.float64) * squareLength
subChessboardSliceY = subChessboardBlockY.astype(np.float64) * squareLength
for subXID in range(subDivide[0]):
for subYID in range(subDivide[1]):
subName = name + \
"_X" + str(subChessboardBlockX[subXID]) + "_" + str(subChessboardBlockX[subXID+1]) + \
"_Y" + str(subChessboardBlockY[subYID]) + "_" + str(subChessboardBlockY[subYID+1])
with MarkerPrinter.surface[ext.upper()](
os.path.join(path, subName + ext),
subChessboardSliceX[subXID+1] - subChessboardSliceX[subXID] + pageBorder[0] * 2,
subChessboardSliceY[subYID+1] - subChessboardSliceY[subYID] + pageBorder[1] * 2) as surface:
context = cairo.Context(surface)
context.set_source_rgba(0.5, 0.5, 0.5, 1.0)
context.rectangle(0, 0,
subChessboardSliceX[subXID+1] - subChessboardSliceX[subXID] + pageBorder[0] * 2,
subChessboardSliceY[subYID+1] - subChessboardSliceY[subYID] + pageBorder[1] * 2)
context.fill()
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
context.rectangle(pageBorder[0], pageBorder[1],
subChessboardSliceX[subXID+1] - subChessboardSliceX[subXID],
subChessboardSliceY[subYID+1] - subChessboardSliceY[subYID])
context.fill()
for bx in range(subChessboardBlockX[subXID+1] - subChessboardBlockX[subXID]):
for by in range(subChessboardBlockY[subYID+1] - subChessboardBlockY[subYID]):
MarkerPrinter.__DrawBlock(
context = context,
dictionary = dictionary,
markerLength = markerLength,
borderBits = borderBits,
chessboardSize = chessboardSize,
squareLength = squareLength,
blockX = subChessboardBlockX[subXID] + bx,
blockY = subChessboardBlockY[subYID] + by,
originX = subChessboardBlockX[subXID],
originY = subChessboardBlockY[subYID],
pageBorderX = pageBorder[0],
pageBorderY = pageBorder[1],
mode = "CHARUCO")
def __CheckArucoGridMarkerImage(dictionary, chessboardSize, markerLength, markerSeparation, firstMarker, borderBits=1, subSize=None, pageBorder=(0, 0)):
if(len(chessboardSize) != 2):
raise ValueError("len(chessboardSize) != 2")
else:
sizeX, sizeY = chessboardSize
if(len(pageBorder) != 2):
raise ValueError("len(pageBorder) != 2")
else:
pageBorderX, pageBorderY = pageBorder
if not (dictionary in MarkerPrinter.arucoDictBytesList):
raise ValueError("dictionary is not support")
if(MarkerPrinter.arucoDictBytesList[dictionary].shape[0] < (( sizeX * sizeY ) + firstMarker)):
raise ValueError("aruce dictionary is not enough for your board size and firstMarker")
if(sizeX <= 1):
raise ValueError("sizeX <= 1")
if(sizeY <= 1):
raise ValueError("sizeY <= 1")
if(markerLength <= 0):
raise ValueError("markerLength <= 0")
if(markerSeparation <= 0):
raise ValueError("markerSeparation <= 0")
if(borderBits <= 0):
raise ValueError("borderBits <= 0")
if(pageBorderX < 0):
raise ValueError("pageBorderX < 0")
if(pageBorderY < 0):
raise ValueError("pageBorderY < 0")
if(subSize is not None):
subSizeX, subSizeY = subSize
if(subSizeX < 0):
raise ValueError("subSizeX < 0")
if(subSizeY < 0):
raise ValueError("subSizeY < 0")
def PreviewArucoGridMarkerImage(dictionary, chessboardSize, markerLength, markerSeparation, firstMarker, borderBits=1, pageBorder=(0, 0), dpi=96):
MarkerPrinter.__CheckArucoGridMarkerImage(dictionary, chessboardSize, markerLength, markerSeparation, firstMarker, borderBits=borderBits, pageBorder=pageBorder)
markerLength = markerLength * MarkerPrinter.ptPerMeter
markerSeparation = markerSeparation * MarkerPrinter.ptPerMeter
pageBorder = (pageBorder[0] * MarkerPrinter.ptPerMeter, pageBorder[1] * MarkerPrinter.ptPerMeter)
prevImage = None
with tempfile.TemporaryDirectory() as tmpdirname:
with MarkerPrinter.surface[".SVG"] (
os.path.join(tmpdirname, "tempSVG.svg"),
chessboardSize[0] * markerLength + (chessboardSize[0] - 1) * markerSeparation + pageBorder[0] * 2,
chessboardSize[1] * markerLength + (chessboardSize[1] - 1) * markerSeparation + pageBorder[1] * 2) as surface:
context = cairo.Context(surface)
context.set_source_rgba(0.5, 0.5, 0.5, 1.0)
context.rectangle(0, 0,
chessboardSize[0] * markerLength + (chessboardSize[0] - 1) * markerSeparation + pageBorder[0] * 2,
chessboardSize[1] * markerLength + (chessboardSize[1] - 1) * markerSeparation + pageBorder[1] * 2)
context.fill()
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
context.rectangle(pageBorder[0], pageBorder[1],
chessboardSize[0] * markerLength + (chessboardSize[0] - 1) * markerSeparation,
chessboardSize[1] * markerLength + (chessboardSize[1] - 1) * markerSeparation)
context.fill()
for bx in range(chessboardSize[0]):
for by in range(chessboardSize[1]):
MarkerPrinter.__DrawBlock(
context = context,
dictionary = dictionary,
markerLength = markerLength,
borderBits = borderBits,
chessboardSize = chessboardSize,
squareLength = markerLength + markerSeparation,
firstMarkerID = firstMarker,
blockX = bx,
blockY = by,
pageBorderX = pageBorder[0],
pageBorderY = pageBorder[1],
mode = "ARUCOGRID")
with open(os.path.join(tmpdirname, "tempSVG.svg")) as file:
prevImage = Image.open(io.BytesIO(svg2png(bytestring=file.read(), dpi=dpi)))
return prevImage
def GenArucoGridMarkerImage(filePath, dictionary, chessboardSize, markerLength, markerSeparation, firstMarker, borderBits=1, subSize=None, pageBorder=(0, 0)):
MarkerPrinter.__CheckArucoGridMarkerImage(dictionary, chessboardSize, markerLength, markerSeparation, firstMarker, borderBits=borderBits, subSize=subSize, pageBorder=pageBorder)
markerLength = markerLength * MarkerPrinter.ptPerMeter
markerSeparation = markerSeparation * MarkerPrinter.ptPerMeter
pageBorder = (pageBorder[0] * MarkerPrinter.ptPerMeter, pageBorder[1] * MarkerPrinter.ptPerMeter)
# Check
path, nameExt = os.path.split(filePath)
name, ext = os.path.splitext(nameExt)
if(len(path) > 0):
if not(os.path.isdir(path)):
os.makedirs(path)
if((ext.upper() != ".SVG") and (ext.upper() != ".PS") and (ext.upper() != ".PDF")):
raise ValueError("file extention is not supported, should be: svg, ps, pdf")
# Draw
with MarkerPrinter.surface[ext.upper()] (
filePath,
chessboardSize[0] * markerLength + (chessboardSize[0] - 1) * markerSeparation + pageBorder[0] * 2,
chessboardSize[1] * markerLength + (chessboardSize[1] - 1) * markerSeparation + pageBorder[1] * 2) as surface:
context = cairo.Context(surface)
context.set_source_rgba(0.5, 0.5, 0.5, 1.0)
context.rectangle(0, 0,
chessboardSize[0] * markerLength + (chessboardSize[0] - 1) * markerSeparation + pageBorder[0] * 2,
chessboardSize[1] * markerLength + (chessboardSize[1] - 1) * markerSeparation + pageBorder[1] * 2)
context.fill()
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
context.rectangle(pageBorder[0], pageBorder[1],
chessboardSize[0] * markerLength + (chessboardSize[0] - 1) * markerSeparation,
chessboardSize[1] * markerLength + (chessboardSize[1] - 1) * markerSeparation)
context.fill()
for bx in range(chessboardSize[0]):
for by in range(chessboardSize[1]):
MarkerPrinter.__DrawBlock(
context = context,
dictionary = dictionary,
markerLength = markerLength,
borderBits = borderBits,
chessboardSize = chessboardSize,
squareLength = markerLength + markerSeparation,
firstMarkerID = firstMarker,
blockX = bx,
blockY = by,
pageBorderX = pageBorder[0],
pageBorderY = pageBorder[1],
mode = "ARUCOGRID")
if(subSize is not None):
subDivide = (\
chessboardSize[0] // subSize[0] + int(chessboardSize[0] % subSize[0] > 0),
chessboardSize[1] // subSize[1] + int(chessboardSize[1] % subSize[1] > 0))
subChessboardBlockX = np.clip ( np.arange(0, subSize[0] * subDivide[0] + 1, subSize[0]), 0, chessboardSize[0])
subChessboardBlockY = np.clip ( np.arange(0, subSize[1] * subDivide[1] + 1, subSize[1]), 0, chessboardSize[1])
subChessboardSliceX = subChessboardBlockX.astype(np.float64) * (markerLength + markerSeparation)
subChessboardSliceY = subChessboardBlockY.astype(np.float64) * (markerLength + markerSeparation)
subChessboardSliceX[-1] -= markerSeparation
subChessboardSliceY[-1] -= markerSeparation
for subXID in range(subDivide[0]):
for subYID in range(subDivide[1]):
subName = name + \
"_X" + str(subChessboardBlockX[subXID]) + "_" + str(subChessboardBlockX[subXID+1]) + \
"_Y" + str(subChessboardBlockY[subYID]) + "_" + str(subChessboardBlockY[subYID+1])
with MarkerPrinter.surface[ext.upper()](
os.path.join(path, subName + ext),
subChessboardSliceX[subXID+1] - subChessboardSliceX[subXID] + pageBorder[0] * 2,
subChessboardSliceY[subYID+1] - subChessboardSliceY[subYID] + pageBorder[1] * 2) as surface:
context = cairo.Context(surface)
context.set_source_rgba(0.5, 0.5, 0.5, 1.0)
context.rectangle(0, 0,
subChessboardSliceX[subXID+1] - subChessboardSliceX[subXID] + pageBorder[0] * 2,
subChessboardSliceY[subYID+1] - subChessboardSliceY[subYID] + pageBorder[1] * 2)
context.fill()
context.set_source_rgba(1.0, 1.0, 1.0, 1.0)
context.rectangle(pageBorder[0], pageBorder[1],
subChessboardSliceX[subXID+1] - subChessboardSliceX[subXID],
subChessboardSliceY[subYID+1] - subChessboardSliceY[subYID])
context.fill()
for bx in range(subChessboardBlockX[subXID+1] - subChessboardBlockX[subXID]):
for by in range(subChessboardBlockY[subYID+1] - subChessboardBlockY[subYID]):
MarkerPrinter.__DrawBlock(
context = context,
dictionary = dictionary,
markerLength = markerLength,
borderBits = borderBits,
chessboardSize = chessboardSize,
squareLength = markerLength + markerSeparation,
firstMarkerID = firstMarker,
blockX = subChessboardBlockX[subXID] + bx,
blockY = subChessboardBlockY[subYID] + by,
originX = subChessboardBlockX[subXID],
originY = subChessboardBlockY[subYID],
pageBorderX = pageBorder[0],
pageBorderY = pageBorder[1],
mode = "ARUCOGRID")
if __name__ == '__main__':
parser = ArgumentParser()
# Save marker image parameters
chessGroup = parser.add_argument_group('chess', 'Chessboard')
arucoGroup = parser.add_argument_group('aruco', 'ArUco')
arucoGridGroup = parser.add_argument_group('aruco_grid', 'ArUco grid')
charucoGroup = parser.add_argument_group('charuco', 'ChArUco')
exclusiveGroup = parser.add_mutually_exclusive_group()
exclusiveGroup.add_argument(
"--chess", action='store_true', default=False,
help="Choose to save chessboard marker")
exclusiveGroup.add_argument(
"--aruco", action='store_true', default=False,
help="Choose to save ArUco marker")
exclusiveGroup.add_argument(
"--aruco_grid", action='store_true', default=False,
help="Choose to save ArUco grid marker")
exclusiveGroup.add_argument(
"--charuco", action='store_true', default=False,
help="Choose to save ChArUco marker")
# Utility functions parameters
exclusiveGroup.add_argument(
"--generate", dest="arucoDataFileName",
help="Generate aruco data to FILE", metavar="FILE")
exclusiveGroup.add_argument(
"--list_dictionary", action='store_true', default=False,
help="List predefined aruco dictionary")
# Parameters
# fileName
parser.add_argument(
"--file", dest="fileName", default="./image.pdf",
help="Save marker image to FILE", metavar="FILE")
for group in [chessGroup, arucoGroup, arucoGridGroup, charucoGroup]:
group.add_argument(
"--" + group.title + "_file", dest="fileName",
help="Save marker image to FILE", metavar="FILE")
# dictionary
parser.add_argument(
"--dictionary", dest="dictionary", default="DICT_ARUCO_ORIGINAL",
help="Generate marker via predefined DICTIONARY aruco dictionary", metavar="DICTIONARY")
for group in [arucoGroup, arucoGridGroup, charucoGroup]:
group.add_argument(
"--" + group.title + "_dictionary", dest="dictionary",
help="Generate marker via predefined DICTIONARY aruco dictionary", metavar="DICTIONARY")
# size
parser.add_argument(
"--size_x", dest="sizeX", default="16",
help="Save marker image with N board width", metavar="N")
parser.add_argument(
"--size_y", dest="sizeY", default="9",
help="Save marker image with N board height", metavar="N")
for group in [chessGroup, arucoGridGroup, charucoGroup]:
group.add_argument(
"--" + group.title + "_size_x", dest="sizeX",
help="Save marker image with N board width", metavar="N")
group.add_argument(
"--" + group.title + "_size_y", dest="sizeY",
help="Save marker image with N board height", metavar="N")
# length
parser.add_argument(
"--square_length", dest="squareLength", default="0.09",
help="Save marker image with L square length (Unit: meter)", metavar="L")
parser.add_argument(
"--marker_length", dest="markerLength", default="0.07",
help="Save marker image with L marker length (Unit: meter)", metavar="L")
parser.add_argument(
"--marker_separation", dest="markerSeparation", default="0.02",
help="Save marker image with L separation length (Unit: meter)", metavar="L")
for group in [chessGroup, charucoGroup]:
group.add_argument(
"--" + group.title + "_square_length", dest="squareLength",
help="Save marker image with L blocks length (Unit: meter)", metavar="L")
for group in [arucoGroup, arucoGridGroup, charucoGroup]:
group.add_argument(
"--" + group.title + "_marker_length", dest="markerLength",
help="Save marker image with L marker length (Unit: meter)", metavar="L")
for group in [arucoGridGroup]:
group.add_argument(
"--" + group.title + "_marker_separation", dest="markerSeparation",
help="Save marker image with L gap length (Unit: meter)", metavar="L")
# else
parser.add_argument(
"--marker_id", dest="markerID", default="0",
help="Save marker image with ID marker", metavar="ID")
parser.add_argument(
"--first_marker", dest="firstMarker", default="0",
help="Save marker image that start with ID marker", metavar="ID")
parser.add_argument(
"--border_bits", dest="borderBits", default="1",
help="Save marker image with N border size", metavar="N")
for group in [arucoGroup]:
group.add_argument(
"--" + group.title + "_marker_id", dest="markerID",
help="Save marker image with ID marker", metavar="ID")
for group in [arucoGridGroup]:
group.add_argument(
"--" + group.title + "_first_marker", dest="firstMarker",
help="Save marker image that start with ID marker", metavar="ID")
for group in [arucoGroup, arucoGridGroup, charucoGroup]:
group.add_argument(
"--" + group.title + "_border_bits", dest="borderBits",
help="Save marker image with N border size", metavar="N")
# sub size
parser.add_argument(
"--sub_size_x", dest="subSizeX", default="0",
help="Save marker image with N chuck width", metavar="N")
parser.add_argument(
"--sub_size_y", dest="subSizeY", default="0",
help="Save marker image with N chuck height", metavar="N")
for group in [chessGroup, arucoGridGroup, charucoGroup]:
group.add_argument(
"--" + group.title + "_sub_size_x", dest="subSizeX",
help="Save marker image with N chuck width", metavar="N")
group.add_argument(
"--" + group.title + "_sub_size_y", dest="subSizeY",
help="Save marker image with N chuck height", metavar="N")
# page border
parser.add_argument(
"--page_border_x", dest="pageBorderX", default="0",
help="Save with page border width L length (Unit: meter)", metavar="L")
parser.add_argument(
"--page_border_y", dest="pageBorderY", default="0",
help="Save with page border height L length (Unit: meter)", metavar="L")
for group in [chessGroup, arucoGroup, arucoGridGroup, charucoGroup]:
group.add_argument(
"--" + group.title + "_page_border_x", dest="pageBorderX", default="0",
help="Save with page border width L length (Unit: meter)", metavar="L")
group.add_argument(
"--" + group.title + "_page_border_y", dest="pageBorderY", default="0",
help="Save with page border height L length (Unit: meter)", metavar="L")
# Run
args = parser.parse_args()
if(args.arucoDataFileName is not None):
print("Generate aruco data to: " + args.arucoDataFileName)
SaveArucoDictBytesList(args.arucoDataFileName)
elif(args.list_dictionary):
print("List predefined aruco dictionary")
for i in MarkerPrinter.arucoDictBytesList.keys():
print(i)
elif(args.chess):
try:
sizeX = int(args.sizeX)
sizeY = int(args.sizeY)
squareLength = float(args.squareLength)
subSizeX = int(args.subSizeX)
subSizeY = int(args.subSizeY)
pageBorderX = float(args.pageBorderX)
pageBorderY = float(args.pageBorderY)
except ValueError as e:
warnings.warn(str(e))
else:
print("Save chessboard marker with parms: " + \
str({ \
"fileName": args.fileName, \
"sizeX": sizeX, \
"sizeY": sizeY, \
"squareLength": squareLength, \
"subSizeX": subSizeX, \
"subSizeY": subSizeY, \
"pageBorderX": pageBorderX, \
"pageBorderY": pageBorderY, \
}))
subSize = None
if(subSizeX > 0):
if(subSizeY > 0):
subSize = (subSizeX, subSizeY)
else:
subSize = (subSizeX, sizeY)
else:
if(subSizeY > 0):
subSize = (sizeX, subSizeY)
else:
subSize = None
# Gen
MarkerPrinter.GenChessMarkerImage(args.fileName, (sizeX, sizeY), squareLength, subSize = subSize, pageBorder = (pageBorderX, pageBorderY))
elif(args.aruco):
try:
markerLength = float(args.markerLength)
markerID = int(args.markerID)
borderBits = int(args.borderBits)
pageBorderX = float(args.pageBorderX)
pageBorderY = float(args.pageBorderY)
except ValueError as e:
warnings.warn(str(e))
else:
print("Save ArUco marker with parms: " + \
str({ \
"fileName": args.fileName, \
"dictionary": args.dictionary, \
"markerLength": markerLength, \
"markerID": markerID, \
"borderBits": borderBits, \
"pageBorderX": pageBorderX, \
"pageBorderY": pageBorderY, \
}))
# Gen
MarkerPrinter.GenArucoMarkerImage(args.fileName, args.dictionary, markerID, markerLength, borderBits=borderBits, pageBorder = (pageBorderX, pageBorderY))
elif(args.aruco_grid):
try:
sizeX = int(args.sizeX)
sizeY = int(args.sizeY)
markerLength = float(args.markerLength)
markerSeparation = float(args.markerSeparation)
firstMarker = int(args.firstMarker)
borderBits = int(args.borderBits)
subSizeX = int(args.subSizeX)
subSizeY = int(args.subSizeY)
pageBorderX = float(args.pageBorderX)
pageBorderY = float(args.pageBorderY)
except ValueError as e:
warnings.warn(str(e))
else:
print("Save ArUco grid marker with parms: " + \
str({ \
"fileName": args.fileName, \
"dictionary": args.dictionary, \
"sizeX": sizeX, \
"sizeY": sizeY, \
"markerLength": markerLength, \
"markerSeparation": markerSeparation, \
"firstMarker": firstMarker, \
"borderBits": borderBits, \
"subSizeX": subSizeX, \
"subSizeY": subSizeY, \
"pageBorderX": pageBorderX, \
"pageBorderY": pageBorderY, \
}))
subSize = None
if(subSizeX > 0):
if(subSizeY > 0):
subSize = (subSizeX, subSizeY)
else:
subSize = (subSizeX, sizeY)
else:
if(subSizeY > 0):
subSize = (sizeX, subSizeY)
else:
subSize = None
# Gen
MarkerPrinter.GenArucoGridMarkerImage(args.fileName, args.dictionary, (sizeX, sizeY), markerLength, markerSeparation, firstMarker, borderBits=borderBits, subSize=subSize, pageBorder = (pageBorderX, pageBorderY))
elif(args.charuco):
try:
sizeX = int(args.sizeX)
sizeY = int(args.sizeY)
squareLength = float(args.squareLength)
markerLength = float(args.markerLength)
borderBits = int(args.borderBits)
subSizeX = int(args.subSizeX)
subSizeY = int(args.subSizeY)
pageBorderX = float(args.pageBorderX)
pageBorderY = float(args.pageBorderY)
except ValueError as e:
warnings.warn(str(e))
else:
print("Save ChArUco marker with parms: " + \
str({ \
"fileName": args.fileName, \
"dictionary": args.dictionary, \
"sizeX": sizeX, \
"sizeY": sizeY, \
"squareLength": squareLength, \
"markerLength": markerLength, \
"borderBits": borderBits, \
"subSizeX": subSizeX, \
"subSizeY": subSizeY, \
"pageBorderX": pageBorderX, \
"pageBorderY": pageBorderY, \
}))
subSize = None
if(subSizeX > 0):
if(subSizeY > 0):
subSize = (subSizeX, subSizeY)
else:
subSize = (subSizeX, sizeY)
else:
if(subSizeY > 0):
subSize = (sizeX, subSizeY)
else:
subSize = None
# Gen
MarkerPrinter.GenCharucoMarkerImage(args.fileName, args.dictionary, (sizeX, sizeY), squareLength, markerLength, borderBits=borderBits, subSize=subSize, pageBorder = (pageBorderX, pageBorderY))
else:
parser.print_help()
|
#!/usr/bin/env python3
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2019, Josh Chien. All rights reserved.
from MarkerPrinter import *
import tkinter as tk
from tkinter import ttk, filedialog, messagebox
import time
import PIL.Image
import PIL.ImageTk
class MarkerPrinterGUI:
def VisDPI(self, shape):
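# Pick a preview DPI so an image of the given (height, width) in points fits inside
# self.displayShape: scale the 96 DPI baseline by the smaller of the two axis ratios.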
scale0 = float(self.displayShape[0]) / float(shape[0])
scale1 = float(self.displayShape[1]) / float(shape[1])
if(scale0 > scale1):
return scale1 * 96.0
else:
return scale0 * 96.0
def OnShowingHelpGithub(self):
messagebox.showinfo("Github",
"https://github.com/dogod621/OpenCVMarkerPrinter")
def OnCloseWindow(self):
if(self.window is not None):
if messagebox.askokcancel("Quit", "Do you want to quit?"):
self.window.destroy()
self.window = None
def OnSelectCharucoMarkerDictionary(self, pDictName):
self.charucoMarkerDictionaryStr.set(pDictName)
def __SaveMarker(GenMarkerImageCallback, *args, **kwargs):
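# Name-mangled helper used as a static function (called via MarkerPrinterGUI.__SaveMarker):
# it normalizes the optional subSize kwarg, asks for an output path via a save dialog,
# then forwards everything to the given GenMarkerImageCallback.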
if(kwargs.get("subSize",None) is not None):
subSizeX, subSizeY = kwargs["subSize"]
kwargs["subSize"] = None
# Chunked saving is only enabled when both components are positive; the full board
# size is not known inside this helper, so a partial chunk spec cannot be completed here.
if(subSizeX > 0 and subSizeY > 0):
kwargs["subSize"] = (subSizeX, subSizeY)
try:
askFileName = filedialog.asksaveasfilename(initialdir = os.path.abspath("./"), title = "Output", filetypes = (\
("scalable vector graphics files","*.svg"), \
("portable document format files","*.pdf"), \
("post script files","*.ps")),
defaultextension="*.*")
if (askFileName):
GenMarkerImageCallback(askFileName, *args, **kwargs)
except Exception as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "Save marker failed")
return
def OnPreviewOrSaveCharucoMarker(self, askSave = False):
try:
sizeX = int(self.charucoMarkerChessboardSizeXStr.get())
sizeY = int(self.charucoMarkerChessboardSizeYStr.get())
squareLength = float(self.charucoMarkerSquareLengthStr.get())
markerLength = float(self.charucoMarkerMarkerLengthStr.get())
borderBits = int(self.charucoMarkerBorderBitsStr.get())
dictionary = self.charucoMarkerDictionaryStr.get()
subSizeX = int(self.charucoMarkerSaveSubSizeXStr.get())
subSizeY = int(self.charucoMarkerSaveSubSizeYStr.get())
pageBorderX = float(self.charucoMarkerSavePageBorderXStr.get())
pageBorderY = float(self.charucoMarkerSavePageBorderYStr.get())
except ValueError as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "Enter invalid parameters")
return
except Exception as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "Fail to get parameters")
return
# Preview
try:
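# The physical board size (meters) is converted to points via MarkerPrinter.ptPerMeter,
# and VisDPI picks a DPI so the rendered preview fits the display area.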
dpi = self.VisDPI(((sizeY * squareLength + pageBorderY * 2) * MarkerPrinter.ptPerMeter, (sizeX * squareLength + pageBorderX * 2) * MarkerPrinter.ptPerMeter))
tkImage = PIL.ImageTk.PhotoImage(image = MarkerPrinter.PreviewCharucoMarkerImage(dictionary, (sizeX, sizeY), squareLength, markerLength, borderBits=borderBits, pageBorder = (pageBorderX, pageBorderY), dpi=dpi))
self.charucoMarkerImageLabel.imgtk = tkImage
self.charucoMarkerImageLabel.config(image=tkImage)
except Exception as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "create marker failed")
return
# Save
if(askSave):
MarkerPrinterGUI.__SaveMarker(MarkerPrinter.GenCharucoMarkerImage, \
dictionary, (sizeX, sizeY), squareLength, markerLength, borderBits=borderBits, subSize = (subSizeX, subSizeY), pageBorder = (pageBorderX, pageBorderY))
def OnPreviewCharucoMarker(self):
self.OnPreviewOrSaveCharucoMarker(askSave = False)
def OnSaveCharucoMarker(self):
self.OnPreviewOrSaveCharucoMarker(askSave = True)
def InitCharucoMarkerTab(self):
self.charucoMarkerUIFrame = ttk.Frame(self.charucoMarkerTab)
self.charucoMarkerImageTab = ttk.Frame(self.charucoMarkerTab)
self.charucoMarkerUIFrame2 = ttk.Frame(self.charucoMarkerTab)
self.charucoMarkerUIFrame.grid(row=0, column=0, sticky = tk.NSEW)
self.charucoMarkerImageTab.grid(row=1, column=0, sticky = tk.NSEW)
self.charucoMarkerUIFrame2.grid(row=2, column=0, sticky = tk.NSEW)
self.charucoMarkerImageLabel = tk.Label(self.charucoMarkerImageTab)
self.charucoMarkerImageLabel.grid(row=0, column=0, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame, text="dictionary").grid(row=0, column=0, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame, text="chessboardSizeX").grid(row=0, column=1, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame, text="chessboardSizeY").grid(row=0, column=2, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame, text="squareLength (Unit: Meter)").grid(row=0, column=3, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame, text="markerLength (Unit: Meter)").grid(row=0, column=4, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame, text="borderBits").grid(row=0, column=5, sticky = tk.NSEW)
self.charucoMarkerDictionaryStr = tk.StringVar()
self.charucoMarkerChessboardSizeXStr = tk.StringVar()
self.charucoMarkerChessboardSizeXStr.set("16")
self.charucoMarkerChessboardSizeYStr = tk.StringVar()
self.charucoMarkerChessboardSizeYStr.set("9")
self.charucoMarkerSquareLengthStr = tk.StringVar()
self.charucoMarkerSquareLengthStr.set("0.09")
self.charucoMarkerMarkerLengthStr = tk.StringVar()
self.charucoMarkerMarkerLengthStr.set("0.07")
self.charucoMarkerBorderBitsStr = tk.StringVar()
self.charucoMarkerBorderBitsStr.set("1")
self.charucoMarkerDictionaryMenue = tk.OptionMenu(self.charucoMarkerUIFrame, self.charucoMarkerDictionaryStr, "DICT_ARUCO_ORIGINAL", command = self.OnSelectCharucoMarkerDictionary)
self.charucoMarkerDictionaryMenue.grid(row=1, column=0, sticky = tk.NSEW)
tk.Entry(self.charucoMarkerUIFrame, textvariable=self.charucoMarkerChessboardSizeXStr).grid(row=1, column=1, sticky = tk.NSEW)
tk.Entry(self.charucoMarkerUIFrame, textvariable=self.charucoMarkerChessboardSizeYStr).grid(row=1, column=2, sticky = tk.NSEW)
tk.Entry(self.charucoMarkerUIFrame, textvariable=self.charucoMarkerSquareLengthStr).grid(row=1, column=3, sticky = tk.NSEW)
tk.Entry(self.charucoMarkerUIFrame, textvariable=self.charucoMarkerMarkerLengthStr).grid(row=1, column=4, sticky = tk.NSEW)
tk.Entry(self.charucoMarkerUIFrame, textvariable=self.charucoMarkerBorderBitsStr).grid(row=1, column=5, sticky = tk.NSEW)
tk.Button(self.charucoMarkerUIFrame2, text = "Preview", command = self.OnPreviewCharucoMarker).grid(row=1, column=0, sticky = tk.NSEW)
tk.Button(self.charucoMarkerUIFrame2, text = "Save", command = self.OnSaveCharucoMarker).grid(row=1, column=1, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame2, text="Save opetions:").grid(row=0, column=2, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame2, text="(set 0 as disable)").grid(row=1, column=2, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame2, text="subSizeX").grid(row=0, column=3, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame2, text="subSizeY").grid(row=0, column=4, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame2, text="Divide to chunks, chunk sizeX").grid(row=2, column=3, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame2, text="Divide to chunks, chunk sizeY").grid(row=2, column=4, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame2, text="pageBorderX (Unit: Meter)").grid(row=0, column=5, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame2, text="pageBorderY (Unit: Meter)").grid(row=0, column=6, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame2, text="Border or page").grid(row=2, column=5, sticky = tk.NSEW)
tk.Label(self.charucoMarkerUIFrame2, text="Border or page").grid(row=2, column=6, sticky = tk.NSEW)
self.charucoMarkerSaveSubSizeXStr = tk.StringVar()
self.charucoMarkerSaveSubSizeXStr.set("0")
self.charucoMarkerSaveSubSizeYStr = tk.StringVar()
self.charucoMarkerSaveSubSizeYStr.set("0")
self.charucoMarkerSavePageBorderXStr = tk.StringVar()
self.charucoMarkerSavePageBorderXStr.set("0.02")
self.charucoMarkerSavePageBorderYStr = tk.StringVar()
self.charucoMarkerSavePageBorderYStr.set("0.02")
tk.Entry(self.charucoMarkerUIFrame2, textvariable=self.charucoMarkerSaveSubSizeXStr).grid(row=1, column=3, sticky = tk.NSEW)
tk.Entry(self.charucoMarkerUIFrame2, textvariable=self.charucoMarkerSaveSubSizeYStr).grid(row=1, column=4, sticky = tk.NSEW)
tk.Entry(self.charucoMarkerUIFrame2, textvariable=self.charucoMarkerSavePageBorderXStr).grid(row=1, column=5, sticky = tk.NSEW)
tk.Entry(self.charucoMarkerUIFrame2, textvariable=self.charucoMarkerSavePageBorderYStr).grid(row=1, column=6, sticky = tk.NSEW)
self.charucoMarkerDictionaryMenue['menu'].delete(0, 'end')
for dictName in self.dictList:
self.charucoMarkerDictionaryMenue['menu'].add_command(label=dictName, command=tk._setit(self.charucoMarkerDictionaryStr, dictName, self.OnSelectCharucoMarkerDictionary))
self.OnSelectCharucoMarkerDictionary("DICT_ARUCO_ORIGINAL")
def OnSelectArucoGridMarkerDictionary(self, pDictName):
self.arucoGridMarkerDictionaryStr.set(pDictName)
def OnPreviewOrSaveArucoGridMarker(self, askSave = False):
try:
markersX = int(self.arucoGridMarkerMarkersXStr.get())
markersY = int(self.arucoGridMarkerMarkersYStr.get())
markerLength = float(self.arucoGridMarkerMarkerLengthStr.get())
markerSeparation = float(self.arucoGridMarkerMarkerSeparationStr.get())
borderBits = int(self.arucoGridMarkerBorderBitsStr.get())
firstMarker = int(self.arucoGridMarkerFirstMarkerStr.get())
dictionary = self.arucoGridMarkerDictionaryStr.get()
subSizeX = int(self.arucoGridMarkerSaveSubSizeXStr.get())
subSizeY = int(self.arucoGridMarkerSaveSubSizeYStr.get())
pageBorderX = float(self.arucoGridMarkerSavePageBorderXStr.get())
pageBorderY = float(self.arucoGridMarkerSavePageBorderYStr.get())
except ValueError as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "Enter invalid parameters")
return
except Exception as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "Fail to get parameters")
return
# Preview
try:
dpi=self.VisDPI(((markersY * markerLength + (markersY - 1) * markerSeparation + pageBorderY * 2) * MarkerPrinter.ptPerMeter, (markersX * markerLength + (markersX - 1) * markerSeparation + pageBorderX * 2) * MarkerPrinter.ptPerMeter))
tkImage = PIL.ImageTk.PhotoImage(image = MarkerPrinter.PreviewArucoGridMarkerImage(dictionary, (markersX, markersY), markerLength, markerSeparation, firstMarker, borderBits=borderBits, pageBorder = (pageBorderX, pageBorderY), dpi=dpi))
self.arucoGridMarkerImageLabel.imgtk = tkImage
self.arucoGridMarkerImageLabel.config(image=tkImage)
except Exception as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "create marker failed")
return
# Save
if(askSave):
MarkerPrinterGUI.__SaveMarker(MarkerPrinter.GenArucoGridMarkerImage, \
dictionary, (markersX, markersY), markerLength, markerSeparation, firstMarker, borderBits=borderBits, subSize = (subSizeX, subSizeY), pageBorder = (pageBorderX, pageBorderY))
def OnPreviewArucoGridMarker(self):
self.OnPreviewOrSaveArucoGridMarker(askSave = False)
def OnSaveArucoGridMarker(self):
self.OnPreviewOrSaveArucoGridMarker(askSave = True)
def InitArucoGridMarkerTab(self):
self.arucoGridMarkerUIFrame = ttk.Frame(self.arucoGridMarkerTab)
self.arucoGridMarkerImageTab = ttk.Frame(self.arucoGridMarkerTab)
self.arucoGridMarkerUIFrame2 = ttk.Frame(self.arucoGridMarkerTab)
self.arucoGridMarkerUIFrame.grid(row=0, column=0, sticky = tk.NSEW)
self.arucoGridMarkerImageTab.grid(row=1, column=0, sticky = tk.NSEW)
self.arucoGridMarkerUIFrame2.grid(row=2, column=0, sticky = tk.NSEW)
self.arucoGridMarkerImageLabel = tk.Label(self.arucoGridMarkerImageTab)
self.arucoGridMarkerImageLabel.grid(row=0, column=0, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame, text="dictionary").grid(row=0, column=0, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame, text="markersX").grid(row=0, column=1, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame, text="markersY").grid(row=0, column=2, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame, text="markerLength (Unit: Meter)").grid(row=0, column=3, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame, text="markerSeparation (Unit: Meter)").grid(row=0, column=4, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame, text="firstMarker").grid(row=0, column=5, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame, text="borderBits").grid(row=0, column=6, sticky = tk.NSEW)
self.arucoGridMarkerDictionaryStr = tk.StringVar()
self.arucoGridMarkerMarkersXStr = tk.StringVar()
self.arucoGridMarkerMarkersXStr.set("16")
self.arucoGridMarkerMarkersYStr = tk.StringVar()
self.arucoGridMarkerMarkersYStr.set("9")
self.arucoGridMarkerMarkerLengthStr = tk.StringVar()
self.arucoGridMarkerMarkerLengthStr.set("0.07")
self.arucoGridMarkerMarkerSeparationStr = tk.StringVar()
self.arucoGridMarkerMarkerSeparationStr.set("0.02")
self.arucoGridMarkerFirstMarkerStr = tk.StringVar()
self.arucoGridMarkerFirstMarkerStr.set("0")
self.arucoGridMarkerBorderBitsStr = tk.StringVar()
self.arucoGridMarkerBorderBitsStr.set("1")
self.arucoGridMarkerDictionaryMenue = tk.OptionMenu(self.arucoGridMarkerUIFrame, self.arucoGridMarkerDictionaryStr, "DICT_ARUCO_ORIGINAL", command = self.OnSelectArucoGridMarkerDictionary)
self.arucoGridMarkerDictionaryMenue.grid(row=1, column=0, sticky = tk.NSEW)
tk.Entry(self.arucoGridMarkerUIFrame, textvariable=self.arucoGridMarkerMarkersXStr).grid(row=1, column=1, sticky = tk.NSEW)
tk.Entry(self.arucoGridMarkerUIFrame, textvariable=self.arucoGridMarkerMarkersYStr).grid(row=1, column=2, sticky = tk.NSEW)
tk.Entry(self.arucoGridMarkerUIFrame, textvariable=self.arucoGridMarkerMarkerLengthStr).grid(row=1, column=3, sticky = tk.NSEW)
tk.Entry(self.arucoGridMarkerUIFrame, textvariable=self.arucoGridMarkerMarkerSeparationStr).grid(row=1, column=4, sticky = tk.NSEW)
tk.Entry(self.arucoGridMarkerUIFrame, textvariable=self.arucoGridMarkerFirstMarkerStr).grid(row=1, column=5, sticky = tk.NSEW)
tk.Entry(self.arucoGridMarkerUIFrame, textvariable=self.arucoGridMarkerBorderBitsStr).grid(row=1, column=6, sticky = tk.NSEW)
tk.Button(self.arucoGridMarkerUIFrame2, text = "Preview", command = self.OnPreviewArucoGridMarker).grid(row=1, column=0, sticky = tk.NSEW)
tk.Button(self.arucoGridMarkerUIFrame2, text = "Save", command = self.OnSaveArucoGridMarker).grid(row=1, column=1, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame2, text="Save opetions:").grid(row=0, column=2, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame2, text="(set 0 as disable)").grid(row=1, column=2, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame2, text="subSizeX").grid(row=0, column=3, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame2, text="subSizeY").grid(row=0, column=4, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame2, text="Divide to chunks, chunk sizeX").grid(row=2, column=3, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame2, text="Divide to chunks, chunk sizeY").grid(row=2, column=4, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame2, text="pageBorderX (Unit: Meter)").grid(row=0, column=5, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame2, text="pageBorderY (Unit: Meter)").grid(row=0, column=6, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame2, text="Border or page").grid(row=2, column=5, sticky = tk.NSEW)
tk.Label(self.arucoGridMarkerUIFrame2, text="Border or page").grid(row=2, column=6, sticky = tk.NSEW)
self.arucoGridMarkerSaveSubSizeXStr = tk.StringVar()
self.arucoGridMarkerSaveSubSizeXStr.set("0")
self.arucoGridMarkerSaveSubSizeYStr = tk.StringVar()
self.arucoGridMarkerSaveSubSizeYStr.set("0")
self.arucoGridMarkerSavePageBorderXStr = tk.StringVar()
self.arucoGridMarkerSavePageBorderXStr.set("0.02")
self.arucoGridMarkerSavePageBorderYStr = tk.StringVar()
self.arucoGridMarkerSavePageBorderYStr.set("0.02")
tk.Entry(self.arucoGridMarkerUIFrame2, textvariable=self.arucoGridMarkerSaveSubSizeXStr).grid(row=1, column=3, sticky = tk.NSEW)
tk.Entry(self.arucoGridMarkerUIFrame2, textvariable=self.arucoGridMarkerSaveSubSizeYStr).grid(row=1, column=4, sticky = tk.NSEW)
tk.Entry(self.arucoGridMarkerUIFrame2, textvariable=self.arucoGridMarkerSavePageBorderXStr).grid(row=1, column=5, sticky = tk.NSEW)
tk.Entry(self.arucoGridMarkerUIFrame2, textvariable=self.arucoGridMarkerSavePageBorderYStr).grid(row=1, column=6, sticky = tk.NSEW)
self.arucoGridMarkerDictionaryMenue['menu'].delete(0, 'end')
for dictName in self.dictList:
self.arucoGridMarkerDictionaryMenue['menu'].add_command(label=dictName, command=tk._setit(self.arucoGridMarkerDictionaryStr, dictName, self.OnSelectArucoGridMarkerDictionary))
self.OnSelectArucoGridMarkerDictionary("DICT_ARUCO_ORIGINAL")
def OnSelectArucoMarkerDictionary(self, pDictName):
self.arucoMarkerDictionaryStr.set(pDictName)
def OnPreviewOrSaveArucoMarker(self, askSave = False):
try:
markerID = int(self.arucoMarkerMarkerIDStr.get())
markerLength = float(self.arucoMarkerMarkerLengthStr.get())
borderBits = int(self.arucoMarkerBorderBitsStr.get())
dictionary = self.arucoMarkerDictionaryStr.get()
pageBorderX = float(self.arucoMarkerSavePageBorderXStr.get())
pageBorderY = float(self.arucoMarkerSavePageBorderYStr.get())
except ValueError as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "Enter invalid parameters")
return
except Exception as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "Fail to get parameters")
return
# Preview
try:
dpi=self.VisDPI(((markerLength + pageBorderY * 2) * MarkerPrinter.ptPerMeter, (markerLength + pageBorderX * 2) * MarkerPrinter.ptPerMeter))
tkImage = PIL.ImageTk.PhotoImage(image = MarkerPrinter.PreviewArucoMarkerImage(dictionary, markerID, markerLength, borderBits=borderBits, pageBorder = (pageBorderX, pageBorderY), dpi=dpi))
self.arucoMarkerImageLabel.imgtk = tkImage
self.arucoMarkerImageLabel.config(image=tkImage)
except Exception as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "create marker failed")
return
# Save
if(askSave):
MarkerPrinterGUI.__SaveMarker(MarkerPrinter.GenArucoMarkerImage, \
dictionary, markerID, markerLength, borderBits=borderBits, pageBorder = (pageBorderX, pageBorderY))
def OnPreviewArucoMarker(self):
self.OnPreviewOrSaveArucoMarker(askSave = False)
def OnSaveArucoMarker(self):
self.OnPreviewOrSaveArucoMarker(askSave = True)
def InitArucoMarkerTab(self):
self.arucoMarkerUIFrame = ttk.Frame(self.arucoMarkerTab)
self.arucoMarkerImageTab = ttk.Frame(self.arucoMarkerTab)
self.arucoMarkerUIFrame2 = ttk.Frame(self.arucoMarkerTab)
self.arucoMarkerUIFrame.grid(row=0, column=0, sticky = tk.NSEW)
self.arucoMarkerImageTab.grid(row=1, column=0, sticky = tk.NSEW)
self.arucoMarkerUIFrame2.grid(row=2, column=0, sticky = tk.NSEW)
self.arucoMarkerImageLabel = tk.Label(self.arucoMarkerImageTab)
self.arucoMarkerImageLabel.grid(row=0, column=0, sticky = tk.NSEW)
tk.Label(self.arucoMarkerUIFrame, text="dictionary").grid(row=0, column=0, sticky = tk.NSEW)
tk.Label(self.arucoMarkerUIFrame, text="markerID").grid(row=0, column=1, sticky = tk.NSEW)
tk.Label(self.arucoMarkerUIFrame, text="markerLength (Unit: Meter)").grid(row=0, column=2, sticky = tk.NSEW)
tk.Label(self.arucoMarkerUIFrame, text="borderBits").grid(row=0, column=3, sticky = tk.NSEW)
self.arucoMarkerDictionaryStr = tk.StringVar()
self.arucoMarkerMarkerIDStr = tk.StringVar()
self.arucoMarkerMarkerIDStr.set("0")
self.arucoMarkerMarkerLengthStr = tk.StringVar()
self.arucoMarkerMarkerLengthStr.set("0.07")
self.arucoMarkerBorderBitsStr = tk.StringVar()
self.arucoMarkerBorderBitsStr.set("1")
self.arucoMarkerDictionaryMenue = tk.OptionMenu(self.arucoMarkerUIFrame, self.arucoMarkerDictionaryStr, "DICT_ARUCO_ORIGINAL", command = self.OnSelectArucoMarkerDictionary)
self.arucoMarkerDictionaryMenue.grid(row=1, column=0, sticky = tk.NSEW)
tk.Entry(self.arucoMarkerUIFrame, textvariable=self.arucoMarkerMarkerIDStr).grid(row=1, column=1, sticky = tk.NSEW)
tk.Entry(self.arucoMarkerUIFrame, textvariable=self.arucoMarkerMarkerLengthStr).grid(row=1, column=2, sticky = tk.NSEW)
tk.Entry(self.arucoMarkerUIFrame, textvariable=self.arucoMarkerBorderBitsStr).grid(row=1, column=3, sticky = tk.NSEW)
tk.Button(self.arucoMarkerUIFrame2, text = "Preview", command = self.OnPreviewArucoMarker).grid(row=0, column=0, sticky = tk.NSEW)
tk.Button(self.arucoMarkerUIFrame2, text = "Save", command = self.OnSaveArucoMarker).grid(row=0, column=1, sticky = tk.NSEW)
tk.Label(self.arucoMarkerUIFrame2, text="Save opetions:").grid(row=0, column=2, sticky = tk.NSEW)
tk.Label(self.arucoMarkerUIFrame2, text="(set 0 as disable)").grid(row=1, column=2, sticky = tk.NSEW)
tk.Label(self.arucoMarkerUIFrame2, text="pageBorderX (Unit: Meter)").grid(row=0, column=3, sticky = tk.NSEW)
tk.Label(self.arucoMarkerUIFrame2, text="pageBorderY (Unit: Meter)").grid(row=0, column=4, sticky = tk.NSEW)
tk.Label(self.arucoMarkerUIFrame2, text="Border or page").grid(row=2, column=3, sticky = tk.NSEW)
tk.Label(self.arucoMarkerUIFrame2, text="Border or page").grid(row=2, column=4, sticky = tk.NSEW)
self.arucoMarkerSavePageBorderXStr = tk.StringVar()
self.arucoMarkerSavePageBorderXStr.set("0.02")
self.arucoMarkerSavePageBorderYStr = tk.StringVar()
self.arucoMarkerSavePageBorderYStr.set("0.02")
tk.Entry(self.arucoMarkerUIFrame2, textvariable=self.arucoMarkerSavePageBorderXStr).grid(row=1, column=3, sticky = tk.NSEW)
tk.Entry(self.arucoMarkerUIFrame2, textvariable=self.arucoMarkerSavePageBorderYStr).grid(row=1, column=4, sticky = tk.NSEW)
self.arucoMarkerDictionaryMenue['menu'].delete(0, 'end')
for dictName in self.dictList:
self.arucoMarkerDictionaryMenue['menu'].add_command(label=dictName, command=tk._setit(self.arucoMarkerDictionaryStr, dictName, self.OnSelectArucoMarkerDictionary))
self.OnSelectArucoMarkerDictionary("DICT_ARUCO_ORIGINAL")
def OnPreviewOrSaveChessMarker(self, askSave = False):
try:
sizeX = int(self.chessMarkerChessboardSizeXStr.get())
sizeY = int(self.chessMarkerChessboardSizeYStr.get())
squareLength = float(self.chessMarkerSquareLengthStr.get())
subSizeX = int(self.chessMarkerSaveSubSizeXStr.get())
subSizeY = int(self.chessMarkerSaveSubSizeYStr.get())
pageBorderX = float(self.chessMarkerSavePageBorderXStr.get())
pageBorderY = float(self.chessMarkerSavePageBorderYStr.get())
except ValueError as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "Enter invalid parameters")
return
except Exception as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "Fail to get parameters")
return
# Preview
try:
dpi=self.VisDPI(((sizeY * squareLength + pageBorderY * 2) * MarkerPrinter.ptPerMeter, (sizeX * squareLength + pageBorderX * 2) * MarkerPrinter.ptPerMeter))
tkImage = PIL.ImageTk.PhotoImage(image = MarkerPrinter.PreviewChessMarkerImage((sizeX, sizeY), squareLength, pageBorder = (pageBorderX, pageBorderY), dpi=dpi))
self.chessMarkerImageLabel.imgtk = tkImage
self.chessMarkerImageLabel.config(image=tkImage)
except Exception as e:
warnings.warn(str(e))
messagebox.showinfo("Error", "create marker failed")
return
# Save
if(askSave):
MarkerPrinterGUI.__SaveMarker(MarkerPrinter.GenChessMarkerImage, \
(sizeX, sizeY), squareLength, subSize = (subSizeX, subSizeY), pageBorder = (pageBorderX, pageBorderY))
def OnPreviewChessMarker(self):
self.OnPreviewOrSaveChessMarker(askSave = False)
def OnSaveChessMarker(self):
self.OnPreviewOrSaveChessMarker(askSave = True)
def InitChessMarkerTab(self):
self.chessMarkerUIFrame = ttk.Frame(self.chessMarkerTab)
self.chessMarkerImageTab = ttk.Frame(self.chessMarkerTab)
self.chessMarkerUIFrame2 = ttk.Frame(self.chessMarkerTab)
self.chessMarkerUIFrame.grid(row=0, column=0, sticky = tk.NSEW)
self.chessMarkerImageTab.grid(row=1, column=0, sticky = tk.NSEW)
self.chessMarkerUIFrame2.grid(row=2, column=0, sticky = tk.NSEW)
self.chessMarkerImageLabel = tk.Label(self.chessMarkerImageTab)
self.chessMarkerImageLabel.grid(row=0, column=0, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame, text="chessboardSizeX").grid(row=0, column=0, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame, text="chessboardSizeY").grid(row=0, column=1, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame, text="squareLength (Unit: Meter)").grid(row=0, column=2, sticky = tk.NSEW)
self.chessMarkerChessboardSizeXStr = tk.StringVar()
self.chessMarkerChessboardSizeXStr.set("16")
self.chessMarkerChessboardSizeYStr = tk.StringVar()
self.chessMarkerChessboardSizeYStr.set("9")
self.chessMarkerSquareLengthStr = tk.StringVar()
self.chessMarkerSquareLengthStr.set("0.09")
tk.Entry(self.chessMarkerUIFrame, textvariable=self.chessMarkerChessboardSizeXStr).grid(row=1, column=0, sticky = tk.NSEW)
tk.Entry(self.chessMarkerUIFrame, textvariable=self.chessMarkerChessboardSizeYStr).grid(row=1, column=1, sticky = tk.NSEW)
tk.Entry(self.chessMarkerUIFrame, textvariable=self.chessMarkerSquareLengthStr).grid(row=1, column=2, sticky = tk.NSEW)
tk.Button(self.chessMarkerUIFrame2, text = "Preview", command = self.OnPreviewChessMarker).grid(row=1, column=0, sticky = tk.NSEW)
tk.Button(self.chessMarkerUIFrame2, text = "Save", command = self.OnSaveChessMarker).grid(row=1, column=1, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame2, text="Save opetions:").grid(row=0, column=2, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame2, text="(set 0 as disable)").grid(row=1, column=2, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame2, text="subSizeX").grid(row=0, column=3, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame2, text="subSizeY").grid(row=0, column=4, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame2, text="Divide to chunks, chunk sizeX").grid(row=2, column=3, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame2, text="Divide to chunks, chunk sizeY").grid(row=2, column=4, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame2, text="pageBorderX (Unit: Meter)").grid(row=0, column=5, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame2, text="pageBorderY (Unit: Meter)").grid(row=0, column=6, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame2, text="Border or page").grid(row=2, column=5, sticky = tk.NSEW)
tk.Label(self.chessMarkerUIFrame2, text="Border or page").grid(row=2, column=6, sticky = tk.NSEW)
self.chessMarkerSaveSubSizeXStr = tk.StringVar()
self.chessMarkerSaveSubSizeXStr.set("0")
self.chessMarkerSaveSubSizeYStr = tk.StringVar()
self.chessMarkerSaveSubSizeYStr.set("0")
self.chessMarkerSavePageBorderXStr = tk.StringVar()
self.chessMarkerSavePageBorderXStr.set("0.02")
self.chessMarkerSavePageBorderYStr = tk.StringVar()
self.chessMarkerSavePageBorderYStr.set("0.02")
tk.Entry(self.chessMarkerUIFrame2, textvariable=self.chessMarkerSaveSubSizeXStr).grid(row=1, column=3, sticky = tk.NSEW)
tk.Entry(self.chessMarkerUIFrame2, textvariable=self.chessMarkerSaveSubSizeYStr).grid(row=1, column=4, sticky = tk.NSEW)
tk.Entry(self.chessMarkerUIFrame2, textvariable=self.chessMarkerSavePageBorderXStr).grid(row=1, column=5, sticky = tk.NSEW)
tk.Entry(self.chessMarkerUIFrame2, textvariable=self.chessMarkerSavePageBorderYStr).grid(row=1, column=6, sticky = tk.NSEW)
def Update(self):
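# Periodic no-op callback that reschedules itself with window.after(), keeping the
# Tk event loop ticking every self.delay milliseconds.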
time.sleep(0)
self.window.after(self.delay, self.Update)
def __init__(self, pDelay=15, pDisplayShape=(int(400), int(1200))):
self.delay = pDelay
self.displayShape = pDisplayShape
self.dictList = MarkerPrinter.arucoDictBytesList.keys()
# GUI
self.window = tk.Tk()
self.notebook = ttk.Notebook(self.window)
self.notebook.grid(row=0, column=0, sticky = tk.NSEW)
self.window.title("MarkerPrinterGUI")
self.window.config(cursor="arrow")
self.window.protocol("WM_DELETE_WINDOW", self.OnCloseWindow)
# Menus
self.menu = tk.Menu(self.window)
self.helpMenu = tk.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label="Help", menu=self.helpMenu)
self.helpMenu.add_command(label="Github", command=self.OnShowingHelpGithub)
self.helpMenu.add_command(label="DEBUG_LINE_MODE", command=self.On_DEBUG_LINE_MODE)
self.helpMenu.add_command(label="DEBUG_BLOCK_MODE", command=self.On_DEBUG_BLOCK_MODE)
self.helpMenu.add_command(label="CLOSE_DEBUG_MODE", command=self.On_CLOSE_DEBUG_MODE)
self.window.config(menu=self.menu)
self.charucoMarkerTab = ttk.Frame(self.notebook)
self.arucoMarkerTab = ttk.Frame(self.notebook)
self.arucoGridMarkerTab = ttk.Frame(self.notebook)
self.chessMarkerTab = ttk.Frame(self.notebook)
self.notebook.add(self.charucoMarkerTab, text='ChArUco Marker')
self.notebook.add(self.arucoMarkerTab, text='ArUco Marker')
self.notebook.add(self.arucoGridMarkerTab, text='ArUcoGrid Marker')
self.notebook.add(self.chessMarkerTab, text='Chessboard Marker')
self.InitCharucoMarkerTab()
self.InitArucoMarkerTab()
self.InitArucoGridMarkerTab()
self.InitChessMarkerTab()
self.Update()
self.window.mainloop()
def On_DEBUG_LINE_MODE(self):
messagebox.showinfo("Note", "You enabled the debug mode: \"LINE\"")
MarkerPrinter.debugMode = "LINE"
def On_DEBUG_BLOCK_MODE(self):
messagebox.showinfo("Note", "You enabled the debug mode: \"BLOCK\"")
MarkerPrinter.debugMode = "BLOCK"
def On_CLOSE_DEBUG_MODE(self):
messagebox.showinfo("Note", "You closed the debug mode")
MarkerPrinter.debugMode = None
if __name__ == '__main__':
MarkerPrinterGUI()
|
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import os
import cv2 as cv
from tests_common import NewOpenCVTests, unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
MODULE_DIR = os.path.join(SCRIPT_DIR, '../../../')
class shape_test(NewOpenCVTests):
def test_computeDistance(self):
a = cv.imread(os.path.join(MODULE_DIR, 'samples/data/shape_sample/1.png'), cv.IMREAD_GRAYSCALE)
b = cv.imread(os.path.join(MODULE_DIR, 'samples/data/shape_sample/2.png'), cv.IMREAD_GRAYSCALE)
if a is None or b is None:
raise unittest.SkipTest("Missing files with test data")
ca, _ = cv.findContours(a, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS)
cb, _ = cv.findContours(b, cv.RETR_CCOMP, cv.CHAIN_APPROX_TC89_KCOS)
hd = cv.createHausdorffDistanceExtractor()
sd = cv.createShapeContextDistanceExtractor()
d1 = hd.computeDistance(ca[0], cb[0])
d2 = sd.computeDistance(ca[0], cb[0])
self.assertAlmostEqual(d1, 26.4196891785, 3, "HausdorffDistanceExtractor")
self.assertAlmostEqual(d2, 0.25804194808, 3, "ShapeContextDistanceExtractor")
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
|
#!/usr/bin/env python
import os
import cv2 as cv
from tests_common import NewOpenCVTests, unittest
class test_dnn_superres(NewOpenCVTests):
@unittest.skipIf('OPENCV_TEST_DATA_PATH' not in os.environ,
"OPENCV_TEST_DATA_PATH is not defined")
def test_single_output(self):
# Get test data paths
dnn_superres_test_path = os.environ['OPENCV_TEST_DATA_PATH'] + "/cv/dnn_superres/"
img_path = dnn_superres_test_path + "butterfly.png"
espcn_path = dnn_superres_test_path + "ESPCN_x2.pb"
# Create an SR object
sr = cv.dnn_superres.DnnSuperResImpl_create()
# Read image
image = cv.imread(img_path)
inp_h, inp_w, inp_c = image.shape
# Read the desired model
sr.readModel(espcn_path)
# Set the desired model and scale to get correct pre- and post-processing
sr.setModel("espcn", 2)
# Upscale the image
result = sr.upsample(image)
out_h, out_w, out_c = result.shape
# CHECK...
# if result is not empty
self.assertFalse(result is None)
# upsampled image is correct size
self.assertEqual(out_h, inp_h*2)
self.assertEqual(out_w, inp_w*2)
self.assertEqual(out_c, inp_c)
# get functions work
self.assertEqual(sr.getScale(), 2)
self.assertEqual(sr.getAlgorithm(), "espcn")
if __name__ == '__main__':
NewOpenCVTests.bootstrap() |
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
class cudaobjdetect_test(NewOpenCVTests):
def setUp(self):
super(cudaobjdetect_test, self).setUp()
if not cv.cuda.getCudaEnabledDeviceCount():
self.skipTest("No CUDA-capable device is detected")
@unittest.skipIf('OPENCV_TEST_DATA_PATH' not in os.environ,
"OPENCV_TEST_DATA_PATH is not defined")
def test_hog(self):
img_path = os.environ['OPENCV_TEST_DATA_PATH'] + '/gpu/caltech/image_00000009_0.png'
npMat = cv.cvtColor(cv.imread(img_path),cv.COLOR_BGR2BGRA)
cuMat = cv.cuda_GpuMat(npMat)
cuHog = cv.cuda.HOG_create()
cuHog.setSVMDetector(cuHog.getDefaultPeopleDetector())
loc, conf = cuHog.detect(cuMat)
self.assertTrue(len(loc) == len(conf) and len(loc) > 0 and len(loc[0]) == 2)
loc = cuHog.detectWithoutConf(cuMat)
self.assertTrue(len(loc) > 0 and len(loc[0]) == 2)
loc = cuHog.detectMultiScaleWithoutConf(cuMat)
self.assertTrue(len(loc) > 0 and len(loc[0]) == 4)
cuHog.setGroupThreshold(0)
loc, conf = cuHog.detectMultiScale(cuMat)
self.assertTrue(len(loc) == len(conf) and len(loc) > 0 and len(loc[0]) == 4)
if __name__ == '__main__':
NewOpenCVTests.bootstrap() |
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
class cudabgsegm_test(NewOpenCVTests):
def setUp(self):
super(cudabgsegm_test, self).setUp()
if not cv.cuda.getCudaEnabledDeviceCount():
self.skipTest("No CUDA-capable device is detected")
def test_cudabgsegm(self):
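# Smoke test: run MOG and MOG2 background subtraction on a random 8-bit frame, once
# with preallocated GpuMat outputs and once with returned results, and check both
# paths produce identical foreground masks and background images.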
lr = 0.05
sz = (128,128,1)
npMat = (np.random.random(sz) * 255).astype(np.uint8)
cuMat = cv.cuda_GpuMat(npMat)
cuMatBg = cv.cuda_GpuMat(cuMat.size(),cuMat.type())
cuMatFg = cv.cuda_GpuMat(cuMat.size(),cuMat.type())
mog = cv.cuda.createBackgroundSubtractorMOG()
mog.apply(cuMat, lr, cv.cuda.Stream_Null(), cuMatFg)
mog.getBackgroundImage(cv.cuda.Stream_Null(),cuMatBg)
self.assertTrue(sz[:2] == cuMatFg.size() == cuMatBg.size())
self.assertTrue(sz[2] == cuMatFg.channels() == cuMatBg.channels())
self.assertTrue(cv.CV_8UC1 == cuMatFg.type() == cuMatBg.type())
mog = cv.cuda.createBackgroundSubtractorMOG()
self.assertTrue(np.allclose(cuMatFg.download(),mog.apply(cuMat, lr, cv.cuda.Stream_Null()).download()))
self.assertTrue(np.allclose(cuMatBg.download(),mog.getBackgroundImage(cv.cuda.Stream_Null()).download()))
mog2 = cv.cuda.createBackgroundSubtractorMOG2()
mog2.apply(cuMat, lr, cv.cuda.Stream_Null(), cuMatFg)
mog2.getBackgroundImage(cv.cuda.Stream_Null(),cuMatBg)
self.assertTrue(sz[:2] == cuMatFg.size() == cuMatBg.size())
self.assertTrue(sz[2] == cuMatFg.channels() == cuMatBg.channels())
self.assertTrue(cv.CV_8UC1 == cuMatFg.type() == cuMatBg.type())
mog2 = cv.cuda.createBackgroundSubtractorMOG2()
self.assertTrue(np.allclose(cuMatFg.download(),mog2.apply(cuMat, lr, cv.cuda.Stream_Null()).download()))
self.assertTrue(np.allclose(cuMatBg.download(),mog2.getBackgroundImage(cv.cuda.Stream_Null()).download()))
if __name__ == '__main__':
NewOpenCVTests.bootstrap() |
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
class cudacodec_test(NewOpenCVTests):
def setUp(self):
super(cudacodec_test, self).setUp()
if not cv.cuda.getCudaEnabledDeviceCount():
self.skipTest("No CUDA-capable device is detected")
@unittest.skipIf('OPENCV_TEST_DATA_PATH' not in os.environ,
"OPENCV_TEST_DATA_PATH is not defined")
def test_reader(self):
#Test the functionality but not the results of the video reader
vid_path = os.environ['OPENCV_TEST_DATA_PATH'] + '/cv/video/1920x1080.avi'
try:
reader = cv.cudacodec.createVideoReader(vid_path)
ret, gpu_mat = reader.nextFrame()
self.assertTrue(ret)
self.assertTrue('GpuMat' in str(type(gpu_mat)), msg=type(gpu_mat))
#TODO: print(cv.utils.dumpInputArray(gpu_mat)) # - no support for GpuMat
# not checking output, therefore separate tests for different signatures are unnecessary
ret, _gpu_mat2 = reader.nextFrame(gpu_mat)
#TODO: self.assertTrue(gpu_mat == gpu_mat2)
self.assertTrue(ret)
except cv.error as e:
notSupported = (e.code == cv.Error.StsNotImplemented or e.code == cv.Error.StsUnsupportedFormat or e.code == cv.Error.GPU_API_CALL_ERROR)
self.assertTrue(notSupported)
if e.code == cv.Error.StsNotImplemented:
self.skipTest("NVCUVID is not installed")
elif e.code == cv.Error.StsUnsupportedFormat:
self.skipTest("GPU hardware video decoder missing or video format not supported")
elif e.code == cv.Error.GPU_API_CALL_ERROR:
self.skipTest("GPU hardware video decoder is missing")
else:
self.skipTest(e.err)
def test_writer_existence(self):
#Test at least the existence of wrapped functions for now
try:
_writer = cv.cudacodec.createVideoWriter("tmp", (128, 128), 30)
except cv.error as e:
self.assertEqual(e.code, cv.Error.StsNotImplemented)
self.skipTest("NVCUVENC is not installed")
self.assertTrue(True) #It is sufficient that no exceptions were raised
if __name__ == '__main__':
NewOpenCVTests.bootstrap() |
import random
import numpy as np
import cv2 as cv
frame1 = cv.imread(cv.samples.findFile('lena.jpg'))
if frame1 is None:
print("image not found")
exit()
frame = np.vstack((frame1,frame1))
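# Stack two copies of the image vertically so the cascade detects more than one face
# and the facemark drawing loop below has several faces to annotate.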
facemark = cv.face.createFacemarkLBF()
try:
facemark.loadModel(cv.samples.findFile('lbfmodel.yaml'))
except cv.error:
print("Model not found\nlbfmodel.yaml can be download at")
print("https://raw.githubusercontent.com/kurnianggoro/GSOC2017/master/data/lbfmodel.yaml")
cascade = cv.CascadeClassifier(cv.samples.findFile('lbpcascade_frontalface_improved.xml'))
if cascade.empty() :
print("cascade not found")
exit()
faces = cascade.detectMultiScale(frame, 1.05, 3, cv.CASCADE_SCALE_IMAGE, (30, 30))
ok, landmarks = facemark.fit(frame, faces=faces)
cv.imshow("Image", frame)
for marks in landmarks:
couleur = (random.randint(0,255),
random.randint(0,255),
random.randint(0,255))
cv.face.drawFacemarks(frame, marks, couleur)
cv.imshow("Image Landmarks", frame)
cv.waitKey()
|
#!/usr/bin/env python
import sys
import os.path
# This is a tiny script to help you create a CSV file from a face
# database with a hierarchy similar to this:
#
# philipp@mango:~/facerec/data/at$ tree
# .
# |-- README
# |-- s1
# | |-- 1.pgm
# | |-- ...
# | |-- 10.pgm
# |-- s2
# | |-- 1.pgm
# | |-- ...
# | |-- 10.pgm
# ...
# |-- s40
# | |-- 1.pgm
# | |-- ...
# | |-- 10.pgm
#
if __name__ == "__main__":
if len(sys.argv) != 2:
print "usage: create_csv <base_path>"
sys.exit(1)
BASE_PATH=sys.argv[1]
SEPARATOR=";"
label = 0
for dirname, dirnames, filenames in os.walk(BASE_PATH):
for subdirname in dirnames:
subject_path = os.path.join(dirname, subdirname)
for filename in os.listdir(subject_path):
abs_path = "%s/%s" % (subject_path, filename)
print "%s%s%d" % (abs_path, SEPARATOR, label)
label = label + 1
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Philipp Wagner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys, math, Image
def Distance(p1,p2):
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
return math.sqrt(dx*dx+dy*dy)
def ScaleRotateTranslate(image, angle, center = None, new_center = None, scale = None, resample=Image.BICUBIC):
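# Build the six affine coefficients expected by Image.transform (output-to-input mapping):
# rotate by `angle` around `center`, optionally scale, and translate so `center` maps to
# `new_center`.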
if (scale is None) and (center is None):
return image.rotate(angle=angle, resample=resample)
nx,ny = x,y = center
sx=sy=1.0
if new_center:
(nx,ny) = new_center
if scale:
(sx,sy) = (scale, scale)
cosine = math.cos(angle)
sine = math.sin(angle)
a = cosine/sx
b = sine/sx
c = x-nx*a-ny*b
d = -sine/sy
e = cosine/sy
f = y-nx*d-ny*e
return image.transform(image.size, Image.AFFINE, (a,b,c,d,e,f), resample=resample)
def CropFace(image, eye_left=(0,0), eye_right=(0,0), offset_pct=(0.2,0.2), dest_sz = (70,70)):
# calculate offsets in original image
offset_h = math.floor(float(offset_pct[0])*dest_sz[0])
offset_v = math.floor(float(offset_pct[1])*dest_sz[1])
# get the direction
eye_direction = (eye_right[0] - eye_left[0], eye_right[1] - eye_left[1])
# calc rotation angle in radians
rotation = -math.atan2(float(eye_direction[1]),float(eye_direction[0]))
# distance between them
dist = Distance(eye_left, eye_right)
# calculate the reference eye-width
reference = dest_sz[0] - 2.0*offset_h
# scale factor
scale = float(dist)/float(reference)
# rotate original around the left eye
image = ScaleRotateTranslate(image, center=eye_left, angle=rotation)
# crop the rotated image
crop_xy = (eye_left[0] - scale*offset_h, eye_left[1] - scale*offset_v)
crop_size = (dest_sz[0]*scale, dest_sz[1]*scale)
image = image.crop((int(crop_xy[0]), int(crop_xy[1]), int(crop_xy[0]+crop_size[0]), int(crop_xy[1]+crop_size[1])))
# resize it
image = image.resize(dest_sz, Image.ANTIALIAS)
return image
def readFileNames():
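# Read the semicolon-separated CSV produced by the create_csv helper above and return
# the image paths together with their numeric labels.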
try:
inFile = open('path_to_created_csv_file.csv')
except IOError:
raise IOError('There is no file named path_to_created_csv_file.csv in the current directory.')
picPath = []
picIndex = []
for line in inFile.readlines():
if line != '':
fields = line.rstrip().split(';')
picPath.append(fields[0])
picIndex.append(int(fields[1]))
return (picPath, picIndex)
if __name__ == "__main__":
[images, indexes]=readFileNames()
if not os.path.exists("modified"):
os.makedirs("modified")
for img in images:
image = Image.open(img)
CropFace(image, eye_left=(252,364), eye_right=(420,366), offset_pct=(0.1,0.1), dest_sz=(200,200)).save("modified/"+img.rstrip().split('/')[1]+"_10_10_200_200.jpg")
CropFace(image, eye_left=(252,364), eye_right=(420,366), offset_pct=(0.2,0.2), dest_sz=(200,200)).save("modified/"+img.rstrip().split('/')[1]+"_20_20_200_200.jpg")
CropFace(image, eye_left=(252,364), eye_right=(420,366), offset_pct=(0.3,0.3), dest_sz=(200,200)).save("modified/"+img.rstrip().split('/')[1]+"_30_30_200_200.jpg")
CropFace(image, eye_left=(252,364), eye_right=(420,366), offset_pct=(0.2,0.2)).save("modified/"+img.rstrip().split('/')[1]+"_20_20_70_70.jpg")
|
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
class cudaimgproc_test(NewOpenCVTests):
def setUp(self):
super(cudaimgproc_test, self).setUp()
if not cv.cuda.getCudaEnabledDeviceCount():
self.skipTest("No CUDA-capable device is detected")
def test_cudaimgproc(self):
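# Broad smoke test: call a range of cuda imgproc wrappers on random GpuMats without
# checking numerical results; passing means no binding raised an exception.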
npC1 = (np.random.random((128, 128)) * 255).astype(np.uint8)
npC3 = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
npC4 = (np.random.random((128, 128, 4)) * 255).astype(np.uint8)
cuC1 = cv.cuda_GpuMat()
cuC3 = cv.cuda_GpuMat()
cuC4 = cv.cuda_GpuMat()
cuC1.upload(npC1)
cuC3.upload(npC3)
cuC4.upload(npC4)
cv.cuda.cvtColor(cuC3, cv.COLOR_RGB2HSV)
cv.cuda.demosaicing(cuC1, cv.cuda.COLOR_BayerGR2BGR_MHT)
cv.cuda.gammaCorrection(cuC3)
cv.cuda.alphaComp(cuC4, cuC4, cv.cuda.ALPHA_XOR)
cv.cuda.calcHist(cuC1)
cv.cuda.equalizeHist(cuC1)
cv.cuda.evenLevels(3, 0, 255)
cv.cuda.meanShiftFiltering(cuC4, 10, 5)
cv.cuda.meanShiftProc(cuC4, 10, 5)
cv.cuda.bilateralFilter(cuC3, 3, 16, 3)
cv.cuda.blendLinear # attribute lookup only: verifies the binding exists without calling it
cuRes = cv.cuda.meanShiftSegmentation(cuC4, 10, 5, 5)
cuDst = cv.cuda_GpuMat(cuC4.size(),cuC4.type())
cv.cuda.meanShiftSegmentation(cuC4, 10, 5, 5, cuDst)
self.assertTrue(np.allclose(cuRes.download(),cuDst.download()))
clahe = cv.cuda.createCLAHE()
clahe.apply(cuC1, cv.cuda_Stream.Null())
histLevels = cv.cuda.histEven(cuC3, 20, 0, 255)
cv.cuda.histRange(cuC1, histLevels)
detector = cv.cuda.createCannyEdgeDetector(0, 100)
detector.detect(cuC1)
detector = cv.cuda.createHoughLinesDetector(3, np.pi / 180, 20)
detector.detect(cuC1)
detector = cv.cuda.createHoughSegmentDetector(3, np.pi / 180, 20, 5)
detector.detect(cuC1)
detector = cv.cuda.createHoughCirclesDetector(3, 20, 10, 10, 20, 100)
detector.detect(cuC1)
detector = cv.cuda.createGeneralizedHoughBallard()
#BUG: detect accepts only Mat!
#Even if generate_gpumat_decls is set to True, it only wraps overloaded CUDA functions.
#The problem is that Mat and GpuMat are not compatible enough to enable system-wide overloading
#detector.detect(cuC1, cuC1, cuC1)
detector = cv.cuda.createGeneralizedHoughGuil()
#BUG: same as above..
#detector.detect(cuC1, cuC1, cuC1)
detector = cv.cuda.createHarrisCorner(cv.CV_8UC1, 15, 5, 1)
detector.compute(cuC1)
detector = cv.cuda.createMinEigenValCorner(cv.CV_8UC1, 15, 5, 1)
detector.compute(cuC1)
detector = cv.cuda.createGoodFeaturesToTrackDetector(cv.CV_8UC1)
detector.detect(cuC1)
matcher = cv.cuda.createTemplateMatching(cv.CV_8UC1, cv.TM_CCOEFF_NORMED)
matcher.match(cuC3, cuC3)
self.assertTrue(True) #It is sufficient that no exceptions were raised
def test_cvtColor(self):
npMat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
cuMat = cv.cuda_GpuMat()
cuMat.upload(npMat)
self.assertTrue(np.allclose(cv.cuda.cvtColor(cuMat, cv.COLOR_BGR2HSV).download(),
cv.cvtColor(npMat, cv.COLOR_BGR2HSV)))
if __name__ == '__main__':
NewOpenCVTests.bootstrap() |
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
class xfeatures2d_test(NewOpenCVTests):
def setUp(self):
super(xfeatures2d_test, self).setUp()
if not cv.cuda.getCudaEnabledDeviceCount():
self.skipTest("No CUDA-capable device is detected")
@unittest.skipIf('OPENCV_TEST_DATA_PATH' not in os.environ,
"OPENCV_TEST_DATA_PATH is not defined")
def test_surf(self):
img_path = os.environ['OPENCV_TEST_DATA_PATH'] + "/gpu/features2d/aloe.png"
hessianThreshold = 100
nOctaves = 3
nOctaveLayers = 2
extended = False
keypointsRatio = 0.05
upright = False
npMat = cv.cvtColor(cv.imread(img_path),cv.COLOR_BGR2GRAY)
cuMat = cv.cuda_GpuMat(npMat)
try:
cuSurf = cv.cuda_SURF_CUDA.create(hessianThreshold,nOctaves,nOctaveLayers,extended,keypointsRatio,upright)
surf = cv.xfeatures2d_SURF.create(hessianThreshold,nOctaves,nOctaveLayers,extended,upright)
except cv.error as e:
self.assertEqual(e.code, cv.Error.StsNotImplemented)
self.skipTest("OPENCV_ENABLE_NONFREE is not enabled in this build.")
cuKeypoints = cuSurf.detect(cuMat,cv.cuda_GpuMat())
keypointsHost = cuSurf.downloadKeypoints(cuKeypoints)
keypoints = surf.detect(npMat)
self.assertTrue(len(keypointsHost) == len(keypoints))
cuKeypoints, cuDescriptors = cuSurf.detectWithDescriptors(cuMat,cv.cuda_GpuMat(),cuKeypoints,useProvidedKeypoints=True)
keypointsHost = cuSurf.downloadKeypoints(cuKeypoints)
descriptorsHost = cuDescriptors.download()
keypoints, descriptors = surf.compute(npMat,keypoints)
self.assertTrue(len(keypointsHost) == len(keypoints) and descriptorsHost.shape == descriptors.shape)
if __name__ == '__main__':
NewOpenCVTests.bootstrap() |
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import os
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class sift_compatibility_test(NewOpenCVTests):
def test_create(self):
sift = cv.xfeatures2d.SIFT_create()
self.assertFalse(sift is None)
img1 = np.zeros((100, 100, 3), dtype=np.uint8)
kp1_, des1_ = sift.detectAndCompute(img1, None)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
|
#!/usr/bin/python
"""
/*********************************************************************
* Software License Agreement (BSD License)
*
* Copyright (c) 2016
*
* Balint Cristian <cristian dot balint at gmail dot com>
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*********************************************************************/
/* export-boostdesc.py */
/* Export C headers from binary data */
// [http://infoscience.epfl.ch/record/186246/files/boostDesc_1.0.tar.gz]
"""
import sys
import struct
def float_to_hex(f):
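# Reinterpret the float's little-endian bit pattern as an unsigned 32-bit integer so it
# can be emitted as a 0x%08x literal in the generated C header.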
return struct.unpack( '<I', struct.pack('<f', f) )[0]
def main():
# usage
if ( len(sys.argv) < 3 ):
print( "Usage: %s <binary-type (BGM, LBGM, BINBOOST)> <boostdesc-binary-filename>" % sys.argv[0] )
sys.exit(0)
if ( ( sys.argv[1] != "BGM" ) and
( sys.argv[1] != "LBGM" ) and
( sys.argv[1] != "BINBOOST" ) ):
print( "Invalid type [%s]" % sys.argv[1] )
sys.exit(0)
# enum literals
Assign = [ "ASSIGN_HARD",
"ASSIGN_BILINEAR",
"ASSIGN_SOFT",
"ASSIGN_HARD_MAGN",
"ASSIGN_SOFT_MAGN" ]
# open binary data file
f = open( sys.argv[2], 'rb' )
# header
print "/*"
print " *"
print " * Header exported from binary."
print " * [%s %s %s]" % ( sys.argv[0], sys.argv[1], sys.argv[2] )
print " *"
print " */"
    # initial values
nDim = 1;
nWLs = 0;
    # dimensionality (where applicable)
if ( ( sys.argv[1] == "LBGM" ) or
( sys.argv[1] == "BINBOOST" ) ):
nDim = struct.unpack( '<i', f.read(4) )[0]
print
print "// dimensionality of learner"
print "static const int nDim = %i;" % nDim
    # weak learners (where applicable)
if ( sys.argv[1] != "BINBOOST" ):
nWLs = struct.unpack( '<i', f.read(4) )[0]
# common header
orientQuant = struct.unpack( '<i', f.read(4) )[0]
patchSize = struct.unpack( '<i', f.read(4) )[0]
iGradAssignType = struct.unpack( '<i', f.read(4) )[0]
print
print "// orientations"
print "static const int orientQuant = %i;" % orientQuant
print
print "// patch size"
print "static const int patchSize = %i;" % patchSize
print
print "// gradient assignment type"
print "static const int iGradAssignType = %s;" % Assign[iGradAssignType]
arr_thresh = ""
arr_orient = ""
arr__y_min = ""
arr__y_max = ""
arr__x_min = ""
arr__x_max = ""
arr__alpha = ""
arr___beta = ""
dims = nDim
if ( sys.argv[1] == "LBGM" ):
dims = 1
    # iterate over each dimension
for d in range( 0, dims ):
if ( sys.argv[1] == "BINBOOST" ):
nWLs = struct.unpack( '<i', f.read(4) )[0]
if ( d == 0 ):
print
print "// number of weak learners"
print "static const int nWLs = %i;" % nWLs
        # iterate over each weak learner
for i in range( 0, nWLs ):
# unpack structure array
thresh = struct.unpack( '<f', f.read(4) )[0]
orient = struct.unpack( '<i', f.read(4) )[0]
y_min = struct.unpack( '<i', f.read(4) )[0]
y_max = struct.unpack( '<i', f.read(4) )[0]
x_min = struct.unpack( '<i', f.read(4) )[0]
x_max = struct.unpack( '<i', f.read(4) )[0]
alpha = struct.unpack( '<f', f.read(4) )[0]
beta = 0
if ( sys.argv[1] == "BINBOOST" ):
beta = struct.unpack( '<f', f.read(4) )[0]
# first entry
if ( d*dims + i == 0 ):
arr_thresh += "\n"
arr_thresh += "// threshold array (%s x %s)\n" % (dims,nWLs)
arr_thresh += "static const unsigned int thresh[] =\n{\n"
arr_orient += "\n"
arr_orient += "// orientation array (%s x %s)\n" % (dims,nWLs)
arr_orient += "static const int orient[] =\n{\n"
arr__y_min += "\n"
arr__y_min += "// Y min array (%s x %s)\n" % (dims,nWLs)
arr__y_min += "static const int y_min[] =\n{\n"
arr__y_max += "\n"
arr__y_max += "// Y max array (%s x %s)\n" % (dims,nWLs)
arr__y_max += "static const int y_max[] =\n{\n"
arr__x_min += "\n"
arr__x_min += "// X min array (%s x %s)\n" % (dims,nWLs)
arr__x_min += "static const int x_min[] =\n{\n"
arr__x_max += "\n"
arr__x_max += "// X max array (%s x %s)\n" % (dims,nWLs)
arr__x_max += "static const int x_max[] =\n{\n"
arr__alpha += "\n"
arr__alpha += "// alpha array (%s x %s)\n" % (dims,nWLs)
arr__alpha += "static const unsigned int alpha[] =\n{\n"
if ( sys.argv[1] == "BINBOOST" ):
arr___beta += "\n"
arr___beta += "// beta array (%s x %s)\n" % (dims,nWLs)
arr___beta += "static const unsigned int beta[] =\n{\n"
# last entry
if ( i == nWLs - 1 ) and ( d == dims - 1):
arr_thresh += " 0x%08x\n};" % float_to_hex(thresh)
arr_orient += " 0x%02x\n};" % orient
arr__y_min += " 0x%02x\n};" % y_min
arr__y_max += " 0x%02x\n};" % y_max
arr__x_min += " 0x%02x\n};" % x_min
arr__x_max += " 0x%02x\n};" % x_max
arr__alpha += " 0x%08x\n};" % float_to_hex(alpha)
if ( sys.argv[1] == "BINBOOST" ):
arr___beta += " 0x%08x\n};" % float_to_hex(beta)
break
# align entries
if ( (d*dims + i + 1) % 8 ):
arr_thresh += " 0x%08x," % float_to_hex(thresh)
arr_orient += " 0x%02x," % orient
arr__y_min += " 0x%02x," % y_min
arr__y_max += " 0x%02x," % y_max
arr__x_min += " 0x%02x," % x_min
arr__x_max += " 0x%02x," % x_max
arr__alpha += " 0x%08x," % float_to_hex(alpha)
if ( sys.argv[1] == "BINBOOST" ):
arr___beta += " 0x%08x," % float_to_hex(beta)
else:
arr_thresh += " 0x%08x,\n" % float_to_hex(thresh)
arr_orient += " 0x%02x,\n" % orient
arr__y_min += " 0x%02x,\n" % y_min
arr__y_max += " 0x%02x,\n" % y_max
arr__x_min += " 0x%02x,\n" % x_min
arr__x_max += " 0x%02x,\n" % x_max
arr__alpha += " 0x%08x,\n" % float_to_hex(alpha)
if ( sys.argv[1] == "BINBOOST" ):
arr___beta += " 0x%08x,\n" % float_to_hex(beta)
# extra array (when LBGM)
if ( sys.argv[1] == "LBGM" ):
arr___beta += "\n"
arr___beta += "// beta array (%s x %s)\n" % (nWLs,nDim)
arr___beta += "static const unsigned int beta[] =\n{\n"
for i in range( 0, nWLs ):
for d in range( 0, nDim ):
beta = struct.unpack( '<f', f.read(4) )[0]
# last entry
if ( i == nWLs-1 ) and ( d == nDim-1 ):
arr___beta += " 0x%08x\n};" % float_to_hex(beta)
break
# align entries
if ( (i*nDim + d + 1) % 8 ):
arr___beta += " 0x%08x," % float_to_hex(beta)
else:
arr___beta += " 0x%08x,\n" % float_to_hex(beta)
# release
f.close()
# dump on screen
print arr_thresh
print arr_orient
print arr__y_min
print arr__y_max
print arr__x_min
print arr__x_max
print arr__alpha
if ( ( sys.argv[1] == "LBGM" ) or
( sys.argv[1] == "BINBOOST" ) ):
print arr___beta
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
def create_affine_transform_matrix(size,angle):
return np.array([[np.cos(angle), -np.sin(angle), size[1]/2], [np.sin(angle), np.cos(angle), 0]])
def create_perspective_transform_matrix(size,angle):
return np.vstack([create_affine_transform_matrix(size,angle),[0, 0, 1]])
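# Both helpers build a rotation about the origin combined with a shift of size[1]/2 along x;
# e.g. create_affine_transform_matrix((128, 128), 0) is effectively [[1, 0, 64], [0, 1, 0]],
# and the perspective variant simply appends the homogeneous row [0, 0, 1].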
class cudawarping_test(NewOpenCVTests):
def setUp(self):
super(cudawarping_test, self).setUp()
if not cv.cuda.getCudaEnabledDeviceCount():
self.skipTest("No CUDA-capable device is detected")
def test_resize(self):
dstSz = (256,256)
interp = cv.INTER_NEAREST
npMat = (np.random.random((128,128,3))*255).astype(np.uint8)
cuMat = cv.cuda_GpuMat(npMat)
cuMatDst = cv.cuda_GpuMat(dstSz,cuMat.type())
self.assertTrue(np.allclose(cv.cuda.resize(cuMat,dstSz,interpolation=interp).download(),
cv.resize(npMat,dstSz,interpolation=interp)))
cv.cuda.resize(cuMat,dstSz,cuMatDst,interpolation=interp)
self.assertTrue(np.allclose(cuMatDst.download(),cv.resize(npMat,dstSz,interpolation=interp)))
def test_warp(self):
npMat = (np.random.random((128,128,3))*255).astype(np.uint8)
size = npMat.shape[:2]
M1 = create_affine_transform_matrix(size,np.pi/2)
cuMat = cv.cuda_GpuMat(npMat)
cuMatDst = cv.cuda_GpuMat(size,cuMat.type())
borderType = cv.BORDER_REFLECT101
self.assertTrue(np.allclose(cv.cuda.warpAffine(cuMat,M1,size,borderMode=borderType).download(),
cv.warpAffine(npMat,M1,size, borderMode=borderType)))
cv.cuda.warpAffine(cuMat,M1,size,cuMatDst,borderMode=borderType)
self.assertTrue(np.allclose(cuMatDst.download(),cv.warpAffine(npMat,M1,size,borderMode=borderType)))
interpolation = cv.INTER_NEAREST
flags = interpolation | cv.WARP_INVERSE_MAP
dst_gold = cv.warpAffine(npMat, M1, size, flags = flags)
cuMaps = cv.cuda.buildWarpAffineMaps(M1,True,size)
dst = cv.remap(npMat, cuMaps[0].download(), cuMaps[1].download(),interpolation)
self.assertTrue(np.allclose(dst,dst_gold))
xmap = cv.cuda_GpuMat(size,cv.CV_32FC1)
ymap = cv.cuda_GpuMat(size,cv.CV_32FC1)
cv.cuda.buildWarpAffineMaps(M1,True,size,xmap,ymap)
dst = cv.remap(npMat, xmap.download(), ymap.download(),interpolation)
self.assertTrue(np.allclose(dst,dst_gold))
M2 = create_perspective_transform_matrix(size,np.pi/2)
        self.assertTrue(np.allclose(cv.cuda.warpPerspective(cuMat,M2,size,borderMode=borderType).download(),
                                    cv.warpPerspective(npMat,M2,size,borderMode=borderType)))
cv.cuda.warpPerspective(cuMat,M2,size,cuMatDst,borderMode=borderType)
self.assertTrue(np.allclose(cuMatDst.download(),cv.warpPerspective(npMat,M2,size,borderMode=borderType)))
dst_gold = cv.warpPerspective(npMat, M2, size, flags = flags)
cuMaps = cv.cuda.buildWarpPerspectiveMaps(M2,True,size)
dst = cv.remap(npMat, cuMaps[0].download(), cuMaps[1].download(),interpolation)
self.assertTrue(np.allclose(dst,dst_gold))
cv.cuda.buildWarpPerspectiveMaps(M2,True,size,xmap,ymap)
dst = cv.remap(npMat, xmap.download(), ymap.download(),interpolation)
self.assertTrue(np.allclose(dst,dst_gold))
if __name__ == '__main__':
NewOpenCVTests.bootstrap() |
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
class intensity_transform_test(NewOpenCVTests):
def setUp(self):
super(intensity_transform_test, self).setUp()
try:
result_ = cv.intensity_transform.BIMEF(None)
except cv.error as e:
if e.code == cv.Error.StsNotImplemented:
self.skipTest('BIMEF is not implemented (missing Eigen dependency)')
@unittest.skipIf('OPENCV_TEST_DATA_PATH' not in os.environ,
"OPENCV_TEST_DATA_PATH is not defined")
def test_BIMEF(self):
filenames = ['P1000205_resize', 'P1010676_resize', 'P1010815_resize']
for f in filenames:
img = self.get_sample('cv/intensity_transform/BIMEF/{}.png'.format(f))
self.assertTrue(img.size > 0)
img_ref = self.get_sample('cv/intensity_transform/BIMEF/{}_ref.png'.format(f))
self.assertTrue(img_ref.size > 0)
img_BIMEF = cv.intensity_transform.BIMEF(img)
self.assertTrue(img_BIMEF.size > 0)
self.assertTrue(img_BIMEF.shape == img_ref.shape)
self.assertTrue(img_BIMEF.dtype == img_ref.dtype)
RMSE = np.sqrt(cv.norm(img_BIMEF, img_ref, cv.NORM_L2SQR) / (img_ref.shape[0]*img_ref.shape[1]*img_ref.shape[2]))
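            # i.e. RMSE = sqrt(sum((img_BIMEF - img_ref)**2) / (height * width * channels)),
            # since NORM_L2SQR gives the sum of squared per-pixel differences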
max_RMSE_threshold = 9.0
self.assertLessEqual(RMSE, max_RMSE_threshold)
print('BIMEF RMSE:', RMSE)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
|
import argparse
import glob
import os
import subprocess
FRAME_DIST = 2
assert (FRAME_DIST >= 1)
def execute(cmd):
    # universal_newlines=True makes stdout/stderr text streams, so the '' sentinel below also works on Python 3
    popen = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ''):
print(stdout_line.rstrip())
for stderr_line in iter(popen.stderr.readline, ''):
print(stderr_line.rstrip())
popen.stdout.close()
popen.stderr.close()
return_code = popen.wait()
if return_code != 0:
raise subprocess.CalledProcessError(return_code, cmd)
def main():
parser = argparse.ArgumentParser(
description='Train Global Patch Collider using MPI Sintel dataset')
parser.add_argument(
'--bin_path',
help='Path to the training executable (example_optflow_gpc_train)',
required=True)
parser.add_argument('--dataset_path',
help='Path to the directory with frames',
required=True)
parser.add_argument('--gt_path',
help='Path to the directory with ground truth flow',
required=True)
parser.add_argument('--descriptor_type',
help='Descriptor type',
type=int,
default=0)
args = parser.parse_args()
seq = glob.glob(os.path.join(args.dataset_path, '*'))
seq.sort()
input_files = []
for s in seq:
seq_name = os.path.basename(s)
frames = glob.glob(os.path.join(s, 'frame*.png'))
frames.sort()
for i in range(0, len(frames) - 1, FRAME_DIST):
gt_flow = os.path.join(args.gt_path, seq_name,
os.path.basename(frames[i])[0:-4] + '.flo')
assert (os.path.isfile(gt_flow))
input_files += [frames[i], frames[i + 1], gt_flow]
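    # the training tool receives flat triplets (first frame, second frame, ground-truth .flo) for every selected pair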
execute([args.bin_path, '--descriptor-type=%d' % args.descriptor_type] + input_files)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
from __future__ import print_function
import os, sys, shutil
import argparse
import json, re
from subprocess import check_output
import datetime
import matplotlib.pyplot as plt
def load_json(path):
f = open(path, "r")
data = json.load(f)
return data
def save_json(obj, path):
tmp_file = path + ".bak"
f = open(tmp_file, "w")
json.dump(obj, f, indent=2)
f.flush()
os.fsync(f.fileno())
f.close()
try:
os.rename(tmp_file, path)
    except OSError:
os.remove(path)
os.rename(tmp_file, path)
def parse_evaluation_result(input_str, i):
res = {}
res['frame_number'] = i + 1
res['error'] = {}
regex = "([A-Za-z. \\[\\].0-9]+):[ ]*([0-9]*\.[0-9]+|[0-9]+)"
for elem in re.findall(regex,input_str):
if "Time" in elem[0]:
res['time'] = float(elem[1])
elif "Average" in elem[0]:
res['error']['average'] = float(elem[1])
elif "deviation" in elem[0]:
res['error']['std'] = float(elem[1])
else:
res['error'][elem[0]] = float(elem[1])
return res
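# For reference, parse_evaluation_result() turns per-frame output of the evaluation executable such as
#   "Time [s]: 0.153"               -> res['time'] = 0.153
#   "Average endpoint error: 2.4"   -> res['error']['average'] = 2.4
#   "Standard deviation: 1.1"       -> res['error']['std'] = 1.1
# into a dictionary (the label strings above are only illustrative; the exact wording comes from the executable).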
def evaluate_sequence(sequence, algorithm, dataset, executable, img_files, gt_files,
state, state_path):
if "eval_results" not in state[dataset][algorithm][-1].keys():
state[dataset][algorithm][-1]["eval_results"] = {}
elif sequence in state[dataset][algorithm][-1]["eval_results"].keys():
return
res = []
for i in range(len(img_files) - 1):
sys.stdout.write("Algorithm: %-20s Sequence: %-10s Done: [%3d/%3d]\r" %
(algorithm, sequence, i, len(img_files) - 1)),
sys.stdout.flush()
        res_string = check_output([executable, img_files[i], img_files[i + 1],
                                   algorithm, gt_files[i]]).decode('utf-8')
res.append(parse_evaluation_result(res_string, i))
state[dataset][algorithm][-1]["eval_results"][sequence] = res
save_json(state, state_path)
############################ DATASET DEFINITIONS #############################
def evaluate_mpi_sintel(source_dir, algorithm, evaluation_executable, state, state_path):
evaluation_result = {}
img_dir = os.path.join(source_dir, 'mpi_sintel', 'training', 'final')
gt_dir = os.path.join(source_dir, 'mpi_sintel', 'training', 'flow')
sequences = [f for f in os.listdir(img_dir)
if os.path.isdir(os.path.join(img_dir, f))]
for seq in sequences:
img_files = sorted([os.path.join(img_dir, seq, f)
for f in os.listdir(os.path.join(img_dir, seq))
if f.endswith(".png")])
gt_files = sorted([os.path.join(gt_dir, seq, f)
for f in os.listdir(os.path.join(gt_dir, seq))
if f.endswith(".flo")])
evaluation_result[seq] = evaluate_sequence(seq, algorithm, 'mpi_sintel',
evaluation_executable, img_files, gt_files, state, state_path)
return evaluation_result
def evaluate_middlebury(source_dir, algorithm, evaluation_executable, state, state_path):
evaluation_result = {}
img_dir = os.path.join(source_dir, 'middlebury', 'other-data')
gt_dir = os.path.join(source_dir, 'middlebury', 'other-gt-flow')
sequences = [f for f in os.listdir(gt_dir)
if os.path.isdir(os.path.join(gt_dir, f))]
for seq in sequences:
img_files = sorted([os.path.join(img_dir, seq, f)
for f in os.listdir(os.path.join(img_dir, seq))
if f.endswith(".png")])
gt_files = sorted([os.path.join(gt_dir, seq, f)
for f in os.listdir(os.path.join(gt_dir, seq))
if f.endswith(".flo")])
evaluation_result[seq] = evaluate_sequence(seq, algorithm, 'middlebury',
evaluation_executable, img_files, gt_files, state, state_path)
return evaluation_result
dataset_eval_functions = {
"mpi_sintel": evaluate_mpi_sintel,
"middlebury": evaluate_middlebury
}
###############################################################################
def create_dir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def parse_sequence(input_str):
if len(input_str) == 0:
return []
else:
return [o.strip() for o in input_str.split(",") if o]
def build_chart(dst_folder, state, dataset):
fig = plt.figure(figsize=(16, 10))
markers = ["o", "s", "h", "^", "D"]
marker_idx = 0
colors = ["b", "g", "r"]
color_idx = 0
for algo in state[dataset].keys():
for eval_instance in state[dataset][algo]:
name = algo + "--" + eval_instance["timestamp"]
average_time = 0.0
average_error = 0.0
num_elem = 0
for seq in eval_instance["eval_results"].keys():
for frame in eval_instance["eval_results"][seq]:
average_time += frame["time"]
average_error += frame["error"]["average"]
num_elem += 1
average_time /= num_elem
average_error /= num_elem
marker_style = colors[color_idx] + markers[marker_idx]
color_idx += 1
if color_idx >= len(colors):
color_idx = 0
marker_idx += 1
if marker_idx >= len(markers):
marker_idx = 0
plt.gca().plot([average_time], [average_error],
marker_style,
markersize=14,
label=name)
plt.gca().set_ylabel('Average Endpoint Error (EPE)', fontsize=20)
plt.gca().set_xlabel('Average Runtime (seconds per frame)', fontsize=20)
plt.gca().set_xscale("log")
plt.gca().set_title('Evaluation on ' + dataset, fontsize=20)
plt.gca().legend()
fig.savefig(os.path.join(dst_folder, "evaluation_results_" + dataset + ".png"),
bbox_inches='tight')
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Optical flow benchmarking script',
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"bin_path",
default="./optflow-example-optical_flow_evaluation",
help="Path to the optical flow evaluation executable")
parser.add_argument(
"-a",
"--algorithms",
metavar="ALGORITHMS",
default="",
help=("Comma-separated list of optical-flow algorithms to evaluate "
"(example: -a farneback,tvl1,deepflow). Note that previously "
"evaluated algorithms are also included in the output charts"))
parser.add_argument(
"-d",
"--datasets",
metavar="DATASETS",
default="mpi_sintel",
help=("Comma-separated list of datasets for evaluation (currently only "
"'mpi_sintel' and 'middlebury' are supported)"))
parser.add_argument(
"-f",
"--dataset_folder",
metavar="DATASET_FOLDER",
default="./OF_datasets",
help=("Path to a folder containing datasets. To enable evaluation on "
"MPI Sintel dataset, please download it using the following links: "
"http://files.is.tue.mpg.de/sintel/MPI-Sintel-training_images.zip and "
"http://files.is.tue.mpg.de/sintel/MPI-Sintel-training_extras.zip and "
"unzip these archives into the 'mpi_sintel' folder. To enable evaluation "
"on the Middlebury dataset use the following links: "
"http://vision.middlebury.edu/flow/data/comp/zip/other-color-twoframes.zip, "
"http://vision.middlebury.edu/flow/data/comp/zip/other-gt-flow.zip. "
"These should be unzipped into 'middlebury' folder"))
parser.add_argument(
"-o",
"--out",
metavar="OUT_DIR",
default="./OF_evaluation_results",
help="Output directory where to store benchmark results")
parser.add_argument(
"-s",
"--state",
metavar="STATE_JSON",
default="./OF_evaluation_state.json",
help=("Path to a json file that stores the current evaluation state and "
"previous evaluation results"))
args, other_args = parser.parse_known_args()
if not os.path.isfile(args.bin_path):
print("Error: " + args.bin_path + " does not exist")
sys.exit(1)
if not os.path.exists(args.dataset_folder):
print("Error: " + args.dataset_folder + (" does not exist. Please, correctly "
"specify the -f parameter"))
sys.exit(1)
state = {}
if os.path.isfile(args.state):
state = load_json(args.state)
algorithm_list = parse_sequence(args.algorithms)
dataset_list = parse_sequence(args.datasets)
for dataset in dataset_list:
if dataset not in dataset_eval_functions.keys():
print("Error: unsupported dataset " + dataset)
sys.exit(1)
if dataset not in os.listdir(args.dataset_folder):
print("Error: " + os.path.join(args.dataset_folder, dataset) + (" does not exist. "
"Please, download the dataset and follow the naming conventions "
"(use -h for more information)"))
sys.exit(1)
for dataset in dataset_list:
if dataset not in state.keys():
state[dataset] = {}
for algorithm in algorithm_list:
if algorithm in state[dataset].keys():
last_eval_instance = state[dataset][algorithm][-1]
if "finished" not in last_eval_instance.keys():
print(("Continuing an unfinished evaluation of " +
algorithm + " started at " + last_eval_instance["timestamp"]))
else:
state[dataset][algorithm].append({"timestamp":
datetime.datetime.now().strftime("%Y-%m-%d--%H-%M")})
else:
state[dataset][algorithm] = [{"timestamp":
datetime.datetime.now().strftime("%Y-%m-%d--%H-%M")}]
save_json(state, args.state)
dataset_eval_functions[dataset](args.dataset_folder, algorithm, args.bin_path,
state, args.state)
state[dataset][algorithm][-1]["finished"] = True
save_json(state, args.state)
save_json(state, args.state)
create_dir(args.out)
for dataset in dataset_list:
build_chart(args.out, state, dataset)
|
import argparse
import glob
import os
import subprocess
def execute(cmd):
    # universal_newlines=True makes stdout/stderr text streams, so the '' sentinel below also works on Python 3
    popen = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ''):
print(stdout_line.rstrip())
for stderr_line in iter(popen.stderr.readline, ''):
print(stderr_line.rstrip())
popen.stdout.close()
popen.stderr.close()
return_code = popen.wait()
if return_code != 0:
raise subprocess.CalledProcessError(return_code, cmd)
def main():
parser = argparse.ArgumentParser(
description='Train Global Patch Collider using Middlebury dataset')
parser.add_argument(
'--bin_path',
help='Path to the training executable (example_optflow_gpc_train)',
required=True)
parser.add_argument('--dataset_path',
help='Path to the directory with frames',
required=True)
parser.add_argument('--gt_path',
help='Path to the directory with ground truth flow',
required=True)
parser.add_argument('--descriptor_type',
help='Descriptor type',
type=int,
default=0)
args = parser.parse_args()
seq = glob.glob(os.path.join(args.dataset_path, '*'))
seq.sort()
input_files = []
for s in seq:
if os.path.isdir(s):
seq_name = os.path.basename(s)
frames = glob.glob(os.path.join(s, 'frame*.png'))
frames.sort()
assert (len(frames) == 2)
assert (os.path.basename(frames[0]) == 'frame10.png')
assert (os.path.basename(frames[1]) == 'frame11.png')
gt_flow = os.path.join(args.gt_path, seq_name, 'flow10.flo')
if os.path.isfile(gt_flow):
input_files += [frames[0], frames[1], gt_flow]
execute([args.bin_path, '--descriptor-type=%d' % args.descriptor_type] + input_files)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import numpy as np
import cv2 as cv
MHI_DURATION = 0.5
DEFAULT_THRESHOLD = 32
MAX_TIME_DELTA = 0.25
MIN_TIME_DELTA = 0.05
# (empty) trackbar callback
def nothing(dummy):
pass
def draw_motion_comp(vis, rect, angle, color):
x, y, w, h = rect
cv.rectangle(vis, (x, y), (x+w, y+h), (0, 255, 0))
r = min(w//2, h//2)
cx, cy = x+w//2, y+h//2
angle = angle*np.pi/180
cv.circle(vis, (cx, cy), r, color, 3)
cv.line(vis, (cx, cy), (int(cx+np.cos(angle)*r), int(cy+np.sin(angle)*r)), color, 3)
if __name__ == '__main__':
import sys
try:
video_src = sys.argv[1]
except:
video_src = 0
cv.namedWindow('motempl')
visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
cv.createTrackbar('visual', 'motempl', 2, len(visuals)-1, nothing)
cv.createTrackbar('threshold', 'motempl', DEFAULT_THRESHOLD, 255, nothing)
cam = cv.VideoCapture(video_src)
if not cam.isOpened():
print("could not open video_src " + str(video_src) + " !\n")
sys.exit(1)
ret, frame = cam.read()
if ret == False:
print("could not read from " + str(video_src) + " !\n")
sys.exit(1)
h, w = frame.shape[:2]
prev_frame = frame.copy()
motion_history = np.zeros((h, w), np.float32)
hsv = np.zeros((h, w, 3), np.uint8)
hsv[:,:,1] = 255
while True:
ret, frame = cam.read()
if ret == False:
break
frame_diff = cv.absdiff(frame, prev_frame)
gray_diff = cv.cvtColor(frame_diff, cv.COLOR_BGR2GRAY)
thrs = cv.getTrackbarPos('threshold', 'motempl')
ret, motion_mask = cv.threshold(gray_diff, thrs, 1, cv.THRESH_BINARY)
timestamp = cv.getTickCount() / cv.getTickFrequency()
cv.motempl.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
mg_mask, mg_orient = cv.motempl.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
seg_mask, seg_bounds = cv.motempl.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)
visual_name = visuals[cv.getTrackbarPos('visual', 'motempl')]
if visual_name == 'input':
vis = frame.copy()
elif visual_name == 'frame_diff':
vis = frame_diff.copy()
elif visual_name == 'motion_hist':
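            # linearly map motion-history timestamps from the last MHI_DURATION seconds onto 0..255 (newest motion brightest)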
vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
vis = cv.cvtColor(vis, cv.COLOR_GRAY2BGR)
elif visual_name == 'grad_orient':
hsv[:,:,0] = mg_orient/2
hsv[:,:,2] = mg_mask*255
vis = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
x, y, rw, rh = rect
area = rw*rh
if area < 64**2:
continue
silh_roi = motion_mask [y:y+rh,x:x+rw]
orient_roi = mg_orient [y:y+rh,x:x+rw]
mask_roi = mg_mask [y:y+rh,x:x+rw]
mhi_roi = motion_history[y:y+rh,x:x+rw]
if cv.norm(silh_roi, cv.NORM_L1) < area*0.05:
continue
angle = cv.motempl.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
color = ((255, 0, 0), (0, 0, 255))[i == 0]
draw_motion_comp(vis, rect, angle, color)
cv.putText(vis, visual_name, (20, 20), cv.FONT_HERSHEY_PLAIN, 1.0, (200,0,0))
cv.imshow('motempl', vis)
prev_frame = frame.copy()
if 0xFF & cv.waitKey(5) == 27:
break
# cleanup the camera and close any open windows
cam.release()
cv.destroyAllWindows()
|
#!/usr/bin/env python
import os
import sys
import numpy as np
import cv2 as cv
import struct
import argparse
from math import sqrt
argparser = argparse.ArgumentParser(
description='''Use this script to generate prior for using with PCAFlow.
Basis size here must match corresponding parameter in the PCAFlow.
Gamma should be selected experimentally.''')
argparser.add_argument('-f',
'--files',
nargs='+',
help='List of optical flow .flo files for learning. You can pass a directory here and it will be scanned recursively for .flo files.',
required=True)
argparser.add_argument('-o',
'--output',
help='Output file for prior',
required=True)
argparser.add_argument('--width',
type=int,
help='Size of the basis first dimension',
required=True,
default=18)
argparser.add_argument('--height',
type=int,
help='Size of the basis second dimension',
required=True,
default=14)
argparser.add_argument(
'-g',
'--gamma',
type=float,
    help='Amount of regularization. The greater this parameter, the stronger the impact of the regularization.',
required=True)
args = argparser.parse_args()
basis_size = (args.height, args.width)
gamma = args.gamma
def find_flo(pp):
f = []
for p in pp:
if os.path.isfile(p):
f.append(p)
else:
for root, subdirs, files in os.walk(p):
f += map(lambda x: os.path.join(root, x),
filter(lambda x: x.split('.')[-1] == 'flo', files))
return list(set(f))
def load_flo(flo):
with open(flo, 'rb') as f:
magic = np.fromfile(f, np.float32, count=1)[0]
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
else:
w = np.fromfile(f, np.int32, count=1)[0]
h = np.fromfile(f, np.int32, count=1)[0]
print('Reading %dx%d flo file %s' % (w, h, flo))
data = np.fromfile(f, np.float32, count=2 * w * h)
            # Reshape data into a 3D array (rows, columns, bands)
flow = np.reshape(data, (h, w, 2))
return flow[:, :, 0], flow[:, :, 1]
def get_w(m):
s = m.shape
w = cv.dct(m)
w *= 2.0 / sqrt(s[0] * s[1])
#w[0,0] *= 0.5
w[:, 0] *= sqrt(0.5)
w[0, :] *= sqrt(0.5)
w = w[0:basis_size[0], 0:basis_size[1]].transpose().flatten()
return w
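# get_w() projects one flow component onto an orthonormal 2D DCT truncated to the requested basis size,
# so the result is a vector of args.height * args.width coefficients. A quick sanity check on a
# hypothetical all-zero flow component of even size:
#   get_w(np.zeros((128, 128), np.float32)).shape == (args.height * args.width,)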
w1 = []
w2 = []
for flo in find_flo(args.files):
x, y = load_flo(flo)
w1.append(get_w(x))
w2.append(get_w(y))
w1mean = sum(w1) / len(w1)
w2mean = sum(w2) / len(w2)
for i in range(len(w1)):
    w1[i] -= w1mean
for i in range(len(w2)):
    w2[i] -= w2mean
Q1 = sum([w1[i].reshape(-1, 1).dot(w1[i].reshape(1, -1))
          for i in range(len(w1))]) / len(w1)
Q2 = sum([w2[i].reshape(-1, 1).dot(w2[i].reshape(1, -1))
          for i in range(len(w2))]) / len(w2)
Q1 = np.matrix(Q1)
Q2 = np.matrix(Q2)
if len(w1) > 1:
while True:
try:
L1 = np.linalg.cholesky(Q1)
break
except np.linalg.linalg.LinAlgError:
mev = min(np.linalg.eig(Q1)[0]).real
assert (mev < 0)
print('Q1', mev)
if -mev < 1e-6:
mev = -1e-6
Q1 += (-mev * 1.000001) * np.identity(Q1.shape[0])
while True:
try:
L2 = np.linalg.cholesky(Q2)
break
except np.linalg.linalg.LinAlgError:
mev = min(np.linalg.eig(Q2)[0]).real
assert (mev < 0)
print('Q2', mev)
if -mev < 1e-6:
mev = -1e-6
Q2 += (-mev * 1.000001) * np.identity(Q2.shape[0])
else:
L1 = np.identity(Q1.shape[0])
L2 = np.identity(Q2.shape[0])
L1 = np.linalg.inv(L1) * gamma
L2 = np.linalg.inv(L2) * gamma
assert (L1.shape == L2.shape)
assert (L1.shape[0] == L1.shape[1])
f = open(args.output, 'wb')
f.write(struct.pack('I', L1.shape[0]))
f.write(struct.pack('I', L1.shape[1]))
for i in range(L1.shape[0]):
    for j in range(L1.shape[1]):
        f.write(struct.pack('f', L1[i, j]))
for i in range(L2.shape[0]):
    for j in range(L2.shape[1]):
        f.write(struct.pack('f', L2[i, j]))
b1 = L1.dot(w1mean.reshape(-1, 1))
b2 = L2.dot(w2mean.reshape(-1, 1))
assert (L1.shape[0] == b1.shape[0])
for i in range(b1.shape[0]):
    f.write(struct.pack('f', b1[i, 0]))
for i in range(b2.shape[0]):
    f.write(struct.pack('f', b2[i, 0]))
f.close()
|
import argparse
import cv2 as cv
import glob
import numpy as np
import os
import time
# This tool is intended for evaluation of different background subtraction algorithms presented in OpenCV.
# Several presets with different settings are available. You can see them below.
# This tool measures quality metrics as well as speed.
ALGORITHMS_TO_EVALUATE = [
(cv.bgsegm.createBackgroundSubtractorMOG, 'MOG', {}),
(cv.bgsegm.createBackgroundSubtractorGMG, 'GMG', {}),
(cv.bgsegm.createBackgroundSubtractorCNT, 'CNT', {}),
(cv.bgsegm.createBackgroundSubtractorLSBP, 'LSBP-vanilla', {'nSamples': 20, 'LSBPRadius': 4, 'Tlower': 2.0, 'Tupper': 200.0, 'Tinc': 1.0, 'Tdec': 0.05, 'Rscale': 5.0, 'Rincdec': 0.05, 'LSBPthreshold': 8}),
(cv.bgsegm.createBackgroundSubtractorLSBP, 'LSBP-speed', {'nSamples': 10, 'LSBPRadius': 16, 'Tlower': 2.0, 'Tupper': 32.0, 'Tinc': 1.0, 'Tdec': 0.05, 'Rscale': 10.0, 'Rincdec': 0.005, 'LSBPthreshold': 8}),
(cv.bgsegm.createBackgroundSubtractorLSBP, 'LSBP-quality', {'nSamples': 20, 'LSBPRadius': 16, 'Tlower': 2.0, 'Tupper': 32.0, 'Tinc': 1.0, 'Tdec': 0.05, 'Rscale': 10.0, 'Rincdec': 0.005, 'LSBPthreshold': 8}),
(cv.bgsegm.createBackgroundSubtractorLSBP, 'LSBP-camera-motion-compensation', {'mc': 1}),
(cv.bgsegm.createBackgroundSubtractorGSOC, 'GSOC', {}),
(cv.bgsegm.createBackgroundSubtractorGSOC, 'GSOC-camera-motion-compensation', {'mc': 1})
]
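# Each entry above is (factory function, display name, kwargs); the kwargs dict is expanded into the
# factory call (algo(**algo_arguments)) when the subtractor is instantiated below.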
def contains_relevant_files(root):
return os.path.isdir(os.path.join(root, 'groundtruth')) and os.path.isdir(os.path.join(root, 'input'))
def find_relevant_dirs(root):
relevant_dirs = []
for d in sorted(os.listdir(root)):
d = os.path.join(root, d)
if os.path.isdir(d):
if contains_relevant_files(d):
relevant_dirs += [d]
else:
relevant_dirs += find_relevant_dirs(d)
return relevant_dirs
def load_sequence(root):
gt_dir, frames_dir = os.path.join(root, 'groundtruth'), os.path.join(root, 'input')
gt = sorted(glob.glob(os.path.join(gt_dir, '*.png')))
f = sorted(glob.glob(os.path.join(frames_dir, '*.jpg')))
assert(len(gt) == len(f))
return gt, f
def evaluate_algorithm(gt, frames, algo, algo_arguments):
bgs = algo(**algo_arguments)
mask = []
t_start = time.time()
for i in range(len(gt)):
frame = np.uint8(cv.imread(frames[i], cv.IMREAD_COLOR))
mask.append(bgs.apply(frame))
average_duration = (time.time() - t_start) / len(gt)
average_precision, average_recall, average_f1, average_accuracy = [], [], [], []
for i in range(len(gt)):
gt_mask = np.uint8(cv.imread(gt[i], cv.IMREAD_GRAYSCALE))
roi = ((gt_mask == 255) | (gt_mask == 0))
if roi.sum() > 0:
gt_answer, answer = gt_mask[roi], mask[i][roi]
tp = ((answer == 255) & (gt_answer == 255)).sum()
tn = ((answer == 0) & (gt_answer == 0)).sum()
fp = ((answer == 255) & (gt_answer == 0)).sum()
fn = ((answer == 0) & (gt_answer == 255)).sum()
if tp + fp > 0:
average_precision.append(float(tp) / (tp + fp))
if tp + fn > 0:
average_recall.append(float(tp) / (tp + fn))
if tp + fn + fp > 0:
average_f1.append(2.0 * tp / (2.0 * tp + fn + fp))
average_accuracy.append(float(tp + tn) / (tp + tn + fp + fn))
return average_duration, np.mean(average_precision), np.mean(average_recall), np.mean(average_f1), np.mean(average_accuracy)
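# The per-frame metrics above are computed only over the labelled ROI (pixels marked 0 or 255 in the ground truth):
#   precision = TP / (TP + FP), recall = TP / (TP + FN),
#   F1 = 2*TP / (2*TP + FN + FP), accuracy = (TP + TN) / (TP + TN + FP + FN)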
def evaluate_on_sequence(seq, summary):
gt, frames = load_sequence(seq)
category, video_name = os.path.basename(os.path.dirname(seq)), os.path.basename(seq)
print('=== %s:%s ===' % (category, video_name))
for algo, algo_name, algo_arguments in ALGORITHMS_TO_EVALUATE:
print('Algorithm name: %s' % algo_name)
sec_per_step, precision, recall, f1, accuracy = evaluate_algorithm(gt, frames, algo, algo_arguments)
print('Average accuracy: %.3f' % accuracy)
print('Average precision: %.3f' % precision)
print('Average recall: %.3f' % recall)
print('Average F1: %.3f' % f1)
print('Average sec. per step: %.4f' % sec_per_step)
print('')
if category not in summary:
summary[category] = {}
if algo_name not in summary[category]:
summary[category][algo_name] = []
summary[category][algo_name].append((precision, recall, f1, accuracy))
def main():
parser = argparse.ArgumentParser(description='Evaluate all background subtractors using Change Detection 2014 dataset')
parser.add_argument('--dataset_path', help='Path to the directory with dataset. It may contain multiple inner directories. It will be scanned recursively.', required=True)
parser.add_argument('--algorithm', help='Test particular algorithm instead of all.')
args = parser.parse_args()
dataset_dirs = find_relevant_dirs(args.dataset_path)
assert len(dataset_dirs) > 0, ("Passed directory must contain at least one sequence from the Change Detection dataset. There is no relevant directories in %s. Check that this directory is correct." % (args.dataset_path))
if args.algorithm is not None:
global ALGORITHMS_TO_EVALUATE
        ALGORITHMS_TO_EVALUATE = list(filter(lambda a: a[1].lower() == args.algorithm.lower(), ALGORITHMS_TO_EVALUATE))
summary = {}
for seq in dataset_dirs:
evaluate_on_sequence(seq, summary)
for category in summary:
for algo_name in summary[category]:
summary[category][algo_name] = np.mean(summary[category][algo_name], axis=0)
for category in summary:
print('=== SUMMARY for %s (Precision, Recall, F1, Accuracy) ===' % category)
for algo_name in summary[category]:
print('%05s: %.3f %.3f %.3f %.3f' % ((algo_name,) + tuple(summary[category][algo_name])))
if __name__ == '__main__':
main()
|
import cv2 as cv
import argparse
def main():
    argparser = argparse.ArgumentParser(description='Visualization of the SyntheticSequenceGenerator.')
argparser.add_argument('-b', '--background', help='Background image.', required=True)
argparser.add_argument('-o', '--obj', help='Object image. It must be strictly smaller than background.', required=True)
args = argparser.parse_args()
bg = cv.imread(args.background)
obj = cv.imread(args.obj)
generator = cv.bgsegm.createSyntheticSequenceGenerator(bg, obj)
while True:
frame, mask = generator.getNextFrame()
cv.imshow('Generated frame', frame)
cv.imshow('Generated mask', mask)
k = cv.waitKey(int(1000.0 / 30))
if k == 27:
break
if __name__ == '__main__':
main()
|
import numpy as np
import cv2 as cv
import argparse
import os
def main():
    argparser = argparse.ArgumentParser(description='Visualization of the LSBP/GSOC background subtraction algorithm.')
argparser.add_argument('-g', '--gt', help='Directory with ground-truth frames', required=True)
argparser.add_argument('-f', '--frames', help='Directory with input frames', required=True)
argparser.add_argument('-l', '--lsbp', help='Display LSBP instead of GSOC', default=False)
args = argparser.parse_args()
    gt = sorted(os.path.join(args.gt, x) for x in os.listdir(args.gt))
    f = sorted(os.path.join(args.frames, x) for x in os.listdir(args.frames))
    gt = np.uint8([cv.imread(x, cv.IMREAD_GRAYSCALE) for x in gt])
    f = np.uint8([cv.imread(x, cv.IMREAD_COLOR) for x in f])
if not args.lsbp:
bgs = cv.bgsegm.createBackgroundSubtractorGSOC()
else:
bgs = cv.bgsegm.createBackgroundSubtractorLSBP()
    for i in range(f.shape[0]):
cv.imshow('Frame', f[i])
cv.imshow('Ground-truth', gt[i])
mask = bgs.apply(f[i])
bg = bgs.getBackgroundImage()
cv.imshow('BG', bg)
cv.imshow('Output mask', mask)
k = cv.waitKey(0)
if k == 27:
break
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests
class disparity_test(NewOpenCVTests):
def test_disp(self):
# readGT
ret,GT = cv.ximgproc.readGT(self.find_file("cv/disparityfilter/GT.png"))
self.assertEqual(ret, 0) # returns 0 on success!
self.assertFalse(np.shape(GT) == ())
# computeMSE
left = cv.imread(self.find_file("cv/disparityfilter/disparity_left_raw.png"), cv.IMREAD_UNCHANGED)
self.assertFalse(np.shape(left) == ())
left = np.asarray(left, dtype=np.int16)
mse = cv.ximgproc.computeMSE(GT, left, (0, 0, GT.shape[1], GT.shape[0]))
# computeBadPixelPercent
bad = cv.ximgproc.computeBadPixelPercent(GT, left, (0, 0, GT.shape[1], GT.shape[0]), 24)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
This sample demonstrates structured edge detection and edgeboxes.
Usage:
edgeboxes_demo.py [<model>] [<input_image>]
'''
import cv2 as cv
import numpy as np
import sys
if __name__ == '__main__':
    print(__doc__)
    if len(sys.argv) < 3:
        sys.exit(1)
    model = sys.argv[1]
    im = cv.imread(sys.argv[2])
edge_detection = cv.ximgproc.createStructuredEdgeDetection(model)
rgb_im = cv.cvtColor(im, cv.COLOR_BGR2RGB)
edges = edge_detection.detectEdges(np.float32(rgb_im) / 255.0)
orimap = edge_detection.computeOrientation(edges)
edges = edge_detection.edgesNms(edges, orimap)
edge_boxes = cv.ximgproc.createEdgeBoxes()
edge_boxes.setMaxBoxes(30)
    boxes, scores = edge_boxes.getBoundingBoxes(edges, orimap)
if len(boxes) > 0:
boxes_scores = zip(boxes, scores)
for b_s in boxes_scores:
box = b_s[0]
x, y, w, h = box
cv.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 1, cv.LINE_AA)
score = b_s[1][0]
cv.putText(im, "{:.2f}".format(score), (x, y), cv.FONT_HERSHEY_PLAIN, 0.8, (255, 255, 255), 1, cv.LINE_AA)
print("Box at (x,y)=({:d},{:d}); score={:f}".format(x, y, score))
cv.imshow("edges", edges)
cv.imshow("edgeboxes", im)
cv.waitKey(0)
cv.destroyAllWindows()
|
# USAGE - how to run this code:
#   python find_shapes.py --image shapes.png
#   python findredlinedpolygonfromgooglemaps.py --image stanford.png
import numpy as np
import argparse
import cv2 as cv
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help = "path to the image file")
args = vars(ap.parse_args())
# load the image
image = cv.imread(args["image"])
lower = np.array([20,0,155])
upper = np.array([255,120,250])
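# cv.imread returns BGR, so these bounds keep pixels with a high red channel and a low-to-moderate green channel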
shapeMask = cv.inRange(image, lower, upper)
# find the contours in the mask
(cnts, _) = cv.findContours(shapeMask.copy(), cv.RETR_EXTERNAL,
cv.CHAIN_APPROX_SIMPLE)
cv.imshow("Mask", shapeMask)
# loop over the contours
for c in cnts:
cv.drawContours(image, [c], -1, (0, 255, 0), 2)
cv.imshow("Image", image)
cv.waitKey(0) |
import sys
import numpy as np
import cv2 as cv
def AddSlider(sliderName, windowName, minSlider, maxSlider, valDefault, update=None):
    # cv.createTrackbar requires an onChange callback, so fall back to a no-op when none is given
    if update is None:
        update = lambda *args: None
    cv.createTrackbar(sliderName, windowName, valDefault, maxSlider-minSlider+1, update)
    cv.setTrackbarMin(sliderName, windowName, minSlider)
    cv.setTrackbarMax(sliderName, windowName, maxSlider)
    cv.setTrackbarPos(sliderName, windowName, valDefault)
class Filtrage:
def __init__(self):
self.s =0
self.alpha = 100
self.omega = 100
self.updateFiltre=True
self.img=[]
self.dximg=[]
self.dyimg=[]
self.module=[]
def DericheFilter(self):
self.dximg = cv.ximgproc.GradientDericheX( self.img, self.alpha/100., self.omega/1000. )
self.dyimg = cv.ximgproc.GradientDericheY( self.img, self.alpha/100., self.omega/1000. )
dx2=self.dximg*self.dximg
dy2=self.dyimg*self.dyimg
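        # gradient magnitude sqrt(dx^2 + dy^2) of the Deriche-filtered derivatives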
self.module = np.sqrt(dx2+dy2)
cv.normalize(src=self.module,dst=self.module,norm_type=cv.NORM_MINMAX)
def SlideBarDeriche(self):
cv.destroyWindow(self.filename)
cv.namedWindow(self.filename)
AddSlider("alpha",self.filename,1,400,self.alpha,self.UpdateAlpha)
AddSlider("omega",self.filename,1,1000,self.omega,self.UpdateOmega)
def UpdateOmega(self,x ):
self.updateFiltre=True
self.omega=x
def UpdateAlpha(self,x ):
self.updateFiltre=True
self.alpha=x
def run(self,argv):
# Load the source image
self.filename = argv[0] if len(argv) > 0 else "../doc/pics/corridor_fld.jpg"
self.img=cv.imread(self.filename,cv.IMREAD_GRAYSCALE)
if self.img is None:
print ('cannot read file')
return
self.SlideBarDeriche()
while True:
cv.imshow(self.filename,self.img)
if self.updateFiltre:
self.DericheFilter()
cv.imshow("module",self.module)
self.updateFiltre =False
code = cv.waitKey(10)
if code==27:
break
if __name__ == '__main__':
Filtrage().run(sys.argv[1:])
|
#!/usr/bin/env python
'''
A program demonstrating the use and capabilities of a particular image segmentation algorithm described
in Jasper R. R. Uijlings, Koen E. A. van de Sande, Theo Gevers, Arnold W. M. Smeulders:
"Selective Search for Object Recognition"
International Journal of Computer Vision, Volume 104 (2), page 154-171, 2013
Usage:
./selectivesearchsegmentation_demo.py input_image (single|fast|quality)
Use "a" to display less rects, 'd' to display more rects, "q" to quit.
'''
import cv2 as cv
import sys
if __name__ == '__main__':
img = cv.imread(sys.argv[1])
cv.setUseOptimized(True)
cv.setNumThreads(8)
gs = cv.ximgproc.segmentation.createSelectiveSearchSegmentation()
gs.setBaseImage(img)
if (sys.argv[2][0] == 's'):
gs.switchToSingleStrategy()
elif (sys.argv[2][0] == 'f'):
gs.switchToSelectiveSearchFast()
elif (sys.argv[2][0] == 'q'):
gs.switchToSelectiveSearchQuality()
else:
print(__doc__)
sys.exit(1)
rects = gs.process()
nb_rects = 10
while True:
wimg = img.copy()
for i in range(len(rects)):
if (i < nb_rects):
x, y, w, h = rects[i]
cv.rectangle(wimg, (x, y), (x+w, y+h), (0, 255, 0), 1, cv.LINE_AA)
cv.imshow("Output", wimg);
c = cv.waitKey()
if (c == 100):
nb_rects += 10
elif (c == 97 and nb_rects > 10):
nb_rects -= 10
elif (c == 113):
break
cv.destroyAllWindows()
|
import numpy as np
import cv2 as cv
import math
class ThParameters:
def __init__(self):
self.levelNoise=6
self.angle=45
self.scale10=5
self.origin=10
self.xg=150
self.yg=150
self.update=True
def UpdateShape(x ):
p.update = True
def union(a,b):
x = min(a[0], b[0])
y = min(a[1], b[1])
w = max(a[0]+a[2], b[0]+b[2]) - x
h = max(a[1]+a[3], b[1]+b[3]) - y
return (x, y, w, h)
def intersection(a,b):
x = max(a[0], b[0])
y = max(a[1], b[1])
w = min(a[0]+a[2], b[0]+b[2]) - x
h = min(a[1]+a[3], b[1]+b[3]) - y
if w<0 or h<0: return () # or (0,0,0,0) ?
return (x, y, w, h)
def NoisyPolygon(pRef,n):
# vector<Point> c
p = pRef;
# vector<vector<Point> > contour;
p = p+n*np.random.random_sample((p.shape[0],p.shape[1]))-n/2.0
if (n==0):
return p
c = np.empty(shape=[0, 2])
minX = p[0][0]
maxX = p[0][0]
minY = p[0][1]
maxY = p[0][1]
for i in range( 0,p.shape[0]):
next = i + 1;
if (next == p.shape[0]):
next = 0;
u = p[next] - p[i]
d = int(cv.norm(u))
a = np.arctan2(u[1], u[0])
step = 1
if (n != 0):
step = d // n
for j in range( 1,int(d),int(max(step, 1))):
while True:
pAct = (u*j) / (d)
r = n*np.random.random_sample()
theta = a + 2*math.pi*np.random.random_sample()
# pNew = Point(Point2d(r*cos(theta) + pAct.x + p[i].x, r*sin(theta) + pAct.y + p[i].y));
pNew = np.array([(r*np.cos(theta) + pAct[0] + p[i][0], r*np.sin(theta) + pAct[1] + p[i][1])])
if (pNew[0][0]>=0 and pNew[0][1]>=0):
break
if (pNew[0][0]<minX):
minX = pNew[0][0]
if (pNew[0][0]>maxX):
maxX = pNew[0][0]
if (pNew[0][1]<minY):
minY = pNew[0][1]
if (pNew[0][1]>maxY):
maxY = pNew[0][1]
c = np.append(c,pNew,axis = 0)
return c
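# NoisyPolygon() jitters the reference vertices and then resamples extra points along every edge with
# random radial noise, producing a noisy closed contour that keeps the overall shape of the input polygon.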
#static vector<Point> NoisyPolygon(vector<Point> pRef, double n);
#static void UpdateShape(int , void *r);
#static void AddSlider(String sliderName, String windowName, int minSlider, int maxSlider, int valDefault, int *valSlider, void(*f)(int, void *), void *r);
def AddSlider(sliderName,windowName,minSlider,maxSlider,valDefault, update):
cv.createTrackbar(sliderName, windowName, valDefault,maxSlider-minSlider+1, update)
cv.setTrackbarMin(sliderName, windowName, minSlider)
cv.setTrackbarMax(sliderName, windowName, maxSlider)
cv.setTrackbarPos(sliderName, windowName, valDefault)
# vector<Point> ctrRef;
# vector<Point> ctrRotate, ctrNoisy, ctrNoisyRotate, ctrNoisyRotateShift;
# // build a shape with 5 vertex
ctrRef = np.array([(250,250),(400, 250),(400, 300),(250, 300),(180, 270)])
cg = np.mean(ctrRef,axis=0)
p=ThParameters()
cv.namedWindow("FD Curve matching");
# A rotation with center at (150,150) of angle 45 degrees and a scaling of 5/10
AddSlider("Noise", "FD Curve matching", 0, 20, p.levelNoise, UpdateShape)
AddSlider("Angle", "FD Curve matching", 0, 359, p.angle, UpdateShape)
AddSlider("Scale", "FD Curve matching", 5, 100, p.scale10, UpdateShape)
AddSlider("Origin", "FD Curve matching", 0, 100, p.origin, UpdateShape)
AddSlider("Xg", "FD Curve matching", 150, 450, p.xg, UpdateShape)
AddSlider("Yg", "FD Curve matching", 150, 450, p.yg, UpdateShape)
code = 0
img = np.zeros((300,512,3), np.uint8)
print ("******************** PRESS g TO MATCH CURVES *************\n")
while (code!=27):
code = cv.waitKey(60)
if p.update:
p.levelNoise=cv.getTrackbarPos('Noise','FD Curve matching')
p.angle=cv.getTrackbarPos('Angle','FD Curve matching')
p.scale10=cv.getTrackbarPos('Scale','FD Curve matching')
p.origin=cv.getTrackbarPos('Origin','FD Curve matching')
p.xg=cv.getTrackbarPos('Xg','FD Curve matching')
p.yg=cv.getTrackbarPos('Yg','FD Curve matching')
r = cv.getRotationMatrix2D((p.xg, p.yg), angle=p.angle, scale=10.0/ p.scale10);
ctrNoisy= NoisyPolygon(ctrRef,p.levelNoise)
ctrNoisy1 = np.reshape(ctrNoisy,(ctrNoisy.shape[0],1,2))
ctrNoisyRotate = cv.transform(ctrNoisy1,r)
ctrNoisyRotateShift = np.empty([ctrNoisyRotate.shape[0],1,2],dtype=np.int32)
for i in range(0,ctrNoisy.shape[0]):
k=(i+(p.origin*ctrNoisy.shape[0])//100)% ctrNoisyRotate.shape[0]
ctrNoisyRotateShift[i] = ctrNoisyRotate[k]
# To draw contour using drawcontours
cc= np.reshape(ctrNoisyRotateShift,[ctrNoisyRotateShift.shape[0],2])
c = [ ctrRef,cc]
p.update = False;
rglobal =(0,0,0,0)
for i in range(0,2):
r = cv.boundingRect(c[i])
rglobal = union(rglobal,r)
r = list(rglobal)
r[2] = r[2]+10
r[3] = r[3]+10
rglobal = tuple(r)
img = np.zeros((2 * rglobal[3], 2 * rglobal[2], 3), np.uint8)
cv.drawContours(img, c, 0, (255,0,0),1);
cv.drawContours(img, c, 1, (0, 255, 0),1);
cv.circle(img, tuple(c[0][0]), 5, (255, 0, 0),3);
cv.circle(img, tuple(c[1][0]), 5, (0, 255, 0),3);
cv.imshow("FD Curve matching", img);
if code == ord('d') :
cv.destroyWindow("FD Curve matching");
cv.namedWindow("FD Curve matching");
# A rotation with center at (150,150) of angle 45 degrees and a scaling of 5/10
AddSlider("Noise", "FD Curve matching", 0, 20, p.levelNoise, UpdateShape)
AddSlider("Angle", "FD Curve matching", 0, 359, p.angle, UpdateShape)
AddSlider("Scale", "FD Curve matching", 5, 100, p.scale10, UpdateShape)
AddSlider("Origin%%", "FD Curve matching", 0, 100, p.origin, UpdateShape)
AddSlider("Xg", "FD Curve matching", 150, 450, p.xg, UpdateShape)
AddSlider("Yg", "FD Curve matching", 150, 450, p.yg, UpdateShape)
if code == ord('g'):
fit = cv.ximgproc.createContourFitting(1024,16);
# sampling contour we want 256 points
cn= np.reshape(ctrRef,[ctrRef.shape[0],1,2])
ctrRef2d = cv.ximgproc.contourSampling(cn, 256)
ctrRot2d = cv.ximgproc.contourSampling(ctrNoisyRotateShift, 256)
fit.setFDSize(16)
c1 = ctrRef2d
c2 = ctrRot2d
alphaPhiST, dist = fit.estimateTransformation(ctrRot2d, ctrRef2d)
print( "Transform *********\n Origin = ", 1-alphaPhiST[0,0] ," expected ", p.origin / 100. ,"\n")
print( "Angle = ", alphaPhiST[0,1] * 180 / math.pi ," expected " , p.angle,"\n")
print( "Scale = " ,alphaPhiST[0,2] ," expected " , p.scale10 / 10.0 , "\n")
dst = cv.ximgproc.transformFD(ctrRot2d, alphaPhiST,cn, False);
ctmp= np.reshape(dst,[dst.shape[0],2])
cdst=ctmp.astype(int)
c = [ ctrRef,cc,cdst]
cv.drawContours(img, c, 2, (0,0,255),1);
cv.circle(img, (int(c[2][0][0]),int(c[2][0][1])), 5, (0, 0, 255),5);
cv.imshow("FD Curve matching", img);
|
try:
    # Python 3.3+
    from collections.abc import Iterable
except ImportError:
    # Python 2.7
    from collections import Iterable
from textwrap import fill
from filters import *
try:
# Python 2.7+
basestring
except NameError:
# Python 3.3+
basestring = str
valid_types = (
'int', 'bool', 'float', 'double', 'size_t', 'char',
'Mat', 'Scalar', 'String',
'TermCriteria', 'Size', 'Point', 'Point2f', 'Point2d', 'Rect', 'RotatedRect',
'RNG', 'DMatch', 'Moments',
'vector_Mat', 'vector_Point', 'vector_int', 'vector_float', 'vector_double', 'vector_String', 'vector_uchar', 'vector_Rect', 'vector_DMatch', 'vector_KeyPoint',
'vector_Point2f', 'vector_vector_char', 'vector_vector_DMatch', 'vector_vector_KeyPoint',
'Ptr_StereoBM', 'Ptr_StereoSGBM', 'Ptr_FeatureDetector', 'Ptr_CLAHE', 'Ptr_LineSegmentDetector', 'Ptr_AlignMTB', 'Ptr_CalibrateDebevec',
'Ptr_CalibrateRobertson', 'Ptr_DenseOpticalFlow', 'Ptr_DualTVL1OpticalFlow', 'Ptr_MergeDebevec', 'Ptr_MergeMertens', 'Ptr_MergeRobertson',
'Ptr_Stitcher', 'Ptr_Tonemap', 'Ptr_TonemapDrago', 'Ptr_TonemapDurand', 'Ptr_TonemapMantiuk', 'Ptr_TonemapReinhard', 'Ptr_float',
# Not supported:
#vector_vector_KeyPoint
)
class ParseTree(object):
"""
The ParseTree class produces a semantic tree of C++ definitions given
the output of the CppHeaderParser (from opencv/modules/python/src2/hdr_parser.py)
The full hierarchy is as follows:
Namespaces
|
|- name
|- Classes
|
|- name
|- Methods
|- Constants
|- Methods
|
|- name
|- static (T/F)
|- return type
|- required Arguments
|
|- name
|- const (T/F)
|- reference ('&'/'*')
|- type
|- input
|- output (pass return by reference)
|- default value
|- optional Arguments
|- Constants
|
|- name
|- const (T/F)
|- reference ('&'/'*')
|- type
|- value
The semantic tree contains substantial information for easily introspecting
information about objects. How many methods does the 'core' namespace have?
Does the 'randn' method have any return by reference (output) arguments?
How many required and optional arguments does the 'add' method have? Is the
variable passed by reference or raw pointer?
Individual definitions from the parse tree (Classes, Functions, Constants)
are passed to the Jinja2 template engine where they are manipulated to
produce Matlab mex sources.
A common call tree for constructing and using a ParseTree object is:
# parse a set of definitions into a dictionary of namespaces
parser = CppHeaderParser()
ns['core'] = parser.parse('path/to/opencv/core.hpp')
# refactor into a semantic tree
parse_tree = ParseTree()
parse_tree.build(ns)
# iterate over the tree
for namespace in parse_tree.namespaces:
for clss in namespace.classes:
# do stuff
for method in namespace.methods:
# do stuff
Calling 'print' on a ParseTree object will reconstruct the definitions
to produce an output resembling the original C++ code.
"""
def __init__(self, namespaces=None):
self.namespaces = namespaces if namespaces else []
def __str__(self):
return '\n\n\n'.join(ns.__str__() for ns in self.namespaces)
def build(self, namespaces):
babel = Translator()
for name, definitions in namespaces.items():
class_tree = {}
methods = []
constants = []
for defn in definitions:
try:
obj = babel.translate(defn)
except Exception as e:
print(e)
obj = None
if obj is None:
continue
if type(obj) is Class or obj.clss:
self.insertIntoClassTree(obj, class_tree)
elif type(obj) is Method:
methods.append(obj)
elif type(obj) is Constant:
constants.append(obj)
else:
raise TypeError('Unexpected object type: '+str(type(obj)))
self.namespaces.append(Namespace(name, constants, list(class_tree.values()), methods))
def insertIntoClassTree(self, obj, class_tree):
cname = obj.name if type(obj) is Class else obj.clss
if not cname:
return
if not cname in class_tree:
# add a new class to the tree
class_tree[cname] = Class(cname)
# insert the definition into the class
val = class_tree[cname]
if type(obj) is Method:
val.methods.append(obj)
elif type(obj) is Constant:
val.constants.append(obj)
else:
raise TypeError('Unexpected object type: '+str(type(obj)))
class Translator(object):
"""
The Translator class does the heavy lifting of translating the nested
list representation of the hdr_parser into individual definitions that
are inserted into the ParseTree.
Translator consists of a top-level method: translate()
along with a number of helper methods: translateClass(), translateMethod(),
translateArgument(), translateConstant(), translateName(), and
translateClassName()
"""
def translate(self, defn):
# --- class ---
# classes have 'class' prefixed on their name
if 'class' in defn[0].split(' ') or 'struct' in defn[0].split(' '):
return self.translateClass(defn)
# --- operators! ---
#TODO: implement operators: http://www.mathworks.com.au/help/matlab/matlab_oop/implementing-operators-for-your-class.html
if 'operator' in defn[0]:
return
# --- constant ---
elif convertibleToInt(defn[1]):
return self.translateConstant(defn)
# --- function ---
        # functions either have input arguments or a name that is not all-uppercase
elif defn[3] or not self.translateName(defn[0]).split('_')[0].isupper():
return self.translateMethod(defn)
# --- constant ---
else:
return self.translateConstant(defn)
def translateClass(self, defn):
return Class()
def translateMethod(self, defn, class_tree=None):
name = self.translateName(defn[0])
clss = self.translateClassName(defn[0])
rtp = defn[1]
static = True if 'S' in ''.join(defn[2]) else False
args = defn[3]
req = []
opt = []
for arg in args:
if arg:
a = self.translateArgument(arg)
opt.append(a) if a.default else req.append(a)
return Method(name, clss, static, '', rtp, False, req, opt)
def translateConstant(self, defn):
const = True if 'const' in defn[0] else False
name = self.translateName(defn[0])
clss = self.translateClassName(defn[0])
tp = 'int'
val = defn[1]
return Constant(name, clss, tp, const, '', val)
def translateArgument(self, defn):
modifiers = defn[3]
ref = '*' if '*' in defn[0] else ''
ref = '&' if '&' in defn[0] or '/Ref' in modifiers else ref
const = '/C' in modifiers
tp = " ".join([word for word in defn[0].replace(ref, '').split() if not ' const ' in ' '+word+' '])
name = defn[1]
default = defn[2] if defn[2] else ''
I = True if '/I' in modifiers or not '/O' in modifiers else False
O = True if '/O' in modifiers else False
return Argument(name, tp, const, I, O, ref, default)
def translateName(self, name):
return name.split(' ')[-1].split('.')[-1]
def translateClassName(self, name):
name = name.split(' ')[-1]
parts = name.split('.')
return parts[-2] if len(parts) > 1 and not parts[-2] == 'cv' else ''
class Namespace(object):
"""
Namespace
|
|- name
|- Constants
|- Methods
|- Constants
"""
def __init__(self, name='', constants=None, classes=None, methods=None):
self.name = name
self.constants = constants if constants else []
self.classes = classes if classes else []
self.methods = methods if methods else []
def __str__(self):
return 'namespace '+self.name+' {\n\n'+\
('\n'.join(c.__str__() for c in self.constants)+'\n\n' if self.constants else '')+\
('\n'.join(f.__str__() for f in self.methods)+'\n\n' if self.methods else '')+\
('\n\n'.join(o.__str__() for o in self.classes) if self.classes else '')+'\n};'
class Class(object):
"""
Class
|
|- name
|- Methods
|- Constants
"""
def __init__(self, name='', namespace='', constants=None, methods=None):
self.name = name
self.namespace = namespace
self.constants = constants if constants else []
self.methods = methods if methods else []
def __str__(self):
return 'class '+self.name+' {\n\t'+\
('\n\t'.join(c.__str__() for c in self.constants)+'\n\n\t' if self.constants else '')+\
('\n\t'.join(f.__str__() for f in self.methods) if self.methods else '')+'\n};'
class Method(object):
"""
Method
int VideoWriter::read( cv::Mat& frame, const cv::Mat& mask=cv::Mat() );
--- ----- ---- -------- ----------------
rtp class name required optional
name the method name
clss the class the method belongs to ('' if free)
static static?
namespace the namespace the method belongs to ('' if free)
rtp the return type
const const?
req list of required arguments
opt list of optional arguments
"""
def __init__(self, name='', clss='', static=False, namespace='', rtp='', const=False, req=None, opt=None):
self.name = name
self.clss = clss
self.constructor = True if name == clss else False
self.static = static
self.const = const
self.namespace = namespace
self.rtp = rtp
self.req = req if req else []
self.opt = opt if opt else []
def __str__(self):
return (self.rtp+' ' if self.rtp else '')+self.name+'('+\
', '.join(arg.__str__() for arg in self.req+self.opt)+\
')'+(' const' if self.const else '')+';'
class Argument(object):
"""
Argument
const cv::Mat& mask=cv::Mat()
----- ---- --- ---- -------
const tp ref name default
name the argument name
tp the argument type
const const?
I is the argument treated as an input?
O is the argument treated as an output (return by reference)
ref is the argument passed by reference? ('*'/'&')
default the default value of the argument ('' if required)
"""
def __init__(self, name='', tp='', const=False, I=True, O=False, ref='', default=''):
self.name = name
self.tp = tp
self.ref = ref
self.I = I
self.O = O
self.const = const
self.default = default
if not tp in valid_types:
raise Exception("Non-supported argument type: {} (name: {})".format(tp, name))
def __str__(self):
return ('const ' if self.const else '')+self.tp+self.ref+\
' '+self.name+('='+self.default if self.default else '')
class Constant(object):
"""
Constant
DFT_COMPLEX_OUTPUT = 12;
---- -------
name default
name the name of the constant
clss the class that the constant belongs to ('' if free)
tp the type of the constant ('' if int)
const const?
ref is the constant a reference? ('*'/'&')
default default value, required for constants
"""
def __init__(self, name='', clss='', tp='', const=False, ref='', default=''):
self.name = name
self.clss = clss
self.tp = tp
self.ref = ref
self.const = const
self.default = default
def __str__(self):
return ('const ' if self.const else '')+self.tp+self.ref+\
' '+self.name+('='+self.default if self.default else '')+';'
def constants(tree):
"""
recursive generator to strip all Constant objects from the ParseTree
and place them into a flat dictionary of { name, value (default) }
"""
if isinstance(tree, dict) and 'constants' in tree and isinstance(tree['constants'], list):
for node in tree['constants']:
yield (node['name'], node['default'])
if isinstance(tree, dict):
for key, val in tree.items():
for gen in constants(val):
yield gen
if isinstance(tree, list):
for val in tree:
for gen in constants(val):
yield gen
def todict(obj):
"""
Recursively convert a Python object graph to sequences (lists)
and mappings (dicts) of primitives (bool, int, float, string, ...)
"""
if isinstance(obj, basestring):
return obj
elif isinstance(obj, dict):
return dict((key, todict(val)) for key, val in obj.items())
elif isinstance(obj, collections.Iterable):
return [todict(val) for val in obj]
elif hasattr(obj, '__dict__'):
return todict(vars(obj))
elif hasattr(obj, '__slots__'):
return todict(dict((name, getattr(obj, name)) for name in getattr(obj, '__slots__')))
return obj
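# Illustrative usage sketch (assumes a ParseTree has already been built elsewhere):
# todict() flattens the namespace objects into plain dicts/lists so that constants()
# can walk them and produce a flat name -> default-value table, e.g.
#   flat = todict(parse_tree.namespaces)
#   const_table = dict(constants(flat))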
|
#!/usr/bin/env python
import sys, re, os, time
from string import Template
from parse_tree import ParseTree, todict, constants
from filters import *
updated_files = []
def update_file(fname, content):
if fname in updated_files:
        print('ERROR(gen_matlab.py): attempt to write file multiple times: {}'.format(fname))
return
updated_files.append(fname)
if os.path.exists(fname):
with open(fname, 'rb') as f:
old_content = f.read()
if old_content == content:
#print('Up-to-date: {}'.format(fname))
return
print('Updating: {}'.format(fname))
else:
print('Writing: {}'.format(fname))
with open(fname, 'wb') as f:
f.write(content)
class MatlabWrapperGenerator(object):
"""
MatlabWrapperGenerator is a class for generating Matlab mex sources from
a set of C++ headers. MatlabWrapperGenerator objects can be default
constructed. Given an instance, the gen() method performs the translation.
"""
def gen(self, module_roots, modules, extras, output_dir):
"""
Generate a set of Matlab mex source files by parsing exported symbols
in a set of C++ headers. The headers can be input in one (or both) of
two methods:
        1. specify module_roots and modules
           Given a list of paths to OpenCV module roots and a list of module names,
           the headers to parse are implicitly constructed.
        2. specify header locations explicitly in extras
Each element in the list of extras must be of the form:
'namespace=/full/path/to/extra/header.hpp' where 'namespace' is
the namespace in which the definitions should be added.
The output_dir specifies the directory to write the generated sources
to.
"""
# dynamically import the parsers
from jinja2 import Environment, FileSystemLoader
import hdr_parser
# parse each of the files and store in a dictionary
# as a separate "namespace"
parser = hdr_parser.CppHeaderParser()
ns = dict((key, []) for key in modules)
path_template = Template('${module}/include/opencv2/${module}.hpp')
for module in modules:
for module_root in module_roots:
# construct a header path from the module root and a path template
header = os.path.join(module_root, path_template.substitute(module=module))
if os.path.isfile(header):
break
else:
raise Exception('no header found for module %s!' % module)
# parse the definitions
ns[module] = parser.parse(header)
for extra in extras:
module = extra.split("=")[0]
header = extra.split("=")[1]
ns[module] = ns[module] + parser.parse(header) if module in ns else parser.parse(header)
# cleanify the parser output
parse_tree = ParseTree()
parse_tree.build(ns)
# setup the template engine
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jtemplate = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True, lstrip_blocks=True)
# add the custom filters
jtemplate.filters['formatMatlabConstant'] = formatMatlabConstant
jtemplate.filters['convertibleToInt'] = convertibleToInt
jtemplate.filters['toUpperCamelCase'] = toUpperCamelCase
jtemplate.filters['toLowerCamelCase'] = toLowerCamelCase
jtemplate.filters['toUnderCase'] = toUnderCase
jtemplate.filters['matlabURL'] = matlabURL
jtemplate.filters['stripTags'] = stripTags
jtemplate.filters['filename'] = filename
jtemplate.filters['comment'] = comment
jtemplate.filters['inputs'] = inputs
jtemplate.filters['ninputs'] = ninputs
jtemplate.filters['outputs'] = outputs
jtemplate.filters['noutputs'] = noutputs
jtemplate.filters['qualify'] = qualify
jtemplate.filters['slugify'] = slugify
jtemplate.filters['only'] = only
jtemplate.filters['void'] = void
jtemplate.filters['not'] = flip
# load the templates
tfunction = jtemplate.get_template('template_function_base.cpp')
tclassm = jtemplate.get_template('template_class_base.m')
tclassc = jtemplate.get_template('template_class_base.cpp')
tconst = jtemplate.get_template('template_map_base.m')
# create the build directory
output_source_dir = output_dir+'/src'
output_private_dir = output_source_dir+'/private'
output_class_dir = output_dir+'/+cv'
output_map_dir = output_dir+'/map'
if not os.path.isdir(output_source_dir):
os.makedirs(output_source_dir)
if not os.path.isdir(output_private_dir):
os.makedirs(output_private_dir)
if not os.path.isdir(output_class_dir):
os.makedirs(output_class_dir)
if not os.path.isdir(output_map_dir):
os.makedirs(output_map_dir)
# populate templates
for namespace in parse_tree.namespaces:
# functions
for method in namespace.methods:
populated = tfunction.render(fun=method, time=time, includes=namespace.name)
update_file(output_source_dir+'/'+method.name+'.cpp', populated.encode('utf-8'))
# classes
for clss in namespace.classes:
# cpp converter
populated = tclassc.render(clss=clss, time=time)
update_file(output_private_dir+'/'+clss.name+'Bridge.cpp', populated.encode('utf-8'))
# matlab classdef
populated = tclassm.render(clss=clss, time=time)
update_file(output_class_dir+'/'+clss.name+'.m', populated.encode('utf-8'))
# create a global constants lookup table
const = dict(constants(todict(parse_tree.namespaces)))
populated = tconst.render(constants=const, time=time)
update_file(output_dir+'/cv.m', populated.encode('utf-8'))
if __name__ == "__main__":
"""
Usage: python gen_matlab.py
--hdrparser /path/to/hdr_parser/dir
--moduleroot [ /path/to/opencv/modules /path/to/opencv_contrib/modules etc ]
--modules [core imgproc objdetect etc]
--extra namespace=/path/to/extra/header.hpp
--outdir /path/to/output/generated/srcs
gen_matlab.py is the main control script for generating matlab source
files from given set of headers. Internally, gen_matlab:
1. constructs the headers to parse from the module root and list of modules
2. parses the headers using CppHeaderParser
3. refactors the definitions using ParseTree
4. populates the templates for classes, function, enums from the
definitions
gen_matlab.py requires the following inputs:
--hdrparser the path to the header parser directory
(opencv/modules/python/src2)
--moduleroot (optional) paths to the opencv directories containing the modules
--modules (optional - required if --moduleroot specified) the modules
to produce bindings for. The path to the include directories
as well as the namespaces are constructed from the modules
and the moduleroot
--extra extra headers explicitly defined to parse. This must be in
the format "namepsace=/path/to/extra/header.hpp". For example,
the core module requires the extra header:
"core=/opencv/modules/core/include/opencv2/core/core/base.hpp"
--outdir the output directory to put the generated matlab sources. In
the OpenCV build this is "${CMAKE_CURRENT_BUILD_DIR}/src"
"""
# parse the input options
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--hdrparser')
parser.add_argument('--moduleroot', nargs='*', default=[], required=False)
parser.add_argument('--modules', nargs='*', default=[], required=False)
parser.add_argument('--extra', nargs='*', default=[], required=False)
parser.add_argument('--outdir')
args = parser.parse_args()
# add the hdr_parser module to the path
sys.path.append(args.hdrparser)
# create the generator
mwg = MatlabWrapperGenerator()
mwg.gen(args.moduleroot, args.modules, args.extra, args.outdir)
|
#!/usr/bin/env python
def substitute(cv, output_dir):
# setup the template engine
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jtemplate = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True, lstrip_blocks=True)
# add the filters
jtemplate.filters['cellarray'] = cellarray
jtemplate.filters['split'] = split
jtemplate.filters['csv'] = csv
# load the template
template = jtemplate.get_template('template_cvmex_base.m')
# create the build directory
output_dir = output_dir+'/+cv'
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# populate template
populated = template.render(cv=cv, time=time)
with open(os.path.join(output_dir, 'mex.m'), 'wb') as f:
f.write(populated.encode('utf-8'))
if __name__ == "__main__":
"""
Usage: python cvmex.py
--opts [-list -of -opts]
--include_dirs [-list -of -opencv_include_directories]
--lib_dir opencv_lib_directory
--libs [-lopencv_core -lopencv_imgproc ...]
--flags [-Wall -opencv_build_flags ...]
--outdir /path/to/generated/output
cvmex.py generates a custom mex compiler that automatically links OpenCV
libraries to built sources where appropriate. The calling syntax is the
same as the builtin mex compiler, with added cv qualification:
>> cv.mex(..., ...);
"""
# parse the input options
import sys, re, os, time
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--opts')
parser.add_argument('--include_dirs')
parser.add_argument('--lib_dir')
parser.add_argument('--libs')
parser.add_argument('--flags')
parser.add_argument('--outdir')
cv = parser.parse_args()
from filters import *
from jinja2 import Environment, FileSystemLoader
# populate the mex base template
substitute(cv, cv.outdir)
|
#!/usr/bin/env python
def substitute(build, output_dir):
# setup the template engine
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jtemplate = Environment(loader=FileSystemLoader(template_dir), trim_blocks=True, lstrip_blocks=True)
# add the filters
jtemplate.filters['csv'] = csv
jtemplate.filters['stripExtraSpaces'] = stripExtraSpaces
# load the template
template = jtemplate.get_template('template_build_info.m')
# create the build directory
output_dir = output_dir+'/+cv'
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
# populate template
populated = template.render(build=build, time=time)
with open(os.path.join(output_dir, 'buildInformation.m'), 'wb') as f:
f.write(populated.encode('utf-8'))
if __name__ == "__main__":
"""
Usage: python build_info.py
--os os_version_string
--arch [bitness processor]
--compiler [id version]
--mex_arch arch_string
--mex_script /path/to/mex/script
--cxx_flags [-list -of -flags -to -passthrough]
--opencv_version version_string
--commit commit_hash_if_using_git
--modules [core imgproc highgui etc]
--configuration Debug/Release
--outdir /path/to/write/build/info
build_info.py generates a Matlab function that can be invoked with a call to
>> cv.buildInformation();
This function prints a summary of the user's OS, OpenCV and Matlab build
given the information passed to this module. build_info.py invokes Jinja2
on the template_build_info.m template.
"""
# parse the input options
import sys, re, os, time
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--os')
parser.add_argument('--arch', nargs=2)
parser.add_argument('--compiler', nargs='+')
parser.add_argument('--mex_arch')
parser.add_argument('--mex_script')
parser.add_argument('--mex_opts', default=['-largeArrayDims'], nargs='*')
parser.add_argument('--cxx_flags', default=[], nargs='*')
parser.add_argument('--opencv_version', default='', nargs='?')
parser.add_argument('--commit', default='Not in working git tree', nargs='?')
parser.add_argument('--modules', nargs='+')
parser.add_argument('--configuration')
parser.add_argument('--outdir')
build = parser.parse_args()
from filters import *
from jinja2 import Environment, FileSystemLoader
# populate the build info template
substitute(build, build.outdir)
|
from textwrap import TextWrapper
import re, os
# precompile a URL matching regular expression
urlexpr = re.compile(r"((https?):((//)|(\\\\))+[\w\d:#@%/;$()~_?\+-=\\\.&]*)", re.MULTILINE|re.UNICODE)
def inputs(args):
'''Keeps only the input arguments in a list of elements.
'''
try:
return [arg for arg in args['only'] if arg.I and not arg.O]
except:
return [arg for arg in args if arg.I]
def ninputs(fun):
'''Counts the number of input arguments in the input list'''
return len(inputs(fun.req)) + len(inputs(fun.opt))
def outputs(args):
'''Determines whether any of the given arguments is an output
reference, and returns a list of only those elements.
    In OpenCV, output references are preceded by CV_OUT or have an *OutputArray* type
'''
try:
return [arg for arg in args['only'] if arg.O and not arg.I]
except:
return [arg for arg in args if arg.O]
def only(args):
'''Returns exclusively the arguments which are only inputs
or only outputs'''
d = {};
d['only'] = args
return d
def void(arg):
'''Is the input 'void' '''
return arg == 'void'
def flip(arg):
'''flip the sign of the input'''
return not arg
def noutputs(fun):
'''Counts the number of output arguments in the input list'''
return int(not void(fun.rtp)) + len(outputs(fun.req)) + len(outputs(fun.opt))
def convertibleToInt(string):
'''Can the input string be evaluated to an integer?'''
salt = '1+'
try:
exec(salt+string)
return True
except:
return False
def binaryToDecimal(string):
    '''Attempt to evaluate the input string (binary, hex or simple arithmetic) and
    return its decimal representation; the string is returned unchanged on failure'''
try:
return str(eval(string))
except:
return string
def formatMatlabConstant(string, table):
'''
Given a string representing a Constant, and a table of all Constants,
attempt to resolve the Constant into a valid Matlab expression
For example, the input
DEPENDENT_VALUE = 1 << FIXED_VALUE
needs to be converted to
DEPENDENT_VALUE = bitshift(1, cv.FIXED_VALUE);
'''
# split the string into expressions
words = re.split('(\W+)', string)
# add a 'cv' prefix if an expression is also a key in the lookup table
words = ''.join([('cv.'+word if word in table else word) for word in words])
# attempt to convert arithmetic expressions and binary/hex to decimal
words = binaryToDecimal(words)
# convert any remaining bitshifts to Matlab 'bitshift' methods
shift = re.sub('[\(\) ]', '', words).split('<<')
words = 'bitshift('+shift[0]+', '+shift[1]+')' if len(shift) == 2 else words
return words
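# Illustrative example with a hypothetical constant table (not in the original file):
#   formatMatlabConstant('1 << FIXED_VALUE', {'FIXED_VALUE': '3'})
# returns 'bitshift(1, cv.FIXED_VALUE)'.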
def matlabURL(string):
"""This filter is used to construct a Matlab specific URL that calls the
system browser instead of the (insanely bad) builtin Matlab browser"""
return re.sub(urlexpr, '<a href="matlab: web(\'\\1\', \'-browser\')">\\1</a>', string)
def capitalizeFirst(text):
'''Capitalize only the first character of the text string'''
return text[0].upper() + text[1:]
def toUpperCamelCase(text):
'''variable_name --> VariableName'''
return ''.join([capitalizeFirst(word) for word in text.split('_')])
def toLowerCamelCase(text):
'''variable_name --> variableName'''
    upper_camel = toUpperCamelCase(text)
return upper_camel[0].lower() + upper_camel[1:]
def toUnderCase(text):
'''VariableName --> variable_name'''
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', text)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
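# Illustrative round trips for the casing helpers above (not in the original file):
#   toUpperCamelCase('variable_name')  -> 'VariableName'
#   toLowerCamelCase('variable_name')  -> 'variableName'
#   toUnderCase('VariableName')        -> 'variable_name'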
def stripTags(text):
'''
strip or convert html tags from a text string
<code>content</code> --> content
<anything> --> ''
    &lt --> <
    &gt --> >
&le --> <=
&ge --> >=
'''
upper = lambda pattern: pattern.group(1).upper()
text = re.sub('<code>(.*?)</code>', upper, text)
text = re.sub('<([^=\s].*?)>', '', text)
    text = re.sub('&lt', '<', text)
    text = re.sub('&gt', '>', text)
text = re.sub('&le', '<=', text)
text = re.sub('&ge', '>=', text)
return text
def qualify(text, name):
'''Adds uppercase 'CV.' qualification to any occurrences of name in text'''
return re.sub(name.upper(), 'CV.'+name.upper(), text)
def slugify(text):
'''A_Function_name --> a-function-name'''
return text.lower().replace('_', '-')
def filename(fullpath):
'''Returns only the filename without an extension from a file path
eg. /path/to/file.txt --> file
'''
return os.path.splitext(os.path.basename(fullpath))[0]
def split(text, delimiter=' '):
'''Split a text string into a list using the specified delimiter'''
return text.split(delimiter)
def csv(items, sep=', '):
'''format a list with a separator (comma if not specified)'''
return sep.join(item for item in items)
def cellarray(items, escape='\''):
'''format a list of items as a matlab cell array'''
return '{' + ', '.join(escape+item+escape for item in items) + '}'
def stripExtraSpaces(text):
'''Removes superfluous whitespace from a string, including the removal
of all leading and trailing whitespace'''
return ' '.join(text.split())
def comment(text, wrap=80, escape='% ', escape_first='', escape_last=''):
'''comment filter
Takes a string in text, and wraps it to wrap characters in length with
preceding comment escape sequence on each line. escape_first and
escape_last can be used for languages which define block comments.
Examples:
C++ inline comment comment(80, '// ')
C block comment: comment(80, ' * ', '/*', ' */')
Matlab comment: comment(80, '% ')
Matlab block comment: comment(80, '', '%{', '%}')
Python docstrings: comment(80, '', '\'\'\'', '\'\'\'')
'''
tw = TextWrapper(width=wrap-len(escape))
if escape_first:
escape_first = escape_first+'\n'
if escape_last:
escape_last = '\n'+escape_last
escapn = '\n'+escape
lines = text.split('\n')
wlines = (tw.wrap(line) for line in lines)
return escape_first+escape+escapn.join(escapn.join(line) for line in wlines)+escape_last
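# Illustrative example of the comment filter (not in the original file):
#   comment('first line\nsecond line')
# returns '% first line\n% second line' with the default escape sequence.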
|
import numpy as np
import cv2 as cv
import sys
if len(sys.argv) != 2:
print('Input video name is missing')
exit()
print('Select 3 tracking targets')
cv.namedWindow("tracking")
camera = cv.VideoCapture(sys.argv[1])
tracker = cv.MultiTracker_create()
init_once = False
ok, image=camera.read()
if not ok:
print('Failed to read video')
exit()
bbox1 = cv.selectROI('tracking', image)
bbox2 = cv.selectROI('tracking', image)
bbox3 = cv.selectROI('tracking', image)
while camera.isOpened():
ok, image=camera.read()
if not ok:
        print('no image to read')
break
if not init_once:
ok = tracker.add(cv.TrackerMIL_create(), image, bbox1)
ok = tracker.add(cv.TrackerMIL_create(), image, bbox2)
ok = tracker.add(cv.TrackerMIL_create(), image, bbox3)
init_once = True
ok, boxes = tracker.update(image)
    print(ok, boxes)
for newbox in boxes:
p1 = (int(newbox[0]), int(newbox[1]))
p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
cv.rectangle(image, p1, p2, (200,0,0))
cv.imshow('tracking', image)
k = cv.waitKey(1)
if k == 27 : break # esc pressed
|
import numpy as np
import cv2 as cv
import sys
if len(sys.argv) != 2:
print('Input video name is missing')
exit()
cv.namedWindow("tracking")
camera = cv.VideoCapture(sys.argv[1])
ok, image=camera.read()
if not ok:
print('Failed to read video')
exit()
bbox = cv.selectROI("tracking", image)
tracker = cv.TrackerMIL_create()
init_once = False
while camera.isOpened():
ok, image=camera.read()
if not ok:
        print('no image to read')
break
if not init_once:
ok = tracker.init(image, bbox)
init_once = True
ok, newbox = tracker.update(image)
    print(ok, newbox)
if ok:
p1 = (int(newbox[0]), int(newbox[1]))
p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
cv.rectangle(image, p1, p2, (200,0,0))
cv.imshow("tracking", image)
k = cv.waitKey(1) & 0xff
if k == 27 : break # esc pressed
|
#!/usr/bin/env python
# This file is part of OpenCV project.
# It is subject to the license terms in the LICENSE file found in the top-level directory
# of this distribution and at http://opencv.org/license.html
# Copyright (C) 2020 by Archit Rungta
import hdr_parser, sys, re, os
from string import Template
from pprint import pprint
from collections import namedtuple
import json
import os, shutil
from io import StringIO
forbidden_arg_types = ["void*"]
ignored_arg_types = ["RNG*"]
pass_by_val_types = ["Point*", "Point2f*", "Rect*", "String*", "double*", "float*", "int*"]
def get_char(c):
if c.isalpha():
return c
if ord(c)%52 < 26:
return chr(ord('a')+ord(c)%26)
return chr(ord('A')+ord(c)%26)
def get_var(inp):
out = ''
for c in inp:
out = out+get_char(c)
return out
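# Illustrative example (not in the original file): get_var() maps an arbitrary default
# value expression onto a letters-only token that can be used as a generated function
# name; with the mapping above, get_var('Size(3, 3)') yields 'SizeOZSGZP'.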
def normalize_name(name):
return name.replace('.', '::')
def normalize_class_name(name):
_, classes, name = split_decl_name(normalize_name(name))
return "_".join(classes+[name])
def normalize_full_name(name):
ns, classes, name = split_decl_name(normalize_name(name))
return "::".join(ns)+'::'+'_'.join(classes+[name])
def split_decl_name(name):
chunks = name.split('::')
namespace = chunks[:-1]
classes = []
while namespace and '::'.join(namespace) not in namespaces:
classes.insert(0, namespace.pop())
ns = '::'.join(namespace)
if ns not in namespaces and ns:
assert(0)
return namespace, classes, chunks[-1]
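# Illustrative example (assumes the global 'namespaces' dict already contains 'cv'):
#   split_decl_name('cv::VideoCapture::read') -> (['cv'], ['VideoCapture'], 'read')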
def handle_cpp_arg(inp):
def handle_vector(match):
return handle_cpp_arg("%svector<%s>" % (match.group(1), match.group(2)))
def handle_ptr(match):
return handle_cpp_arg("%sPtr<%s>" % (match.group(1), match.group(2)))
inp = re.sub("(.*)vector_(.*)", handle_vector, inp)
inp = re.sub("(.*)Ptr_(.*)", handle_ptr, inp)
return inp.replace("String", "string")
def get_template_arg(inp):
inp = inp.replace(' ','').replace('*', '').replace('cv::', '').replace('std::', '')
def handle_vector(match):
return get_template_arg("%s" % (match.group(1)))
def handle_ptr(match):
return get_template_arg("%s" % (match.group(1)))
inp = re.sub("vector<(.*)>", handle_vector, inp)
inp = re.sub("Ptr<(.*)>", handle_ptr, inp)
ns, cl, n = split_decl_name(inp)
inp = "::".join(cl+[n])
# print(inp)
return inp.replace("String", "string")
def registered_tp_search(tp):
found = False
if not tp:
return True
for tpx in registered_types:
if re.findall(tpx, tp):
found = True
break
return found
namespaces = {}
type_paths = {}
enums = {}
classes = {}
functions = {}
registered_types = ["int", "Size.*", "Rect.*", "Scalar", "RotatedRect", "Point.*", "explicit", "string", "bool", "uchar",
"Vec.*", "float", "double", "char", "Mat", "size_t", "RNG", "DescriptorExtractor", "FeatureDetector", "TermCriteria"]
class ClassProp(object):
"""
    Helper class to store field information (type, name and flags) of classes and structs
"""
def __init__(self, decl):
self.tp = decl[0]
self.name = decl[1]
self.readonly = True
if "/RW" in decl[3]:
self.readonly = False
class ClassInfo(object):
def __init__(self, name, decl=None):
self.name = name
self.mapped_name = normalize_class_name(name)
self.ismap = False #CV_EXPORTS_W_MAP
self.isalgorithm = False #if class inherits from cv::Algorithm
self.methods = {} #Dictionary of methods
self.props = [] #Collection of ClassProp associated with this class
self.base = None #name of base class if current class inherits another class
self.constructors = [] #Array of constructors for this class
self.add_decl(decl)
classes[name] = self
def add_decl(self, decl):
if decl:
# print(decl)
bases = decl[1].split(',')
if len(bases[0].split()) > 1:
bases[0] = bases[0].split()[1]
bases = [x.replace(' ','') for x in bases]
# print(bases)
if len(bases) > 1:
                # de-duplicate the base list and drop the 'cv::class' entry
bases = list(set(bases))
bases.remove('cv::class')
bases_clear = []
for bb in bases:
if self.name not in bb:
bases_clear.append(bb)
bases = bases_clear
if len(bases) > 1:
print("Note: Class %s has more than 1 base class (not supported by CxxWrap)" % (self.name,))
print(" Bases: ", " ".join(bases))
print(" Only the first base class will be used")
if len(bases) >= 1:
self.base = bases[0].replace('.', '::')
if "cv::Algorithm" in bases:
self.isalgorithm = True
for m in decl[2]:
if m.startswith("="):
self.mapped_name = m[1:]
# if m == "/Map":
# self.ismap = True
self.props = [ClassProp(p) for p in decl[3]]
    # builds the name of the generated getter/setter wrapper function for a class property
def get_prop_func_cpp(self, mode, propname):
return "jlopencv_" + self.mapped_name + "_"+mode+"_"+propname
argumentst = []
default_values = []
class ArgInfo(object):
"""
Helper class to parse and contain information about function arguments
"""
def sec(self, arg_tuple):
self.isbig = arg_tuple[0] in ["Mat", "vector_Mat", "cuda::GpuMat", "GpuMat", "vector_GpuMat", "UMat", "vector_UMat"] # or self.tp.startswith("vector")
self.tp = handle_cpp_arg(arg_tuple[0]) #C++ Type of argument
argumentst.append(self.tp)
self.name = arg_tuple[1] #Name of argument
# TODO: Handle default values nicely
self.default_value = arg_tuple[2] #Default value
self.inputarg = True #Input argument
self.outputarg = False #output argument
self.ref = False
for m in arg_tuple[3]:
if m == "/O":
self.inputarg = False
self.outputarg = True
elif m == "/IO":
self.inputarg = True
self.outputarg = True
elif m == '/Ref':
self.ref = True
if self.tp in pass_by_val_types:
self.outputarg = True
def __init__(self, name, tp = None):
if not tp:
self.sec(name)
else:
self.name = name
self.tp = tp
class FuncVariant(object):
"""
    Helper class to parse and contain information about different overloaded versions of the same function
"""
def __init__(self, classname, name, mapped_name, decl, namespace, istatic=False):
self.classname = classname
self.name = name
self.mapped_name = mapped_name
self.isconstructor = name.split('::')[-1]==classname.split('::')[-1]
self.isstatic = istatic
self.namespace = namespace
self.rettype = decl[4]
if self.rettype == "void" or not self.rettype:
self.rettype = ""
else:
self.rettype = handle_cpp_arg(self.rettype)
self.args = []
for ainfo in decl[3]:
a = ArgInfo(ainfo)
if a.default_value and ('(' in a.default_value or ':' in a.default_value):
default_values.append(a.default_value)
assert not a.tp in forbidden_arg_types, 'Forbidden type "{}" for argument "{}" in "{}" ("{}")'.format(a.tp, a.name, self.name, self.classname)
if a.tp in ignored_arg_types:
continue
self.args.append(a)
self.init_proto()
if name not in functions:
functions[name]= []
functions[name].append(self)
if not registered_tp_search(get_template_arg(self.rettype)):
namespaces[namespace].register_types.append(get_template_arg(self.rettype))
for arg in self.args:
if not registered_tp_search(get_template_arg(arg.tp)):
namespaces[namespace].register_types.append(get_template_arg(arg.tp))
def get_wrapper_name(self):
"""
Return wrapping function name
"""
name = self.name.replace('::', '_')
if self.classname:
classname = self.classname.replace('::', '_') + "_"
else:
classname = ""
return "jlopencv_" + self.namespace.replace('::','_') + '_' + classname + name
def init_proto(self):
# string representation of argument list, with '[', ']' symbols denoting optional arguments, e.g.
# "src1, src2[, dst[, mask]]" for cv.add
prototype = ""
inlist = []
optlist = []
outlist = []
deflist = []
biglist = []
# This logic can almost definitely be simplified
for a in self.args:
if a.isbig and not (a.inputarg and not a.default_value):
optlist.append(a)
if a.outputarg:
outlist.append(a)
if a.inputarg and not a.default_value:
inlist.append(a)
elif a.inputarg and a.default_value and not a.isbig:
optlist.append(a)
elif not (a.isbig and not (a.inputarg and not a.default_value)):
deflist.append(a)
if self.rettype:
outlist = [ArgInfo("retval", self.rettype)] + outlist
if self.isconstructor:
assert outlist == [] or outlist[0].tp == "explicit"
outlist = [ArgInfo("retval", self.classname)]
self.outlist = outlist
self.optlist = optlist
self.deflist = deflist
self.inlist = inlist
self.prototype = prototype
class NameSpaceInfo(object):
def __init__(self, name):
self.funcs = {}
self.classes = {} #Dictionary of classname : ClassInfo objects
self.enums = {}
self.consts = {}
self.register_types = []
self.name = name
def add_func(decl):
"""
    Create functions based on the declaration and add them to the appropriate classes and/or namespaces
"""
decl[0] = decl[0].replace('.', '::')
namespace, classes, barename = split_decl_name(decl[0])
name = "::".join(namespace+classes+[barename])
full_classname = "::".join(namespace + classes)
classname = "::".join(classes)
namespace = '::'.join(namespace)
is_static = False
isphantom = False
mapped_name = ''
for m in decl[2]:
if m == "/S":
is_static = True
elif m == "/phantom":
print("phantom not supported yet ")
return
elif m.startswith("="):
mapped_name = m[1:]
elif m.startswith("/mappable="):
print("Mappable not supported yet")
return
# if m == "/V":
# print("skipping ", name)
# return
if classname and full_classname not in namespaces[namespace].classes:
# print("HH1")
# print(namespace, classname)
namespaces[namespace].classes[full_classname] = ClassInfo(full_classname)
assert(0)
if is_static:
# Add it as global function
func_map = namespaces[namespace].funcs
if name not in func_map:
func_map[name] = []
if not mapped_name:
mapped_name = "_".join(classes + [barename])
func_map[name].append(FuncVariant("", name, mapped_name, decl, namespace, True))
else:
if classname:
func = FuncVariant(full_classname, name, barename, decl, namespace, False)
if func.isconstructor:
namespaces[namespace].classes[full_classname].constructors.append(func)
else:
func_map = namespaces[namespace].classes[full_classname].methods
if name not in func_map:
func_map[name] = []
func_map[name].append(func)
else:
func_map = namespaces[namespace].funcs
if name not in func_map:
func_map[name] = []
if not mapped_name:
mapped_name = barename
func_map[name].append(FuncVariant("", name, mapped_name, decl, namespace, False))
def add_class(stype, name, decl):
"""
    Creates a class based on its name and declaration, and adds it to the list of classes and to the JSON file
"""
# print("n", name)
name = name.replace('.', '::')
classinfo = ClassInfo(name, decl)
namespace, classes, barename = split_decl_name(name)
namespace = '::'.join(namespace)
if classinfo.name in classes:
namespaces[namespace].classes[name].add_decl(decl)
else:
namespaces[namespace].classes[name] = classinfo
def add_const(name, decl, tp = ''):
name = name.replace('.','::')
namespace, classes, barename = split_decl_name(name)
namespace = '::'.join(namespace)
mapped_name = '_'.join(classes+[barename])
ns = namespaces[namespace]
if mapped_name in ns.consts:
print("Generator error: constant %s (name=%s) already exists" \
% (name, name))
sys.exit(-1)
ns.consts[name] = mapped_name
def add_enum(name, decl):
name = name.replace('.', '::')
mapped_name = normalize_class_name(name)
# print(name)
if mapped_name.endswith("<unnamed>"):
mapped_name = None
else:
enums[name.replace(".", "::")] = mapped_name
const_decls = decl[3]
if mapped_name:
namespace, classes, name2 = split_decl_name(name)
namespace = '::'.join(namespace)
mapped_name = '_'.join(classes+[name2])
# print(mapped_name)
namespaces[namespace].enums[name] = (name.replace(".", "::"),mapped_name)
for decl in const_decls:
name = decl[0]
add_const(name.replace("const ", "", ).strip(), decl, "int")
def gen_tree(srcfiles):
parser = hdr_parser.CppHeaderParser(generate_umat_decls=False, generate_gpumat_decls=False)
allowed_func_list = []
with open("funclist.csv", "r") as f:
allowed_func_list = f.readlines()
allowed_func_list = [x[:-1] for x in allowed_func_list]
count = 0
# step 1: scan the headers and build more descriptive maps of classes, consts, functions
for hdr in srcfiles:
decls = parser.parse(hdr)
for ns in parser.namespaces:
ns = ns.replace('.', '::')
if ns not in namespaces:
namespaces[ns] = NameSpaceInfo(ns)
count += len(decls)
if len(decls) == 0:
continue
if hdr.find('opencv2/') >= 0: #Avoid including the shadow files
# code_include.write( '#include "{0}"\n'.format(hdr[hdr.rindex('opencv2/'):]) )
pass
for decl in decls:
name = decl[0]
if name.startswith("struct") or name.startswith("class"):
# class/struct
p = name.find(" ")
stype = name[:p]
name = name[p+1:].strip()
add_class(stype, name, decl)
elif name.startswith("const"):
# constant
assert(0)
add_const(name.replace("const ", "").strip(), decl)
elif name.startswith("enum"):
# enum
add_enum(name.rsplit(" ", 1)[1], decl)
else:
# function
if decl[0] in allowed_func_list:
add_func(decl)
# step 1.5 check if all base classes exist
# print(classes)
for name, classinfo in classes.items():
if classinfo.base:
base = classinfo.base
# print(base)
if base not in classes:
print("Generator error: unable to resolve base %s for %s"
% (classinfo.base, classinfo.name))
sys.exit(-1)
base_instance = classes[base]
classinfo.base = base
classinfo.isalgorithm |= base_instance.isalgorithm # wrong processing of 'isalgorithm' flag:
# doesn't work for trees(graphs) with depth > 2
classes[name] = classinfo
# tree-based propagation of 'isalgorithm'
processed = dict()
def process_isalgorithm(classinfo):
if classinfo.isalgorithm or classinfo in processed:
return classinfo.isalgorithm
res = False
if classinfo.base:
res = process_isalgorithm(classes[classinfo.base])
#assert not (res == True or classinfo.isalgorithm is False), "Internal error: " + classinfo.name + " => " + classinfo.base
classinfo.isalgorithm |= res
res = classinfo.isalgorithm
processed[classinfo] = True
return res
for name, classinfo in classes.items():
process_isalgorithm(classinfo)
for name, ns in namespaces.items():
if name.split('.')[-1] == '':
continue
ns.registered = []
for name, cl in ns.classes.items():
registered_types.append(get_template_arg(name))
ns.registered.append(cl.mapped_name)
nss, clss, bs = split_decl_name(name)
type_paths[bs] = [name.replace("::", ".")]
type_paths["::".join(clss+[bs])] = [name.replace("::", ".")]
for e1,e2 in ns.enums.items():
registered_types.append(get_template_arg(e2[0]))
registered_types.append(get_template_arg(e2[0]).replace('::', '_')) #whyyy typedef
ns.registered.append(e2[1])
ns.register_types = list(set(ns.register_types))
ns.register_types = [tp for tp in ns.register_types if not registered_tp_search(tp) and not tp in ns.registered]
for tp in ns.register_types:
registered_types.append(get_template_arg(tp))
ns.registered.append(get_template_arg(tp))
default_valuesr = list(set(default_values))
# registered_types = registered_types + ns.register_types
return namespaces, default_valuesr
|
#!/usr/bin/python
# This file is part of OpenCV project.
# It is subject to the license terms in the LICENSE file found in the top-level directory
# of this distribution and at http://opencv.org/license.html
# Copyright (C) 2020 by Archit Rungta
import sys
import subprocess
import os
mod_path = sys.argv[1]
hdr_list = [
mod_path+"/core/include/opencv2/core.hpp",
mod_path+"/core/include/opencv2/core/base.hpp",
mod_path+"/core/include/opencv2/core/bindings_utils.hpp",
mod_path+"/core/include/opencv2/core/optim.hpp",
mod_path+"/core/include/opencv2/core/persistence.hpp",
mod_path+"/core/include/opencv2/core/types.hpp",
mod_path+"/core/include/opencv2/core/utility.hpp"]
for module in sys.argv[2:]:
if module=='opencv_imgproc':
hdr_list.append(mod_path+"/imgproc/include/opencv2/imgproc.hpp")
elif module =='opencv_dnn':
hdr_list.append(mod_path+"/dnn/include/opencv2/dnn/dnn.hpp")
elif module == 'opencv_imgcodecs':
hdr_list.append(mod_path+"/imgcodecs/include/opencv2/imgcodecs.hpp")
elif module =='opencv_videoio':
hdr_list.append(mod_path+"/videoio/include/opencv2/videoio.hpp")
elif module =='opencv_highgui':
hdr_list.append(mod_path+"/highgui/include/opencv2/highgui.hpp")
if not os.path.exists('autogen_cpp'):
os.makedirs('autogen_cpp')
os.makedirs('autogen_jl')
subprocess.call([sys.executable, 'gen3_cpp.py', str(';'.join(hdr_list))])
subprocess.call([sys.executable, 'gen3_julia_cxx.py', str(';'.join(hdr_list))])
subprocess.call([sys.executable, 'gen3_julia.py', str(';'.join(hdr_list))])
|
#!/usr/bin/env python
# This file is part of OpenCV project.
# It is subject to the license terms in the LICENSE file found in the top-level directory
# of this distribution and at http://opencv.org/license.html
# Copyright (C) 2020 by Archit Rungta
from __future__ import unicode_literals # Needed for python2
import hdr_parser, sys, re, os
from string import Template
from pprint import pprint
from collections import namedtuple
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
import os, shutil
from parse_tree import *
jl_cpp_argmap = {}
jl_cpp_defmap = {}
julia_types = ["Int32", "Float32", "Float64", "Bool", "String", "Array", "Any"]
cv_types = ["UMat","Size" ]
submodule_template = Template('')
root_template = Template('')
with open("binding_templates_jl/template_cv2_submodule_cxx.jl", "r") as f:
submodule_template = Template(f.read())
with open("binding_templates_jl/template_cv2_root.jl", "r") as f:
root_template = Template(f.read())
with open("typemap.txt", 'r') as f:
tmp = f.readlines()
for ln in tmp:
ln = ln.strip('\n').split(':')
jl_cpp_argmap[ln[0]] = ln[1]
with open("defval.txt", 'r') as f:
tmp = f.readlines()
for ln in tmp:
ln = ln.strip('\n').split('|')
if ln[0] not in jl_cpp_defmap:
jl_cpp_defmap[ln[0]] = {}
jl_cpp_defmap[ln[0]][ln[1]] = ln[2]
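# Illustrative line formats assumed by the two loaders above (the concrete entries are
# hypothetical):
#   typemap.txt: one "cpp_type:julia_type" pair per line, e.g. "int:Int32"
#   defval.txt : one "cpp_type|cpp_default|julia_default" triple per line,
#                e.g. "Size|Size()|Size(-1,-1)"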
def handle_def_arg(inp, tp = '', ns=''):
tp = tp.strip()
inp = inp.strip()
out = ''
if tp in julia_types:
out = inp
    elif not inp or inp=='Mat()':
        if tp=='Mat' or tp=='InputArray':
            out = 'CxxMat()'
        else:
            out = tp+'()'
elif inp=="String()":
out= '""'
elif '(' in inp or ':' in inp:
out = "cpp_to_julia("+get_var(inp)+"())"
else:
print("Default not found")
if inp in jl_cpp_defmap[tp]:
out = jl_cpp_defmap[tp][inp]
elif inp != '':
print(inp+" not found")
# print(inp, tp, out)
return out
def handle_jl_arg(inp):
if not inp:
return ''
inp = inp.replace('std::', '')
if inp in jl_cpp_argmap:
return jl_cpp_argmap[inp]
inp = inp.replace('cv::', '')
return inp
# return outs
class ClassInfo(ClassInfo):
def get_jl_code(self):
if self.ismap:
return ''
return self.overload_get()+self.overload_set()
def overload_get(self):
stra = "function Base.getproperty(m::%s, s::Symbol)\n" %(self.mapped_name)
if self.isalgorithm:
stra = "function Base.getproperty(m::cv_Ptr{%s}, s::Symbol)\n" %(self.mapped_name)
for prop in self.props:
stra = stra + " if s==:" + prop.name+"\n"
stra = stra + " return cpp_to_julia(%s(m))\n"%self.get_prop_func_cpp("get", prop.name)
stra = stra + " end\n"
stra = stra + " return Base.getfield(m, s)\nend\n"
return stra
def overload_set(self):
stra = "function Base.setproperty!(m::%s, s::Symbol, v)\n" %(self.mapped_name)
if self.isalgorithm:
stra = "function Base.setproperty!(m::cv_Ptr{%s}, s::Symbol, v)\n" %(self.mapped_name)
for prop in self.props:
            if prop.readonly:
continue
stra = stra + " if s==:" + prop.name+"\n"
stra = stra + " %s(m, julia_to_cpp(v))\n"%(self.get_prop_func_cpp("set", prop.name))
stra = stra + " end\n"
stra = stra + " return Base.setfield!(m, s, v)\nend\n"
return stra
class FuncVariant(FuncVariant):
def get_argument_full(self, classname='', isalgo = False):
arglist = self.inlist + self.optlist
argnamelist = [arg.name+"::"+(handle_jl_arg(arg.tp) if handle_jl_arg(arg.tp) not in pass_by_val_types else handle_jl_arg(arg.tp)[:-1]) for arg in arglist]
argstr = ", ".join(argnamelist)
return argstr
def get_argument_opt(self, ns=''):
# [print(arg.default_value,":",handle_def_arg(arg.default_value, handle_jl_arg(arg.tp))) for arg in self.optlist]
str2 = ", ".join(["%s::%s = %s(%s)" % (arg.name, handle_jl_arg(arg.tp), handle_jl_arg(arg.tp) if (arg.tp == 'int' or arg.tp=='float' or arg.tp=='double') else '', handle_def_arg(arg.default_value, handle_jl_arg(arg.tp), ns)) for arg in self.optlist])
return str2
def get_argument_def(self, classname, isalgo):
arglist = self.inlist
argnamelist = [arg.name+"::"+(handle_jl_arg(arg.tp) if handle_jl_arg(arg.tp) not in pass_by_val_types else handle_jl_arg(arg.tp)[:-1]) for arg in arglist]
argstr = ", ".join(argnamelist)
return argstr
def get_return(self, classname=''):
argstr = ''
arglist = self.inlist + self.optlist
return "return cpp_to_julia(%s(%s))" %(self.get_wrapper_name(), ",".join(["julia_to_cpp(%s)" % (x.name) for x in arglist]))
def get_algo_tp(self, classname, isalgo):
if not isalgo or not classname:
return ''
return ' where {T <: %s}' % classname
def get_complete_code(self, classname='', isalgo = False, iscons = False, gen_default = True, ns = ''):
if classname and not iscons:
if isalgo:
self.inlist = [ArgInfo("cobj", "cv_Ptr{T}")] + self.inlist
else:
self.inlist = [ArgInfo("cobj", classname)] + self.inlist
map_name = self.mapped_name
if ns!='cv':
map_name = '%s_%s' %(ns.split('::')[-1], map_name)
outstr = 'function %s(%s)%s\n\t%s\nend\n' % (map_name, self.get_argument_full(classname, isalgo), self.get_algo_tp(classname, isalgo),self.get_return())
str2 = ", ".join([x.name for x in self.inlist + self.optlist])
# outstr = outstr +
if self.get_argument_opt() != '' and gen_default:
outstr = outstr + ('%s(%s; %s)%s = %s(%s)\n' % (map_name, self.get_argument_def(classname, isalgo), self.get_argument_opt(ns), self.get_algo_tp(classname, isalgo), map_name, str2))
if iscons and len(self.inlist+self.optlist)==0 and ns=='cv':
return ''
return outstr
def gen(srcfiles):
namespaces, _ = gen_tree(srcfiles)
jl_code = StringIO()
for name, ns in namespaces.items():
cv_types.extend(ns.registered)
jl_code = StringIO()
nsname = name
for e1,e2 in ns.enums.items():
# jl_code.write('\n const {0} = Int32'.format(e2[0]))
jl_code.write('\n const {0} = Int32 \n'.format(e2[1]))
# Do not duplicate functions. This should prevent overwriting of Mat function by UMat functions
function_signatures = []
for cname, cl in ns.classes.items():
cl.__class__ = ClassInfo
jl_code.write(cl.get_jl_code())
for mname, fs in cl.methods.items():
for f in fs:
f.__class__ = FuncVariant
sign = (f.name, f.mapped_name, f.classname, [x.tp for x in f.inlist+f.optlist])
if sign in function_signatures:
print("Skipping entirely: ", f.name)
continue
sign2 = (f.name, f.mapped_name, f.classname, [x.tp for x in f.inlist])
gend = True
if sign2 in function_signatures:
print("Skipping default declaration: ", f.name)
gend = False
jl_code.write('\n%s' % f.get_complete_code(classname = cl.mapped_name, isalgo = cl.isalgorithm, gen_default = gend, ns=nsname))
function_signatures.append(sign)
function_signatures.append(sign2)
for f in cl.constructors:
f.__class__ = FuncVariant
jl_code.write('\n%s' % f.get_complete_code(classname = cl.mapped_name, isalgo = cl.isalgorithm, iscons = True, ns=nsname))
for mname, fs in ns.funcs.items():
for f in fs:
f.__class__ = FuncVariant
sign = (f.name, f.mapped_name, f.classname, [x.tp for x in f.inlist+f.optlist])
if sign in function_signatures:
print("Skipping entirely: ", f.name)
continue
gend = True
sign2 = (f.name, f.mapped_name, f.classname, [x.tp for x in f.inlist])
if sign2 in function_signatures:
print("Skipping default declaration: ", f.name)
gend = False
jl_code.write('\n%s' % f.get_complete_code(gen_default = gend, ns=nsname))
function_signatures.append(sign)
function_signatures.append(sign2)
imports = ''
for namex in namespaces:
if namex.startswith(name) and len(namex.split('::')) == 1 + len(name.split('::')):
imports = imports + '\ninclude("%s_cxx_wrap.jl")'%namex.replace('::', '_')
code = ''
if name == 'cv':
code = root_template.substitute(modname = name, code = jl_code.getvalue(), submodule_imports = imports)
else:
code = submodule_template.substitute(code = jl_code.getvalue(), submodule_imports = imports)
with open ('autogen_jl/%s_cxx_wrap.jl' % ns.name.replace('::', '_'), 'w') as fd:
fd.write(code)
srcfiles = hdr_parser.opencv_hdr_list
if len(sys.argv) > 1:
srcfiles = [l.strip() for l in sys.argv[1].split(';')]
gen(srcfiles)
|
#!/usr/bin/env python
# This file is part of OpenCV project.
# It is subject to the license terms in the LICENSE file found in the top-level directory
# of this distribution and at http://opencv.org/license.html
# Copyright (C) 2020 by Archit Rungta
from __future__ import unicode_literals # Needed for python2
import hdr_parser, sys, re, os
from string import Template
from pprint import pprint
from collections import namedtuple
import os, shutil
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
from parse_tree import *
mod_template = ""
with open("binding_templates_cpp/cv_core.cpp", "r") as f:
mod_template = Template(f.read())
def normalize_name(name):
return name.replace('.', '::')
def normalize_class_name(name):
_, classes, name = split_decl_name(normalize_name(name))
return "_".join(classes+[name])
def normalize_full_name(name):
ns, classes, name = split_decl_name(normalize_name(name))
return "::".join(ns)+'::'+'_'.join(classes+[name])
def split_decl_name(name):
chunks = name.split('::')
namespace = chunks[:-1]
classes = []
while namespace and '::'.join(namespace) not in namespaces:
classes.insert(0, namespace.pop())
ns = '::'.join(namespace)
if ns not in namespaces and ns:
assert(0)
return namespace, classes, chunks[-1]
def registered_tp_search(tp):
found = False
if not tp:
return True
for tpx in registered_types:
if re.findall(tpx, tp):
found = True
break
return found
namespaces = {}
enums = []
classes = {}
functions = {}
registered_types = ["int", "Size.*", "Rect.*", "Scalar", "RotatedRect", "Point.*", "explicit", "string", "bool", "uchar",
"Vec.*", "float", "double", "char", "Mat", "size_t", "RNG", "TermCriteria"]
class ClassInfo(ClassInfo):
def get_cpp_code_header(self):
if self.ismap:
return 'mod.map_type<%s>("%s");\n'%(self.name, self.mapped_name)
if not self.base:
return 'mod.add_type<%s>("%s");\n' % (self.name, self.mapped_name)
else:
return 'mod.add_type<%s>("%s", jlcxx::julia_base_type<%s>());\n' % (self.name, self.mapped_name, self.base)
def get_cpp_code_body(self):
if self.ismap:
return ''
cpp_code = StringIO()
for cons in self.constructors:
cons.__class__ = FuncVariant
cpp_code.write(cons.get_cons_code(self.name, self.mapped_name))
#add get/set
cpp_code.write('\n')
cpp_code.write(self.get_setters())
cpp_code.write('\n')
cpp_code.write(self.get_getters())
cpp_code.write(';')
return cpp_code.getvalue()
    # helpers below build the C++ getter/setter wrappers for class properties
def get_prop_func_cpp(self, mode, propname):
return "jlopencv_" + self.mapped_name + "_"+mode+"_"+propname
def get_getters(self):
stra = ""
for prop in self.props:
if not self.isalgorithm:
stra = stra + '\nmod.method("%s", [](const %s &cobj) {return %scobj.%s;});' % (self.get_prop_func_cpp("get", prop.name), self.name, '(int)' if prop.tp in enums else '', prop.name)
else:
stra = stra + '\nmod.method("%s", [](const cv::Ptr<%s> &cobj) {return %scobj->%s;});' % (self.get_prop_func_cpp("get", prop.name), self.name,'(int)' if prop.tp in enums else '', prop.name)
return stra
def get_setters(self):
stra = ""
for prop in self.props:
if prop.readonly:
continue
if not self.isalgorithm:
stra = stra + '\nmod.method("%s", [](%s &cobj,const force_enum_int<%s>::Type &v) {cobj.%s=(%s)v;});' % (self.get_prop_func_cpp("set", prop.name), self.name, prop.tp, prop.name, prop.tp)
else:
stra = stra + '\nmod.method("%s", [](cv::Ptr<%s> cobj, const force_enum_int<%s>::Type &v) {cobj->%s=(%s)v;});' % (self.get_prop_func_cpp("set", prop.name), self.name, prop.tp, prop.name, prop.tp)
return stra
class FuncVariant(FuncVariant):
def get_return(self):
if len(self.outlist)==0:
return ";"
elif len(self.outlist)==1:
return "return %s;" % ( ('(int)' if self.outlist[0].tp in enums else '') + self.outlist[0].name)
return "return make_tuple(%s);" % ",".join(["move(%s)" % (('(int)' if x.tp in enums else '') +x.name) for x in self.outlist])
def get_argument(self, isalgo):
args = self.inlist + self.optlist
if self.classname!="" and not self.isconstructor and not self.isstatic:
if isalgo:
args = [ArgInfo("cobj", ("cv::Ptr<%s>" % self.classname))] + args
else:
args = [ArgInfo("cobj", self.classname)] + args
argnamelist = []
for arg in args:
if arg.tp in pass_by_val_types:
print("PATHWAY NOT TESTED")
argnamelist.append(arg.tp[:-1] +"& "+arg.name)
elif arg.tp in enums:
argnamelist.append("int& " + arg.name)
else:
if arg.tp=='bool':
# Bool pass-by-reference is broken
argnamelist.append(arg.tp+" " +arg.name)
else:
argnamelist.append(arg.tp + "& "+arg.name)
# argnamelist = [(arg.tp if arg.tp not in pass_by_val_types else arg.tp[:-1]) +"& "+arg.name for arg in args]
argstr = ", ".join(argnamelist)
return argstr
def get_def_outtypes(self):
outstr = ""
for arg in self.deflist:
outstr = outstr + "%s %s;"%(arg.tp if arg.tp not in pass_by_val_types else arg.tp[:-1], arg.name)
return outstr
def get_retval(self, isalgo):
if self.rettype:
stra = "auto retval = "
else:
stra = ""
arlist = []
for x in self.args:
if x.tp in pass_by_val_types:
arlist.append("&"+x.name)
elif x.tp in enums:
arlist.append("(%s)%s" %(x.tp, x.name))
else:
arlist.append(x.name)
argstr = ", ".join(arlist)
if self.classname and not self.isstatic:
stra = stra + "cobj%s%s(%s); " %("->" if isalgo else ".",self.name.split('::')[-1], argstr)
else:
stra = stra + "%s(%s);" % (self.name, argstr)
return stra
def get_cons_code(self, name, mapped_name):
# if self.get_argument(False) == '':
# return ''
arglist = []
for x in self.args:
if x.tp in pass_by_val_types:
arglist.append("&"+x.name)
elif x.tp in enums:
arglist.append("(%s)%s" %(x.tp, x.name))
else:
arglist.append(x.name)
return 'mod.method("%s", [](%s) { %s return jlcxx::create<%s>(%s);});' % (self.get_wrapper_name(), self.get_argument(False), self.get_def_outtypes(), name, " ,".join(arglist))
def get_complete_code(self, classname, isalgo=False):
outstr = '.method("%s", [](%s) {%s %s %s})' % (self.get_wrapper_name(), self.get_argument(isalgo),self.get_def_outtypes(), self.get_retval(isalgo), self.get_return())
return outstr
def gen(srcfiles):
namespaces, default_values = gen_tree(srcfiles)
cpp_code = StringIO()
include_code = StringIO()
nsi = sorted(namespaces.items(), key =lambda x: x[0])
for name, ns in nsi:
cpp_code.write("using namespace %s;\n" % name.replace(".", "::"))
if name.split('.')[-1] == '':
continue
nsname = name
nsprefix = '_'.join(nsname.split('::')[1:])
def sort_classes(classes):
class_inherits = []
class_inherits_names = set()
class_noinherits = []
parent = {}
for name, cl in classes:
if cl.base:
class_inherits.append((name, cl))
parent[name] = cl.base
class_inherits_names.add(name)
else:
class_noinherits.append((name,cl))
final_order = class_noinherits
while len(class_inherits)>0:
for cli in class_inherits:
if parent[cli[0]] not in class_inherits_names:
final_order.append(cli)
class_inherits.remove(cli)
class_inherits_names.remove(cli[0])
return final_order
sorted_cls = sort_classes(ns.classes.items())
for name, cl in sorted_cls:
cl.__class__ = ClassInfo
cpp_code.write(cl.get_cpp_code_header())
if cl.base:
include_code.write("""
template <>
struct SuperType<%s>
{
typedef %s type;
};
""" % (cl.name.replace('.', '::'), cl.base.replace('.', '::')))
for e1,e2 in ns.enums.items():
# cpp_code.write('\n mod.add_bits<{0}>("{1}", jlcxx::julia_type("CppEnum"));'.format(e2[0], e2[1]))
enums.append(e2[0])
enums.append(e2[1])
enums.append(e2[0].replace("cv::", ""))
for tp in ns.register_types:
cpp_code.write(' mod.add_type<%s>("%s");\n' %(tp, normalize_class_name(tp)))
for name, ns in namespaces.items():
nsname = name.replace("::", "_")
for name, cl in ns.classes.items():
cl.__class__ = ClassInfo
cpp_code.write(cl.get_cpp_code_body())
for mname, fs in cl.methods.items():
for f in fs:
f.__class__ = FuncVariant
cpp_code.write('\n mod%s;' % f.get_complete_code(cl.name, cl.isalgorithm))
# for f in cl.constructors:
# cpp_code.write('\n %s; \n' % f.get_cons_code(cl.name, cl.mapped_name))
for mname, fs in ns.funcs.items():
for f in fs:
f.__class__ = FuncVariant
cpp_code.write('\n mod%s;' % f.get_complete_code("", False))
for mapname, name in sorted(ns.consts.items()):
cpp_code.write(' mod.set_const("%s_%s", (force_enum_int<decltype(%s)>::Type)%s);\n'%(nsname, name, mapname, mapname))
compat_name = re.sub(r"([a-z])([A-Z])", r"\1_\2", name).upper()
if name != compat_name:
cpp_code.write(' mod.set_const("%s_%s", (force_enum_int<decltype(%s)>::Type)%s);\n'%(nsname, compat_name, mapname, mapname))
default_values = list(set(default_values))
for val in default_values:
# val = handle_cpp_arg(val)
cpp_code.write(' mod.method("%s", [](){return (force_enum_int<decltype(%s)>::Type)%s;});\n'%(get_var(val), val, val))
with open ('autogen_cpp/cv_core.cpp', 'w') as fd:
fd.write(mod_template.substitute(include_code = include_code.getvalue(), cpp_code=cpp_code.getvalue()))
srcfiles = hdr_parser.opencv_hdr_list
if len(sys.argv) > 1:
srcfiles = [l.strip() for l in sys.argv[1].split(';')]
gen(srcfiles)
|
#!/usr/bin/env python
# This file is part of OpenCV project.
# It is subject to the license terms in the LICENSE file found in the top-level directory
# of this distribution and at http://opencv.org/license.html
# Copyright (C) 2020 by Archit Rungta
from __future__ import unicode_literals # Needed for python2
import hdr_parser, sys, re, os
from string import Template
from pprint import pprint
from collections import namedtuple
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
import os, shutil
from parse_tree import *
submodule_template = Template('')
root_template = Template('')
with open("binding_templates_jl/template_cv2_submodule.jl", "r") as f:
submodule_template = Template(f.read())
with open("binding_templates_jl/template_cv2_root.jl", "r") as f:
root_template = Template(f.read())
class FuncVariant(FuncVariant):
def get_complete_code(self, classname='', isalgo = False, iscons = False, gen_default = True, ns = ''):
return 'const %s = OpenCV.%s_%s' %(self.mapped_name, ns, self.mapped_name)
def gen(srcfiles):
namespaces, _ = gen_tree(srcfiles)
jl_code = StringIO()
for name, ns in namespaces.items():
# cv_types.extend(ns.registered)
jl_code = StringIO()
nsname = '_'.join(name.split('::')[1:])
# Do not duplicate functions. This should prevent overwriting of Mat function by UMat functions
function_signatures = []
if name != 'cv':
for cname, cl in ns.classes.items():
cl.__class__ = ClassInfo
for mname, fs in cl.methods.items():
for f in fs:
f.__class__ = FuncVariant
if f.mapped_name in function_signatures:
print("Skipping entirely: ", f.name)
continue
jl_code.write('\n%s' % f.get_complete_code(isalgo = cl.isalgorithm, ns=nsname))
function_signatures.append(f.mapped_name)
for f in cl.constructors:
f.__class__ = FuncVariant
jl_code.write('\n%s' % f.get_complete_code(classname = cl.mapped_name, isalgo = cl.isalgorithm, iscons = True, ns=nsname))
break
for mname, fs in ns.funcs.items():
for f in fs:
f.__class__ = FuncVariant
if f.mapped_name in function_signatures:
continue
jl_code.write('\n%s' % f.get_complete_code(ns=nsname))
function_signatures.append(f.mapped_name)
jl_code.write('\n')
for mapname, cname in sorted(ns.consts.items()):
jl_code.write(' const %s = OpenCV.%s_%s\n'%(cname, name.replace('::', '_'), cname))
compat_name = re.sub(r"([a-z])([A-Z])", r"\1_\2", cname).upper()
if cname != compat_name:
jl_code.write(' const %s = OpenCV.%s_%s;\n'%(compat_name, name.replace('::', '_'), compat_name))
imports = ''
for namex in namespaces:
if namex.startswith(name) and len(namex.split('::')) == 1 + len(name.split('::')):
imports = imports + '\ninclude("%s_wrap.jl")'%namex.replace('::', '_')
code = ''
if name == 'cv':
code = root_template.substitute(modname = name, code = jl_code.getvalue(), submodule_imports = imports)
else:
code = submodule_template.substitute(modname = name.split('::')[-1], code = jl_code.getvalue(), submodule_imports = imports)
with open ('autogen_jl/%s_wrap.jl' % ns.name.replace('::', '_'), 'w') as fd:
fd.write(code)
srcfiles = hdr_parser.opencv_hdr_list
if len(sys.argv) > 1:
srcfiles = [l.strip() for l in sys.argv[1].split(';')]
gen(srcfiles)
|
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import os, numpy
import cv2 as cv
from tests_common import NewOpenCVTests, unittest
class rgbd_test(NewOpenCVTests):
def test_computeRgbdPlane(self):
depth_image = self.get_sample('/cv/rgbd/depth.png', cv.IMREAD_ANYDEPTH)
if depth_image is None:
raise unittest.SkipTest("Missing files with test data")
K = numpy.array([[525, 0, 320.5], [0, 525, 240.5], [0, 0, 1]])
points3d = cv.rgbd.depthTo3d(depth_image, K)
        normals_computer = cv.rgbd.RgbdNormals_create(480, 640, 5, K)
normals = normals_computer.apply(points3d)
rgbd_plane = cv.rgbd.RgbdPlane_create(cv.rgbd.RgbdPlane_RGBD_PLANE_METHOD_DEFAULT, 40, 1600, 0.01, 0, 0, 0)
_, planes_coeff = rgbd_plane.apply(points3d, normals)
planes_coeff_expected = \
numpy.asarray([[[-0.02447728, -0.8678335 , -0.49625182, 4.02800846]],
[[-0.05055107, -0.86144137, -0.50533485, 3.95456314]],
[[-0.03294908, -0.86964548, -0.49257591, 3.97052431]],
[[-0.02886586, -0.87153459, -0.48948362, 7.77550507]],
[[-0.04455929, -0.87659335, -0.47916424, 3.93200684]],
[[-0.21514639, 0.18835169, -0.95824611, 7.59479475]],
[[-0.01006953, -0.86679155, -0.49856904, 4.01355648]],
[[-0.00876531, -0.87571168, -0.48275498, 3.96768975]],
[[-0.06395926, -0.86951321, -0.48975089, 4.08618736]],
[[-0.01403128, -0.87593341, -0.48222789, 7.74559402]],
[[-0.01143177, -0.87495202, -0.4840748 , 7.75355816]]],
dtype=numpy.float32)
eps = 0.05
self.assertLessEqual(cv.norm(planes_coeff, planes_coeff_expected, cv.NORM_L2), eps)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
|
#!/usr/bin/env python
import os
import cv2 as cv
import numpy as np
from tests_common import NewOpenCVTests, unittest
class cudafeatures2d_test(NewOpenCVTests):
def setUp(self):
super(cudafeatures2d_test, self).setUp()
if not cv.cuda.getCudaEnabledDeviceCount():
self.skipTest("No CUDA-capable device is detected")
def test_cudafeatures2d(self):
npMat1 = self.get_sample("samples/data/right01.jpg")
npMat2 = self.get_sample("samples/data/right02.jpg")
cuMat1 = cv.cuda_GpuMat()
cuMat2 = cv.cuda_GpuMat()
cuMat1.upload(npMat1)
cuMat2.upload(npMat2)
cuMat1 = cv.cuda.cvtColor(cuMat1, cv.COLOR_RGB2GRAY)
cuMat2 = cv.cuda.cvtColor(cuMat2, cv.COLOR_RGB2GRAY)
fast = cv.cuda_FastFeatureDetector.create()
_kps = fast.detectAsync(cuMat1)
orb = cv.cuda_ORB.create()
_kps1, descs1 = orb.detectAndComputeAsync(cuMat1, None)
_kps2, descs2 = orb.detectAndComputeAsync(cuMat2, None)
self.assertTrue(len(orb.convert(_kps1)) == _kps1.size()[0])
self.assertTrue(len(orb.convert(_kps2)) == _kps2.size()[0])
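        # ORB descriptors are binary, so the brute-force matcher uses the Hamming distance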
bf = cv.cuda_DescriptorMatcher.createBFMatcher(cv.NORM_HAMMING)
matches = bf.match(descs1, descs2)
self.assertGreater(len(matches), 0)
matches = bf.knnMatch(descs1, descs2, 2)
self.assertGreater(len(matches), 0)
matches = bf.radiusMatch(descs1, descs2, 0.1)
self.assertGreater(len(matches), 0)
        self.assertTrue(True)  # It is sufficient that no exceptions have been thrown
if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
|
#!/usr/bin/python
import sys
import os
import cv2 as cv
import numpy as np
print('\ntextdetection.py')
print(' A demo script of the Extremal Region Filter algorithm described in:')
print(' Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012\n')
if (len(sys.argv) < 2):
print(' (ERROR) You must call this script with an argument (path_to_image_to_be_processed)\n')
quit()
pathname = os.path.dirname(sys.argv[0])
img = cv.imread(str(sys.argv[1]))
# for visualization
vis = img.copy()
# Extract channels to be processed individually
channels = cv.text.computeNMChannels(img)
# Append negative channels to detect ER- (bright regions over dark background)
cn = len(channels)-1
for c in range(0,cn):
channels.append((255-channels[c]))
# Apply the default cascade classifier to each independent channel (could be done in parallel)
print("Extracting Class Specific Extremal Regions from "+str(len(channels))+" channels ...")
print(" (...) this may take a while (...)")
for channel in channels:
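    # Two-stage Neumann-Matas ER filter: NM1 prunes candidate regions, NM2 re-scores the survivors before grouping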
erc1 = cv.text.loadClassifierNM1(pathname+'/trained_classifierNM1.xml')
er1 = cv.text.createERFilterNM1(erc1,16,0.00015,0.13,0.2,True,0.1)
erc2 = cv.text.loadClassifierNM2(pathname+'/trained_classifierNM2.xml')
er2 = cv.text.createERFilterNM2(erc2,0.5)
regions = cv.text.detectRegions(channel,er1,er2)
rects = cv.text.erGrouping(img,channel,[r.tolist() for r in regions])
#rects = cv.text.erGrouping(img,channel,[x.tolist() for x in regions], cv.text.ERGROUPING_ORIENTATION_ANY,'../../GSoC2014/opencv_contrib/modules/text/samples/trained_classifier_erGrouping.xml',0.5)
#Visualization
for r in range(0,np.shape(rects)[0]):
rect = rects[r]
cv.rectangle(vis, (rect[0],rect[1]), (rect[0]+rect[2],rect[1]+rect[3]), (0, 0, 0), 2)
cv.rectangle(vis, (rect[0],rect[1]), (rect[0]+rect[2],rect[1]+rect[3]), (255, 255, 255), 1)
#Visualization
cv.imshow("Text detection result", vis)
cv.waitKey(0)
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
import sys
import os
import cv2 as cv
import numpy as np
def main():
print('\nDeeptextdetection.py')
    print(' A demo script of the text box algorithm described in the paper:')
print(' * Minghui Liao et al.: TextBoxes: A Fast Text Detector with a Single Deep Neural Network https://arxiv.org/abs/1611.06779\n')
if (len(sys.argv) < 2):
print(' (ERROR) You must call this script with an argument (path_to_image_to_be_processed)\n')
quit()
if not os.path.isfile('TextBoxes_icdar13.caffemodel') or not os.path.isfile('textbox.prototxt'):
print " Model files not found in current directory. Aborting"
print " See the documentation of text::TextDetectorCNN class to get download links."
quit()
img = cv.imread(str(sys.argv[1]))
textSpotter = cv.text.TextDetectorCNN_create("textbox.prototxt", "TextBoxes_icdar13.caffemodel")
    rects, outProbs = textSpotter.detect(img)
vis = img.copy()
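    # Minimum detection confidence required to draw a box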
thres = 0.6
for r in range(np.shape(rects)[0]):
if outProbs[r] > thres:
rect = rects[r]
cv.rectangle(vis, (rect[0],rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (255, 0, 0), 2)
cv.imshow("Text detection result", vis)
cv.waitKey()
if __name__ == "__main__":
main()
|
#!/usr/bin/python
import sys
import os
import cv2 as cv
import numpy as np
print('\ndetect_er_chars.py')
print(' A simple demo script using the Extremal Region Filter algorithm described in:')
print(' Neumann L., Matas J.: Real-Time Scene Text Localization and Recognition, CVPR 2012\n')
if (len(sys.argv) < 2):
print(' (ERROR) You must call this script with an argument (path_to_image_to_be_processed)\n')
quit()
pathname = os.path.dirname(sys.argv[0])
img = cv.imread(str(sys.argv[1]))
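# Read the image again as a single grayscale channel; the ER filters operate on one channel at a time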
gray = cv.imread(str(sys.argv[1]),0)
erc1 = cv.text.loadClassifierNM1(pathname+'/trained_classifierNM1.xml')
er1 = cv.text.createERFilterNM1(erc1)
erc2 = cv.text.loadClassifierNM2(pathname+'/trained_classifierNM2.xml')
er2 = cv.text.createERFilterNM2(erc2)
regions = cv.text.detectRegions(gray,er1,er2)
#Visualization
rects = [cv.boundingRect(p.reshape(-1, 1, 2)) for p in regions]
for rect in rects:
cv.rectangle(img, rect[0:2], (rect[0]+rect[2],rect[1]+rect[3]), (0, 0, 0), 2)
for rect in rects:
cv.rectangle(img, rect[0:2], (rect[0]+rect[2],rect[1]+rect[3]), (255, 255, 255), 1)
cv.imshow("Text detection result", img)
cv.waitKey(0)
|
from skimage import io, transform
from multiprocessing.dummy import Pool as ThreadPool
def rescale(root_new, root_old, img_path, ann_path, out_shape):
try:
img = io.imread(root_old+"/"+img_path)
    except Exception as E:
        print(E)
        return
h, w, _ = img.shape
f_h, f_w = float(out_shape)/h, float(out_shape)/w
trans_img = transform.rescale(img, (f_h, f_w))
num_objs = 0
with open(root_old+"/"+ann_path, 'r') as f:
ann = f.readline()
ann = ann.rstrip()
ann = ann.split(' ')
ann = [float(i) for i in ann]
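        # Each object contributes 5 values (assumed xmin, ymin, xmax, ymax, class id); scale the box corners by the same factors as the image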
        num_objs = len(ann) // 5
        for idx in range(num_objs):
ann[idx * 5 + 0] = int(f_w * ann[idx * 5 + 0])
ann[idx * 5 + 1] = int(f_h * ann[idx * 5 + 1])
ann[idx * 5 + 2] = int(f_w * ann[idx * 5 + 2])
ann[idx * 5 + 3] = int(f_h * ann[idx * 5 + 3])
# Write the new annotations to file
with open(root_new+"/"+ann_path, 'w') as f_new:
for val in ann:
f_new.write(str(val)+' ')
# Save the new image
    io.imsave(root_new+"/"+img_path, trans_img)
def preprocess():
source = '/users2/Datasets/PASCAL_VOC/VOCdevkit/VOC2012_Resize/source.txt'
root_old = '/users2/Datasets/PASCAL_VOC/VOCdevkit/VOC2012'
root_new = '/users2/Datasets/PASCAL_VOC/VOCdevkit/VOC2012_Resize'
out_shape = 416
with open(source, 'r') as src:
lines = src.readlines()
        print('Processing {} images and annotations'.format(len(lines)))
for line in lines:
line = line.rstrip()
line = line.split(' ')
img_path = line[0]
ann_path = line[1]
rescale(root_new, root_old, img_path, ann_path, out_shape)
if __name__ == '__main__':
preprocess()
|
import argparse
import sys
import os
import time
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
def k_means(K, data, max_iter, n_jobs, image_file):
X = np.array(data)
np.random.shuffle(X)
begin = time.time()
    print('Running k-means')
kmeans = KMeans(n_clusters=K, max_iter=max_iter, n_jobs=n_jobs, verbose=1).fit(X)
    print('K-Means took {} seconds to complete'.format(time.time() - begin))
step_size = 0.2
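    # Predict the cluster of every point on a dense (width, height) grid so the cluster regions can be rendered as an image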
xmin, xmax = X[:, 0].min()-1, X[:, 0].max()+1
ymin, ymax = X[:, 1].min()-1, X[:, 1].max()+1
xx, yy = np.meshgrid(np.arange(xmin, xmax, step_size), np.arange(ymin, ymax, step_size))
preds = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
preds = preds.reshape(xx.shape)
plt.figure()
plt.clf()
plt.imshow(preds, interpolation='nearest', extent=(xx.min(), xx.max(), yy.min(), yy.max()), cmap=plt.cm.Paired, aspect='auto', origin='lower')
plt.plot(X[:, 0], X[:, 1], 'k.', markersize=2)
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1], marker='x', s=169, linewidths=5, color='r', zorder=10)
plt.title("Anchor shapes generated using K-Means")
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
    print('Mean centroids are:')
    for i, center in enumerate(centroids):
        print('{}: {}, {}'.format(i, center[0], center[1]))
# plt.xticks(())
# plt.yticks(())
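    # Save the cluster visualisation to the path given on the command line (image_file is otherwise unused)
    plt.savefig(image_file)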
plt.show()
def pre_process(directory, data_list):
if not os.path.exists(directory):
print "Path {} doesn't exist".format(directory)
return
files = os.listdir(directory)
    print('Loading data...')
for i, f in enumerate(files):
# Progress bar
sys.stdout.write('\r')
percentage = (i+1.0) / len(files)
progress = int(percentage * 30)
bar = [progress*'=', ' '*(29-progress), percentage*100]
sys.stdout.write('[{}>{}] {:.0f}%'.format(*bar))
sys.stdout.flush()
with open(directory+"/"+f, 'r') as ann:
l = ann.readline()
l = l.rstrip()
l = l.split(' ')
l = [float(i) for i in l]
if len(l) % 5 != 0:
sys.stderr.write('File {} contains incorrect number of annotations'.format(f))
return
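            # 5 values per annotated object (assumed xmin, ymin, xmax, ymax, class id); keep the box width and height as a clustering sample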
            num_objs = len(l) // 5
for obj in range(num_objs):
xmin = l[obj * 5 + 0]
ymin = l[obj * 5 + 1]
xmax = l[obj * 5 + 2]
ymax = l[obj * 5 + 3]
w = xmax - xmin
h = ymax - ymin
data_list.append([w, h])
if w > 1000 or h > 1000:
sys.stdout.write("[{}, {}]".format(w, h))
sys.stdout.write('\nProcessed {} files containing {} objects'.format(len(files), len(data_list)))
return data_list
def main():
parser = argparse.ArgumentParser("Parse hyperparameters")
parser.add_argument("clusters", help="Number of clusters", type=int)
parser.add_argument("dir", help="Directory containing annotations")
parser.add_argument("image_file", help="File to generate the final cluster of image")
parser.add_argument('-jobs', help="Number of jobs for parallel computation", default=1)
parser.add_argument('-iter', help="Max Iterations to run algorithm for", default=1000)
p = parser.parse_args(sys.argv[1:])
K = p.clusters
directory = p.dir
data_list = []
pre_process(directory, data_list )
sys.stdout.write('\nDone collecting data\n')
k_means(K, data_list, int(p.iter), int(p.jobs), p.image_file)
    print('Done!')
if __name__=='__main__':
try:
main()
except Exception as E:
        print(E)
|