system (string, 7 classes) | input (string, 10–94.5k chars) | output (string, 14–55.5k chars) |
---|---|---|
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void matrixAddKernel1(float* ans, float* M, float* N, int size) {
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if((row < size) && (col < size)) {
ans[row*size + col] = M[row*size + col] + N[row*size + col];
}
} | .file "tmpxft_0002ee25_00000000-6_matrixAddKernel1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z41__device_stub__Z16matrixAddKernel1PfS_S_iPfS_S_i
.type _Z41__device_stub__Z16matrixAddKernel1PfS_S_iPfS_S_i, @function
_Z41__device_stub__Z16matrixAddKernel1PfS_S_iPfS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16matrixAddKernel1PfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z41__device_stub__Z16matrixAddKernel1PfS_S_iPfS_S_i, .-_Z41__device_stub__Z16matrixAddKernel1PfS_S_iPfS_S_i
.globl _Z16matrixAddKernel1PfS_S_i
.type _Z16matrixAddKernel1PfS_S_i, @function
_Z16matrixAddKernel1PfS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z16matrixAddKernel1PfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16matrixAddKernel1PfS_S_i, .-_Z16matrixAddKernel1PfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z16matrixAddKernel1PfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16matrixAddKernel1PfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void matrixAddKernel1(float* ans, float* M, float* N, int size) {
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if((row < size) && (col < size)) {
ans[row*size + col] = M[row*size + col] + N[row*size + col];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void matrixAddKernel1(float* ans, float* M, float* N, int size) {
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if((row < size) && (col < size)) {
ans[row*size + col] = M[row*size + col] + N[row*size + col];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void matrixAddKernel1(float* ans, float* M, float* N, int size) {
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if((row < size) && (col < size)) {
ans[row*size + col] = M[row*size + col] + N[row*size + col];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16matrixAddKernel1PfS_S_i
.globl _Z16matrixAddKernel1PfS_S_i
.p2align 8
.type _Z16matrixAddKernel1PfS_S_i,@function
_Z16matrixAddKernel1PfS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s4, s3, 16
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v0, s2, v[1:2]
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s4, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16matrixAddKernel1PfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16matrixAddKernel1PfS_S_i, .Lfunc_end0-_Z16matrixAddKernel1PfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16matrixAddKernel1PfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16matrixAddKernel1PfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void matrixAddKernel1(float* ans, float* M, float* N, int size) {
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if((row < size) && (col < size)) {
ans[row*size + col] = M[row*size + col] + N[row*size + col];
}
} | .text
.file "matrixAddKernel1.hip"
.globl _Z31__device_stub__matrixAddKernel1PfS_S_i # -- Begin function _Z31__device_stub__matrixAddKernel1PfS_S_i
.p2align 4, 0x90
.type _Z31__device_stub__matrixAddKernel1PfS_S_i,@function
_Z31__device_stub__matrixAddKernel1PfS_S_i: # @_Z31__device_stub__matrixAddKernel1PfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16matrixAddKernel1PfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__matrixAddKernel1PfS_S_i, .Lfunc_end0-_Z31__device_stub__matrixAddKernel1PfS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16matrixAddKernel1PfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16matrixAddKernel1PfS_S_i,@object # @_Z16matrixAddKernel1PfS_S_i
.section .rodata,"a",@progbits
.globl _Z16matrixAddKernel1PfS_S_i
.p2align 3, 0x0
_Z16matrixAddKernel1PfS_S_i:
.quad _Z31__device_stub__matrixAddKernel1PfS_S_i
.size _Z16matrixAddKernel1PfS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16matrixAddKernel1PfS_S_i"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__matrixAddKernel1PfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16matrixAddKernel1PfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z16matrixAddKernel1PfS_S_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0020*/ S2R R2, SR_TID.X ; /* 0x0000000000027919 */
/* 0x000e280000002100 */
/*0030*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e680000002600 */
/*0040*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e620000002200 */
/*0050*/ IMAD R3, R3, c[0x0][0x0], R2 ; /* 0x0000000003037a24 */
/* 0x001fca00078e0202 */
/*0060*/ ISETP.GE.AND P0, PT, R3, c[0x0][0x178], PT ; /* 0x00005e0003007a0c */
/* 0x000fe20003f06270 */
/*0070*/ IMAD R0, R0, c[0x0][0x4], R5 ; /* 0x0000010000007a24 */
/* 0x002fca00078e0205 */
/*0080*/ ISETP.GE.OR P0, PT, R0, c[0x0][0x178], P0 ; /* 0x00005e0000007a0c */
/* 0x000fda0000706670 */
/*0090*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00a0*/ HFMA2.MMA R7, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff077435 */
/* 0x000fe200000001ff */
/*00b0*/ IMAD R0, R0, c[0x0][0x178], R3 ; /* 0x00005e0000007a24 */
/* 0x000fe200078e0203 */
/*00c0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd00000000a00 */
/*00d0*/ IMAD.WIDE R4, R0, R7, c[0x0][0x170] ; /* 0x00005c0000047625 */
/* 0x000fc800078e0207 */
/*00e0*/ IMAD.WIDE R2, R0.reuse, R7.reuse, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x0c0fe400078e0207 */
/*00f0*/ LDG.E R4, [R4.64] ; /* 0x0000000404047981 */
/* 0x000ea8000c1e1900 */
/*0100*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea2000c1e1900 */
/*0110*/ IMAD.WIDE R6, R0, R7, c[0x0][0x160] ; /* 0x0000580000067625 */
/* 0x000fc800078e0207 */
/*0120*/ FADD R9, R4, R3 ; /* 0x0000000304097221 */
/* 0x004fca0000000000 */
/*0130*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z16matrixAddKernel1PfS_S_i
.globl _Z16matrixAddKernel1PfS_S_i
.p2align 8
.type _Z16matrixAddKernel1PfS_S_i,@function
_Z16matrixAddKernel1PfS_S_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b32 s2, s[0:1], 0x18
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s4, s3, 16
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, s4, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s3, v[3:4]
s_mov_b32 s3, exec_lo
v_max_i32_e32 v2, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s2, v2
s_cbranch_execz .LBB0_2
s_load_b128 s[4:7], s[0:1], 0x0
v_mad_u64_u32 v[2:3], null, v0, s2, v[1:2]
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[0:1], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s6, v0
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s4, v0
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_waitcnt vmcnt(0)
v_add_f32_e32 v2, v2, v3
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z16matrixAddKernel1PfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z16matrixAddKernel1PfS_S_i, .Lfunc_end0-_Z16matrixAddKernel1PfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z16matrixAddKernel1PfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z16matrixAddKernel1PfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0002ee25_00000000-6_matrixAddKernel1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z41__device_stub__Z16matrixAddKernel1PfS_S_iPfS_S_i
.type _Z41__device_stub__Z16matrixAddKernel1PfS_S_iPfS_S_i, @function
_Z41__device_stub__Z16matrixAddKernel1PfS_S_iPfS_S_i:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z16matrixAddKernel1PfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z41__device_stub__Z16matrixAddKernel1PfS_S_iPfS_S_i, .-_Z41__device_stub__Z16matrixAddKernel1PfS_S_iPfS_S_i
.globl _Z16matrixAddKernel1PfS_S_i
.type _Z16matrixAddKernel1PfS_S_i, @function
_Z16matrixAddKernel1PfS_S_i:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z41__device_stub__Z16matrixAddKernel1PfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z16matrixAddKernel1PfS_S_i, .-_Z16matrixAddKernel1PfS_S_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z16matrixAddKernel1PfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z16matrixAddKernel1PfS_S_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "matrixAddKernel1.hip"
.globl _Z31__device_stub__matrixAddKernel1PfS_S_i # -- Begin function _Z31__device_stub__matrixAddKernel1PfS_S_i
.p2align 4, 0x90
.type _Z31__device_stub__matrixAddKernel1PfS_S_i,@function
_Z31__device_stub__matrixAddKernel1PfS_S_i: # @_Z31__device_stub__matrixAddKernel1PfS_S_i
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z16matrixAddKernel1PfS_S_i, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z31__device_stub__matrixAddKernel1PfS_S_i, .Lfunc_end0-_Z31__device_stub__matrixAddKernel1PfS_S_i
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z16matrixAddKernel1PfS_S_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z16matrixAddKernel1PfS_S_i,@object # @_Z16matrixAddKernel1PfS_S_i
.section .rodata,"a",@progbits
.globl _Z16matrixAddKernel1PfS_S_i
.p2align 3, 0x0
_Z16matrixAddKernel1PfS_S_i:
.quad _Z31__device_stub__matrixAddKernel1PfS_S_i
.size _Z16matrixAddKernel1PfS_S_i, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z16matrixAddKernel1PfS_S_i"
.size .L__unnamed_1, 28
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z31__device_stub__matrixAddKernel1PfS_S_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z16matrixAddKernel1PfS_S_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include<iostream>
#define SECTION_SIZE 32
using namespace std;
__global__ void Prefix_sum_oneblock_kernel(float *X, float *Y, int InputSize, float *S) {
__shared__ float XY[SECTION_SIZE];
int i = blockIdx.x*blockDim.x+ threadIdx.x;
XY[threadIdx.x] = X[i];
/*if (i < InputSize && threadIdx.x != 0) {
XY[threadIdx.x] = X[i-1];
}else{
XY[threadIdx.x] = 0;
}*/
// the code below performs iterative scan on XY
for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
XY[threadIdx.x] += XY[threadIdx.x-stride];
}
if(i<InputSize) Y[i] = XY[threadIdx.x];
//Y[i] = XY[threadIdx.x];
__syncthreads();
if(threadIdx.x == 0) S[blockIdx.x] = XY[SECTION_SIZE-1]; // get the last element in each section
}
__global__ void Add_scalar_to_subvector(float *Y, float *S, int InputSize){
int i = (blockIdx.x+1)*blockDim.x+ threadIdx.x;
if(i<InputSize) Y[i] += S[blockIdx.x];
//Y[i] += S[blockIdx.x];
}
int main(){
int n;
cin >> n;
int size = n*sizeof(float);
//float *A, *B, *C;
float *X_h = (float *) malloc( size ); // allocate CPU memory
float *Y_h = (float *) malloc( size );
for(int i=0; i<n; i++){ X_h[i] = 1; Y_h[i]=0; }
float *X, *Y, *S, *S1;
cudaMalloc(&X, size); // allocate GPU memory
cudaMalloc(&Y, size);
cudaMemcpy(X, X_h, size, cudaMemcpyHostToDevice);
int BLOCK_SIZE = SECTION_SIZE;
int GRID_SIZE=ceil(n/BLOCK_SIZE);
int size_s = GRID_SIZE*sizeof(float);
cudaMalloc(&S, size_s);
cudaMalloc(&S1, size_s);
Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (X, Y, n, S);
Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (S, S, n, S1);
Add_scalar_to_subvector<<<GRID_SIZE,BLOCK_SIZE>>> (Y, S, n);
cudaMemcpy(Y_h, Y, size, cudaMemcpyDeviceToHost);
for(int i=0; i<n; i++){
cout<<i<<" "<<Y_h[i]<<endl;
}
cudaFree(X); cudaFree(Y);
free(X_h); free(Y_h);
} | code for sm_80
Function : _Z23Add_scalar_to_subvectorPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e620000002100 */
/*0030*/ IADD3 R0, R2, 0x1, RZ ; /* 0x0000000102007810 */
/* 0x001fca0007ffe0ff */
/*0040*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0203 */
/*0050*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0090*/ IMAD.WIDE.U32 R2, R2, R5, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fc800078e0005 */
/*00a0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fe400078e0205 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ea4000c1e1900 */
/*00d0*/ FADD R7, R0, R3 ; /* 0x0000000300077221 */
/* 0x004fca0000000000 */
/*00e0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z26Prefix_sum_oneblock_kernelPfS_iS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e220000002100 */
/*0020*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e240000002500 */
/*0050*/ IMAD R0, R6, c[0x0][0x0], R7 ; /* 0x0000000006007a24 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R2, R0, R11, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e020b */
/*0070*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe40003f05270 */
/*0090*/ SHF.R.S32.HI R9, RZ, 0x1f, R0 ; /* 0x0000001fff097819 */
/* 0x000fe20000011400 */
/*00a0*/ STS [R7.X4], R5 ; /* 0x0000000507007388 */
/* 0x0041f40000004800 */
/*00b0*/ @!P0 BRA 0x170 ; /* 0x000000b000008947 */
/* 0x000fea0003800000 */
/*00c0*/ HFMA2.MMA R2, -RZ, RZ, 0, 5.9604644775390625e-08 ; /* 0x00000001ff027435 */
/* 0x000fd000000001ff */
/*00d0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*00e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*00f0*/ IMAD.IADD R3, R7, 0x1, -R2 ; /* 0x0000000107037824 */
/* 0x000fe200078e0a02 */
/*0100*/ SHF.L.U32 R2, R2, 0x1, RZ ; /* 0x0000000102027819 */
/* 0x000fc800000006ff */
/*0110*/ ISETP.GE.U32.AND P0, PT, R7, R2, PT ; /* 0x000000020700720c */
/* 0x000fe20003f06070 */
/*0120*/ LDS R3, [R3.X4] ; /* 0x0000000003037984 */
/* 0x000fe80000004800 */
/*0130*/ LDS R4, [R7.X4] ; /* 0x0000000007047984 */
/* 0x000e640000004800 */
/*0140*/ FADD R5, R4, R3 ; /* 0x0000000304057221 */
/* 0x003fca0000000000 */
/*0150*/ STS [R7.X4], R5 ; /* 0x0000000507007388 */
/* 0x0001e20000004800 */
/*0160*/ @P0 BRA 0xd0 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*0170*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe20003f06270 */
/*0180*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*0190*/ ISETP.NE.AND P1, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fd60003f25270 */
/*01a0*/ @!P0 LEA R2, P2, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000028a11 */
/* 0x000fc800078410ff */
/*01b0*/ @!P0 LEA.HI.X R3, R0, c[0x0][0x16c], R9, 0x2, P2 ; /* 0x00005b0000038a11 */
/* 0x000fca00010f1409 */
/*01c0*/ @!P0 STG.E [R2.64], R5 ; /* 0x0000000502008986 */
/* 0x0003e8000c101904 */
/*01d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*01e0*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*01f0*/ LDS R5, [0x7c] ; /* 0x00007c00ff057984 */
/* 0x003e220000000800 */
/*0200*/ IMAD.WIDE.U32 R2, R6, R11, c[0x0][0x178] ; /* 0x00005e0006027625 */
/* 0x000fca00078e000b */
/*0210*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include<iostream>
#define SECTION_SIZE 32
using namespace std;
__global__ void Prefix_sum_oneblock_kernel(float *X, float *Y, int InputSize, float *S) {
__shared__ float XY[SECTION_SIZE];
int i = blockIdx.x*blockDim.x+ threadIdx.x;
XY[threadIdx.x] = X[i];
/*if (i < InputSize && threadIdx.x != 0) {
XY[threadIdx.x] = X[i-1];
}else{
XY[threadIdx.x] = 0;
}*/
// the code below performs iterative scan on XY
for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
XY[threadIdx.x] += XY[threadIdx.x-stride];
}
if(i<InputSize) Y[i] = XY[threadIdx.x];
//Y[i] = XY[threadIdx.x];
__syncthreads();
if(threadIdx.x == 0) S[blockIdx.x] = XY[SECTION_SIZE-1]; // get the last element in each section
}
__global__ void Add_scalar_to_subvector(float *Y, float *S, int InputSize){
int i = (blockIdx.x+1)*blockDim.x+ threadIdx.x;
if(i<InputSize) Y[i] += S[blockIdx.x];
//Y[i] += S[blockIdx.x];
}
int main(){
int n;
cin >> n;
int size = n*sizeof(float);
//float *A, *B, *C;
float *X_h = (float *) malloc( size ); // allocate CPU memory
float *Y_h = (float *) malloc( size );
for(int i=0; i<n; i++){ X_h[i] = 1; Y_h[i]=0; }
float *X, *Y, *S, *S1;
cudaMalloc(&X, size); // allocate GPU memory
cudaMalloc(&Y, size);
cudaMemcpy(X, X_h, size, cudaMemcpyHostToDevice);
int BLOCK_SIZE = SECTION_SIZE;
int GRID_SIZE=ceil(n/BLOCK_SIZE);
int size_s = GRID_SIZE*sizeof(float);
cudaMalloc(&S, size_s);
cudaMalloc(&S1, size_s);
Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (X, Y, n, S);
Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (S, S, n, S1);
Add_scalar_to_subvector<<<GRID_SIZE,BLOCK_SIZE>>> (Y, S, n);
cudaMemcpy(Y_h, Y, size, cudaMemcpyDeviceToHost);
for(int i=0; i<n; i++){
cout<<i<<" "<<Y_h[i]<<endl;
}
cudaFree(X); cudaFree(Y);
free(X_h); free(Y_h);
} | .file "tmpxft_0019988f_00000000-6_prefix_sum.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_
.type _Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_, @function
_Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_:
.LFB3694:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z26Prefix_sum_oneblock_kernelPfS_iS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_, .-_Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_
.globl _Z26Prefix_sum_oneblock_kernelPfS_iS_
.type _Z26Prefix_sum_oneblock_kernelPfS_iS_, @function
_Z26Prefix_sum_oneblock_kernelPfS_iS_:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z26Prefix_sum_oneblock_kernelPfS_iS_, .-_Z26Prefix_sum_oneblock_kernelPfS_iS_
.globl _Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i
.type _Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i, @function
_Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i:
.LFB3696:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z23Add_scalar_to_subvectorPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i, .-_Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i
.globl _Z23Add_scalar_to_subvectorPfS_i
.type _Z23Add_scalar_to_subvectorPfS_i, @function
_Z23Add_scalar_to_subvectorPfS_i:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z23Add_scalar_to_subvectorPfS_i, .-_Z23Add_scalar_to_subvectorPfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string " "
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rsi
leaq _ZSt3cin(%rip), %rdi
call _ZNSirsERi@PLT
movl 12(%rsp), %r12d
leal 0(,%r12,4), %ebp
movslq %ebp, %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r14
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r13
testl %r12d, %r12d
jle .L20
movslq %r12d, %rbx
leaq 0(,%rbx,4), %rdx
movl $0, %eax
movss .LC0(%rip), %xmm0
.L21:
movss %xmm0, (%r14,%rax)
movl $0x00000000, 0(%r13,%rax)
addq $4, %rax
cmpq %rax, %rdx
jne .L21
.L20:
leaq 16(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq %r14, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl 12(%rsp), %eax
leal 31(%rax), %ebx
testl %eax, %eax
cmovns %eax, %ebx
sarl $5, %ebx
leal 0(,%rbx,4), %r12d
movslq %r12d, %r12
leaq 32(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movl $32, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl %ebx, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L35
.L22:
movl $32, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl %ebx, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L36
.L23:
movl $32, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl %ebx, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L37
.L24:
movl $2, %ecx
movq %rbp, %rdx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
cmpl $0, 12(%rsp)
jle .L25
movl $0, %ebp
leaq _ZSt4cout(%rip), %r15
jmp .L30
.L35:
movq 32(%rsp), %rcx
movl 12(%rsp), %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_
jmp .L22
.L36:
movq 32(%rsp), %rdi
movq 40(%rsp), %rcx
movl 12(%rsp), %edx
movq %rdi, %rsi
call _Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_
jmp .L23
.L37:
movl 12(%rsp), %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i
jmp .L24
.L39:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L38
call _ZSt16__throw_bad_castv@PLT
.L38:
call __stack_chk_fail@PLT
.L28:
movq %r12, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r12), %rax
movl $10, %esi
movq %r12, %rdi
call *48(%rax)
movl %eax, %esi
.L29:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $1, %rbp
cmpl %ebp, 12(%rsp)
jle .L25
.L30:
movl %ebp, %esi
movq %r15, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbx
movl $2, %edx
leaq .LC2(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
pxor %xmm0, %xmm0
cvtss2sd 0(%r13,%rbp,4), %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r12
testq %r12, %r12
je .L39
cmpb $0, 56(%r12)
je .L28
movzbl 67(%r12), %esi
jmp .L29
.L25:
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %r14, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L40
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "_Z23Add_scalar_to_subvectorPfS_i"
.align 8
.LC4:
.string "_Z26Prefix_sum_oneblock_kernelPfS_iS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z23Add_scalar_to_subvectorPfS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z26Prefix_sum_oneblock_kernelPfS_iS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include<iostream>
#define SECTION_SIZE 32
using namespace std;
__global__ void Prefix_sum_oneblock_kernel(float *X, float *Y, int InputSize, float *S) {
__shared__ float XY[SECTION_SIZE];
int i = blockIdx.x*blockDim.x+ threadIdx.x;
XY[threadIdx.x] = X[i];
/*if (i < InputSize && threadIdx.x != 0) {
XY[threadIdx.x] = X[i-1];
}else{
XY[threadIdx.x] = 0;
}*/
// the code below performs iterative scan on XY
for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
XY[threadIdx.x] += XY[threadIdx.x-stride];
}
if(i<InputSize) Y[i] = XY[threadIdx.x];
//Y[i] = XY[threadIdx.x];
__syncthreads();
if(threadIdx.x == 0) S[blockIdx.x] = XY[SECTION_SIZE-1]; // get the last element in each section
}
__global__ void Add_scalar_to_subvector(float *Y, float *S, int InputSize){
int i = (blockIdx.x+1)*blockDim.x+ threadIdx.x;
if(i<InputSize) Y[i] += S[blockIdx.x];
//Y[i] += S[blockIdx.x];
}
int main(){
int n;
cin >> n;
int size = n*sizeof(float);
//float *A, *B, *C;
float *X_h = (float *) malloc( size ); // allocate CPU memory
float *Y_h = (float *) malloc( size );
for(int i=0; i<n; i++){ X_h[i] = 1; Y_h[i]=0; }
float *X, *Y, *S, *S1;
cudaMalloc(&X, size); // allocate GPU memory
cudaMalloc(&Y, size);
cudaMemcpy(X, X_h, size, cudaMemcpyHostToDevice);
int BLOCK_SIZE = SECTION_SIZE;
int GRID_SIZE=ceil(n/BLOCK_SIZE);
int size_s = GRID_SIZE*sizeof(float);
cudaMalloc(&S, size_s);
cudaMalloc(&S1, size_s);
Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (X, Y, n, S);
Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (S, S, n, S1);
Add_scalar_to_subvector<<<GRID_SIZE,BLOCK_SIZE>>> (Y, S, n);
cudaMemcpy(Y_h, Y, size, cudaMemcpyDeviceToHost);
for(int i=0; i<n; i++){
cout<<i<<" "<<Y_h[i]<<endl;
}
cudaFree(X); cudaFree(Y);
free(X_h); free(Y_h);
} | #include <hip/hip_runtime.h>
#include<iostream>
#define SECTION_SIZE 32
using namespace std;
__global__ void Prefix_sum_oneblock_kernel(float *X, float *Y, int InputSize, float *S) {
__shared__ float XY[SECTION_SIZE];
int i = blockIdx.x*blockDim.x+ threadIdx.x;
XY[threadIdx.x] = X[i];
/*if (i < InputSize && threadIdx.x != 0) {
XY[threadIdx.x] = X[i-1];
}else{
XY[threadIdx.x] = 0;
}*/
// the code below performs iterative scan on XY
for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
XY[threadIdx.x] += XY[threadIdx.x-stride];
}
if(i<InputSize) Y[i] = XY[threadIdx.x];
//Y[i] = XY[threadIdx.x];
__syncthreads();
if(threadIdx.x == 0) S[blockIdx.x] = XY[SECTION_SIZE-1]; // get the last element in each section
}
__global__ void Add_scalar_to_subvector(float *Y, float *S, int InputSize){
int i = (blockIdx.x+1)*blockDim.x+ threadIdx.x;
if(i<InputSize) Y[i] += S[blockIdx.x];
//Y[i] += S[blockIdx.x];
}
int main(){
int n;
cin >> n;
int size = n*sizeof(float);
//float *A, *B, *C;
float *X_h = (float *) malloc( size ); // allocate CPU memory
float *Y_h = (float *) malloc( size );
for(int i=0; i<n; i++){ X_h[i] = 1; Y_h[i]=0; }
float *X, *Y, *S, *S1;
hipMalloc(&X, size); // allocate GPU memory
hipMalloc(&Y, size);
hipMemcpy(X, X_h, size, hipMemcpyHostToDevice);
int BLOCK_SIZE = SECTION_SIZE;
int GRID_SIZE=ceil(n/BLOCK_SIZE);
int size_s = GRID_SIZE*sizeof(float);
hipMalloc(&S, size_s);
hipMalloc(&S1, size_s);
Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (X, Y, n, S);
Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (S, S, n, S1);
Add_scalar_to_subvector<<<GRID_SIZE,BLOCK_SIZE>>> (Y, S, n);
hipMemcpy(Y_h, Y, size, hipMemcpyDeviceToHost);
for(int i=0; i<n; i++){
cout<<i<<" "<<Y_h[i]<<endl;
}
hipFree(X); hipFree(Y);
free(X_h); free(Y_h);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include<iostream>
#define SECTION_SIZE 32
using namespace std;
__global__ void Prefix_sum_oneblock_kernel(float *X, float *Y, int InputSize, float *S) {
__shared__ float XY[SECTION_SIZE];
int i = blockIdx.x*blockDim.x+ threadIdx.x;
XY[threadIdx.x] = X[i];
/*if (i < InputSize && threadIdx.x != 0) {
XY[threadIdx.x] = X[i-1];
}else{
XY[threadIdx.x] = 0;
}*/
// the code below performs iterative scan on XY
for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
XY[threadIdx.x] += XY[threadIdx.x-stride];
}
if(i<InputSize) Y[i] = XY[threadIdx.x];
//Y[i] = XY[threadIdx.x];
__syncthreads();
if(threadIdx.x == 0) S[blockIdx.x] = XY[SECTION_SIZE-1]; // get the last element in each section
}
__global__ void Add_scalar_to_subvector(float *Y, float *S, int InputSize){
int i = (blockIdx.x+1)*blockDim.x+ threadIdx.x;
if(i<InputSize) Y[i] += S[blockIdx.x];
//Y[i] += S[blockIdx.x];
}
int main(){
int n;
cin >> n;
int size = n*sizeof(float);
//float *A, *B, *C;
float *X_h = (float *) malloc( size ); // allocate CPU memory
float *Y_h = (float *) malloc( size );
for(int i=0; i<n; i++){ X_h[i] = 1; Y_h[i]=0; }
float *X, *Y, *S, *S1;
hipMalloc(&X, size); // allocate GPU memory
hipMalloc(&Y, size);
hipMemcpy(X, X_h, size, hipMemcpyHostToDevice);
int BLOCK_SIZE = SECTION_SIZE;
int GRID_SIZE=ceil(n/BLOCK_SIZE);
int size_s = GRID_SIZE*sizeof(float);
hipMalloc(&S, size_s);
hipMalloc(&S1, size_s);
Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (X, Y, n, S);
Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (S, S, n, S1);
Add_scalar_to_subvector<<<GRID_SIZE,BLOCK_SIZE>>> (Y, S, n);
hipMemcpy(Y_h, Y, size, hipMemcpyDeviceToHost);
for(int i=0; i<n; i++){
cout<<i<<" "<<Y_h[i]<<endl;
}
hipFree(X); hipFree(Y);
free(X_h); free(Y_h);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z26Prefix_sum_oneblock_kernelPfS_iS_
.globl _Z26Prefix_sum_oneblock_kernelPfS_iS_
.p2align 8
.type _Z26Prefix_sum_oneblock_kernelPfS_iS_,@function
_Z26Prefix_sum_oneblock_kernelPfS_iS_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_mov_b32 s3, 0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_mov_b32 s4, exec_lo
global_load_b32 v4, v[3:4], off
v_lshlrev_b32_e32 v3, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v3, v4
v_cmpx_ne_u32_e32 0, v0
s_cbranch_execz .LBB0_3
s_mov_b32 s5, 1
.LBB0_2:
s_delay_alu instid0(SALU_CYCLE_1)
v_subrev_nc_u32_e32 v4, s5, v0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_lshl_b32 s5, s5, 1
v_lshlrev_b32_e32 v4, 2, v4
v_cmp_gt_u32_e32 vcc_lo, s5, v0
ds_load_b32 v4, v4
ds_load_b32 v5, v3
s_or_b32 s3, vcc_lo, s3
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v4, v4, v5
ds_store_b32 v3, v4
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s4
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_5
s_load_b64 s[4:5], s[0:1], 0x8
ds_load_b32 v3, v3
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_store_b32 v[1:2], v3, off
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s3
s_mov_b32 s3, 0
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x18
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0 offset:124
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z26Prefix_sum_oneblock_kernelPfS_iS_
.amdhsa_group_segment_fixed_size 128
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z26Prefix_sum_oneblock_kernelPfS_iS_, .Lfunc_end0-_Z26Prefix_sum_oneblock_kernelPfS_iS_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z23Add_scalar_to_subvectorPfS_i
.globl _Z23Add_scalar_to_subvectorPfS_i
.p2align 8
.type _Z23Add_scalar_to_subvectorPfS_i,@function
_Z23Add_scalar_to_subvectorPfS_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_add_i32 s5, s15, 1
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s5, s3, v[0:1]
s_mov_b32 s3, exec_lo
v_cmpx_gt_i32_e64 s4, v1
s_cbranch_execz .LBB1_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s2, s15
s_mov_b32 s3, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_lshl_b64 s[0:1], s[2:3], 2
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_add_u32 s0, s6, s0
s_addc_u32 s1, s7, s1
s_load_b32 s0, s[0:1], 0x0
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_f32_e32 v2, s0, v2
global_store_b32 v[0:1], v2, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23Add_scalar_to_subvectorPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z23Add_scalar_to_subvectorPfS_i, .Lfunc_end1-_Z23Add_scalar_to_subvectorPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 128
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z26Prefix_sum_oneblock_kernelPfS_iS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z26Prefix_sum_oneblock_kernelPfS_iS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23Add_scalar_to_subvectorPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23Add_scalar_to_subvectorPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include<iostream>
#define SECTION_SIZE 32
using namespace std;
__global__ void Prefix_sum_oneblock_kernel(float *X, float *Y, int InputSize, float *S) {
__shared__ float XY[SECTION_SIZE];
int i = blockIdx.x*blockDim.x+ threadIdx.x;
XY[threadIdx.x] = X[i];
/*if (i < InputSize && threadIdx.x != 0) {
XY[threadIdx.x] = X[i-1];
}else{
XY[threadIdx.x] = 0;
}*/
// the code below performs iterative scan on XY
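// (Kogge-Stone style: each pass adds the value 'stride' slots to the left, doubling the summed
// range until XY[threadIdx.x] holds the inclusive prefix sum of its 32-element section.)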
for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
__syncthreads();
XY[threadIdx.x] += XY[threadIdx.x-stride];
}
if(i<InputSize) Y[i] = XY[threadIdx.x];
//Y[i] = XY[threadIdx.x];
__syncthreads();
if(threadIdx.x == 0) S[blockIdx.x] = XY[SECTION_SIZE-1]; // get the last element in each section
}
__global__ void Add_scalar_to_subvector(float *Y, float *S, int InputSize){
int i = (blockIdx.x+1)*blockDim.x+ threadIdx.x;
if(i<InputSize) Y[i] += S[blockIdx.x];
//Y[i] += S[blockIdx.x];
}
int main(){
int n;
cin >> n;
int size = n*sizeof(float);
//float *A, *B, *C;
float *X_h = (float *) malloc( size ); // allocate CPU memory
float *Y_h = (float *) malloc( size );
for(int i=0; i<n; i++){ X_h[i] = 1; Y_h[i]=0; }
float *X, *Y, *S, *S1;
hipMalloc(&X, size); // allocate GPU memory
hipMalloc(&Y, size);
hipMemcpy(X, X_h, size, hipMemcpyHostToDevice);
int BLOCK_SIZE = SECTION_SIZE;
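// GRID_SIZE note: n/BLOCK_SIZE is integer division, so the ceil() is a no-op and
// GRID_SIZE == n/32 (truncated); elements past GRID_SIZE*SECTION_SIZE are left unprocessed.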
int GRID_SIZE=ceil(n/BLOCK_SIZE);
int size_s = GRID_SIZE*sizeof(float);
hipMalloc(&S, size_s);
hipMalloc(&S1, size_s);
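// Launch sequence: per-section scan of X into Y (section totals go to S), in-place scan of S,
// then Add_scalar_to_subvector folds the scanned totals into the later sections of Y.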
Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (X, Y, n, S);
Prefix_sum_oneblock_kernel<<<GRID_SIZE,BLOCK_SIZE>>> (S, S, n, S1);
Add_scalar_to_subvector<<<GRID_SIZE,BLOCK_SIZE>>> (Y, S, n);
hipMemcpy(Y_h, Y, size, hipMemcpyDeviceToHost);
for(int i=0; i<n; i++){
cout<<i<<" "<<Y_h[i]<<endl;
}
hipFree(X); hipFree(Y);
free(X_h); free(Y_h);
} | .text
.file "prefix_sum.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_ # -- Begin function _Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_
.p2align 4, 0x90
.type _Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_,@function
_Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_: # @_Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movq %rcx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z26Prefix_sum_oneblock_kernelPfS_iS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_, .Lfunc_end0-_Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_
.cfi_endproc
# -- End function
.globl _Z38__device_stub__Add_scalar_to_subvectorPfS_i # -- Begin function _Z38__device_stub__Add_scalar_to_subvectorPfS_i
.p2align 4, 0x90
.type _Z38__device_stub__Add_scalar_to_subvectorPfS_i,@function
_Z38__device_stub__Add_scalar_to_subvectorPfS_i: # @_Z38__device_stub__Add_scalar_to_subvectorPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23Add_scalar_to_subvectorPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z38__device_stub__Add_scalar_to_subvectorPfS_i, .Lfunc_end1-_Z38__device_stub__Add_scalar_to_subvectorPfS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 8(%rsp), %rsi
movl $_ZSt3cin, %edi
callq _ZNSirsERi
movl 8(%rsp), %r12d
leal (,%r12,4), %eax
movslq %eax, %r15
movq %r15, %rdi
callq malloc
movq %rax, %rbx
movq %r15, %rdi
callq malloc
movq %rax, %r14
testl %r12d, %r12d
jle .LBB2_3
# %bb.1: # %.lr.ph.preheader
leaq (,%r12,4), %rdx
xorl %r13d, %r13d
movq %r14, %rdi
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $1065353216, (%rbx,%r13,4) # imm = 0x3F800000
incq %r13
cmpq %r13, %r12
jne .LBB2_2
.LBB2_3: # %._crit_edge
movabsq $4294967328, %r12 # imm = 0x100000020
leaq 104(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 88(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movq 104(%rsp), %rdi
movq %rbx, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movl 8(%rsp), %eax
leal 31(%rax), %ebp
testl %eax, %eax
cmovnsl %eax, %ebp
sarl $5, %ebp
leal (,%rbp,4), %eax
movslq %eax, %r13
leaq 96(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
leaq 144(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
leaq (%r12,%rbp), %r13
addq $-32, %r13
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_5
# %bb.4:
movq 104(%rsp), %rax
movq 88(%rsp), %rcx
movl 8(%rsp), %edx
movq 96(%rsp), %rsi
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movl %edx, 12(%rsp)
movq %rsi, 32(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z26Prefix_sum_oneblock_kernelPfS_iS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_5:
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_7
# %bb.6:
movq 96(%rsp), %rax
movl 8(%rsp), %ecx
movq 144(%rsp), %rdx
movq %rax, 80(%rsp)
movq %rax, 72(%rsp)
movl %ecx, 12(%rsp)
movq %rdx, 32(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z26Prefix_sum_oneblock_kernelPfS_iS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_7:
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_9
# %bb.8:
movq 88(%rsp), %rax
movq 96(%rsp), %rcx
movl 8(%rsp), %edx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movl %edx, 16(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z23Add_scalar_to_subvectorPfS_i, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_9:
movq 88(%rsp), %rsi
movq %r14, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
cmpl $0, 8(%rsp)
jle .LBB2_16
# %bb.10: # %.lr.ph74.preheader
xorl %r15d, %r15d
jmp .LBB2_11
.p2align 4, 0x90
.LBB2_14: # in Loop: Header=BB2_11 Depth=1
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB2_15: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB2_11 Depth=1
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r15
movslq 8(%rsp), %rax
cmpq %rax, %r15
jge .LBB2_16
.LBB2_11: # %.lr.ph74
# =>This Inner Loop Header: Depth=1
movl $_ZSt4cout, %edi
movl %r15d, %esi
callq _ZNSolsEi
movq %rax, %r12
movl $.L.str, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB2_17
# %bb.12: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB2_11 Depth=1
cmpb $0, 56(%r12)
je .LBB2_14
# %bb.13: # in Loop: Header=BB2_11 Depth=1
movzbl 67(%r12), %ecx
jmp .LBB2_15
.LBB2_16: # %._crit_edge75
movq 104(%rsp), %rdi
callq hipFree
movq 88(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_17:
.cfi_def_cfa_offset 208
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z26Prefix_sum_oneblock_kernelPfS_iS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23Add_scalar_to_subvectorPfS_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z26Prefix_sum_oneblock_kernelPfS_iS_,@object # @_Z26Prefix_sum_oneblock_kernelPfS_iS_
.section .rodata,"a",@progbits
.globl _Z26Prefix_sum_oneblock_kernelPfS_iS_
.p2align 3, 0x0
_Z26Prefix_sum_oneblock_kernelPfS_iS_:
.quad _Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_
.size _Z26Prefix_sum_oneblock_kernelPfS_iS_, 8
.type _Z23Add_scalar_to_subvectorPfS_i,@object # @_Z23Add_scalar_to_subvectorPfS_i
.globl _Z23Add_scalar_to_subvectorPfS_i
.p2align 3, 0x0
_Z23Add_scalar_to_subvectorPfS_i:
.quad _Z38__device_stub__Add_scalar_to_subvectorPfS_i
.size _Z23Add_scalar_to_subvectorPfS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " "
.size .L.str, 3
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z26Prefix_sum_oneblock_kernelPfS_iS_"
.size .L__unnamed_1, 38
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z23Add_scalar_to_subvectorPfS_i"
.size .L__unnamed_2, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_
.addrsig_sym _Z38__device_stub__Add_scalar_to_subvectorPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z26Prefix_sum_oneblock_kernelPfS_iS_
.addrsig_sym _Z23Add_scalar_to_subvectorPfS_i
.addrsig_sym _ZSt3cin
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z23Add_scalar_to_subvectorPfS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e620000002100 */
/*0030*/ IADD3 R0, R2, 0x1, RZ ; /* 0x0000000102007810 */
/* 0x001fca0007ffe0ff */
/*0040*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x002fca00078e0203 */
/*0050*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fda0003f06270 */
/*0060*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0070*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0080*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0090*/ IMAD.WIDE.U32 R2, R2, R5, c[0x0][0x168] ; /* 0x00005a0002027625 */
/* 0x000fc800078e0005 */
/*00a0*/ IMAD.WIDE R4, R0, R5, c[0x0][0x160] ; /* 0x0000580000047625 */
/* 0x000fe400078e0205 */
/*00b0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*00c0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ea4000c1e1900 */
/*00d0*/ FADD R7, R0, R3 ; /* 0x0000000300077221 */
/* 0x004fca0000000000 */
/*00e0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z26Prefix_sum_oneblock_kernelPfS_iS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000e220000002100 */
/*0020*/ IMAD.MOV.U32 R11, RZ, RZ, 0x4 ; /* 0x00000004ff0b7424 */
/* 0x000fe200078e00ff */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe40000000a00 */
/*0040*/ S2R R6, SR_CTAID.X ; /* 0x0000000000067919 */
/* 0x000e240000002500 */
/*0050*/ IMAD R0, R6, c[0x0][0x0], R7 ; /* 0x0000000006007a24 */
/* 0x001fc800078e0207 */
/*0060*/ IMAD.WIDE R2, R0, R11, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fca00078e020b */
/*0070*/ LDG.E R5, [R2.64] ; /* 0x0000000402057981 */
/* 0x000ea2000c1e1900 */
/*0080*/ ISETP.NE.AND P0, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fe40003f05270 */
/*0090*/ SHF.R.S32.HI R9, RZ, 0x1f, R0 ; /* 0x0000001fff097819 */
/* 0x000fe20000011400 */
/*00a0*/ STS [R7.X4], R5 ; /* 0x0000000507007388 */
/* 0x0041f40000004800 */
/*00b0*/ @!P0 BRA 0x170 ; /* 0x000000b000008947 */
/* 0x000fea0003800000 */
/*00c0*/ HFMA2.MMA R2, -RZ, RZ, 0, 5.9604644775390625e-08 ; /* 0x00000001ff027435 */
/* 0x000fd000000001ff */
/*00d0*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*00e0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*00f0*/ IMAD.IADD R3, R7, 0x1, -R2 ; /* 0x0000000107037824 */
/* 0x000fe200078e0a02 */
/*0100*/ SHF.L.U32 R2, R2, 0x1, RZ ; /* 0x0000000102027819 */
/* 0x000fc800000006ff */
/*0110*/ ISETP.GE.U32.AND P0, PT, R7, R2, PT ; /* 0x000000020700720c */
/* 0x000fe20003f06070 */
/*0120*/ LDS R3, [R3.X4] ; /* 0x0000000003037984 */
/* 0x000fe80000004800 */
/*0130*/ LDS R4, [R7.X4] ; /* 0x0000000007047984 */
/* 0x000e640000004800 */
/*0140*/ FADD R5, R4, R3 ; /* 0x0000000304057221 */
/* 0x003fca0000000000 */
/*0150*/ STS [R7.X4], R5 ; /* 0x0000000507007388 */
/* 0x0001e20000004800 */
/*0160*/ @P0 BRA 0xd0 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*0170*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x170], PT ; /* 0x00005c0000007a0c */
/* 0x000fe20003f06270 */
/*0180*/ WARPSYNC 0xffffffff ; /* 0xffffffff00007948 */
/* 0x000fe20003800000 */
/*0190*/ ISETP.NE.AND P1, PT, R7, RZ, PT ; /* 0x000000ff0700720c */
/* 0x000fd60003f25270 */
/*01a0*/ @!P0 LEA R2, P2, R0, c[0x0][0x168], 0x2 ; /* 0x00005a0000028a11 */
/* 0x000fc800078410ff */
/*01b0*/ @!P0 LEA.HI.X R3, R0, c[0x0][0x16c], R9, 0x2, P2 ; /* 0x00005b0000038a11 */
/* 0x000fca00010f1409 */
/*01c0*/ @!P0 STG.E [R2.64], R5 ; /* 0x0000000502008986 */
/* 0x0003e8000c101904 */
/*01d0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*01e0*/ @P1 EXIT ; /* 0x000000000000194d */
/* 0x000fea0003800000 */
/*01f0*/ LDS R5, [0x7c] ; /* 0x00007c00ff057984 */
/* 0x003e220000000800 */
/*0200*/ IMAD.WIDE.U32 R2, R6, R11, c[0x0][0x178] ; /* 0x00005e0006027625 */
/* 0x000fca00078e000b */
/*0210*/ STG.E [R2.64], R5 ; /* 0x0000000502007986 */
/* 0x001fe2000c101904 */
/*0220*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0230*/ BRA 0x230; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z26Prefix_sum_oneblock_kernelPfS_iS_
.globl _Z26Prefix_sum_oneblock_kernelPfS_iS_
.p2align 8
.type _Z26Prefix_sum_oneblock_kernelPfS_iS_,@function
_Z26Prefix_sum_oneblock_kernelPfS_iS_:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x2c
s_load_b64 s[4:5], s[0:1], 0x0
s_mov_b32 s2, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s2, s3, v[0:1]
s_mov_b32 s3, 0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[1:2]
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
s_mov_b32 s4, exec_lo
global_load_b32 v4, v[3:4], off
v_lshlrev_b32_e32 v3, 2, v0
s_waitcnt vmcnt(0)
ds_store_b32 v3, v4
v_cmpx_ne_u32_e32 0, v0
s_cbranch_execz .LBB0_3
s_mov_b32 s5, 1
.LBB0_2:
s_delay_alu instid0(SALU_CYCLE_1)
v_subrev_nc_u32_e32 v4, s5, v0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_lshl_b32 s5, s5, 1
v_lshlrev_b32_e32 v4, 2, v4
v_cmp_gt_u32_e32 vcc_lo, s5, v0
ds_load_b32 v4, v4
ds_load_b32 v5, v3
s_or_b32 s3, vcc_lo, s3
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v4, v4, v5
ds_store_b32 v3, v4
s_and_not1_b32 exec_lo, exec_lo, s3
s_cbranch_execnz .LBB0_2
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s4
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
v_cmp_gt_i32_e32 vcc_lo, s3, v1
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_5
s_load_b64 s[4:5], s[0:1], 0x8
ds_load_b32 v3, v3
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v1, vcc_lo, s4, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_store_b32 v[1:2], v3, off
.LBB0_5:
s_or_b32 exec_lo, exec_lo, s3
s_mov_b32 s3, 0
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_mov_b32 s4, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_7
v_mov_b32_e32 v0, 0
s_load_b64 s[0:1], s[0:1], 0x18
s_lshl_b64 s[2:3], s[2:3], 2
ds_load_b32 v1, v0 offset:124
s_waitcnt lgkmcnt(0)
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
global_store_b32 v0, v1, s[0:1]
.LBB0_7:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z26Prefix_sum_oneblock_kernelPfS_iS_
.amdhsa_group_segment_fixed_size 128
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z26Prefix_sum_oneblock_kernelPfS_iS_, .Lfunc_end0-_Z26Prefix_sum_oneblock_kernelPfS_iS_
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z23Add_scalar_to_subvectorPfS_i
.globl _Z23Add_scalar_to_subvectorPfS_i
.p2align 8
.type _Z23Add_scalar_to_subvectorPfS_i,@function
_Z23Add_scalar_to_subvectorPfS_i:
s_clause 0x1
s_load_b32 s3, s[0:1], 0x24
s_load_b32 s4, s[0:1], 0x10
s_add_i32 s5, s15, 1
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s3, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s5, s3, v[0:1]
s_mov_b32 s3, exec_lo
v_cmpx_gt_i32_e64 s4, v1
s_cbranch_execz .LBB1_2
s_load_b128 s[4:7], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s2, s15
s_mov_b32 s3, 0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_lshl_b64 s[0:1], s[2:3], 2
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_add_u32 s0, s6, s0
s_addc_u32 s1, s7, s1
s_load_b32 s0, s[0:1], 0x0
global_load_b32 v2, v[0:1], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_add_f32_e32 v2, s0, v2
global_store_b32 v[0:1], v2, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z23Add_scalar_to_subvectorPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z23Add_scalar_to_subvectorPfS_i, .Lfunc_end1-_Z23Add_scalar_to_subvectorPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 128
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z26Prefix_sum_oneblock_kernelPfS_iS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z26Prefix_sum_oneblock_kernelPfS_iS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z23Add_scalar_to_subvectorPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z23Add_scalar_to_subvectorPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0019988f_00000000-6_prefix_sum.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3672:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3672:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_
.type _Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_, @function
_Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_:
.LFB3694:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z26Prefix_sum_oneblock_kernelPfS_iS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3694:
.size _Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_, .-_Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_
.globl _Z26Prefix_sum_oneblock_kernelPfS_iS_
.type _Z26Prefix_sum_oneblock_kernelPfS_iS_, @function
_Z26Prefix_sum_oneblock_kernelPfS_iS_:
.LFB3695:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3695:
.size _Z26Prefix_sum_oneblock_kernelPfS_iS_, .-_Z26Prefix_sum_oneblock_kernelPfS_iS_
.globl _Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i
.type _Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i, @function
_Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i:
.LFB3696:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L15
.L11:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L16
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z23Add_scalar_to_subvectorPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L11
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i, .-_Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i
.globl _Z23Add_scalar_to_subvectorPfS_i
.type _Z23Add_scalar_to_subvectorPfS_i, @function
_Z23Add_scalar_to_subvectorPfS_i:
.LFB3697:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z23Add_scalar_to_subvectorPfS_i, .-_Z23Add_scalar_to_subvectorPfS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string " "
.text
.globl main
.type main, @function
main:
.LFB3669:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $88, %rsp
.cfi_def_cfa_offset 144
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
leaq 12(%rsp), %rsi
leaq _ZSt3cin(%rip), %rdi
call _ZNSirsERi@PLT
movl 12(%rsp), %r12d
leal 0(,%r12,4), %ebp
movslq %ebp, %rbp
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r14
movq %rbp, %rdi
call malloc@PLT
movq %rax, %r13
testl %r12d, %r12d
jle .L20
movslq %r12d, %rbx
leaq 0(,%rbx,4), %rdx
movl $0, %eax
movss .LC0(%rip), %xmm0
.L21:
movss %xmm0, (%r14,%rax)
movl $0x00000000, 0(%r13,%rax)
addq $4, %rax
cmpq %rax, %rdx
jne .L21
.L20:
leaq 16(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq %r14, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl 12(%rsp), %eax
leal 31(%rax), %ebx
testl %eax, %eax
cmovns %eax, %ebx
sarl $5, %ebx
leal 0(,%rbx,4), %r12d
movslq %r12d, %r12
leaq 32(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movq %r12, %rsi
call cudaMalloc@PLT
movl $32, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl %ebx, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L35
.L22:
movl $32, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl %ebx, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L36
.L23:
movl $32, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl %ebx, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L37
.L24:
movl $2, %ecx
movq %rbp, %rdx
movq 24(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
cmpl $0, 12(%rsp)
jle .L25
movl $0, %ebp
leaq _ZSt4cout(%rip), %r15
jmp .L30
.L35:
movq 32(%rsp), %rcx
movl 12(%rsp), %edx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
call _Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_
jmp .L22
.L36:
movq 32(%rsp), %rdi
movq 40(%rsp), %rcx
movl 12(%rsp), %edx
movq %rdi, %rsi
call _Z51__device_stub__Z26Prefix_sum_oneblock_kernelPfS_iS_PfS_iS_
jmp .L23
.L37:
movl 12(%rsp), %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z46__device_stub__Z23Add_scalar_to_subvectorPfS_iPfS_i
jmp .L24
.L39:
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L38
call _ZSt16__throw_bad_castv@PLT
.L38:
call __stack_chk_fail@PLT
.L28:
movq %r12, %rdi
call _ZNKSt5ctypeIcE13_M_widen_initEv@PLT
movq (%r12), %rax
movl $10, %esi
movq %r12, %rdi
call *48(%rax)
movl %eax, %esi
.L29:
movsbl %sil, %esi
movq %rbx, %rdi
call _ZNSo3putEc@PLT
movq %rax, %rdi
call _ZNSo5flushEv@PLT
addq $1, %rbp
cmpl %ebp, 12(%rsp)
jle .L25
.L30:
movl %ebp, %esi
movq %r15, %rdi
call _ZNSolsEi@PLT
movq %rax, %rbx
movl $2, %edx
leaq .LC2(%rip), %rsi
movq %rax, %rdi
call _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@PLT
pxor %xmm0, %xmm0
cvtss2sd 0(%r13,%rbp,4), %xmm0
movq %rbx, %rdi
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rbx
movq (%rax), %rax
movq -24(%rax), %rax
movq 240(%rbx,%rax), %r12
testq %r12, %r12
je .L39
cmpb $0, 56(%r12)
je .L28
movzbl 67(%r12), %esi
jmp .L29
.L25:
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq %r14, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L40
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L40:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3669:
.size main, .-main
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC3:
.string "_Z23Add_scalar_to_subvectorPfS_i"
.align 8
.LC4:
.string "_Z26Prefix_sum_oneblock_kernelPfS_iS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z23Add_scalar_to_subvectorPfS_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z26Prefix_sum_oneblock_kernelPfS_iS_(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC0:
.long 1065353216
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "prefix_sum.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_ # -- Begin function _Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_
.p2align 4, 0x90
.type _Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_,@function
_Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_: # @_Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movq %rcx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z26Prefix_sum_oneblock_kernelPfS_iS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_, .Lfunc_end0-_Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_
.cfi_endproc
# -- End function
.globl _Z38__device_stub__Add_scalar_to_subvectorPfS_i # -- Begin function _Z38__device_stub__Add_scalar_to_subvectorPfS_i
.p2align 4, 0x90
.type _Z38__device_stub__Add_scalar_to_subvectorPfS_i,@function
_Z38__device_stub__Add_scalar_to_subvectorPfS_i: # @_Z38__device_stub__Add_scalar_to_subvectorPfS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z23Add_scalar_to_subvectorPfS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z38__device_stub__Add_scalar_to_subvectorPfS_i, .Lfunc_end1-_Z38__device_stub__Add_scalar_to_subvectorPfS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $152, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 8(%rsp), %rsi
movl $_ZSt3cin, %edi
callq _ZNSirsERi
movl 8(%rsp), %r12d
leal (,%r12,4), %eax
movslq %eax, %r15
movq %r15, %rdi
callq malloc
movq %rax, %rbx
movq %r15, %rdi
callq malloc
movq %rax, %r14
testl %r12d, %r12d
jle .LBB2_3
# %bb.1: # %.lr.ph.preheader
leaq (,%r12,4), %rdx
xorl %r13d, %r13d
movq %r14, %rdi
xorl %esi, %esi
callq memset@PLT
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movl $1065353216, (%rbx,%r13,4) # imm = 0x3F800000
incq %r13
cmpq %r13, %r12
jne .LBB2_2
.LBB2_3: # %._crit_edge
movabsq $4294967328, %r12 # imm = 0x100000020
leaq 104(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
leaq 88(%rsp), %rdi
movq %r15, %rsi
callq hipMalloc
movq 104(%rsp), %rdi
movq %rbx, %rsi
movq %r15, %rdx
movl $1, %ecx
callq hipMemcpy
movl 8(%rsp), %eax
leal 31(%rax), %ebp
testl %eax, %eax
cmovnsl %eax, %ebp
sarl $5, %ebp
leal (,%rbp,4), %eax
movslq %eax, %r13
leaq 96(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
leaq 144(%rsp), %rdi
movq %r13, %rsi
callq hipMalloc
leaq (%r12,%rbp), %r13
addq $-32, %r13
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_5
# %bb.4:
movq 104(%rsp), %rax
movq 88(%rsp), %rcx
movl 8(%rsp), %edx
movq 96(%rsp), %rsi
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movl %edx, 12(%rsp)
movq %rsi, 32(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z26Prefix_sum_oneblock_kernelPfS_iS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_5:
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_7
# %bb.6:
movq 96(%rsp), %rax
movl 8(%rsp), %ecx
movq 144(%rsp), %rdx
movq %rax, 80(%rsp)
movq %rax, 72(%rsp)
movl %ecx, 12(%rsp)
movq %rdx, 32(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 32(%rsp), %rax
movq %rax, 136(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z26Prefix_sum_oneblock_kernelPfS_iS_, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_7:
movq %r13, %rdi
movl $1, %esi
movq %r12, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_9
# %bb.8:
movq 88(%rsp), %rax
movq 96(%rsp), %rcx
movl 8(%rsp), %edx
movq %rax, 80(%rsp)
movq %rcx, 72(%rsp)
movl %edx, 16(%rsp)
leaq 80(%rsp), %rax
movq %rax, 112(%rsp)
leaq 72(%rsp), %rax
movq %rax, 120(%rsp)
leaq 16(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rdi
leaq 40(%rsp), %rsi
leaq 32(%rsp), %rdx
leaq 24(%rsp), %rcx
callq __hipPopCallConfiguration
movq 56(%rsp), %rsi
movl 64(%rsp), %edx
movq 40(%rsp), %rcx
movl 48(%rsp), %r8d
leaq 112(%rsp), %r9
movl $_Z23Add_scalar_to_subvectorPfS_i, %edi
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
pushq 40(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_9:
movq 88(%rsp), %rsi
movq %r14, %rdi
movq %r15, %rdx
movl $2, %ecx
callq hipMemcpy
cmpl $0, 8(%rsp)
jle .LBB2_16
# %bb.10: # %.lr.ph74.preheader
xorl %r15d, %r15d
jmp .LBB2_11
.p2align 4, 0x90
.LBB2_14: # in Loop: Header=BB2_11 Depth=1
movq %r12, %rdi
movq %rax, %r13
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
movl %eax, %ecx
movq %r13, %rax
.LBB2_15: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
# in Loop: Header=BB2_11 Depth=1
movsbl %cl, %esi
movq %rax, %rdi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
incq %r15
movslq 8(%rsp), %rax
cmpq %rax, %r15
jge .LBB2_16
.LBB2_11: # %.lr.ph74
# =>This Inner Loop Header: Depth=1
movl $_ZSt4cout, %edi
movl %r15d, %esi
callq _ZNSolsEi
movq %rax, %r12
movl $.L.str, %esi
movl $2, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss (%r14,%r15,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movq %r12, %rdi
callq _ZNSo9_M_insertIdEERSoT_
movq (%rax), %rcx
movq -24(%rcx), %rcx
movq 240(%rax,%rcx), %r12
testq %r12, %r12
je .LBB2_17
# %bb.12: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
# in Loop: Header=BB2_11 Depth=1
cmpb $0, 56(%r12)
je .LBB2_14
# %bb.13: # in Loop: Header=BB2_11 Depth=1
movzbl 67(%r12), %ecx
jmp .LBB2_15
.LBB2_16: # %._crit_edge75
movq 104(%rsp), %rdi
callq hipFree
movq 88(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB2_17:
.cfi_def_cfa_offset 208
callq _ZSt16__throw_bad_castv
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z26Prefix_sum_oneblock_kernelPfS_iS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z23Add_scalar_to_subvectorPfS_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z26Prefix_sum_oneblock_kernelPfS_iS_,@object # @_Z26Prefix_sum_oneblock_kernelPfS_iS_
.section .rodata,"a",@progbits
.globl _Z26Prefix_sum_oneblock_kernelPfS_iS_
.p2align 3, 0x0
_Z26Prefix_sum_oneblock_kernelPfS_iS_:
.quad _Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_
.size _Z26Prefix_sum_oneblock_kernelPfS_iS_, 8
.type _Z23Add_scalar_to_subvectorPfS_i,@object # @_Z23Add_scalar_to_subvectorPfS_i
.globl _Z23Add_scalar_to_subvectorPfS_i
.p2align 3, 0x0
_Z23Add_scalar_to_subvectorPfS_i:
.quad _Z38__device_stub__Add_scalar_to_subvectorPfS_i
.size _Z23Add_scalar_to_subvectorPfS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz " "
.size .L.str, 3
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z26Prefix_sum_oneblock_kernelPfS_iS_"
.size .L__unnamed_1, 38
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z23Add_scalar_to_subvectorPfS_i"
.size .L__unnamed_2, 33
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z41__device_stub__Prefix_sum_oneblock_kernelPfS_iS_
.addrsig_sym _Z38__device_stub__Add_scalar_to_subvectorPfS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z26Prefix_sum_oneblock_kernelPfS_iS_
.addrsig_sym _Z23Add_scalar_to_subvectorPfS_i
.addrsig_sym _ZSt3cin
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <iostream>
#include <math.h>
#include <cstdio>
using namespace std;
// Thread block size
const int blockSize = 16;
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row*M.width + col);
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// CPU matrix multiplication for evaluating results
void cpu_matrix_multi(float *matA, float *matB, float *matC, int m, int k, int n) {
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
float tmp = 0.0;
for (int l = 0; l < k; l++) {
tmp += matA[i*k + l] * matB[l*n + j];
}
matC[i*n + j] = tmp;
}
}
}
// Matrix multiplication kernel called by MatMul()
__global__
void MatMulKernel_naive(const Matrix A, const Matrix B, Matrix C) {
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.width; e++)
Cvalue += A.elements[row*A.width+e] * B.elements[e*B.width+col];
C.elements[row*C.width+col] = Cvalue;
}
// Matrix multiplication - host code
// Matrix dimensions are assumed to be multiples of blockSize
void MatMul(const Matrix A, const Matrix B, Matrix C) {
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.width;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(B.width/dimBlock.x, A.height/dimBlock.y, 1);
MatMulKernel_naive<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device
cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
int main() {
// Initialize A and B elements on host memory
Matrix h_A;
h_A.height = 1024; h_A.width = 1024;
float* h_matA = new float[h_A.height * h_A.width];
std::srand(1103);
for (int i = 0; i < h_A.height; i++)
for (int j = 0; j < h_A.width; j++)
h_matA[i*h_A.width+j] = float(std::rand())/float(RAND_MAX);
h_A.elements = h_matA;
Matrix h_B;
h_B.height = 1024; h_B.width = 1024;
float* h_matB = new float[h_B.height * h_B.width];
for (int i = 0; i < h_B.height; i++)
for (int j = 0; j < h_B.width; j++)
h_matB[i*h_B.width+j] = float(std::rand())/float(RAND_MAX);
h_B.elements = h_matB;
// Matrix C size
Matrix h_C;
h_C.height = h_A.height; h_C.width = h_B.width;
float* h_matC = new float[h_A.height * h_B.width];
h_C.elements = h_matC;
// Call MatMul()
MatMul(h_A, h_B, h_C);
// Evaluate results
float* h_matC_cpu = new float[h_A.height * h_B.width];
// cpu_matrix_multi(h_matA, h_matB, h_matC_cpu, h_A.height, h_A.width, h_B.width);
cpu_matrix_multi(h_A.elements, h_B.elements, h_matC_cpu, h_A.height, h_A.width, h_B.width);
bool res_flag = false;
float resol = 0.000001;
for (int i = 0; i < h_C.height; i++) {
for (int j = 0; j < h_C.width; j++) {
if (fabs(*(h_C.elements+i*h_C.width+j) - h_matC[i*h_C.width+j]) > resol)
res_flag = true;
}
}
if (res_flag == false)
cout << "Matrix multiplication by GPU is right! " << endl;
else
cout << "Results are not right! " << endl;
// Free memory on host
delete [] h_matA;
delete [] h_matB;
delete [] h_matC;
delete [] h_matC_cpu;
return 0;
} | code for sm_80
Function : _Z18MatMulKernel_naive6MatrixS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ MOV R4, c[0x0][0x160] ; /* 0x0000580000047a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R24, -RZ, RZ, 0, 0 ; /* 0x00000000ff187435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e220000002200 */
/*0060*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fc60003f06270 */
/*0070*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e680000002500 */
/*0080*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0090*/ IMAD R0, R0, c[0x0][0x4], R3 ; /* 0x0000010000007a24 */
/* 0x001fe400078e0203 */
/*00a0*/ IMAD R3, R2, c[0x0][0x0], R5 ; /* 0x0000000002037a24 */
/* 0x002fc600078e0205 */
/*00b0*/ @!P0 BRA 0xc10 ; /* 0x00000b5000008947 */
/* 0x000fea0003800000 */
/*00c0*/ IADD3 R2, R4.reuse, -0x1, RZ ; /* 0xffffffff04027810 */
/* 0x040fe40007ffe0ff */
/*00d0*/ LOP3.LUT R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fe400078ec0ff */
/*00e0*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f06070 */
/*00f0*/ MOV R2, RZ ; /* 0x000000ff00027202 */
/* 0x000fe40000000f00 */
/*0100*/ MOV R24, RZ ; /* 0x000000ff00187202 */
/* 0x000fd20000000f00 */
/*0110*/ @!P0 BRA 0xb00 ; /* 0x000009e000008947 */
/* 0x000fea0003800000 */
/*0120*/ IADD3 R5, -R4, c[0x0][0x160], RZ ; /* 0x0000580004057a10 */
/* 0x000fe20007ffe1ff */
/*0130*/ HFMA2.MMA R8, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff087435 */
/* 0x000fe200000001ff */
/*0140*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fe20000000a00 */
/*0150*/ HFMA2.MMA R24, -RZ, RZ, 0, 0 ; /* 0x00000000ff187435 */
/* 0x000fe200000001ff */
/*0160*/ ISETP.GT.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f04270 */
/*0170*/ IMAD R6, R0, c[0x0][0x160], RZ ; /* 0x0000580000067a24 */
/* 0x000fe200078e02ff */
/*0180*/ MOV R2, RZ ; /* 0x000000ff00027202 */
/* 0x000fca0000000f00 */
/*0190*/ IMAD.WIDE R8, R3, R8, c[0x0][0x178] ; /* 0x00005e0003087625 */
/* 0x000fcc00078e0208 */
/*01a0*/ @!P0 BRA 0x960 ; /* 0x000007b000008947 */
/* 0x000fea0003800000 */
/*01b0*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */
/* 0x000fe40003f24270 */
/*01c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01d0*/ @!P1 BRA 0x690 ; /* 0x000004b000009947 */
/* 0x000fea0003800000 */
/*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*01f0*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0200*/ LDG.E R21, [R8.64] ; /* 0x0000000408157981 */
/* 0x0000a2000c1e1900 */
/*0210*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fca0008000f00 */
/*0220*/ IMAD.WIDE R12, R6, 0x4, R12 ; /* 0x00000004060c7825 */
/* 0x000fca00078e020c */
/*0230*/ LDG.E R20, [R12.64] ; /* 0x000000040c147981 */
/* 0x000ea2000c1e1900 */
/*0240*/ MOV R7, c[0x0][0x170] ; /* 0x00005c0000077a02 */
/* 0x000fc60000000f00 */
/*0250*/ LDG.E R14, [R12.64+0x4] ; /* 0x000004040c0e7981 */
/* 0x000ee4000c1e1900 */
/*0260*/ IMAD.WIDE R10, R7.reuse, 0x4, R8 ; /* 0x00000004070a7825 */
/* 0x040fe400078e0208 */
/*0270*/ LDG.E R27, [R12.64+0x8] ; /* 0x000008040c1b7981 */
/* 0x000f28000c1e1900 */
/*0280*/ LDG.E R15, [R10.64] ; /* 0x000000040a0f7981 */
/* 0x0002e2000c1e1900 */
/*0290*/ IMAD.WIDE R22, R7, 0x4, R10 ; /* 0x0000000407167825 */
/* 0x000fc600078e020a */
/*02a0*/ LDG.E R18, [R12.64+0xc] ; /* 0x00000c040c127981 */
/* 0x000f66000c1e1900 */
/*02b0*/ IMAD.WIDE R28, R7.reuse, 0x4, R22 ; /* 0x00000004071c7825 */
/* 0x040fe200078e0216 */
/*02c0*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */
/* 0x000328000c1e1900 */
/*02d0*/ LDG.E R19, [R28.64] ; /* 0x000000041c137981 */
/* 0x000362000c1e1900 */
/*02e0*/ IMAD.WIDE R16, R7, 0x4, R28 ; /* 0x0000000407107825 */
/* 0x000fc600078e021c */
/*02f0*/ LDG.E R8, [R12.64+0x10] ; /* 0x000010040c087981 */
/* 0x001f68000c1e1900 */
/*0300*/ LDG.E R9, [R16.64] ; /* 0x0000000410097981 */
/* 0x000168000c1e1900 */
/*0310*/ LDG.E R10, [R12.64+0x14] ; /* 0x000014040c0a7981 */
/* 0x002f68000c1e1900 */
/*0320*/ LDG.E R28, [R12.64+0x1c] ; /* 0x00001c040c1c7981 */
/* 0x000f62000c1e1900 */
/*0330*/ IMAD.WIDE R16, R7, 0x4, R16 ; /* 0x0000000407107825 */
/* 0x001fca00078e0210 */
/*0340*/ LDG.E R11, [R16.64] ; /* 0x00000004100b7981 */
/* 0x000562000c1e1900 */
/*0350*/ IMAD.WIDE R22, R7, 0x4, R16 ; /* 0x0000000407167825 */
/* 0x000fc800078e0210 */
/*0360*/ FFMA R16, R21, R20, R24 ; /* 0x0000001415107223 */
/* 0x004fe40000000018 */
/*0370*/ LDG.E R20, [R12.64+0x18] ; /* 0x000018040c147981 */
/* 0x000ea2000c1e1900 */
/*0380*/ IMAD.WIDE R24, R7, 0x4, R22 ; /* 0x0000000407187825 */
/* 0x000fc600078e0216 */
/*0390*/ LDG.E R21, [R22.64] ; /* 0x0000000416157981 */
/* 0x0000a8000c1e1900 */
/*03a0*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x0002a2000c1e1900 */
/*03b0*/ FFMA R16, R15, R14, R16 ; /* 0x0000000e0f107223 */
/* 0x008fe40000000010 */
/*03c0*/ IMAD.WIDE R14, R7.reuse, 0x4, R24 ; /* 0x00000004070e7825 */
/* 0x040fe200078e0218 */
/*03d0*/ LDG.E R23, [R12.64+0x20] ; /* 0x000020040c177981 */
/* 0x001ee6000c1e1900 */
/*03e0*/ FFMA R26, R26, R27, R16 ; /* 0x0000001b1a1a7223 */
/* 0x010fe20000000010 */
/*03f0*/ LDG.E R25, [R12.64+0x24] ; /* 0x000024040c197981 */
/* 0x002f22000c1e1900 */
/*0400*/ IMAD.WIDE R16, R7, 0x4, R14 ; /* 0x0000000407107825 */
/* 0x000fc600078e020e */
/*0410*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0000e2000c1e1900 */
/*0420*/ FFMA R26, R19, R18, R26 ; /* 0x00000012131a7223 */
/* 0x020fe4000000001a */
/*0430*/ IMAD.WIDE R18, R7, 0x4, R16 ; /* 0x0000000407127825 */
/* 0x000fe200078e0210 */
/*0440*/ LDG.E R22, [R12.64+0x28] ; /* 0x000028040c167981 */
/* 0x000f66000c1e1900 */
/*0450*/ FFMA R26, R9, R8, R26 ; /* 0x00000008091a7223 */
/* 0x000fe2000000001a */
/*0460*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000322000c1e1900 */
/*0470*/ IMAD.WIDE R8, R7, 0x4, R18 ; /* 0x0000000407087825 */
/* 0x000fc600078e0212 */
/*0480*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000368000c1e1900 */
/*0490*/ LDG.E R24, [R8.64] ; /* 0x0000000408187981 */
/* 0x000568000c1e1900 */
/*04a0*/ LDG.E R15, [R12.64+0x2c] ; /* 0x00002c040c0f7981 */
/* 0x001f62000c1e1900 */
/*04b0*/ FFMA R26, R11, R10, R26 ; /* 0x0000000a0b1a7223 */
/* 0x000fe4000000001a */
/*04c0*/ IMAD.WIDE R10, R7, 0x4, R8 ; /* 0x00000004070a7825 */
/* 0x000fe200078e0208 */
/*04d0*/ LDG.E R17, [R12.64+0x30] ; /* 0x000030040c117981 */
/* 0x002f66000c1e1900 */
/*04e0*/ FFMA R26, R21, R20, R26 ; /* 0x00000014151a7223 */
/* 0x004fc4000000001a */
/*04f0*/ IMAD.WIDE R20, R7, 0x4, R10 ; /* 0x0000000407147825 */
/* 0x000fe400078e020a */
/*0500*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x0000a4000c1e1900 */
/*0510*/ FFMA R28, R29, R28, R26 ; /* 0x0000001c1d1c7223 */
/* 0x000fe4000000001a */
/*0520*/ IMAD.WIDE R26, R7.reuse, 0x4, R20 ; /* 0x00000004071a7825 */
/* 0x040fe200078e0214 */
/*0530*/ LDG.E R29, [R12.64+0x34] ; /* 0x000034040c1d7981 */
/* 0x000ea8000c1e1900 */
/*0540*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x0002a2000c1e1900 */
/*0550*/ IMAD.WIDE R8, R7, 0x4, R26 ; /* 0x0000000407087825 */
/* 0x000fc600078e021a */
/*0560*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */
/* 0x0006a8000c1e1900 */
/*0570*/ LDG.E R11, [R8.64] ; /* 0x00000004080b7981 */
/* 0x0010a8000c1e1900 */
/*0580*/ LDG.E R21, [R12.64+0x38] ; /* 0x000038040c157981 */
/* 0x002ea8000c1e1900 */
/*0590*/ LDG.E R26, [R12.64+0x3c] ; /* 0x00003c040c1a7981 */
/* 0x008ee2000c1e1900 */
/*05a0*/ FFMA R14, R14, R23, R28 ; /* 0x000000170e0e7223 */
/* 0x000fc8000000001c */
/*05b0*/ FFMA R25, R16, R25, R14 ; /* 0x0000001910197223 */
/* 0x010fe2000000000e */
/*05c0*/ IADD3 R5, R5, -0x10, RZ ; /* 0xfffffff005057810 */
/* 0x000fc60007ffe0ff */
/*05d0*/ FFMA R18, R18, R22, R25 ; /* 0x0000001612127223 */
/* 0x020fe20000000019 */
/*05e0*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */
/* 0x000fc60003f24270 */
/*05f0*/ FFMA R15, R24, R15, R18 ; /* 0x0000000f180f7223 */
/* 0x000fe20000000012 */
/*0600*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0610*/ IMAD.WIDE R8, R7, 0x4, R8 ; /* 0x0000000407087825 */
/* 0x001fc600078e0208 */
/*0620*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0630*/ IADD3 R2, R2, 0x10, RZ ; /* 0x0000001002027810 */
/* 0x000fe20007ffe0ff */
/*0640*/ FFMA R10, R10, R17, R15 ; /* 0x000000110a0a7223 */
/* 0x004fc8000000000f */
/*0650*/ FFMA R10, R20, R29, R10 ; /* 0x0000001d140a7223 */
/* 0x000fc8000000000a */
/*0660*/ FFMA R10, R19, R21, R10 ; /* 0x00000015130a7223 */
/* 0x000fc8000000000a */
/*0670*/ FFMA R24, R11, R26, R10 ; /* 0x0000001a0b187223 */
/* 0x008fe2000000000a */
/*0680*/ @P1 BRA 0x1f0 ; /* 0xfffffb6000001947 */
/* 0x000fea000383ffff */
/*0690*/ ISETP.GT.AND P1, PT, R5, 0x4, PT ; /* 0x000000040500780c */
/* 0x000fda0003f24270 */
/*06a0*/ @!P1 BRA 0x940 ; /* 0x0000029000009947 */
/* 0x000fea0003800000 */
/*06b0*/ MOV R7, c[0x0][0x170] ; /* 0x00005c0000077a02 */
/* 0x000fe20000000f00 */
/*06c0*/ LDG.E R23, [R8.64] ; /* 0x0000000408177981 */
/* 0x0000a2000c1e1900 */
/*06d0*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe40008000f00 */
/*06e0*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe20008000f00 */
/*06f0*/ IMAD.WIDE R16, R7, 0x4, R8 ; /* 0x0000000407107825 */
/* 0x000fc800078e0208 */
/*0700*/ IMAD.WIDE R10, R6, 0x4, R10 ; /* 0x00000004060a7825 */
/* 0x000fc800078e020a */
/*0710*/ IMAD.WIDE R12, R7.reuse, 0x4, R16 ; /* 0x00000004070c7825 */
/* 0x040fe200078e0210 */
/*0720*/ LDG.E R22, [R10.64] ; /* 0x000000040a167981 */
/* 0x000ea8000c1e1900 */
/*0730*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x0002e2000c1e1900 */
/*0740*/ IMAD.WIDE R14, R7, 0x4, R12 ; /* 0x00000004070e7825 */
/* 0x000fc600078e020c */
/*0750*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */
/* 0x000ee6000c1e1900 */
/*0760*/ IMAD.WIDE R18, R7.reuse, 0x4, R14 ; /* 0x0000000407127825 */
/* 0x040fe200078e020e */
/*0770*/ LDG.E R26, [R12.64] ; /* 0x000000040c1a7981 */
/* 0x000968000c1e1900 */
/*0780*/ LDG.E R27, [R10.64+0x8] ; /* 0x000008040a1b7981 */
/* 0x000f62000c1e1900 */
/*0790*/ IMAD.WIDE R20, R7, 0x4, R18 ; /* 0x0000000407147825 */
/* 0x000fc600078e0212 */
/*07a0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000368000c1e1900 */
/*07b0*/ LDG.E R29, [R10.64+0xc] ; /* 0x00000c040a1d7981 */
/* 0x000f62000c1e1900 */
/*07c0*/ IMAD.WIDE R8, R7, 0x4, R20 ; /* 0x0000000407087825 */
/* 0x001fc600078e0214 */
/*07d0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000168000c1e1900 */
/*07e0*/ LDG.E R28, [R10.64+0x10] ; /* 0x000010040a1c7981 */
/* 0x000f62000c1e1900 */
/*07f0*/ IMAD.WIDE R12, R7, 0x4, R8 ; /* 0x00000004070c7825 */
/* 0x010fc600078e0208 */
/*0800*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000968000c1e1900 */
/*0810*/ LDG.E R15, [R10.64+0x14] ; /* 0x000014040a0f7981 */
/* 0x002f68000c1e1900 */
/*0820*/ LDG.E R17, [R8.64] ; /* 0x0000000408117981 */
/* 0x000368000c1e1900 */
/*0830*/ LDG.E R21, [R10.64+0x1c] ; /* 0x00001c040a157981 */
/* 0x010f28000c1e1900 */
/*0840*/ LDG.E R19, [R12.64] ; /* 0x000000040c137981 */
/* 0x001f28000c1e1900 */
/*0850*/ LDG.E R8, [R10.64+0x18] ; /* 0x000018040a087981 */
/* 0x002f22000c1e1900 */
/*0860*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*0870*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*0880*/ IADD3 R2, R2, 0x8, RZ ; /* 0x0000000802027810 */
/* 0x000fe40007ffe0ff */
/*0890*/ IADD3 R5, R5, -0x8, RZ ; /* 0xfffffff805057810 */
/* 0x000fe20007ffe0ff */
/*08a0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08b0*/ FFMA R22, R23, R22, R24 ; /* 0x0000001617167223 */
/* 0x004fc80000000018 */
/*08c0*/ FFMA R16, R16, R25, R22 ; /* 0x0000001910107223 */
/* 0x008fc80000000016 */
/*08d0*/ FFMA R16, R26, R27, R16 ; /* 0x0000001b1a107223 */
/* 0x020fc80000000010 */
/*08e0*/ FFMA R29, R14, R29, R16 ; /* 0x0000001d0e1d7223 */
/* 0x000fc80000000010 */
/*08f0*/ FFMA R18, R18, R28, R29 ; /* 0x0000001c12127223 */
/* 0x000fc8000000001d */
/*0900*/ FFMA R15, R20, R15, R18 ; /* 0x0000000f140f7223 */
/* 0x000fc80000000012 */
/*0910*/ FFMA R24, R17, R8, R15 ; /* 0x0000000811187223 */
/* 0x010fe4000000000f */
/*0920*/ IMAD.WIDE R8, R7, 0x4, R12 ; /* 0x0000000407087825 */
/* 0x000fc800078e020c */
/*0930*/ FFMA R24, R19, R21, R24 ; /* 0x0000001513187223 */
/* 0x000fe40000000018 */
/*0940*/ ISETP.NE.OR P0, PT, R5, RZ, P0 ; /* 0x000000ff0500720c */
/* 0x000fda0000705670 */
/*0950*/ @!P0 BRA 0xb00 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0960*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe40008000f00 */
/*0970*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe40008000f00 */
/*0980*/ MOV R7, c[0x0][0x170] ; /* 0x00005c0000077a02 */
/* 0x000fc60000000f00 */
/*0990*/ IMAD.WIDE R10, R6, 0x4, R10 ; /* 0x00000004060a7825 */
/* 0x000fc800078e020a */
/*09a0*/ IMAD.WIDE R16, R7.reuse, 0x4, R8 ; /* 0x0000000407107825 */
/* 0x040fe200078e0208 */
/*09b0*/ LDG.E R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea8000c1e1900 */
/*09c0*/ LDG.E R9, [R8.64] ; /* 0x0000000408097981 */
/* 0x000ea2000c1e1900 */
/*09d0*/ IMAD.WIDE R12, R7, 0x4, R16 ; /* 0x00000004070c7825 */
/* 0x000fc600078e0210 */
/*09e0*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ee8000c1e1900 */
/*09f0*/ LDG.E R19, [R10.64+0x4] ; /* 0x000004040a137981 */
/* 0x000ee2000c1e1900 */
/*0a00*/ IMAD.WIDE R14, R7, 0x4, R12 ; /* 0x00000004070e7825 */
/* 0x000fc600078e020c */
/*0a10*/ LDG.E R21, [R12.64] ; /* 0x000000040c157981 */
/* 0x000f28000c1e1900 */
/*0a20*/ LDG.E R20, [R10.64+0x8] ; /* 0x000008040a147981 */
/* 0x000f28000c1e1900 */
/*0a30*/ LDG.E R22, [R10.64+0xc] ; /* 0x00000c040a167981 */
/* 0x000f68000c1e1900 */
/*0a40*/ LDG.E R23, [R14.64] ; /* 0x000000040e177981 */
/* 0x000f62000c1e1900 */
/*0a50*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */
/* 0x000fc80007ffe0ff */
/*0a60*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f05270 */
/*0a70*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0a80*/ IADD3 R2, R2, 0x4, RZ ; /* 0x0000000402027810 */
/* 0x000fc60007ffe0ff */
/*0a90*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0aa0*/ FFMA R18, R9, R18, R24 ; /* 0x0000001209127223 */
/* 0x004fc80000000018 */
/*0ab0*/ FFMA R18, R17, R19, R18 ; /* 0x0000001311127223 */
/* 0x008fe40000000012 */
/*0ac0*/ IMAD.WIDE R8, R7, 0x4, R14 ; /* 0x0000000407087825 */
/* 0x000fc800078e020e */
/*0ad0*/ FFMA R18, R21, R20, R18 ; /* 0x0000001415127223 */
/* 0x010fc80000000012 */
/*0ae0*/ FFMA R24, R23, R22, R18 ; /* 0x0000001617187223 */
/* 0x020fe20000000012 */
/*0af0*/ @P0 BRA 0x960 ; /* 0xfffffe6000000947 */
/* 0x000fea000383ffff */
/*0b00*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fda0003f05270 */
/*0b10*/ @!P0 BRA 0xc10 ; /* 0x000000f000008947 */
/* 0x000fea0003800000 */
/*0b20*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0b30*/ IMAD R6, R0, c[0x0][0x160], R2 ; /* 0x0000580000067a24 */
/* 0x000fe400078e0202 */
/*0b40*/ IMAD R2, R2, c[0x0][0x170], R3 ; /* 0x00005c0002027a24 */
/* 0x000fce00078e0203 */
/*0b50*/ IMAD.WIDE R6, R6, R9, c[0x0][0x168] ; /* 0x00005a0006067625 */
/* 0x000fc800078e0209 */
/*0b60*/ IMAD.WIDE R8, R2, R9, c[0x0][0x178] ; /* 0x00005e0002087625 */
/* 0x000fca00078e0209 */
/*0b70*/ LDG.E R5, [R8.64] ; /* 0x0000000408057981 */
/* 0x0000a8000c1e1900 */
/*0b80*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */
/* 0x0002a2000c1e1900 */
/*0b90*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */
/* 0x000fe40007ffe0ff */
/*0ba0*/ MOV R11, c[0x0][0x170] ; /* 0x00005c00000b7a02 */
/* 0x000fe40000000f00 */
/*0bb0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fc60003f05270 */
/*0bc0*/ IMAD.WIDE R8, R11, 0x4, R8 ; /* 0x000000040b087825 */
/* 0x001fe200078e0208 */
/*0bd0*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fc80007f3e0ff */
/*0be0*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0bf0*/ FFMA R24, R5, R2, R24 ; /* 0x0000000205187223 */
/* 0x004fc80000000018 */
/*0c00*/ @P0 BRA 0xb70 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*0c10*/ MOV R2, 0x4 ; /* 0x0000000400027802 */
/* 0x000fe20000000f00 */
/*0c20*/ IMAD R3, R0, c[0x0][0x180], R3 ; /* 0x0000600000037a24 */
/* 0x000fc800078e0203 */
/*0c30*/ IMAD.WIDE R2, R3, R2, c[0x0][0x188] ; /* 0x0000620003027625 */
/* 0x000fca00078e0202 */
/*0c40*/ STG.E [R2.64], R24 ; /* 0x0000001802007986 */
/* 0x000fe2000c101904 */
/*0c50*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c60*/ BRA 0xc60; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <iostream>
#include <math.h>
#include <cstdio>
using namespace std;
// Thread block size
const int blockSize = 16;
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row*M.width + col);
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// CPU matrix multiplication for evaluating results
void cpu_matrix_multi(float *matA, float *matB, float *matC, int m, int k, int n) {
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
float tmp = 0.0;
for (int l = 0; l < k; l++) {
tmp += matA[i*k + l] * matB[l*n + j];
}
matC[i*n + j] = tmp;
}
}
}
// Matrix multiplication kernel called by MatMul()
__global__
void MatMulKernel_naive(const Matrix A, const Matrix B, Matrix C) {
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.width; e++)
Cvalue += A.elements[row*A.width+e] * B.elements[e*B.width+col];
C.elements[row*C.width+col] = Cvalue;
}
// Matrix multiplication - host code
// Matrix dimensions are assumed to be multiples of blockSize
void MatMul(const Matrix A, const Matrix B, Matrix C) {
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.width;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(B.width/dimBlock.x, A.height/dimBlock.y, 1);
MatMulKernel_naive<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device
cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
int main() {
// Initialize A and B elements on host memory
Matrix h_A;
h_A.height = 1024; h_A.width = 1024;
float* h_matA = new float[h_A.height * h_A.width];
std::srand(1103);
for (int i = 0; i < h_A.height; i++)
for (int j = 0; j < h_A.width; j++)
h_matA[i*h_A.width+j] = float(std::rand())/float(RAND_MAX);
h_A.elements = h_matA;
Matrix h_B;
h_B.height = 1024; h_B.width = 1024;
float* h_matB = new float[h_B.height * h_B.width];
for (int i = 0; i < h_B.height; i++)
for (int j = 0; j < h_B.width; j++)
h_matB[i*h_B.width+j] = float(std::rand())/float(RAND_MAX);
h_B.elements = h_matB;
// Matrix C size
Matrix h_C;
h_C.height = h_A.height; h_C.width = h_B.width;
float* h_matC = new float[h_A.height * h_B.width];
h_C.elements = h_matC;
// Call MatMul()
MatMul(h_A, h_B, h_C);
// Evaluate results
float* h_matC_cpu = new float[h_A.height * h_B.width];
// cpu_matrix_multi(h_matA, h_matB, h_matC_cpu, h_A.height, h_A.width, h_B.width);
cpu_matrix_multi(h_A.elements, h_B.elements, h_matC_cpu, h_A.height, h_A.width, h_B.width);
bool res_flag = false;
float resol = 0.000001;
for (int i = 0; i < h_C.height; i++) {
for (int j = 0; j < h_C.width; j++) {
if (fabs(*(h_C.elements+i*h_C.width+j) - h_matC[i*h_C.width+j]) > resol)
res_flag = true;
}
}
if (res_flag == false)
cout << "Matrix multiplication by GPU is right! " << endl;
else
cout << "Results are not right! " << endl;
// Free memory on host
delete [] h_matA;
delete [] h_matB;
delete [] h_matC;
delete [] h_matC_cpu;
return 0;
} | .file "tmpxft_000c4f4b_00000000-6_gpucppMatMulNaive.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z16cpu_matrix_multiPfS_S_iii
.type _Z16cpu_matrix_multiPfS_S_iii, @function
_Z16cpu_matrix_multiPfS_S_iii:
.LFB3669:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
movq %rsi, -16(%rsp)
movq %rdx, -8(%rsp)
movl %ecx, -20(%rsp)
testl %ecx, %ecx
jle .L3
movq %rdi, %rbx
movl %r8d, %r10d
movl %r9d, %r14d
movslq %r9d, %rbp
leaq 0(,%rbp,4), %rsi
movl $0, %r13d
movl $0, %r12d
movl $0, %edx
movslq %r8d, %r15
movq %r15, %rcx
jmp .L5
.L6:
movss (%rax), %xmm0
mulss (%rdx), %xmm0
addss %xmm0, %xmm1
addq $4, %rax
addq %rsi, %rdx
cmpq %rdi, %rax
jne .L6
.L8:
movss %xmm1, (%r11,%r8,4)
addq $1, %r8
addq $4, %r9
cmpq %r8, %rbp
je .L13
.L9:
movq %r9, %rdx
movq %r15, %rax
pxor %xmm1, %xmm1
testl %r10d, %r10d
jg .L6
jmp .L8
.L13:
movl -24(%rsp), %edx
.L7:
addl $1, %edx
addl %r14d, %r12d
addl %r10d, %r13d
cmpl %edx, -20(%rsp)
je .L3
.L5:
testl %r14d, %r14d
jle .L7
movq -16(%rsp), %r9
movslq %r13d, %rax
leaq (%rbx,%rax,4), %r15
addq %rcx, %rax
leaq (%rbx,%rax,4), %rdi
movslq %r12d, %rax
movq -8(%rsp), %r11
leaq (%r11,%rax,4), %r11
movl $0, %r8d
movl %edx, -24(%rsp)
jmp .L9
.L3:
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z16cpu_matrix_multiPfS_S_iii, .-_Z16cpu_matrix_multiPfS_S_iii
.globl _Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_
.type _Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_, @function
_Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_:
.LFB3696:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movq %rdi, 64(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 80(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 120
pushq 8(%rsp)
.cfi_def_cfa_offset 128
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z18MatMulKernel_naive6MatrixS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_, .-_Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_
.globl _Z18MatMulKernel_naive6MatrixS_S_
.type _Z18MatMulKernel_naive6MatrixS_S_, @function
_Z18MatMulKernel_naive6MatrixS_S_:
.LFB3697:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %rdi, 32(%rsp)
movq %rsi, 40(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq %rsp, %rdx
leaq 16(%rsp), %rsi
leaq 32(%rsp), %rdi
call _Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z18MatMulKernel_naive6MatrixS_S_, .-_Z18MatMulKernel_naive6MatrixS_S_
.globl _Z6MatMul6MatrixS_S_
.type _Z6MatMul6MatrixS_S_, @function
_Z6MatMul6MatrixS_S_:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $168, %rsp
.cfi_def_cfa_offset 224
movq %rdi, %rbp
movq %rsi, %r14
movq %rdx, %r12
movq %rcx, (%rsp)
movq %r8, %rbx
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
movq %rdi, %r13
sarq $32, %r13
movq %r8, %r15
sarq $32, %r15
movl %edi, 48(%rsp)
movl %r13d, 52(%rsp)
imull %r13d, %ebp
movslq %ebp, %rbp
salq $2, %rbp
leaq 56(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq %r14, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl %r12d, 64(%rsp)
movl %r12d, 68(%rsp)
movq %r12, %rbp
sarq $32, %rbp
imull %r12d, %ebp
movslq %ebp, %rbp
salq $2, %rbp
leaq 72(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq (%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebx, 80(%rsp)
movl %r15d, 84(%rsp)
imull %r15d, %ebx
movslq %ebx, %rbx
salq $2, %rbx
leaq 88(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
shrl $4, %r12d
movl %r12d, 36(%rsp)
shrl $4, %r13d
movl %r13d, 40(%rsp)
movl $16, 24(%rsp)
movl $16, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L25:
movl $2, %ecx
movq %rbx, %rdx
movq 88(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
call cudaThreadSynchronize@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L29
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movdqa 48(%rsp), %xmm0
movaps %xmm0, 96(%rsp)
movdqa 64(%rsp), %xmm1
movaps %xmm1, 112(%rsp)
movdqa 80(%rsp), %xmm2
movaps %xmm2, 128(%rsp)
leaq 128(%rsp), %rdx
leaq 112(%rsp), %rsi
leaq 96(%rsp), %rdi
call _Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_
jmp .L25
.L29:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size _Z6MatMul6MatrixS_S_, .-_Z6MatMul6MatrixS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "Matrix multiplication by GPU is right! "
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "Results are not right! "
.text
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbp
movl $1103, %edi
call srand@PLT
leaq 4096(%rbp), %r12
leaq 4198400(%rbp), %r13
.L31:
leaq -4096(%r12), %rbx
.L32:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC1(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L32
addq $4096, %r12
cmpq %r13, %r12
jne .L31
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbx
leaq 4096(%rax), %r13
leaq 4198400(%rax), %r14
.L34:
leaq -4096(%r13), %r12
.L35:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC1(%rip), %xmm0
movss %xmm0, (%r12)
addq $4, %r12
cmpq %r13, %r12
jne .L35
addq $4096, %r13
cmpq %r14, %r13
jne .L34
movl $4194304, %edi
call _Znam@PLT
movq %rax, %r12
movabsq $4398046511104, %r8
movq %r8, %rdi
orq $1024, %rdi
movq %rdi, %rdx
movq %rdi, %r8
movq %rax, %r9
movq %rbx, %rcx
movq %rbp, %rsi
call _Z6MatMul6MatrixS_S_
movl $4194304, %edi
call _Znam@PLT
movq %rax, %r13
movl $1024, %r9d
movl $1024, %r8d
movl $1024, %ecx
movq %rax, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z16cpu_matrix_multiPfS_S_iii
leaq 4096(%r12), %rcx
leaq 4198400(%r12), %rdi
movl $0, %edx
movss .LC2(%rip), %xmm1
movl $1, %esi
.L37:
leaq -4096(%rcx), %rax
.L39:
movss (%rax), %xmm0
subss %xmm0, %xmm0
andps %xmm1, %xmm0
ucomiss .LC3(%rip), %xmm0
cmova %esi, %edx
addq $4, %rax
cmpq %rax, %rcx
jne .L39
addq $4096, %rcx
cmpq %rcx, %rdi
jne .L37
testb %dl, %dl
jne .L41
leaq .LC4(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
.L42:
movq %rbp, %rdi
call _ZdaPv@PLT
movq %rbx, %rdi
call _ZdaPv@PLT
movq %r12, %rdi
call _ZdaPv@PLT
movq %r13, %rdi
call _ZdaPv@PLT
movl $0, %eax
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
jmp .L42
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC6:
.string "_Z18MatMulKernel_naive6MatrixS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z18MatMulKernel_naive6MatrixS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 805306368
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC2:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst4
.align 4
.LC3:
.long 897988541
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <iostream>
#include <math.h>
#include <cstdio>
using namespace std;
// Thread block size
const int blockSize = 16;
// Matrices are stored in row-major order:
// M(row, clo) = *(M.elements + row*M.width + col);
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// CPU matrix multiplication for evaluating results
void cpu_matrix_multi(float *matA, float *matB, float *matC, int m, int k, int n) {
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
float tmp = 0.0;
for (int l = 0; l < k; l++) {
tmp += matA[i*k + l] * matB[l*n + j];
}
matC[i*n + j] = tmp;
}
}
}
// Matrix multiplication kernel called by MatMul()
__global__
void MatMulKernel_naive(const Matrix A, const Matrix B, Matrix C) {
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.width; e++)
Cvalue += A.elements[row*A.width+e] * B.elements[e*B.width+col];
C.elements[row*C.width+col] = Cvalue;
}
// Matrix multiplication - host code
// Matrix dimensions are assumed to be multiples of blockSize
void MatMul(const Matrix A, const Matrix B, Matrix C) {
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.width;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(B.width/dimBlock.x, A.height/dimBlock.y, 1);
MatMulKernel_naive<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device
cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
int main() {
// Initialize A and B elements on host memory
Matrix h_A;
h_A.height = 1024; h_A.width = 1024;
float* h_matA = new float[h_A.height * h_A.width];
std::srand(1103);
for (int i = 0; i < h_A.height; i++)
for (int j = 0; j < h_A.width; j++)
h_matA[i*h_A.width+j] = float(std::rand())/float(RAND_MAX);
h_A.elements = h_matA;
Matrix h_B;
h_B.height = 1024; h_B.width = 1024;
float* h_matB = new float[h_B.height * h_B.width];
for (int i = 0; i < h_B.height; i++)
for (int j = 0; j < h_B.width; j++)
h_matB[i*h_B.width+j] = float(std::rand())/float(RAND_MAX);
h_B.elements = h_matB;
// Matrix C size
Matrix h_C;
h_C.height = h_A.height; h_C.width = h_B.width;
float* h_matC = new float[h_A.height * h_B.width];
h_C.elements = h_matC;
// Call MatMul()
MatMul(h_A, h_B, h_C);
// Evaluate results
float* h_matC_cpu = new float[h_A.height * h_B.width];
// cpu_matrix_multi(h_matA, h_matB, h_matC_cpu, h_A.height, h_A.width, h_B.width);
cpu_matrix_multi(h_A.elements, h_B.elements, h_matC_cpu, h_A.height, h_A.width, h_B.width);
bool res_flag = false;
float resol = 0.000001;
for (int i = 0; i < h_C.height; i++) {
for (int j = 0; j < h_C.width; j++) {
if (fabs(*(h_C.elements+i*h_C.width+j) - h_matC[i*h_C.width+j]) > resol)
res_flag = true;
}
}
if (res_flag == false)
cout << "Matrix multiplication by GPU is right! " << endl;
else
cout << "Results are not right! " << endl;
// Free memory on host
delete [] h_matA;
delete [] h_matB;
delete [] h_matC;
delete [] h_matC_cpu;
return 0;
} | #include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
#include <cstdio>
using namespace std;
// Thread block size
const int blockSize = 16;
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row*M.width + col);
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// CPU matrix multiplication for evaluating results
void cpu_matrix_multi(float *matA, float *matB, float *matC, int m, int k, int n) {
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
float tmp = 0.0;
for (int l = 0; l < k; l++) {
tmp += matA[i*k + l] * matB[l*n + j];
}
matC[i*n + j] = tmp;
}
}
}
// Matrix multiplication kernel called by MatMul()
__global__
void MatMulKernel_naive(const Matrix A, const Matrix B, Matrix C) {
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.width; e++)
Cvalue += A.elements[row*A.width+e] * B.elements[e*B.width+col];
C.elements[row*C.width+col] = Cvalue;
}
// Matrix multiplication - host code
// Matrix dimensions are assumed to be multiples of blockSize
void MatMul(const Matrix A, const Matrix B, Matrix C) {
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipMalloc(&d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.width;
size = B.width * B.height * sizeof(float);
hipMalloc(&d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
hipMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(B.width/dimBlock.x, A.height/dimBlock.y, 1);
MatMulKernel_naive<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device
hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
int main() {
// Initialize A and B elements on host memory
Matrix h_A;
h_A.height = 1024; h_A.width = 1024;
float* h_matA = new float[h_A.height * h_A.width];
std::srand(1103);
for (int i = 0; i < h_A.height; i++)
for (int j = 0; j < h_A.width; j++)
h_matA[i*h_A.width+j] = float(std::rand())/float(RAND_MAX);
h_A.elements = h_matA;
Matrix h_B;
h_B.height = 1024; h_B.width = 1024;
float* h_matB = new float[h_B.height * h_B.width];
for (int i = 0; i < h_B.height; i++)
for (int j = 0; j < h_B.width; j++)
h_matB[i*h_B.width+j] = float(std::rand())/float(RAND_MAX);
h_B.elements = h_matB;
// Matrix C size
Matrix h_C;
h_C.height = h_A.height; h_C.width = h_B.width;
float* h_matC = new float[h_A.height * h_B.width];
h_C.elements = h_matC;
// Call MatMul()
MatMul(h_A, h_B, h_C);
// Evaluate results
float* h_matC_cpu = new float[h_A.height * h_B.width];
// cpu_matrix_multi(h_matA, h_matB, h_matC_cpu, h_A.height, h_A.width, h_B.width);
cpu_matrix_multi(h_A.elements, h_B.elements, h_matC_cpu, h_A.height, h_A.width, h_B.width);
bool res_flag = false;
float resol = 0.000001;
for (int i = 0; i < h_C.height; i++) {
for (int j = 0; j < h_C.width; j++) {
if (fabs(*(h_C.elements+i*h_C.width+j) - h_matC[i*h_C.width+j]) > resol)
res_flag = true;
}
}
if (res_flag == false)
cout << "Matrix multiplication by GPU is right! " << endl;
else
cout << "Results are not right! " << endl;
// Free memory on host
delete [] h_matA;
delete [] h_matB;
delete [] h_matC;
delete [] h_matC_cpu;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
#include <cstdio>
using namespace std;
// Thread block size
const int blockSize = 16;
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row*M.width + col);
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// CPU matrix multiplication for evaluating results
void cpu_matrix_multi(float *matA, float *matB, float *matC, int m, int k, int n) {
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
float tmp = 0.0;
for (int l = 0; l < k; l++) {
tmp += matA[i*k + l] * matB[l*n + j];
}
matC[i*n + j] = tmp;
}
}
}
// Matrix multiplication kernel called by MatMul()
__global__
void MatMulKernel_naive(const Matrix A, const Matrix B, Matrix C) {
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.width; e++)
Cvalue += A.elements[row*A.width+e] * B.elements[e*B.width+col];
C.elements[row*C.width+col] = Cvalue;
}
// Matrix multiplication - host code
// Matrix dimensions are assumed to be multiples of blockSize
void MatMul(const Matrix A, const Matrix B, Matrix C) {
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipMalloc(&d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.width;
size = B.width * B.height * sizeof(float);
hipMalloc(&d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
hipMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(B.width/dimBlock.x, A.height/dimBlock.y, 1);
MatMulKernel_naive<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device
hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
int main() {
// Initialize A and B elements on host memory
Matrix h_A;
h_A.height = 1024; h_A.width = 1024;
float* h_matA = new float[h_A.height * h_A.width];
std::srand(1103);
for (int i = 0; i < h_A.height; i++)
for (int j = 0; j < h_A.width; j++)
h_matA[i*h_A.width+j] = float(std::rand())/float(RAND_MAX);
h_A.elements = h_matA;
Matrix h_B;
h_B.height = 1024; h_B.width = 1024;
float* h_matB = new float[h_B.height * h_B.width];
for (int i = 0; i < h_B.height; i++)
for (int j = 0; j < h_B.width; j++)
h_matB[i*h_B.width+j] = float(std::rand())/float(RAND_MAX);
h_B.elements = h_matB;
// Matrix C size
Matrix h_C;
h_C.height = h_A.height; h_C.width = h_B.width;
float* h_matC = new float[h_A.height * h_B.width];
h_C.elements = h_matC;
// Call MatMul()
MatMul(h_A, h_B, h_C);
// Evaluate results
float* h_matC_cpu = new float[h_A.height * h_B.width];
// cpu_matrix_multi(h_matA, h_matB, h_matC_cpu, h_A.height, h_A.width, h_B.width);
cpu_matrix_multi(h_A.elements, h_B.elements, h_matC_cpu, h_A.height, h_A.width, h_B.width);
bool res_flag = false;
float resol = 0.000001;
for (int i = 0; i < h_C.height; i++) {
for (int j = 0; j < h_C.width; j++) {
if (fabs(*(h_C.elements+i*h_C.width+j) - h_matC[i*h_C.width+j]) > resol)
res_flag = true;
}
}
if (res_flag == false)
cout << "Matrix multiplication by GPU is right! " << endl;
else
cout << "Results are not right! " << endl;
// Free memory on host
delete [] h_matA;
delete [] h_matB;
delete [] h_matC;
delete [] h_matC_cpu;
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z18MatMulKernel_naive6MatrixS_S_
.globl _Z18MatMulKernel_naive6MatrixS_S_
.p2align 8
.type _Z18MatMulKernel_naive6MatrixS_S_,@function
_Z18MatMulKernel_naive6MatrixS_S_:
s_clause 0x2
s_load_b32 s4, s[0:1], 0x3c
s_load_b32 s6, s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x28
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s5, s4, 16
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, s5, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s4, v[3:4]
s_cmp_lt_i32 s6, 1
s_cbranch_scc1 .LBB0_3
s_load_b64 s[8:9], s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_2)
v_mul_lo_u32 v2, v0, s6
s_clause 0x1
s_load_b32 s7, s[0:1], 0x10
s_load_b64 s[4:5], s[0:1], 0x18
v_mov_b32_e32 v6, 0
v_mov_b32_e32 v4, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s8, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v5, 31, v4
s_add_i32 s6, s6, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s6, 0
v_lshlrev_b64 v[7:8], 2, v[4:5]
v_add_nc_u32_e32 v4, s7, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s4, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v8, vcc_lo
global_load_b32 v5, v[2:3], off
global_load_b32 v7, v[7:8], off
v_add_co_u32 v2, vcc_lo, v2, 4
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v5, v7
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v6, 0
.LBB0_4:
s_load_b32 s0, s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v0, s0, v[1:2]
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b32 v[0:1], v6, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18MatMulKernel_naive6MatrixS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z18MatMulKernel_naive6MatrixS_S_, .Lfunc_end0-_Z18MatMulKernel_naive6MatrixS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 16
.value_kind: by_value
- .offset: 16
.size: 16
.value_kind: by_value
- .offset: 32
.size: 16
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18MatMulKernel_naive6MatrixS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z18MatMulKernel_naive6MatrixS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
#include <hip/hip_runtime.h>
#include <iostream>
#include <math.h>
#include <cstdio>
using namespace std;
// Thread block size
const int blockSize = 16;
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row*M.width + col);
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// CPU matrix multiplication for evaluating results
void cpu_matrix_multi(float *matA, float *matB, float *matC, int m, int k, int n) {
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
float tmp = 0.0;
for (int l = 0; l < k; l++) {
tmp += matA[i*k + l] * matB[l*n + j];
}
matC[i*n + j] = tmp;
}
}
}
// Matrix multiplication kernel called by MatMul()
__global__
void MatMulKernel_naive(const Matrix A, const Matrix B, Matrix C) {
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.width; e++)
Cvalue += A.elements[row*A.width+e] * B.elements[e*B.width+col];
C.elements[row*C.width+col] = Cvalue;
}
// Matrix multiplication - host code
// Matrix dimensions are assumed to be multiples of blockSize
void MatMul(const Matrix A, const Matrix B, Matrix C) {
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipMalloc(&d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.width;
size = B.width * B.height * sizeof(float);
hipMalloc(&d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
hipMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(B.width/dimBlock.x, A.height/dimBlock.y, 1);
MatMulKernel_naive<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device
hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
int main() {
// Initialize A and B elements in host memory
Matrix h_A;
h_A.height = 1024; h_A.width = 1024;
float* h_matA = new float[h_A.height * h_A.width];
std::srand(1103);
for (int i = 0; i < h_A.height; i++)
for (int j = 0; j < h_A.width; j++)
h_matA[i*h_A.width+j] = float(std::rand())/float(RAND_MAX);
h_A.elements = h_matA;
Matrix h_B;
h_B.height = 1024; h_B.width = 1024;
float* h_matB = new float[h_B.height * h_B.width];
for (int i = 0; i < h_B.height; i++)
for (int j = 0; j < h_B.width; j++)
h_matB[i*h_B.width+j] = float(std::rand())/float(RAND_MAX);
h_B.elements = h_matB;
// Matrix C size
Matrix h_C;
h_C.height = h_A.height; h_C.width = h_B.width;
float* h_matC = new float[h_A.height * h_B.width];
h_C.elements = h_matC;
// Call MatMul()
MatMul(h_A, h_B, h_C);
// Evaluate results
float* h_matC_cpu = new float[h_A.height * h_B.width];
// cpu_matrix_multi(h_matA, h_matB, h_matC_cpu, h_A.height, h_A.width, h_B.width);
cpu_matrix_multi(h_A.elements, h_B.elements, h_matC_cpu, h_A.height, h_A.width, h_B.width);
bool res_flag = false;
float resol = 0.000001;
for (int i = 0; i < h_C.height; i++) {
for (int j = 0; j < h_C.width; j++) {
if (fabs(*(h_C.elements+i*h_C.width+j) - h_matC[i*h_C.width+j]) > resol)
res_flag = true;
}
}
if (res_flag == false)
cout << "Matrix multiplication by GPU is right! " << endl;
else
cout << "Results are not right! " << endl;
// Free memory on host
delete [] h_matA;
delete [] h_matB;
delete [] h_matC;
delete [] h_matC_cpu;
return 0;
}
.text
.file "gpucppMatMulNaive.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z16cpu_matrix_multiPfS_S_iii # -- Begin function _Z16cpu_matrix_multiPfS_S_iii
.p2align 4, 0x90
.type _Z16cpu_matrix_multiPfS_S_iii,@function
_Z16cpu_matrix_multiPfS_S_iii: # @_Z16cpu_matrix_multiPfS_S_iii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdx, -8(%rsp) # 8-byte Spill
movq %rsi, -16(%rsp) # 8-byte Spill
movq %rdi, -24(%rsp) # 8-byte Spill
testl %ecx, %ecx
jle .LBB0_9
# %bb.1: # %.preheader27.lr.ph
movslq %r9d, %rdi
movl %ecx, %ecx
movl %edi, %r10d
movl %r8d, %r11d
leaq (,%rdi,4), %rbx
xorl %r14d, %r14d
xorl %r15d, %r15d
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_8: # %._crit_edge31
# in Loop: Header=BB0_2 Depth=1
incq %r15
addl %r8d, %r14d
cmpq %rcx, %r15
je .LBB0_9
.LBB0_2: # %.preheader27
# =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
# Child Loop BB0_6 Depth 3
testl %r9d, %r9d
jle .LBB0_8
# %bb.3: # %.preheader.lr.ph
# in Loop: Header=BB0_2 Depth=1
movl %r14d, %eax
movq -24(%rsp), %rdx # 8-byte Reload
leaq (%rdx,%rax,4), %r12
movq %r15, %rax
imulq %rdi, %rax
movq -8(%rsp), %rdx # 8-byte Reload
leaq (%rdx,%rax,4), %r13
movq -16(%rsp), %rsi # 8-byte Reload
xorl %eax, %eax
jmp .LBB0_4
.p2align 4, 0x90
.LBB0_7: # %._crit_edge
# in Loop: Header=BB0_4 Depth=2
movss %xmm0, (%r13,%rax,4)
incq %rax
addq $4, %rsi
cmpq %r10, %rax
je .LBB0_8
.LBB0_4: # %.preheader
# Parent Loop BB0_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB0_6 Depth 3
xorps %xmm0, %xmm0
testl %r8d, %r8d
jle .LBB0_7
# %bb.5: # %.lr.ph.preheader
# in Loop: Header=BB0_4 Depth=2
movq %rsi, %rbp
xorl %edx, %edx
.p2align 4, 0x90
.LBB0_6: # %.lr.ph
# Parent Loop BB0_2 Depth=1
# Parent Loop BB0_4 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r12,%rdx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%rbp), %xmm1
addss %xmm1, %xmm0
incq %rdx
addq %rbx, %rbp
cmpq %rdx, %r11
jne .LBB0_6
jmp .LBB0_7
.LBB0_9: # %._crit_edge33
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z16cpu_matrix_multiPfS_S_iii, .Lfunc_end0-_Z16cpu_matrix_multiPfS_S_iii
.cfi_endproc
# -- End function
.globl _Z33__device_stub__MatMulKernel_naive6MatrixS_S_ # -- Begin function _Z33__device_stub__MatMulKernel_naive6MatrixS_S_
.p2align 4, 0x90
.type _Z33__device_stub__MatMulKernel_naive6MatrixS_S_,@function
_Z33__device_stub__MatMulKernel_naive6MatrixS_S_: # @_Z33__device_stub__MatMulKernel_naive6MatrixS_S_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 80(%rsp)
movq %rsi, 88(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z18MatMulKernel_naive6MatrixS_S_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z33__device_stub__MatMulKernel_naive6MatrixS_S_, .Lfunc_end1-_Z33__device_stub__MatMulKernel_naive6MatrixS_S_
.cfi_endproc
# -- End function
.globl _Z6MatMul6MatrixS_S_ # -- Begin function _Z6MatMul6MatrixS_S_
.p2align 4, 0x90
.type _Z6MatMul6MatrixS_S_,@function
_Z6MatMul6MatrixS_S_: # @_Z6MatMul6MatrixS_S_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, 72(%rsp) # 8-byte Spill
movq %r8, %r13
movq %rcx, 64(%rsp) # 8-byte Spill
movq %rdx, %rbp
movq %rsi, 56(%rsp) # 8-byte Spill
movq %rdi, %r12
movq %rdi, %rax
shrq $32, %rax
movq %rdx, %r15
shrq $32, %r15
movq %r8, %r14
shrq $32, %r14
movq %rdi, 40(%rsp)
imull %r12d, %eax
movslq %eax, %rbx
shlq $2, %rbx
leaq 48(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 48(%rsp), %rdi
movq 56(%rsp), %rsi # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movl %ebp, 8(%rsp)
movl %ebp, 12(%rsp)
imull %ebp, %r15d
movslq %r15d, %rbx
shlq $2, %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq 64(%rsp), %rsi # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movq %r13, 24(%rsp)
imull %r13d, %r14d
movslq %r14d, %r14
shlq $2, %r14
leaq 32(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
shrl $4, %ebp
shrq $4, %r12
movabsq $1152921500311879680, %rdi # imm = 0xFFFFFFF00000000
andq %r12, %rdi
orq %rbp, %rdi
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movups 40(%rsp), %xmm0
movups 8(%rsp), %xmm1
movups 24(%rsp), %xmm2
movups %xmm0, 184(%rsp)
movups %xmm1, 168(%rsp)
movups %xmm2, 152(%rsp)
leaq 184(%rsp), %rax
movq %rax, 128(%rsp)
leaq 168(%rsp), %rax
movq %rax, 136(%rsp)
leaq 152(%rsp), %rax
movq %rax, 144(%rsp)
leaq 112(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
movq 96(%rsp), %rcx
movl 104(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z18MatMulKernel_naive6MatrixS_S_, %edi
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
movq 32(%rsp), %rsi
movq 72(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
callq hipDeviceSynchronize
movq 48(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z6MatMul6MatrixS_S_, .Lfunc_end2-_Z6MatMul6MatrixS_S_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI3_2:
.long 0x358637bd # float 9.99999997E-7
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI3_1:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %rbx
movl $1103, %edi # imm = 0x44F
callq srand
xorl %r14d, %r14d
movq %rbx, %r15
.p2align 4, 0x90
.LBB3_1: # %.preheader95
# =>This Loop Header: Depth=1
# Child Loop BB3_2 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_2: # Parent Loop BB3_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI3_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss %xmm0, (%r15,%r12,4)
incq %r12
cmpq $1024, %r12 # imm = 0x400
jne .LBB3_2
# %bb.3: # in Loop: Header=BB3_1 Depth=1
incq %r14
addq $4096, %r15 # imm = 0x1000
cmpq $1024, %r14 # imm = 0x400
jne .LBB3_1
# %bb.4:
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r14
xorl %r15d, %r15d
movq %rax, %r12
.p2align 4, 0x90
.LBB3_5: # %.preheader94
# =>This Loop Header: Depth=1
# Child Loop BB3_6 Depth 2
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_6: # Parent Loop BB3_5 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI3_0(%rip), %xmm0
movss %xmm0, (%r12,%r13,4)
incq %r13
cmpq $1024, %r13 # imm = 0x400
jne .LBB3_6
# %bb.7: # in Loop: Header=BB3_5 Depth=1
incq %r15
addq $4096, %r12 # imm = 0x1000
cmpq $1024, %r15 # imm = 0x400
jne .LBB3_5
# %bb.8: # %.preheader.preheader
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r15
movabsq $4398046512128, %rdi # imm = 0x40000000400
movq %rbx, %rsi
movq %rdi, %rdx
movq %r14, %rcx
movq %rdi, %r8
movq %rax, %r9
callq _Z6MatMul6MatrixS_S_
xorl %eax, %eax
movaps .LCPI3_1(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
movss .LCPI3_2(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
movl $1, %ecx
movq %r15, %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB3_9: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_10 Depth 2
xorl %edi, %edi
.p2align 4, 0x90
.LBB3_10: # Parent Loop BB3_9 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdx,%rdi,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
subss %xmm2, %xmm2
andps %xmm0, %xmm2
ucomiss %xmm1, %xmm2
movzbl %sil, %esi
cmoval %ecx, %esi
incq %rdi
cmpq $1024, %rdi # imm = 0x400
jne .LBB3_10
# %bb.11: # %_Z16cpu_matrix_multiPfS_S_iii.exit
# in Loop: Header=BB3_9 Depth=1
incq %rax
addq $4096, %rdx # imm = 0x1000
cmpq $1024, %rax # imm = 0x400
jne .LBB3_9
# %bb.12:
movl $_ZSt4cout, %edi
testb $1, %sil
jne .LBB3_16
# %bb.13:
movl $.L.str, %esi
movl $39, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB3_21
# %bb.14: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r12)
je .LBB3_19
.LBB3_15:
movzbl 67(%r12), %eax
jmp .LBB3_20
.LBB3_16:
movl $.L.str.1, %esi
movl $23, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB3_21
# %bb.17: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i90
cmpb $0, 56(%r12)
jne .LBB3_15
.LBB3_19:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB3_20: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq %rbx, %rdi
callq _ZdaPv
movq %r14, %rdi
callq _ZdaPv
movq %r15, %rdi
callq _ZdaPv
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB3_21:
.cfi_def_cfa_offset 48
callq _ZSt16__throw_bad_castv
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18MatMulKernel_naive6MatrixS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z18MatMulKernel_naive6MatrixS_S_,@object # @_Z18MatMulKernel_naive6MatrixS_S_
.section .rodata,"a",@progbits
.globl _Z18MatMulKernel_naive6MatrixS_S_
.p2align 3, 0x0
_Z18MatMulKernel_naive6MatrixS_S_:
.quad _Z33__device_stub__MatMulKernel_naive6MatrixS_S_
.size _Z18MatMulKernel_naive6MatrixS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Matrix multiplication by GPU is right! "
.size .L.str, 40
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Results are not right! "
.size .L.str.1, 24
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z18MatMulKernel_naive6MatrixS_S_"
.size .L__unnamed_1, 34
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z33__device_stub__MatMulKernel_naive6MatrixS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z18MatMulKernel_naive6MatrixS_S_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.
code for sm_80
Function : _Z18MatMulKernel_naive6MatrixS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ MOV R4, c[0x0][0x160] ; /* 0x0000580000047a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R24, -RZ, RZ, 0, 0 ; /* 0x00000000ff187435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R3, SR_TID.Y ; /* 0x0000000000037919 */
/* 0x000e220000002200 */
/*0060*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fc60003f06270 */
/*0070*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e680000002500 */
/*0080*/ S2R R5, SR_TID.X ; /* 0x0000000000057919 */
/* 0x000e620000002100 */
/*0090*/ IMAD R0, R0, c[0x0][0x4], R3 ; /* 0x0000010000007a24 */
/* 0x001fe400078e0203 */
/*00a0*/ IMAD R3, R2, c[0x0][0x0], R5 ; /* 0x0000000002037a24 */
/* 0x002fc600078e0205 */
/*00b0*/ @!P0 BRA 0xc10 ; /* 0x00000b5000008947 */
/* 0x000fea0003800000 */
/*00c0*/ IADD3 R2, R4.reuse, -0x1, RZ ; /* 0xffffffff04027810 */
/* 0x040fe40007ffe0ff */
/*00d0*/ LOP3.LUT R4, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304047812 */
/* 0x000fe400078ec0ff */
/*00e0*/ ISETP.GE.U32.AND P0, PT, R2, 0x3, PT ; /* 0x000000030200780c */
/* 0x000fe40003f06070 */
/*00f0*/ MOV R2, RZ ; /* 0x000000ff00027202 */
/* 0x000fe40000000f00 */
/*0100*/ MOV R24, RZ ; /* 0x000000ff00187202 */
/* 0x000fd20000000f00 */
/*0110*/ @!P0 BRA 0xb00 ; /* 0x000009e000008947 */
/* 0x000fea0003800000 */
/*0120*/ IADD3 R5, -R4, c[0x0][0x160], RZ ; /* 0x0000580004057a10 */
/* 0x000fe20007ffe1ff */
/*0130*/ HFMA2.MMA R8, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff087435 */
/* 0x000fe200000001ff */
/*0140*/ ULDC.64 UR6, c[0x0][0x168] ; /* 0x00005a0000067ab9 */
/* 0x000fe20000000a00 */
/*0150*/ HFMA2.MMA R24, -RZ, RZ, 0, 0 ; /* 0x00000000ff187435 */
/* 0x000fe200000001ff */
/*0160*/ ISETP.GT.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f04270 */
/*0170*/ IMAD R6, R0, c[0x0][0x160], RZ ; /* 0x0000580000067a24 */
/* 0x000fe200078e02ff */
/*0180*/ MOV R2, RZ ; /* 0x000000ff00027202 */
/* 0x000fca0000000f00 */
/*0190*/ IMAD.WIDE R8, R3, R8, c[0x0][0x178] ; /* 0x00005e0003087625 */
/* 0x000fcc00078e0208 */
/*01a0*/ @!P0 BRA 0x960 ; /* 0x000007b000008947 */
/* 0x000fea0003800000 */
/*01b0*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */
/* 0x000fe40003f24270 */
/*01c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01d0*/ @!P1 BRA 0x690 ; /* 0x000004b000009947 */
/* 0x000fea0003800000 */
/*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*01f0*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0200*/ LDG.E R21, [R8.64] ; /* 0x0000000408157981 */
/* 0x0000a2000c1e1900 */
/*0210*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fca0008000f00 */
/*0220*/ IMAD.WIDE R12, R6, 0x4, R12 ; /* 0x00000004060c7825 */
/* 0x000fca00078e020c */
/*0230*/ LDG.E R20, [R12.64] ; /* 0x000000040c147981 */
/* 0x000ea2000c1e1900 */
/*0240*/ MOV R7, c[0x0][0x170] ; /* 0x00005c0000077a02 */
/* 0x000fc60000000f00 */
/*0250*/ LDG.E R14, [R12.64+0x4] ; /* 0x000004040c0e7981 */
/* 0x000ee4000c1e1900 */
/*0260*/ IMAD.WIDE R10, R7.reuse, 0x4, R8 ; /* 0x00000004070a7825 */
/* 0x040fe400078e0208 */
/*0270*/ LDG.E R27, [R12.64+0x8] ; /* 0x000008040c1b7981 */
/* 0x000f28000c1e1900 */
/*0280*/ LDG.E R15, [R10.64] ; /* 0x000000040a0f7981 */
/* 0x0002e2000c1e1900 */
/*0290*/ IMAD.WIDE R22, R7, 0x4, R10 ; /* 0x0000000407167825 */
/* 0x000fc600078e020a */
/*02a0*/ LDG.E R18, [R12.64+0xc] ; /* 0x00000c040c127981 */
/* 0x000f66000c1e1900 */
/*02b0*/ IMAD.WIDE R28, R7.reuse, 0x4, R22 ; /* 0x00000004071c7825 */
/* 0x040fe200078e0216 */
/*02c0*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */
/* 0x000328000c1e1900 */
/*02d0*/ LDG.E R19, [R28.64] ; /* 0x000000041c137981 */
/* 0x000362000c1e1900 */
/*02e0*/ IMAD.WIDE R16, R7, 0x4, R28 ; /* 0x0000000407107825 */
/* 0x000fc600078e021c */
/*02f0*/ LDG.E R8, [R12.64+0x10] ; /* 0x000010040c087981 */
/* 0x001f68000c1e1900 */
/*0300*/ LDG.E R9, [R16.64] ; /* 0x0000000410097981 */
/* 0x000168000c1e1900 */
/*0310*/ LDG.E R10, [R12.64+0x14] ; /* 0x000014040c0a7981 */
/* 0x002f68000c1e1900 */
/*0320*/ LDG.E R28, [R12.64+0x1c] ; /* 0x00001c040c1c7981 */
/* 0x000f62000c1e1900 */
/*0330*/ IMAD.WIDE R16, R7, 0x4, R16 ; /* 0x0000000407107825 */
/* 0x001fca00078e0210 */
/*0340*/ LDG.E R11, [R16.64] ; /* 0x00000004100b7981 */
/* 0x000562000c1e1900 */
/*0350*/ IMAD.WIDE R22, R7, 0x4, R16 ; /* 0x0000000407167825 */
/* 0x000fc800078e0210 */
/*0360*/ FFMA R16, R21, R20, R24 ; /* 0x0000001415107223 */
/* 0x004fe40000000018 */
/*0370*/ LDG.E R20, [R12.64+0x18] ; /* 0x000018040c147981 */
/* 0x000ea2000c1e1900 */
/*0380*/ IMAD.WIDE R24, R7, 0x4, R22 ; /* 0x0000000407187825 */
/* 0x000fc600078e0216 */
/*0390*/ LDG.E R21, [R22.64] ; /* 0x0000000416157981 */
/* 0x0000a8000c1e1900 */
/*03a0*/ LDG.E R29, [R24.64] ; /* 0x00000004181d7981 */
/* 0x0002a2000c1e1900 */
/*03b0*/ FFMA R16, R15, R14, R16 ; /* 0x0000000e0f107223 */
/* 0x008fe40000000010 */
/*03c0*/ IMAD.WIDE R14, R7.reuse, 0x4, R24 ; /* 0x00000004070e7825 */
/* 0x040fe200078e0218 */
/*03d0*/ LDG.E R23, [R12.64+0x20] ; /* 0x000020040c177981 */
/* 0x001ee6000c1e1900 */
/*03e0*/ FFMA R26, R26, R27, R16 ; /* 0x0000001b1a1a7223 */
/* 0x010fe20000000010 */
/*03f0*/ LDG.E R25, [R12.64+0x24] ; /* 0x000024040c197981 */
/* 0x002f22000c1e1900 */
/*0400*/ IMAD.WIDE R16, R7, 0x4, R14 ; /* 0x0000000407107825 */
/* 0x000fc600078e020e */
/*0410*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0000e2000c1e1900 */
/*0420*/ FFMA R26, R19, R18, R26 ; /* 0x00000012131a7223 */
/* 0x020fe4000000001a */
/*0430*/ IMAD.WIDE R18, R7, 0x4, R16 ; /* 0x0000000407127825 */
/* 0x000fe200078e0210 */
/*0440*/ LDG.E R22, [R12.64+0x28] ; /* 0x000028040c167981 */
/* 0x000f66000c1e1900 */
/*0450*/ FFMA R26, R9, R8, R26 ; /* 0x00000008091a7223 */
/* 0x000fe2000000001a */
/*0460*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x000322000c1e1900 */
/*0470*/ IMAD.WIDE R8, R7, 0x4, R18 ; /* 0x0000000407087825 */
/* 0x000fc600078e0212 */
/*0480*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000368000c1e1900 */
/*0490*/ LDG.E R24, [R8.64] ; /* 0x0000000408187981 */
/* 0x000568000c1e1900 */
/*04a0*/ LDG.E R15, [R12.64+0x2c] ; /* 0x00002c040c0f7981 */
/* 0x001f62000c1e1900 */
/*04b0*/ FFMA R26, R11, R10, R26 ; /* 0x0000000a0b1a7223 */
/* 0x000fe4000000001a */
/*04c0*/ IMAD.WIDE R10, R7, 0x4, R8 ; /* 0x00000004070a7825 */
/* 0x000fe200078e0208 */
/*04d0*/ LDG.E R17, [R12.64+0x30] ; /* 0x000030040c117981 */
/* 0x002f66000c1e1900 */
/*04e0*/ FFMA R26, R21, R20, R26 ; /* 0x00000014151a7223 */
/* 0x004fc4000000001a */
/*04f0*/ IMAD.WIDE R20, R7, 0x4, R10 ; /* 0x0000000407147825 */
/* 0x000fe400078e020a */
/*0500*/ LDG.E R10, [R10.64] ; /* 0x000000040a0a7981 */
/* 0x0000a4000c1e1900 */
/*0510*/ FFMA R28, R29, R28, R26 ; /* 0x0000001c1d1c7223 */
/* 0x000fe4000000001a */
/*0520*/ IMAD.WIDE R26, R7.reuse, 0x4, R20 ; /* 0x00000004071a7825 */
/* 0x040fe200078e0214 */
/*0530*/ LDG.E R29, [R12.64+0x34] ; /* 0x000034040c1d7981 */
/* 0x000ea8000c1e1900 */
/*0540*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x0002a2000c1e1900 */
/*0550*/ IMAD.WIDE R8, R7, 0x4, R26 ; /* 0x0000000407087825 */
/* 0x000fc600078e021a */
/*0560*/ LDG.E R19, [R26.64] ; /* 0x000000041a137981 */
/* 0x0006a8000c1e1900 */
/*0570*/ LDG.E R11, [R8.64] ; /* 0x00000004080b7981 */
/* 0x0010a8000c1e1900 */
/*0580*/ LDG.E R21, [R12.64+0x38] ; /* 0x000038040c157981 */
/* 0x002ea8000c1e1900 */
/*0590*/ LDG.E R26, [R12.64+0x3c] ; /* 0x00003c040c1a7981 */
/* 0x008ee2000c1e1900 */
/*05a0*/ FFMA R14, R14, R23, R28 ; /* 0x000000170e0e7223 */
/* 0x000fc8000000001c */
/*05b0*/ FFMA R25, R16, R25, R14 ; /* 0x0000001910197223 */
/* 0x010fe2000000000e */
/*05c0*/ IADD3 R5, R5, -0x10, RZ ; /* 0xfffffff005057810 */
/* 0x000fc60007ffe0ff */
/*05d0*/ FFMA R18, R18, R22, R25 ; /* 0x0000001612127223 */
/* 0x020fe20000000019 */
/*05e0*/ ISETP.GT.AND P1, PT, R5, 0xc, PT ; /* 0x0000000c0500780c */
/* 0x000fc60003f24270 */
/*05f0*/ FFMA R15, R24, R15, R18 ; /* 0x0000000f180f7223 */
/* 0x000fe20000000012 */
/*0600*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*0610*/ IMAD.WIDE R8, R7, 0x4, R8 ; /* 0x0000000407087825 */
/* 0x001fc600078e0208 */
/*0620*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0630*/ IADD3 R2, R2, 0x10, RZ ; /* 0x0000001002027810 */
/* 0x000fe20007ffe0ff */
/*0640*/ FFMA R10, R10, R17, R15 ; /* 0x000000110a0a7223 */
/* 0x004fc8000000000f */
/*0650*/ FFMA R10, R20, R29, R10 ; /* 0x0000001d140a7223 */
/* 0x000fc8000000000a */
/*0660*/ FFMA R10, R19, R21, R10 ; /* 0x00000015130a7223 */
/* 0x000fc8000000000a */
/*0670*/ FFMA R24, R11, R26, R10 ; /* 0x0000001a0b187223 */
/* 0x008fe2000000000a */
/*0680*/ @P1 BRA 0x1f0 ; /* 0xfffffb6000001947 */
/* 0x000fea000383ffff */
/*0690*/ ISETP.GT.AND P1, PT, R5, 0x4, PT ; /* 0x000000040500780c */
/* 0x000fda0003f24270 */
/*06a0*/ @!P1 BRA 0x940 ; /* 0x0000029000009947 */
/* 0x000fea0003800000 */
/*06b0*/ MOV R7, c[0x0][0x170] ; /* 0x00005c0000077a02 */
/* 0x000fe20000000f00 */
/*06c0*/ LDG.E R23, [R8.64] ; /* 0x0000000408177981 */
/* 0x0000a2000c1e1900 */
/*06d0*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe40008000f00 */
/*06e0*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe20008000f00 */
/*06f0*/ IMAD.WIDE R16, R7, 0x4, R8 ; /* 0x0000000407107825 */
/* 0x000fc800078e0208 */
/*0700*/ IMAD.WIDE R10, R6, 0x4, R10 ; /* 0x00000004060a7825 */
/* 0x000fc800078e020a */
/*0710*/ IMAD.WIDE R12, R7.reuse, 0x4, R16 ; /* 0x00000004070c7825 */
/* 0x040fe200078e0210 */
/*0720*/ LDG.E R22, [R10.64] ; /* 0x000000040a167981 */
/* 0x000ea8000c1e1900 */
/*0730*/ LDG.E R16, [R16.64] ; /* 0x0000000410107981 */
/* 0x0002e2000c1e1900 */
/*0740*/ IMAD.WIDE R14, R7, 0x4, R12 ; /* 0x00000004070e7825 */
/* 0x000fc600078e020c */
/*0750*/ LDG.E R25, [R10.64+0x4] ; /* 0x000004040a197981 */
/* 0x000ee6000c1e1900 */
/*0760*/ IMAD.WIDE R18, R7.reuse, 0x4, R14 ; /* 0x0000000407127825 */
/* 0x040fe200078e020e */
/*0770*/ LDG.E R26, [R12.64] ; /* 0x000000040c1a7981 */
/* 0x000968000c1e1900 */
/*0780*/ LDG.E R27, [R10.64+0x8] ; /* 0x000008040a1b7981 */
/* 0x000f62000c1e1900 */
/*0790*/ IMAD.WIDE R20, R7, 0x4, R18 ; /* 0x0000000407147825 */
/* 0x000fc600078e0212 */
/*07a0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000368000c1e1900 */
/*07b0*/ LDG.E R29, [R10.64+0xc] ; /* 0x00000c040a1d7981 */
/* 0x000f62000c1e1900 */
/*07c0*/ IMAD.WIDE R8, R7, 0x4, R20 ; /* 0x0000000407087825 */
/* 0x001fc600078e0214 */
/*07d0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000168000c1e1900 */
/*07e0*/ LDG.E R28, [R10.64+0x10] ; /* 0x000010040a1c7981 */
/* 0x000f62000c1e1900 */
/*07f0*/ IMAD.WIDE R12, R7, 0x4, R8 ; /* 0x00000004070c7825 */
/* 0x010fc600078e0208 */
/*0800*/ LDG.E R20, [R20.64] ; /* 0x0000000414147981 */
/* 0x000968000c1e1900 */
/*0810*/ LDG.E R15, [R10.64+0x14] ; /* 0x000014040a0f7981 */
/* 0x002f68000c1e1900 */
/*0820*/ LDG.E R17, [R8.64] ; /* 0x0000000408117981 */
/* 0x000368000c1e1900 */
/*0830*/ LDG.E R21, [R10.64+0x1c] ; /* 0x00001c040a157981 */
/* 0x010f28000c1e1900 */
/*0840*/ LDG.E R19, [R12.64] ; /* 0x000000040c137981 */
/* 0x001f28000c1e1900 */
/*0850*/ LDG.E R8, [R10.64+0x18] ; /* 0x000018040a087981 */
/* 0x002f22000c1e1900 */
/*0860*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*0870*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fc40003f0e170 */
/*0880*/ IADD3 R2, R2, 0x8, RZ ; /* 0x0000000802027810 */
/* 0x000fe40007ffe0ff */
/*0890*/ IADD3 R5, R5, -0x8, RZ ; /* 0xfffffff805057810 */
/* 0x000fe20007ffe0ff */
/*08a0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08b0*/ FFMA R22, R23, R22, R24 ; /* 0x0000001617167223 */
/* 0x004fc80000000018 */
/*08c0*/ FFMA R16, R16, R25, R22 ; /* 0x0000001910107223 */
/* 0x008fc80000000016 */
/*08d0*/ FFMA R16, R26, R27, R16 ; /* 0x0000001b1a107223 */
/* 0x020fc80000000010 */
/*08e0*/ FFMA R29, R14, R29, R16 ; /* 0x0000001d0e1d7223 */
/* 0x000fc80000000010 */
/*08f0*/ FFMA R18, R18, R28, R29 ; /* 0x0000001c12127223 */
/* 0x000fc8000000001d */
/*0900*/ FFMA R15, R20, R15, R18 ; /* 0x0000000f140f7223 */
/* 0x000fc80000000012 */
/*0910*/ FFMA R24, R17, R8, R15 ; /* 0x0000000811187223 */
/* 0x010fe4000000000f */
/*0920*/ IMAD.WIDE R8, R7, 0x4, R12 ; /* 0x0000000407087825 */
/* 0x000fc800078e020c */
/*0930*/ FFMA R24, R19, R21, R24 ; /* 0x0000001513187223 */
/* 0x000fe40000000018 */
/*0940*/ ISETP.NE.OR P0, PT, R5, RZ, P0 ; /* 0x000000ff0500720c */
/* 0x000fda0000705670 */
/*0950*/ @!P0 BRA 0xb00 ; /* 0x000001a000008947 */
/* 0x000fea0003800000 */
/*0960*/ MOV R10, UR6 ; /* 0x00000006000a7c02 */
/* 0x000fe40008000f00 */
/*0970*/ MOV R11, UR7 ; /* 0x00000007000b7c02 */
/* 0x000fe40008000f00 */
/*0980*/ MOV R7, c[0x0][0x170] ; /* 0x00005c0000077a02 */
/* 0x000fc60000000f00 */
/*0990*/ IMAD.WIDE R10, R6, 0x4, R10 ; /* 0x00000004060a7825 */
/* 0x000fc800078e020a */
/*09a0*/ IMAD.WIDE R16, R7.reuse, 0x4, R8 ; /* 0x0000000407107825 */
/* 0x040fe200078e0208 */
/*09b0*/ LDG.E R18, [R10.64] ; /* 0x000000040a127981 */
/* 0x000ea8000c1e1900 */
/*09c0*/ LDG.E R9, [R8.64] ; /* 0x0000000408097981 */
/* 0x000ea2000c1e1900 */
/*09d0*/ IMAD.WIDE R12, R7, 0x4, R16 ; /* 0x00000004070c7825 */
/* 0x000fc600078e0210 */
/*09e0*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ee8000c1e1900 */
/*09f0*/ LDG.E R19, [R10.64+0x4] ; /* 0x000004040a137981 */
/* 0x000ee2000c1e1900 */
/*0a00*/ IMAD.WIDE R14, R7, 0x4, R12 ; /* 0x00000004070e7825 */
/* 0x000fc600078e020c */
/*0a10*/ LDG.E R21, [R12.64] ; /* 0x000000040c157981 */
/* 0x000f28000c1e1900 */
/*0a20*/ LDG.E R20, [R10.64+0x8] ; /* 0x000008040a147981 */
/* 0x000f28000c1e1900 */
/*0a30*/ LDG.E R22, [R10.64+0xc] ; /* 0x00000c040a167981 */
/* 0x000f68000c1e1900 */
/*0a40*/ LDG.E R23, [R14.64] ; /* 0x000000040e177981 */
/* 0x000f62000c1e1900 */
/*0a50*/ IADD3 R5, R5, -0x4, RZ ; /* 0xfffffffc05057810 */
/* 0x000fc80007ffe0ff */
/*0a60*/ ISETP.NE.AND P0, PT, R5, RZ, PT ; /* 0x000000ff0500720c */
/* 0x000fe20003f05270 */
/*0a70*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0a80*/ IADD3 R2, R2, 0x4, RZ ; /* 0x0000000402027810 */
/* 0x000fc60007ffe0ff */
/*0a90*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0aa0*/ FFMA R18, R9, R18, R24 ; /* 0x0000001209127223 */
/* 0x004fc80000000018 */
/*0ab0*/ FFMA R18, R17, R19, R18 ; /* 0x0000001311127223 */
/* 0x008fe40000000012 */
/*0ac0*/ IMAD.WIDE R8, R7, 0x4, R14 ; /* 0x0000000407087825 */
/* 0x000fc800078e020e */
/*0ad0*/ FFMA R18, R21, R20, R18 ; /* 0x0000001415127223 */
/* 0x010fc80000000012 */
/*0ae0*/ FFMA R24, R23, R22, R18 ; /* 0x0000001617187223 */
/* 0x020fe20000000012 */
/*0af0*/ @P0 BRA 0x960 ; /* 0xfffffe6000000947 */
/* 0x000fea000383ffff */
/*0b00*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fda0003f05270 */
/*0b10*/ @!P0 BRA 0xc10 ; /* 0x000000f000008947 */
/* 0x000fea0003800000 */
/*0b20*/ HFMA2.MMA R9, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff097435 */
/* 0x000fe200000001ff */
/*0b30*/ IMAD R6, R0, c[0x0][0x160], R2 ; /* 0x0000580000067a24 */
/* 0x000fe400078e0202 */
/*0b40*/ IMAD R2, R2, c[0x0][0x170], R3 ; /* 0x00005c0002027a24 */
/* 0x000fce00078e0203 */
/*0b50*/ IMAD.WIDE R6, R6, R9, c[0x0][0x168] ; /* 0x00005a0006067625 */
/* 0x000fc800078e0209 */
/*0b60*/ IMAD.WIDE R8, R2, R9, c[0x0][0x178] ; /* 0x00005e0002087625 */
/* 0x000fca00078e0209 */
/*0b70*/ LDG.E R5, [R8.64] ; /* 0x0000000408057981 */
/* 0x0000a8000c1e1900 */
/*0b80*/ LDG.E R2, [R6.64] ; /* 0x0000000406027981 */
/* 0x0002a2000c1e1900 */
/*0b90*/ IADD3 R4, R4, -0x1, RZ ; /* 0xffffffff04047810 */
/* 0x000fe40007ffe0ff */
/*0ba0*/ MOV R11, c[0x0][0x170] ; /* 0x00005c00000b7a02 */
/* 0x000fe40000000f00 */
/*0bb0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fc60003f05270 */
/*0bc0*/ IMAD.WIDE R8, R11, 0x4, R8 ; /* 0x000000040b087825 */
/* 0x001fe200078e0208 */
/*0bd0*/ IADD3 R6, P1, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x002fc80007f3e0ff */
/*0be0*/ IADD3.X R7, RZ, R7, RZ, P1, !PT ; /* 0x00000007ff077210 */
/* 0x000fe20000ffe4ff */
/*0bf0*/ FFMA R24, R5, R2, R24 ; /* 0x0000000205187223 */
/* 0x004fc80000000018 */
/*0c00*/ @P0 BRA 0xb70 ; /* 0xffffff6000000947 */
/* 0x000fea000383ffff */
/*0c10*/ MOV R2, 0x4 ; /* 0x0000000400027802 */
/* 0x000fe20000000f00 */
/*0c20*/ IMAD R3, R0, c[0x0][0x180], R3 ; /* 0x0000600000037a24 */
/* 0x000fc800078e0203 */
/*0c30*/ IMAD.WIDE R2, R3, R2, c[0x0][0x188] ; /* 0x0000620003027625 */
/* 0x000fca00078e0202 */
/*0c40*/ STG.E [R2.64], R24 ; /* 0x0000001802007986 */
/* 0x000fe2000c101904 */
/*0c50*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c60*/ BRA 0xc60; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c70*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z18MatMulKernel_naive6MatrixS_S_
.globl _Z18MatMulKernel_naive6MatrixS_S_
.p2align 8
.type _Z18MatMulKernel_naive6MatrixS_S_,@function
_Z18MatMulKernel_naive6MatrixS_S_:
s_clause 0x2
s_load_b32 s4, s[0:1], 0x3c
s_load_b32 s6, s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x28
v_bfe_u32 v2, v0, 10, 10
v_and_b32_e32 v3, 0x3ff, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s5, s4, 16
s_and_b32 s4, s4, 0xffff
s_delay_alu instid0(VALU_DEP_1)
v_mad_u64_u32 v[0:1], null, s15, s5, v[2:3]
v_mad_u64_u32 v[1:2], null, s14, s4, v[3:4]
s_cmp_lt_i32 s6, 1
s_cbranch_scc1 .LBB0_3
s_load_b64 s[8:9], s[0:1], 0x8
s_delay_alu instid0(VALU_DEP_2)
v_mul_lo_u32 v2, v0, s6
s_clause 0x1
s_load_b32 s7, s[0:1], 0x10
s_load_b64 s[4:5], s[0:1], 0x18
v_mov_b32_e32 v6, 0
v_mov_b32_e32 v4, v1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v2
v_lshlrev_b64 v[2:3], 2, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v2, vcc_lo, s8, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s9, v3, vcc_lo
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v5, 31, v4
s_add_i32 s6, s6, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s6, 0
v_lshlrev_b64 v[7:8], 2, v[4:5]
v_add_nc_u32_e32 v4, s7, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s4, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v8, vcc_lo
global_load_b32 v5, v[2:3], off
global_load_b32 v7, v[7:8], off
v_add_co_u32 v2, vcc_lo, v2, 4
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v5, v7
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v6, 0
.LBB0_4:
s_load_b32 s0, s[0:1], 0x20
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[2:3], null, v0, s0, v[1:2]
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[2:3]
v_add_co_u32 v0, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_store_b32 v[0:1], v6, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z18MatMulKernel_naive6MatrixS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z18MatMulKernel_naive6MatrixS_S_, .Lfunc_end0-_Z18MatMulKernel_naive6MatrixS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 16
.value_kind: by_value
- .offset: 16
.size: 16
.value_kind: by_value
- .offset: 32
.size: 16
.value_kind: by_value
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z18MatMulKernel_naive6MatrixS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z18MatMulKernel_naive6MatrixS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.
.file "tmpxft_000c4f4b_00000000-6_gpucppMatMulNaive.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3674:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3674:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z16cpu_matrix_multiPfS_S_iii
.type _Z16cpu_matrix_multiPfS_S_iii, @function
_Z16cpu_matrix_multiPfS_S_iii:
.LFB3669:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
movq %rsi, -16(%rsp)
movq %rdx, -8(%rsp)
movl %ecx, -20(%rsp)
testl %ecx, %ecx
jle .L3
movq %rdi, %rbx
movl %r8d, %r10d
movl %r9d, %r14d
movslq %r9d, %rbp
leaq 0(,%rbp,4), %rsi
movl $0, %r13d
movl $0, %r12d
movl $0, %edx
movslq %r8d, %r15
movq %r15, %rcx
jmp .L5
.L6:
movss (%rax), %xmm0
mulss (%rdx), %xmm0
addss %xmm0, %xmm1
addq $4, %rax
addq %rsi, %rdx
cmpq %rdi, %rax
jne .L6
.L8:
movss %xmm1, (%r11,%r8,4)
addq $1, %r8
addq $4, %r9
cmpq %r8, %rbp
je .L13
.L9:
movq %r9, %rdx
movq %r15, %rax
pxor %xmm1, %xmm1
testl %r10d, %r10d
jg .L6
jmp .L8
.L13:
movl -24(%rsp), %edx
.L7:
addl $1, %edx
addl %r14d, %r12d
addl %r10d, %r13d
cmpl %edx, -20(%rsp)
je .L3
.L5:
testl %r14d, %r14d
jle .L7
movq -16(%rsp), %r9
movslq %r13d, %rax
leaq (%rbx,%rax,4), %r15
addq %rcx, %rax
leaq (%rbx,%rax,4), %rdi
movslq %r12d, %rax
movq -8(%rsp), %r11
leaq (%r11,%rax,4), %r11
movl $0, %r8d
movl %edx, -24(%rsp)
jmp .L9
.L3:
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3669:
.size _Z16cpu_matrix_multiPfS_S_iii, .-_Z16cpu_matrix_multiPfS_S_iii
.globl _Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_
.type _Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_, @function
_Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_:
.LFB3696:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movq %rdi, 64(%rsp)
movq %rsi, 72(%rsp)
movq %rdx, 80(%rsp)
movl $1, 16(%rsp)
movl $1, 20(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
leaq 8(%rsp), %rcx
movq %rsp, %rdx
leaq 28(%rsp), %rsi
leaq 16(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L20
.L16:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L21
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L20:
.cfi_restore_state
pushq 8(%rsp)
.cfi_def_cfa_offset 120
pushq 8(%rsp)
.cfi_def_cfa_offset 128
leaq 80(%rsp), %r9
movq 44(%rsp), %rcx
movl 52(%rsp), %r8d
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
leaq _Z18MatMulKernel_naive6MatrixS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L16
.L21:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3696:
.size _Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_, .-_Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_
.globl _Z18MatMulKernel_naive6MatrixS_S_
.type _Z18MatMulKernel_naive6MatrixS_S_, @function
_Z18MatMulKernel_naive6MatrixS_S_:
.LFB3697:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %rdi, 32(%rsp)
movq %rsi, 40(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 24(%rsp)
movq %r8, (%rsp)
movq %r9, 8(%rsp)
movq %rsp, %rdx
leaq 16(%rsp), %rsi
leaq 32(%rsp), %rdi
call _Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_
addq $56, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3697:
.size _Z18MatMulKernel_naive6MatrixS_S_, .-_Z18MatMulKernel_naive6MatrixS_S_
.globl _Z6MatMul6MatrixS_S_
.type _Z6MatMul6MatrixS_S_, @function
_Z6MatMul6MatrixS_S_:
.LFB3670:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $168, %rsp
.cfi_def_cfa_offset 224
movq %rdi, %rbp
movq %rsi, %r14
movq %rdx, %r12
movq %rcx, (%rsp)
movq %r8, %rbx
movq %r9, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
movq %rdi, %r13
sarq $32, %r13
movq %r8, %r15
sarq $32, %r15
movl %edi, 48(%rsp)
movl %r13d, 52(%rsp)
imull %r13d, %ebp
movslq %ebp, %rbp
salq $2, %rbp
leaq 56(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq %r14, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl %r12d, 64(%rsp)
movl %r12d, 68(%rsp)
movq %r12, %rbp
sarq $32, %rbp
imull %r12d, %ebp
movslq %ebp, %rbp
salq $2, %rbp
leaq 72(%rsp), %rdi
movq %rbp, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %rbp, %rdx
movq (%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebx, 80(%rsp)
movl %r15d, 84(%rsp)
imull %r15d, %ebx
movslq %ebx, %rbx
salq $2, %rbx
leaq 88(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
shrl $4, %r12d
movl %r12d, 36(%rsp)
shrl $4, %r13d
movl %r13d, 40(%rsp)
movl $16, 24(%rsp)
movl $16, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 24(%rsp), %rdx
movl $1, %ecx
movq 36(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L28
.L25:
movl $2, %ecx
movq %rbx, %rdx
movq 88(%rsp), %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
call cudaThreadSynchronize@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L29
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore_state
movdqa 48(%rsp), %xmm0
movaps %xmm0, 96(%rsp)
movdqa 64(%rsp), %xmm1
movaps %xmm1, 112(%rsp)
movdqa 80(%rsp), %xmm2
movaps %xmm2, 128(%rsp)
leaq 128(%rsp), %rdx
leaq 112(%rsp), %rsi
leaq 96(%rsp), %rdi
call _Z47__device_stub__Z18MatMulKernel_naive6MatrixS_S_RK6MatrixS1_RS_
jmp .L25
.L29:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3670:
.size _Z6MatMul6MatrixS_S_, .-_Z6MatMul6MatrixS_S_
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC4:
.string "Matrix multiplication by GPU is right! "
.section .rodata.str1.1,"aMS",@progbits,1
.LC5:
.string "Results are not right! "
.text
.globl main
.type main, @function
main:
.LFB3671:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbp
movl $1103, %edi
call srand@PLT
leaq 4096(%rbp), %r12
leaq 4198400(%rbp), %r13
.L31:
leaq -4096(%r12), %rbx
.L32:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC1(%rip), %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %r12, %rbx
jne .L32
addq $4096, %r12
cmpq %r13, %r12
jne .L31
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbx
leaq 4096(%rax), %r13
leaq 4198400(%rax), %r14
.L34:
leaq -4096(%r13), %r12
.L35:
call rand@PLT
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
mulss .LC1(%rip), %xmm0
movss %xmm0, (%r12)
addq $4, %r12
cmpq %r13, %r12
jne .L35
addq $4096, %r13
cmpq %r14, %r13
jne .L34
movl $4194304, %edi
call _Znam@PLT
movq %rax, %r12
movabsq $4398046511104, %r8
movq %r8, %rdi
orq $1024, %rdi
movq %rdi, %rdx
movq %rdi, %r8
movq %rax, %r9
movq %rbx, %rcx
movq %rbp, %rsi
call _Z6MatMul6MatrixS_S_
movl $4194304, %edi
call _Znam@PLT
movq %rax, %r13
movl $1024, %r9d
movl $1024, %r8d
movl $1024, %ecx
movq %rax, %rdx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z16cpu_matrix_multiPfS_S_iii
leaq 4096(%r12), %rcx
leaq 4198400(%r12), %rdi
movl $0, %edx
movss .LC2(%rip), %xmm1
movl $1, %esi
.L37:
leaq -4096(%rcx), %rax
.L39:
movss (%rax), %xmm0
subss %xmm0, %xmm0
andps %xmm1, %xmm0
ucomiss .LC3(%rip), %xmm0
cmova %esi, %edx
addq $4, %rax
cmpq %rax, %rcx
jne .L39
addq $4096, %rcx
cmpq %rcx, %rdi
jne .L37
testb %dl, %dl
jne .L41
leaq .LC4(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
.L42:
movq %rbp, %rdi
call _ZdaPv@PLT
movq %rbx, %rdi
call _ZdaPv@PLT
movq %r12, %rdi
call _ZdaPv@PLT
movq %r13, %rdi
call _ZdaPv@PLT
movl $0, %eax
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L41:
.cfi_restore_state
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
call _ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@PLT
jmp .L42
.cfi_endproc
.LFE3671:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC6:
.string "_Z18MatMulKernel_naive6MatrixS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3699:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z18MatMulKernel_naive6MatrixS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3699:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 805306368
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC2:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst4
.align 4
.LC3:
.long 897988541
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "gpucppMatMulNaive.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z16cpu_matrix_multiPfS_S_iii # -- Begin function _Z16cpu_matrix_multiPfS_S_iii
.p2align 4, 0x90
.type _Z16cpu_matrix_multiPfS_S_iii,@function
_Z16cpu_matrix_multiPfS_S_iii: # @_Z16cpu_matrix_multiPfS_S_iii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdx, -8(%rsp) # 8-byte Spill
movq %rsi, -16(%rsp) # 8-byte Spill
movq %rdi, -24(%rsp) # 8-byte Spill
testl %ecx, %ecx
jle .LBB0_9
# %bb.1: # %.preheader27.lr.ph
movslq %r9d, %rdi
movl %ecx, %ecx
movl %edi, %r10d
movl %r8d, %r11d
leaq (,%rdi,4), %rbx
xorl %r14d, %r14d
xorl %r15d, %r15d
jmp .LBB0_2
.p2align 4, 0x90
.LBB0_8: # %._crit_edge31
# in Loop: Header=BB0_2 Depth=1
incq %r15
addl %r8d, %r14d
cmpq %rcx, %r15
je .LBB0_9
.LBB0_2: # %.preheader27
# =>This Loop Header: Depth=1
# Child Loop BB0_4 Depth 2
# Child Loop BB0_6 Depth 3
testl %r9d, %r9d
jle .LBB0_8
# %bb.3: # %.preheader.lr.ph
# in Loop: Header=BB0_2 Depth=1
movl %r14d, %eax
movq -24(%rsp), %rdx # 8-byte Reload
leaq (%rdx,%rax,4), %r12
movq %r15, %rax
imulq %rdi, %rax
movq -8(%rsp), %rdx # 8-byte Reload
leaq (%rdx,%rax,4), %r13
movq -16(%rsp), %rsi # 8-byte Reload
xorl %eax, %eax
jmp .LBB0_4
.p2align 4, 0x90
.LBB0_7: # %._crit_edge
# in Loop: Header=BB0_4 Depth=2
movss %xmm0, (%r13,%rax,4)
incq %rax
addq $4, %rsi
cmpq %r10, %rax
je .LBB0_8
.LBB0_4: # %.preheader
# Parent Loop BB0_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB0_6 Depth 3
xorps %xmm0, %xmm0
testl %r8d, %r8d
jle .LBB0_7
# %bb.5: # %.lr.ph.preheader
# in Loop: Header=BB0_4 Depth=2
movq %rsi, %rbp
xorl %edx, %edx
.p2align 4, 0x90
.LBB0_6: # %.lr.ph
# Parent Loop BB0_2 Depth=1
# Parent Loop BB0_4 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r12,%rdx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%rbp), %xmm1
addss %xmm1, %xmm0
incq %rdx
addq %rbx, %rbp
cmpq %rdx, %r11
jne .LBB0_6
jmp .LBB0_7
.LBB0_9: # %._crit_edge33
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z16cpu_matrix_multiPfS_S_iii, .Lfunc_end0-_Z16cpu_matrix_multiPfS_S_iii
.cfi_endproc
# -- End function
.globl _Z33__device_stub__MatMulKernel_naive6MatrixS_S_ # -- Begin function _Z33__device_stub__MatMulKernel_naive6MatrixS_S_
.p2align 4, 0x90
.type _Z33__device_stub__MatMulKernel_naive6MatrixS_S_,@function
_Z33__device_stub__MatMulKernel_naive6MatrixS_S_: # @_Z33__device_stub__MatMulKernel_naive6MatrixS_S_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 80(%rsp)
movq %rsi, 88(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 72(%rsp)
movq %r8, 48(%rsp)
movq %r9, 56(%rsp)
leaq 80(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 48(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z18MatMulKernel_naive6MatrixS_S_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end1:
.size _Z33__device_stub__MatMulKernel_naive6MatrixS_S_, .Lfunc_end1-_Z33__device_stub__MatMulKernel_naive6MatrixS_S_
.cfi_endproc
# -- End function
.globl _Z6MatMul6MatrixS_S_ # -- Begin function _Z6MatMul6MatrixS_S_
.p2align 4, 0x90
.type _Z6MatMul6MatrixS_S_,@function
_Z6MatMul6MatrixS_S_: # @_Z6MatMul6MatrixS_S_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $200, %rsp
.cfi_def_cfa_offset 256
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r9, 72(%rsp) # 8-byte Spill
movq %r8, %r13
movq %rcx, 64(%rsp) # 8-byte Spill
movq %rdx, %rbp
movq %rsi, 56(%rsp) # 8-byte Spill
movq %rdi, %r12
movq %rdi, %rax
shrq $32, %rax
movq %rdx, %r15
shrq $32, %r15
movq %r8, %r14
shrq $32, %r14
movq %rdi, 40(%rsp)
imull %r12d, %eax
movslq %eax, %rbx
shlq $2, %rbx
leaq 48(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 48(%rsp), %rdi
movq 56(%rsp), %rsi # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movl %ebp, 8(%rsp)
movl %ebp, 12(%rsp)
imull %ebp, %r15d
movslq %r15d, %rbx
shlq $2, %rbx
leaq 16(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
movq 16(%rsp), %rdi
movq 64(%rsp), %rsi # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movq %r13, 24(%rsp)
imull %r13d, %r14d
movslq %r14d, %r14
shlq $2, %r14
leaq 32(%rsp), %rdi
movq %r14, %rsi
callq hipMalloc
shrl $4, %ebp
shrq $4, %r12
movabsq $1152921500311879680, %rdi # imm = 0xFFFFFFF00000000
andq %r12, %rdi
orq %rbp, %rdi
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_2
# %bb.1:
movups 40(%rsp), %xmm0
movups 8(%rsp), %xmm1
movups 24(%rsp), %xmm2
movups %xmm0, 184(%rsp)
movups %xmm1, 168(%rsp)
movups %xmm2, 152(%rsp)
leaq 184(%rsp), %rax
movq %rax, 128(%rsp)
leaq 168(%rsp), %rax
movq %rax, 136(%rsp)
leaq 152(%rsp), %rax
movq %rax, 144(%rsp)
leaq 112(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
movq 96(%rsp), %rcx
movl 104(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z18MatMulKernel_naive6MatrixS_S_, %edi
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_2:
movq 32(%rsp), %rsi
movq 72(%rsp), %rdi # 8-byte Reload
movq %r14, %rdx
movl $2, %ecx
callq hipMemcpy
callq hipDeviceSynchronize
movq 48(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
addq $200, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size _Z6MatMul6MatrixS_S_, .Lfunc_end2-_Z6MatMul6MatrixS_S_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI3_0:
.long 0x30000000 # float 4.65661287E-10
.LCPI3_2:
.long 0x358637bd # float 9.99999997E-7
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI3_1:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %rbx
movl $1103, %edi # imm = 0x44F
callq srand
xorl %r14d, %r14d
movq %rbx, %r15
.p2align 4, 0x90
.LBB3_1: # %.preheader95
# =>This Loop Header: Depth=1
# Child Loop BB3_2 Depth 2
xorl %r12d, %r12d
.p2align 4, 0x90
.LBB3_2: # Parent Loop BB3_1 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
movss .LCPI3_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss %xmm1, %xmm0
movss %xmm0, (%r15,%r12,4)
incq %r12
cmpq $1024, %r12 # imm = 0x400
jne .LBB3_2
# %bb.3: # in Loop: Header=BB3_1 Depth=1
incq %r14
addq $4096, %r15 # imm = 0x1000
cmpq $1024, %r14 # imm = 0x400
jne .LBB3_1
# %bb.4:
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r14
xorl %r15d, %r15d
movq %rax, %r12
.p2align 4, 0x90
.LBB3_5: # %.preheader94
# =>This Loop Header: Depth=1
# Child Loop BB3_6 Depth 2
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB3_6: # Parent Loop BB3_5 Depth=1
# => This Inner Loop Header: Depth=2
callq rand
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
mulss .LCPI3_0(%rip), %xmm0
movss %xmm0, (%r12,%r13,4)
incq %r13
cmpq $1024, %r13 # imm = 0x400
jne .LBB3_6
# %bb.7: # in Loop: Header=BB3_5 Depth=1
incq %r15
addq $4096, %r12 # imm = 0x1000
cmpq $1024, %r15 # imm = 0x400
jne .LBB3_5
# %bb.8: # %.preheader.preheader
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r15
movabsq $4398046512128, %rdi # imm = 0x40000000400
movq %rbx, %rsi
movq %rdi, %rdx
movq %r14, %rcx
movq %rdi, %r8
movq %rax, %r9
callq _Z6MatMul6MatrixS_S_
xorl %eax, %eax
movaps .LCPI3_1(%rip), %xmm0 # xmm0 = [NaN,NaN,NaN,NaN]
movss .LCPI3_2(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
movl $1, %ecx
movq %r15, %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB3_9: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_10 Depth 2
xorl %edi, %edi
.p2align 4, 0x90
.LBB3_10: # Parent Loop BB3_9 Depth=1
# => This Inner Loop Header: Depth=2
movss (%rdx,%rdi,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
subss %xmm2, %xmm2
andps %xmm0, %xmm2
ucomiss %xmm1, %xmm2
movzbl %sil, %esi
cmoval %ecx, %esi
incq %rdi
cmpq $1024, %rdi # imm = 0x400
jne .LBB3_10
# %bb.11: # %_Z16cpu_matrix_multiPfS_S_iii.exit
# in Loop: Header=BB3_9 Depth=1
incq %rax
addq $4096, %rdx # imm = 0x1000
cmpq $1024, %rax # imm = 0x400
jne .LBB3_9
# %bb.12:
movl $_ZSt4cout, %edi
testb $1, %sil
jne .LBB3_16
# %bb.13:
movl $.L.str, %esi
movl $39, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB3_21
# %bb.14: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i
cmpb $0, 56(%r12)
je .LBB3_19
.LBB3_15:
movzbl 67(%r12), %eax
jmp .LBB3_20
.LBB3_16:
movl $.L.str.1, %esi
movl $23, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq _ZSt4cout(%rip), %rax
movq -24(%rax), %rax
movq _ZSt4cout+240(%rax), %r12
testq %r12, %r12
je .LBB3_21
# %bb.17: # %_ZSt13__check_facetISt5ctypeIcEERKT_PS3_.exit.i.i90
cmpb $0, 56(%r12)
jne .LBB3_15
.LBB3_19:
movq %r12, %rdi
callq _ZNKSt5ctypeIcE13_M_widen_initEv
movq (%r12), %rax
movq %r12, %rdi
movl $10, %esi
callq *48(%rax)
.LBB3_20: # %_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_.exit
movsbl %al, %esi
movl $_ZSt4cout, %edi
callq _ZNSo3putEc
movq %rax, %rdi
callq _ZNSo5flushEv
movq %rbx, %rdi
callq _ZdaPv
movq %r14, %rdi
callq _ZdaPv
movq %r15, %rdi
callq _ZdaPv
xorl %eax, %eax
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB3_21:
.cfi_def_cfa_offset 48
callq _ZSt16__throw_bad_castv
.Lfunc_end3:
.size main, .Lfunc_end3-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB4_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB4_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z18MatMulKernel_naive6MatrixS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end4:
.size __hip_module_ctor, .Lfunc_end4-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB5_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB5_2:
retq
.Lfunc_end5:
.size __hip_module_dtor, .Lfunc_end5-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z18MatMulKernel_naive6MatrixS_S_,@object # @_Z18MatMulKernel_naive6MatrixS_S_
.section .rodata,"a",@progbits
.globl _Z18MatMulKernel_naive6MatrixS_S_
.p2align 3, 0x0
_Z18MatMulKernel_naive6MatrixS_S_:
.quad _Z33__device_stub__MatMulKernel_naive6MatrixS_S_
.size _Z18MatMulKernel_naive6MatrixS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Matrix multiplication by GPU is right! "
.size .L.str, 40
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "Results are not right! "
.size .L.str.1, 24
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z18MatMulKernel_naive6MatrixS_S_"
.size .L__unnamed_1, 34
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z33__device_stub__MatMulKernel_naive6MatrixS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z18MatMulKernel_naive6MatrixS_S_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | /* This code implements the serial solution and CUDA version for finding the maximal burst in a time series;
How to compile:
nvcc compare.cu
How to run:
./a.out n k //n is the length of the time series and k is the minimum length of a subsequence
Results to see:
The bursts found by the two methods are printed out: "burst start from .. end at ..; max-mean is .."
Notes:
The serial solution takes a long time for a large n (e.g. n=3000)
*/
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
// kernel function
__global__ void max_each_block(float *dmaxarr, int *dmaxstart, int *dmaxend, float * darr, int n, int k);
// max_burst calls the kernel, which returns three arrays: one for the maxval, one for the startpoint, one for the endpoint in each block;
void max_burst(float *x, int n, int k, int *startend, float *bigmax);
//find the maximum from the returned arrays from the kernel. This function is called by max_burst
void find_max_from_blocks(float *maxarr, int *maxstart, int *maxend, int numBlock,int *startend, float *bigmax);
//serial solution
void s_max_burst(float *arr, int n, int k);
float mean(float *y, int s, int e);
int main(int argc, char **argv) {
int n = atoi(argv[1]);
int k = atoi(argv[2]);
//generate a 1d array
float *arr = (float*) malloc(n*sizeof(float));
int i;
for (i = n; i > 0; i--) {
arr[n-i] = (float)(rand() % 80);
}
// Cuda solution
int startend[2];
float bigmax;
max_burst(arr, n,k, startend, &bigmax);
// serial solution
s_max_burst(arr, n,k);
return 0;
}
__global__ void max_each_block(float *dmaxarr, int *dmaxstart, int *dmaxend, float * darr, int n,int k) {
// declare three arrays for the maximum found by each thread
// learning material for shared memory: https://devblogs.nvidia.com/parallelforall/using-shared-memory-cuda-cc/
extern __shared__ float sh[];
float *mymaxvals = sh;
int *mystartmaxes = (int *)&mymaxvals[blockDim.x];
int *myendmaxes = (int *)&mystartmaxes[blockDim.x];
int perstart = threadIdx.x + blockDim.x * blockIdx.x;
int perlen, perend;
double xbar; // a temporary variable used when computing the mean of a subsequence
int i, tid = threadIdx.x;
if (perstart <= n-k) {
for (perlen = k ; perlen <= n - perstart ; perlen++) {
perend = perstart + perlen - 1;
//compute the mean of subsequence incrementally
if (perlen ==k) {
xbar = 0;
for ( i = perstart; i <= perend; i++) {
xbar += darr[i];
}
xbar /= (perend - perstart + 1);
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
} else {
xbar = ( (perlen-1) * xbar + darr[perend] ) / perlen;
}
//update the mymaxvals[tid] if the next subsequence in a thread has a higher mean
if (xbar > mymaxvals[tid]) {
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
}
}
} else {
mymaxvals[tid] = 0;//initialize it with the smallest number
}
__syncthreads(); //sync to make sure each thread in this block has done with the for loop
// get the highest among the mymaxvals using reduce
for (int s = blockDim.x/2; s > 0; s>>=1) {
if (tid < s ) {
if(mymaxvals[tid+s] > mymaxvals[tid]) {
mymaxvals[tid] = mymaxvals[tid+s];
mystartmaxes[tid] = mystartmaxes[tid + s];
myendmaxes[tid] = myendmaxes[tid + s];
}
}
__syncthreads();
}
//put the maximum among the mymaxvals in this block to dmaxarr
if(tid == 0) {
dmaxarr[blockIdx.x] = mymaxvals[0];
dmaxstart[blockIdx.x] = mystartmaxes[0];
dmaxend[blockIdx.x] = myendmaxes[0];
}
}
void max_burst(float *x, int n, int k, int *startend, float *bigmax) {
const int numthreadsBlock = 1024;
int numBlock = ( n + numthreadsBlock - 1)/numthreadsBlock;
//declare arrays on cpu to store the results from the kernel
float *maxarr = (float *)malloc(numBlock * sizeof(float));
int *maxstart = (int *)malloc(numBlock * sizeof(int));
int *maxend = (int *)malloc(numBlock * sizeof(int));
// declare GPU memory pointers
float *darr, * dmaxarr;
int *dmaxstart, *dmaxend;
cudaMalloc((void **)&darr, n*sizeof(float));
cudaMalloc((void **)&dmaxarr, numBlock*sizeof(float));
cudaMalloc((void **)&dmaxstart, numBlock*sizeof(int));
cudaMalloc((void **)&dmaxend, numBlock*sizeof(int));
//copy the input x to device
cudaMemcpy(darr, x, n*sizeof(float), cudaMemcpyHostToDevice);
// execution configuration
dim3 dimGrid(numBlock,1);
dim3 dimBlock(numthreadsBlock,1,1);
//call the kernel
max_each_block<<<dimGrid,dimBlock,(3*numthreadsBlock)*sizeof(float)>>>(dmaxarr,dmaxstart,dmaxend, darr, n, k);
cudaThreadSynchronize();
//copy the results from device to cpu
cudaMemcpy(maxarr, dmaxarr, numBlock*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(maxstart, dmaxstart, numBlock*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(maxend, dmaxend, numBlock*sizeof(int), cudaMemcpyDeviceToHost);
//free memory on device
cudaFree(darr);
cudaFree(dmaxarr);
cudaFree(dmaxstart);
cudaFree(dmaxend);
find_max_from_blocks( maxarr, maxstart, maxend, numBlock,startend, bigmax);
printf("burst start from %d end at %d; max-mean is %f\n", startend[0], startend[1], *bigmax);
}
void find_max_from_blocks(float *maxarr, int *maxstart, int *maxend, int numBlock,int *startend, float *bigmax) {
*bigmax = 0;
for (int i = 0; i < numBlock; i++) {
if (*bigmax < maxarr[i]) {
*bigmax = maxarr[i];
startend[0] = maxstart[i];
startend[1] = maxend[i];
}
}
}
void s_max_burst(float *arr, int n, int k) {
float mymaxval = -1;
int perstart, perlen,perend, mystart, myend;
float xbar;
for (perstart = 0; perstart <= n-k; perstart++) {
for (perlen = k; perlen <= n - perstart; perlen++) {
perend = perstart + perlen -1;
xbar = mean(arr, perstart, perend);
if (xbar > mymaxval) {
mymaxval = xbar;
mystart = perstart;
myend = perend;
}
}
}
printf("\nburst start from %d end %d, max-mean is %f\n", mystart, myend,mymaxval);
}
float mean(float *y, int s, int e){
int i;
float tot =0;
for (i=s; i<=e; i++) tot += y[i];
return tot / (e -s + 1);
} | .file "tmpxft_0013a73d_00000000-6_compare.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z20find_max_from_blocksPfPiS0_iS0_S_
.type _Z20find_max_from_blocksPfPiS0_iS0_S_, @function
_Z20find_max_from_blocksPfPiS0_iS0_S_:
.LFB2059:
.cfi_startproc
endbr64
movq %rdx, %r10
movl $0x00000000, (%r9)
testl %ecx, %ecx
jle .L3
movslq %ecx, %rcx
leaq 0(,%rcx,4), %rdx
movl $0, %eax
jmp .L7
.L5:
addq $4, %rax
cmpq %rdx, %rax
je .L3
.L7:
movss (%rdi,%rax), %xmm0
comiss (%r9), %xmm0
jbe .L5
movss %xmm0, (%r9)
movl (%rsi,%rax), %ecx
movl %ecx, (%r8)
movl (%r10,%rax), %ecx
movl %ecx, 4(%r8)
jmp .L5
.L3:
ret
.cfi_endproc
.LFE2059:
.size _Z20find_max_from_blocksPfPiS0_iS0_S_, .-_Z20find_max_from_blocksPfPiS0_iS0_S_
.globl _Z4meanPfii
.type _Z4meanPfii, @function
_Z4meanPfii:
.LFB2061:
.cfi_startproc
endbr64
cmpl %edx, %esi
jg .L13
movslq %esi, %r8
leaq (%rdi,%r8,4), %rax
movl %edx, %ecx
subl %esi, %ecx
addq %r8, %rcx
leaq 4(%rdi,%rcx,4), %rcx
pxor %xmm0, %xmm0
.L12:
addss (%rax), %xmm0
addq $4, %rax
cmpq %rcx, %rax
jne .L12
.L11:
subl %esi, %edx
addl $1, %edx
pxor %xmm1, %xmm1
cvtsi2ssl %edx, %xmm1
divss %xmm1, %xmm0
ret
.L13:
pxor %xmm0, %xmm0
jmp .L11
.cfi_endproc
.LFE2061:
.size _Z4meanPfii, .-_Z4meanPfii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "\nburst start from %d end %d, max-mean is %f\n"
.text
.globl _Z11s_max_burstPfii
.type _Z11s_max_burstPfii, @function
_Z11s_max_burstPfii:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movl %edx, 24(%rsp)
movl %esi, %eax
subl %edx, %eax
js .L22
movq %rdi, %r13
movl %esi, %r14d
addl $1, %eax
movl %eax, 28(%rsp)
movl $0, %ebp
movss .LC1(%rip), %xmm2
movss %xmm2, 12(%rsp)
leal -1(%rdx), %r15d
jmp .L17
.L18:
addl $1, %ebx
cmpl %ebx, %r12d
je .L21
.L19:
movl %ebx, %edx
movl %ebp, %esi
movq %r13, %rdi
call _Z4meanPfii
comiss 12(%rsp), %xmm0
jbe .L18
movl %ebx, 20(%rsp)
movl %ebp, 16(%rsp)
movss %xmm0, 12(%rsp)
jmp .L18
.L21:
addl $1, %ebp
subl $1, %r14d
movl 28(%rsp), %eax
cmpl %eax, %ebp
je .L16
.L17:
leal (%r15,%rbp), %ebx
leal (%r14,%rbp), %r12d
cmpl %r14d, 24(%rsp)
jle .L19
jmp .L21
.L22:
movss .LC1(%rip), %xmm3
movss %xmm3, 12(%rsp)
.L16:
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
movl 20(%rsp), %ecx
movl 16(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z11s_max_burstPfii, .-_Z11s_max_burstPfii
.globl _Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii
.type _Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii, @function
_Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z14max_each_blockPfPiS0_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii, .-_Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii
.globl _Z14max_each_blockPfPiS0_S_ii
.type _Z14max_each_blockPfPiS0_S_ii, @function
_Z14max_each_blockPfPiS0_S_ii:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z14max_each_blockPfPiS0_S_ii, .-_Z14max_each_blockPfPiS0_S_ii
.section .rodata.str1.8
.align 8
.LC3:
.string "burst start from %d end at %d; max-mean is %f\n"
.text
.globl _Z9max_burstPfiiPiS_
.type _Z9max_burstPfiiPiS_, @function
_Z9max_burstPfiiPiS_:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 32(%rsp)
movl %esi, %r12d
movl %edx, 44(%rsp)
movq %rcx, %r13
movq %r8, %r14
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leal 2046(%rsi), %ebp
movl %esi, %eax
addl $1023, %eax
cmovns %eax, %ebp
sarl $10, %ebp
movslq %ebp, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, 8(%rsp)
movq %rbx, %rdi
call malloc@PLT
movq %rax, 16(%rsp)
movq %rbx, %rdi
call malloc@PLT
movq %rax, 24(%rsp)
movslq %r12d, %r15
salq $2, %r15
leaq 48(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r15, %rdx
movq 32(%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebp, 80(%rsp)
movl $1, 84(%rsp)
movl $1024, 92(%rsp)
movl $1, 96(%rsp)
movl $0, %r9d
movl $12288, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L39
.L36:
call cudaThreadSynchronize@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 56(%rsp), %rsi
movq 8(%rsp), %r12
movq %r12, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 64(%rsp), %rsi
movq 16(%rsp), %r15
movq %r15, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 72(%rsp), %rsi
movq 24(%rsp), %rbx
movq %rbx, %rdi
call cudaMemcpy@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq %r14, %r9
movq %r13, %r8
movl %ebp, %ecx
movq %rbx, %rdx
movq %r15, %rsi
movq %r12, %rdi
call _Z20find_max_from_blocksPfPiS0_iS0_S_
pxor %xmm0, %xmm0
cvtss2sd (%r14), %xmm0
movl 4(%r13), %ecx
movl 0(%r13), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
movl 44(%rsp), %r9d
movl %r12d, %r8d
movq 48(%rsp), %rcx
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii
jmp .L36
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z9max_burstPfiiPiS_, .-_Z9max_burstPfiiPiS_
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $32, %rsp
.cfi_def_cfa_offset 80
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movl %eax, %r14d
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r13d
movslq %ebp, %rdi
salq $2, %rdi
call malloc@PLT
movq %rax, %r12
testl %ebp, %ebp
jle .L42
movq %rax, %rbx
leal -1(%rbp), %eax
leaq 4(%r12,%rax,4), %rbp
.L43:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
sall $4, %edx
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L43
.L42:
leaq 16(%rsp), %rcx
leaq 12(%rsp), %r8
movl %r13d, %edx
movl %r14d, %esi
movq %r12, %rdi
call _Z9max_burstPfiiPiS_
movl %r13d, %edx
movl %r14d, %esi
movq %r12, %rdi
call _Z11s_max_burstPfii
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L47
movl $0, %eax
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "_Z14max_each_blockPfPiS0_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z14max_each_blockPfPiS0_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long -1082130432
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | /* This code implements the serial solution and CUDA version for finding the maximal burst in a time series;
How to compile:
nvcc compare.cu
How to run:
./a.out n k //n is the length of the time series and k is the minimum length of a subsequence
Results to see:
The bursts found by the two methods are printed out: "burst start from .. end at ..; max-mean is .."
Notes:
The serial solution takes a long time for a large n (e.g. n=3000)
*/
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
// kernel function
__global__ void max_each_block(float *dmaxarr, int *dmaxstart, int *dmaxend, float * darr, int n, int k);
// max_burst calls the kernel, which returns three arrays: one for the maxval, one for the startpoint, one for the endpoint in each block;
void max_burst(float *x, int n, int k, int *startend, float *bigmax);
//find the maximum from the returned arrays from the kernel. This function is called by max_burst
void find_max_from_blocks(float *maxarr, int *maxstart, int *maxend, int numBlock,int *startend, float *bigmax);
//serial solution
void s_max_burst(float *arr, int n, int k);
float mean(float *y, int s, int e);
int main(int argc, char **argv) {
int n = atoi(argv[1]);
int k = atoi(argv[2]);
//generate a 1d array
float *arr = (float*) malloc(n*sizeof(float));
int i;
for (i = n; i > 0; i--) {
arr[n-i] = (float)(rand() % 80);
}
// Cuda solution
int startend[2];
float bigmax;
max_burst(arr, n,k, startend, &bigmax);
// serial solution
s_max_burst(arr, n,k);
return 0;
}
__global__ void max_each_block(float *dmaxarr, int *dmaxstart, int *dmaxend, float * darr, int n,int k) {
// declare three arrays for the maximum found by each thread
// learning material for shared memory: https://devblogs.nvidia.com/parallelforall/using-shared-memory-cuda-cc/
extern __shared__ float sh[];
float *mymaxvals = sh;
int *mystartmaxes = (int *)&mymaxvals[blockDim.x];
int *myendmaxes = (int *)&mystartmaxes[blockDim.x];
int perstart = threadIdx.x + blockDim.x * blockIdx.x;
int perlen, perend;
double xbar; // a temporary variable used when computing the mean of a subsequence
int i, tid = threadIdx.x;
if (perstart <= n-k) {
for (perlen = k ; perlen <= n - perstart ; perlen++) {
perend = perstart + perlen - 1;
//compute the mean of subsequence incrementally
if (perlen ==k) {
xbar = 0;
for ( i = perstart; i <= perend; i++) {
xbar += darr[i];
}
xbar /= (perend - perstart + 1);
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
} else {
xbar = ( (perlen-1) * xbar + darr[perend] ) / perlen;
}
//update the mymaxvals[tid] if the next subsequence in a thread has a higher mean
if (xbar > mymaxvals[tid]) {
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
}
}
} else {
mymaxvals[tid] = 0;//initialize it with the smallest number
}
__syncthreads(); //sync to make sure each thread in this block has done with the for loop
// get the highest among the mymaxvals using reduce
for (int s = blockDim.x/2; s > 0; s>>=1) {
if (tid < s ) {
if(mymaxvals[tid+s] > mymaxvals[tid]) {
mymaxvals[tid] = mymaxvals[tid+s];
mystartmaxes[tid] = mystartmaxes[tid + s];
myendmaxes[tid] = myendmaxes[tid + s];
}
}
__syncthreads();
}
//put the maximum among the mymaxvals in this block to dmaxarr
if(tid == 0) {
dmaxarr[blockIdx.x] = mymaxvals[0];
dmaxstart[blockIdx.x] = mystartmaxes[0];
dmaxend[blockIdx.x] = myendmaxes[0];
}
}
void max_burst(float *x, int n, int k, int *startend, float *bigmax) {
const int numthreadsBlock = 1024;
int numBlock = ( n + numthreadsBlock - 1)/numthreadsBlock;
//declare arrays on cpu to store the results from the kernel
float *maxarr = (float *)malloc(numBlock * sizeof(float));
int *maxstart = (int *)malloc(numBlock * sizeof(int));
int *maxend = (int *)malloc(numBlock * sizeof(int));
// declare GPU memory pointers
float *darr, * dmaxarr;
int *dmaxstart, *dmaxend;
cudaMalloc((void **)&darr, n*sizeof(float));
cudaMalloc((void **)&dmaxarr, numBlock*sizeof(float));
cudaMalloc((void **)&dmaxstart, numBlock*sizeof(int));
cudaMalloc((void **)&dmaxend, numBlock*sizeof(int));
//copy the input x to device
cudaMemcpy(darr, x, n*sizeof(float), cudaMemcpyHostToDevice);
// execution configuration
dim3 dimGrid(numBlock,1);
dim3 dimBlock(numthreadsBlock,1,1);
//call the kernel
max_each_block<<<dimGrid,dimBlock,(3*numthreadsBlock)*sizeof(float)>>>(dmaxarr,dmaxstart,dmaxend, darr, n, k);
cudaThreadSynchronize();
//copy the results from device to cpu
cudaMemcpy(maxarr, dmaxarr, numBlock*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(maxstart, dmaxstart, numBlock*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(maxend, dmaxend, numBlock*sizeof(int), cudaMemcpyDeviceToHost);
//free memory on device
cudaFree(darr);
cudaFree(dmaxarr);
cudaFree(dmaxstart);
cudaFree(dmaxend);
find_max_from_blocks( maxarr, maxstart, maxend, numBlock,startend, bigmax);
printf("burst start from %d end at %d; max-mean is %f\n", startend[0], startend[1], *bigmax);
}
void find_max_from_blocks(float *maxarr, int *maxstart, int *maxend, int numBlock,int *startend, float *bigmax) {
*bigmax = 0;
for (int i = 0; i < numBlock; i++) {
if (*bigmax < maxarr[i]) {
*bigmax = maxarr[i];
startend[0] = maxstart[i];
startend[1] = maxend[i];
}
}
}
void s_max_burst(float *arr, int n, int k) {
float mymaxval = -1;
int perstart, perlen,perend, mystart, myend;
float xbar;
for (perstart = 0; perstart <= n-k; perstart++) {
for (perlen = k; perlen <= n - perstart; perlen++) {
perend = perstart + perlen -1;
xbar = mean(arr, perstart, perend);
if (xbar > mymaxval) {
mymaxval = xbar;
mystart = perstart;
myend = perend;
}
}
}
printf("\nburst start from %d end %d, max-mean is %f\n", mystart, myend,mymaxval);
}
float mean(float *y, int s, int e){
int i;
float tot =0;
for (i=s; i<=e; i++) tot += y[i];
return tot / (e -s + 1);
} | /* This code implements the serial solution and CUDA version for finding the maximal burst in a time series;
How to compile:
nvcc compare.cu
How to run:
./a.out n k //n is the length of the time series and k is the minimum length of a subsequence
Results to see:
The bursts found by the two methods are printed out: "burst start from .. end at ..; max-mean is .."
Notes:
The serial solution takes a long time for a large n (e.g. n=3000)
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
// kernel function
__global__ void max_each_block(float *dmaxarr, int *dmaxstart, int *dmaxend, float * darr, int n, int k);
// max_burst calls the kernel, which returns three arrays: one for the maxval, one for the startpoint, one for the endpoint in each block;
void max_burst(float *x, int n, int k, int *startend, float *bigmax);
//find the maximum from the returned arrays from the kernel. This function is called by max_burst
void find_max_from_blocks(float *maxarr, int *maxstart, int *maxend, int numBlock,int *startend, float *bigmax);
//serial solution
void s_max_burst(float *arr, int n, int k);
float mean(float *y, int s, int e);
int main(int argc, char **argv) {
int n = atoi(argv[1]);
int k = atoi(argv[2]);
//generate a 1d array
float *arr = (float*) malloc(n*sizeof(float));
int i;
for (i = n; i > 0; i--) {
arr[n-i] = (float)(rand() % 80);
}
// Cuda solution
int startend[2];
float bigmax;
max_burst(arr, n,k, startend, &bigmax);
// serial solution
s_max_burst(arr, n,k);
return 0;
}
__global__ void max_each_block(float *dmaxarr, int *dmaxstart, int *dmaxend, float * darr, int n,int k) {
// declare three arrays for the maximum found by each thread
// learning material for shared memory: https://devblogs.nvidia.com/parallelforall/using-shared-memory-cuda-cc/
extern __shared__ float sh[];
float *mymaxvals = sh;
int *mystartmaxes = (int *)&mymaxvals[blockDim.x];
int *myendmaxes = (int *)&mystartmaxes[blockDim.x];
int perstart = threadIdx.x + blockDim.x * blockIdx.x;
int perlen, perend;
double xbar; // a temporary variable used when computing the mean of a subsequence
int i, tid = threadIdx.x;
if (perstart <= n-k) {
for (perlen = k ; perlen <= n - perstart ; perlen++) {
perend = perstart + perlen - 1;
//compute the mean of subsequence incrementally
if (perlen ==k) {
xbar = 0;
for ( i = perstart; i <= perend; i++) {
xbar += darr[i];
}
xbar /= (perend - perstart + 1);
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
} else {
xbar = ( (perlen-1) * xbar + darr[perend] ) / perlen;
}
//update the mymaxvals[tid] if the next subsequence in a thread has a higher mean
if (xbar > mymaxvals[tid]) {
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
}
}
} else {
mymaxvals[tid] = 0;//initialize it with the smallest number
}
__syncthreads(); //sync to make sure each thread in this block has done with the for loop
// get the highest among the mymaxvals using reduce
for (int s = blockDim.x/2; s > 0; s>>=1) {
if (tid < s ) {
if(mymaxvals[tid+s] > mymaxvals[tid]) {
mymaxvals[tid] = mymaxvals[tid+s];
mystartmaxes[tid] = mystartmaxes[tid + s];
myendmaxes[tid] = myendmaxes[tid + s];
}
}
__syncthreads();
}
//put the maximum among the mymaxvals in this block to dmaxarr
if(tid == 0) {
dmaxarr[blockIdx.x] = mymaxvals[0];
dmaxstart[blockIdx.x] = mystartmaxes[0];
dmaxend[blockIdx.x] = myendmaxes[0];
}
}
void max_burst(float *x, int n, int k, int *startend, float *bigmax) {
const int numthreadsBlock = 1024;
int numBlock = ( n + numthreadsBlock - 1)/numthreadsBlock;
//declare arrays on cpu to store the results from the kernel
float *maxarr = (float *)malloc(numBlock * sizeof(float));
int *maxstart = (int *)malloc(numBlock * sizeof(int));
int *maxend = (int *)malloc(numBlock * sizeof(int));
// declare GPU memory pointers
float *darr, * dmaxarr;
int *dmaxstart, *dmaxend;
hipMalloc((void **)&darr, n*sizeof(float));
hipMalloc((void **)&dmaxarr, numBlock*sizeof(float));
hipMalloc((void **)&dmaxstart, numBlock*sizeof(int));
hipMalloc((void **)&dmaxend, numBlock*sizeof(int));
//copy the input x to device
hipMemcpy(darr, x, n*sizeof(float), hipMemcpyHostToDevice);
// execution configuration
dim3 dimGrid(numBlock,1);
dim3 dimBlock(numthreadsBlock,1,1);
//call the kernel
max_each_block<<<dimGrid,dimBlock,(3*numthreadsBlock)*sizeof(float)>>>(dmaxarr,dmaxstart,dmaxend, darr, n, k);
hipDeviceSynchronize();
//copy the results from device to cpu
hipMemcpy(maxarr, dmaxarr, numBlock*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(maxstart, dmaxstart, numBlock*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(maxend, dmaxend, numBlock*sizeof(int), hipMemcpyDeviceToHost);
//free memory on device
hipFree(darr);
hipFree(dmaxarr);
hipFree(dmaxstart);
hipFree(dmaxend);
find_max_from_blocks( maxarr, maxstart, maxend, numBlock,startend, bigmax);
printf("burst start from %d end at %d; max-mean is %f\n", startend[0], startend[1], *bigmax);
}
void find_max_from_blocks(float *maxarr, int *maxstart, int *maxend, int numBlock,int *startend, float *bigmax) {
*bigmax = 0;
for (int i = 0; i < numBlock; i++) {
if (*bigmax < maxarr[i]) {
*bigmax = maxarr[i];
startend[0] = maxstart[i];
startend[1] = maxend[i];
}
}
}
void s_max_burst(float *arr, int n, int k) {
float mymaxval = -1;
int perstart, perlen,perend, mystart, myend;
float xbar;
for (perstart = 0; perstart <= n-k; perstart++) {
for (perlen = k; perlen <= n - perstart; perlen++) {
perend = perstart + perlen -1;
xbar = mean(arr, perstart, perend);
if (xbar > mymaxval) {
mymaxval = xbar;
mystart = perstart;
myend = perend;
}
}
}
printf("\nburst start from %d end %d, max-mean is %f\n", mystart, myend,mymaxval);
}
float mean(float *y, int s, int e){
int i;
float tot =0;
for (i=s; i<=e; i++) tot += y[i];
return tot / (e -s + 1);
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | /* This code implements the serial solution and CUDA version for finding the maximal burst in a time series;
How to compile:
nvcc compare.cu
How to run:
./a.out n k //n is the length of the time series and k is the minimum length of a subsequence
Results to see:
The bursts found by the two methods are printed out: "burst start from .. end at ..; max-mean is .."
Notes:
The serial solution takes a long time for a large n (e.g. n=3000)
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
// kernel function
__global__ void max_each_block(float *dmaxarr, int *dmaxstart, int *dmaxend, float * darr, int n, int k);
// max_burst calls the kernel, which returns three arrays: one for the maxval, one for the startpoint, one for the endpoint in each block;
void max_burst(float *x, int n, int k, int *startend, float *bigmax);
//find the maximum from the returned arrays from the kernel. This function is called by max_burst
void find_max_from_blocks(float *maxarr, int *maxstart, int *maxend, int numBlock,int *startend, float *bigmax);
//serial solution
void s_max_burst(float *arr, int n, int k);
float mean(float *y, int s, int e);
int main(int argc, char **argv) {
int n = atoi(argv[1]);
int k = atoi(argv[2]);
//generate a 1d array
float *arr = (float*) malloc(n*sizeof(float));
int i;
for (i = n; i > 0; i--) {
arr[n-i] = (float)(rand() % 80);
}
// Cuda solution
int startend[2];
float bigmax;
max_burst(arr, n,k, startend, &bigmax);
// serial solution
s_max_burst(arr, n,k);
return 0;
}
__global__ void max_each_block(float *dmaxarr, int *dmaxstart, int *dmaxend, float * darr, int n,int k) {
// declare three arrays for the maximum found by each thread
// learning material for shared memory: https://devblogs.nvidia.com/parallelforall/using-shared-memory-cuda-cc/
extern __shared__ float sh[];
float *mymaxvals = sh;
int *mystartmaxes = (int *)&mymaxvals[blockDim.x];
int *myendmaxes = (int *)&mystartmaxes[blockDim.x];
int perstart = threadIdx.x + blockDim.x * blockIdx.x;
int perlen, perend;
double xbar; // a temporary variable used when computing the mean of a subsequence
int i, tid = threadIdx.x;
if (perstart <= n-k) {
for (perlen = k ; perlen <= n - perstart ; perlen++) {
perend = perstart + perlen - 1;
//compute the mean of subsequence incrementally
if (perlen ==k) {
xbar = 0;
for ( i = perstart; i <= perend; i++) {
xbar += darr[i];
}
xbar /= (perend - perstart + 1);
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
} else {
xbar = ( (perlen-1) * xbar + darr[perend] ) / perlen;
}
//update the mymaxvals[tid] if the next subsequence in a thread has a higher mean
if (xbar > mymaxvals[tid]) {
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
}
}
} else {
mymaxvals[tid] = 0;//initialize it with the smallest number
}
__syncthreads(); //sync to make sure each thread in this block has done with the for loop
// get the highest among the mymaxvals using reduce
for (int s = blockDim.x/2; s > 0; s>>=1) {
if (tid < s ) {
if(mymaxvals[tid+s] > mymaxvals[tid]) {
mymaxvals[tid] = mymaxvals[tid+s];
mystartmaxes[tid] = mystartmaxes[tid + s];
myendmaxes[tid] = myendmaxes[tid + s];
}
}
__syncthreads();
}
//put the maximum among the mymaxvals in this block to dmaxarr
if(tid == 0) {
dmaxarr[blockIdx.x] = mymaxvals[0];
dmaxstart[blockIdx.x] = mystartmaxes[0];
dmaxend[blockIdx.x] = myendmaxes[0];
}
}
void max_burst(float *x, int n, int k, int *startend, float *bigmax) {
const int numthreadsBlock = 1024;
int numBlock = ( n + numthreadsBlock - 1)/numthreadsBlock;
//declare arrays on cpu to store the results from the kernel
float *maxarr = (float *)malloc(numBlock * sizeof(float));
int *maxstart = (int *)malloc(numBlock * sizeof(int));
int *maxend = (int *)malloc(numBlock * sizeof(int));
// declare GPU memory pointers
float *darr, * dmaxarr;
int *dmaxstart, *dmaxend;
hipMalloc((void **)&darr, n*sizeof(float));
hipMalloc((void **)&dmaxarr, numBlock*sizeof(float));
hipMalloc((void **)&dmaxstart, numBlock*sizeof(int));
hipMalloc((void **)&dmaxend, numBlock*sizeof(int));
//copy the input x to device
hipMemcpy(darr, x, n*sizeof(float), hipMemcpyHostToDevice);
// execution configuration
dim3 dimGrid(numBlock,1);
dim3 dimBlock(numthreadsBlock,1,1);
//call the kernel
max_each_block<<<dimGrid,dimBlock,(3*numthreadsBlock)*sizeof(float)>>>(dmaxarr,dmaxstart,dmaxend, darr, n, k);
hipDeviceSynchronize();
//copy the results from device to cpu
hipMemcpy(maxarr, dmaxarr, numBlock*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(maxstart, dmaxstart, numBlock*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(maxend, dmaxend, numBlock*sizeof(int), hipMemcpyDeviceToHost);
//free memory on device
hipFree(darr);
hipFree(dmaxarr);
hipFree(dmaxstart);
hipFree(dmaxend);
find_max_from_blocks( maxarr, maxstart, maxend, numBlock,startend, bigmax);
printf("burst start from %d end at %d; max-mean is %f\n", startend[0], startend[1], *bigmax);
}
void find_max_from_blocks(float *maxarr, int *maxstart, int *maxend, int numBlock,int *startend, float *bigmax) {
*bigmax = 0;
for (int i = 0; i < numBlock; i++) {
if (*bigmax < maxarr[i]) {
*bigmax = maxarr[i];
startend[0] = maxstart[i];
startend[1] = maxend[i];
}
}
}
void s_max_burst(float *arr, int n, int k) {
float mymaxval = -1;
int perstart, perlen,perend, mystart, myend;
float xbar;
for (perstart = 0; perstart <= n-k; perstart++) {
for (perlen = k; perlen <= n - perstart; perlen++) {
perend = perstart + perlen -1;
xbar = mean(arr, perstart, perend);
if (xbar > mymaxval) {
mymaxval = xbar;
mystart = perstart;
myend = perend;
}
}
}
printf("\nburst start from %d end %d, max-mean is %f\n", mystart, myend,mymaxval);
}
float mean(float *y, int s, int e){
int i;
float tot =0;
for (i=s; i<=e; i++) tot += y[i];
return tot / (e -s + 1);
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14max_each_blockPfPiS0_S_ii
.globl _Z14max_each_blockPfPiS0_S_ii
.p2align 8
.type _Z14max_each_blockPfPiS0_S_ii,@function
_Z14max_each_blockPfPiS0_S_ii:
s_clause 0x1
s_load_b32 s5, s[0:1], 0x34
s_load_b64 s[2:3], s[0:1], 0x20
s_mov_b32 s4, s15
s_waitcnt lgkmcnt(0)
s_and_b32 s9, s5, 0xffff
s_sub_i32 s5, s2, s3
v_mad_u64_u32 v[1:2], null, s4, s9, v[0:1]
s_lshl_b32 s6, s9, 2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_add_i32 s8, s6, 0
v_cmp_ge_i32_e32 vcc_lo, s5, v1
s_add_i32 s5, s8, s6
s_and_saveexec_b32 s6, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s10, exec_lo, s6
s_cbranch_execz .LBB0_15
v_sub_nc_u32_e32 v12, s2, v1
s_mov_b32 s11, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_le_i32_e64 s3, v12
s_cbranch_execz .LBB0_14
v_lshlrev_b32_e32 v4, 2, v0
s_load_b64 s[6:7], s[0:1], 0x18
v_ashrrev_i32_e32 v2, 31, v1
s_mov_b32 s12, 0
s_mov_b32 s13, s3
v_add_nc_u32_e32 v13, 0, v4
v_add_nc_u32_e32 v14, s8, v4
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_nc_u32_e32 v15, s5, v4
ds_load_b32 v10, v13
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s6, v2
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v3, vcc_lo
s_branch .LBB0_4
.LBB0_3:
s_or_b32 exec_lo, exec_lo, s2
v_cmp_ge_i32_e32 vcc_lo, s13, v12
s_add_i32 s13, s13, 1
s_or_b32 s12, vcc_lo, s12
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s12
s_cbranch_execz .LBB0_14
.LBB0_4:
v_cvt_f64_i32_e32 v[6:7], s13
v_add_nc_u32_e32 v16, s13, v1
s_cmp_lg_u32 s13, s3
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v4, -1, v16
s_cbranch_scc0 .LBB0_6
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v5, 31, v4
s_add_i32 s2, s13, -1
v_lshlrev_b64 v[17:18], 2, v[4:5]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v17, vcc_lo, s6, v17
v_add_co_ci_u32_e32 v18, vcc_lo, s7, v18, vcc_lo
global_load_b32 v5, v[17:18], off
v_cvt_f64_i32_e32 v[17:18], s2
s_waitcnt vmcnt(0)
v_cvt_f64_f32_e32 v[19:20], v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[8:9], v[8:9], v[17:18], v[19:20]
v_div_scale_f64 v[17:18], null, v[6:7], v[6:7], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f64_e32 v[19:20], v[17:18]
s_waitcnt_depctr 0xfff
v_fma_f64 v[21:22], -v[17:18], v[19:20], 1.0
v_fma_f64 v[19:20], v[19:20], v[21:22], v[19:20]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[21:22], -v[17:18], v[19:20], 1.0
v_fma_f64 v[19:20], v[19:20], v[21:22], v[19:20]
v_div_scale_f64 v[21:22], vcc_lo, v[8:9], v[6:7], v[8:9]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f64 v[23:24], v[21:22], v[19:20]
v_fma_f64 v[17:18], -v[17:18], v[23:24], v[21:22]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fmas_f64 v[17:18], v[17:18], v[19:20], v[23:24]
v_div_fixup_f64 v[8:9], v[17:18], v[6:7], v[8:9]
s_cbranch_execz .LBB0_7
s_branch .LBB0_12
.LBB0_6:
.LBB0_7:
v_mov_b32_e32 v8, 0
v_mov_b32_e32 v9, 0
s_cmp_lt_i32 s13, 1
s_cbranch_scc1 .LBB0_11
v_dual_mov_b32 v11, v3 :: v_dual_mov_b32 v10, v2
v_mov_b32_e32 v5, v1
s_mov_b32 s14, 0
.LBB0_9:
global_load_b32 v17, v[10:11], off
v_add_nc_u32_e32 v5, 1, v5
v_add_co_u32 v10, s2, v10, 4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v11, s2, 0, v11, s2
v_cmp_ge_i32_e32 vcc_lo, v5, v16
s_or_b32 s14, vcc_lo, s14
s_waitcnt vmcnt(0)
v_cvt_f64_f32_e32 v[17:18], v17
s_delay_alu instid0(VALU_DEP_1)
v_add_f64 v[8:9], v[8:9], v[17:18]
s_and_not1_b32 exec_lo, exec_lo, s14
s_cbranch_execnz .LBB0_9
s_or_b32 exec_lo, exec_lo, s14
.LBB0_11:
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_div_scale_f64 v[10:11], null, v[6:7], v[6:7], v[8:9]
v_div_scale_f64 v[20:21], vcc_lo, v[8:9], v[6:7], v[8:9]
v_rcp_f64_e32 v[16:17], v[10:11]
s_waitcnt_depctr 0xfff
v_fma_f64 v[18:19], -v[10:11], v[16:17], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[16:17], v[16:17], v[18:19], v[16:17]
v_fma_f64 v[18:19], -v[10:11], v[16:17], 1.0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[16:17], v[16:17], v[18:19], v[16:17]
v_mul_f64 v[18:19], v[20:21], v[16:17]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[10:11], -v[10:11], v[18:19], v[20:21]
v_div_fmas_f64 v[10:11], v[10:11], v[16:17], v[18:19]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_div_fixup_f64 v[8:9], v[10:11], v[6:7], v[8:9]
v_cvt_f32_f64_e32 v10, v[8:9]
ds_store_b32 v13, v10
ds_store_b32 v14, v1
ds_store_b32 v15, v4
.LBB0_12:
v_cvt_f64_f32_e32 v[5:6], v10
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_f64_e32 v[8:9], v[5:6]
s_cbranch_execz .LBB0_3
v_cvt_f32_f64_e32 v10, v[8:9]
ds_store_b32 v13, v10
ds_store_b32 v14, v1
ds_store_b32 v15, v4
s_branch .LBB0_3
.LBB0_14:
s_or_b32 exec_lo, exec_lo, s11
.LBB0_15:
s_and_not1_saveexec_b32 s2, s10
s_cbranch_execz .LBB0_17
v_lshl_add_u32 v1, v0, 2, 0
v_mov_b32_e32 v2, 0
ds_store_b32 v1, v2
.LBB0_17:
s_or_b32 exec_lo, exec_lo, s2
s_cmp_lt_u32 s9, 2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_23
v_lshlrev_b32_e32 v3, 2, v0
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v1, 0, v3
v_add_nc_u32_e32 v2, s8, v3
v_add_nc_u32_e32 v3, s5, v3
s_set_inst_prefetch_distance 0x1
s_branch .LBB0_20
.p2align 6
.LBB0_19:
s_or_b32 exec_lo, exec_lo, s3
s_cmp_lt_u32 s2, 4
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_23
.LBB0_20:
s_mov_b32 s2, s9
s_lshr_b32 s9, s9, 1
s_mov_b32 s3, exec_lo
v_cmpx_gt_u32_e64 s9, v0
s_cbranch_execz .LBB0_19
v_add_nc_u32_e32 v5, s9, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v4, v5, 2, 0
ds_load_b32 v4, v4
ds_load_b32 v6, v1
s_waitcnt lgkmcnt(0)
v_cmp_gt_f32_e32 vcc_lo, v4, v6
s_and_b32 exec_lo, exec_lo, vcc_lo
s_cbranch_execz .LBB0_19
v_lshlrev_b32_e32 v5, 2, v5
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v6, s8, v5
v_add_nc_u32_e32 v5, s5, v5
ds_load_b32 v6, v6
s_waitcnt lgkmcnt(0)
ds_store_b32 v2, v6
ds_load_b32 v5, v5
ds_store_b32 v1, v4
s_waitcnt lgkmcnt(1)
ds_store_b32 v3, v5
s_branch .LBB0_19
.LBB0_23:
s_set_inst_prefetch_distance 0x2
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB0_25
s_load_b128 s[12:15], s[0:1], 0x0
v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v1, s8
v_dual_mov_b32 v2, s5 :: v_dual_mov_b32 v3, 0
s_load_b64 s[0:1], s[0:1], 0x10
ds_load_b32 v0, v0
ds_load_b32 v1, v1
ds_load_b32 v2, v2
s_mov_b32 s5, 0
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b64 s[2:3], s[4:5], 2
s_waitcnt lgkmcnt(0)
s_add_u32 s4, s12, s2
s_addc_u32 s5, s13, s3
s_add_u32 s6, s14, s2
s_addc_u32 s7, s15, s3
s_add_u32 s0, s0, s2
s_addc_u32 s1, s1, s3
s_clause 0x2
global_store_b32 v3, v0, s[4:5]
global_store_b32 v3, v1, s[6:7]
global_store_b32 v3, v2, s[0:1]
.LBB0_25:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14max_each_blockPfPiS0_S_ii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 25
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14max_each_blockPfPiS0_S_ii, .Lfunc_end0-_Z14max_each_blockPfPiS0_S_ii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 36
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
- .offset: 160
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14max_each_blockPfPiS0_S_ii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14max_each_blockPfPiS0_S_ii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 25
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | /* This code implements the serial solution and CUDA version for finding the maximal burst in a time series;
How to compile:
nvcc compare.cu
How to run:
./a.out n k //n is the length of the time series and k is the minimum length of a subsequence
Results to see:
The bursts found by the two methods are printed out: "burst start from .. end at ..; max-mean is .."
Notes:
The serial solution takes a long time for a large n (e.g. n=3000)
*/
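// (added note) Build-and-run sketch for this HIP port: the instructions above describe the
// original CUDA build (nvcc compare.cu); for this file the equivalent would roughly be the
// following, assuming a ROCm toolchain with hipcc on the PATH (the n and k values are arbitrary):
//   hipcc compare.hip -o compare
//   ./compare 2000 10    // n = 2000 points, k = 10 minimum subsequence length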
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
// kernel function
__global__ void max_each_block(float *dmaxarr, int *dmaxstart, int *dmaxend, float * darr, int n, int k);
// max_burst calls the kernel and returns three arrays: one for the max value, one for the start point, and one for the end point in each block;
void max_burst(float *x, int n, int k, int *startend, float *bigmax);
//find the maximum from the returned arrays from the kernel. This function is called by max_burst
void find_max_from_blocks(float *maxarr, int *maxstart, int *maxend, int numBlock,int *startend, float *bigmax);
//serial solution
void s_max_burst(float *arr, int n, int k);
float mean(float *y, int s, int e);
int main(int argc, char **argv) {
int n = atoi(argv[1]);
int k = atoi(argv[2]);
//generate a 1d array
float *arr = (float*) malloc(n*sizeof(float));
int i;
for (i = n; i > 0; i--) {
arr[n-i] = (float)(rand() % 80);
}
// Cuda solution
int startend[2];
float bigmax;
max_burst(arr, n,k, startend, &bigmax);
// serial solution
s_max_burst(arr, n,k);
return 0;
}
__global__ void max_each_block(float *dmaxarr, int *dmaxstart, int *dmaxend, float * darr, int n,int k) {
// declare three arrays for the maximum found by each thread
// learning material for shared memory: https://devblogs.nvidia.com/parallelforall/using-shared-memory-cuda-cc/
extern __shared__ float sh[];
float *mymaxvals = sh;
int *mystartmaxes = (int *)&mymaxvals[blockDim.x];
int *myendmaxes = (int *)&mystartmaxes[blockDim.x];
int perstart = threadIdx.x + blockDim.x * blockIdx.x;
int perlen, perend;
double xbar; // a temporary variable used when computing the mean of a subsequence
int i, tid = threadIdx.x;
if (perstart <= n-k) {
for (perlen = k ; perlen <= n - perstart ; perlen++) {
perend = perstart + perlen - 1;
//compute the mean of subsequence incrementally
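//(added comment) for the first window (perlen == k) the mean is computed directly; afterwards it
//is updated incrementally: the previous mean covered perlen-1 points, so their sum is
//(perlen-1)*xbar, and adding darr[perend] then dividing by perlen gives the mean of the
//extended subsequence without rescanning it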
if (perlen ==k) {
xbar = 0;
for ( i = perstart; i <= perend; i++) {
xbar += darr[i];
}
xbar /= (perend - perstart + 1);
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
} else {
xbar = ( (perlen-1) * xbar + darr[perend] ) / perlen;
}
//update the mymaxvals[tid] if the next subsequence in a thread has a higher mean
if (xbar > mymaxvals[tid]) {
mymaxvals[tid] = xbar;
mystartmaxes[tid] = perstart;
myendmaxes[tid] = perend;
}
}
} else {
mymaxvals[tid] = 0;//initialize it with the smallest number
}
__syncthreads(); //sync to make sure each thread in this block is done with the for loop
// get the highest among the mymaxvals using reduce
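//(added comment) classic shared-memory tree reduction: each pass halves the number of active
//threads, and thread tid keeps the larger of slots tid and tid+s together with its start/end
//indices, so after log2(blockDim.x) passes the block-wide maximum ends up in slot 0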
for (int s = blockDim.x/2; s > 0; s>>=1) {
if (tid < s ) {
if(mymaxvals[tid+s] > mymaxvals[tid]) {
mymaxvals[tid] = mymaxvals[tid+s];
mystartmaxes[tid] = mystartmaxes[tid + s];
myendmaxes[tid] = myendmaxes[tid + s];
}
}
__syncthreads();
}
//put the maximum among the mymaxvals in this block to dmaxarr
if(tid == 0) {
dmaxarr[blockIdx.x] = mymaxvals[0];
dmaxstart[blockIdx.x] = mystartmaxes[0];
dmaxend[blockIdx.x] = myendmaxes[0];
}
}
void max_burst(float *x, int n, int k, int *startend, float *bigmax) {
const int numthreadsBlock = 1024;
int numBlock = ( n + numthreadsBlock - 1)/numthreadsBlock;
//declare arrays on cpu to store the results from the kernel
float *maxarr = (float *)malloc(numBlock * sizeof(float));
int *maxstart = (int *)malloc(numBlock * sizeof(int));
int *maxend = (int *)malloc(numBlock * sizeof(int));
// declare GPU memory pointers
float *darr, * dmaxarr;
int *dmaxstart, *dmaxend;
hipMalloc((void **)&darr, n*sizeof(float));
hipMalloc((void **)&dmaxarr, numBlock*sizeof(float));
hipMalloc((void **)&dmaxstart, numBlock*sizeof(int));
hipMalloc((void **)&dmaxend, numBlock*sizeof(int));
//copy the input x to device
hipMemcpy(darr, x, n*sizeof(float), hipMemcpyHostToDevice);
// execution configuration
dim3 dimGrid(numBlock,1);
dim3 dimBlock(numthreadsBlock,1,1);
//call the kernel
max_each_block<<<dimGrid,dimBlock,(3*numthreadsBlock)*sizeof(float)>>>(dmaxarr,dmaxstart,dmaxend, darr, n, k);
hipDeviceSynchronize();
//copy the results from device to cpu
hipMemcpy(maxarr, dmaxarr, numBlock*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(maxstart, dmaxstart, numBlock*sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(maxend, dmaxend, numBlock*sizeof(int), hipMemcpyDeviceToHost);
//free memory on device
hipFree(darr);
hipFree(dmaxarr);
hipFree(dmaxstart);
hipFree(dmaxend);
find_max_from_blocks( maxarr, maxstart, maxend, numBlock,startend, bigmax);
printf("burst start from %d end at %d; max-mean is %f\n", startend[0], startend[1], *bigmax);
}
void find_max_from_blocks(float *maxarr, int *maxstart, int *maxend, int numBlock,int *startend, float *bigmax) {
*bigmax = 0;
for (int i = 0; i < numBlock; i++) {
if (*bigmax < maxarr[i]) {
*bigmax = maxarr[i];
startend[0] = maxstart[i];
startend[1] = maxend[i];
}
}
}
void s_max_burst(float *arr, int n, int k) {
float mymaxval = -1;
int perstart, perlen,perend, mystart, myend;
float xbar;
for (perstart = 0; perstart <= n-k; perstart++) {
for (perlen = k; perlen <= n - perstart; perlen++) {
perend = perstart + perlen -1;
xbar = mean(arr, perstart, perend);
if (xbar > mymaxval) {
mymaxval = xbar;
mystart = perstart;
myend = perend;
}
}
}
printf("\nburst start from %d end %d, max-mean is %f\n", mystart, myend,mymaxval);
}
float mean(float *y, int s, int e){
int i;
float tot =0;
for (i=s; i<=e; i++) tot += y[i];
return tot / (e -s + 1);
} | .text
.file "compare.hip"
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI0_0:
.long 0xbf800000 # float -1
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $16, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rbx
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movslq %r12d, %r14
leaq (,%r14,4), %rdi
callq malloc
movq %rax, %r15
testl %r14d, %r14d
jle .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %r12d, %r12d
incq %r12
movq %r15, %r13
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
shll $4, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r13)
decq %r12
addq $4, %r13
cmpq $1, %r12
jg .LBB0_2
.LBB0_3: # %._crit_edge
leaq 8(%rsp), %rcx
leaq 4(%rsp), %r8
movq %r15, %rdi
movl %r14d, %esi
movl %ebx, %edx
callq _Z9max_burstPfiiPiS_
leal 1(%r14), %eax
movslq %ebx, %rdx
movl %eax, %ecx
subl %ebx, %ecx
subq %rdx, %r14
movss .LCPI0_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorl %edi, %edi
# implicit-def: $edx
# implicit-def: $esi
jmp .LBB0_4
.p2align 4, 0x90
.LBB0_10: # %._crit_edge.i
# in Loop: Header=BB0_4 Depth=1
incq %rdi
decl %eax
addq $4, %r15
cmpq %rcx, %rdi
je .LBB0_11
.LBB0_4: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB0_6 Depth 2
# Child Loop BB0_8 Depth 3
cmpq %r14, %rdi
jg .LBB0_10
# %bb.5: # %.lr.ph.i.preheader
# in Loop: Header=BB0_4 Depth=1
leal -1(%rdi), %r8d
movq %rbx, %r9
movaps %xmm0, %xmm1
jmp .LBB0_6
.p2align 4, 0x90
.LBB0_9: # %_Z4meanPfii.exit.i
# in Loop: Header=BB0_6 Depth=2
xorps %xmm2, %xmm2
cvtsi2ss %r9d, %xmm2
divss %xmm2, %xmm0
ucomiss %xmm1, %xmm0
cmoval %edi, %esi
leal (%r8,%r9), %r10d
cmoval %r10d, %edx
maxss %xmm1, %xmm0
incq %r9
movaps %xmm0, %xmm1
cmpl %eax, %r9d
je .LBB0_10
.LBB0_6: # %.lr.ph.i
# Parent Loop BB0_4 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB0_8 Depth 3
xorps %xmm0, %xmm0
testl %r9d, %r9d
jle .LBB0_9
# %bb.7: # %.lr.ph.i.i.preheader
# in Loop: Header=BB0_6 Depth=2
xorl %r10d, %r10d
.p2align 4, 0x90
.LBB0_8: # %.lr.ph.i.i
# Parent Loop BB0_4 Depth=1
# Parent Loop BB0_6 Depth=2
# => This Inner Loop Header: Depth=3
addss (%r15,%r10,4), %xmm0
incq %r10
cmpl %r10d, %r9d
jne .LBB0_8
jmp .LBB0_9
.LBB0_11: # %_Z11s_max_burstPfii.exit
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $16, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.globl _Z9max_burstPfiiPiS_ # -- Begin function _Z9max_burstPfiiPiS_
.p2align 4, 0x90
.type _Z9max_burstPfiiPiS_,@function
_Z9max_burstPfiiPiS_: # @_Z9max_burstPfiiPiS_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r8, 72(%rsp) # 8-byte Spill
movq %rcx, %r14
movl %edx, %r13d
movl %esi, %ebx
movq %rbx, 40(%rsp) # 8-byte Spill
leal 1023(%rbx), %eax
leal 2046(%rbx), %r12d
testl %eax, %eax
cmovnsl %eax, %r12d
movq %rdi, 48(%rsp) # 8-byte Spill
sarl $10, %r12d
movslq %r12d, %rbp
shlq $2, %rbp
movq %rbp, %rdi
callq malloc
movq %rax, %r15
movq %rbp, %rdi
callq malloc
movq %rax, 64(%rsp) # 8-byte Spill
movq %rbp, %rdi
callq malloc
movq %rax, 56(%rsp) # 8-byte Spill
movslq %ebx, %rbx
shlq $2, %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
leaq 16(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
movq %rsp, %rdi
movq %rbp, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq 48(%rsp), %rsi # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967296, %rdx # imm = 0x100000000
leaq (%r12,%rdx), %rdi
orq $1024, %rdx # imm = 0x400
movl $12288, %r8d # imm = 0x3000
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq 24(%rsp), %rsi
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movq %rdx, 136(%rsp)
movq %rsi, 128(%rsp)
movq 40(%rsp), %rax # 8-byte Reload
movl %eax, 36(%rsp)
movl %r13d, 32(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 136(%rsp), %rax
movq %rax, 176(%rsp)
leaq 128(%rsp), %rax
movq %rax, 184(%rsp)
leaq 36(%rsp), %rax
movq %rax, 192(%rsp)
leaq 32(%rsp), %rax
movq %rax, 200(%rsp)
leaq 112(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
movq 96(%rsp), %rcx
movl 104(%rsp), %r8d
leaq 160(%rsp), %r9
movl $_Z14max_each_blockPfPiS0_S_ii, %edi
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movq 16(%rsp), %rsi
movq %r15, %rdi
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
movq 64(%rsp), %r13 # 8-byte Reload
movq %r13, %rdi
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
movq (%rsp), %rsi
movq 56(%rsp), %rbx # 8-byte Reload
movq %rbx, %rdi
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq 72(%rsp), %rdi # 8-byte Reload
movl $0, (%rdi)
cmpl $0, 40(%rsp) # 4-byte Folded Reload
jle .LBB1_7
# %bb.3: # %.lr.ph.i
xorps %xmm0, %xmm0
xorl %eax, %eax
jmp .LBB1_4
.p2align 4, 0x90
.LBB1_6: # in Loop: Header=BB1_4 Depth=1
incq %rax
cmpq %rax, %r12
je .LBB1_7
.LBB1_4: # =>This Inner Loop Header: Depth=1
movss (%r15,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm0, %xmm1
jbe .LBB1_6
# %bb.5: # in Loop: Header=BB1_4 Depth=1
movss %xmm1, (%rdi)
movl (%r13,%rax,4), %ecx
movl %ecx, (%r14)
movl (%rbx,%rax,4), %ecx
movl %ecx, 4(%r14)
movaps %xmm1, %xmm0
jmp .LBB1_6
.LBB1_7: # %_Z20find_max_from_blocksPfPiS0_iS0_S_.exit
movl (%r14), %esi
movl 4(%r14), %edx
movss (%rdi), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z9max_burstPfiiPiS_, .Lfunc_end1-_Z9max_burstPfiiPiS_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z11s_max_burstPfii
.LCPI2_0:
.long 0xbf800000 # float -1
.text
.globl _Z11s_max_burstPfii
.p2align 4, 0x90
.type _Z11s_max_burstPfii,@function
_Z11s_max_burstPfii: # @_Z11s_max_burstPfii
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movslq %esi, %rcx
incl %esi
movslq %edx, %rax
movl %esi, %r8d
subl %eax, %r8d
subq %rax, %rcx
movl %eax, %r9d
movss .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorl %r10d, %r10d
# implicit-def: $edx
# implicit-def: $eax
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_7: # %._crit_edge
# in Loop: Header=BB2_1 Depth=1
incq %r10
decl %esi
addq $4, %rdi
cmpq %r8, %r10
je .LBB2_8
.LBB2_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
# Child Loop BB2_5 Depth 3
cmpq %rcx, %r10
jg .LBB2_7
# %bb.2: # %.lr.ph.preheader
# in Loop: Header=BB2_1 Depth=1
movq %r9, %r11
movaps %xmm0, %xmm1
jmp .LBB2_3
.p2align 4, 0x90
.LBB2_6: # %_Z4meanPfii.exit
# in Loop: Header=BB2_3 Depth=2
xorps %xmm2, %xmm2
cvtsi2ss %r11d, %xmm2
divss %xmm2, %xmm0
ucomiss %xmm1, %xmm0
cmoval %r10d, %eax
leal -1(%r11,%r10), %ebx
cmoval %ebx, %edx
maxss %xmm1, %xmm0
incq %r11
movaps %xmm0, %xmm1
cmpl %esi, %r11d
je .LBB2_7
.LBB2_3: # %.lr.ph
# Parent Loop BB2_1 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_5 Depth 3
xorps %xmm0, %xmm0
testl %r11d, %r11d
jle .LBB2_6
# %bb.4: # %.lr.ph.i.preheader
# in Loop: Header=BB2_3 Depth=2
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_5: # %.lr.ph.i
# Parent Loop BB2_1 Depth=1
# Parent Loop BB2_3 Depth=2
# => This Inner Loop Header: Depth=3
addss (%rdi,%rbx,4), %xmm0
incq %rbx
cmpl %ebx, %r11d
jne .LBB2_5
jmp .LBB2_6
.LBB2_8: # %._crit_edge43
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movl %eax, %esi
movb $1, %al
popq %rbx
.cfi_def_cfa_offset 8
jmp printf # TAILCALL
.Lfunc_end2:
.size _Z11s_max_burstPfii, .Lfunc_end2-_Z11s_max_burstPfii
.cfi_endproc
# -- End function
.globl _Z29__device_stub__max_each_blockPfPiS0_S_ii # -- Begin function _Z29__device_stub__max_each_blockPfPiS0_S_ii
.p2align 4, 0x90
.type _Z29__device_stub__max_each_blockPfPiS0_S_ii,@function
_Z29__device_stub__max_each_blockPfPiS0_S_ii: # @_Z29__device_stub__max_each_blockPfPiS0_S_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z14max_each_blockPfPiS0_S_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end3:
.size _Z29__device_stub__max_each_blockPfPiS0_S_ii, .Lfunc_end3-_Z29__device_stub__max_each_blockPfPiS0_S_ii
.cfi_endproc
# -- End function
.globl _Z20find_max_from_blocksPfPiS0_iS0_S_ # -- Begin function _Z20find_max_from_blocksPfPiS0_iS0_S_
.p2align 4, 0x90
.type _Z20find_max_from_blocksPfPiS0_iS0_S_,@function
_Z20find_max_from_blocksPfPiS0_iS0_S_: # @_Z20find_max_from_blocksPfPiS0_iS0_S_
.cfi_startproc
# %bb.0:
movl $0, (%r9)
testl %ecx, %ecx
jle .LBB4_5
# %bb.1: # %.lr.ph
movss (%r9), %xmm0 # xmm0 = mem[0],zero,zero,zero
movl %ecx, %eax
xorl %ecx, %ecx
jmp .LBB4_2
.p2align 4, 0x90
.LBB4_4: # in Loop: Header=BB4_2 Depth=1
incq %rcx
cmpq %rcx, %rax
je .LBB4_5
.LBB4_2: # =>This Inner Loop Header: Depth=1
movss (%rdi,%rcx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm0, %xmm1
jbe .LBB4_4
# %bb.3: # in Loop: Header=BB4_2 Depth=1
movss %xmm1, (%r9)
movl (%rsi,%rcx,4), %r10d
movl %r10d, (%r8)
movl (%rdx,%rcx,4), %r10d
movl %r10d, 4(%r8)
movaps %xmm1, %xmm0
jmp .LBB4_4
.LBB4_5: # %._crit_edge
retq
.Lfunc_end4:
.size _Z20find_max_from_blocksPfPiS0_iS0_S_, .Lfunc_end4-_Z20find_max_from_blocksPfPiS0_iS0_S_
.cfi_endproc
# -- End function
.globl _Z4meanPfii # -- Begin function _Z4meanPfii
.p2align 4, 0x90
.type _Z4meanPfii,@function
_Z4meanPfii: # @_Z4meanPfii
.cfi_startproc
# %bb.0:
# kill: def $edx killed $edx def $rdx
xorps %xmm0, %xmm0
subl %esi, %edx
jl .LBB5_3
# %bb.1: # %.lr.ph.preheader
movslq %esi, %rax
leaq (%rdi,%rax,4), %rax
leal 1(%rdx), %ecx
xorl %esi, %esi
.p2align 4, 0x90
.LBB5_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
addss (%rax,%rsi,4), %xmm0
incq %rsi
cmpl %esi, %ecx
jne .LBB5_2
.LBB5_3: # %._crit_edge
incl %edx
cvtsi2ss %edx, %xmm1
divss %xmm1, %xmm0
retq
.Lfunc_end5:
.size _Z4meanPfii, .Lfunc_end5-_Z4meanPfii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14max_each_blockPfPiS0_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14max_each_blockPfPiS0_S_ii,@object # @_Z14max_each_blockPfPiS0_S_ii
.section .rodata,"a",@progbits
.globl _Z14max_each_blockPfPiS0_S_ii
.p2align 3, 0x0
_Z14max_each_blockPfPiS0_S_ii:
.quad _Z29__device_stub__max_each_blockPfPiS0_S_ii
.size _Z14max_each_blockPfPiS0_S_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "burst start from %d end at %d
.size .L.str, 47
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\nburst start from %d end %d, max-mean is %f\n"
.size .L.str.1, 45
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14max_each_blockPfPiS0_S_ii"
.size .L__unnamed_1, 30
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__max_each_blockPfPiS0_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14max_each_blockPfPiS0_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0013a73d_00000000-6_compare.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z20find_max_from_blocksPfPiS0_iS0_S_
.type _Z20find_max_from_blocksPfPiS0_iS0_S_, @function
_Z20find_max_from_blocksPfPiS0_iS0_S_:
.LFB2059:
.cfi_startproc
endbr64
movq %rdx, %r10
movl $0x00000000, (%r9)
testl %ecx, %ecx
jle .L3
movslq %ecx, %rcx
leaq 0(,%rcx,4), %rdx
movl $0, %eax
jmp .L7
.L5:
addq $4, %rax
cmpq %rdx, %rax
je .L3
.L7:
movss (%rdi,%rax), %xmm0
comiss (%r9), %xmm0
jbe .L5
movss %xmm0, (%r9)
movl (%rsi,%rax), %ecx
movl %ecx, (%r8)
movl (%r10,%rax), %ecx
movl %ecx, 4(%r8)
jmp .L5
.L3:
ret
.cfi_endproc
.LFE2059:
.size _Z20find_max_from_blocksPfPiS0_iS0_S_, .-_Z20find_max_from_blocksPfPiS0_iS0_S_
.globl _Z4meanPfii
.type _Z4meanPfii, @function
_Z4meanPfii:
.LFB2061:
.cfi_startproc
endbr64
cmpl %edx, %esi
jg .L13
movslq %esi, %r8
leaq (%rdi,%r8,4), %rax
movl %edx, %ecx
subl %esi, %ecx
addq %r8, %rcx
leaq 4(%rdi,%rcx,4), %rcx
pxor %xmm0, %xmm0
.L12:
addss (%rax), %xmm0
addq $4, %rax
cmpq %rcx, %rax
jne .L12
.L11:
subl %esi, %edx
addl $1, %edx
pxor %xmm1, %xmm1
cvtsi2ssl %edx, %xmm1
divss %xmm1, %xmm0
ret
.L13:
pxor %xmm0, %xmm0
jmp .L11
.cfi_endproc
.LFE2061:
.size _Z4meanPfii, .-_Z4meanPfii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC2:
.string "\nburst start from %d end %d, max-mean is %f\n"
.text
.globl _Z11s_max_burstPfii
.type _Z11s_max_burstPfii, @function
_Z11s_max_burstPfii:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $40, %rsp
.cfi_def_cfa_offset 96
movl %edx, 24(%rsp)
movl %esi, %eax
subl %edx, %eax
js .L22
movq %rdi, %r13
movl %esi, %r14d
addl $1, %eax
movl %eax, 28(%rsp)
movl $0, %ebp
movss .LC1(%rip), %xmm2
movss %xmm2, 12(%rsp)
leal -1(%rdx), %r15d
jmp .L17
.L18:
addl $1, %ebx
cmpl %ebx, %r12d
je .L21
.L19:
movl %ebx, %edx
movl %ebp, %esi
movq %r13, %rdi
call _Z4meanPfii
comiss 12(%rsp), %xmm0
jbe .L18
movl %ebx, 20(%rsp)
movl %ebp, 16(%rsp)
movss %xmm0, 12(%rsp)
jmp .L18
.L21:
addl $1, %ebp
subl $1, %r14d
movl 28(%rsp), %eax
cmpl %eax, %ebp
je .L16
.L17:
leal (%r15,%rbp), %ebx
leal (%r14,%rbp), %r12d
cmpl %r14d, 24(%rsp)
jle .L19
jmp .L21
.L22:
movss .LC1(%rip), %xmm3
movss %xmm3, 12(%rsp)
.L16:
pxor %xmm0, %xmm0
cvtss2sd 12(%rsp), %xmm0
movl 20(%rsp), %ecx
movl 16(%rsp), %edx
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
addq $40, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z11s_max_burstPfii, .-_Z11s_max_burstPfii
.globl _Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii
.type _Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii, @function
_Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii:
.LFB2086:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L31
.L27:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L32
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L31:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z14max_each_blockPfPiS0_S_ii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L27
.L32:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii, .-_Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii
.globl _Z14max_each_blockPfPiS0_S_ii
.type _Z14max_each_blockPfPiS0_S_ii, @function
_Z14max_each_blockPfPiS0_S_ii:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z14max_each_blockPfPiS0_S_ii, .-_Z14max_each_blockPfPiS0_S_ii
.section .rodata.str1.8
.align 8
.LC3:
.string "burst start from %d end at %d; max-mean is %f\n"
.text
.globl _Z9max_burstPfiiPiS_
.type _Z9max_burstPfiiPiS_, @function
_Z9max_burstPfiiPiS_:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $120, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 32(%rsp)
movl %esi, %r12d
movl %edx, 44(%rsp)
movq %rcx, %r13
movq %r8, %r14
movq %fs:40, %rax
movq %rax, 104(%rsp)
xorl %eax, %eax
leal 2046(%rsi), %ebp
movl %esi, %eax
addl $1023, %eax
cmovns %eax, %ebp
sarl $10, %ebp
movslq %ebp, %rbx
salq $2, %rbx
movq %rbx, %rdi
call malloc@PLT
movq %rax, 8(%rsp)
movq %rbx, %rdi
call malloc@PLT
movq %rax, 16(%rsp)
movq %rbx, %rdi
call malloc@PLT
movq %rax, 24(%rsp)
movslq %r12d, %r15
salq $2, %r15
leaq 48(%rsp), %rdi
movq %r15, %rsi
call cudaMalloc@PLT
leaq 56(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movq %rbx, %rsi
call cudaMalloc@PLT
movl $1, %ecx
movq %r15, %rdx
movq 32(%rsp), %rsi
movq 48(%rsp), %rdi
call cudaMemcpy@PLT
movl %ebp, 80(%rsp)
movl $1, 84(%rsp)
movl $1024, 92(%rsp)
movl $1, 96(%rsp)
movl $0, %r9d
movl $12288, %r8d
movq 92(%rsp), %rdx
movl $1, %ecx
movq 80(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L39
.L36:
call cudaThreadSynchronize@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 56(%rsp), %rsi
movq 8(%rsp), %r12
movq %r12, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 64(%rsp), %rsi
movq 16(%rsp), %r15
movq %r15, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movq %rbx, %rdx
movq 72(%rsp), %rsi
movq 24(%rsp), %rbx
movq %rbx, %rdi
call cudaMemcpy@PLT
movq 48(%rsp), %rdi
call cudaFree@PLT
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq %r14, %r9
movq %r13, %r8
movl %ebp, %ecx
movq %rbx, %rdx
movq %r15, %rsi
movq %r12, %rdi
call _Z20find_max_from_blocksPfPiS0_iS0_S_
pxor %xmm0, %xmm0
cvtss2sd (%r14), %xmm0
movl 4(%r13), %ecx
movl 0(%r13), %edx
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 104(%rsp), %rax
subq %fs:40, %rax
jne .L40
addq $120, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
movl 44(%rsp), %r9d
movl %r12d, %r8d
movq 48(%rsp), %rcx
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z43__device_stub__Z14max_each_blockPfPiS0_S_iiPfPiS0_S_ii
jmp .L36
.L40:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z9max_burstPfiiPiS_, .-_Z9max_burstPfiiPiS_
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $32, %rsp
.cfi_def_cfa_offset 80
movq %rsi, %rbx
movq %fs:40, %rax
movq %rax, 24(%rsp)
xorl %eax, %eax
movq 8(%rsi), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movq %rax, %rbp
movl %eax, %r14d
movq 16(%rbx), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r13d
movslq %ebp, %rdi
salq $2, %rdi
call malloc@PLT
movq %rax, %r12
testl %ebp, %ebp
jle .L42
movq %rax, %rbx
leal -1(%rbp), %eax
leaq 4(%r12,%rax,4), %rbp
.L43:
call rand@PLT
movslq %eax, %rdx
imulq $1717986919, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
leal (%rdx,%rdx,4), %edx
sall $4, %edx
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L43
.L42:
leaq 16(%rsp), %rcx
leaq 12(%rsp), %r8
movl %r13d, %edx
movl %r14d, %esi
movq %r12, %rdi
call _Z9max_burstPfiiPiS_
movl %r13d, %edx
movl %r14d, %esi
movq %r12, %rdi
call _Z11s_max_burstPfii
movq 24(%rsp), %rax
subq %fs:40, %rax
jne .L47
movl $0, %eax
addq $32, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L47:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "_Z14max_each_blockPfPiS0_S_ii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _Z14max_each_blockPfPiS0_S_ii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long -1082130432
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI0_0:
.long 0xbf800000 # float -1
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $16, %rsp
.cfi_def_cfa_offset 64
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rbx
movq 8(%rsi), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %r12
movq 16(%rbx), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movq %rax, %rbx
movslq %r12d, %r14
leaq (,%r14,4), %rdi
callq malloc
movq %rax, %r15
testl %r14d, %r14d
jle .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %r12d, %r12d
incq %r12
movq %r15, %r13
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1717986919, %rax, %rcx # imm = 0x66666667
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
shll $4, %ecx
leal (%rcx,%rcx,4), %ecx
subl %ecx, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss %xmm0, (%r13)
decq %r12
addq $4, %r13
cmpq $1, %r12
jg .LBB0_2
.LBB0_3: # %._crit_edge
leaq 8(%rsp), %rcx
leaq 4(%rsp), %r8
movq %r15, %rdi
movl %r14d, %esi
movl %ebx, %edx
callq _Z9max_burstPfiiPiS_
leal 1(%r14), %eax
movslq %ebx, %rdx
movl %eax, %ecx
subl %ebx, %ecx
subq %rdx, %r14
movss .LCPI0_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorl %edi, %edi
# implicit-def: $edx
# implicit-def: $esi
jmp .LBB0_4
.p2align 4, 0x90
.LBB0_10: # %._crit_edge.i
# in Loop: Header=BB0_4 Depth=1
incq %rdi
decl %eax
addq $4, %r15
cmpq %rcx, %rdi
je .LBB0_11
.LBB0_4: # %.preheader.i
# =>This Loop Header: Depth=1
# Child Loop BB0_6 Depth 2
# Child Loop BB0_8 Depth 3
cmpq %r14, %rdi
jg .LBB0_10
# %bb.5: # %.lr.ph.i.preheader
# in Loop: Header=BB0_4 Depth=1
leal -1(%rdi), %r8d
movq %rbx, %r9
movaps %xmm0, %xmm1
jmp .LBB0_6
.p2align 4, 0x90
.LBB0_9: # %_Z4meanPfii.exit.i
# in Loop: Header=BB0_6 Depth=2
xorps %xmm2, %xmm2
cvtsi2ss %r9d, %xmm2
divss %xmm2, %xmm0
ucomiss %xmm1, %xmm0
cmoval %edi, %esi
leal (%r8,%r9), %r10d
cmoval %r10d, %edx
maxss %xmm1, %xmm0
incq %r9
movaps %xmm0, %xmm1
cmpl %eax, %r9d
je .LBB0_10
.LBB0_6: # %.lr.ph.i
# Parent Loop BB0_4 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB0_8 Depth 3
xorps %xmm0, %xmm0
testl %r9d, %r9d
jle .LBB0_9
# %bb.7: # %.lr.ph.i.i.preheader
# in Loop: Header=BB0_6 Depth=2
xorl %r10d, %r10d
.p2align 4, 0x90
.LBB0_8: # %.lr.ph.i.i
# Parent Loop BB0_4 Depth=1
# Parent Loop BB0_6 Depth=2
# => This Inner Loop Header: Depth=3
addss (%r15,%r10,4), %xmm0
incq %r10
cmpl %r10d, %r9d
jne .LBB0_8
jmp .LBB0_9
.LBB0_11: # %_Z11s_max_burstPfii.exit
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $16, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size main, .Lfunc_end0-main
.cfi_endproc
# -- End function
.globl _Z9max_burstPfiiPiS_ # -- Begin function _Z9max_burstPfiiPiS_
.p2align 4, 0x90
.type _Z9max_burstPfiiPiS_,@function
_Z9max_burstPfiiPiS_: # @_Z9max_burstPfiiPiS_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $216, %rsp
.cfi_def_cfa_offset 272
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %r8, 72(%rsp) # 8-byte Spill
movq %rcx, %r14
movl %edx, %r13d
movl %esi, %ebx
movq %rbx, 40(%rsp) # 8-byte Spill
leal 1023(%rbx), %eax
leal 2046(%rbx), %r12d
testl %eax, %eax
cmovnsl %eax, %r12d
movq %rdi, 48(%rsp) # 8-byte Spill
sarl $10, %r12d
movslq %r12d, %rbp
shlq $2, %rbp
movq %rbp, %rdi
callq malloc
movq %rax, %r15
movq %rbp, %rdi
callq malloc
movq %rax, 64(%rsp) # 8-byte Spill
movq %rbp, %rdi
callq malloc
movq %rax, 56(%rsp) # 8-byte Spill
movslq %ebx, %rbx
shlq $2, %rbx
leaq 24(%rsp), %rdi
movq %rbx, %rsi
callq hipMalloc
leaq 16(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
leaq 8(%rsp), %rdi
movq %rbp, %rsi
callq hipMalloc
movq %rsp, %rdi
movq %rbp, %rsi
callq hipMalloc
movq 24(%rsp), %rdi
movq 48(%rsp), %rsi # 8-byte Reload
movq %rbx, %rdx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967296, %rdx # imm = 0x100000000
leaq (%r12,%rdx), %rdi
orq $1024, %rdx # imm = 0x400
movl $12288, %r8d # imm = 0x3000
movl $1, %esi
movl $1, %ecx
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq 8(%rsp), %rcx
movq (%rsp), %rdx
movq 24(%rsp), %rsi
movq %rax, 152(%rsp)
movq %rcx, 144(%rsp)
movq %rdx, 136(%rsp)
movq %rsi, 128(%rsp)
movq 40(%rsp), %rax # 8-byte Reload
movl %eax, 36(%rsp)
movl %r13d, 32(%rsp)
leaq 152(%rsp), %rax
movq %rax, 160(%rsp)
leaq 144(%rsp), %rax
movq %rax, 168(%rsp)
leaq 136(%rsp), %rax
movq %rax, 176(%rsp)
leaq 128(%rsp), %rax
movq %rax, 184(%rsp)
leaq 36(%rsp), %rax
movq %rax, 192(%rsp)
leaq 32(%rsp), %rax
movq %rax, 200(%rsp)
leaq 112(%rsp), %rdi
leaq 96(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
movq 96(%rsp), %rcx
movl 104(%rsp), %r8d
leaq 160(%rsp), %r9
movl $_Z14max_each_blockPfPiS0_S_ii, %edi
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
callq hipDeviceSynchronize
movq 16(%rsp), %rsi
movq %r15, %rdi
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
movq 64(%rsp), %r13 # 8-byte Reload
movq %r13, %rdi
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
movq (%rsp), %rsi
movq 56(%rsp), %rbx # 8-byte Reload
movq %rbx, %rdi
movq %rbp, %rdx
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq 72(%rsp), %rdi # 8-byte Reload
movl $0, (%rdi)
cmpl $0, 40(%rsp) # 4-byte Folded Reload
jle .LBB1_7
# %bb.3: # %.lr.ph.i
xorps %xmm0, %xmm0
xorl %eax, %eax
jmp .LBB1_4
.p2align 4, 0x90
.LBB1_6: # in Loop: Header=BB1_4 Depth=1
incq %rax
cmpq %rax, %r12
je .LBB1_7
.LBB1_4: # =>This Inner Loop Header: Depth=1
movss (%r15,%rax,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm0, %xmm1
jbe .LBB1_6
# %bb.5: # in Loop: Header=BB1_4 Depth=1
movss %xmm1, (%rdi)
movl (%r13,%rax,4), %ecx
movl %ecx, (%r14)
movl (%rbx,%rax,4), %ecx
movl %ecx, 4(%r14)
movaps %xmm1, %xmm0
jmp .LBB1_6
.LBB1_7: # %_Z20find_max_from_blocksPfPiS0_iS0_S_.exit
movl (%r14), %esi
movl 4(%r14), %edx
movss (%rdi), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str, %edi
movb $1, %al
callq printf
addq $216, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size _Z9max_burstPfiiPiS_, .Lfunc_end1-_Z9max_burstPfiiPiS_
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z11s_max_burstPfii
.LCPI2_0:
.long 0xbf800000 # float -1
.text
.globl _Z11s_max_burstPfii
.p2align 4, 0x90
.type _Z11s_max_burstPfii,@function
_Z11s_max_burstPfii: # @_Z11s_max_burstPfii
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
movslq %esi, %rcx
incl %esi
movslq %edx, %rax
movl %esi, %r8d
subl %eax, %r8d
subq %rax, %rcx
movl %eax, %r9d
movss .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
xorl %r10d, %r10d
# implicit-def: $edx
# implicit-def: $eax
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_7: # %._crit_edge
# in Loop: Header=BB2_1 Depth=1
incq %r10
decl %esi
addq $4, %rdi
cmpq %r8, %r10
je .LBB2_8
.LBB2_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_3 Depth 2
# Child Loop BB2_5 Depth 3
cmpq %rcx, %r10
jg .LBB2_7
# %bb.2: # %.lr.ph.preheader
# in Loop: Header=BB2_1 Depth=1
movq %r9, %r11
movaps %xmm0, %xmm1
jmp .LBB2_3
.p2align 4, 0x90
.LBB2_6: # %_Z4meanPfii.exit
# in Loop: Header=BB2_3 Depth=2
xorps %xmm2, %xmm2
cvtsi2ss %r11d, %xmm2
divss %xmm2, %xmm0
ucomiss %xmm1, %xmm0
cmoval %r10d, %eax
leal -1(%r11,%r10), %ebx
cmoval %ebx, %edx
maxss %xmm1, %xmm0
incq %r11
movaps %xmm0, %xmm1
cmpl %esi, %r11d
je .LBB2_7
.LBB2_3: # %.lr.ph
# Parent Loop BB2_1 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_5 Depth 3
xorps %xmm0, %xmm0
testl %r11d, %r11d
jle .LBB2_6
# %bb.4: # %.lr.ph.i.preheader
# in Loop: Header=BB2_3 Depth=2
xorl %ebx, %ebx
.p2align 4, 0x90
.LBB2_5: # %.lr.ph.i
# Parent Loop BB2_1 Depth=1
# Parent Loop BB2_3 Depth=2
# => This Inner Loop Header: Depth=3
addss (%rdi,%rbx,4), %xmm0
incq %rbx
cmpl %ebx, %r11d
jne .LBB2_5
jmp .LBB2_6
.LBB2_8: # %._crit_edge43
cvtss2sd %xmm0, %xmm0
movl $.L.str.1, %edi
movl %eax, %esi
movb $1, %al
popq %rbx
.cfi_def_cfa_offset 8
jmp printf # TAILCALL
.Lfunc_end2:
.size _Z11s_max_burstPfii, .Lfunc_end2-_Z11s_max_burstPfii
.cfi_endproc
# -- End function
.globl _Z29__device_stub__max_each_blockPfPiS0_S_ii # -- Begin function _Z29__device_stub__max_each_blockPfPiS0_S_ii
.p2align 4, 0x90
.type _Z29__device_stub__max_each_blockPfPiS0_S_ii,@function
_Z29__device_stub__max_each_blockPfPiS0_S_ii: # @_Z29__device_stub__max_each_blockPfPiS0_S_ii
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
movl %r9d, 8(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 8(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z14max_each_blockPfPiS0_S_ii, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end3:
.size _Z29__device_stub__max_each_blockPfPiS0_S_ii, .Lfunc_end3-_Z29__device_stub__max_each_blockPfPiS0_S_ii
.cfi_endproc
# -- End function
.globl _Z20find_max_from_blocksPfPiS0_iS0_S_ # -- Begin function _Z20find_max_from_blocksPfPiS0_iS0_S_
.p2align 4, 0x90
.type _Z20find_max_from_blocksPfPiS0_iS0_S_,@function
_Z20find_max_from_blocksPfPiS0_iS0_S_: # @_Z20find_max_from_blocksPfPiS0_iS0_S_
.cfi_startproc
# %bb.0:
movl $0, (%r9)
testl %ecx, %ecx
jle .LBB4_5
# %bb.1: # %.lr.ph
movss (%r9), %xmm0 # xmm0 = mem[0],zero,zero,zero
movl %ecx, %eax
xorl %ecx, %ecx
jmp .LBB4_2
.p2align 4, 0x90
.LBB4_4: # in Loop: Header=BB4_2 Depth=1
incq %rcx
cmpq %rcx, %rax
je .LBB4_5
.LBB4_2: # =>This Inner Loop Header: Depth=1
movss (%rdi,%rcx,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
ucomiss %xmm0, %xmm1
jbe .LBB4_4
# %bb.3: # in Loop: Header=BB4_2 Depth=1
movss %xmm1, (%r9)
movl (%rsi,%rcx,4), %r10d
movl %r10d, (%r8)
movl (%rdx,%rcx,4), %r10d
movl %r10d, 4(%r8)
movaps %xmm1, %xmm0
jmp .LBB4_4
.LBB4_5: # %._crit_edge
retq
.Lfunc_end4:
.size _Z20find_max_from_blocksPfPiS0_iS0_S_, .Lfunc_end4-_Z20find_max_from_blocksPfPiS0_iS0_S_
.cfi_endproc
# -- End function
.globl _Z4meanPfii # -- Begin function _Z4meanPfii
.p2align 4, 0x90
.type _Z4meanPfii,@function
_Z4meanPfii: # @_Z4meanPfii
.cfi_startproc
# %bb.0:
# kill: def $edx killed $edx def $rdx
xorps %xmm0, %xmm0
subl %esi, %edx
jl .LBB5_3
# %bb.1: # %.lr.ph.preheader
movslq %esi, %rax
leaq (%rdi,%rax,4), %rax
leal 1(%rdx), %ecx
xorl %esi, %esi
.p2align 4, 0x90
.LBB5_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
addss (%rax,%rsi,4), %xmm0
incq %rsi
cmpl %esi, %ecx
jne .LBB5_2
.LBB5_3: # %._crit_edge
incl %edx
cvtsi2ss %edx, %xmm1
divss %xmm1, %xmm0
retq
.Lfunc_end5:
.size _Z4meanPfii, .Lfunc_end5-_Z4meanPfii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB6_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB6_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14max_each_blockPfPiS0_S_ii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end6:
.size __hip_module_ctor, .Lfunc_end6-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB7_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB7_2:
retq
.Lfunc_end7:
.size __hip_module_dtor, .Lfunc_end7-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z14max_each_blockPfPiS0_S_ii,@object # @_Z14max_each_blockPfPiS0_S_ii
.section .rodata,"a",@progbits
.globl _Z14max_each_blockPfPiS0_S_ii
.p2align 3, 0x0
_Z14max_each_blockPfPiS0_S_ii:
.quad _Z29__device_stub__max_each_blockPfPiS0_S_ii
.size _Z14max_each_blockPfPiS0_S_ii, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "burst start from %d end at %d
.size .L.str, 47
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "\nburst start from %d end %d, max-mean is %f\n"
.size .L.str.1, 45
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14max_each_blockPfPiS0_S_ii"
.size .L__unnamed_1, 30
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__max_each_blockPfPiS0_S_ii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14max_each_blockPfPiS0_S_ii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <cmath>
#include <device_functions.h>
#define N 1048576
#define THREADS_PER_BLOCK 1024
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} else { \
printf("success\n"); \
} \
} while (0)
#pragma once
#ifdef __INTELLISENSE__
void __syncthreads(); //Just so __syncthreads doesn't get underlined in red
int atomicAdd(int* address, int val);
#endif
typedef struct Node {
int sum;
int fromLeft;
}Node;
void randomGen(int *input, int size) {
srand(time(NULL));
for (int i = 0; i < size; i++) {
input[i] = rand() % 100;
}
}
__device__ void downPass(Node *nodeArray, int count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
//set root node
if (index%N == 0) {
nodeArray[0].fromLeft = 0;
}
if (index%(N) == 0) {
nodeArray[1].fromLeft = nodeArray[0].fromLeft;
nodeArray[2].fromLeft = nodeArray[0].fromLeft + nodeArray[1].sum;
}
__syncthreads();
int divisor = N / 2;
for (int i = 1; i < count; i++) {
if (index%divisor == 0) {
//int random = i - 1;
//int leftIndex = power(2, i) + index / divisor + 2 * random + 1;
int leftIndex = 2 * i + 1;
nodeArray[leftIndex].fromLeft = nodeArray[leftIndex / 2].fromLeft;//left child
nodeArray[leftIndex + 1].fromLeft = nodeArray[leftIndex / 2].fromLeft + nodeArray[leftIndex].sum;
divisor /= 2;
}
__syncthreads();
}
}
//Tree builds!!!!
__device__ void buildTree(int *input, Node *nodeArray, int *sum, int count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
nodeArray[index + (N-1)].sum = input[index]; //Save all the leaf nodes
int divisor = 2;
for (int i = 0; i < count; i++) { //Generate all of the parent nodes
if (index % divisor == 0) {
nodeArray[(index+(N-1))/divisor].sum = nodeArray[(index+(N-1))/(divisor/2)].sum + nodeArray[(index + N)/(divisor/2)].sum;
divisor *= 2;
}
__syncthreads();
}
}
__global__ void find_repeats(int *a, int *b, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n - 1) {
if (a[index] == a[index + 1]) {
b[index] = 1;
}
else {
b[index] = 0;
}
}
}
__global__ void exclusive_scan(int *input, int *output, Node *nodeArray, int *totalSum, int iterations) {
//int index = threadIdx.x + blockIdx.x * blockDim.x;
buildTree(input, nodeArray, totalSum, iterations);
__syncthreads();
downPass(nodeArray, iterations);
}
//In order to implement this we need a working version of exclusive scan
__global__ void index_repeats(int *b, int *c, int *B) {
}
int main() {
int *a, *b, *c; //host ABC
int *dev_a, *dev_b, *dev_A;
int *totalSum;
int sum = 0;
//int *dev_c, *dev_B;
int size = N * sizeof(int);
Node *nodeArray, *dev_nodeArray;
int nodeArraySize = (2 * N - 1) * sizeof(Node);
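//(added comment) a complete binary tree with N leaves has N-1 internal nodes, so 2*N - 1 Node
//entries are allocated for the scan tree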
//cudamalloc a, b, and c on device memory
cudaMalloc((void**)&dev_a, size);
cudaMalloc((void**)&dev_b, size);
cudaMalloc((void**)&dev_A, size);
cudaMalloc((void**)&totalSum, sizeof(int));
cudaMalloc((void**)&dev_nodeArray, nodeArraySize);
//cudaMalloc((void**)&dev_c, size);
//cudaMalloc((void**)&dev_B, size);
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
nodeArray = (Node *)malloc(nodeArraySize);
//Make random array
randomGen(a, N);
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
//This works properly
find_repeats <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_b, N);
//Do exclusive scan on dev_b, store in dev_c
//exclusive_scan <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_b, dev_c);
int iterations = log(N) / log(2); //Cannot be called from the device
exclusive_scan <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_A, dev_nodeArray, totalSum, iterations);
//Create B and C with some cuda operations on dev_c
//need to implement exclusive scan on dev_b in order to do index repeats
//index_repeats <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_b, dev_c, dev_B);
cudaMemcpy(b, dev_b, size, cudaMemcpyDeviceToHost);
//cudaCheckErrors("failed");
//cudaMemcpy(a, dev_A, size, cudaMemcpyDeviceToHost);
cudaMemcpy(&sum, totalSum, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(nodeArray, dev_nodeArray, nodeArraySize, cudaMemcpyDeviceToHost);
//removing repeats cannot be done in parallel
int j = 0;
for (int i = 0; i < N; i++) {
if (b[i] == 0) {
c[j] = a[i];
j++;
}
}
printf("Last element of find_repeats: %d\n", c[N-j]); //replace
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_A);
cudaFree(totalSum);
cudaFree(dev_nodeArray);
//cudaFree(dev_c);
free(a); free(b); free(c); free(nodeArray);
return 0;
} | .file "tmpxft_0003ec1d_00000000-6_cuda_prefix.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9randomGenPii
.type _Z9randomGenPii, @function
_Z9randomGenPii:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r12
movl %esi, %ebp
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
testl %ebp, %ebp
jle .L3
movq %r12, %rbx
movslq %ebp, %rbp
leaq (%r12,%rbp,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $1374389535, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $100, %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
.L3:
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z9randomGenPii, .-_Z9randomGenPii
.globl _Z8downPassP4Nodei
.type _Z8downPassP4Nodei, @function
_Z8downPassP4Nodei:
.LFB2058:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size _Z8downPassP4Nodei, .-_Z8downPassP4Nodei
.globl _Z9buildTreePiP4NodeS_i
.type _Z9buildTreePiP4NodeS_i, @function
_Z9buildTreePiP4NodeS_i:
.LFB2059:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2059:
.size _Z9buildTreePiP4NodeS_i, .-_Z9buildTreePiP4NodeS_i
.globl _Z35__device_stub__Z12find_repeatsPiS_iPiS_i
.type _Z35__device_stub__Z12find_repeatsPiS_iPiS_i, @function
_Z35__device_stub__Z12find_repeatsPiS_iPiS_i:
.LFB2086:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12find_repeatsPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L12
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z35__device_stub__Z12find_repeatsPiS_iPiS_i, .-_Z35__device_stub__Z12find_repeatsPiS_iPiS_i
.globl _Z12find_repeatsPiS_i
.type _Z12find_repeatsPiS_i, @function
_Z12find_repeatsPiS_i:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z12find_repeatsPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z12find_repeatsPiS_i, .-_Z12find_repeatsPiS_i
.globl _Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i
.type _Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i, @function
_Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i:
.LFB2088:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L24
.L20:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L25
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z14exclusive_scanPiS_P4NodeS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L20
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i, .-_Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i
.globl _Z14exclusive_scanPiS_P4NodeS_i
.type _Z14exclusive_scanPiS_P4NodeS_i, @function
_Z14exclusive_scanPiS_P4NodeS_i:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z14exclusive_scanPiS_P4NodeS_i, .-_Z14exclusive_scanPiS_P4NodeS_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Last element of find_repeats: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $88, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $0, 4(%rsp)
leaq 8(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $16777208, %esi
call cudaMalloc@PLT
movl $4194304, %edi
call malloc@PLT
movq %rax, %rbp
movl $4194304, %edi
call malloc@PLT
movq %rax, %rbx
movl $4194304, %edi
call malloc@PLT
movq %rax, %r12
movl $16777208, %edi
call malloc@PLT
movq %rax, %r13
movl $1048576, %esi
movq %rbp, %rdi
call _Z9randomGenPii
movl $1, %ecx
movl $4194304, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1024, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1024, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L36
.L29:
movl $1024, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1024, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L37
.L30:
movl $2, %ecx
movl $4194304, %edx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq 4(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
movl $2, %ecx
movl $16777208, %edx
movq 40(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl $0, %eax
movl $0, %edx
jmp .L32
.L36:
movl $1048576, %edx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z35__device_stub__Z12find_repeatsPiS_iPiS_i
jmp .L29
.L37:
movl $20, %r8d
movq 32(%rsp), %rcx
movq 40(%rsp), %rdx
movq 24(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i
jmp .L30
.L31:
addq $4, %rax
cmpq $4194304, %rax
je .L38
.L32:
cmpl $0, (%rbx,%rax)
jne .L31
movslq %edx, %rcx
movl 0(%rbp,%rax), %esi
movl %esi, (%r12,%rcx,4)
addl $1, %edx
jmp .L31
.L38:
movl $1048576, %eax
subl %edx, %eax
cltq
movl (%r12,%rax,4), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L39
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.globl _Z37__device_stub__Z13index_repeatsPiS_S_PiS_S_
.type _Z37__device_stub__Z13index_repeatsPiS_S_PiS_S_, @function
_Z37__device_stub__Z13index_repeatsPiS_S_PiS_S_:
.LFB2090:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L44
.L40:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L45
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L44:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13index_repeatsPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L40
.L45:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2090:
.size _Z37__device_stub__Z13index_repeatsPiS_S_PiS_S_, .-_Z37__device_stub__Z13index_repeatsPiS_S_PiS_S_
.globl _Z13index_repeatsPiS_S_
.type _Z13index_repeatsPiS_S_, @function
_Z13index_repeatsPiS_S_:
.LFB2091:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z13index_repeatsPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _Z13index_repeatsPiS_S_, .-_Z13index_repeatsPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z13index_repeatsPiS_S_"
.section .rodata.str1.8
.align 8
.LC2:
.string "_Z14exclusive_scanPiS_P4NodeS_i"
.section .rodata.str1.1
.LC3:
.string "_Z12find_repeatsPiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2093:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z13index_repeatsPiS_S_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z14exclusive_scanPiS_P4NodeS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z12find_repeatsPiS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2093:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
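Note on the host listing above: each generated __device_stub__ routine follows the same pattern — it spills the kernel arguments to the stack, gathers their addresses into an array, pops the launch configuration that the <<<...>>> expression pushed, and forwards everything to cudaLaunchKernel. The hand-written CUDA sketch below mirrors that calling pattern for the find_repeats kernel (its CUDA definition appears in the next row of this dump); it is an illustration for this note only — launch_find_repeats and the explicit grid/block parameters are assumptions, not code emitted by nvcc.

#include <cuda_runtime.h>

// Equivalent to the find_repeats definition in the source rows below,
// condensed to a conditional expression.
__global__ void find_repeats(int *a, int *b, int n) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n - 1) {
        b[index] = (a[index] == a[index + 1]) ? 1 : 0;
    }
}

// Hand-written analogue of the generated stub: collect the addresses of the
// kernel arguments and pass them, together with the launch geometry, to
// cudaLaunchKernel.
cudaError_t launch_find_repeats(int *dev_a, int *dev_b, int n,
                                dim3 grid, dim3 block) {
    void *args[] = { &dev_a, &dev_b, &n };
    return cudaLaunchKernel((void *)find_repeats, grid, block,
                            args, /*sharedMem=*/0, /*stream=*/nullptr);
}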
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <cmath>
#include <device_functions.h>
#define N 1048576
#define THREADS_PER_BLOCK 1024
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} else { \
printf("success\n"); \
} \
} while (0)
#pragma once
#ifdef __INTELLISENSE__
void __syncthreads(); //Just so sync threads doesn't get underlined in red
int atomicAdd(int* address, int val);
#endif
typedef struct Node {
int sum;
int fromLeft;
}Node;
void randomGen(int *input, int size) {
srand(time(NULL));
for (int i = 0; i < size; i++) {
input[i] = rand() % 100;
}
}
__device__ void downPass(Node *nodeArray, int count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
//set root node
if (index%N == 0) {
nodeArray[0].fromLeft = 0;
}
if (index%(N) == 0) {
nodeArray[1].fromLeft = nodeArray[0].fromLeft;
nodeArray[2].fromLeft = nodeArray[0].fromLeft + nodeArray[1].sum;
}
__syncthreads();
int divisor = N / 2;
for (int i = 1; i < count; i++) {
if (index%divisor == 0) {
//int random = i - 1;
//int leftIndex = power(2, i) + index / divisor + 2 * random + 1;
int leftIndex = 2 * i + 1;
nodeArray[leftIndex].fromLeft = nodeArray[leftIndex / 2].fromLeft;//left child
nodeArray[leftIndex + 1].fromLeft = nodeArray[leftIndex / 2].fromLeft + nodeArray[leftIndex].sum;
divisor /= 2;
}
__syncthreads();
}
}
//Tree builds!!!!
__device__ void buildTree(int *input, Node *nodeArray, int *sum, int count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
nodeArray[index + (N-1)].sum = input[index]; //Save all the leaf nodes
int divisor = 2;
for (int i = 0; i < count; i++) { //Generate all of the parent nodes
if (index % divisor == 0) {
nodeArray[(index+(N-1))/divisor].sum = nodeArray[(index+(N-1))/(divisor/2)].sum + nodeArray[(index + N)/(divisor/2)].sum;
divisor *= 2;
}
__syncthreads();
}
}
__global__ void find_repeats(int *a, int *b, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n - 1) {
if (a[index] == a[index + 1]) {
b[index] = 1;
}
else {
b[index] = 0;
}
}
}
__global__ void exclusive_scan(int *input, int *output, Node *nodeArray, int *totalSum, int iterations) {
//int index = threadIdx.x + blockIdx.x * blockDim.x;
buildTree(input, nodeArray, totalSum, iterations);
__syncthreads();
downPass(nodeArray, iterations);
}
//In order to implement this we need a working version of exclusive scan
__global__ void index_repeats(int *b, int *c, int *B) {
}
int main() {
int *a, *b, *c; //host ABC
int *dev_a, *dev_b, *dev_A;
int *totalSum;
int sum = 0;
//int *dev_c, *dev_B;
int size = N * sizeof(int);
Node *nodeArray, *dev_nodeArray;
int nodeArraySize = (2 * N - 1) * sizeof(Node);
//cudamalloc a, b, and c on device memory
cudaMalloc((void**)&dev_a, size);
cudaMalloc((void**)&dev_b, size);
cudaMalloc((void**)&dev_A, size);
cudaMalloc((void**)&totalSum, sizeof(int));
cudaMalloc((void**)&dev_nodeArray, nodeArraySize);
//cudaMalloc((void**)&dev_c, size);
//cudaMalloc((void**)&dev_B, size);
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
nodeArray = (Node *)malloc(nodeArraySize);
//Make random array
randomGen(a, N);
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
//This works properly
find_repeats <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_b, N);
//Do exclusive scan on dev_b, store in dev_c
//exclusive_scan <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_b, dev_c);
int iterations = log(N) / log(2); //Cannot be called from the device
exclusive_scan <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_A, dev_nodeArray, totalSum, iterations);
//Create B and C with some cuda operations on dev_c
//need to implement exclusive scan on dev_b in order to do index repeats
//index_repeats <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_b, dev_c, dev_B);
cudaMemcpy(b, dev_b, size, cudaMemcpyDeviceToHost);
//cudaCheckErrors("failed");
//cudaMemcpy(a, dev_A, size, cudaMemcpyDeviceToHost);
cudaMemcpy(&sum, totalSum, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(nodeArray, dev_nodeArray, nodeArraySize, cudaMemcpyDeviceToHost);
//remove repeats cannot be done in parallel
int j = 0;
for (int i = 0; i < N; i++) {
if (b[i] == 0) {
c[j] = a[i];
j++;
}
}
printf("Last element of find_repeats: %d\n", c[N-j]); //replace
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_A);
cudaFree(totalSum);
cudaFree(dev_nodeArray);
//cudaFree(dev_c);
free(a); free(b); free(c); free(nodeArray);
return 0;
} | #include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <cmath>
#include <hip/device_functions.h>
#define N 1048576
#define THREADS_PER_BLOCK 1024
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} else { \
printf("success\n"); \
} \
} while (0)
#pragma once
#ifdef __INTELLISENSE__
void __syncthreads(); //Just so sync threads doesn't get underlined in red
int atomicAdd(int* address, int val);
#endif
typedef struct Node {
int sum;
int fromLeft;
}Node;
void randomGen(int *input, int size) {
srand(time(NULL));
for (int i = 0; i < size; i++) {
input[i] = rand() % 100;
}
}
__device__ void downPass(Node *nodeArray, int count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
//set root node
if (index%N == 0) {
nodeArray[0].fromLeft = 0;
}
if (index%(N) == 0) {
nodeArray[1].fromLeft = nodeArray[0].fromLeft;
nodeArray[2].fromLeft = nodeArray[0].fromLeft + nodeArray[1].sum;
}
__syncthreads();
int divisor = N / 2;
for (int i = 1; i < count; i++) {
if (index%divisor == 0) {
//int random = i - 1;
//int leftIndex = power(2, i) + index / divisor + 2 * random + 1;
int leftIndex = 2 * i + 1;
nodeArray[leftIndex].fromLeft = nodeArray[leftIndex / 2].fromLeft;//left child
nodeArray[leftIndex + 1].fromLeft = nodeArray[leftIndex / 2].fromLeft + nodeArray[leftIndex].sum;
divisor /= 2;
}
__syncthreads();
}
}
//Tree builds!!!!
__device__ void buildTree(int *input, Node *nodeArray, int *sum, int count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
nodeArray[index + (N-1)].sum = input[index]; //Save all the leaf nodes
int divisor = 2;
for (int i = 0; i < count; i++) { //Generate all of the parent nodes
if (index % divisor == 0) {
nodeArray[(index+(N-1))/divisor].sum = nodeArray[(index+(N-1))/(divisor/2)].sum + nodeArray[(index + N)/(divisor/2)].sum;
divisor *= 2;
}
__syncthreads();
}
}
__global__ void find_repeats(int *a, int *b, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n - 1) {
if (a[index] == a[index + 1]) {
b[index] = 1;
}
else {
b[index] = 0;
}
}
}
__global__ void exclusive_scan(int *input, int *output, Node *nodeArray, int *totalSum, int iterations) {
//int index = threadIdx.x + blockIdx.x * blockDim.x;
buildTree(input, nodeArray, totalSum, iterations);
__syncthreads();
downPass(nodeArray, iterations);
}
//In order to implement this we need a working version of exclusive scan
__global__ void index_repeats(int *b, int *c, int *B) {
}
int main() {
int *a, *b, *c; //host ABC
int *dev_a, *dev_b, *dev_A;
int *totalSum;
int sum = 0;
//int *dev_c, *dev_B;
int size = N * sizeof(int);
Node *nodeArray, *dev_nodeArray;
int nodeArraySize = (2 * N - 1) * sizeof(Node);
//cudamalloc a, b, and c on device memory
hipMalloc((void**)&dev_a, size);
hipMalloc((void**)&dev_b, size);
hipMalloc((void**)&dev_A, size);
hipMalloc((void**)&totalSum, sizeof(int));
hipMalloc((void**)&dev_nodeArray, nodeArraySize);
//cudaMalloc((void**)&dev_c, size);
//cudaMalloc((void**)&dev_B, size);
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
nodeArray = (Node *)malloc(nodeArraySize);
//Make random array
randomGen(a, N);
hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
//This works properly
find_repeats <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_b, N);
//Do exclusive scan on dev_b, store in dev_c
//exclusive_scan <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_b, dev_c);
int iterations = log(N) / log(2); //Cannot be called from the device
exclusive_scan <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_A, dev_nodeArray, totalSum, iterations);
//Create B and C with some cuda operations on dev_c
//need to implement exclusive scan on dev_b in order to do index repeats
//index_repeats <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_b, dev_c, dev_B);
hipMemcpy(b, dev_b, size, hipMemcpyDeviceToHost);
//cudaCheckErrors("failed");
//cudaMemcpy(a, dev_A, size, cudaMemcpyDeviceToHost);
hipMemcpy(&sum, totalSum, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(nodeArray, dev_nodeArray, nodeArraySize, hipMemcpyDeviceToHost);
//remove repeats cannot be done in parallel
int j = 0;
for (int i = 0; i < N; i++) {
if (b[i] == 0) {
c[j] = a[i];
j++;
}
}
printf("Last element of find_repeats: %d\n", c[N-j]); //replace
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_A);
hipFree(totalSum);
hipFree(dev_nodeArray);
//cudaFree(dev_c);
free(a); free(b); free(c); free(nodeArray);
return 0;
} |
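The CUDA-to-HIP row above is a mechanical rename: the runtime header and the cudaMalloc/cudaMemcpy/cudaFree calls (and their memcpy-kind constants) switch to their hip* spellings, while kernel definitions and the <<<grid, block>>> launch syntax are unchanged. The short self-contained HIP program below exercises exactly that API surface; it is written for this note only (the fill kernel and the 64-element buffer are illustrative assumptions, not part of the dataset).

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void fill(int *p, int v) {
    p[threadIdx.x] = v;
}

int main() {
    int host[64];
    int *dev = nullptr;
    hipMalloc((void**)&dev, sizeof(host));      // was cudaMalloc
    fill<<<1, 64>>>(dev, 7);                    // launch syntax unchanged
    hipMemcpy(host, dev, sizeof(host),
              hipMemcpyDeviceToHost);           // was cudaMemcpy / cudaMemcpyDeviceToHost
    hipFree(dev);                               // was cudaFree
    printf("host[0] = %d\n", host[0]);          // expected: 7
    return 0;
}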
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <cmath>
#include <hip/device_functions.h>
#define N 1048576
#define THREADS_PER_BLOCK 1024
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} else { \
printf("success\n"); \
} \
} while (0)
#pragma once
#ifdef __INTELLISENSE__
void __syncthreads(); //Just so sync threads doesn't get underlined in red
int atomicAdd(int* address, int val);
#endif
typedef struct Node {
int sum;
int fromLeft;
}Node;
void randomGen(int *input, int size) {
srand(time(NULL));
for (int i = 0; i < size; i++) {
input[i] = rand() % 100;
}
}
__device__ void downPass(Node *nodeArray, int count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
//set root node
if (index%N == 0) {
nodeArray[0].fromLeft = 0;
}
if (index%(N) == 0) {
nodeArray[1].fromLeft = nodeArray[0].fromLeft;
nodeArray[2].fromLeft = nodeArray[0].fromLeft + nodeArray[1].sum;
}
__syncthreads();
int divisor = N / 2;
for (int i = 1; i < count; i++) {
if (index%divisor == 0) {
//int random = i - 1;
//int leftIndex = power(2, i) + index / divisor + 2 * random + 1;
int leftIndex = 2 * i + 1;
nodeArray[leftIndex].fromLeft = nodeArray[leftIndex / 2].fromLeft;//left child
nodeArray[leftIndex + 1].fromLeft = nodeArray[leftIndex / 2].fromLeft + nodeArray[leftIndex].sum;
divisor /= 2;
}
__syncthreads();
}
}
//Tree builds!!!!
__device__ void buildTree(int *input, Node *nodeArray, int *sum, int count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
nodeArray[index + (N-1)].sum = input[index]; //Save all the leaf nodes
int divisor = 2;
for (int i = 0; i < count; i++) { //Generate all of the parent nodes
if (index % divisor == 0) {
nodeArray[(index+(N-1))/divisor].sum = nodeArray[(index+(N-1))/(divisor/2)].sum + nodeArray[(index + N)/(divisor/2)].sum;
divisor *= 2;
}
__syncthreads();
}
}
__global__ void find_repeats(int *a, int *b, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n - 1) {
if (a[index] == a[index + 1]) {
b[index] = 1;
}
else {
b[index] = 0;
}
}
}
__global__ void exclusive_scan(int *input, int *output, Node *nodeArray, int *totalSum, int iterations) {
//int index = threadIdx.x + blockIdx.x * blockDim.x;
buildTree(input, nodeArray, totalSum, iterations);
__syncthreads();
downPass(nodeArray, iterations);
}
//In order to implement this we need a working version of exclusive scan
__global__ void index_repeats(int *b, int *c, int *B) {
}
int main() {
int *a, *b, *c; //host ABC
int *dev_a, *dev_b, *dev_A;
int *totalSum;
int sum = 0;
//int *dev_c, *dev_B;
int size = N * sizeof(int);
Node *nodeArray, *dev_nodeArray;
int nodeArraySize = (2 * N - 1) * sizeof(Node);
//cudamalloc a, b, and c on device memory
hipMalloc((void**)&dev_a, size);
hipMalloc((void**)&dev_b, size);
hipMalloc((void**)&dev_A, size);
hipMalloc((void**)&totalSum, sizeof(int));
hipMalloc((void**)&dev_nodeArray, nodeArraySize);
//cudaMalloc((void**)&dev_c, size);
//cudaMalloc((void**)&dev_B, size);
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
nodeArray = (Node *)malloc(nodeArraySize);
//Make random array
randomGen(a, N);
hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
//This works properly
find_repeats <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_b, N);
//Do exclusive scan on dev_b, store in dev_c
//exclusive_scan <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_b, dev_c);
int iterations = log(N) / log(2); //Cannot be called from the device
exclusive_scan <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_A, dev_nodeArray, totalSum, iterations);
//Create B and C with some cuda operations on dev_c
//need to implement exclusive scan on dev_b in order to do index repeats
//index_repeats <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_b, dev_c, dev_B);
hipMemcpy(b, dev_b, size, hipMemcpyDeviceToHost);
//cudaCheckErrors("failed");
//cudaMemcpy(a, dev_A, size, cudaMemcpyDeviceToHost);
hipMemcpy(&sum, totalSum, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(nodeArray, dev_nodeArray, nodeArraySize, hipMemcpyDeviceToHost);
//remove repeats cannot be done in parallel
int j = 0;
for (int i = 0; i < N; i++) {
if (b[i] == 0) {
c[j] = a[i];
j++;
}
}
printf("Last element of find_repeats: %d\n", c[N-j]); //replace
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_A);
hipFree(totalSum);
hipFree(dev_nodeArray);
//cudaFree(dev_c);
free(a); free(b); free(c); free(nodeArray);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z12find_repeatsPiS_i
.globl _Z12find_repeatsPiS_i
.p2align 8
.type _Z12find_repeatsPiS_i,@function
_Z12find_repeatsPiS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_add_i32 s3, s3, -1
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
global_load_b64 v[2:3], v[2:3], off
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v2, v3
v_cndmask_b32_e64 v2, 0, 1, vcc_lo
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12find_repeatsPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z12find_repeatsPiS_i, .Lfunc_end0-_Z12find_repeatsPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z14exclusive_scanPiS_P4NodeS_i
.globl _Z14exclusive_scanPiS_P4NodeS_i
.p2align 8
.type _Z14exclusive_scanPiS_P4NodeS_i,@function
_Z14exclusive_scanPiS_P4NodeS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x34
s_load_b64 s[4:5], s[0:1], 0x0
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_clause 0x1
s_load_b64 s[2:3], s[0:1], 0x10
s_load_b32 s6, s[0:1], 0x20
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[2:3], 2, v[1:2]
v_add_co_u32 v2, vcc_lo, s4, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e32 v3, vcc_lo, s5, v3, vcc_lo
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s6, 1
global_load_b32 v0, v[2:3], off
v_add_nc_u32_e32 v2, 0xfffff, v1
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 3, v[2:3]
v_add_co_u32 v3, vcc_lo, s2, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s3, v4, vcc_lo
s_waitcnt vmcnt(0)
global_store_b32 v[3:4], v0, off
s_cbranch_scc1 .LBB1_5
v_add_nc_u32_e32 v5, 0x100000, v1
v_ashrrev_i32_e32 v3, 31, v2
v_ashrrev_i32_e32 v0, 31, v1
s_mov_b32 s1, s6
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_dual_mov_b32 v2, 2 :: v_dual_add_nc_u32 v7, v2, v3
v_ashrrev_i32_e32 v4, 31, v5
v_add_nc_u32_e32 v6, v1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v8, v5, v4
v_xor_b32_e32 v5, v6, v0
v_xor_b32_e32 v6, v7, v3
s_delay_alu instid0(VALU_DEP_3)
v_xor_b32_e32 v7, v8, v4
s_branch .LBB1_3
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s4
s_add_i32 s1, s1, -1
s_waitcnt_vscnt null, 0x0
s_cmp_lg_u32 s1, 0
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB1_5
.LBB1_3:
v_ashrrev_i32_e32 v8, 31, v2
s_mov_b32 s4, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v9, v2, v8
v_xor_b32_e32 v9, v9, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cvt_f32_u32_e32 v10, v9
v_sub_nc_u32_e32 v11, 0, v9
v_rcp_iflag_f32_e32 v10, v10
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v10, 0x4f7ffffe, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v10, v10
v_mul_lo_u32 v11, v11, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v11, v10, v11
v_add_nc_u32_e32 v10, v10, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v11, v5, v10
v_mul_lo_u32 v11, v11, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v11, v5, v11
v_sub_nc_u32_e32 v12, v11, v9
v_cmp_ge_u32_e32 vcc_lo, v11, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v11, v11, v12, vcc_lo
v_sub_nc_u32_e32 v12, v11, v9
v_cmp_ge_u32_e32 vcc_lo, v11, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v11, v11, v12, vcc_lo
v_xor_b32_e32 v11, v11, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v11, v11, v0
v_cmpx_eq_u32_e32 0, v11
s_cbranch_execz .LBB1_2
v_lshrrev_b32_e32 v11, 31, v2
v_mul_hi_u32 v10, v6, v10
v_xor_b32_e32 v8, v3, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v11, v2, v11
v_lshlrev_b32_e32 v2, 1, v2
v_ashrrev_i32_e32 v12, 1, v11
v_ashrrev_i32_e32 v11, 31, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v12, v12, v11
v_xor_b32_e32 v12, v12, v11
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cvt_f32_u32_e32 v13, v12
v_sub_nc_u32_e32 v14, 0, v12
v_rcp_iflag_f32_e32 v13, v13
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v13, 0x4f7ffffe, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v13, v13
v_mul_lo_u32 v14, v14, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v14, v13, v14
v_add_nc_u32_e32 v13, v13, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v14, v6, v13
v_mul_lo_u32 v15, v14, v12
v_add_nc_u32_e32 v17, 1, v14
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v15, v6, v15
v_cmp_ge_u32_e32 vcc_lo, v15, v12
v_sub_nc_u32_e32 v19, v15, v12
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_cndmask_b32_e32 v14, v14, v17, vcc_lo
v_mul_hi_u32 v13, v7, v13
v_cndmask_b32_e32 v15, v15, v19, vcc_lo
v_xor_b32_e32 v19, v3, v11
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_2)
v_cmp_ge_u32_e32 vcc_lo, v15, v12
v_xor_b32_e32 v15, v4, v11
v_mul_lo_u32 v16, v13, v12
v_add_nc_u32_e32 v18, 1, v13
v_sub_nc_u32_e32 v16, v7, v16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cmp_ge_u32_e64 s0, v16, v12
v_sub_nc_u32_e32 v17, v16, v12
v_cndmask_b32_e64 v13, v13, v18, s0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cndmask_b32_e64 v16, v16, v17, s0
v_add_nc_u32_e32 v17, 1, v13
v_add_nc_u32_e32 v18, 1, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_cndmask_b32_e32 v14, v14, v18, vcc_lo
v_cmp_ge_u32_e32 vcc_lo, v16, v12
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_xor_b32_e32 v12, v14, v19
v_cndmask_b32_e32 v11, v13, v17, vcc_lo
v_xor_b32_e32 v13, v11, v15
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_sub_nc_u32_e32 v11, v12, v19
v_sub_nc_u32_e32 v13, v13, v15
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v12, 31, v11
v_ashrrev_i32_e32 v14, 31, v13
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[11:12], 3, v[11:12]
v_lshlrev_b64 v[13:14], 3, v[13:14]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v11, vcc_lo, s2, v11
v_add_co_ci_u32_e32 v12, vcc_lo, s3, v12, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v13, vcc_lo, s2, v13
v_add_co_ci_u32_e32 v14, vcc_lo, s3, v14, vcc_lo
s_clause 0x1
global_load_b32 v11, v[11:12], off
global_load_b32 v12, v[13:14], off
v_mul_lo_u32 v13, v10, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v13, v6, v13
v_sub_nc_u32_e32 v15, v13, v9
v_cmp_ge_u32_e32 vcc_lo, v13, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v13, v13, v15 :: v_dual_add_nc_u32 v14, 1, v10
v_cndmask_b32_e32 v10, v10, v14, vcc_lo
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_ge_u32_e32 vcc_lo, v13, v9
v_add_nc_u32_e32 v14, 1, v10
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_dual_cndmask_b32 v9, v10, v14 :: v_dual_add_nc_u32 v10, v12, v11
v_xor_b32_e32 v9, v9, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v8, v9, v8
v_ashrrev_i32_e32 v9, 31, v8
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[8:9], 3, v[8:9]
v_add_co_u32 v8, vcc_lo, s2, v8
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v9, vcc_lo, s3, v9, vcc_lo
global_store_b32 v[8:9], v10, off
s_branch .LBB1_2
.LBB1_5:
v_and_b32_e32 v0, 0xfffff, v1
s_mov_b32 s0, exec_lo
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB1_7
v_mov_b32_e32 v0, 0
global_load_b32 v2, v0, s[2:3] offset:8
s_clause 0x1
global_store_b32 v0, v0, s[2:3] offset:4
global_store_b32 v0, v0, s[2:3] offset:12
s_waitcnt vmcnt(0)
global_store_b32 v0, v2, s[2:3] offset:20
.LBB1_7:
s_or_b32 exec_lo, exec_lo, s0
s_cmp_lt_i32 s6, 2
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB1_12
v_ashrrev_i32_e32 v0, 31, v1
s_add_u32 s0, s2, 12
v_mov_b32_e32 v3, 0x80000
s_addc_u32 s1, s3, 0
s_add_u32 s4, s2, 28
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v1, v1, v0
s_addc_u32 s5, s3, 0
s_add_i32 s8, s6, -1
s_mov_b32 s6, 4
s_delay_alu instid0(VALU_DEP_1)
v_xor_b32_e32 v1, v1, v0
s_branch .LBB1_10
.LBB1_9:
s_or_b32 exec_lo, exec_lo, s9
s_add_u32 s0, s0, 8
s_addc_u32 s1, s1, 0
s_add_i32 s6, s6, 2
s_add_u32 s4, s4, 16
s_addc_u32 s5, s5, 0
s_add_i32 s8, s8, -1
s_waitcnt_vscnt null, 0x0
s_cmp_lg_u32 s8, 0
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB1_12
.LBB1_10:
v_ashrrev_i32_e32 v4, 31, v3
s_mov_b32 s9, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v5, v3, v4
v_xor_b32_e32 v4, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cvt_f32_u32_e32 v5, v4
v_sub_nc_u32_e32 v6, 0, v4
v_rcp_iflag_f32_e32 v5, v5
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v5, 0x4f7ffffe, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cvt_u32_f32_e32 v5, v5
v_mul_lo_u32 v6, v6, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v6, v5, v6
v_add_nc_u32_e32 v5, v5, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_u32 v5, v1, v5
v_mul_lo_u32 v5, v5, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v5, v1, v5
v_sub_nc_u32_e32 v6, v5, v4
v_cmp_ge_u32_e32 vcc_lo, v5, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v5, v5, v6, vcc_lo
v_sub_nc_u32_e32 v6, v5, v4
v_cmp_ge_u32_e32 vcc_lo, v5, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v4, v5, v6, vcc_lo
v_xor_b32_e32 v4, v4, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v4, v4, v0
v_cmpx_eq_u32_e32 0, v4
s_cbranch_execz .LBB1_9
s_clause 0x1
global_load_b32 v4, v2, s[0:1]
global_load_b32 v5, v2, s[4:5] offset:-4
v_lshrrev_b32_e32 v6, 31, v3
s_ashr_i32 s7, s6, 31
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_lshl_b64 s[10:11], s[6:7], 3
v_add_nc_u32_e32 v3, v3, v6
s_add_u32 s10, s2, s10
s_addc_u32 s11, s3, s11
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 1, v3
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v5, v5, v4
s_clause 0x1
global_store_b32 v2, v4, s[4:5]
global_store_b32 v2, v5, s[10:11] offset:4
s_branch .LBB1_9
.LBB1_12:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14exclusive_scanPiS_P4NodeS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 20
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z14exclusive_scanPiS_P4NodeS_i, .Lfunc_end1-_Z14exclusive_scanPiS_P4NodeS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z13index_repeatsPiS_S_
.globl _Z13index_repeatsPiS_S_
.p2align 8
.type _Z13index_repeatsPiS_S_,@function
_Z13index_repeatsPiS_S_:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z13index_repeatsPiS_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 24
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 1
.amdhsa_next_free_sgpr 1
.amdhsa_reserve_vcc 0
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z13index_repeatsPiS_S_, .Lfunc_end2-_Z13index_repeatsPiS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12find_repeatsPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12find_repeatsPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14exclusive_scanPiS_P4NodeS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14exclusive_scanPiS_P4NodeS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 20
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 24
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z13index_repeatsPiS_S_
.private_segment_fixed_size: 0
.sgpr_count: 0
.sgpr_spill_count: 0
.symbol: _Z13index_repeatsPiS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 0
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
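The device listing above targets a single architecture, amdgcn-amd-amdhsa--gfx1100, with wavefront size 32 (see the .amdhsa_wavefront_size32 and amdhsa.target entries). One way to confirm at run time that the visible GPU matches that target is to read hipDeviceProp_t::gcnArchName, as in the host-only sketch below (written for this note; not part of the dataset).

#include <hip/hip_runtime.h>
#include <cstdio>

int main() {
    hipDeviceProp_t prop;
    if (hipGetDeviceProperties(&prop, 0) != hipSuccess) {
        printf("no HIP device visible\n");
        return 1;
    }
    // gcnArchName reports the ISA target string, e.g. "gfx1100:sramecc-:xnack-";
    // warpSize should report 32 for the wave32 code object above.
    printf("device 0: arch %s, wavefront size %d\n", prop.gcnArchName, prop.warpSize);
    return 0;
}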
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <cmath>
#include <hip/device_functions.h>
#define N 1048576
#define THREADS_PER_BLOCK 1024
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} else { \
printf("success\n"); \
} \
} while (0)
#pragma once
#ifdef __INTELLISENSE__
void __syncthreads(); //Just so sync threads doesn't get underlined in red
int atomicAdd(int* address, int val);
#endif
typedef struct Node {
int sum;
int fromLeft;
}Node;
void randomGen(int *input, int size) {
srand(time(NULL));
for (int i = 0; i < size; i++) {
input[i] = rand() % 100;
}
}
__device__ void downPass(Node *nodeArray, int count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
//set root node
if (index%N == 0) {
nodeArray[0].fromLeft = 0;
}
if (index%(N) == 0) {
nodeArray[1].fromLeft = nodeArray[0].fromLeft;
nodeArray[2].fromLeft = nodeArray[0].fromLeft + nodeArray[1].sum;
}
__syncthreads();
int divisor = N / 2;
for (int i = 1; i < count; i++) {
if (index%divisor == 0) {
//int random = i - 1;
//int leftIndex = power(2, i) + index / divisor + 2 * random + 1;
int leftIndex = 2 * i + 1;
nodeArray[leftIndex].fromLeft = nodeArray[leftIndex / 2].fromLeft;//left child
nodeArray[leftIndex + 1].fromLeft = nodeArray[leftIndex / 2].fromLeft + nodeArray[leftIndex].sum;
divisor /= 2;
}
__syncthreads();
}
}
//Tree builds!!!!
__device__ void buildTree(int *input, Node *nodeArray, int *sum, int count) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
nodeArray[index + (N-1)].sum = input[index]; //Save all the leaf nodes
int divisor = 2;
for (int i = 0; i < count; i++) { //Generate all of the parent nodes
if (index % divisor == 0) {
nodeArray[(index+(N-1))/divisor].sum = nodeArray[(index+(N-1))/(divisor/2)].sum + nodeArray[(index + N)/(divisor/2)].sum;
divisor *= 2;
}
__syncthreads();
}
}
__global__ void find_repeats(int *a, int *b, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n - 1) {
if (a[index] == a[index + 1]) {
b[index] = 1;
}
else {
b[index] = 0;
}
}
}
__global__ void exclusive_scan(int *input, int *output, Node *nodeArray, int *totalSum, int iterations) {
//int index = threadIdx.x + blockIdx.x * blockDim.x;
buildTree(input, nodeArray, totalSum, iterations);
__syncthreads();
downPass(nodeArray, iterations);
}
//In order to implement this we need a working version of exclusive scan
__global__ void index_repeats(int *b, int *c, int *B) {
}
int main() {
int *a, *b, *c; //host ABC
int *dev_a, *dev_b, *dev_A;
int *totalSum;
int sum = 0;
//int *dev_c, *dev_B;
int size = N * sizeof(int);
Node *nodeArray, *dev_nodeArray;
int nodeArraySize = (2 * N - 1) * sizeof(Node);
//cudamalloc a, b, and c on device memory
hipMalloc((void**)&dev_a, size);
hipMalloc((void**)&dev_b, size);
hipMalloc((void**)&dev_A, size);
hipMalloc((void**)&totalSum, sizeof(int));
hipMalloc((void**)&dev_nodeArray, nodeArraySize);
//cudaMalloc((void**)&dev_c, size);
//cudaMalloc((void**)&dev_B, size);
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
nodeArray = (Node *)malloc(nodeArraySize);
//Make random array
randomGen(a, N);
hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
//This works properly
find_repeats <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_b, N);
//Do exclusive scan on dev_b, store in dev_c
//exclusive_scan <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_b, dev_c);
int iterations = log(N) / log(2); //Cannot be called from the device
exclusive_scan <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_A, dev_nodeArray, totalSum, iterations);
//Create B and C with some cuda operations on dev_c
//need to implement exclusive scan on dev_b in order to do index repeats
//index_repeats <<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_b, dev_c, dev_B);
hipMemcpy(b, dev_b, size, hipMemcpyDeviceToHost);
//cudaCheckErrors("failed");
//cudaMemcpy(a, dev_A, size, cudaMemcpyDeviceToHost);
hipMemcpy(&sum, totalSum, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(nodeArray, dev_nodeArray, nodeArraySize, hipMemcpyDeviceToHost);
//remove repeats cannot be done in parallel
int j = 0;
for (int i = 0; i < N; i++) {
if (b[i] == 0) {
c[j] = a[i];
j++;
}
}
printf("Last element of find_repeats: %d\n", c[N-j]); //replace
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_A);
hipFree(totalSum);
hipFree(dev_nodeArray);
//cudaFree(dev_c);
free(a); free(b); free(c); free(nodeArray);
return 0;
} | .text
.file "cuda_prefix.hip"
.globl _Z9randomGenPii # -- Begin function _Z9randomGenPii
.p2align 4, 0x90
.type _Z9randomGenPii,@function
_Z9randomGenPii: # @_Z9randomGenPii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebp
movq %rdi, %rbx
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
testl %ebp, %ebp
jle .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %ebp, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB0_2
.LBB0_3: # %._crit_edge
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z9randomGenPii, .Lfunc_end0-_Z9randomGenPii
.cfi_endproc
# -- End function
.globl _Z27__device_stub__find_repeatsPiS_i # -- Begin function _Z27__device_stub__find_repeatsPiS_i
.p2align 4, 0x90
.type _Z27__device_stub__find_repeatsPiS_i,@function
_Z27__device_stub__find_repeatsPiS_i: # @_Z27__device_stub__find_repeatsPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12find_repeatsPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z27__device_stub__find_repeatsPiS_i, .Lfunc_end1-_Z27__device_stub__find_repeatsPiS_i
.cfi_endproc
# -- End function
.globl _Z29__device_stub__exclusive_scanPiS_P4NodeS_i # -- Begin function _Z29__device_stub__exclusive_scanPiS_P4NodeS_i
.p2align 4, 0x90
.type _Z29__device_stub__exclusive_scanPiS_P4NodeS_i,@function
_Z29__device_stub__exclusive_scanPiS_P4NodeS_i: # @_Z29__device_stub__exclusive_scanPiS_P4NodeS_i
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z14exclusive_scanPiS_P4NodeS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end2:
.size _Z29__device_stub__exclusive_scanPiS_P4NodeS_i, .Lfunc_end2-_Z29__device_stub__exclusive_scanPiS_P4NodeS_i
.cfi_endproc
# -- End function
.globl _Z28__device_stub__index_repeatsPiS_S_ # -- Begin function _Z28__device_stub__index_repeatsPiS_S_
.p2align 4, 0x90
.type _Z28__device_stub__index_repeatsPiS_S_,@function
_Z28__device_stub__index_repeatsPiS_S_: # @_Z28__device_stub__index_repeatsPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13index_repeatsPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end3:
.size _Z28__device_stub__index_repeatsPiS_S_, .Lfunc_end3-_Z28__device_stub__index_repeatsPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $0, 112(%rsp)
leaq 8(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 32(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 120(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $16777208, %esi # imm = 0xFFFFF8
callq hipMalloc
movl $4194304, %edi # imm = 0x400000
callq malloc
movq %rax, %rbx
movl $4194304, %edi # imm = 0x400000
callq malloc
movq %rax, %r14
movl $4194304, %edi # imm = 0x400000
callq malloc
movq %rax, %r15
movl $16777208, %edi # imm = 0xFFFFF8
callq malloc
movq %rax, %r12
xorl %r13d, %r13d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
.p2align 4, 0x90
.LBB4_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r13,4)
incq %r13
cmpq $1048576, %r13 # imm = 0x100000
jne .LBB4_1
# %bb.2: # %_Z9randomGenPii.exit
movabsq $4294968320, %r13 # imm = 0x100000400
movq 8(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq %r13, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_4
# %bb.3:
movq 8(%rsp), %rax
movq 32(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl $1048576, 40(%rsp) # imm = 0x100000
leaq 104(%rsp), %rax
movq %rax, 128(%rsp)
leaq 96(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z12find_repeatsPiS_i, %edi
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_4:
xorl %ebp, %ebp
movq %r13, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_6
# %bb.5:
movq 8(%rsp), %rax
movq 120(%rsp), %rcx
movq 16(%rsp), %rdx
movq 24(%rsp), %rsi
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rsi, 80(%rsp)
movl $20, 116(%rsp)
leaq 104(%rsp), %rax
movq %rax, 128(%rsp)
leaq 96(%rsp), %rax
movq %rax, 136(%rsp)
leaq 88(%rsp), %rax
movq %rax, 144(%rsp)
leaq 80(%rsp), %rax
movq %rax, 152(%rsp)
leaq 116(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 176(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z14exclusive_scanPiS_P4NodeS_i, %edi
pushq 176(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_6:
movq 32(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rsi
leaq 112(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rsi
movl $16777208, %edx # imm = 0xFFFFF8
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %eax, %eax
jmp .LBB4_7
.p2align 4, 0x90
.LBB4_9: # in Loop: Header=BB4_7 Depth=1
incq %rax
cmpq $1048576, %rax # imm = 0x100000
je .LBB4_10
.LBB4_7: # =>This Inner Loop Header: Depth=1
cmpl $0, (%r14,%rax,4)
jne .LBB4_9
# %bb.8: # in Loop: Header=BB4_7 Depth=1
movl (%rbx,%rax,4), %ecx
movslq %ebp, %rbp
movl %ecx, (%r15,%rbp,4)
incl %ebp
jmp .LBB4_9
.LBB4_10:
movl $1048576, %eax # imm = 0x100000
subl %ebp, %eax
cltq
movl (%r15,%rax,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movq 8(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 120(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12find_repeatsPiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14exclusive_scanPiS_P4NodeS_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13index_repeatsPiS_S_, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12find_repeatsPiS_i,@object # @_Z12find_repeatsPiS_i
.section .rodata,"a",@progbits
.globl _Z12find_repeatsPiS_i
.p2align 3, 0x0
_Z12find_repeatsPiS_i:
.quad _Z27__device_stub__find_repeatsPiS_i
.size _Z12find_repeatsPiS_i, 8
.type _Z14exclusive_scanPiS_P4NodeS_i,@object # @_Z14exclusive_scanPiS_P4NodeS_i
.globl _Z14exclusive_scanPiS_P4NodeS_i
.p2align 3, 0x0
_Z14exclusive_scanPiS_P4NodeS_i:
.quad _Z29__device_stub__exclusive_scanPiS_P4NodeS_i
.size _Z14exclusive_scanPiS_P4NodeS_i, 8
.type _Z13index_repeatsPiS_S_,@object # @_Z13index_repeatsPiS_S_
.globl _Z13index_repeatsPiS_S_
.p2align 3, 0x0
_Z13index_repeatsPiS_S_:
.quad _Z28__device_stub__index_repeatsPiS_S_
.size _Z13index_repeatsPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Last element of find_repeats: %d\n"
.size .L.str, 34
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12find_repeatsPiS_i"
.size .L__unnamed_1, 22
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z14exclusive_scanPiS_P4NodeS_i"
.size .L__unnamed_2, 32
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z13index_repeatsPiS_S_"
.size .L__unnamed_3, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__find_repeatsPiS_i
.addrsig_sym _Z29__device_stub__exclusive_scanPiS_P4NodeS_i
.addrsig_sym _Z28__device_stub__index_repeatsPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12find_repeatsPiS_i
.addrsig_sym _Z14exclusive_scanPiS_P4NodeS_i
.addrsig_sym _Z13index_repeatsPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0003ec1d_00000000-6_cuda_prefix.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2064:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2064:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9randomGenPii
.type _Z9randomGenPii, @function
_Z9randomGenPii:
.LFB2057:
.cfi_startproc
endbr64
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rdi, %r12
movl %esi, %ebp
movl $0, %edi
call time@PLT
movl %eax, %edi
call srand@PLT
testl %ebp, %ebp
jle .L3
movq %r12, %rbx
movslq %ebp, %rbp
leaq (%r12,%rbp,4), %rbp
.L5:
call rand@PLT
movslq %eax, %rdx
imulq $1374389535, %rdx, %rdx
sarq $37, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $100, %edx, %edx
subl %edx, %eax
movl %eax, (%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L5
.L3:
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2057:
.size _Z9randomGenPii, .-_Z9randomGenPii
.globl _Z8downPassP4Nodei
.type _Z8downPassP4Nodei, @function
_Z8downPassP4Nodei:
.LFB2058:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size _Z8downPassP4Nodei, .-_Z8downPassP4Nodei
.globl _Z9buildTreePiP4NodeS_i
.type _Z9buildTreePiP4NodeS_i, @function
_Z9buildTreePiP4NodeS_i:
.LFB2059:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2059:
.size _Z9buildTreePiP4NodeS_i, .-_Z9buildTreePiP4NodeS_i
.globl _Z35__device_stub__Z12find_repeatsPiS_iPiS_i
.type _Z35__device_stub__Z12find_repeatsPiS_iPiS_i, @function
_Z35__device_stub__Z12find_repeatsPiS_iPiS_i:
.LFB2086:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L16
.L12:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L17
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L16:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12find_repeatsPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L12
.L17:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2086:
.size _Z35__device_stub__Z12find_repeatsPiS_iPiS_i, .-_Z35__device_stub__Z12find_repeatsPiS_iPiS_i
.globl _Z12find_repeatsPiS_i
.type _Z12find_repeatsPiS_i, @function
_Z12find_repeatsPiS_i:
.LFB2087:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z12find_repeatsPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2087:
.size _Z12find_repeatsPiS_i, .-_Z12find_repeatsPiS_i
.globl _Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i
.type _Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i, @function
_Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i:
.LFB2088:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movl %r8d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 12(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L24
.L20:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L25
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L24:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z14exclusive_scanPiS_P4NodeS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L20
.L25:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i, .-_Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i
.globl _Z14exclusive_scanPiS_P4NodeS_i
.type _Z14exclusive_scanPiS_P4NodeS_i, @function
_Z14exclusive_scanPiS_P4NodeS_i:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z14exclusive_scanPiS_P4NodeS_i, .-_Z14exclusive_scanPiS_P4NodeS_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Last element of find_repeats: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2060:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $88, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $0, 4(%rsp)
leaq 8(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
leaq 40(%rsp), %rdi
movl $16777208, %esi
call cudaMalloc@PLT
movl $4194304, %edi
call malloc@PLT
movq %rax, %rbp
movl $4194304, %edi
call malloc@PLT
movq %rax, %rbx
movl $4194304, %edi
call malloc@PLT
movq %rax, %r12
movl $16777208, %edi
call malloc@PLT
movq %rax, %r13
movl $1048576, %esi
movq %rbp, %rdi
call _Z9randomGenPii
movl $1, %ecx
movl $4194304, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1024, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1024, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L36
.L29:
movl $1024, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1024, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 60(%rsp), %rdx
movl $1, %ecx
movq 48(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L37
.L30:
movl $2, %ecx
movl $4194304, %edx
movq 16(%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
leaq 4(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
movl $2, %ecx
movl $16777208, %edx
movq 40(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl $0, %eax
movl $0, %edx
jmp .L32
.L36:
movl $1048576, %edx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z35__device_stub__Z12find_repeatsPiS_iPiS_i
jmp .L29
.L37:
movl $20, %r8d
movq 32(%rsp), %rcx
movq 40(%rsp), %rdx
movq 24(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z45__device_stub__Z14exclusive_scanPiS_P4NodeS_iPiS_P4NodeS_i
jmp .L30
.L31:
addq $4, %rax
cmpq $4194304, %rax
je .L38
.L32:
cmpl $0, (%rbx,%rax)
jne .L31
movslq %edx, %rcx
movl 0(%rbp,%rax), %esi
movl %esi, (%r12,%rcx,4)
addl $1, %edx
jmp .L31
.L38:
movl $1048576, %eax
subl %edx, %eax
cltq
movl (%r12,%rax,4), %edx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 40(%rsp), %rdi
call cudaFree@PLT
movq %rbp, %rdi
call free@PLT
movq %rbx, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L39
movl $0, %eax
addq $88, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L39:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size main, .-main
.globl _Z37__device_stub__Z13index_repeatsPiS_S_PiS_S_
.type _Z37__device_stub__Z13index_repeatsPiS_S_PiS_S_, @function
_Z37__device_stub__Z13index_repeatsPiS_S_PiS_S_:
.LFB2090:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L44
.L40:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L45
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L44:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z13index_repeatsPiS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L40
.L45:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2090:
.size _Z37__device_stub__Z13index_repeatsPiS_S_PiS_S_, .-_Z37__device_stub__Z13index_repeatsPiS_S_PiS_S_
.globl _Z13index_repeatsPiS_S_
.type _Z13index_repeatsPiS_S_, @function
_Z13index_repeatsPiS_S_:
.LFB2091:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z13index_repeatsPiS_S_PiS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _Z13index_repeatsPiS_S_, .-_Z13index_repeatsPiS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "_Z13index_repeatsPiS_S_"
.section .rodata.str1.8
.align 8
.LC2:
.string "_Z14exclusive_scanPiS_P4NodeS_i"
.section .rodata.str1.1
.LC3:
.string "_Z12find_repeatsPiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2093:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z13index_repeatsPiS_S_(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z14exclusive_scanPiS_P4NodeS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z12find_repeatsPiS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2093:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda_prefix.hip"
.globl _Z9randomGenPii # -- Begin function _Z9randomGenPii
.p2align 4, 0x90
.type _Z9randomGenPii,@function
_Z9randomGenPii: # @_Z9randomGenPii
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl %esi, %ebp
movq %rdi, %rbx
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
testl %ebp, %ebp
jle .LBB0_3
# %bb.1: # %.lr.ph.preheader
movl %ebp, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB0_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB0_2
.LBB0_3: # %._crit_edge
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z9randomGenPii, .Lfunc_end0-_Z9randomGenPii
.cfi_endproc
# -- End function
.globl _Z27__device_stub__find_repeatsPiS_i # -- Begin function _Z27__device_stub__find_repeatsPiS_i
.p2align 4, 0x90
.type _Z27__device_stub__find_repeatsPiS_i,@function
_Z27__device_stub__find_repeatsPiS_i: # @_Z27__device_stub__find_repeatsPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z12find_repeatsPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end1:
.size _Z27__device_stub__find_repeatsPiS_i, .Lfunc_end1-_Z27__device_stub__find_repeatsPiS_i
.cfi_endproc
# -- End function
.globl _Z29__device_stub__exclusive_scanPiS_P4NodeS_i # -- Begin function _Z29__device_stub__exclusive_scanPiS_P4NodeS_i
.p2align 4, 0x90
.type _Z29__device_stub__exclusive_scanPiS_P4NodeS_i,@function
_Z29__device_stub__exclusive_scanPiS_P4NodeS_i: # @_Z29__device_stub__exclusive_scanPiS_P4NodeS_i
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movl %r8d, 12(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 12(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z14exclusive_scanPiS_P4NodeS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end2:
.size _Z29__device_stub__exclusive_scanPiS_P4NodeS_i, .Lfunc_end2-_Z29__device_stub__exclusive_scanPiS_P4NodeS_i
.cfi_endproc
# -- End function
.globl _Z28__device_stub__index_repeatsPiS_S_ # -- Begin function _Z28__device_stub__index_repeatsPiS_S_
.p2align 4, 0x90
.type _Z28__device_stub__index_repeatsPiS_S_,@function
_Z28__device_stub__index_repeatsPiS_S_: # @_Z28__device_stub__index_repeatsPiS_S_
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z13index_repeatsPiS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end3:
.size _Z28__device_stub__index_repeatsPiS_S_, .Lfunc_end3-_Z28__device_stub__index_repeatsPiS_S_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $184, %rsp
.cfi_def_cfa_offset 240
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movl $0, 112(%rsp)
leaq 8(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 32(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 120(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4, %esi
callq hipMalloc
leaq 16(%rsp), %rdi
movl $16777208, %esi # imm = 0xFFFFF8
callq hipMalloc
movl $4194304, %edi # imm = 0x400000
callq malloc
movq %rax, %rbx
movl $4194304, %edi # imm = 0x400000
callq malloc
movq %rax, %r14
movl $4194304, %edi # imm = 0x400000
callq malloc
movq %rax, %r15
movl $16777208, %edi # imm = 0xFFFFF8
callq malloc
movq %rax, %r12
xorl %r13d, %r13d
xorl %edi, %edi
callq time
movl %eax, %edi
callq srand
.p2align 4, 0x90
.LBB4_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
cltq
imulq $1374389535, %rax, %rcx # imm = 0x51EB851F
movq %rcx, %rdx
shrq $63, %rdx
sarq $37, %rcx
addl %edx, %ecx
imull $100, %ecx, %ecx
subl %ecx, %eax
movl %eax, (%rbx,%r13,4)
incq %r13
cmpq $1048576, %r13 # imm = 0x100000
jne .LBB4_1
# %bb.2: # %_Z9randomGenPii.exit
movabsq $4294968320, %r13 # imm = 0x100000400
movq 8(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq %r13, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_4
# %bb.3:
movq 8(%rsp), %rax
movq 32(%rsp), %rcx
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movl $1048576, 40(%rsp) # imm = 0x100000
leaq 104(%rsp), %rax
movq %rax, 128(%rsp)
leaq 96(%rsp), %rax
movq %rax, 136(%rsp)
leaq 40(%rsp), %rax
movq %rax, 144(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 88(%rsp), %rdx
leaq 80(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z12find_repeatsPiS_i, %edi
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
pushq 96(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_4:
xorl %ebp, %ebp
movq %r13, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB4_6
# %bb.5:
movq 8(%rsp), %rax
movq 120(%rsp), %rcx
movq 16(%rsp), %rdx
movq 24(%rsp), %rsi
movq %rax, 104(%rsp)
movq %rcx, 96(%rsp)
movq %rdx, 88(%rsp)
movq %rsi, 80(%rsp)
movl $20, 116(%rsp)
leaq 104(%rsp), %rax
movq %rax, 128(%rsp)
leaq 96(%rsp), %rax
movq %rax, 136(%rsp)
leaq 88(%rsp), %rax
movq %rax, 144(%rsp)
leaq 80(%rsp), %rax
movq %rax, 152(%rsp)
leaq 116(%rsp), %rax
movq %rax, 160(%rsp)
leaq 64(%rsp), %rdi
leaq 48(%rsp), %rsi
leaq 40(%rsp), %rdx
leaq 176(%rsp), %rcx
callq __hipPopCallConfiguration
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
movq 48(%rsp), %rcx
movl 56(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z14exclusive_scanPiS_P4NodeS_i, %edi
pushq 176(%rsp)
.cfi_adjust_cfa_offset 8
pushq 48(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB4_6:
movq 32(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
movq 24(%rsp), %rsi
leaq 112(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rsi
movl $16777208, %edx # imm = 0xFFFFF8
movq %r12, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %eax, %eax
jmp .LBB4_7
.p2align 4, 0x90
.LBB4_9: # in Loop: Header=BB4_7 Depth=1
incq %rax
cmpq $1048576, %rax # imm = 0x100000
je .LBB4_10
.LBB4_7: # =>This Inner Loop Header: Depth=1
cmpl $0, (%r14,%rax,4)
jne .LBB4_9
# %bb.8: # in Loop: Header=BB4_7 Depth=1
movl (%rbx,%rax,4), %ecx
movslq %ebp, %rbp
movl %ecx, (%r15,%rbp,4)
incl %ebp
jmp .LBB4_9
.LBB4_10:
movl $1048576, %eax # imm = 0x100000
subl %ebp, %eax
cltq
movl (%r15,%rax,4), %esi
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movq 8(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 120(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
xorl %eax, %eax
addq $184, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size main, .Lfunc_end4-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB5_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB5_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z12find_repeatsPiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14exclusive_scanPiS_P4NodeS_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z13index_repeatsPiS_S_, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end5:
.size __hip_module_ctor, .Lfunc_end5-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB6_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB6_2:
retq
.Lfunc_end6:
.size __hip_module_dtor, .Lfunc_end6-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z12find_repeatsPiS_i,@object # @_Z12find_repeatsPiS_i
.section .rodata,"a",@progbits
.globl _Z12find_repeatsPiS_i
.p2align 3, 0x0
_Z12find_repeatsPiS_i:
.quad _Z27__device_stub__find_repeatsPiS_i
.size _Z12find_repeatsPiS_i, 8
.type _Z14exclusive_scanPiS_P4NodeS_i,@object # @_Z14exclusive_scanPiS_P4NodeS_i
.globl _Z14exclusive_scanPiS_P4NodeS_i
.p2align 3, 0x0
_Z14exclusive_scanPiS_P4NodeS_i:
.quad _Z29__device_stub__exclusive_scanPiS_P4NodeS_i
.size _Z14exclusive_scanPiS_P4NodeS_i, 8
.type _Z13index_repeatsPiS_S_,@object # @_Z13index_repeatsPiS_S_
.globl _Z13index_repeatsPiS_S_
.p2align 3, 0x0
_Z13index_repeatsPiS_S_:
.quad _Z28__device_stub__index_repeatsPiS_S_
.size _Z13index_repeatsPiS_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Last element of find_repeats: %d\n"
.size .L.str, 34
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z12find_repeatsPiS_i"
.size .L__unnamed_1, 22
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z14exclusive_scanPiS_P4NodeS_i"
.size .L__unnamed_2, 32
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "_Z13index_repeatsPiS_S_"
.size .L__unnamed_3, 24
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z27__device_stub__find_repeatsPiS_i
.addrsig_sym _Z29__device_stub__exclusive_scanPiS_P4NodeS_i
.addrsig_sym _Z28__device_stub__index_repeatsPiS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z12find_repeatsPiS_i
.addrsig_sym _Z14exclusive_scanPiS_P4NodeS_i
.addrsig_sym _Z13index_repeatsPiS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define M 20
// RED = 0, BLACK = 1
enum nodeColor {
RED,
BLACK
};
enum result {
Failure,
Success,
FirstInsert
};
enum caseFlag {
NOOP,
DID_CASE1,
DID_CASE3
};
struct par_rbNode {
int key, color;
int flag;
struct par_rbNode *left, *right, *parent;
};
/* Function prototypes */
__device__ void createNIL();
__device__ struct par_rbNode * createNode(int);
__device__ void createTree();
__device__ struct par_rbNode * Traverse(struct par_rbNode *,int);
__device__ enum result PlaceNode(struct par_rbNode *);
__device__ void Insert_Rebalance(struct par_rbNode *);
__device__ bool Update_Rotation(struct par_rbNode *, enum caseFlag *);
__device__ bool Left_Rotate(struct par_rbNode *);
__device__ bool Right_Rotate(struct par_rbNode *);
__device__ void printPreorder(struct par_rbNode* );
__device__ void printInorder(struct par_rbNode* );
__device__ void printPostorder(struct par_rbNode* );
__device__ struct par_rbNode *nodes;
__device__ struct par_rbNode *root;
__device__ struct par_rbNode *NIL;
__device__ struct par_rbNode *rtParent;
__device__ struct par_rbNode *rtSibling; // You might feel this is unnecessary, but it will be used
__device__ int nodeIndex = 0;
__device__ int tmpIndex = 0;
__device__ struct par_rbNode *tmp[M];// need M tmps
__device__ int createFlag = false;
__device__ void createNIL(){
NIL = &nodes[0];
NIL->color = BLACK;
NIL->key = -1;
NIL->left = NIL->right = NIL->parent = NIL;
printf("NIL created\n");
}
__device__ struct par_rbNode * createNode(int key){
bool ok = false;
do{
ok = atomicCAS(&createFlag,false,true); //Capture the lock
}while(ok);
atomicAdd(&nodeIndex,1);
atomicAdd(&tmpIndex,1);
nodes[nodeIndex].key = key;
nodes[nodeIndex].flag = true;
nodes[nodeIndex].left = nodes[nodeIndex].right = nodes[nodeIndex].parent = NIL;
tmp[tmpIndex] = &nodes[nodeIndex];
createFlag = false;
// atomicCAS(&createFlag,true,false); //Release the lock
printf("Created %d\n",key);
return tmp[tmpIndex]; // Even if this thread pauses it will eventually return the correct pointer
}
__device__ void createTree(){
rtParent = createNode(-1);
rtSibling = createNode(-1);
// NIL = createNode(-1);
root = NIL;
rtParent->parent = NIL;
rtSibling->parent = rtParent;
rtSibling->right = NIL;
rtSibling->left = NIL;
rtParent->left = root;
//rtParent->left = root; Why only left, why not right?
//ANS: Since we check for the left parent condition first
//(if you don't understand, try to insert a node into a tree with only one node)
rtParent->right = rtSibling;
rtParent->flag = false;
rtSibling->flag = false;
rtParent->color = BLACK;
rtSibling->color = BLACK;
// NIL->left = NIL;
// NIL->right = NIL;
NIL->parent = rtParent;
NIL->flag = false;
// NIL->color = BLACK;
printf("Tree Created \n");
printf("\n");
}
__device__ struct par_rbNode * Traverse(struct par_rbNode *newNode,int key){
struct par_rbNode *x;
// struct par_rbNode *inertPoint;
// struct par_rbNode *savert;
bool success;
bool ok;
// do{
// savert = root;
// success = DCAS(&root->flag,false,true,&root,savert,savert); //Catching the flag of the root
// }while(!success);
//An alternate for DCAS - should check if it works or not
// do{
// savert = root;
// success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
// }while(savert!=root || !success);
do{
// savert = root;
success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
}while(success);
//success => captured the root flag
//savert != root => root has changed
//!success => root is under lock
//thread will come out of the loop only after "success" and "savert==root"
x = root;
if(x != NIL){
while(x != NIL){
struct par_rbNode *y = x;
if(key == x->key) {
x->flag = false; // Release the flag that was just caught
return NULL; // Traversing is done. Node is already there so Insert() fails.
}
if(key < x->key){
if(x->left != NIL){
ok = atomicCAS(&x->left->flag,false,true);
if(ok){
x->flag = false; // Release the flag of x
return NULL;
}//end if
x->flag = false;
x = x->left;
}else{
newNode->parent = x;
x = x->left;
if(x == NIL){
printf("Insert Point %d\n",y->key);
return y;
}
}//end if
}else{
if(x->right != NIL){
ok = atomicCAS(&x->right->flag,false,true);
if(ok){
x->flag = false;
return NULL;
}//end if
x->flag = false;
x = x->right;
}else{
newNode->parent = x;
x = x->right;
if(x == NIL){
printf("Insert Point %d\n",y->key);
return y;
}
}//end if
}//end if
}//end while
// return x->parent;
}else{
return NIL;
}
}
__device__ enum result PlaceNode(struct par_rbNode *newNode){
//flags on newNode and insPoint are held
bool ok = true;
// struct par_rbNode *uncle,*savep;
if(newNode->parent == NIL){ //tree is empty
newNode->color = BLACK;
newNode->parent = rtParent;
rtParent->left = newNode;
root=newNode;
NIL->flag = false; // release NIL node, that u caught during Traverse
newNode->flag = false;
newNode->left = newNode->right = NIL;
return FirstInsert;
}else{ // the tree is not empty so...
// newNode->parent = insPoint;
//set the flags of the grandparent and uncle
// struct par_rbNode *insPoint = newNode->parent;
printf("%d\n",newNode->parent->key);
printf("%d\n",newNode->parent->parent->left->key);
if(newNode->parent == newNode->parent->parent->left){ //uncle is right child
printf("Insert Key %d\n",newNode->parent->key);
// savep = newNode->parent->parent; // save parent ptr
// uncle = savep->right; // rtSibling is used here, when newNode->parent is root
ok = atomicCAS(&newNode->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&newNode->parent->parent->right->flag,false,true);
// if(ok){
// ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->right,uncle,uncle);
// }
if(ok){ //back off
newNode->parent->parent->flag = false;
newNode->parent->parent->right->flag = false;
}else{
newNode->parent->parent->flag = false;
}//end if
}
}else{// uncle is left child
// savep = newNode->parent->parent; // save parent ptr
// uncle = savep->left;
ok = atomicCAS(&newNode->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&newNode->parent->parent->left->flag,false,true);
// if(ok){
// ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->left,uncle,uncle);
// }
if(ok){ //back off
newNode->parent->parent->flag = false;
newNode->parent->parent->left->flag = false;
}else{
newNode->parent->parent->flag = false;
}//end if
}
}//end if
if(ok){
// This branch is taken when you fail to capture the grandparent flag;
// you haven't caught any extra flags, so just get rid of the flag of newNode->parent
newNode->parent->flag = false; // release flag
newNode->parent = NIL;
return Failure; //avoid deadlock
}
// When u have successfully captured all the required flags.
// i.e. parent, grandparent, uncle
if(newNode->key < newNode->parent->key){
//insert as left child
newNode->parent->left = newNode;
return Success;
}else{//insert as right child
newNode->parent->right = newNode;
printf("THE OK = %d\n",ok);
return Success;
}
}
}
__device__ void Insert_Rebalance(struct par_rbNode *x){ //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//we hold flags on x, p(x), p(p(x)), and uncle(x)
struct par_rbNode *oldx;
struct par_rbNode *uncle, *olduncle;
// struct par_rbNode *savep, *savegp;
struct par_rbNode *brother;
struct par_rbNode *nephew;
bool ok;
bool updateSucceeds; //Update-Rotation succeeded?
//caseF is short for caseFlag (avoiding confusion between global enum and local variable)
enum caseFlag caseF = NOOP; // initially not doing any case
//define uncle for first iteration
if(x->parent == x->parent->parent->left){
uncle = x->parent->parent->right;
}else{ // uncle is the left child not right
uncle = x->parent->parent->left;
}
while((x != root) && (x->parent->color == RED)){
//do color-update and/or rotation as required
do{
updateSucceeds = Update_Rotation(x,&caseF);
}while(!updateSucceeds);
//CASE 1: move to grandparent after color update
if(caseF == DID_CASE1){
oldx = x; //save pointer to the old x
olduncle = uncle; // save pointer to old uncle;
x = x->parent->parent; // up to grandparent
do{ //find new uncle of x and get flags
if(x->parent == x->parent->parent->left){
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->right;
ok = atomicCAS(&x->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->right->flag,false,true);
if(ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->right->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}else{
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->left;
ok = atomicCAS(&x->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->left->flag,false,true);
if(ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->left->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}
}while(ok); //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//Release old flags for CASE 1
oldx->parent->flag = false;
olduncle->flag = false;
oldx->flag = false;
}
//in CASE 3 loop will exit: parent will be BLACK
}
switch(caseF){
case NOOP: //In the beginning of this function we had
//x,p(x),p(p(x)),uncle(x) - release them
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE1: //Release the last set of flags acquired
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE3: //release flags on ROTATED x, etc
if(x == x->parent->left){
brother = x->parent->right;
nephew = x->parent->right->right;
}else{
brother = x->parent->left;
nephew = x->parent->left->left;
}
x->parent->flag = false;
brother->flag = false;
nephew->flag = false;
x->flag = false;
break;
}
// printf("last %d\n",x->key);
root->color = BLACK;
}
__device__ bool Update_Rotation(struct par_rbNode *x, enum caseFlag *caseF){
//we hold flags on x, p(x), p(p(x)) and uncle(x)
struct par_rbNode *xUncle;
struct par_rbNode *oldx; //*ggp; // ggp -> greatgrandparent
bool ok;
if(x->parent == x->parent->parent->left){
//the parent is a left child
xUncle = x->parent->parent->right;
if(xUncle->color == RED){
//CASE 1 - recoloring
// You have all the flags you need. So this is simple, similar to the serial code
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true; // This true is for "updateSucceeds"
}else{ // rotation(s) will be needed
if(x == x->parent->right){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Left_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false; //This false is for "updateSucceeds"
}
}
//In CASE 3, if the right-rotation fails,
//CASE 3 fails but the algorithm still works
//because the process will return false to
//Insert_Rebalance, and Insert_Rebalance will
//call Update_Rotation again to complete CASE3
do{ // get great grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(ok); //KEEPS TRYING, DOESN'T BACK OFF
ok = Right_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false; //This false is for "updateSucceeds"
}else{
x->parent->color = BLACK;
x->parent->right->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false; //remove the ggp flag as rotation was successful
return true;
}
}
//symmetric to above code
}else{
//the parent is a right child
xUncle = x->parent->parent->left;
if(xUncle->color == RED){
//CASE 1 - recoloring
// You have all the flags you need. So this is simple, similar to the serial code
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true;
}else{ // rotation(s) will be needed
if(x == x->parent->left){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Right_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false;
}
}
//In CASE 3, if the left-rotation fails,
//CASE 3 fails but the algorithm still works
//because the process will return false to
//Insert_Rebalance, and Insert_Rebalance will
//call Update_Rotation again to complete CASE3
do{ // get great grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(ok);
ok = Left_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false;
}else{
x->parent->color = BLACK;
x->parent->left->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false;
return true;
}
}
}
}
//A rotation will always be successful (true), as you can reach the rotate command
//only after you have captured all the required flags
__device__ bool Left_Rotate(struct par_rbNode *z){
//z is the root of the rotation subtree. The locks
// held at this point are: z, z->parent and z->right (and the sibling of z, but it's not useful here)
// bool ok;
struct par_rbNode *zrl,*zr;
if(z->parent == rtParent){
//rotating at the root
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
// ok = CAS3(z->right,zrl,z->right,
// z->right,z,zrl->parent,
// zrl,zrl,z->right->left);
//update other links
root = zr;
rtParent->left = root;
root->parent = rtParent;
z->parent = root;
root->left = z;
}else{
//rotating under the root (parent, etc. exist)
if(z == z->parent->left){
//z is left child
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
//update other links
z->parent->left = zr;
z->right->parent = z->parent;
z->parent = zr;
z->right->left = z;
}else{
// z is right child
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
//update other links
z->parent->right = zr;
z->right->parent = z->parent;
z->parent = zr;
z->right->left = z;
}
}
return true;
}
//symmetric to Left_Rotate
__device__ bool Right_Rotate(struct par_rbNode *z){
//z is the root of the rotation subtree. The locks
// held at this point are: z, z->parent and z->left (and the sibling of z, but it's not useful here)
// bool ok;
struct par_rbNode *zrl,*zr;
if(z->parent == rtParent){
//rotating at the root
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
// ok = CAS3(z->left,zrl,z->left,
// z->left,z,zrl->parent,
// zrl,zrl,z->left->right);
//update other links
root = zr;
rtParent->right = root;
root->parent = rtParent;
z->parent = root;
root->right = z;
}else{
//rotating under the root (parent, etc. exist)
if(z == z->parent->right){
//z is right child
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
//update other links
z->parent->right = zr;
z->left->parent = z->parent;
z->parent = zr;
z->left->right = z;
}else{
// z is left child
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
//update other links
z->parent->left = zr;
z->left->parent = z->parent;
z->parent = zr;
z->left->right = z;
}
}
return true;
}
__device__ void Insert(int key){
struct par_rbNode *newNode = createNode(key); //Internally the flag of the newNode is held
// struct par_rbNode *insertPoint;
// // Create and initialize the new node
// enum result res = Failure;
// //insert the new node
// do{
// //Traverse tree to find insertion point
// insertPoint = Traverse(newNode,key);
// if(insertPoint != NULL){
// //add new node to tree
// // printf("Placing Node\n");
// res = PlaceNode(newNode);
// printf("res = %d\n",res);
// // res is short for result (avoiding confusion b/w global enum and local variable)
// if(res == Success){
// printf("rebalance\n");
// //node was added successfully so make
// //tree red-black again by doing the
// //necessary color updates and rotations
// Insert_Rebalance(newNode);
// }
// }else{
// printf("Key Exists\n");
// res = Success;
// break;
// }
// }while(res == Failure);
// printf("PreOrder: ");
// printPreorder(root);
// printf("\n");
// printf("\n");
// printf("InOrder: ");
// printInorder(root);
// printf("\n");
// printf("\n");
// printf("PostOrder: ");
// printPostorder(root);
// printf("\n");
// printf("\n");
}
//Functions for printing the tree
__device__ void printPreorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
/* then recur on left child */
printPreorder(node->left);
/* now recur on right child */
printPreorder(node->right);
}
__device__ void printInorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first recur on left child */
printInorder(node->left);
/* then print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
/* now recur on right child */
printInorder(node->right);
}
__device__ void printPostorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first recur on left child */
printPostorder(node->left);
/* then recur on right child */
printPostorder(node->right);
/* now print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
}
__device__ int threadsFinished = 0;
__device__ int passCreate = 0;
__global__ void RBT(struct par_rbNode *d_nodes) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
int threadCount = gridDim.x*blockDim.x;
if(id == 0){
printf("Starting the Tree\n");
nodes = d_nodes; // Make it a global variable
createNIL();
createTree();
atomicAdd(&passCreate,1);
}
Insert(5);
Insert(6);
Insert(4);
// while(1){
// if(passCreate){
// Insert(id);
// break;
// }
// }
// //Print the time
// //This will keep track of number of threads that are done
atomicAdd(&threadsFinished,1);
// // //Print the tree after all the threads are done
if(threadsFinished == threadCount){
if(id == 0){
// printf("PreOrder: ");
// printPreorder(root);
// printf("\n");
// printf("\n");
// printf("InOrder: ");
// printInorder(root);
// printf("\n");
// printf("\n");
// printf("PostOrder: ");
// printPostorder(root);
// printf("\n");
// printf("\n");
}
}
//return to main
}
int main() {
struct par_rbNode h_nodes[M];
struct par_rbNode *d_nodes;
float time;
// 1. Allocate device array.
cudaMalloc(&d_nodes, M * sizeof(struct par_rbNode));
for(int i=0;i<M;i++){
h_nodes[i].flag = false;
h_nodes[i].color = RED;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// 2. Copy array contents from host to device.
cudaMemcpy(d_nodes, h_nodes, M * sizeof(struct par_rbNode), cudaMemcpyHostToDevice);
printf("Kernel Launched\n");
cudaEventRecord(start, 0);
RBT<<<1,1>>>(d_nodes);
cudaMemcpy(h_nodes, d_nodes, M * sizeof(struct par_rbNode), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
printf("Came back\n");
cudaEventElapsedTime(&time, start, stop);
printf ("Time for the kernel: %f ms\n", time);
return 0;
} | .file "tmpxft_0016d612_00000000-6_a_sam_backup_latest.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2073:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2073:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9createNILv
.type _Z9createNILv, @function
_Z9createNILv:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z9createNILv, .-_Z9createNILv
.globl _Z10createNodei
.type _Z10createNodei, @function
_Z10createNodei:
.LFB2058:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size _Z10createNodei, .-_Z10createNodei
.globl _Z10createTreev
.type _Z10createTreev, @function
_Z10createTreev:
.LFB2059:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2059:
.size _Z10createTreev, .-_Z10createTreev
.globl _Z8TraverseP10par_rbNodei
.type _Z8TraverseP10par_rbNodei, @function
_Z8TraverseP10par_rbNodei:
.LFB2060:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2060:
.size _Z8TraverseP10par_rbNodei, .-_Z8TraverseP10par_rbNodei
.globl _Z9PlaceNodeP10par_rbNode
.type _Z9PlaceNodeP10par_rbNode, @function
_Z9PlaceNodeP10par_rbNode:
.LFB2061:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2061:
.size _Z9PlaceNodeP10par_rbNode, .-_Z9PlaceNodeP10par_rbNode
.globl _Z16Insert_RebalanceP10par_rbNode
.type _Z16Insert_RebalanceP10par_rbNode, @function
_Z16Insert_RebalanceP10par_rbNode:
.LFB2062:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2062:
.size _Z16Insert_RebalanceP10par_rbNode, .-_Z16Insert_RebalanceP10par_rbNode
.globl _Z15Update_RotationP10par_rbNodeP8caseFlag
.type _Z15Update_RotationP10par_rbNodeP8caseFlag, @function
_Z15Update_RotationP10par_rbNodeP8caseFlag:
.LFB2063:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2063:
.size _Z15Update_RotationP10par_rbNodeP8caseFlag, .-_Z15Update_RotationP10par_rbNodeP8caseFlag
.globl _Z11Left_RotateP10par_rbNode
.type _Z11Left_RotateP10par_rbNode, @function
_Z11Left_RotateP10par_rbNode:
.LFB2064:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2064:
.size _Z11Left_RotateP10par_rbNode, .-_Z11Left_RotateP10par_rbNode
.globl _Z12Right_RotateP10par_rbNode
.type _Z12Right_RotateP10par_rbNode, @function
_Z12Right_RotateP10par_rbNode:
.LFB2065:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2065:
.size _Z12Right_RotateP10par_rbNode, .-_Z12Right_RotateP10par_rbNode
.globl _Z6Inserti
.type _Z6Inserti, @function
_Z6Inserti:
.LFB2066:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2066:
.size _Z6Inserti, .-_Z6Inserti
.globl _Z13printPreorderP10par_rbNode
.type _Z13printPreorderP10par_rbNode, @function
_Z13printPreorderP10par_rbNode:
.LFB2067:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2067:
.size _Z13printPreorderP10par_rbNode, .-_Z13printPreorderP10par_rbNode
.globl _Z12printInorderP10par_rbNode
.type _Z12printInorderP10par_rbNode, @function
_Z12printInorderP10par_rbNode:
.LFB2068:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2068:
.size _Z12printInorderP10par_rbNode, .-_Z12printInorderP10par_rbNode
.globl _Z14printPostorderP10par_rbNode
.type _Z14printPostorderP10par_rbNode, @function
_Z14printPostorderP10par_rbNode:
.LFB2069:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2069:
.size _Z14printPostorderP10par_rbNode, .-_Z14printPostorderP10par_rbNode
.globl _Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode
.type _Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode, @function
_Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode:
.LFB2095:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3RBTP10par_rbNode(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2095:
.size _Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode, .-_Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode
.globl _Z3RBTP10par_rbNode
.type _Z3RBTP10par_rbNode, @function
_Z3RBTP10par_rbNode:
.LFB2096:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _Z3RBTP10par_rbNode, .-_Z3RBTP10par_rbNode
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Kernel Launched\n"
.LC1:
.string "Came back\n"
.LC2:
.string "Time for the kernel: %f ms\n"
.text
.globl main
.type main, @function
main:
.LFB2070:
.cfi_startproc
endbr64
subq $872, %rsp
.cfi_def_cfa_offset 880
movq %fs:40, %rax
movq %rax, 856(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $800, %esi
call cudaMalloc@PLT
leaq 52(%rsp), %rax
leaq 852(%rsp), %rdx
.L38:
movl $0, 4(%rax)
movl $0, (%rax)
addq $40, %rax
cmpq %rdx, %rax
jne .L38
leaq 8(%rsp), %rdi
call cudaEventCreate@PLT
leaq 16(%rsp), %rdi
call cudaEventCreate@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $800, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
movq 8(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L43
.L39:
leaq 48(%rsp), %rdi
movl $2, %ecx
movl $800, %edx
movq (%rsp), %rsi
call cudaMemcpy@PLT
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq 16(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 36(%rsp), %rdi
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 36(%rsp), %xmm0
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 856(%rsp), %rax
subq %fs:40, %rax
jne .L44
movl $0, %eax
addq $872, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
movq (%rsp), %rdi
call _Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode
jmp .L39
.L44:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2070:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z3RBTP10par_rbNode"
.LC4:
.string "nodes"
.LC5:
.string "root"
.LC6:
.string "NIL"
.LC7:
.string "rtParent"
.LC8:
.string "rtSibling"
.LC9:
.string "nodeIndex"
.LC10:
.string "tmpIndex"
.LC11:
.string "tmp"
.LC12:
.string "createFlag"
.LC13:
.string "threadsFinished"
.LC14:
.string "passCreate"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2098:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z3RBTP10par_rbNode(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _ZL5nodes(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _ZL4root(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _ZL3NIL(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZL8rtParent(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL9rtSibling(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL9nodeIndex(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL8tmpIndex(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $160, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL3tmp(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10createFlag(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL15threadsFinished(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10passCreate(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL10passCreate
.comm _ZL10passCreate,4,4
.local _ZL15threadsFinished
.comm _ZL15threadsFinished,4,4
.local _ZL10createFlag
.comm _ZL10createFlag,4,4
.local _ZL3tmp
.comm _ZL3tmp,160,32
.local _ZL8tmpIndex
.comm _ZL8tmpIndex,4,4
.local _ZL9nodeIndex
.comm _ZL9nodeIndex,4,4
.local _ZL9rtSibling
.comm _ZL9rtSibling,8,8
.local _ZL8rtParent
.comm _ZL8rtParent,8,8
.local _ZL3NIL
.comm _ZL3NIL,8,8
.local _ZL4root
.comm _ZL4root,8,8
.local _ZL5nodes
.comm _ZL5nodes,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define M 20
// RED = 0, BLACK = 1
enum nodeColor {
RED,
BLACK
};
enum result {
Failure,
Success,
FirstInsert
};
enum caseFlag {
NOOP,
DID_CASE1,
DID_CASE3
};
struct par_rbNode {
int key, color;
int flag;
struct par_rbNode *left, *right, *parent;
};
// /*Function prototypes */
__device__ void createNIL();
__device__ struct par_rbNode * createNode(int);
__device__ void createTree();
__device__ struct par_rbNode * Traverse(struct par_rbNode *,int);
__device__ enum result PlaceNode(struct par_rbNode *);
__device__ void Insert_Rebalance(struct par_rbNode *);
__device__ bool Update_Rotation(struct par_rbNode *, enum caseFlag *);
__device__ bool Left_Rotate(struct par_rbNode *);
__device__ bool Right_Rotate(struct par_rbNode *);
__device__ void printPreorder(struct par_rbNode* );
__device__ void printInorder(struct par_rbNode* );
__device__ void printPostorder(struct par_rbNode* );
__device__ struct par_rbNode *nodes;
__device__ struct par_rbNode *root;
__device__ struct par_rbNode *NIL;
__device__ struct par_rbNode *rtParent;
__device__ struct par_rbNode *rtSibling; // U might feel this is unnecessary, but it will be used
__device__ int nodeIndex = 0;
__device__ int tmpIndex = 0;
__device__ struct par_rbNode *tmp[M];// need M tmps
__device__ int createFlag = false;
__device__ void createNIL(){
NIL = &nodes[0];
NIL->color = BLACK;
NIL->key = -1;
NIL->left = NIL->right = NIL->parent = NIL;
printf("NIL created\n");
}
__device__ struct par_rbNode * createNode(int key){
bool ok = false;
do{
ok = atomicCAS(&createFlag,false,true); //Capture the lock
}while(ok);
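// Note: atomicCAS returns the previous value of createFlag, so ok stays nonzero
// while another thread holds the lock; the loop exits only once this thread
// observed 0 and swapped in 1, i.e. it now owns the lock.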
atomicAdd(&nodeIndex,1);
atomicAdd(&tmpIndex,1);
nodes[nodeIndex].key = key;
nodes[nodeIndex].flag = true;
nodes[nodeIndex].left = nodes[nodeIndex].right = nodes[nodeIndex].parent = NIL;
tmp[tmpIndex] = &nodes[nodeIndex];
createFlag = false;
// atomicCAS(&createFlag,true,false); //Release the lock
printf("Created %d\n",key);
return tmp[tmpIndex]; // Even if this thread pauses it will eventually return the correct pointer
}
__device__ void createTree(){
rtParent = createNode(-1);
rtSibling = createNode(-1);
// NIL = createNode(-1);
root = NIL;
rtParent->parent = NIL;
rtSibling->parent = rtParent;
rtSibling->right = NIL;
rtSibling->left = NIL;
rtParent->left = root;
//rtParent->left = root; Why only left, y not right?
//ANS: Since we check for left parent condition first
//(if u don't understand, try to insert a node to a tree with only one node)
rtParent->right = rtSibling;
rtParent->flag = false;
rtSibling->flag = false;
rtParent->color = BLACK;
rtSibling->color = BLACK;
// NIL->left = NIL;
// NIL->right = NIL;
NIL->parent = rtParent;
NIL->flag = false;
// NIL->color = BLACK;
printf("Tree Created \n");
printf("\n");
}
__device__ struct par_rbNode * Traverse(struct par_rbNode *newNode,int key){
struct par_rbNode *x;
// struct par_rbNode *inertPoint;
// struct par_rbNode *savert;
bool success;
bool ok;
// do{
// savert = root;
// success = DCAS(&root->flag,false,true,&root,savert,savert); //Catching the flag of the root
// }while(!success);
//An alternate for DCAS - should check if it works or not
// do{
// savert = root;
// success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
// }while(savert!=root || !success);
do{
// savert = root;
success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
}while(success);
//success => captured the root flag
//savert != root => root has changed
//!success => root is under lock
//thread will come out of the loop only after "success" and "savert==root"
x = root;
if(x != NIL){
while(x != NIL){
struct par_rbNode *y = x;
if(key == x->key) {
x->flag = false; // Release the flag that was just caught
return NULL; // Traversing is done. Node is already there so Insert() fails.
}
if(key < x->key){
if(x->left != NIL){
ok = atomicCAS(&x->left->flag,false,true);
if(ok){
x->flag = false; // Release the flag of x
return NULL;
}//end if
x->flag = false;
x = x->left;
}else{
newNode->parent = x;
x = x->left;
if(x == NIL){
printf("Insert Point %d\n",y->key);
return y;
}
}//end if
}else{
if(x->right != NIL){
ok = atomicCAS(&x->right->flag,false,true);
if(ok){
x->flag = false;
return NULL;
}//end if
x->flag = false;
x = x->right;
}else{
newNode->parent = x;
x = x->right;
if(x == NIL){
printf("Insert Point %d\n",y->key);
return y;
}
}//end if
}//end if
}//end while
// return x->parent;
}else{
return NIL;
}
}
__device__ enum result PlaceNode(struct par_rbNode *newNode){
//flags on newNode and insPoint are held
bool ok = true;
// struct par_rbNode *uncle,*savep;
if(newNode->parent == NIL){ //tree is empty
newNode->color = BLACK;
newNode->parent = rtParent;
rtParent->left = newNode;
root=newNode;
NIL->flag = false; // release NIL node, that u caught during Traverse
newNode->flag = false;
newNode->left = newNode->right = NIL;
return FirstInsert;
}else{ // the tree is not empty so...
// newNode->parent = insPoint;
//set the flags of the grandparent and uncle
// struct par_rbNode *insPoint = newNode->parent;
printf("%d\n",newNode->parent->key);
printf("%d\n",newNode->parent->parent->left->key);
if(newNode->parent == newNode->parent->parent->left){ //uncle is right child
printf("Insert Key %d\n",newNode->parent->key);
// savep = newNode->parent->parent; // save parent ptr
// uncle = savep->right; // rtSibling is used here, when newNode->parent is root
ok = atomicCAS(&newNode->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&newNode->parent->parent->right->flag,false,true);
// if(ok){
// ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->right,uncle,uncle);
// }
if(ok){ //back off
newNode->parent->parent->flag = false;
newNode->parent->parent->right->flag = false;
}else{
newNode->parent->parent->flag = false;
}//end if
}
}else{// uncle is left child
// savep = newNode->parent->parent; // save parent ptr
// uncle = savep->left;
ok = atomicCAS(&newNode->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&newNode->parent->parent->left->flag,false,true);
// if(ok){
// ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->left,uncle,uncle);
// }
if(ok){ //back off
newNode->parent->parent->flag = false;
newNode->parent->parent->left->flag = false;
}else{
newNode->parent->parent->flag = false;
}//end if
}
}//end if
if(ok){
// ok != 0 here means the grandparent (or uncle) flag could not be captured;
// no extra flags were kept, so just get rid of the flag of newNode->parent
newNode->parent->flag = false; // release flag
newNode->parent = NIL;
return Failure; //avoid deadlock
}
// When u have successfully captured all the required flags.
// i.e. parent, grandparent, uncle
if(newNode->key < newNode->parent->key){
//insert as left child
newNode->parent->left = newNode;
return Success;
}else{//insert as right child
newNode->parent->right = newNode;
printf("THE OK = %d\n",ok);
return Success;
}
}
}
__device__ void Insert_Rebalance(struct par_rbNode *x){ //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//we hold flags on x, p(x), p(p(x)), and uncle(x)
struct par_rbNode *oldx;
struct par_rbNode *uncle, *olduncle;
// struct par_rbNode *savep, *savegp;
struct par_rbNode *brother;
struct par_rbNode *nephew;
bool ok;
bool updateSucceeds; //Update-Rotation succeeded?
//caseF is short for caseFlag (avoiding confusion between global enum and local variable)
enum caseFlag caseF = NOOP; // initially not doing any case
//define uncle for first iteration
if(x->parent == x->parent->parent->left){
uncle = x->parent->parent->right;
}else{ // uncle is the left child not right
uncle = x->parent->parent->left;
}
while((x != root) && (x->parent->color == RED)){
//do color-update and/or rotation as required
do{
updateSucceeds = Update_Rotation(x,&caseF);
}while(!updateSucceeds);
//CASE 1: move to grandparent after color update
if(caseF == DID_CASE1){
oldx = x; //save pointer to the old x
olduncle = uncle; // save pointer to old uncle;
x = x->parent->parent; // up to grandparent
do{ //find new uncle of x and get flags
if(x->parent == x->parent->parent->left){
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->right;
ok = atomicCAS(&x->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->right->flag,false,true);
if(ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->right->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}else{
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->left;
ok = atomicCAS(&x->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->left->flag,false,true);
if(ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->left->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}
}while(ok); //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//Release old flags for CASE 1
oldx->parent->flag = false;
olduncle->flag = false;
oldx->flag = false;
}
//in CASE 3 loop will exit: parent will be BLACK
}
switch(caseF){
case NOOP: //In the beginning of this function we had
//x,p(x),p(p(x)),uncle(x) - release them
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE1: //Release the last set of flags acquired
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE3: //release flags on ROTATED x, etc
if(x == x->parent->left){
brother = x->parent->right;
nephew = x->parent->right->right;
}else{
brother = x->parent->left;
nephew = x->parent->left->left;
}
x->parent->flag = false;
brother->flag = false;
nephew->flag = false;
x->flag = false;
break;
}
// printf("last %d\n",x->key);
root->color = BLACK;
}
__device__ bool Update_Rotation(struct par_rbNode *x, enum caseFlag *caseF){
//we hold flags on x, p(x), p(p(x)) and uncle(x)
struct par_rbNode *xUncle;
struct par_rbNode *oldx; //*ggp; // ggp -> greatgrandparent
bool ok;
if(x->parent == x->parent->parent->left){
//the parent is a left child
xUncle = x->parent->parent->right;
if(xUncle->color == RED){
//CASE 1 - recoloring
// U have all the flags u need. So this is simple, similar to serial code
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true; // This true is for "updateSucceeds"
}else{ // rotation(s) will be needed
if(x == x->parent->right){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Left_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false; //This false is for "updateSucceeds"
}
}
//In CASE 3, if the right-rotation fails,
//CASE 3 fails but the algorithm still works
//because the process will return false to
//Insert_Rebalance, and Insert_Rebalance will
//call Update_Rotation again to complete CASE3
do{ // get great grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(ok); //KEEPS TRYING, DOESN'T BACK OFF
ok = Right_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false; //This false is for "updateSucceeds"
}else{
x->parent->color = BLACK;
x->parent->right->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false; //remove the ggp flag as rotation was successful
return true;
}
}
//symmetric to above code
}else{
//the parent is a right child
xUncle = x->parent->parent->left;
if(xUncle->color == RED){
//CASE 1 - recoloring
// U have all the flags u need. So this is simple, similar to serial code
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true;
}else{ // rotation(s) will be needed
if(x == x->parent->left){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Right_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false;
}
}
//In CASE 3, if the left-rotation fails,
//CASE 3 fails but the algorithm still works
//because the process will return false to
//Insert_Rebalance, and Insert_Rebalance will
//call Update_Rotation again to complete CASE3
do{ // get great grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(ok);
ok = Left_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false;
}else{
x->parent->color = BLACK;
x->parent->left->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false;
return true;
}
}
}
}
//A rotation will always be successful(true), as u can reach the rotate command
//only after u have captured all the required flags
__device__ bool Left_Rotate(struct par_rbNode *z){
//z is the root of the rotation subtree. The locks
// held at this point are : z,z->parent and z->right (and sibling of z but its not useful here)
// bool ok;
struct par_rbNode *zrl,*zr;
if(z->parent == rtParent){
//rotating at the root
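// rtParent is the sentinel above the real root, so rotating here must also
// update the global root pointer and rtParent->left.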
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
// ok = CAS3(z->right,zrl,z->right,
// z->right,z,zrl->parent,
// zrl,zrl,z->right->left);
//update other links
root = zr;
rtParent->left = root;
root->parent = rtParent;
z->parent = root;
root->left = z;
}else{
//rotating under the root (parent, etc . exist)
if(z == z->parent->left){
//z is left child
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
//update other links
z->parent->left = zr;
z->right->parent = z->parent;
z->parent = zr;
z->right->left = z;
}else{
// z is right child
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
//update other links
z->parent->right = zr;
z->right->parent = z->parent;
z->parent = zr;
z->right->left = z;
}
}
return true;
}
//symmetric to Left_rotate
__device__ bool Right_Rotate(struct par_rbNode *z){
//z is the root of the rotation subtree. The locks
// held at this point are : z,z->parent and z->left (and sibling of z but its not useful here)
// bool ok;
struct par_rbNode *zrl,*zr;
if(z->parent == rtParent){
//rotating at the root
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
// ok = CAS3(z->left,zrl,z->left,
// z->left,z,zrl->parent,
// zrl,zrl,z->left->right);
//update other links
root = zr;
rtParent->right = root;
root->parent = rtParent;
z->parent = root;
root->right = z;
}else{
//rotating under the root (parent, etc . exist)
if(z == z->parent->right){
//z is right child
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
//update other links
z->parent->right = zr;
z->left->parent = z->parent;
z->parent = zr;
z->left->right = z;
}else{
// z is left child
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
//update other links
z->parent->left = zr;
z->left->parent = z->parent;
z->parent = zr;
z->left->right = z;
}
}
return true;
}
__device__ void Insert(int key){
struct par_rbNode *newNode = createNode(key); //Internally the flag of the newNode is held
// struct par_rbNode *insertPoint;
// // Create and initialize the new node
// enum result res = Failure;
// //insert the new node
// do{
// //Traverse tree to find insertion point
// insertPoint = Traverse(newNode,key);
// if(insertPoint != NULL){
// //add new node to tree
// // printf("Placing Node\n");
// res = PlaceNode(newNode);
// printf("res = %d\n",res);
// // res is short for result (avoiding confusion b/w global enum and local variable)
// if(res == Success){
// printf("rebalance\n");
// //node was added successfully so make
// //tree red-black again by doing the
// //necessary color updates and rotations
// Insert_Rebalance(newNode);
// }
// }else{
// printf("Key Exists\n");
// res = Success;
// break;
// }
// }while(res == Failure);
// printf("PreOrder: ");
// printPreorder(root);
// printf("\n");
// printf("\n");
// printf("InOrder: ");
// printInorder(root);
// printf("\n");
// printf("\n");
// printf("PostOrder: ");
// printPostorder(root);
// printf("\n");
// printf("\n");
}
//Functions for printing the tree
__device__ void printPreorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
/* then recur on left child */
printPreorder(node->left);
/* now recur on right child */
printPreorder(node->right);
}
__device__ void printInorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first recur on left child */
printInorder(node->left);
/* then print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
/* now recur on right child */
printInorder(node->right);
}
__device__ void printPostorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first recur on left child */
printPostorder(node->left);
/* then recur on right child */
printPostorder(node->right);
/* now print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
}
__device__ int threadsFinished = 0;
__device__ int passCreate = 0;
__global__ void RBT(struct par_rbNode *d_nodes) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
int threadCount = gridDim.x*blockDim.x;
if(id == 0){
printf("Starting the Tree\n");
nodes = d_nodes; // Make it a global variable
createNIL();
createTree();
atomicAdd(&passCreate,1);
}
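// Every thread runs the same three inserts; createNode serializes node
// allocation through the createFlag spin lock.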
Insert(5);
Insert(6);
Insert(4);
// while(1){
// if(passCreate){
// Insert(id);
// break;
// }
// }
// //Print the time
// //This will keep track of number of threads that are done
atomicAdd(&threadsFinished,1);
// // //Print the tree after all the threads are done
if(threadsFinished == threadCount){
if(id == 0){
// printf("PreOrder: ");
// printPreorder(root);
// printf("\n");
// printf("\n");
// printf("InOrder: ");
// printInorder(root);
// printf("\n");
// printf("\n");
// printf("PostOrder: ");
// printPostorder(root);
// printf("\n");
// printf("\n");
}
}
//return to main
}
int main() {
struct par_rbNode h_nodes[M];
struct par_rbNode *d_nodes;
float time;
// 1. Allocate device array.
cudaMalloc(&d_nodes, M * sizeof(struct par_rbNode));
for(int i=0;i<M;i++){
h_nodes[i].flag = false;
h_nodes[i].color = RED;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// 2. Copy array contents from host to device.
cudaMemcpy(d_nodes, h_nodes, M * sizeof(struct par_rbNode), cudaMemcpyHostToDevice);
printf("Kernel Launched\n");
cudaEventRecord(start, 0);
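// 3. Launch the kernel with a single block of one thread.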
RBT<<<1,1>>>(d_nodes);
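// 4. Copy the nodes back to the host and wait for the device to finish.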
cudaMemcpy(h_nodes, d_nodes, M * sizeof(struct par_rbNode), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
printf("Came back\n");
cudaEventElapsedTime(&time, start, stop);
printf ("Time for the kernel: %f ms\n", time);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define M 20
// RED = 0, BLACK = 1
enum nodeColor {
RED,
BLACK
};
enum result {
Failure,
Success,
FirstInsert
};
enum caseFlag {
NOOP,
DID_CASE1,
DID_CASE3
};
struct par_rbNode {
int key, color;
int flag;
struct par_rbNode *left, *right, *parent;
};
// /*Function prototypes */
__device__ void createNIL();
__device__ struct par_rbNode * createNode(int);
__device__ void createTree();
__device__ struct par_rbNode * Traverse(struct par_rbNode *,int);
__device__ enum result PlaceNode(struct par_rbNode *);
__device__ void Insert_Rebalance(struct par_rbNode *);
__device__ bool Update_Rotation(struct par_rbNode *, enum caseFlag *);
__device__ bool Left_Rotate(struct par_rbNode *);
__device__ bool Right_Rotate(struct par_rbNode *);
__device__ void printPreorder(struct par_rbNode* );
__device__ void printInorder(struct par_rbNode* );
__device__ void printPostorder(struct par_rbNode* );
__device__ struct par_rbNode *nodes;
__device__ struct par_rbNode *root;
__device__ struct par_rbNode *NIL;
__device__ struct par_rbNode *rtParent;
__device__ struct par_rbNode *rtSibling; // U might feel this is unnecessary, but it will be used
__device__ int nodeIndex = 0;
__device__ int tmpIndex = 0;
__device__ struct par_rbNode *tmp[M];// need M tmps
__device__ int createFlag = false;
__device__ void createNIL(){
NIL = &nodes[0];
NIL->color = BLACK;
NIL->key = -1;
NIL->left = NIL->right = NIL->parent = NIL;
printf("NIL created\n");
}
__device__ struct par_rbNode * createNode(int key){
bool ok = false;
do{
ok = atomicCAS(&createFlag,false,true); //Capture the lock
}while(ok);
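// Note: atomicCAS returns the previous value of createFlag, so ok stays nonzero
// while another thread holds the lock; the loop exits only once this thread
// observed 0 and swapped in 1, i.e. it now owns the lock.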
atomicAdd(&nodeIndex,1);
atomicAdd(&tmpIndex,1);
nodes[nodeIndex].key = key;
nodes[nodeIndex].flag = true;
nodes[nodeIndex].left = nodes[nodeIndex].right = nodes[nodeIndex].parent = NIL;
tmp[tmpIndex] = &nodes[nodeIndex];
createFlag = false;
// atomicCAS(&createFlag,true,false); //Release the lock
printf("Created %d\n",key);
return tmp[tmpIndex]; // Even if this thread pauses it will eventually return the correct pointer
}
__device__ void createTree(){
rtParent = createNode(-1);
rtSibling = createNode(-1);
// NIL = createNode(-1);
root = NIL;
rtParent->parent = NIL;
rtSibling->parent = rtParent;
rtSibling->right = NIL;
rtSibling->left = NIL;
rtParent->left = root;
//rtParent->left = root; Why only left, y not right?
//ANS: Since we check for left parent condition first
//(if u don't understand, try to insert a node to a tree with only one node)
rtParent->right = rtSibling;
rtParent->flag = false;
rtSibling->flag = false;
rtParent->color = BLACK;
rtSibling->color = BLACK;
// NIL->left = NIL;
// NIL->right = NIL;
NIL->parent = rtParent;
NIL->flag = false;
// NIL->color = BLACK;
printf("Tree Created \n");
printf("\n");
}
__device__ struct par_rbNode * Traverse(struct par_rbNode *newNode,int key){
struct par_rbNode *x;
// struct par_rbNode *inertPoint;
// struct par_rbNode *savert;
bool success;
bool ok;
// do{
// savert = root;
// success = DCAS(&root->flag,false,true,&root,savert,savert); //Catching the flag of the root
// }while(!success);
//An alternate for DCAS - should check if it works or not
// do{
// savert = root;
// success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
// }while(savert!=root || !success);
do{
// savert = root;
success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
}while(success);
//success => captured the root flag
//savert != root => root has changed
//!success => root is under lock
//thread will come out of the loop only after "success" and "savert==root"
x = root;
if(x != NIL){
while(x != NIL){
struct par_rbNode *y = x;
if(key == x->key) {
x->flag = false; // Release the flag that was just caught
return NULL; // Traversing is done. Node is already there so Insert() fails.
}
if(key < x->key){
if(x->left != NIL){
ok = atomicCAS(&x->left->flag,false,true);
if(ok){
x->flag = false; // Release the flag of x
return NULL;
}//end if
x->flag = false;
x = x->left;
}else{
newNode->parent = x;
x = x->left;
if(x == NIL){
printf("Insert Point %d\n",y->key);
return y;
}
}//end if
}else{
if(x->right != NIL){
ok = atomicCAS(&x->right->flag,false,true);
if(ok){
x->flag = false;
return NULL;
}//end if
x->flag = false;
x = x->right;
}else{
newNode->parent = x;
x = x->right;
if(x == NIL){
printf("Insert Point %d\n",y->key);
return y;
}
}//end if
}//end if
}//end while
// return x->parent;
}else{
return NIL;
}
}
__device__ enum result PlaceNode(struct par_rbNode *newNode){
//flags on newNode and insPoint are held
bool ok = true;
// struct par_rbNode *uncle,*savep;
if(newNode->parent == NIL){ //tree is empty
newNode->color = BLACK;
newNode->parent = rtParent;
rtParent->left = newNode;
root=newNode;
NIL->flag = false; // release NIL node, that u caught during Traverse
newNode->flag = false;
newNode->left = newNode->right = NIL;
return FirstInsert;
}else{ // the tree is not empty so...
// newNode->parent = insPoint;
//set the flags of the grandparent and uncle
// struct par_rbNode *insPoint = newNode->parent;
printf("%d\n",newNode->parent->key);
printf("%d\n",newNode->parent->parent->left->key);
if(newNode->parent == newNode->parent->parent->left){ //uncle is right child
printf("Insert Key %d\n",newNode->parent->key);
// savep = newNode->parent->parent; // save parent ptr
// uncle = savep->right; // rtSibling is used here, when newNode->parent is root
ok = atomicCAS(&newNode->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&newNode->parent->parent->right->flag,false,true);
// if(ok){
// ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->right,uncle,uncle);
// }
if(ok){ //back off
newNode->parent->parent->flag = false;
newNode->parent->parent->right->flag = false;
}else{
newNode->parent->parent->flag = false;
}//end if
}
}else{// uncle is left child
// savep = newNode->parent->parent; // save parent ptr
// uncle = savep->left;
ok = atomicCAS(&newNode->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&newNode->parent->parent->left->flag,false,true);
// if(ok){
// ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->left,uncle,uncle);
// }
if(ok){ //back off
newNode->parent->parent->flag = false;
newNode->parent->parent->left->flag = false;
}else{
newNode->parent->parent->flag = false;
}//end if
}
}//end if
if(ok){
// ok != 0 here means the grandparent (or uncle) flag could not be captured;
// no extra flags were kept, so just get rid of the flag of newNode->parent
newNode->parent->flag = false; // release flag
newNode->parent = NIL;
return Failure; //avoid deadlock
}
// When u have successfully captured all the required flags.
// i.e. parent, grandparent, uncle
if(newNode->key < newNode->parent->key){
//insert as left child
newNode->parent->left = newNode;
return Success;
}else{//insert as right child
newNode->parent->right = newNode;
printf("THE OK = %d\n",ok);
return Success;
}
}
}
__device__ void Insert_Rebalance(struct par_rbNode *x){ //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//we hold flags on x, p(x), p(p(x)), and uncle(x)
struct par_rbNode *oldx;
struct par_rbNode *uncle, *olduncle;
// struct par_rbNode *savep, *savegp;
struct par_rbNode *brother;
struct par_rbNode *nephew;
bool ok;
bool updateSucceeds; //Update-Rotation succeeded?
//caseF is short for caseFlag (avoiding confusion between global enum and local variable)
enum caseFlag caseF = NOOP; // initially not doing any case
//define uncle for first iteration
if(x->parent == x->parent->parent->left){
uncle = x->parent->parent->right;
}else{ // uncle is the left child not right
uncle = x->parent->parent->left;
}
while((x != root) && (x->parent->color == RED)){
//do color-update and/or rotation as required
do{
updateSucceeds = Update_Rotation(x,&caseF);
}while(!updateSucceeds);
//CASE 1: move to grandparent after color update
if(caseF == DID_CASE1){
oldx = x; //save pointer to the old x
olduncle = uncle; // save pointer to old uncle;
x = x->parent->parent; // up to grandparent
do{ //find new uncle of x and get flags
if(x->parent == x->parent->parent->left){
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->right;
ok = atomicCAS(&x->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->right->flag,false,true);
if(ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->right->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}else{
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->left;
ok = atomicCAS(&x->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->left->flag,false,true);
if(ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->left->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}
}while(ok); //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//Release old flags for CASE 1
oldx->parent->flag = false;
olduncle->flag = false;
oldx->flag = false;
}
//in CASE 3 loop will exit: parent will be BLACK
}
switch(caseF){
case NOOP: //In the beginning of this function we had
//x,p(x),p(p(x)),uncle(x) - release them
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE1: //Release the last set of flags acquired
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE3: //release flags on ROTATED x, etc
if(x == x->parent->left){
brother = x->parent->right;
nephew = x->parent->right->right;
}else{
brother = x->parent->left;
nephew = x->parent->left->left;
}
x->parent->flag = false;
brother->flag = false;
nephew->flag = false;
x->flag = false;
break;
}
// printf("last %d\n",x->key);
root->color = BLACK;
}
__device__ bool Update_Rotation(struct par_rbNode *x, enum caseFlag *caseF){
//we hold flags on x, p(x), p(p(x)) and uncle(x)
struct par_rbNode *xUncle;
struct par_rbNode *oldx; //*ggp; // ggp -> greatgrandparent
bool ok;
if(x->parent == x->parent->parent->left){
//the parent is a left child
xUncle = x->parent->parent->right;
if(xUncle->color == RED){
//CASE 1 - recoloring
// U have all the flags u need. So this is simple, similar to serial code
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true; // This true is for "updateSucceeds"
}else{ // rotation(s) will be needed
if(x == x->parent->right){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Left_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false; //This false is for "updateSucceeds"
}
}
//In CASE 3, if the right-rotation fails,
//CASE 3 fails but the algorithm still works
//because the process will return false to
//Insert_Rebalance, and Insert_Rebalance will
//call Update_Rotation again to complete CASE3
do{ // get great grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(ok); //KEEPS TRYING, DOESN'T BACK OFF
ok = Right_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false; //This false is for "updateSucceeds"
}else{
x->parent->color = BLACK;
x->parent->right->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false; //remove the ggp flag as rotation was successful
return true;
}
}
//symmetric to above code
}else{
//the parent is a right child
xUncle = x->parent->parent->left;
if(xUncle->color == RED){
//CASE 1 - recoloring
// U have all the flags u need. So this is simple, similar to serial code
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true;
}else{ // rotation(s) will be needed
if(x == x->parent->left){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Right_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false;
}
}
//In CASE 3, if the left-rotation fails,
//CASE 3 fails but the algorithm still works
//beacuse the process will return false to
//Insert_Rebalance, and Insert_Rebalance will
//call Update_Rotation again to complete CASE3
do{ // get great grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(ok);
ok = Left_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false;
}else{
x->parent->color = BLACK;
x->parent->left->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false;
return true;
}
}
}
}
//A rotation will always be successful(true), as u can reach the rotate command
//only after u have captured all the required flags
__device__ bool Left_Rotate(struct par_rbNode *z){
//z is the root of the rotation subtree. The locks
// held at this point are : z,z->parent and z->right (and sibling of z but its not useful here)
// bool ok;
struct par_rbNode *zrl,*zr;
if(z->parent == rtParent){
//rotating at the root
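// rtParent is the sentinel above the real root, so rotating here must also
// update the global root pointer and rtParent->left.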
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
// ok = CAS3(z->right,zrl,z->right,
// z->right,z,zrl->parent,
// zrl,zrl,z->right->left);
//update other links
root = zr;
rtParent->left = root;
root->parent = rtParent;
z->parent = root;
root->left = z;
}else{
//rotating under the root (parent, etc . exist)
if(z == z->parent->left){
//z is left child
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
//update other links
z->parent->left = zr;
z->right->parent = z->parent;
z->parent = zr;
z->right->left = z;
}else{
// z is right child
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
//update other links
z->parent->right = zr;
z->right->parent = z->parent;
z->parent = zr;
z->right->left = z;
}
}
return true;
}
//symmetric to Left_rotate
__device__ bool Right_Rotate(struct par_rbNode *z){
//z is the root of the rotation subtree. The locks
// held at this point are : z,z->parent and z->left (and sibling of z but its not useful here)
// bool ok;
struct par_rbNode *zrl,*zr;
if(z->parent == rtParent){
//rotating at the root
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
// ok = CAS3(z->left,zrl,z->left,
// z->left,z,zrl->parent,
// zrl,zrl,z->left->right);
//update other links
root = zr;
rtParent->right = root;
root->parent = rtParent;
z->parent = root;
root->right = z;
}else{
//rotating under the root (parent, etc . exist)
if(z == z->parent->right){
//z is right child
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
//update other links
z->parent->right = zr;
z->left->parent = z->parent;
z->parent = zr;
z->left->right = z;
}else{
// z is left child
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
//update other links
z->parent->left = zr;
z->left->parent = z->parent;
z->parent = zr;
z->left->right = z;
}
}
return true;
}
__device__ void Insert(int key){
struct par_rbNode *newNode = createNode(key); //Internally the flag of the newNode is held
// struct par_rbNode *insertPoint;
// // Create and initialize the new node
// enum result res = Failure;
// //insert the new node
// do{
// //Traverse tree to find insertion point
// insertPoint = Traverse(newNode,key);
// if(insertPoint != NULL){
// //add new node to tree
// // printf("Placing Node\n");
// res = PlaceNode(newNode);
// printf("res = %d\n",res);
// // res is short for result (avoiding confusion b/w global enum and local variable)
// if(res == Success){
// printf("rebalance\n");
// //node was added successfully so make
// //tree red-black again by doing the
// //necessary color updates and rotations
// Insert_Rebalance(newNode);
// }
// }else{
// printf("Key Exists\n");
// res = Success;
// break;
// }
// }while(res == Failure);
// printf("PreOrder: ");
// printPreorder(root);
// printf("\n");
// printf("\n");
// printf("InOrder: ");
// printInorder(root);
// printf("\n");
// printf("\n");
// printf("PostOrder: ");
// printPostorder(root);
// printf("\n");
// printf("\n");
}
//Functions for printing the tree
__device__ void printPreorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
/* then recur on left child */
printPreorder(node->left);
/* now recur on right child */
printPreorder(node->right);
}
__device__ void printInorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first recur on left child */
printInorder(node->left);
/* then print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
/* now recur on right child */
printInorder(node->right);
}
__device__ void printPostorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first recur on left child */
printPostorder(node->left);
/* then recur on right child */
printPostorder(node->right);
/* now print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
}
__device__ int threadsFinished = 0;
__device__ int passCreate = 0;
__global__ void RBT(struct par_rbNode *d_nodes) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
int threadCount = gridDim.x*blockDim.x;
if(id == 0){
printf("Starting the Tree\n");
nodes = d_nodes; // Make it a global variable
createNIL();
createTree();
atomicAdd(&passCreate,1);
}
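// Every thread runs the same three inserts; createNode serializes node
// allocation through the createFlag spin lock.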
Insert(5);
Insert(6);
Insert(4);
// while(1){
// if(passCreate){
// Insert(id);
// break;
// }
// }
// //Print the time
// //This will keep track of number of threads that are done
atomicAdd(&threadsFinished,1);
// // //Print the tree after all the threads are done
if(threadsFinished == threadCount){
if(id == 0){
// printf("PreOrder: ");
// printPreorder(root);
// printf("\n");
// printf("\n");
// printf("InOrder: ");
// printInorder(root);
// printf("\n");
// printf("\n");
// printf("PostOrder: ");
// printPostorder(root);
// printf("\n");
// printf("\n");
}
}
//return to main
}
int main() {
struct par_rbNode h_nodes[M];
struct par_rbNode *d_nodes;
float time;
// 1. Allocate device array.
hipMalloc(&d_nodes, M * sizeof(struct par_rbNode));
for(int i=0;i<M;i++){
h_nodes[i].flag = false;
h_nodes[i].color = RED;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// 2. Copy array contents from host to device.
hipMemcpy(d_nodes, h_nodes, M * sizeof(struct par_rbNode), hipMemcpyHostToDevice);
printf("Kernel Launched\n");
hipEventRecord(start, 0);
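// 3. Launch the kernel with a single block of one thread.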
RBT<<<1,1>>>(d_nodes);
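// 4. Copy the nodes back to the host and wait for the device to finish.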
hipMemcpy(h_nodes, d_nodes, M * sizeof(struct par_rbNode), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
printf("Came back\n");
hipEventElapsedTime(&time, start, stop);
printf ("Time for the kernel: %f ms\n", time);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#define M 20
// RED = 0, BLACK = 1
enum nodeColor {
RED,
BLACK
};
enum result {
Failure,
Success,
FirstInsert
};
enum caseFlag {
NOOP,
DID_CASE1,
DID_CASE3
};
struct par_rbNode {
int key, color;
int flag;
struct par_rbNode *left, *right, *parent;
};
// /*Function prototypes */
__device__ void createNIL();
__device__ struct par_rbNode * createNode(int);
__device__ void createTree();
__device__ struct par_rbNode * Traverse(struct par_rbNode *,int);
__device__ enum result PlaceNode(struct par_rbNode *);
__device__ void Insert_Rebalance(struct par_rbNode *);
__device__ bool Update_Rotation(struct par_rbNode *, enum caseFlag *);
__device__ bool Left_Rotate(struct par_rbNode *);
__device__ bool Right_Rotate(struct par_rbNode *);
__device__ void printPreorder(struct par_rbNode* );
__device__ void printInorder(struct par_rbNode* );
__device__ void printPostorder(struct par_rbNode* );
__device__ struct par_rbNode *nodes;
__device__ struct par_rbNode *root;
__device__ struct par_rbNode *NIL;
__device__ struct par_rbNode *rtParent;
__device__ struct par_rbNode *rtSibling; // U might feel this is unnecessary, but it will be used
__device__ int nodeIndex = 0;
__device__ int tmpIndex = 0;
__device__ struct par_rbNode *tmp[M];// need M tmps
__device__ int createFlag = false;
__device__ void createNIL(){
NIL = &nodes[0];
NIL->color = BLACK;
NIL->key = -1;
NIL->left = NIL->right = NIL->parent = NIL;
printf("NIL created\n");
}
__device__ struct par_rbNode * createNode(int key){
bool ok = false;
do{
ok = atomicCAS(&createFlag,false,true); //Capture the lock
}while(ok);
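// Note: atomicCAS returns the previous value of createFlag, so ok stays nonzero
// while another thread holds the lock; the loop exits only once this thread
// observed 0 and swapped in 1, i.e. it now owns the lock.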
atomicAdd(&nodeIndex,1);
atomicAdd(&tmpIndex,1);
nodes[nodeIndex].key = key;
nodes[nodeIndex].flag = true;
nodes[nodeIndex].left = nodes[nodeIndex].right = nodes[nodeIndex].parent = NIL;
tmp[tmpIndex] = &nodes[nodeIndex];
createFlag = false;
// atomicCAS(&createFlag,true,false); //Release the lock
printf("Created %d\n",key);
return tmp[tmpIndex]; // Even if this thread pauses it will eventually return the correct pointer
}
__device__ void createTree(){
rtParent = createNode(-1);
rtSibling = createNode(-1);
// NIL = createNode(-1);
root = NIL;
rtParent->parent = NIL;
rtSibling->parent = rtParent;
rtSibling->right = NIL;
rtSibling->left = NIL;
rtParent->left = root;
//rtParent->left = root; Why only left, y not right?
//ANS: Since we check for left parent condition first
//(if u don't understand, try to insert a node to a tree with only one node)
rtParent->right = rtSibling;
rtParent->flag = false;
rtSibling->flag = false;
rtParent->color = BLACK;
rtSibling->color = BLACK;
// NIL->left = NIL;
// NIL->right = NIL;
NIL->parent = rtParent;
NIL->flag = false;
// NIL->color = BLACK;
printf("Tree Created \n");
printf("\n");
}
__device__ struct par_rbNode * Traverse(struct par_rbNode *newNode,int key){
struct par_rbNode *x;
// struct par_rbNode *inertPoint;
// struct par_rbNode *savert;
bool success;
bool ok;
// do{
// savert = root;
// success = DCAS(&root->flag,false,true,&root,savert,savert); //Catching the flag of the root
// }while(!success);
//An alternate for DCAS - should check if it works or not
// do{
// savert = root;
// success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
// }while(savert!=root || !success);
do{
// savert = root;
success = atomicCAS(&root->flag,false,true); //Catching the flag of the root
}while(success);
//success => captured the root flag
//savert != root => root has changed
//!success => root is under lock
//thread will come out of the loop only after "success" and "savert==root"
x = root;
if(x != NIL){
while(x != NIL){
struct par_rbNode *y = x;
if(key == x->key) {
x->flag = false; // Release the flag that was just caught
return NULL; // Traversing is done. Node is already there so Insert() fails.
}
if(key < x->key){
if(x->left != NIL){
ok = atomicCAS(&x->left->flag,false,true);
if(ok){
x->flag = false; // Release the flag of x
return NULL;
}//end if
x->flag = false;
x = x->left;
}else{
newNode->parent = x;
x = x->left;
if(x == NIL){
printf("Insert Point %d\n",y->key);
return y;
}
}//end if
}else{
if(x->right != NIL){
ok = atomicCAS(&x->right->flag,false,true);
if(ok){
x->flag = false;
return NULL;
}//end if
x->flag = false;
x = x->right;
}else{
newNode->parent = x;
x = x->right;
if(x == NIL){
printf("Insert Point %d\n",y->key);
return y;
}
}//end if
}//end if
}//end while
// return x->parent;
}else{
return NIL;
}
}
__device__ enum result PlaceNode(struct par_rbNode *newNode){
//flags on newNode and insPoint are held
bool ok = true;
// struct par_rbNode *uncle,*savep;
if(newNode->parent == NIL){ //tree is empty
newNode->color = BLACK;
newNode->parent = rtParent;
rtParent->left = newNode;
root=newNode;
NIL->flag = false; // release NIL node, that u caught during Traverse
newNode->flag = false;
newNode->left = newNode->right = NIL;
return FirstInsert;
}else{ // the tree is not empty so...
// newNode->parent = insPoint;
//set the flags of the grandparent and uncle
// struct par_rbNode *insPoint = newNode->parent;
printf("%d\n",newNode->parent->key);
printf("%d\n",newNode->parent->parent->left->key);
if(newNode->parent == newNode->parent->parent->left){ //uncle is right child
printf("Insert Key %d\n",newNode->parent->key);
// savep = newNode->parent->parent; // save parent ptr
// uncle = savep->right; // rtSibling is used here, when newNode->parent is root
ok = atomicCAS(&newNode->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&newNode->parent->parent->right->flag,false,true);
// if(ok){
// ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->right,uncle,uncle);
// }
if(ok){ //back off
newNode->parent->parent->flag = false;
newNode->parent->parent->right->flag = false;
}else{
newNode->parent->parent->flag = false;
}//end if
}
}else{// uncle is left child
// savep = newNode->parent->parent; // save parent ptr
// uncle = savep->left;
ok = atomicCAS(&newNode->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&newNode->parent->parent->left->flag,false,true);
// if(ok){
// ok = atomicCAS(&newNode->parent->parent,savep,savep) && atomicCAS(&savep->left,uncle,uncle);
// }
if(ok){ //back off
newNode->parent->parent->flag = false;
newNode->parent->parent->left->flag = false;
}else{
newNode->parent->parent->flag = false;
}//end if
}
}//end if
if(ok){
// ok != 0 here means the grandparent (or uncle) flag could not be captured;
// no extra flags were kept, so just get rid of the flag of newNode->parent
newNode->parent->flag = false; // release flag
newNode->parent = NIL;
return Failure; //avoid deadlock
}
// When u have successfully captured all the required flags.
// i.e. parent, grandparent, uncle
if(newNode->key < newNode->parent->key){
//insert as left child
newNode->parent->left = newNode;
return Success;
}else{//insert as right child
newNode->parent->right = newNode;
printf("THE OK = %d\n",ok);
return Success;
}
}
}
__device__ void Insert_Rebalance(struct par_rbNode *x){ //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//we hold flags on x, p(x), p(p(x)), and uncle(x)
struct par_rbNode *oldx;
struct par_rbNode *uncle, *olduncle;
// struct par_rbNode *savep, *savegp;
struct par_rbNode *brother;
struct par_rbNode *nephew;
bool ok;
bool updateSucceeds; //Update-Rotation succeeded?
//caseF is short for caseFlag (avoiding confusion between global enum and local variable)
enum caseFlag caseF = NOOP; // initially not doing any case
//define uncle for first iteration
if(x->parent == x->parent->parent->left){
uncle = x->parent->parent->right;
}else{ // uncle is the left child not right
uncle = x->parent->parent->left;
}
while((x != root) && (x->parent->color == RED)){
//do color-update and/or rotation as required
do{
updateSucceeds = Update_Rotation(x,&caseF);
}while(!updateSucceeds);
//CASE 1: move to grandparent after color update
if(caseF == DID_CASE1){
oldx = x; //save pointer to the old x
olduncle = uncle; // save pointer to old uncle;
x = x->parent->parent; // up to grandparent
do{ //find new uncle of x and get flags
if(x->parent == x->parent->parent->left){
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->right;
ok = atomicCAS(&x->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->right->flag,false,true);
if(ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->right->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}else{
// savep = x->parent;
// savegp = savep->parent;
// uncle = savegp->left;
ok = atomicCAS(&x->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->flag,false,true);
if(!ok){
ok = atomicCAS(&x->parent->parent->left->flag,false,true);
if(ok){
x->parent->flag = false;
x->parent->parent->flag = false;
x->parent->parent->left->flag = false;
}else{
x->parent->flag = false;
x->parent->parent->flag = false;
}
}else{
x->parent->flag = false;
}
}
}
}while(ok); //THIS FUNCTION DOESN'T BACKOFF. IT KEEPS TRYING
//Release old flags for CASE 1
oldx->parent->flag = false;
olduncle->flag = false;
oldx->flag = false;
}
//in CASE 3 loop will exit: parent will be BLACK
}
switch(caseF){
case NOOP: //In the beginning of this function we had
//x,p(x),p(p(x)),uncle(x) - release them
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE1: //Release the last set of flags acquired
x->parent->parent->flag = false;
x->parent->flag = false;
uncle->flag = false;
x->flag = false;
break;
case DID_CASE3: //release flags on ROTATED x, etc
if(x == x->parent->left){
brother = x->parent->right;
nephew = x->parent->right->right;
}else{
brother = x->parent->left;
nephew = x->parent->left->left;
}
x->parent->flag = false;
brother->flag = false;
nephew->flag = false;
x->flag = false;
break;
}
// printf("last %d\n",x->key);
root->color = BLACK;
}
__device__ bool Update_Rotation(struct par_rbNode *x, enum caseFlag *caseF){
//we hold flags on x, p(x), p(p(x)) and uncle(x)
struct par_rbNode *xUncle;
struct par_rbNode *oldx; //*ggp; // ggp -> greatgrandparent
bool ok;
if(x->parent == x->parent->parent->left){
//the parent is a left child
xUncle = x->parent->parent->right;
if(xUncle->color == RED){
//CASE 1 - recoloring
			// You have all the flags you need, so this is simple, similar to the serial code
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true; // This true is for "updateSucceeds"
}else{ // rotation(s) will be needed
if(x == x->parent->right){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Left_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false; //This false is for "updateSucceeds"
}
}
//In CASE 3, if the right-rotation fails,
//CASE 3 fails but the algorithm still works
			//because the process will return false to
			//Insert_Rebalance, and Insert_Rebalance will
			//call Update_Rotation again to complete CASE 3
do{ // get great grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(ok); //KEEPS TRYING, DOESN'T BACK OFF
ok = Right_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false; //This false is for "updateSucceeds"
}else{
x->parent->color = BLACK;
x->parent->right->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false; //remove the ggp flag as rotation was successful
return true;
}
}
//symmetric to above code
}else{
//the parent is a right child
xUncle = x->parent->parent->left;
if(xUncle->color == RED){
//CASE 1 - recoloring
			// You have all the flags you need, so this is simple, similar to the serial code
x->parent->color = BLACK;
xUncle->color = BLACK;
x->parent->parent->color = RED;
*caseF = DID_CASE1;
return true;
}else{ // rotation(s) will be needed
if(x == x->parent->left){//CASE2
oldx = x; // save old x in case rotate fails
x = x->parent;
ok = Right_Rotate(x);
if(!ok){
x = oldx; //undo change to x
return false;
}
}
//In CASE 3, if the left-rotation fails,
//CASE 3 fails but the algorithm still works
			//because the process will return false to
			//Insert_Rebalance, and Insert_Rebalance will
			//call Update_Rotation again to complete CASE 3
do{ // get great grandparent's flag
// ggp = x->parent->parent->parent;
ok = atomicCAS(&x->parent->parent->parent->flag,false,true);
}while(ok);
ok = Left_Rotate(x->parent->parent);
if(!ok){
x->parent->parent->parent->flag = false;
return false;
}else{
x->parent->color = BLACK;
x->parent->left->color = RED;
*caseF = DID_CASE3;
x->parent->parent->parent->flag = false;
return true;
}
}
}
}
//A rotation will always be successful (return true), as you can reach the rotate command
//only after you have captured all the required flags
__device__ bool Left_Rotate(struct par_rbNode *z){
//z is the root of the rotation subtree. The locks
	// held at this point are: z, z->parent and z->right (and the sibling of z, but it's not useful here)
// bool ok;
struct par_rbNode *zrl,*zr;
if(z->parent == rtParent){
//rotating at the root
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
// ok = CAS3(z->right,zrl,z->right,
// z->right,z,zrl->parent,
// zrl,zrl,z->right->left);
//update other links
root = zr;
rtParent->left = root;
root->parent = rtParent;
z->parent = root;
root->left = z;
}else{
		//rotating under the root (parent, etc. exist)
if(z == z->parent->left){
//z is left child
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
//update other links
z->parent->left = zr;
z->right->parent = z->parent;
z->parent = zr;
z->right->left = z;
}else{
// z is right child
zrl = z->right->left;
zr = z->right;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->right = zrl;
//update other links
z->parent->right = zr;
z->right->parent = z->parent;
z->parent = zr;
z->right->left = z;
}
}
return true;
}
//symmetric to Left_rotate
__device__ bool Right_Rotate(struct par_rbNode *z){
//z is the root of the rotation subtree. The locks
	// held at this point are: z, z->parent and z->left (and the sibling of z, but it's not useful here)
// bool ok;
struct par_rbNode *zrl,*zr;
if(z->parent == rtParent){
//rotating at the root
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
// ok = CAS3(z->left,zrl,z->left,
// z->left,z,zrl->parent,
// zrl,zrl,z->left->right);
//update other links
root = zr;
rtParent->right = root;
root->parent = rtParent;
z->parent = root;
root->right = z;
}else{
		//rotating under the root (parent, etc. exist)
if(z == z->parent->right){
//z is right child
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
//update other links
z->parent->right = zr;
z->left->parent = z->parent;
z->parent = zr;
z->left->right = z;
}else{
// z is left child
zrl = z->left->right;
zr = z->left;
// if a process has set the flag of a node q,
//no other process can move one of the children of q away from q
zrl->parent = z;
z->left = zrl;
//update other links
z->parent->left = zr;
z->left->parent = z->parent;
z->parent = zr;
z->left->right = z;
}
}
return true;
}
__device__ void Insert(int key){
struct par_rbNode *newNode = createNode(key); //Internally the flag of the newNode is held
// struct par_rbNode *insertPoint;
// // Create and initialize the new node
// enum result res = Failure;
// //insert the new node
// do{
// //Traverse tree to find insertion point
// insertPoint = Traverse(newNode,key);
// if(insertPoint != NULL){
// //add new node to tree
// // printf("Placing Node\n");
// res = PlaceNode(newNode);
// printf("res = %d\n",res);
// // res is short for result (avoiding confusion b/w global enum and local variable)
// if(res == Success){
// printf("rebalance\n");
	// //node was added successfully so make
// //tree red-black again by doing the
// //necessary color updates and rotations
// Insert_Rebalance(newNode);
// }
// }else{
// printf("Key Exists\n");
// res = Success;
// break;
// }
// }while(res == Failure);
// printf("PreOrder: ");
// printPreorder(root);
// printf("\n");
// printf("\n");
// printf("InOrder: ");
// printInorder(root);
// printf("\n");
// printf("\n");
// printf("PostOrder: ");
// printPostorder(root);
// printf("\n");
// printf("\n");
}
//Functions for printing the tree
__device__ void printPreorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
/* then recur on left child */
printPreorder(node->left);
/* now recur on right child */
printPreorder(node->right);
}
__device__ void printInorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first recur on left child */
printInorder(node->left);
/* then print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
/* now recur on right child */
printInorder(node->right);
}
__device__ void printPostorder(struct par_rbNode* node)
{
if (node == NIL)
return;
/* first recur on left child */
printPostorder(node->left);
/* then recur on right child */
printPostorder(node->right);
/* now print the data of node */
printf("%d-", node->key);
printf("%d", node->color);
printf(" ");
}
__device__ int threadsFinished = 0;
__device__ int passCreate = 0;
__global__ void RBT(struct par_rbNode *d_nodes) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
int threadCount = gridDim.x*blockDim.x;
if(id == 0){
printf("Starting the Tree\n");
nodes = d_nodes; // Make it a global variable
createNIL();
createTree();
atomicAdd(&passCreate,1);
}
Insert(5);
Insert(6);
Insert(4);
// while(1){
// if(passCreate){
// Insert(id);
// break;
// }
// }
// //Print the time
// //This will keep track of number of threads that are done
atomicAdd(&threadsFinished,1);
// // //Print the tree after all the threads are done
if(threadsFinished == threadCount){
if(id == 0){
// printf("PreOrder: ");
// printPreorder(root);
// printf("\n");
// printf("\n");
// printf("InOrder: ");
// printInorder(root);
// printf("\n");
// printf("\n");
// printf("PostOrder: ");
// printPostorder(root);
// printf("\n");
// printf("\n");
}
}
//return to main
}
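// --- Illustrative sketch (editor's addition, not part of the original program) ---
// In the RBT kernel above, threadsFinished is bumped with atomicAdd but then re-read
// with a plain load for the "== threadCount" check. With the <<<1,1>>> launch used in
// main this is harmless, but with many threads that plain load is not guaranteed to
// observe the other increments. A common alternative is to use atomicAdd's return
// value, which is the counter's value just before this thread's increment. The names
// threadsFinishedAlt and iAmLastThread below are hypothetical:
__device__ int threadsFinishedAlt = 0;
__device__ bool iAmLastThread(int threadCount) {
	int before = atomicAdd(&threadsFinishedAlt, 1);
	return (before + 1) == threadCount;   // true for exactly one thread
}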
int main() {
struct par_rbNode h_nodes[M];
struct par_rbNode *d_nodes;
float time;
// 1. Allocate device array.
hipMalloc(&d_nodes, M * sizeof(struct par_rbNode));
for(int i=0;i<M;i++){
h_nodes[i].flag = false;
h_nodes[i].color = RED;
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// 2. Copy array contents from host to device.
hipMemcpy(d_nodes, h_nodes, M * sizeof(struct par_rbNode), hipMemcpyHostToDevice);
printf("Kernel Launched\n");
hipEventRecord(start, 0);
RBT<<<1,1>>>(d_nodes);
hipMemcpy(h_nodes, d_nodes, M * sizeof(struct par_rbNode), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
printf("Came back\n");
hipEventElapsedTime(&time, start, stop);
printf ("Time for the kernel: %f ms\n", time);
return 0;
} | .text
.file "a_sam_backup_latest.hip"
.globl _Z18__device_stub__RBTP10par_rbNode # -- Begin function _Z18__device_stub__RBTP10par_rbNode
.p2align 4, 0x90
.type _Z18__device_stub__RBTP10par_rbNode,@function
_Z18__device_stub__RBTP10par_rbNode: # @_Z18__device_stub__RBTP10par_rbNode
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z3RBTP10par_rbNode, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z18__device_stub__RBTP10par_rbNode, .Lfunc_end0-_Z18__device_stub__RBTP10par_rbNode
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $904, %rsp # imm = 0x388
.cfi_def_cfa_offset 912
leaq 16(%rsp), %rdi
movl $800, %esi # imm = 0x320
callq hipMalloc
movl $8, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movq $0, 92(%rsp,%rax)
addq $40, %rax
cmpq $808, %rax # imm = 0x328
jne .LBB1_1
# %bb.2:
leaq 40(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 16(%rsp), %rdi
leaq 96(%rsp), %rsi
movl $800, %edx # imm = 0x320
movl $1, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
movq 40(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq %rax, 88(%rsp)
leaq 88(%rsp), %rax
movq %rax, 48(%rsp)
leaq 24(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 24(%rsp), %rsi
movl 32(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z3RBTP10par_rbNode, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq 16(%rsp), %rsi
leaq 96(%rsp), %rdi
movl $800, %edx # imm = 0x320
movl $2, %ecx
callq hipMemcpy
callq hipDeviceSynchronize
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movl $.Lstr.1, %edi
callq puts@PLT
movq 40(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 24(%rsp), %rdi
callq hipEventElapsedTime
movss 24(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $904, %rsp # imm = 0x388
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
subq $32, %rsp
.cfi_adjust_cfa_offset 32
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3RBTP10par_rbNode, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
addq $32, %rsp
.cfi_adjust_cfa_offset -32
movl $nodes, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $8, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $root, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movl $8, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $NIL, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movl $8, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $rtParent, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movl $8, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $rtSibling, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movl $8, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $nodeIndex, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $tmpIndex, %esi
movl $.L__unnamed_8, %edx
movl $.L__unnamed_8, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $tmp, %esi
movl $.L__unnamed_9, %edx
movl $.L__unnamed_9, %ecx
movl $160, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $createFlag, %esi
movl $.L__unnamed_10, %edx
movl $.L__unnamed_10, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $threadsFinished, %esi
movl $.L__unnamed_11, %edx
movl $.L__unnamed_11, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $passCreate, %esi
movl $.L__unnamed_12, %edx
movl $.L__unnamed_12, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $__hip_module_dtor, %edi
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type nodes,@object # @nodes
.local nodes
.comm nodes,8,8
.type root,@object # @root
.local root
.comm root,8,8
.type NIL,@object # @NIL
.local NIL
.comm NIL,8,8
.type rtParent,@object # @rtParent
.local rtParent
.comm rtParent,8,8
.type rtSibling,@object # @rtSibling
.local rtSibling
.comm rtSibling,8,8
.type nodeIndex,@object # @nodeIndex
.local nodeIndex
.comm nodeIndex,4,4
.type tmpIndex,@object # @tmpIndex
.local tmpIndex
.comm tmpIndex,4,4
.type tmp,@object # @tmp
.local tmp
.comm tmp,160,16
.type createFlag,@object # @createFlag
.local createFlag
.comm createFlag,4,4
.type threadsFinished,@object # @threadsFinished
.local threadsFinished
.comm threadsFinished,4,4
.type passCreate,@object # @passCreate
.local passCreate
.comm passCreate,4,4
.type _Z3RBTP10par_rbNode,@object # @_Z3RBTP10par_rbNode
.section .rodata,"a",@progbits
.globl _Z3RBTP10par_rbNode
.p2align 3, 0x0
_Z3RBTP10par_rbNode:
.quad _Z18__device_stub__RBTP10par_rbNode
.size _Z3RBTP10par_rbNode, 8
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "Time for the kernel: %f ms\n"
.size .L.str.2, 28
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3RBTP10par_rbNode"
.size .L__unnamed_1, 20
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "nodes"
.size .L__unnamed_2, 6
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "root"
.size .L__unnamed_3, 5
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "NIL"
.size .L__unnamed_4, 4
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "rtParent"
.size .L__unnamed_5, 9
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "rtSibling"
.size .L__unnamed_6, 10
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "nodeIndex"
.size .L__unnamed_7, 10
.type .L__unnamed_8,@object # @7
.L__unnamed_8:
.asciz "tmpIndex"
.size .L__unnamed_8, 9
.type .L__unnamed_9,@object # @8
.L__unnamed_9:
.asciz "tmp"
.size .L__unnamed_9, 4
.type .L__unnamed_10,@object # @9
.L__unnamed_10:
.asciz "createFlag"
.size .L__unnamed_10, 11
.type .L__unnamed_11,@object # @10
.L__unnamed_11:
.asciz "threadsFinished"
.size .L__unnamed_11, 16
.type .L__unnamed_12,@object # @11
.L__unnamed_12:
.asciz "passCreate"
.size .L__unnamed_12, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Kernel Launched"
.size .Lstr, 16
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Came back"
.size .Lstr.1, 10
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__RBTP10par_rbNode
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym nodes
.addrsig_sym root
.addrsig_sym NIL
.addrsig_sym rtParent
.addrsig_sym rtSibling
.addrsig_sym nodeIndex
.addrsig_sym tmpIndex
.addrsig_sym tmp
.addrsig_sym createFlag
.addrsig_sym threadsFinished
.addrsig_sym passCreate
.addrsig_sym _Z3RBTP10par_rbNode
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0016d612_00000000-6_a_sam_backup_latest.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2073:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2073:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z9createNILv
.type _Z9createNILv, @function
_Z9createNILv:
.LFB2057:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z9createNILv, .-_Z9createNILv
.globl _Z10createNodei
.type _Z10createNodei, @function
_Z10createNodei:
.LFB2058:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2058:
.size _Z10createNodei, .-_Z10createNodei
.globl _Z10createTreev
.type _Z10createTreev, @function
_Z10createTreev:
.LFB2059:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2059:
.size _Z10createTreev, .-_Z10createTreev
.globl _Z8TraverseP10par_rbNodei
.type _Z8TraverseP10par_rbNodei, @function
_Z8TraverseP10par_rbNodei:
.LFB2060:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2060:
.size _Z8TraverseP10par_rbNodei, .-_Z8TraverseP10par_rbNodei
.globl _Z9PlaceNodeP10par_rbNode
.type _Z9PlaceNodeP10par_rbNode, @function
_Z9PlaceNodeP10par_rbNode:
.LFB2061:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2061:
.size _Z9PlaceNodeP10par_rbNode, .-_Z9PlaceNodeP10par_rbNode
.globl _Z16Insert_RebalanceP10par_rbNode
.type _Z16Insert_RebalanceP10par_rbNode, @function
_Z16Insert_RebalanceP10par_rbNode:
.LFB2062:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2062:
.size _Z16Insert_RebalanceP10par_rbNode, .-_Z16Insert_RebalanceP10par_rbNode
.globl _Z15Update_RotationP10par_rbNodeP8caseFlag
.type _Z15Update_RotationP10par_rbNodeP8caseFlag, @function
_Z15Update_RotationP10par_rbNodeP8caseFlag:
.LFB2063:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2063:
.size _Z15Update_RotationP10par_rbNodeP8caseFlag, .-_Z15Update_RotationP10par_rbNodeP8caseFlag
.globl _Z11Left_RotateP10par_rbNode
.type _Z11Left_RotateP10par_rbNode, @function
_Z11Left_RotateP10par_rbNode:
.LFB2064:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2064:
.size _Z11Left_RotateP10par_rbNode, .-_Z11Left_RotateP10par_rbNode
.globl _Z12Right_RotateP10par_rbNode
.type _Z12Right_RotateP10par_rbNode, @function
_Z12Right_RotateP10par_rbNode:
.LFB2065:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2065:
.size _Z12Right_RotateP10par_rbNode, .-_Z12Right_RotateP10par_rbNode
.globl _Z6Inserti
.type _Z6Inserti, @function
_Z6Inserti:
.LFB2066:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2066:
.size _Z6Inserti, .-_Z6Inserti
.globl _Z13printPreorderP10par_rbNode
.type _Z13printPreorderP10par_rbNode, @function
_Z13printPreorderP10par_rbNode:
.LFB2067:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2067:
.size _Z13printPreorderP10par_rbNode, .-_Z13printPreorderP10par_rbNode
.globl _Z12printInorderP10par_rbNode
.type _Z12printInorderP10par_rbNode, @function
_Z12printInorderP10par_rbNode:
.LFB2068:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2068:
.size _Z12printInorderP10par_rbNode, .-_Z12printInorderP10par_rbNode
.globl _Z14printPostorderP10par_rbNode
.type _Z14printPostorderP10par_rbNode, @function
_Z14printPostorderP10par_rbNode:
.LFB2069:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2069:
.size _Z14printPostorderP10par_rbNode, .-_Z14printPostorderP10par_rbNode
.globl _Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode
.type _Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode, @function
_Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode:
.LFB2095:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rax
movq %rax, 80(%rsp)
movl $1, 32(%rsp)
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
leaq 24(%rsp), %rcx
leaq 16(%rsp), %rdx
leaq 44(%rsp), %rsi
leaq 32(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L33
.L29:
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L34
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L33:
.cfi_restore_state
pushq 24(%rsp)
.cfi_def_cfa_offset 120
pushq 24(%rsp)
.cfi_def_cfa_offset 128
leaq 96(%rsp), %r9
movq 60(%rsp), %rcx
movl 68(%rsp), %r8d
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
leaq _Z3RBTP10par_rbNode(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 112
jmp .L29
.L34:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2095:
.size _Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode, .-_Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode
.globl _Z3RBTP10par_rbNode
.type _Z3RBTP10par_rbNode, @function
_Z3RBTP10par_rbNode:
.LFB2096:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _Z3RBTP10par_rbNode, .-_Z3RBTP10par_rbNode
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Kernel Launched\n"
.LC1:
.string "Came back\n"
.LC2:
.string "Time for the kernel: %f ms\n"
.text
.globl main
.type main, @function
main:
.LFB2070:
.cfi_startproc
endbr64
subq $872, %rsp
.cfi_def_cfa_offset 880
movq %fs:40, %rax
movq %rax, 856(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $800, %esi
call cudaMalloc@PLT
leaq 52(%rsp), %rax
leaq 852(%rsp), %rdx
.L38:
movl $0, 4(%rax)
movl $0, (%rax)
addq $40, %rax
cmpq %rdx, %rax
jne .L38
leaq 8(%rsp), %rdi
call cudaEventCreate@PLT
leaq 16(%rsp), %rdi
call cudaEventCreate@PLT
leaq 48(%rsp), %rsi
movl $1, %ecx
movl $800, %edx
movq (%rsp), %rdi
call cudaMemcpy@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %esi
movq 8(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, 36(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 24(%rsp)
movl $1, 28(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 36(%rsp), %rdx
movl $1, %ecx
movq 24(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L43
.L39:
leaq 48(%rsp), %rdi
movl $2, %ecx
movl $800, %edx
movq (%rsp), %rsi
call cudaMemcpy@PLT
call cudaDeviceSynchronize@PLT
movl $0, %esi
movq 16(%rsp), %rdi
call cudaEventRecord@PLT
movq 16(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 36(%rsp), %rdi
movq 16(%rsp), %rdx
movq 8(%rsp), %rsi
call cudaEventElapsedTime@PLT
pxor %xmm0, %xmm0
cvtss2sd 36(%rsp), %xmm0
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 856(%rsp), %rax
subq %fs:40, %rax
jne .L44
movl $0, %eax
addq $872, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
movq (%rsp), %rdi
call _Z33__device_stub__Z3RBTP10par_rbNodeP10par_rbNode
jmp .L39
.L44:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2070:
.size main, .-main
.section .rodata.str1.1
.LC3:
.string "_Z3RBTP10par_rbNode"
.LC4:
.string "nodes"
.LC5:
.string "root"
.LC6:
.string "NIL"
.LC7:
.string "rtParent"
.LC8:
.string "rtSibling"
.LC9:
.string "nodeIndex"
.LC10:
.string "tmpIndex"
.LC11:
.string "tmp"
.LC12:
.string "createFlag"
.LC13:
.string "threadsFinished"
.LC14:
.string "passCreate"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2098:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC3(%rip), %rdx
movq %rdx, %rcx
leaq _Z3RBTP10par_rbNode(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC4(%rip), %rdx
movq %rdx, %rcx
leaq _ZL5nodes(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _ZL4root(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _ZL3NIL(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC7(%rip), %rdx
movq %rdx, %rcx
leaq _ZL8rtParent(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $8, %r9d
movl $0, %r8d
leaq .LC8(%rip), %rdx
movq %rdx, %rcx
leaq _ZL9rtSibling(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC9(%rip), %rdx
movq %rdx, %rcx
leaq _ZL9nodeIndex(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC10(%rip), %rdx
movq %rdx, %rcx
leaq _ZL8tmpIndex(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $160, %r9d
movl $0, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _ZL3tmp(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC12(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10createFlag(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _ZL15threadsFinished(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
movl $4, %r9d
movl $0, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _ZL10passCreate(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterVar@PLT
addq $16, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.local _ZL10passCreate
.comm _ZL10passCreate,4,4
.local _ZL15threadsFinished
.comm _ZL15threadsFinished,4,4
.local _ZL10createFlag
.comm _ZL10createFlag,4,4
.local _ZL3tmp
.comm _ZL3tmp,160,32
.local _ZL8tmpIndex
.comm _ZL8tmpIndex,4,4
.local _ZL9nodeIndex
.comm _ZL9nodeIndex,4,4
.local _ZL9rtSibling
.comm _ZL9rtSibling,8,8
.local _ZL8rtParent
.comm _ZL8rtParent,8,8
.local _ZL3NIL
.comm _ZL3NIL,8,8
.local _ZL4root
.comm _ZL4root,8,8
.local _ZL5nodes
.comm _ZL5nodes,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "a_sam_backup_latest.hip"
.globl _Z18__device_stub__RBTP10par_rbNode # -- Begin function _Z18__device_stub__RBTP10par_rbNode
.p2align 4, 0x90
.type _Z18__device_stub__RBTP10par_rbNode,@function
_Z18__device_stub__RBTP10par_rbNode: # @_Z18__device_stub__RBTP10par_rbNode
.cfi_startproc
# %bb.0:
subq $72, %rsp
.cfi_def_cfa_offset 80
movq %rdi, 64(%rsp)
leaq 64(%rsp), %rax
movq %rax, (%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
movq %rsp, %r9
movl $_Z3RBTP10par_rbNode, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $88, %rsp
.cfi_adjust_cfa_offset -88
retq
.Lfunc_end0:
.size _Z18__device_stub__RBTP10par_rbNode, .Lfunc_end0-_Z18__device_stub__RBTP10par_rbNode
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $904, %rsp # imm = 0x388
.cfi_def_cfa_offset 912
leaq 16(%rsp), %rdi
movl $800, %esi # imm = 0x320
callq hipMalloc
movl $8, %eax
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movq $0, 92(%rsp,%rax)
addq $40, %rax
cmpq $808, %rax # imm = 0x328
jne .LBB1_1
# %bb.2:
leaq 40(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movq 16(%rsp), %rdi
leaq 96(%rsp), %rsi
movl $800, %edx # imm = 0x320
movl $1, %ecx
callq hipMemcpy
movl $.Lstr, %edi
callq puts@PLT
movq 40(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_4
# %bb.3:
movq 16(%rsp), %rax
movq %rax, 88(%rsp)
leaq 88(%rsp), %rax
movq %rax, 48(%rsp)
leaq 24(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 24(%rsp), %rsi
movl 32(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 48(%rsp), %r9
movl $_Z3RBTP10par_rbNode, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_4:
movq 16(%rsp), %rsi
leaq 96(%rsp), %rdi
movl $800, %edx # imm = 0x320
movl $2, %ecx
callq hipMemcpy
callq hipDeviceSynchronize
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movl $.Lstr.1, %edi
callq puts@PLT
movq 40(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 24(%rsp), %rdi
callq hipEventElapsedTime
movss 24(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.2, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $904, %rsp # imm = 0x388
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rbx
subq $32, %rsp
.cfi_adjust_cfa_offset 32
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z3RBTP10par_rbNode, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
addq $32, %rsp
.cfi_adjust_cfa_offset -32
movl $nodes, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movl $8, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $root, %esi
movl $.L__unnamed_3, %edx
movl $.L__unnamed_3, %ecx
movl $8, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $NIL, %esi
movl $.L__unnamed_4, %edx
movl $.L__unnamed_4, %ecx
movl $8, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $rtParent, %esi
movl $.L__unnamed_5, %edx
movl $.L__unnamed_5, %ecx
movl $8, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $rtSibling, %esi
movl $.L__unnamed_6, %edx
movl $.L__unnamed_6, %ecx
movl $8, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $nodeIndex, %esi
movl $.L__unnamed_7, %edx
movl $.L__unnamed_7, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $tmpIndex, %esi
movl $.L__unnamed_8, %edx
movl $.L__unnamed_8, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $tmp, %esi
movl $.L__unnamed_9, %edx
movl $.L__unnamed_9, %ecx
movl $160, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $createFlag, %esi
movl $.L__unnamed_10, %edx
movl $.L__unnamed_10, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $threadsFinished, %esi
movl $.L__unnamed_11, %edx
movl $.L__unnamed_11, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $passCreate, %esi
movl $.L__unnamed_12, %edx
movl $.L__unnamed_12, %ecx
movl $4, %r9d
movq %rbx, %rdi
xorl %r8d, %r8d
pushq $0
.cfi_adjust_cfa_offset 8
pushq $0
.cfi_adjust_cfa_offset 8
callq __hipRegisterVar
addq $16, %rsp
.cfi_adjust_cfa_offset -16
movl $__hip_module_dtor, %edi
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type nodes,@object # @nodes
.local nodes
.comm nodes,8,8
.type root,@object # @root
.local root
.comm root,8,8
.type NIL,@object # @NIL
.local NIL
.comm NIL,8,8
.type rtParent,@object # @rtParent
.local rtParent
.comm rtParent,8,8
.type rtSibling,@object # @rtSibling
.local rtSibling
.comm rtSibling,8,8
.type nodeIndex,@object # @nodeIndex
.local nodeIndex
.comm nodeIndex,4,4
.type tmpIndex,@object # @tmpIndex
.local tmpIndex
.comm tmpIndex,4,4
.type tmp,@object # @tmp
.local tmp
.comm tmp,160,16
.type createFlag,@object # @createFlag
.local createFlag
.comm createFlag,4,4
.type threadsFinished,@object # @threadsFinished
.local threadsFinished
.comm threadsFinished,4,4
.type passCreate,@object # @passCreate
.local passCreate
.comm passCreate,4,4
.type _Z3RBTP10par_rbNode,@object # @_Z3RBTP10par_rbNode
.section .rodata,"a",@progbits
.globl _Z3RBTP10par_rbNode
.p2align 3, 0x0
_Z3RBTP10par_rbNode:
.quad _Z18__device_stub__RBTP10par_rbNode
.size _Z3RBTP10par_rbNode, 8
.type .L.str.2,@object # @.str.2
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.2:
.asciz "Time for the kernel: %f ms\n"
.size .L.str.2, 28
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z3RBTP10par_rbNode"
.size .L__unnamed_1, 20
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "nodes"
.size .L__unnamed_2, 6
.type .L__unnamed_3,@object # @2
.L__unnamed_3:
.asciz "root"
.size .L__unnamed_3, 5
.type .L__unnamed_4,@object # @3
.L__unnamed_4:
.asciz "NIL"
.size .L__unnamed_4, 4
.type .L__unnamed_5,@object # @4
.L__unnamed_5:
.asciz "rtParent"
.size .L__unnamed_5, 9
.type .L__unnamed_6,@object # @5
.L__unnamed_6:
.asciz "rtSibling"
.size .L__unnamed_6, 10
.type .L__unnamed_7,@object # @6
.L__unnamed_7:
.asciz "nodeIndex"
.size .L__unnamed_7, 10
.type .L__unnamed_8,@object # @7
.L__unnamed_8:
.asciz "tmpIndex"
.size .L__unnamed_8, 9
.type .L__unnamed_9,@object # @8
.L__unnamed_9:
.asciz "tmp"
.size .L__unnamed_9, 4
.type .L__unnamed_10,@object # @9
.L__unnamed_10:
.asciz "createFlag"
.size .L__unnamed_10, 11
.type .L__unnamed_11,@object # @10
.L__unnamed_11:
.asciz "threadsFinished"
.size .L__unnamed_11, 16
.type .L__unnamed_12,@object # @11
.L__unnamed_12:
.asciz "passCreate"
.size .L__unnamed_12, 11
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Kernel Launched"
.size .Lstr, 16
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Came back"
.size .Lstr.1, 10
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z18__device_stub__RBTP10par_rbNode
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym nodes
.addrsig_sym root
.addrsig_sym NIL
.addrsig_sym rtParent
.addrsig_sym rtSibling
.addrsig_sym nodeIndex
.addrsig_sym tmpIndex
.addrsig_sym tmp
.addrsig_sym createFlag
.addrsig_sym threadsFinished
.addrsig_sym passCreate
.addrsig_sym _Z3RBTP10par_rbNode
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | __device__ inline double sq(double x) { return x*x;}
__device__ double optimsquare_eps_2d_descent(double u[12], double xi[12], int sc, double epsilon, double w, int steps) {
double no, nxi[12], r;
r = 1./(1+epsilon/4.);
for (int c = 0; c < 3; c++) {
u[c*4 + 0] -= xi[c*4 + 0]-xi[c*4 + 3];
for (int i=1;i<=3;i++) {
u[c*4 + i] -= xi[c*4 + i]-xi[c*4 + i-1];
}
}
for (int it=0; it<steps; it++) {
no = 0;
for (int c = 0; c < 3; c++) {
nxi[c*4 + 0]=r*(.5*xi[c*4 + 0]+.25*(xi[c*4 + 3]+xi[c*4 + 1]+ u[c*4 + 1]-u[c*4 + 0]));
nxi[c*4 + 1]=r*(.5*xi[c*4 + 1]+.25*(xi[c*4 + 0]+xi[c*4 + 2]+ u[c*4 + 2]-u[c*4 + 1]));
nxi[c*4 + 2]=r*(.5*xi[c*4 + 2]+.25*(xi[c*4 + 1]+xi[c*4 + 3]+ u[c*4 + 3]-u[c*4 + 2]));
nxi[c*4 + 3]=r*(.5*xi[c*4 + 3]+.25*(xi[c*4 + 2]+xi[c*4 + 0]+ u[c*4 + 0]-u[c*4 + 3]));
// no += sq(nxi[c*4 +0]) + sq(nxi[c*4 +1]) + sq(nxi[c*4 +2]) + sq(nxi[c*4 +3]);
}
for (int i = 0; i < 4; i++) {
no += sq(nxi[i]) + sq(nxi[4 + i]) + sq(nxi[8 + i]);
}
if (no > w) {
no = sqrt(w/no);
} else {
no = 1;
}
for (int c = 0; c < 3; c++) {
xi[c*4 + 0]= nxi[c*4 + 0]*no;
xi[c*4 + 1]= nxi[c*4 + 1]*no;
xi[c*4 + 2]= nxi[c*4 + 2]*no;
xi[c*4 + 3]= nxi[c*4 + 3]*no;
}
}
for (int c = 0; c < 3; c++) {
u[c*4 + 0] += xi[c*4 + 0]-xi[c*4 + 3];
for (int i=1;i<=3;i++) {
u[c*4 + i] += xi[c*4 + i]-xi[c*4 + i-1];
}
}
return 0.0;
}
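// --- Illustrative sketch (editor's addition, not part of the original program) ---
// The "if (no > w) no = sqrt(w/no)" step above rescales the candidate dual variable
// nxi so that its squared Euclidean norm never exceeds w: multiplying every entry by
// sqrt(w/no) turns a squared norm of no into exactly w. The same step as a standalone
// helper (the name projectToBall is hypothetical):
__device__ void projectToBall(double v[12], double w) {
	double sq_norm = 0.0;
	for (int i = 0; i < 12; i++) sq_norm += v[i] * v[i];
	if (sq_norm > w) {
		double s = sqrt(w / sq_norm);   // scale factor; new squared norm == w
		for (int i = 0; i < 12; i++) v[i] *= s;
	}
}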
__global__ void opt_eps_split(int sx, int sc, double *xi, double *u, double epsilon, double w, double ws, double we2, int L, int K, int steps, int is_even_step) {
int i,kk;
int l = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.x * blockDim.x + threadIdx.x;
if ((l < L) && (k < K)) {
if (is_even_step) {
i = l * 2 * sx + 2 * k;
kk = l * K + k;
} else {
i = (1 + l * 2) * sx + 1 + 2 * k;
kk = l * K + k;
}
double aru[12];
for (int c = 0; c < 3; c++) {
aru[c*4 + 0]=u[c + sc*i];
aru[c*4 + 1]=u[c + sc*(i+1)];
aru[c*4 + 2]=u[c + sc*(i+1+sx)];
aru[c*4 + 3]=u[c + sc*(i+sx)];
}
double axi[12];
for (int c = 0; c < 3; c++) {
axi[c*4 + 0] = xi[c + sc*kk*4];
axi[c*4 + 1] = xi[c + sc*(kk*4 + 1)];
axi[c*4 + 2] = xi[c + sc*(kk*4 + 2)];
axi[c*4 + 3] = xi[c + sc*(kk*4 + 3)];
}
optimsquare_eps_2d_descent(aru, axi, sc, epsilon, w, steps);
for (int c = 0; c < 3; c++) {
u[c + sc*i]=aru[c*4 + 0];
u[c + sc*(i+1)]=aru[c*4 + 1];
u[c + sc*(i+1+sx)]=aru[c*4 + 2];
u[c + sc*(i+sx)]=aru[c*4 + 3];
}
for (int c = 0; c < 3; c++) {
xi[c + sc*kk*4] = axi[c*4 + 0];
xi[c + sc*(kk*4 + 1)] = axi[c*4 + 1];
xi[c + sc*(kk*4 + 2)] = axi[c*4 + 2];
xi[c + sc*(kk*4 + 3)] = axi[c*4 + 3];
}
}
}
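// --- Worked example (editor's addition, not part of the original program) ---
// opt_eps_split processes the image in 2x2 cells laid out as a checkerboard. For a
// row stride of sx = 6, the thread with l = 1, k = 2 on the even pass gets
//   i = l*2*sx + 2*k = 1*2*6 + 2*2 = 16,
// and its cell touches pixels {i, i+1, i+1+sx, i+sx} = {16, 17, 23, 22}, i.e. the 2x2
// block whose top-left corner sits at row 2, column 4. The odd pass shifts the anchor
// by one row and one column via (1 + l*2)*sx + 1 + 2*k, so within a single pass no two
// threads touch the same pixel and the cells can be updated in parallel.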
__global__ void over_relax_eps_2d(int sx, int sc, double *xio, double *xiobar, double *u, double theta, int Lo, int Ko) {
int l = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.x * blockDim.x + threadIdx.x;
if ((l < Lo) && (k < Ko)) {
int j = 1 + 2*l;
int i = j * sx + 1 + 2*k;
int kk = Ko * l + k;
double dx[12];
int m;
int kx = kk * 4;
for (int c = 0; c < 3; c++) {
for (m=0; m < 4; m++)
dx[c*4 + m] = theta * (xiobar[c + sc*(kx+m)] - xio[c + sc*(kx+m)]);
u[c + sc*i] += dx[c*4 + 0]-dx[c*4 + 3];
u[c + sc*(i+1)] += dx[c*4 + 1]-dx[c*4 + 0];
u[c + sc*(i+1+sx)] += dx[c*4 + 2]-dx[c*4 + 1];
u[c + sc*(i+sx)] += dx[c*4 + 3]-dx[c*4 + 2];
for (m=0; m<4; m++)
xio[c + sc*(kx+m)] = xiobar[c + sc*(kx+m)] + dx[c*4 + m];
}
}
}
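// --- Illustrative sketch (editor's addition, not part of the original program) ---
// Per component, over_relax_eps_2d performs a standard over-relaxation /
// extrapolation step: with dx = theta*(xiobar - xio), the update
//   xio_new = xiobar + dx = (1 + theta)*xiobar - theta*xio
// moves past xiobar in the direction of the latest change, while u is shifted by the
// same cyclic differences of dx that optimsquare_eps_2d_descent applies to xi, keeping
// u consistent with the extrapolated xio. A scalar version of the xi update (the name
// over_relax_scalar is hypothetical):
__host__ __device__ inline double over_relax_scalar(double xio, double xiobar, double theta) {
	double dx = theta * (xiobar - xio);
	return xiobar + dx;   // == (1 + theta)*xiobar - theta*xio
}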
__global__ void gap_arr_eps_2d(int sx, int sc, double* gl, double *xie, double *u, double epsilon, double w, double ws, double we2, int Le, int Ke, int is_even_step) {
int l = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.x * blockDim.x + threadIdx.x;
if ((l < Le) && (k < Ke)) {
int i;
if (is_even_step) {
i = 2 * l * sx + 2 * k;
} else {
i = (2 * l + 1) * sx + 1 + 2 * k;
}
int kk = l*Ke + k;
double aru[12], a[12], xi[12], b = 0, cc = 0, d = 0, gap = 0;
for (int c = 0; c < 3; c++) {
aru[c*4 + 0]=u[c + sc*i];
aru[c*4 + 1]=u[c + sc*(i+1)];
aru[c*4 + 2]=u[c + sc*(i+1+sx)];
aru[c*4 + 3]=u[c + sc*(i+sx)];
xi[c*4 + 0] = xie[c + sc*kk*4];
xi[c*4 + 1] = xie[c + sc*(kk*4 + 1)];
xi[c*4 + 2] = xie[c + sc*(kk*4 + 2)];
xi[c*4 + 3] = xie[c + sc*(kk*4 + 3)];
}
for (int c = 0; c < 3; c++) {
// TV_e(Du) - <xi,Du> + e*xi^2/2
a[c*4 + 3] = aru[c*4 + 0]-aru[c*4 + 3];
b += a[c*4 + 3]*a[c*4 + 3];
cc += xi[c*4 + 3]*a[c*4 + 3];
d += xi[c*4 + 3]*xi[c*4 + 3];
for (int m=0;m<3;m++) {
a[c*4 + m] = aru[c*4 + m+1]-aru[c*4 + m];
b += a[c*4 + m]*a[c*4 + m];
cc += xi[c*4 + m]*a[c*4 + m];
d += xi[c*4 + m]*xi[c*4 + m];
}
}
gap += epsilon*.5*d-cc;
if (b < we2) {
gap += .5*b/epsilon; // here epsilon>0
} else {
gap += ws*sqrt(b)-.5*epsilon*w; // TV_eps
}
gl[l*Ke + k] = gap;
}
} | .file "tmpxft_000400b4_00000000-6_kernels-2d-color.cudafe1.cpp"
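// --- Illustrative sketch (editor's addition, not part of the original program) ---
// The final branch of gap_arr_eps_2d applies a Huber-style smoothed total-variation
// term to the squared gradient norm b: the quadratic piece .5*b/epsilon for small b
// and the linear piece ws*sqrt(b) - .5*epsilon*w for large b. The thresholds ws and
// we2 are computed on the host and are not shown in this file; for the two pieces to
// join smoothly at b = we2 one would need ws = sqrt(w) and we2 = epsilon*epsilon*w,
// but that is an assumption, not something stated here. The branch as a scalar helper
// (the name tv_eps_of_sq_norm is hypothetical):
__device__ double tv_eps_of_sq_norm(double b, double epsilon, double w, double ws, double we2) {
	return (b < we2) ? 0.5 * b / epsilon                 // quadratic (smoothed) region
	                 : ws * sqrt(b) - 0.5 * epsilon * w; // linear region
}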
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2031:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2031:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26optimsquare_eps_2d_descentPdS_iddi
.type _Z26optimsquare_eps_2d_descentPdS_iddi, @function
_Z26optimsquare_eps_2d_descentPdS_iddi:
.LFB2028:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2028:
.size _Z26optimsquare_eps_2d_descentPdS_iddi, .-_Z26optimsquare_eps_2d_descentPdS_iddi
.globl _Z45__device_stub__Z13opt_eps_splitiiPdS_ddddiiiiiiPdS_ddddiiii
.type _Z45__device_stub__Z13opt_eps_splitiiPdS_ddddiiiiiiPdS_ddddiiii, @function
_Z45__device_stub__Z13opt_eps_splitiiPdS_ddddiiiiiiPdS_ddddiiii:
.LFB2053:
.cfi_startproc
endbr64
subq $248, %rsp
.cfi_def_cfa_offset 256
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movq %rdx, 48(%rsp)
movq %rcx, 40(%rsp)
movsd %xmm0, 32(%rsp)
movsd %xmm1, 24(%rsp)
movsd %xmm2, 16(%rsp)
movsd %xmm3, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 232(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rax
movq %rax, 160(%rsp)
leaq 24(%rsp), %rax
movq %rax, 168(%rsp)
leaq 16(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
leaq 4(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
leaq 256(%rsp), %rax
movq %rax, 208(%rsp)
leaq 264(%rsp), %rax
movq %rax, 216(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $248, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 264
pushq 72(%rsp)
.cfi_def_cfa_offset 272
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z13opt_eps_splitiiPdS_ddddiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 256
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z45__device_stub__Z13opt_eps_splitiiPdS_ddddiiiiiiPdS_ddddiiii, .-_Z45__device_stub__Z13opt_eps_splitiiPdS_ddddiiiiiiPdS_ddddiiii
.globl _Z13opt_eps_splitiiPdS_ddddiiii
.type _Z13opt_eps_splitiiPdS_ddddiiii, @function
_Z13opt_eps_splitiiPdS_ddddiiii:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z45__device_stub__Z13opt_eps_splitiiPdS_ddddiiiiiiPdS_ddddiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z13opt_eps_splitiiPdS_ddddiiii, .-_Z13opt_eps_splitiiPdS_ddddiiii
.globl _Z46__device_stub__Z17over_relax_eps_2diiPdS_S_diiiiPdS_S_dii
.type _Z46__device_stub__Z17over_relax_eps_2diiPdS_S_diiiiPdS_S_dii, @function
_Z46__device_stub__Z17over_relax_eps_2diiPdS_S_diiiiPdS_S_dii:
.LFB2055:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movl %edi, 44(%rsp)
movl %esi, 40(%rsp)
movq %rdx, 32(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movsd %xmm0, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 4(%rsp), %rax
movq %rax, 160(%rsp)
leaq 208(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z17over_relax_eps_2diiPdS_S_dii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2055:
.size _Z46__device_stub__Z17over_relax_eps_2diiPdS_S_diiiiPdS_S_dii, .-_Z46__device_stub__Z17over_relax_eps_2diiPdS_S_diiiiPdS_S_dii
.globl _Z17over_relax_eps_2diiPdS_S_dii
.type _Z17over_relax_eps_2diiPdS_S_dii, @function
_Z17over_relax_eps_2diiPdS_S_dii:
.LFB2056:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z46__device_stub__Z17over_relax_eps_2diiPdS_S_diiiiPdS_S_dii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _Z17over_relax_eps_2diiPdS_S_dii, .-_Z17over_relax_eps_2diiPdS_S_dii
.globl _Z47__device_stub__Z14gap_arr_eps_2diiPdS_S_ddddiiiiiPdS_S_ddddiii
.type _Z47__device_stub__Z14gap_arr_eps_2diiPdS_S_ddddiiiiiPdS_S_ddddiii, @function
_Z47__device_stub__Z14gap_arr_eps_2diiPdS_S_ddddiiiiiPdS_S_ddddiii:
.LFB2057:
.cfi_startproc
endbr64
subq $264, %rsp
.cfi_def_cfa_offset 272
movl %edi, 76(%rsp)
movl %esi, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 56(%rsp)
movq %r8, 48(%rsp)
movsd %xmm0, 40(%rsp)
movsd %xmm1, 32(%rsp)
movsd %xmm2, 24(%rsp)
movsd %xmm3, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 248(%rsp)
xorl %eax, %eax
leaq 76(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rax
movq %rax, 152(%rsp)
leaq 64(%rsp), %rax
movq %rax, 160(%rsp)
leaq 56(%rsp), %rax
movq %rax, 168(%rsp)
leaq 48(%rsp), %rax
movq %rax, 176(%rsp)
leaq 40(%rsp), %rax
movq %rax, 184(%rsp)
leaq 32(%rsp), %rax
movq %rax, 192(%rsp)
leaq 24(%rsp), %rax
movq %rax, 200(%rsp)
leaq 16(%rsp), %rax
movq %rax, 208(%rsp)
leaq 12(%rsp), %rax
movq %rax, 216(%rsp)
leaq 272(%rsp), %rax
movq %rax, 224(%rsp)
leaq 280(%rsp), %rax
movq %rax, 232(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
leaq 88(%rsp), %rcx
leaq 80(%rsp), %rdx
leaq 108(%rsp), %rsi
leaq 96(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $264, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
pushq 88(%rsp)
.cfi_def_cfa_offset 280
pushq 88(%rsp)
.cfi_def_cfa_offset 288
leaq 160(%rsp), %r9
movq 124(%rsp), %rcx
movl 132(%rsp), %r8d
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
leaq _Z14gap_arr_eps_2diiPdS_S_ddddiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 272
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z47__device_stub__Z14gap_arr_eps_2diiPdS_S_ddddiiiiiPdS_S_ddddiii, .-_Z47__device_stub__Z14gap_arr_eps_2diiPdS_S_ddddiiiiiPdS_S_ddddiii
.globl _Z14gap_arr_eps_2diiPdS_S_ddddiii
.type _Z14gap_arr_eps_2diiPdS_S_ddddiii, @function
_Z14gap_arr_eps_2diiPdS_S_ddddiii:
.LFB2058:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z47__device_stub__Z14gap_arr_eps_2diiPdS_S_ddddiiiiiPdS_S_ddddiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z14gap_arr_eps_2diiPdS_S_ddddiii, .-_Z14gap_arr_eps_2diiPdS_S_ddddiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z14gap_arr_eps_2diiPdS_S_ddddiii"
.align 8
.LC1:
.string "_Z17over_relax_eps_2diiPdS_S_dii"
.align 8
.LC2:
.string "_Z13opt_eps_splitiiPdS_ddddiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14gap_arr_eps_2diiPdS_S_ddddiii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z17over_relax_eps_2diiPdS_S_dii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z13opt_eps_splitiiPdS_ddddiiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | __device__ inline double sq(double x) { return x*x;}
__device__ double optimsquare_eps_2d_descent(double u[12], double xi[12], int sc, double epsilon, double w, int steps) {
double no, nxi[12], r;
r = 1./(1+epsilon/4.);
for (int c = 0; c < 3; c++) {
u[c*4 + 0] -= xi[c*4 + 0]-xi[c*4 + 3];
for (int i=1;i<=3;i++) {
u[c*4 + i] -= xi[c*4 + i]-xi[c*4 + i-1];
}
}
for (int it=0; it<steps; it++) {
no = 0;
for (int c = 0; c < 3; c++) {
nxi[c*4 + 0]=r*(.5*xi[c*4 + 0]+.25*(xi[c*4 + 3]+xi[c*4 + 1]+ u[c*4 + 1]-u[c*4 + 0]));
nxi[c*4 + 1]=r*(.5*xi[c*4 + 1]+.25*(xi[c*4 + 0]+xi[c*4 + 2]+ u[c*4 + 2]-u[c*4 + 1]));
nxi[c*4 + 2]=r*(.5*xi[c*4 + 2]+.25*(xi[c*4 + 1]+xi[c*4 + 3]+ u[c*4 + 3]-u[c*4 + 2]));
nxi[c*4 + 3]=r*(.5*xi[c*4 + 3]+.25*(xi[c*4 + 2]+xi[c*4 + 0]+ u[c*4 + 0]-u[c*4 + 3]));
// no += sq(nxi[c*4 +0]) + sq(nxi[c*4 +1]) + sq(nxi[c*4 +2]) + sq(nxi[c*4 +3]);
}
for (int i = 0; i < 4; i++) {
no += sq(nxi[i]) + sq(nxi[4 + i]) + sq(nxi[8 + i]);
}
if (no > w) {
no = sqrt(w/no);
} else {
no = 1;
}
for (int c = 0; c < 3; c++) {
xi[c*4 + 0]= nxi[c*4 + 0]*no;
xi[c*4 + 1]= nxi[c*4 + 1]*no;
xi[c*4 + 2]= nxi[c*4 + 2]*no;
xi[c*4 + 3]= nxi[c*4 + 3]*no;
}
}
for (int c = 0; c < 3; c++) {
u[c*4 + 0] += xi[c*4 + 0]-xi[c*4 + 3];
for (int i=1;i<=3;i++) {
u[c*4 + i] += xi[c*4 + i]-xi[c*4 + i-1];
}
}
return 0.0;
}
__global__ void opt_eps_split(int sx, int sc, double *xi, double *u, double epsilon, double w, double ws, double we2, int L, int K, int steps, int is_even_step) {
int i,kk;
int l = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.x * blockDim.x + threadIdx.x;
if ((l < L) && (k < K)) {
if (is_even_step) {
i = l * 2 * sx + 2 * k;
kk = l * K + k;
} else {
i = (1 + l * 2) * sx + 1 + 2 * k;
kk = l * K + k;
}
double aru[12];
for (int c = 0; c < 3; c++) {
aru[c*4 + 0]=u[c + sc*i];
aru[c*4 + 1]=u[c + sc*(i+1)];
aru[c*4 + 2]=u[c + sc*(i+1+sx)];
aru[c*4 + 3]=u[c + sc*(i+sx)];
}
double axi[12];
for (int c = 0; c < 3; c++) {
axi[c*4 + 0] = xi[c + sc*kk*4];
axi[c*4 + 1] = xi[c + sc*(kk*4 + 1)];
axi[c*4 + 2] = xi[c + sc*(kk*4 + 2)];
axi[c*4 + 3] = xi[c + sc*(kk*4 + 3)];
}
optimsquare_eps_2d_descent(aru, axi, sc, epsilon, w, steps);
for (int c = 0; c < 3; c++) {
u[c + sc*i]=aru[c*4 + 0];
u[c + sc*(i+1)]=aru[c*4 + 1];
u[c + sc*(i+1+sx)]=aru[c*4 + 2];
u[c + sc*(i+sx)]=aru[c*4 + 3];
}
for (int c = 0; c < 3; c++) {
xi[c + sc*kk*4] = axi[c*4 + 0];
xi[c + sc*(kk*4 + 1)] = axi[c*4 + 1];
xi[c + sc*(kk*4 + 2)] = axi[c*4 + 2];
xi[c + sc*(kk*4 + 3)] = axi[c*4 + 3];
}
}
}
__global__ void over_relax_eps_2d(int sx, int sc, double *xio, double *xiobar, double *u, double theta, int Lo, int Ko) {
int l = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.x * blockDim.x + threadIdx.x;
if ((l < Lo) && (k < Ko)) {
int j = 1 + 2*l;
int i = j * sx + 1 + 2*k;
int kk = Ko * l + k;
double dx[12];
int m;
int kx = kk * 4;
for (int c = 0; c < 3; c++) {
for (m=0; m < 4; m++)
dx[c*4 + m] = theta * (xiobar[c + sc*(kx+m)] - xio[c + sc*(kx+m)]);
u[c + sc*i] += dx[c*4 + 0]-dx[c*4 + 3];
u[c + sc*(i+1)] += dx[c*4 + 1]-dx[c*4 + 0];
u[c + sc*(i+1+sx)] += dx[c*4 + 2]-dx[c*4 + 1];
u[c + sc*(i+sx)] += dx[c*4 + 3]-dx[c*4 + 2];
for (m=0; m<4; m++)
xio[c + sc*(kx+m)] = xiobar[c + sc*(kx+m)] + dx[c*4 + m];
}
}
}
__global__ void gap_arr_eps_2d(int sx, int sc, double* gl, double *xie, double *u, double epsilon, double w, double ws, double we2, int Le, int Ke, int is_even_step) {
int l = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.x * blockDim.x + threadIdx.x;
if ((l < Le) && (k < Ke)) {
int i;
if (is_even_step) {
i = 2 * l * sx + 2 * k;
} else {
i = (2 * l + 1) * sx + 1 + 2 * k;
}
int kk = l*Ke + k;
double aru[12], a[12], xi[12], b = 0, cc = 0, d = 0, gap = 0;
for (int c = 0; c < 3; c++) {
aru[c*4 + 0]=u[c + sc*i];
aru[c*4 + 1]=u[c + sc*(i+1)];
aru[c*4 + 2]=u[c + sc*(i+1+sx)];
aru[c*4 + 3]=u[c + sc*(i+sx)];
xi[c*4 + 0] = xie[c + sc*kk*4];
xi[c*4 + 1] = xie[c + sc*(kk*4 + 1)];
xi[c*4 + 2] = xie[c + sc*(kk*4 + 2)];
xi[c*4 + 3] = xie[c + sc*(kk*4 + 3)];
}
for (int c = 0; c < 3; c++) {
// TV_e(Du) - <xi,Du> + e*xi^2/2
a[c*4 + 3] = aru[c*4 + 0]-aru[c*4 + 3];
b += a[c*4 + 3]*a[c*4 + 3];
cc += xi[c*4 + 3]*a[c*4 + 3];
d += xi[c*4 + 3]*xi[c*4 + 3];
for (int m=0;m<3;m++) {
a[c*4 + m] = aru[c*4 + m+1]-aru[c*4 + m];
b += a[c*4 + m]*a[c*4 + m];
cc += xi[c*4 + m]*a[c*4 + m];
d += xi[c*4 + m]*xi[c*4 + m];
}
}
gap += epsilon*.5*d-cc;
if (b < we2) {
gap += .5*b/epsilon; // here epsilon>0
} else {
gap += ws*sqrt(b)-.5*epsilon*w; // TV_eps
}
gl[l*Ke + k] = gap;
}
} | #ifndef KERNELS_EPS_2D_COLOR_CUH
#define KERNELS_EPS_2D_COLOR_CUH
__global__ void opt_eps_split(int sx, int sc, double *xi, double *u, double epsilon, double w, double ws, double we2, int L, int K, int steps, int is_even_step);
__global__ void over_relax_eps_2d(int sx, int sc, double *xio, double *xiobar, double *u, double theta, int Lo, int Ko);
__global__ void gap_arr_eps_2d(int sx, int sc, double* gl, double *xie, double *u, double epsilon, double w, double ws, double we2, int Le, int Ke, int is_even_step);
#endif |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #ifndef KERNELS_EPS_2D_COLOR_CUH
#define KERNELS_EPS_2D_COLOR_CUH
__global__ void opt_eps_split(int sx, int sc, double *xi, double *u, double epsilon, double w, double ws, double we2, int L, int K, int steps, int is_even_step);
__global__ void over_relax_eps_2d(int sx, int sc, double *xio, double *xiobar, double *u, double theta, int Lo, int Ko);
__global__ void gap_arr_eps_2d(int sx, int sc, double* gl, double *xie, double *u, double epsilon, double w, double ws, double we2, int Le, int Ke, int is_even_step);
#endif | .text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.amdgpu_metadata
---
amdhsa.kernels: []
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #ifndef KERNELS_EPS_2D_COLOR_CUH
#define KERNELS_EPS_2D_COLOR_CUH
__global__ void opt_eps_split(int sx, int sc, double *xi, double *u, double epsilon, double w, double ws, double we2, int L, int K, int steps, int is_even_step);
__global__ void over_relax_eps_2d(int sx, int sc, double *xio, double *xiobar, double *u, double theta, int Lo, int Ko);
__global__ void gap_arr_eps_2d(int sx, int sc, double* gl, double *xie, double *u, double epsilon, double w, double ws, double we2, int Le, int Ke, int is_even_step);
#endif | .text
.file "kernels-2d-color.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000400b4_00000000-6_kernels-2d-color.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2031:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2031:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z26optimsquare_eps_2d_descentPdS_iddi
.type _Z26optimsquare_eps_2d_descentPdS_iddi, @function
_Z26optimsquare_eps_2d_descentPdS_iddi:
.LFB2028:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $24, %rsp
.cfi_def_cfa_offset 32
movl $1, 12(%rsp)
movl 12(%rsp), %edi
call exit@PLT
.cfi_endproc
.LFE2028:
.size _Z26optimsquare_eps_2d_descentPdS_iddi, .-_Z26optimsquare_eps_2d_descentPdS_iddi
.globl _Z45__device_stub__Z13opt_eps_splitiiPdS_ddddiiiiiiPdS_ddddiiii
.type _Z45__device_stub__Z13opt_eps_splitiiPdS_ddddiiiiiiPdS_ddddiiii, @function
_Z45__device_stub__Z13opt_eps_splitiiPdS_ddddiiiiiiPdS_ddddiiii:
.LFB2053:
.cfi_startproc
endbr64
subq $248, %rsp
.cfi_def_cfa_offset 256
movl %edi, 60(%rsp)
movl %esi, 56(%rsp)
movq %rdx, 48(%rsp)
movq %rcx, 40(%rsp)
movsd %xmm0, 32(%rsp)
movsd %xmm1, 24(%rsp)
movsd %xmm2, 16(%rsp)
movsd %xmm3, 8(%rsp)
movl %r8d, 4(%rsp)
movl %r9d, (%rsp)
movq %fs:40, %rax
movq %rax, 232(%rsp)
xorl %eax, %eax
leaq 60(%rsp), %rax
movq %rax, 128(%rsp)
leaq 56(%rsp), %rax
movq %rax, 136(%rsp)
leaq 48(%rsp), %rax
movq %rax, 144(%rsp)
leaq 40(%rsp), %rax
movq %rax, 152(%rsp)
leaq 32(%rsp), %rax
movq %rax, 160(%rsp)
leaq 24(%rsp), %rax
movq %rax, 168(%rsp)
leaq 16(%rsp), %rax
movq %rax, 176(%rsp)
leaq 8(%rsp), %rax
movq %rax, 184(%rsp)
leaq 4(%rsp), %rax
movq %rax, 192(%rsp)
movq %rsp, %rax
movq %rax, 200(%rsp)
leaq 256(%rsp), %rax
movq %rax, 208(%rsp)
leaq 264(%rsp), %rax
movq %rax, 216(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
leaq 72(%rsp), %rcx
leaq 64(%rsp), %rdx
leaq 92(%rsp), %rsi
leaq 80(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L9
.L5:
movq 232(%rsp), %rax
subq %fs:40, %rax
jne .L10
addq $248, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L9:
.cfi_restore_state
pushq 72(%rsp)
.cfi_def_cfa_offset 264
pushq 72(%rsp)
.cfi_def_cfa_offset 272
leaq 144(%rsp), %r9
movq 108(%rsp), %rcx
movl 116(%rsp), %r8d
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
leaq _Z13opt_eps_splitiiPdS_ddddiiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 256
jmp .L5
.L10:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2053:
.size _Z45__device_stub__Z13opt_eps_splitiiPdS_ddddiiiiiiPdS_ddddiiii, .-_Z45__device_stub__Z13opt_eps_splitiiPdS_ddddiiiiiiPdS_ddddiiii
.globl _Z13opt_eps_splitiiPdS_ddddiiii
.type _Z13opt_eps_splitiiPdS_ddddiiii, @function
_Z13opt_eps_splitiiPdS_ddddiiii:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z45__device_stub__Z13opt_eps_splitiiPdS_ddddiiiiiiPdS_ddddiiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _Z13opt_eps_splitiiPdS_ddddiiii, .-_Z13opt_eps_splitiiPdS_ddddiiii
.globl _Z46__device_stub__Z17over_relax_eps_2diiPdS_S_diiiiPdS_S_dii
.type _Z46__device_stub__Z17over_relax_eps_2diiPdS_S_diiiiPdS_S_dii, @function
_Z46__device_stub__Z17over_relax_eps_2diiPdS_S_diiiiPdS_S_dii:
.LFB2055:
.cfi_startproc
endbr64
subq $200, %rsp
.cfi_def_cfa_offset 208
movl %edi, 44(%rsp)
movl %esi, 40(%rsp)
movq %rdx, 32(%rsp)
movq %rcx, 24(%rsp)
movq %r8, 16(%rsp)
movsd %xmm0, 8(%rsp)
movl %r9d, 4(%rsp)
movq %fs:40, %rax
movq %rax, 184(%rsp)
xorl %eax, %eax
leaq 44(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rax
movq %rax, 120(%rsp)
leaq 32(%rsp), %rax
movq %rax, 128(%rsp)
leaq 24(%rsp), %rax
movq %rax, 136(%rsp)
leaq 16(%rsp), %rax
movq %rax, 144(%rsp)
leaq 8(%rsp), %rax
movq %rax, 152(%rsp)
leaq 4(%rsp), %rax
movq %rax, 160(%rsp)
leaq 208(%rsp), %rax
movq %rax, 168(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L17
.L13:
movq 184(%rsp), %rax
subq %fs:40, %rax
jne .L18
addq $200, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 216
pushq 56(%rsp)
.cfi_def_cfa_offset 224
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z17over_relax_eps_2diiPdS_S_dii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 208
jmp .L13
.L18:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2055:
.size _Z46__device_stub__Z17over_relax_eps_2diiPdS_S_diiiiPdS_S_dii, .-_Z46__device_stub__Z17over_relax_eps_2diiPdS_S_diiiiPdS_S_dii
.globl _Z17over_relax_eps_2diiPdS_S_dii
.type _Z17over_relax_eps_2diiPdS_S_dii, @function
_Z17over_relax_eps_2diiPdS_S_dii:
.LFB2056:
.cfi_startproc
endbr64
subq $16, %rsp
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z46__device_stub__Z17over_relax_eps_2diiPdS_S_diiiiPdS_S_dii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2056:
.size _Z17over_relax_eps_2diiPdS_S_dii, .-_Z17over_relax_eps_2diiPdS_S_dii
.globl _Z47__device_stub__Z14gap_arr_eps_2diiPdS_S_ddddiiiiiPdS_S_ddddiii
.type _Z47__device_stub__Z14gap_arr_eps_2diiPdS_S_ddddiiiiiPdS_S_ddddiii, @function
_Z47__device_stub__Z14gap_arr_eps_2diiPdS_S_ddddiiiiiPdS_S_ddddiii:
.LFB2057:
.cfi_startproc
endbr64
subq $264, %rsp
.cfi_def_cfa_offset 272
movl %edi, 76(%rsp)
movl %esi, 72(%rsp)
movq %rdx, 64(%rsp)
movq %rcx, 56(%rsp)
movq %r8, 48(%rsp)
movsd %xmm0, 40(%rsp)
movsd %xmm1, 32(%rsp)
movsd %xmm2, 24(%rsp)
movsd %xmm3, 16(%rsp)
movl %r9d, 12(%rsp)
movq %fs:40, %rax
movq %rax, 248(%rsp)
xorl %eax, %eax
leaq 76(%rsp), %rax
movq %rax, 144(%rsp)
leaq 72(%rsp), %rax
movq %rax, 152(%rsp)
leaq 64(%rsp), %rax
movq %rax, 160(%rsp)
leaq 56(%rsp), %rax
movq %rax, 168(%rsp)
leaq 48(%rsp), %rax
movq %rax, 176(%rsp)
leaq 40(%rsp), %rax
movq %rax, 184(%rsp)
leaq 32(%rsp), %rax
movq %rax, 192(%rsp)
leaq 24(%rsp), %rax
movq %rax, 200(%rsp)
leaq 16(%rsp), %rax
movq %rax, 208(%rsp)
leaq 12(%rsp), %rax
movq %rax, 216(%rsp)
leaq 272(%rsp), %rax
movq %rax, 224(%rsp)
leaq 280(%rsp), %rax
movq %rax, 232(%rsp)
movl $1, 96(%rsp)
movl $1, 100(%rsp)
movl $1, 104(%rsp)
movl $1, 108(%rsp)
movl $1, 112(%rsp)
movl $1, 116(%rsp)
leaq 88(%rsp), %rcx
leaq 80(%rsp), %rdx
leaq 108(%rsp), %rsi
leaq 96(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L25
.L21:
movq 248(%rsp), %rax
subq %fs:40, %rax
jne .L26
addq $264, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L25:
.cfi_restore_state
pushq 88(%rsp)
.cfi_def_cfa_offset 280
pushq 88(%rsp)
.cfi_def_cfa_offset 288
leaq 160(%rsp), %r9
movq 124(%rsp), %rcx
movl 132(%rsp), %r8d
movq 112(%rsp), %rsi
movl 120(%rsp), %edx
leaq _Z14gap_arr_eps_2diiPdS_S_ddddiii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 272
jmp .L21
.L26:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z47__device_stub__Z14gap_arr_eps_2diiPdS_S_ddddiiiiiPdS_S_ddddiii, .-_Z47__device_stub__Z14gap_arr_eps_2diiPdS_S_ddddiiiiiPdS_S_ddddiii
.globl _Z14gap_arr_eps_2diiPdS_S_ddddiii
.type _Z14gap_arr_eps_2diiPdS_S_ddddiii, @function
_Z14gap_arr_eps_2diiPdS_S_ddddiii:
.LFB2058:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 24
movl 24(%rsp), %eax
pushq %rax
.cfi_def_cfa_offset 32
call _Z47__device_stub__Z14gap_arr_eps_2diiPdS_S_ddddiiiiiPdS_S_ddddiii
addq $24, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2058:
.size _Z14gap_arr_eps_2diiPdS_S_ddddiii, .-_Z14gap_arr_eps_2diiPdS_S_ddddiii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z14gap_arr_eps_2diiPdS_S_ddddiii"
.align 8
.LC1:
.string "_Z17over_relax_eps_2diiPdS_S_dii"
.align 8
.LC2:
.string "_Z13opt_eps_splitiiPdS_ddddiiii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z14gap_arr_eps_2diiPdS_S_ddddiii(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC1(%rip), %rdx
movq %rdx, %rcx
leaq _Z17over_relax_eps_2diiPdS_S_dii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z13opt_eps_splitiiPdS_ddddiiii(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "kernels-2d-color.hip"
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void all_dots(int n, int k, double* data_dots, double* centroid_dots, double* dots) {
__shared__ double local_data_dots[32];
__shared__ double local_centroid_dots[32];
int data_index = threadIdx.x + blockIdx.x * blockDim.x;
if ((data_index < n) && (threadIdx.y == 0)) {
local_data_dots[threadIdx.x] = data_dots[data_index];
}
int centroid_index = threadIdx.x + blockIdx.y * blockDim.y;
if ((centroid_index < k) && (threadIdx.y == 1)) {
local_centroid_dots[threadIdx.x] = centroid_dots[centroid_index];
}
__syncthreads();
centroid_index = threadIdx.y + blockIdx.y * blockDim.y;
if ((data_index < n) && (centroid_index < k)) {
dots[data_index + centroid_index * n] = local_data_dots[threadIdx.x] +
local_centroid_dots[threadIdx.y];
}
} | code for sm_80
Function : _Z8all_dotsiiPdS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e280000002100 */
/*0040*/ S2R R7, SR_CTAID.Y ; /* 0x0000000000077919 */
/* 0x000e680000002600 */
/*0050*/ S2R R6, SR_TID.Y ; /* 0x0000000000067919 */
/* 0x000ea20000002200 */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R9.reuse ; /* 0x0000000000007a24 */
/* 0x101fe400078e0209 */
/*0070*/ IMAD R4, R7, c[0x0][0x4], R9 ; /* 0x0000010007047a24 */
/* 0x002fc600078e0209 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fe40003f06270 */
/*0090*/ ISETP.GE.AND P1, PT, R4, c[0x0][0x164], PT ; /* 0x0000590004007a0c */
/* 0x000fe40003f26270 */
/*00a0*/ ISETP.NE.OR P2, PT, R6.reuse, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x044fe40000745670 */
/*00b0*/ ISETP.NE.OR P1, PT, R6, 0x1, P1 ; /* 0x000000010600780c */
/* 0x000fd60000f25670 */
/*00c0*/ @!P2 MOV R3, 0x8 ; /* 0x000000080003a802 */
/* 0x000fe40000000f00 */
/*00d0*/ @!P1 MOV R5, 0x8 ; /* 0x0000000800059802 */
/* 0x000fc60000000f00 */
/*00e0*/ @!P2 IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a000002a625 */
/* 0x000fc800078e0203 */
/*00f0*/ @!P1 IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004049625 */
/* 0x000fe400078e0205 */
/*0100*/ @!P2 LDG.E.64 R2, [R2.64] ; /* 0x000000040202a981 */
/* 0x000ea8000c1e1b00 */
/*0110*/ @!P1 LDG.E.64 R4, [R4.64] ; /* 0x0000000404049981 */
/* 0x000ee2000c1e1b00 */
/*0120*/ IMAD R7, R7, c[0x0][0x4], R6 ; /* 0x0000010007077a24 */
/* 0x000fca00078e0206 */
/*0130*/ ISETP.GE.OR P0, PT, R7, c[0x0][0x164], P0 ; /* 0x0000590007007a0c */
/* 0x000fe20000706670 */
/*0140*/ @!P2 STS.64 [R9.X8], R2 ; /* 0x000000020900a388 */
/* 0x0041e80000008a00 */
/*0150*/ @!P1 STS.64 [R9.X8+0x100], R4 ; /* 0x0001000409009388 */
/* 0x0081e80000008a00 */
/*0160*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0170*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0180*/ LDS.64 R2, [R9.X8] ; /* 0x0000000009027984 */
/* 0x001fe20000008a00 */
/*0190*/ HFMA2.MMA R11, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff0b7435 */
/* 0x000fe200000001ff */
/*01a0*/ IMAD R0, R7, c[0x0][0x160], R0 ; /* 0x0000580007007a24 */
/* 0x000fc400078e0200 */
/*01b0*/ LDS.64 R4, [R6.X8+0x100] ; /* 0x0001000006047984 */
/* 0x000e240000008a00 */
/*01c0*/ DADD R2, R2, R4 ; /* 0x0000000002027229 */
/* 0x00104a0000000004 */
/*01d0*/ IMAD.WIDE R4, R0, R11, c[0x0][0x178] ; /* 0x00005e0000047625 */
/* 0x001fca00078e020b */
/*01e0*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x002fe2000c101b04 */
/*01f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0200*/ BRA 0x200; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void all_dots(int n, int k, double* data_dots, double* centroid_dots, double* dots) {
__shared__ double local_data_dots[32];
__shared__ double local_centroid_dots[32];
int data_index = threadIdx.x + blockIdx.x * blockDim.x;
if ((data_index < n) && (threadIdx.y == 0)) {
local_data_dots[threadIdx.x] = data_dots[data_index];
}
int centroid_index = threadIdx.x + blockIdx.y * blockDim.y;
if ((centroid_index < k) && (threadIdx.y == 1)) {
local_centroid_dots[threadIdx.x] = centroid_dots[centroid_index];
}
__syncthreads();
centroid_index = threadIdx.y + blockIdx.y * blockDim.y;
if ((data_index < n) && (centroid_index < k)) {
dots[data_index + centroid_index * n] = local_data_dots[threadIdx.x] +
local_centroid_dots[threadIdx.y];
}
} | .file "tmpxft_0011817c_00000000-6_all_dots.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z8all_dotsiiPdS_S_iiPdS_S_
.type _Z33__device_stub__Z8all_dotsiiPdS_S_iiPdS_S_, @function
_Z33__device_stub__Z8all_dotsiiPdS_S_iiPdS_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8all_dotsiiPdS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z8all_dotsiiPdS_S_iiPdS_S_, .-_Z33__device_stub__Z8all_dotsiiPdS_S_iiPdS_S_
.globl _Z8all_dotsiiPdS_S_
.type _Z8all_dotsiiPdS_S_, @function
_Z8all_dotsiiPdS_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z8all_dotsiiPdS_S_iiPdS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z8all_dotsiiPdS_S_, .-_Z8all_dotsiiPdS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8all_dotsiiPdS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8all_dotsiiPdS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void all_dots(int n, int k, double* data_dots, double* centroid_dots, double* dots) {
__shared__ double local_data_dots[32];
__shared__ double local_centroid_dots[32];
int data_index = threadIdx.x + blockIdx.x * blockDim.x;
if ((data_index < n) && (threadIdx.y == 0)) {
local_data_dots[threadIdx.x] = data_dots[data_index];
}
int centroid_index = threadIdx.x + blockIdx.y * blockDim.y;
if ((centroid_index < k) && (threadIdx.y == 1)) {
local_centroid_dots[threadIdx.x] = centroid_dots[centroid_index];
}
__syncthreads();
centroid_index = threadIdx.y + blockIdx.y * blockDim.y;
if ((data_index < n) && (centroid_index < k)) {
dots[data_index + centroid_index * n] = local_data_dots[threadIdx.x] +
local_centroid_dots[threadIdx.y];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void all_dots(int n, int k, double* data_dots, double* centroid_dots, double* dots) {
__shared__ double local_data_dots[32];
__shared__ double local_centroid_dots[32];
int data_index = threadIdx.x + blockIdx.x * blockDim.x;
if ((data_index < n) && (threadIdx.y == 0)) {
local_data_dots[threadIdx.x] = data_dots[data_index];
}
int centroid_index = threadIdx.x + blockIdx.y * blockDim.y;
if ((centroid_index < k) && (threadIdx.y == 1)) {
local_centroid_dots[threadIdx.x] = centroid_dots[centroid_index];
}
__syncthreads();
centroid_index = threadIdx.y + blockIdx.y * blockDim.y;
if ((data_index < n) && (centroid_index < k)) {
dots[data_index + centroid_index * n] = local_data_dots[threadIdx.x] +
local_centroid_dots[threadIdx.y];
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void all_dots(int n, int k, double* data_dots, double* centroid_dots, double* dots) {
__shared__ double local_data_dots[32];
__shared__ double local_centroid_dots[32];
int data_index = threadIdx.x + blockIdx.x * blockDim.x;
if ((data_index < n) && (threadIdx.y == 0)) {
local_data_dots[threadIdx.x] = data_dots[data_index];
}
int centroid_index = threadIdx.x + blockIdx.y * blockDim.y;
if ((centroid_index < k) && (threadIdx.y == 1)) {
local_centroid_dots[threadIdx.x] = centroid_dots[centroid_index];
}
__syncthreads();
centroid_index = threadIdx.y + blockIdx.y * blockDim.y;
if ((data_index < n) && (centroid_index < k)) {
dots[data_index + centroid_index * n] = local_data_dots[threadIdx.x] +
local_centroid_dots[threadIdx.y];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8all_dotsiiPdS_S_
.globl _Z8all_dotsiiPdS_S_
.p2align 8
.type _Z8all_dotsiiPdS_S_,@function
_Z8all_dotsiiPdS_S_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s6, s[0:1], 0x0
v_and_b32_e32 v3, 0x3ff, v0
s_add_u32 s4, s0, 32
s_addc_u32 s5, s1, 0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4]
v_cmp_eq_u32_e64 s2, 0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s6, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b64 s[8:9], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[4:5], 3, v[1:2]
v_lshlrev_b32_e32 v2, 3, v3
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, s2, s8, v4
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v5, s2, s9, v5, s2
global_load_b64 v[4:5], v[4:5], off
s_waitcnt vmcnt(0)
ds_store_b64 v2, v[4:5]
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s3
s_load_b32 s2, s[4:5], 0xc
s_load_b32 s4, s[0:1], 0x4
v_cmp_eq_u32_e64 s3, 1, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s15, s15, s2
v_add_nc_u32_e32 v4, s15, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s4, v4
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_4
s_load_b64 s[8:9], s[0:1], 0x10
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b32_e32 v2, 3, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 3, v[4:5]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, s2, s8, v4
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v5, s2, s9, v5, s2
global_load_b64 v[4:5], v[4:5], off
s_waitcnt vmcnt(0)
ds_store_b64 v2, v[4:5] offset:256
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s3
v_add_nc_u32_e32 v2, s15, v0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmp_gt_i32_e64 s2, s4, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_6
v_lshlrev_b32_e32 v3, 3, v3
v_lshlrev_b32_e32 v0, 3, v0
s_load_b64 s[0:1], s[0:1], 0x18
ds_load_b64 v[3:4], v3
ds_load_b64 v[5:6], v0 offset:256
s_waitcnt lgkmcnt(0)
v_add_f64 v[3:4], v[3:4], v[5:6]
v_mad_u64_u32 v[5:6], null, v2, s6, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[0:1], 3, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b64 v[0:1], v[3:4], off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8all_dotsiiPdS_S_
.amdhsa_group_segment_fixed_size 512
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8all_dotsiiPdS_S_, .Lfunc_end0-_Z8all_dotsiiPdS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 512
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8all_dotsiiPdS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8all_dotsiiPdS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void all_dots(int n, int k, double* data_dots, double* centroid_dots, double* dots) {
__shared__ double local_data_dots[32];
__shared__ double local_centroid_dots[32];
int data_index = threadIdx.x + blockIdx.x * blockDim.x;
if ((data_index < n) && (threadIdx.y == 0)) {
local_data_dots[threadIdx.x] = data_dots[data_index];
}
int centroid_index = threadIdx.x + blockIdx.y * blockDim.y;
if ((centroid_index < k) && (threadIdx.y == 1)) {
local_centroid_dots[threadIdx.x] = centroid_dots[centroid_index];
}
__syncthreads();
centroid_index = threadIdx.y + blockIdx.y * blockDim.y;
if ((data_index < n) && (centroid_index < k)) {
dots[data_index + centroid_index * n] = local_data_dots[threadIdx.x] +
local_centroid_dots[threadIdx.y];
}
} | .text
.file "all_dots.hip"
.globl _Z23__device_stub__all_dotsiiPdS_S_ # -- Begin function _Z23__device_stub__all_dotsiiPdS_S_
.p2align 4, 0x90
.type _Z23__device_stub__all_dotsiiPdS_S_,@function
_Z23__device_stub__all_dotsiiPdS_S_: # @_Z23__device_stub__all_dotsiiPdS_S_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 4(%rsp)
movl %esi, (%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8all_dotsiiPdS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z23__device_stub__all_dotsiiPdS_S_, .Lfunc_end0-_Z23__device_stub__all_dotsiiPdS_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8all_dotsiiPdS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8all_dotsiiPdS_S_,@object # @_Z8all_dotsiiPdS_S_
.section .rodata,"a",@progbits
.globl _Z8all_dotsiiPdS_S_
.p2align 3, 0x0
_Z8all_dotsiiPdS_S_:
.quad _Z23__device_stub__all_dotsiiPdS_S_
.size _Z8all_dotsiiPdS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8all_dotsiiPdS_S_"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__all_dotsiiPdS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8all_dotsiiPdS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z8all_dotsiiPdS_S_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc60000000a00 */
/*0030*/ S2R R9, SR_TID.X ; /* 0x0000000000097919 */
/* 0x000e280000002100 */
/*0040*/ S2R R7, SR_CTAID.Y ; /* 0x0000000000077919 */
/* 0x000e680000002600 */
/*0050*/ S2R R6, SR_TID.Y ; /* 0x0000000000067919 */
/* 0x000ea20000002200 */
/*0060*/ IMAD R0, R0, c[0x0][0x0], R9.reuse ; /* 0x0000000000007a24 */
/* 0x101fe400078e0209 */
/*0070*/ IMAD R4, R7, c[0x0][0x4], R9 ; /* 0x0000010007047a24 */
/* 0x002fc600078e0209 */
/*0080*/ ISETP.GE.AND P0, PT, R0, c[0x0][0x160], PT ; /* 0x0000580000007a0c */
/* 0x000fe40003f06270 */
/*0090*/ ISETP.GE.AND P1, PT, R4, c[0x0][0x164], PT ; /* 0x0000590004007a0c */
/* 0x000fe40003f26270 */
/*00a0*/ ISETP.NE.OR P2, PT, R6.reuse, RZ, P0 ; /* 0x000000ff0600720c */
/* 0x044fe40000745670 */
/*00b0*/ ISETP.NE.OR P1, PT, R6, 0x1, P1 ; /* 0x000000010600780c */
/* 0x000fd60000f25670 */
/*00c0*/ @!P2 MOV R3, 0x8 ; /* 0x000000080003a802 */
/* 0x000fe40000000f00 */
/*00d0*/ @!P1 MOV R5, 0x8 ; /* 0x0000000800059802 */
/* 0x000fc60000000f00 */
/*00e0*/ @!P2 IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a000002a625 */
/* 0x000fc800078e0203 */
/*00f0*/ @!P1 IMAD.WIDE R4, R4, R5, c[0x0][0x170] ; /* 0x00005c0004049625 */
/* 0x000fe400078e0205 */
/*0100*/ @!P2 LDG.E.64 R2, [R2.64] ; /* 0x000000040202a981 */
/* 0x000ea8000c1e1b00 */
/*0110*/ @!P1 LDG.E.64 R4, [R4.64] ; /* 0x0000000404049981 */
/* 0x000ee2000c1e1b00 */
/*0120*/ IMAD R7, R7, c[0x0][0x4], R6 ; /* 0x0000010007077a24 */
/* 0x000fca00078e0206 */
/*0130*/ ISETP.GE.OR P0, PT, R7, c[0x0][0x164], P0 ; /* 0x0000590007007a0c */
/* 0x000fe20000706670 */
/*0140*/ @!P2 STS.64 [R9.X8], R2 ; /* 0x000000020900a388 */
/* 0x0041e80000008a00 */
/*0150*/ @!P1 STS.64 [R9.X8+0x100], R4 ; /* 0x0001000409009388 */
/* 0x0081e80000008a00 */
/*0160*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0170*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0180*/ LDS.64 R2, [R9.X8] ; /* 0x0000000009027984 */
/* 0x001fe20000008a00 */
/*0190*/ HFMA2.MMA R11, -RZ, RZ, 0, 4.76837158203125e-07 ; /* 0x00000008ff0b7435 */
/* 0x000fe200000001ff */
/*01a0*/ IMAD R0, R7, c[0x0][0x160], R0 ; /* 0x0000580007007a24 */
/* 0x000fc400078e0200 */
/*01b0*/ LDS.64 R4, [R6.X8+0x100] ; /* 0x0001000006047984 */
/* 0x000e240000008a00 */
/*01c0*/ DADD R2, R2, R4 ; /* 0x0000000002027229 */
/* 0x00104a0000000004 */
/*01d0*/ IMAD.WIDE R4, R0, R11, c[0x0][0x178] ; /* 0x00005e0000047625 */
/* 0x001fca00078e020b */
/*01e0*/ STG.E.64 [R4.64], R2 ; /* 0x0000000204007986 */
/* 0x002fe2000c101b04 */
/*01f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0200*/ BRA 0x200; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0210*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0220*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0230*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0240*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0250*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0260*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0270*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0280*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0290*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*02f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z8all_dotsiiPdS_S_
.globl _Z8all_dotsiiPdS_S_
.p2align 8
.type _Z8all_dotsiiPdS_S_,@function
_Z8all_dotsiiPdS_S_:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s6, s[0:1], 0x0
v_and_b32_e32 v3, 0x3ff, v0
s_add_u32 s4, s0, 32
s_addc_u32 s5, s1, 0
v_bfe_u32 v0, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[1:2], null, s14, s2, v[3:4]
v_cmp_eq_u32_e64 s2, 0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, s6, v1
s_and_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_2
s_load_b64 s[8:9], s[0:1], 0x8
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[4:5], 3, v[1:2]
v_lshlrev_b32_e32 v2, 3, v3
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, s2, s8, v4
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v5, s2, s9, v5, s2
global_load_b64 v[4:5], v[4:5], off
s_waitcnt vmcnt(0)
ds_store_b64 v2, v[4:5]
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s3
s_load_b32 s2, s[4:5], 0xc
s_load_b32 s4, s[0:1], 0x4
v_cmp_eq_u32_e64 s3, 1, v0
s_waitcnt lgkmcnt(0)
s_lshr_b32 s2, s2, 16
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_mul_i32 s15, s15, s2
v_add_nc_u32_e32 v4, s15, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_gt_i32_e64 s2, s4, v4
s_and_b32 s2, s2, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_4
s_load_b64 s[8:9], s[0:1], 0x10
v_ashrrev_i32_e32 v5, 31, v4
v_lshlrev_b32_e32 v2, 3, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[4:5], 3, v[4:5]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v4, s2, s8, v4
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v5, s2, s9, v5, s2
global_load_b64 v[4:5], v[4:5], off
s_waitcnt vmcnt(0)
ds_store_b64 v2, v[4:5] offset:256
.LBB0_4:
s_or_b32 exec_lo, exec_lo, s3
v_add_nc_u32_e32 v2, s15, v0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
v_cmp_gt_i32_e64 s2, s4, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_6
v_lshlrev_b32_e32 v3, 3, v3
v_lshlrev_b32_e32 v0, 3, v0
s_load_b64 s[0:1], s[0:1], 0x18
ds_load_b64 v[3:4], v3
ds_load_b64 v[5:6], v0 offset:256
s_waitcnt lgkmcnt(0)
v_add_f64 v[3:4], v[3:4], v[5:6]
v_mad_u64_u32 v[5:6], null, v2, s6, v[1:2]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v6, 31, v5
v_lshlrev_b64 v[0:1], 3, v[5:6]
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b64 v[0:1], v[3:4], off
.LBB0_6:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z8all_dotsiiPdS_S_
.amdhsa_group_segment_fixed_size 512
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 7
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z8all_dotsiiPdS_S_, .Lfunc_end0-_Z8all_dotsiiPdS_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .offset: 0
.size: 4
.value_kind: by_value
- .offset: 4
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 512
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z8all_dotsiiPdS_S_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z8all_dotsiiPdS_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 7
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0011817c_00000000-6_all_dots.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z8all_dotsiiPdS_S_iiPdS_S_
.type _Z33__device_stub__Z8all_dotsiiPdS_S_iiPdS_S_, @function
_Z33__device_stub__Z8all_dotsiiPdS_S_iiPdS_S_:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movl %edi, 28(%rsp)
movl %esi, 24(%rsp)
movq %rdx, 16(%rsp)
movq %rcx, 8(%rsp)
movq %r8, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 28(%rsp), %rax
movq %rax, 96(%rsp)
leaq 24(%rsp), %rax
movq %rax, 104(%rsp)
leaq 16(%rsp), %rax
movq %rax, 112(%rsp)
leaq 8(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z8all_dotsiiPdS_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z33__device_stub__Z8all_dotsiiPdS_S_iiPdS_S_, .-_Z33__device_stub__Z8all_dotsiiPdS_S_iiPdS_S_
.globl _Z8all_dotsiiPdS_S_
.type _Z8all_dotsiiPdS_S_, @function
_Z8all_dotsiiPdS_S_:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z8all_dotsiiPdS_S_iiPdS_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z8all_dotsiiPdS_S_, .-_Z8all_dotsiiPdS_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z8all_dotsiiPdS_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z8all_dotsiiPdS_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4:
| .text
.file "all_dots.hip"
.globl _Z23__device_stub__all_dotsiiPdS_S_ # -- Begin function _Z23__device_stub__all_dotsiiPdS_S_
.p2align 4, 0x90
.type _Z23__device_stub__all_dotsiiPdS_S_,@function
_Z23__device_stub__all_dotsiiPdS_S_: # @_Z23__device_stub__all_dotsiiPdS_S_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movl %edi, 4(%rsp)
movl %esi, (%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
leaq 4(%rsp), %rax
movq %rax, 80(%rsp)
movq %rsp, %rax
movq %rax, 88(%rsp)
leaq 72(%rsp), %rax
movq %rax, 96(%rsp)
leaq 64(%rsp), %rax
movq %rax, 104(%rsp)
leaq 56(%rsp), %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z8all_dotsiiPdS_S_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z23__device_stub__all_dotsiiPdS_S_, .Lfunc_end0-_Z23__device_stub__all_dotsiiPdS_S_
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z8all_dotsiiPdS_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z8all_dotsiiPdS_S_,@object # @_Z8all_dotsiiPdS_S_
.section .rodata,"a",@progbits
.globl _Z8all_dotsiiPdS_S_
.p2align 3, 0x0
_Z8all_dotsiiPdS_S_:
.quad _Z23__device_stub__all_dotsiiPdS_S_
.size _Z8all_dotsiiPdS_S_, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z8all_dotsiiPdS_S_"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z23__device_stub__all_dotsiiPdS_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z8all_dotsiiPdS_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly.
| #include "iostream"
#include <fstream>
#include <string>
#define N 1024 // size of matrix is N*N
#define BLOCK_SIZE 16
__global__ void gpuMult ( float * a, float * b, int n, float * c ) {
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float result = 0.0f;
int idxA = n * BLOCK_SIZE * by + n * ty;
int idxB = BLOCK_SIZE * bx + tx;
for ( int idx = 0; idx < n; idx++ )
result += a [idxA + idx] * b [idxB + idx*n];
int idxC = n * BLOCK_SIZE * by + BLOCK_SIZE * bx;
c [idxC + n * ty + tx] = result;
}
void cpuMult( float * a, float * b, int n, float * c ) {
for (int rowIdxC = 0; rowIdxC < n; rowIdxC++) {
for (int colIdxC = 0; colIdxC < n; colIdxC++) {
float resultC = 0.0f;
for (int idx = 0; idx < n; idx++) {
int idxA = (rowIdxC * n) + idx;
int idxB = colIdxC + (idx * n);
resultC += a[idxA] * b[idxB];
}
int idxC = rowIdxC * n + colIdxC;
c[idxC] = resultC;
}
}
}
int main() {
float * a = new float [N*N];
float * b = new float [N*N];
float * c = new float [N*N];
for ( int i = 0; i < N; i++ ) {
for ( int j = 0; j < N; j++ ) {
int k = N * i + j;
a [k] = k;
b [k] = k;
}
}
//CPU-------------------------------------
clock_t start_s = clock();
cpuMult(a, b, N, c);
clock_t stop_s = clock();
std::cout << "Time CPU: " << (stop_s - start_s) / double(CLOCKS_PER_SEC) * 1000 << " ms\n";
//GPU-------------------------------------
int size = N * N * sizeof(float);
float * adev = NULL;
float * bdev = NULL;
float * cdev = NULL;
cudaMalloc((void**)&adev, size);
cudaMalloc((void**)&bdev, size);
cudaMalloc((void**)&cdev, size);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocks(N / threads.x, N / threads.y);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float gpuTime = 0.0f;
cudaEventRecord(start, 0);
cudaMemcpy(adev, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(bdev, b, size, cudaMemcpyHostToDevice);
gpuMult<<<blocks, threads>>>(adev, bdev, N, cdev);
cudaMemcpy(c, cdev, size, cudaMemcpyDeviceToHost);
cudaEventRecord( stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpuTime, start, stop);
std::cout << "Time GPU: " << gpuTime << " ms\n";
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(adev);
cudaFree(bdev);
cudaFree(cdev);
delete a;
delete b;
delete c;
return 0;
}
| code for sm_80
Function : _Z7gpuMultPfS_iS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ MOV R4, c[0x0][0x170] ; /* 0x00005c0000047a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R14, -RZ, RZ, 0, 0 ; /* 0x00000000ff0e7435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0060*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fc60003f06270 */
/*0070*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0080*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000ea20000002200 */
/*0090*/ LEA R7, R0, R3, 0x4 ; /* 0x0000000300077211 */
/* 0x001fca00078e20ff */
/*00a0*/ @!P0 BRA 0xbf0 ; /* 0x00000b4000008947 */
/* 0x000fea0003800000 */
/*00b0*/ IADD3 R6, R4.reuse, -0x1, RZ ; /* 0xffffffff04067810 */
/* 0x040fe40007ffe0ff */
/*00c0*/ LOP3.LUT R8, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304087812 */
/* 0x000fe400078ec0ff */
/*00d0*/ ISETP.GE.U32.AND P0, PT, R6, 0x3, PT ; /* 0x000000030600780c */
/* 0x000fe40003f06070 */
/*00e0*/ MOV R14, RZ ; /* 0x000000ff000e7202 */
/* 0x000fe40000000f00 */
/*00f0*/ MOV R6, RZ ; /* 0x000000ff00067202 */
/* 0x000fd20000000f00 */
/*0100*/ @!P0 BRA 0xad0 ; /* 0x000009c000008947 */
/* 0x000fea0003800000 */
/*0110*/ IADD3 R9, -R8, c[0x0][0x170], RZ ; /* 0x00005c0008097a10 */
/* 0x000fe20007ffe1ff */
/*0120*/ HFMA2.MMA R16, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff107435 */
/* 0x000fe200000001ff */
/*0130*/ LEA R10, R2, R5, 0x4 ; /* 0x00000005020a7211 */
/* 0x006fe200078e20ff */
/*0140*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0150*/ ISETP.GT.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f04270 */
/*0160*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */
/* 0x000fe200000001ff */
/*0170*/ MOV R14, RZ ; /* 0x000000ff000e7202 */
/* 0x000fe20000000f00 */
/*0180*/ IMAD R10, R10, c[0x0][0x170], RZ ; /* 0x00005c000a0a7a24 */
/* 0x000fc800078e02ff */
/*0190*/ IMAD.WIDE R16, R7, R16, c[0x0][0x168] ; /* 0x00005a0007107625 */
/* 0x000fcc00078e0210 */
/*01a0*/ @!P0 BRA 0x940 ; /* 0x0000079000008947 */
/* 0x000fea0003800000 */
/*01b0*/ ISETP.GT.AND P1, PT, R9, 0xc, PT ; /* 0x0000000c0900780c */
/* 0x000fe40003f24270 */
/*01c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01d0*/ @!P1 BRA 0x680 ; /* 0x000004a000009947 */
/* 0x000fea0003800000 */
/*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*01f0*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0200*/ IMAD.WIDE R28, R4, 0x4, R16 ; /* 0x00000004041c7825 */
/* 0x000fe200078e0210 */
/*0210*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fe20008000f00 */
/*0220*/ LDG.E R25, [R16.64] ; /* 0x0000000410197981 */
/* 0x0000a8000c1e1900 */
/*0230*/ IMAD.WIDE R12, R10, 0x4, R12 ; /* 0x000000040a0c7825 */
/* 0x000fe200078e020c */
/*0240*/ LDG.E R15, [R28.64] ; /* 0x000000041c0f7981 */
/* 0x0002e8000c1e1900 */
/*0250*/ LDG.E R23, [R12.64] ; /* 0x000000040c177981 */
/* 0x000ea8000c1e1900 */
/*0260*/ LDG.E R24, [R12.64+0x4] ; /* 0x000004040c187981 */
/* 0x000ee2000c1e1900 */
/*0270*/ IMAD.WIDE R28, R4, 0x4, R28 ; /* 0x00000004041c7825 */
/* 0x002fc600078e021c */
/*0280*/ LDG.E R20, [R12.64+0x8] ; /* 0x000008040c147981 */
/* 0x000f28000c1e1900 */
/*0290*/ LDG.E R11, [R28.64] ; /* 0x000000041c0b7981 */
/* 0x000322000c1e1900 */
/*02a0*/ IMAD.WIDE R18, R4, 0x4, R28 ; /* 0x0000000404127825 */
/* 0x000fc600078e021c */
/*02b0*/ LDG.E R22, [R12.64+0xc] ; /* 0x00000c040c167981 */
/* 0x000f66000c1e1900 */
/*02c0*/ IMAD.WIDE R26, R4.reuse, 0x4, R18 ; /* 0x00000004041a7825 */
/* 0x040fe200078e0212 */
/*02d0*/ LDG.E R21, [R18.64] ; /* 0x0000000412157981 */
/* 0x000368000c1e1900 */
/*02e0*/ LDG.E R16, [R12.64+0x10] ; /* 0x000010040c107981 */
/* 0x001f68000c1e1900 */
/*02f0*/ LDG.E R17, [R26.64] ; /* 0x000000041a117981 */
/* 0x000164000c1e1900 */
/*0300*/ IMAD.WIDE R26, R4, 0x4, R26 ; /* 0x00000004041a7825 */
/* 0x001fcc00078e021a */
/*0310*/ IMAD.WIDE R28, R4, 0x4, R26 ; /* 0x00000004041c7825 */
/* 0x002fcc00078e021a */
/*0320*/ IMAD.WIDE R18, R4, 0x4, R28 ; /* 0x0000000404127825 */
/* 0x000fc800078e021c */
/*0330*/ FFMA R23, R25, R23, R14 ; /* 0x0000001719177223 */
/* 0x004fe4000000000e */
/*0340*/ LDG.E R14, [R26.64] ; /* 0x000000041a0e7981 */
/* 0x000aa4000c1e1900 */
/*0350*/ FFMA R23, R15, R24, R23 ; /* 0x000000180f177223 */
/* 0x008fe40000000017 */
/*0360*/ LDG.E R15, [R12.64+0x14] ; /* 0x000014040c0f7981 */
/* 0x000ea8000c1e1900 */
/*0370*/ LDG.E R24, [R28.64] ; /* 0x000000041c187981 */
/* 0x0000e2000c1e1900 */
/*0380*/ FFMA R23, R11, R20, R23 ; /* 0x000000140b177223 */
/* 0x010fc60000000017 */
/*0390*/ LDG.E R25, [R12.64+0x18] ; /* 0x000018040c197981 */
/* 0x000ee8000c1e1900 */
/*03a0*/ LDG.E R20, [R18.64] ; /* 0x0000000412147981 */
/* 0x000322000c1e1900 */
/*03b0*/ FFMA R27, R21, R22, R23 ; /* 0x00000016151b7223 */
/* 0x020fc60000000017 */
/*03c0*/ LDG.E R11, [R12.64+0x1c] ; /* 0x00001c040c0b7981 */
/* 0x000f28000c1e1900 */
/*03d0*/ LDG.E R26, [R12.64+0x20] ; /* 0x000020040c1a7981 */
/* 0x000f62000c1e1900 */
/*03e0*/ IMAD.WIDE R18, R4, 0x4, R18 ; /* 0x0000000404127825 */
/* 0x002fca00078e0212 */
/*03f0*/ LDG.E R21, [R18.64] ; /* 0x0000000412157981 */
/* 0x000562000c1e1900 */
/*0400*/ IMAD.WIDE R22, R4, 0x4, R18 ; /* 0x0000000404167825 */
/* 0x000fc800078e0212 */
/*0410*/ FFMA R29, R17, R16, R27 ; /* 0x00000010111d7223 */
/* 0x001fe2000000001b */
/*0420*/ LDG.E R28, [R22.64] ; /* 0x00000004161c7981 */
/* 0x000168000c1e1900 */
/*0430*/ LDG.E R27, [R12.64+0x24] ; /* 0x000024040c1b7981 */
/* 0x000f62000c1e1900 */
/*0440*/ IMAD.WIDE R16, R4, 0x4, R22 ; /* 0x0000000404107825 */
/* 0x000fc800078e0216 */
/*0450*/ FFMA R18, R14, R15, R29 ; /* 0x0000000f0e127223 */
/* 0x004fe4000000001d */
/*0460*/ IMAD.WIDE R14, R4, 0x4, R16 ; /* 0x00000004040e7825 */
/* 0x000fe200078e0210 */
/*0470*/ LDG.E R29, [R16.64] ; /* 0x00000004101d7981 */
/* 0x0002a6000c1e1900 */
/*0480*/ FFMA R25, R24, R25, R18 ; /* 0x0000001918197223 */
/* 0x008fe40000000012 */
/*0490*/ IMAD.WIDE R18, R4, 0x4, R14 ; /* 0x0000000404127825 */
/* 0x000fe200078e020e */
/*04a0*/ LDG.E R24, [R12.64+0x28] ; /* 0x000028040c187981 */
/* 0x000ea8000c1e1900 */
/*04b0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0006a2000c1e1900 */
/*04c0*/ FFMA R20, R20, R11, R25 ; /* 0x0000000b14147223 */
/* 0x010fc40000000019 */
/*04d0*/ IMAD.WIDE R22, R4.reuse, 0x4, R18 ; /* 0x0000000404167825 */
/* 0x041fe200078e0212 */
/*04e0*/ LDG.E R25, [R12.64+0x2c] ; /* 0x00002c040c197981 */
/* 0x000f28000c1e1900 */
/*04f0*/ LDG.E R11, [R18.64] ; /* 0x00000004120b7981 */
/* 0x000122000c1e1900 */
/*0500*/ IMAD.WIDE R16, R4, 0x4, R22 ; /* 0x0000000404107825 */
/* 0x002fc800078e0216 */
/*0510*/ FFMA R20, R21, R26, R20 ; /* 0x0000001a15147223 */
/* 0x020fe20000000014 */
/*0520*/ LDG.E R15, [R16.64] ; /* 0x00000004100f7981 */
/* 0x0082e8000c1e1900 */
/*0530*/ LDG.E R18, [R12.64+0x30] ; /* 0x000030040c127981 */
/* 0x001f68000c1e1900 */
/*0540*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */
/* 0x0000e4000c1e1900 */
/*0550*/ FFMA R22, R28, R27, R20 ; /* 0x0000001b1c167223 */
/* 0x001fc40000000014 */
/*0560*/ LDG.E R27, [R12.64+0x34] ; /* 0x000034040c1b7981 */
/* 0x000ee2000c1e1900 */
/*0570*/ IMAD.WIDE R20, R4, 0x4, R16 ; /* 0x0000000404147825 */
/* 0x000fc600078e0210 */
/*0580*/ LDG.E R28, [R12.64+0x38] ; /* 0x000038040c1c7981 */
/* 0x000ee8000c1e1900 */
/*0590*/ LDG.E R16, [R12.64+0x3c] ; /* 0x00003c040c107981 */
/* 0x002ee8000c1e1900 */
/*05a0*/ LDG.E R19, [R20.64] ; /* 0x0000000414137981 */
/* 0x000ee2000c1e1900 */
/*05b0*/ IADD3 R9, R9, -0x10, RZ ; /* 0xfffffff009097810 */
/* 0x000fc80007ffe0ff */
/*05c0*/ ISETP.GT.AND P1, PT, R9, 0xc, PT ; /* 0x0000000c0900780c */
/* 0x000fe20003f24270 */
/*05d0*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*05e0*/ IADD3 R6, R6, 0x10, RZ ; /* 0x0000001006067810 */
/* 0x000fc60007ffe0ff */
/*05f0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0600*/ FFMA R24, R29, R24, R22 ; /* 0x000000181d187223 */
/* 0x004fc80000000016 */
/*0610*/ FFMA R14, R14, R25, R24 ; /* 0x000000190e0e7223 */
/* 0x010fc80000000018 */
/*0620*/ FFMA R11, R11, R18, R14 ; /* 0x000000120b0b7223 */
/* 0x020fc8000000000e */
/*0630*/ FFMA R11, R26, R27, R11 ; /* 0x0000001b1a0b7223 */
/* 0x008fc8000000000b */
/*0640*/ FFMA R11, R15, R28, R11 ; /* 0x0000001c0f0b7223 */
/* 0x000fc8000000000b */
/*0650*/ FFMA R14, R19, R16, R11 ; /* 0x00000010130e7223 */
/* 0x000fe4000000000b */
/*0660*/ IMAD.WIDE R16, R4, 0x4, R20 ; /* 0x0000000404107825 */
/* 0x000fe200078e0214 */
/*0670*/ @P1 BRA 0x1f0 ; /* 0xfffffb7000001947 */
/* 0x000fea000383ffff */
/*0680*/ ISETP.GT.AND P1, PT, R9, 0x4, PT ; /* 0x000000040900780c */
/* 0x000fda0003f24270 */
/*0690*/ @!P1 BRA 0x920 ; /* 0x0000028000009947 */
/* 0x000fea0003800000 */
/*06a0*/ MOV R20, UR6 ; /* 0x0000000600147c02 */
/* 0x000fe40008000f00 */
/*06b0*/ MOV R21, UR7 ; /* 0x0000000700157c02 */
/* 0x000fe20008000f00 */
/*06c0*/ IMAD.WIDE R22, R4, 0x4, R16 ; /* 0x0000000404167825 */
/* 0x000fe200078e0210 */
/*06d0*/ LDG.E R11, [R16.64] ; /* 0x00000004100b7981 */
/* 0x0000a6000c1e1900 */
/*06e0*/ IMAD.WIDE R20, R10, 0x4, R20 ; /* 0x000000040a147825 */
/* 0x000fe200078e0214 */
/*06f0*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */
/* 0x0002e8000c1e1900 */
/*0700*/ LDG.E R15, [R20.64] ; /* 0x00000004140f7981 */
/* 0x000ea8000c1e1900 */
/*0710*/ LDG.E R27, [R20.64+0x4] ; /* 0x00000404141b7981 */
/* 0x000ee2000c1e1900 */
/*0720*/ IMAD.WIDE R12, R4, 0x4, R22 ; /* 0x00000004040c7825 */
/* 0x000fc600078e0216 */
/*0730*/ LDG.E R29, [R20.64+0x8] ; /* 0x00000804141d7981 */
/* 0x000f26000c1e1900 */
/*0740*/ IMAD.WIDE R16, R4.reuse, 0x4, R12 ; /* 0x0000000404107825 */
/* 0x041fe200078e020c */
/*0750*/ LDG.E R22, [R20.64+0x10] ; /* 0x0000100414167981 */
/* 0x002f68000c1e1900 */
/*0760*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000122000c1e1900 */
/*0770*/ IMAD.WIDE R18, R4, 0x4, R16 ; /* 0x0000000404127825 */
/* 0x000fc600078e0210 */
/*0780*/ LDG.E R28, [R16.64] ; /* 0x00000004101c7981 */
/* 0x000326000c1e1900 */
/*0790*/ IMAD.WIDE R24, R4, 0x4, R18 ; /* 0x0000000404187825 */
/* 0x000fe200078e0212 */
/*07a0*/ LDG.E R13, [R18.64] ; /* 0x00000004120d7981 */
/* 0x001168000c1e1900 */
/*07b0*/ LDG.E R19, [R20.64+0xc] ; /* 0x00000c0414137981 */
/* 0x001f68000c1e1900 */
/*07c0*/ LDG.E R18, [R20.64+0x18] ; /* 0x0000180414127981 */
/* 0x000f62000c1e1900 */
/*07d0*/ FFMA R23, R11, R15, R14 ; /* 0x0000000f0b177223 */
/* 0x004fc4000000000e */
/*07e0*/ IMAD.WIDE R14, R4.reuse, 0x4, R24 ; /* 0x00000004040e7825 */
/* 0x040fe200078e0218 */
/*07f0*/ LDG.E R11, [R24.64] ; /* 0x00000004180b7981 */
/* 0x0006aa000c1e1900 */
/*0800*/ IMAD.WIDE R16, R4, 0x4, R14 ; /* 0x0000000404107825 */
/* 0x002fc800078e020e */
/*0810*/ FFMA R24, R26, R27, R23 ; /* 0x0000001b1a187223 */
/* 0x008fe40000000017 */
/*0820*/ LDG.E R26, [R20.64+0x14] ; /* 0x00001404141a7981 */
/* 0x000ea8000c1e1900 */
/*0830*/ LDG.E R23, [R14.64] ; /* 0x000000040e177981 */
/* 0x0000e8000c1e1900 */
/*0840*/ LDG.E R27, [R16.64] ; /* 0x00000004101b7981 */
/* 0x0002e8000c1e1900 */
/*0850*/ LDG.E R14, [R20.64+0x1c] ; /* 0x00001c04140e7981 */
/* 0x001ee2000c1e1900 */
/*0860*/ FFMA R12, R12, R29, R24 ; /* 0x0000001d0c0c7223 */
/* 0x010fc80000000018 */
/*0870*/ FFMA R12, R28, R19, R12 ; /* 0x000000131c0c7223 */
/* 0x020fc8000000000c */
/*0880*/ FFMA R12, R13, R22, R12 ; /* 0x000000160d0c7223 */
/* 0x000fe2000000000c */
/*0890*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*08a0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0e170 */
/*08b0*/ IMAD.WIDE R16, R4, 0x4, R16 ; /* 0x0000000404107825 */
/* 0x002fe200078e0210 */
/*08c0*/ IADD3 R6, R6, 0x8, RZ ; /* 0x0000000806067810 */
/* 0x000fe40007ffe0ff */
/*08d0*/ IADD3 R9, R9, -0x8, RZ ; /* 0xfffffff809097810 */
/* 0x000fe20007ffe0ff */
/*08e0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08f0*/ FFMA R11, R11, R26, R12 ; /* 0x0000001a0b0b7223 */
/* 0x004fc8000000000c */
/*0900*/ FFMA R11, R23, R18, R11 ; /* 0x00000012170b7223 */
/* 0x008fc8000000000b */
/*0910*/ FFMA R14, R27, R14, R11 ; /* 0x0000000e1b0e7223 */
/* 0x000fe4000000000b */
/*0920*/ ISETP.NE.OR P0, PT, R9, RZ, P0 ; /* 0x000000ff0900720c */
/* 0x000fda0000705670 */
/*0930*/ @!P0 BRA 0xad0 ; /* 0x0000019000008947 */
/* 0x000fea0003800000 */
/*0940*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0950*/ IMAD.WIDE R18, R4, 0x4, R16 ; /* 0x0000000404127825 */
/* 0x000fe200078e0210 */
/*0960*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fe20008000f00 */
/*0970*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ea8000c1e1900 */
/*0980*/ IMAD.WIDE R12, R10, 0x4, R12 ; /* 0x000000040a0c7825 */
/* 0x000fc800078e020c */
/*0990*/ IMAD.WIDE R20, R4.reuse, 0x4, R18 ; /* 0x0000000404147825 */
/* 0x040fe200078e0212 */
/*09a0*/ LDG.E R11, [R12.64] ; /* 0x000000040c0b7981 */
/* 0x000ea8000c1e1900 */
/*09b0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ee2000c1e1900 */
/*09c0*/ IMAD.WIDE R22, R4, 0x4, R20 ; /* 0x0000000404167825 */
/* 0x000fc600078e0214 */
/*09d0*/ LDG.E R15, [R12.64+0x4] ; /* 0x000004040c0f7981 */
/* 0x000ee8000c1e1900 */
/*09e0*/ LDG.E R24, [R20.64] ; /* 0x0000000414187981 */
/* 0x000f28000c1e1900 */
/*09f0*/ LDG.E R25, [R12.64+0x8] ; /* 0x000008040c197981 */
/* 0x000f28000c1e1900 */
/*0a00*/ LDG.E R27, [R12.64+0xc] ; /* 0x00000c040c1b7981 */
/* 0x000f68000c1e1900 */
/*0a10*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */
/* 0x000f62000c1e1900 */
/*0a20*/ IADD3 R9, R9, -0x4, RZ ; /* 0xfffffffc09097810 */
/* 0x000fc80007ffe0ff */
/*0a30*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f05270 */
/*0a40*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0a50*/ IADD3 R6, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x000fc60007ffe0ff */
/*0a60*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0a70*/ FFMA R11, R17, R11, R14 ; /* 0x0000000b110b7223 */
/* 0x004fc8000000000e */
/*0a80*/ FFMA R11, R18, R15, R11 ; /* 0x0000000f120b7223 */
/* 0x008fe4000000000b */
/*0a90*/ IMAD.WIDE R16, R4, 0x4, R22 ; /* 0x0000000404107825 */
/* 0x000fc800078e0216 */
/*0aa0*/ FFMA R11, R24, R25, R11 ; /* 0x00000019180b7223 */
/* 0x010fc8000000000b */
/*0ab0*/ FFMA R14, R26, R27, R11 ; /* 0x0000001b1a0e7223 */
/* 0x020fe2000000000b */
/*0ac0*/ @P0 BRA 0x940 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0ad0*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fda0003f05270 */
/*0ae0*/ @!P0 BRA 0xbf0 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*0af0*/ HFMA2.MMA R12, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0c7435 */
/* 0x000fe200000001ff */
/*0b00*/ LEA R9, R2, R5, 0x4 ; /* 0x0000000502097211 */
/* 0x006fe200078e20ff */
/*0b10*/ IMAD R3, R6, c[0x0][0x170], R3 ; /* 0x00005c0006037a24 */
/* 0x000fc800078e0203 */
/*0b20*/ IMAD R10, R9, c[0x0][0x170], R6 ; /* 0x00005c00090a7a24 */
/* 0x000fe200078e0206 */
/*0b30*/ LEA R3, R0, R3, 0x4 ; /* 0x0000000300037211 */
/* 0x000fc600078e20ff */
/*0b40*/ IMAD.WIDE R10, R10, R12, c[0x0][0x160] ; /* 0x000058000a0a7625 */
/* 0x000fc800078e020c */
/*0b50*/ IMAD.WIDE R12, R3, R12, c[0x0][0x168] ; /* 0x00005a00030c7625 */
/* 0x000fca00078e020c */
/*0b60*/ LDG.E R3, [R12.64] ; /* 0x000000040c037981 */
/* 0x0000a8000c1e1900 */
/*0b70*/ LDG.E R0, [R10.64] ; /* 0x000000040a007981 */
/* 0x0002a2000c1e1900 */
/*0b80*/ IADD3 R8, R8, -0x1, RZ ; /* 0xffffffff08087810 */
/* 0x000fc80007ffe0ff */
/*0b90*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f05270 */
/*0ba0*/ IMAD.WIDE R12, R4, 0x4, R12 ; /* 0x00000004040c7825 */
/* 0x001fe200078e020c */
/*0bb0*/ IADD3 R10, P1, R10, 0x4, RZ ; /* 0x000000040a0a7810 */
/* 0x002fc80007f3e0ff */
/*0bc0*/ IADD3.X R11, RZ, R11, RZ, P1, !PT ; /* 0x0000000bff0b7210 */
/* 0x000fe20000ffe4ff */
/*0bd0*/ FFMA R14, R3, R0, R14 ; /* 0x00000000030e7223 */
/* 0x004fcc000000000e */
/*0be0*/ @P0 BRA 0xb60 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0bf0*/ SHF.L.U32 R4, R4, 0x4, RZ ; /* 0x0000000404047819 */
/* 0x000fe200000006ff */
/*0c00*/ IMAD R5, R5, c[0x0][0x170], RZ ; /* 0x00005c0005057a24 */
/* 0x004fe200078e02ff */
/*0c10*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fc60000000f00 */
/*0c20*/ IMAD R4, R4, R2, R5 ; /* 0x0000000204047224 */
/* 0x002fca00078e0205 */
/*0c30*/ IADD3 R2, R7, R4, RZ ; /* 0x0000000407027210 */
/* 0x000fca0007ffe0ff */
/*0c40*/ IMAD.WIDE R2, R2, R3, c[0x0][0x178] ; /* 0x00005e0002027625 */
/* 0x000fca00078e0203 */
/*0c50*/ STG.E [R2.64], R14 ; /* 0x0000000e02007986 */
/* 0x000fe2000c101904 */
/*0c60*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c70*/ BRA 0xc70; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly.
| #include "iostream"
#include <fstream>
#include <string>
#define N 1024 // size of matrix is N*N
#define BLOCK_SIZE 16
__global__ void gpuMult ( float * a, float * b, int n, float * c ) {
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float result = 0.0f;
int idxA = n * BLOCK_SIZE * by + n * ty;
int idxB = BLOCK_SIZE * bx + tx;
for ( int idx = 0; idx < n; idx++ )
result += a [idxA + idx] * b [idxB + idx*n];
int idxC = n * BLOCK_SIZE * by + BLOCK_SIZE * bx;
c [idxC + n * ty + tx] = result;
}
void cpuMult( float * a, float * b, int n, float * c ) {
for (int rowIdxC = 0; rowIdxC < n; rowIdxC++) {
for (int colIdxC = 0; colIdxC < n; colIdxC++) {
float resultC = 0.0f;
for (int idx = 0; idx < n; idx++) {
int idxA = (rowIdxC * n) + idx;
int idxB = colIdxC + (idx * n);
resultC += a[idxA] * b[idxB];
}
int idxC = rowIdxC * n + colIdxC;
c[idxC] = resultC;
}
}
}
int main() {
float * a = new float [N*N];
float * b = new float [N*N];
float * c = new float [N*N];
for ( int i = 0; i < N; i++ ) {
for ( int j = 0; j < N; j++ ) {
int k = N * i + j;
a [k] = k;
b [k] = k;
}
}
//CPU-------------------------------------
clock_t start_s = clock();
cpuMult(a, b, N, c);
clock_t stop_s = clock();
std::cout << "Time CPU: " << (stop_s - start_s) / double(CLOCKS_PER_SEC) * 1000 << " ms\n";
//GPU-------------------------------------
int size = N * N * sizeof(float);
float * adev = NULL;
float * bdev = NULL;
float * cdev = NULL;
cudaMalloc((void**)&adev, size);
cudaMalloc((void**)&bdev, size);
cudaMalloc((void**)&cdev, size);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocks(N / threads.x, N / threads.y);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float gpuTime = 0.0f;
cudaEventRecord(start, 0);
cudaMemcpy(adev, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(bdev, b, size, cudaMemcpyHostToDevice);
gpuMult<<<blocks, threads>>>(adev, bdev, N, cdev);
cudaMemcpy(c, cdev, size, cudaMemcpyDeviceToHost);
cudaEventRecord( stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpuTime, start, stop);
std::cout << "Time GPU: " << gpuTime << " ms\n";
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(adev);
cudaFree(bdev);
cudaFree(cdev);
delete a;
delete b;
delete c;
return 0;
}
| .file "tmpxft_00086c40_00000000-6_mult.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3804:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3804:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7cpuMultPfS_iS_
.type _Z7cpuMultPfS_iS_, @function
_Z7cpuMultPfS_iS_:
.LFB3800:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L11
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rsi, %rbx
movl %edx, %ebp
movq %rcx, %r9
movslq %edx, %r11
leaq 0(,%r11,4), %rcx
movq %rdi, %r10
leaq (%rdi,%rcx), %rsi
movl $0, %r12d
.L5:
movq %rbx, %r8
movl $0, %edi
.L8:
movq %r8, %rdx
movq %r10, %rax
pxor %xmm1, %xmm1
.L6:
movss (%rax), %xmm0
mulss (%rdx), %xmm0
addss %xmm0, %xmm1
addq $4, %rax
addq %rcx, %rdx
cmpq %rsi, %rax
jne .L6
movss %xmm1, (%r9,%rdi,4)
addq $1, %rdi
addq $4, %r8
cmpq %r11, %rdi
jne .L8
addl $1, %r12d
addq %rcx, %r9
addq %rcx, %r10
addq %rcx, %rsi
cmpl %r12d, %ebp
jne .L5
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
ret
.cfi_endproc
.LFE3800:
.size _Z7cpuMultPfS_iS_, .-_Z7cpuMultPfS_iS_
.globl _Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_
.type _Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_, @function
_Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_:
.LFB3826:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L18
.L14:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L19
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7gpuMultPfS_iS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L14
.L19:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3826:
.size _Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_, .-_Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_
.globl _Z7gpuMultPfS_iS_
.type _Z7gpuMultPfS_iS_, @function
_Z7gpuMultPfS_iS_:
.LFB3827:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3827:
.size _Z7gpuMultPfS_iS_, .-_Z7gpuMultPfS_iS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Time CPU: "
.LC4:
.string " ms\n"
.LC5:
.string "Time GPU: "
.text
.globl main
.type main, @function
main:
.LFB3801:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $80, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbp
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbx
movl $4194304, %edi
call _Znam@PLT
movq %rax, %r12
movq %rbp, %rdi
movq %rbx, %rsi
movl $0, %ecx
.L23:
movl $0, %eax
.L24:
leal (%rcx,%rax), %edx
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
movss %xmm0, (%rdi,%rax,4)
movss %xmm0, (%rsi,%rax,4)
addq $1, %rax
cmpq $1024, %rax
jne .L24
addl $1024, %ecx
addq $4096, %rdi
addq $4096, %rsi
cmpl $1048576, %ecx
jne .L23
call clock@PLT
movq %rax, %r13
movq %r12, %rcx
movl $1024, %edx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z7cpuMultPfS_iS_
call clock@PLT
movq %rax, %r14
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %r14, %rax
subq %r13, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC2(%rip), %xmm0
mulsd .LC3(%rip), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq $0, 8(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
leaq 8(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
movl $1, 56(%rsp)
movl $64, 60(%rsp)
movl $64, 64(%rsp)
movl $1, 68(%rsp)
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
movl $0x00000000, 4(%rsp)
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, %ecx
movl $4194304, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4194304, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $16, 48(%rsp)
movl $16, 52(%rsp)
movl 56(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 48(%rsp), %rdx
movq 60(%rsp), %rdi
movl 68(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L30
.L26:
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 4(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 4(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq 32(%rsp), %rdi
call cudaEventDestroy@PLT
movq 40(%rsp), %rdi
call cudaEventDestroy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movl $4, %esi
movq %rbp, %rdi
call _ZdlPvm@PLT
movl $4, %esi
movq %rbx, %rdi
call _ZdlPvm@PLT
movl $4, %esi
movq %r12, %rdi
call _ZdlPvm@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L31
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore_state
movq 24(%rsp), %rcx
movl $1024, %edx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_
jmp .L26
.L31:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3801:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z7gpuMultPfS_iS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3829:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z7gpuMultPfS_iS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3829:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 0
.long 1093567616
.align 8
.LC3:
.long 0
.long 1083129856
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code.
| #include "iostream"
#include <fstream>
#include <string>
#define N 1024 // size of matrix is N*N
#define BLOCK_SIZE 16
__global__ void gpuMult ( float * a, float * b, int n, float * c ) {
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float result = 0.0f;
int idxA = n * BLOCK_SIZE * by + n * ty;
int idxB = BLOCK_SIZE * bx + tx;
for ( int idx = 0; idx < n; idx++ )
result += a [idxA + idx] * b [idxB + idx*n];
int idxC = n * BLOCK_SIZE * by + BLOCK_SIZE * bx;
c [idxC + n * ty + tx] = result;
}
void cpuMult( float * a, float * b, int n, float * c ) {
for (int rowIdxC = 0; rowIdxC < n; rowIdxC++) {
for (int colIdxC = 0; colIdxC < n; colIdxC++) {
float resultC = 0.0f;
for (int idx = 0; idx < n; idx++) {
int idxA = (rowIdxC * n) + idx;
int idxB = colIdxC + (idx * n);
resultC += a[idxA] * b[idxB];
}
int idxC = rowIdxC * n + colIdxC;
c[idxC] = resultC;
}
}
}
int main() {
float * a = new float [N*N];
float * b = new float [N*N];
float * c = new float [N*N];
for ( int i = 0; i < N; i++ ) {
for ( int j = 0; j < N; j++ ) {
int k = N * i + j;
a [k] = k;
b [k] = k;
}
}
//CPU-------------------------------------
clock_t start_s = clock();
cpuMult(a, b, N, c);
clock_t stop_s = clock();
std::cout << "Time CPU: " << (stop_s - start_s) / double(CLOCKS_PER_SEC) * 1000 << " ms\n";
//GPU-------------------------------------
int size = N * N * sizeof(float);
float * adev = NULL;
float * bdev = NULL;
float * cdev = NULL;
cudaMalloc((void**)&adev, size);
cudaMalloc((void**)&bdev, size);
cudaMalloc((void**)&cdev, size);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocks(N / threads.x, N / threads.y);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float gpuTime = 0.0f;
cudaEventRecord(start, 0);
cudaMemcpy(adev, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(bdev, b, size, cudaMemcpyHostToDevice);
gpuMult<<<blocks, threads>>>(adev, bdev, N, cdev);
cudaMemcpy(c, cdev, size, cudaMemcpyDeviceToHost);
cudaEventRecord( stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpuTime, start, stop);
std::cout << "Time GPU: " << gpuTime << " ms\n";
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(adev);
cudaFree(bdev);
cudaFree(cdev);
delete a;
delete b;
delete c;
return 0;
}
| #include <hip/hip_runtime.h>
#include "iostream"
#include <fstream>
#include <string>
#define N 1024 // size of matrix is N*N
#define BLOCK_SIZE 16
__global__ void gpuMult ( float * a, float * b, int n, float * c ) {
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float result = 0.0f;
int idxA = n * BLOCK_SIZE * by + n * ty;
int idxB = BLOCK_SIZE * bx + tx;
for ( int idx = 0; idx < n; idx++ )
result += a [idxA + idx] * b [idxB + idx*n];
int idxC = n * BLOCK_SIZE * by + BLOCK_SIZE * bx;
c [idxC + n * ty + tx] = result;
}
void cpuMult( float * a, float * b, int n, float * c ) {
for (int rowIdxC = 0; rowIdxC < n; rowIdxC++) {
for (int colIdxC = 0; colIdxC < n; colIdxC++) {
float resultC = 0.0f;
for (int idx = 0; idx < n; idx++) {
int idxA = (rowIdxC * n) + idx;
int idxB = colIdxC + (idx * n);
resultC += a[idxA] * b[idxB];
}
int idxC = rowIdxC * n + colIdxC;
c[idxC] = resultC;
}
}
}
int main() {
float * a = new float [N*N];
float * b = new float [N*N];
float * c = new float [N*N];
for ( int i = 0; i < N; i++ ) {
for ( int j = 0; j < N; j++ ) {
int k = N * i + j;
a [k] = k;
b [k] = k;
}
}
//CPU-------------------------------------
clock_t start_s = clock();
cpuMult(a, b, N, c);
clock_t stop_s = clock();
std::cout << "Time CPU: " << (stop_s - start_s) / double(CLOCKS_PER_SEC) * 1000 << " ms\n";
//GPU-------------------------------------
int size = N * N * sizeof(float);
float * adev = NULL;
float * bdev = NULL;
float * cdev = NULL;
hipMalloc((void**)&adev, size);
hipMalloc((void**)&bdev, size);
hipMalloc((void**)&cdev, size);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocks(N / threads.x, N / threads.y);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float gpuTime = 0.0f;
hipEventRecord(start, 0);
hipMemcpy(adev, a, size, hipMemcpyHostToDevice);
hipMemcpy(bdev, b, size, hipMemcpyHostToDevice);
gpuMult<<<blocks, threads>>>(adev, bdev, N, cdev);
hipMemcpy(c, cdev, size, hipMemcpyDeviceToHost);
hipEventRecord( stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpuTime, start, stop);
std::cout << "Time GPU: " << gpuTime << " ms\n";
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(adev);
hipFree(bdev);
hipFree(cdev);
delete a;
delete b;
delete c;
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly.
| #include <hip/hip_runtime.h>
#include "iostream"
#include <fstream>
#include <string>
#define N 1024 // size of matrix is N*N
#define BLOCK_SIZE 16
__global__ void gpuMult ( float * a, float * b, int n, float * c ) {
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float result = 0.0f;
int idxA = n * BLOCK_SIZE * by + n * ty;
int idxB = BLOCK_SIZE * bx + tx;
for ( int idx = 0; idx < n; idx++ )
result += a [idxA + idx] * b [idxB + idx*n];
int idxC = n * BLOCK_SIZE * by + BLOCK_SIZE * bx;
c [idxC + n * ty + tx] = result;
}
void cpuMult( float * a, float * b, int n, float * c ) {
for (int rowIdxC = 0; rowIdxC < n; rowIdxC++) {
for (int colIdxC = 0; colIdxC < n; colIdxC++) {
float resultC = 0.0f;
for (int idx = 0; idx < n; idx++) {
int idxA = (rowIdxC * n) + idx;
int idxB = colIdxC + (idx * n);
resultC += a[idxA] * b[idxB];
}
int idxC = rowIdxC * n + colIdxC;
c[idxC] = resultC;
}
}
}
int main() {
float * a = new float [N*N];
float * b = new float [N*N];
float * c = new float [N*N];
for ( int i = 0; i < N; i++ ) {
for ( int j = 0; j < N; j++ ) {
int k = N * i + j;
a [k] = k;
b [k] = k;
}
}
//CPU-------------------------------------
clock_t start_s = clock();
cpuMult(a, b, N, c);
clock_t stop_s = clock();
std::cout << "Time CPU: " << (stop_s - start_s) / double(CLOCKS_PER_SEC) * 1000 << " ms\n";
//GPU-------------------------------------
int size = N * N * sizeof(float);
float * adev = NULL;
float * bdev = NULL;
float * cdev = NULL;
hipMalloc((void**)&adev, size);
hipMalloc((void**)&bdev, size);
hipMalloc((void**)&cdev, size);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocks(N / threads.x, N / threads.y);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float gpuTime = 0.0f;
hipEventRecord(start, 0);
hipMemcpy(adev, a, size, hipMemcpyHostToDevice);
hipMemcpy(bdev, b, size, hipMemcpyHostToDevice);
gpuMult<<<blocks, threads>>>(adev, bdev, N, cdev);
hipMemcpy(c, cdev, size, hipMemcpyDeviceToHost);
hipEventRecord( stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpuTime, start, stop);
std::cout << "Time GPU: " << gpuTime << " ms\n";
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(adev);
hipFree(bdev);
hipFree(cdev);
delete a;
delete b;
delete c;
return 0;
}
| .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7gpuMultPfS_iS_
.globl _Z7gpuMultPfS_iS_
.p2align 8
.type _Z7gpuMultPfS_iS_,@function
_Z7gpuMultPfS_iS_:
s_load_b32 s3, s[0:1], 0x10
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_lshl_b32 s2, s14, 4
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_add_nc_u32_e32 v5, s2, v0
s_waitcnt lgkmcnt(0)
v_mul_lo_u32 v4, v1, s3
s_mul_i32 s4, s3, s15
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b32 s2, s4, 4
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_3
s_load_b128 s[4:7], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v0, s2, v4
v_mov_b32_e32 v6, 0
v_mov_b32_e32 v2, v5
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_mov_b32 s4, s3
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v3, 31, v2
s_add_i32 s4, s4, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s4, 0
v_lshlrev_b64 v[7:8], 2, v[2:3]
v_add_nc_u32_e32 v2, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s6, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v8, vcc_lo
global_load_b32 v3, v[0:1], off
global_load_b32 v7, v[7:8], off
v_add_co_u32 v0, vcc_lo, v0, 4
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v3, v7
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v6, 0
.LBB0_4:
s_load_b64 s[0:1], s[0:1], 0x18
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v0, v5, v4, s2
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v6, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7gpuMultPfS_iS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7gpuMultPfS_iS_, .Lfunc_end0-_Z7gpuMultPfS_iS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7gpuMultPfS_iS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7gpuMultPfS_iS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly.
| #include <hip/hip_runtime.h>
#include "iostream"
#include <fstream>
#include <string>
#define N 1024 // size of matrix is N*N
#define BLOCK_SIZE 16
__global__ void gpuMult ( float * a, float * b, int n, float * c ) {
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float result = 0.0f;
int idxA = n * BLOCK_SIZE * by + n * ty;
int idxB = BLOCK_SIZE * bx + tx;
for ( int idx = 0; idx < n; idx++ )
result += a [idxA + idx] * b [idxB + idx*n];
int idxC = n * BLOCK_SIZE * by + BLOCK_SIZE * bx;
c [idxC + n * ty + tx] = result;
}
void cpuMult( float * a, float * b, int n, float * c ) {
for (int rowIdxC = 0; rowIdxC < n; rowIdxC++) {
for (int colIdxC = 0; colIdxC < n; colIdxC++) {
float resultC = 0.0f;
for (int idx = 0; idx < n; idx++) {
int idxA = (rowIdxC * n) + idx;
int idxB = colIdxC + (idx * n);
resultC += a[idxA] * b[idxB];
}
int idxC = rowIdxC * n + colIdxC;
c[idxC] = resultC;
}
}
}
int main() {
float * a = new float [N*N];
float * b = new float [N*N];
float * c = new float [N*N];
for ( int i = 0; i < N; i++ ) {
for ( int j = 0; j < N; j++ ) {
int k = N * i + j;
a [k] = k;
b [k] = k;
}
}
//CPU-------------------------------------
clock_t start_s = clock();
cpuMult(a, b, N, c);
clock_t stop_s = clock();
std::cout << "Time CPU: " << (stop_s - start_s) / double(CLOCKS_PER_SEC) * 1000 << " ms\n";
//GPU-------------------------------------
int size = N * N * sizeof(float);
float * adev = NULL;
float * bdev = NULL;
float * cdev = NULL;
hipMalloc((void**)&adev, size);
hipMalloc((void**)&bdev, size);
hipMalloc((void**)&cdev, size);
dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
dim3 blocks(N / threads.x, N / threads.y);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float gpuTime = 0.0f;
hipEventRecord(start, 0);
hipMemcpy(adev, a, size, hipMemcpyHostToDevice);
hipMemcpy(bdev, b, size, hipMemcpyHostToDevice);
gpuMult<<<blocks, threads>>>(adev, bdev, N, cdev);
hipMemcpy(c, cdev, size, hipMemcpyDeviceToHost);
hipEventRecord( stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpuTime, start, stop);
std::cout << "Time GPU: " << gpuTime << " ms\n";
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(adev);
hipFree(bdev);
hipFree(cdev);
delete a;
delete b;
delete c;
return 0;
}
| .text
.file "mult.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z22__device_stub__gpuMultPfS_iS_ # -- Begin function _Z22__device_stub__gpuMultPfS_iS_
.p2align 4, 0x90
.type _Z22__device_stub__gpuMultPfS_iS_,@function
_Z22__device_stub__gpuMultPfS_iS_: # @_Z22__device_stub__gpuMultPfS_iS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movq %rcx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7gpuMultPfS_iS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z22__device_stub__gpuMultPfS_iS_, .Lfunc_end0-_Z22__device_stub__gpuMultPfS_iS_
.cfi_endproc
# -- End function
.globl _Z7cpuMultPfS_iS_ # -- Begin function _Z7cpuMultPfS_iS_
.p2align 4, 0x90
.type _Z7cpuMultPfS_iS_,@function
_Z7cpuMultPfS_iS_: # @_Z7cpuMultPfS_iS_
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB1_8
# %bb.1: # %.preheader29.lr.ph
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %eax
leaq (,%rax,4), %r8
xorl %r9d, %r9d
xorl %r10d, %r10d
.p2align 4, 0x90
.LBB1_2: # %.preheader29
# =>This Loop Header: Depth=1
# Child Loop BB1_3 Depth 2
# Child Loop BB1_4 Depth 3
movl %r9d, %r11d
leaq (%rdi,%r11,4), %r11
movq %r10, %rbx
imulq %rax, %rbx
leaq (%rcx,%rbx,4), %rbx
movq %rsi, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_3: # %.preheader
# Parent Loop BB1_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB1_4 Depth 3
xorps %xmm0, %xmm0
movq %r14, %r12
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_4: # Parent Loop BB1_2 Depth=1
# Parent Loop BB1_3 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r11,%r13,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r12), %xmm1
addss %xmm1, %xmm0
incq %r13
addq %r8, %r12
cmpq %r13, %rax
jne .LBB1_4
# %bb.5: # %._crit_edge
# in Loop: Header=BB1_3 Depth=2
movss %xmm0, (%rbx,%r15,4)
incq %r15
addq $4, %r14
cmpq %rax, %r15
jne .LBB1_3
# %bb.6: # %._crit_edge33
# in Loop: Header=BB1_2 Depth=1
incq %r10
addl %edx, %r9d
cmpq %rax, %r10
jne .LBB1_2
# %bb.7:
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.LBB1_8: # %._crit_edge35
retq
.Lfunc_end1:
.size _Z7cpuMultPfS_iS_, .Lfunc_end1-_Z7cpuMultPfS_iS_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x412e848000000000 # double 1.0E+6
.LCPI2_1:
.quad 0x408f400000000000 # double 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $160, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %rbx
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r14
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r15
xorl %eax, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB2_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
movl $1024, %edx # imm = 0x400
movq %rax, %rsi
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
xorps %xmm0, %xmm0
cvtsi2ss %esi, %xmm0
movss %xmm0, (%rbx,%rsi,4)
movss %xmm0, (%r14,%rsi,4)
incq %rsi
decq %rdx
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
incq %rcx
addq $1024, %rax # imm = 0x400
cmpq $1024, %rcx # imm = 0x400
jne .LBB2_1
# %bb.4:
xorl %r13d, %r13d
callq clock
movq %rax, %r12
movq %rbx, %rax
.p2align 4, 0x90
.LBB2_5: # %.preheader29.i
# =>This Loop Header: Depth=1
# Child Loop BB2_6 Depth 2
# Child Loop BB2_7 Depth 3
movq %r13, %rcx
shlq $12, %rcx
addq %r15, %rcx
movq %r14, %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_6: # %.preheader.i
# Parent Loop BB2_5 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_7 Depth 3
xorps %xmm0, %xmm0
movq %rdx, %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB2_7: # Parent Loop BB2_5 Depth=1
# Parent Loop BB2_6 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rax,%r8,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%rdi), %xmm1
addss %xmm1, %xmm0
incq %r8
addq $4096, %rdi # imm = 0x1000
cmpq $1024, %r8 # imm = 0x400
jne .LBB2_7
# %bb.8: # %._crit_edge.i
# in Loop: Header=BB2_6 Depth=2
movss %xmm0, (%rcx,%rsi,4)
incq %rsi
addq $4, %rdx
cmpq $1024, %rsi # imm = 0x400
jne .LBB2_6
# %bb.9: # %._crit_edge33.i
# in Loop: Header=BB2_5 Depth=1
incq %r13
addq $4096, %rax # imm = 0x1000
cmpq $1024, %r13 # imm = 0x400
jne .LBB2_5
# %bb.10: # %_Z7cpuMultPfS_iS_.exit
callq clock
movq %rax, %r13
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $10, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
subq %r12, %r13
xorps %xmm0, %xmm0
cvtsi2sd %r13, %xmm0
divsd .LCPI2_0(%rip), %xmm0
mulsd .LCPI2_1(%rip), %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.1, %esi
movl $4, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq $0, 32(%rsp)
movq $0, 24(%rsp)
movq $0, 16(%rsp)
leaq 32(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 40(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movl $0, 4(%rsp)
movq 40(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 32(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $274877907008, %rdi # imm = 0x4000000040
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_12
# %bb.11:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movl $1024, 52(%rsp) # imm = 0x400
movq %rdx, 104(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 52(%rsp), %rax
movq %rax, 144(%rsp)
leaq 104(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z7gpuMultPfS_iS_, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_12:
movq 16(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 40(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $10, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.1, %esi
movl $4, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq 40(%rsp), %rdi
callq hipEventDestroy
movq 8(%rsp), %rdi
callq hipEventDestroy
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq _ZdlPv
movq %r14, %rdi
callq _ZdlPv
movq %r15, %rdi
callq _ZdlPv
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7gpuMultPfS_iS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7gpuMultPfS_iS_,@object # @_Z7gpuMultPfS_iS_
.section .rodata,"a",@progbits
.globl _Z7gpuMultPfS_iS_
.p2align 3, 0x0
_Z7gpuMultPfS_iS_:
.quad _Z22__device_stub__gpuMultPfS_iS_
.size _Z7gpuMultPfS_iS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Time CPU: "
.size .L.str, 11
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " ms\n"
.size .L.str.1, 5
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Time GPU: "
.size .L.str.2, 11
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7gpuMultPfS_iS_"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__gpuMultPfS_iS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7gpuMultPfS_iS_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z7gpuMultPfS_iS_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e220000002500 */
/*0020*/ MOV R4, c[0x0][0x170] ; /* 0x00005c0000047a02 */
/* 0x000fe20000000f00 */
/*0030*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*0040*/ HFMA2.MMA R14, -RZ, RZ, 0, 0 ; /* 0x00000000ff0e7435 */
/* 0x000fe200000001ff */
/*0050*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e220000002100 */
/*0060*/ ISETP.GE.AND P0, PT, R4, 0x1, PT ; /* 0x000000010400780c */
/* 0x000fc60003f06270 */
/*0070*/ S2R R2, SR_CTAID.Y ; /* 0x0000000000027919 */
/* 0x000e680000002600 */
/*0080*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000ea20000002200 */
/*0090*/ LEA R7, R0, R3, 0x4 ; /* 0x0000000300077211 */
/* 0x001fca00078e20ff */
/*00a0*/ @!P0 BRA 0xbf0 ; /* 0x00000b4000008947 */
/* 0x000fea0003800000 */
/*00b0*/ IADD3 R6, R4.reuse, -0x1, RZ ; /* 0xffffffff04067810 */
/* 0x040fe40007ffe0ff */
/*00c0*/ LOP3.LUT R8, R4, 0x3, RZ, 0xc0, !PT ; /* 0x0000000304087812 */
/* 0x000fe400078ec0ff */
/*00d0*/ ISETP.GE.U32.AND P0, PT, R6, 0x3, PT ; /* 0x000000030600780c */
/* 0x000fe40003f06070 */
/*00e0*/ MOV R14, RZ ; /* 0x000000ff000e7202 */
/* 0x000fe40000000f00 */
/*00f0*/ MOV R6, RZ ; /* 0x000000ff00067202 */
/* 0x000fd20000000f00 */
/*0100*/ @!P0 BRA 0xad0 ; /* 0x000009c000008947 */
/* 0x000fea0003800000 */
/*0110*/ IADD3 R9, -R8, c[0x0][0x170], RZ ; /* 0x00005c0008097a10 */
/* 0x000fe20007ffe1ff */
/*0120*/ HFMA2.MMA R16, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff107435 */
/* 0x000fe200000001ff */
/*0130*/ LEA R10, R2, R5, 0x4 ; /* 0x00000005020a7211 */
/* 0x006fe200078e20ff */
/*0140*/ ULDC.64 UR6, c[0x0][0x160] ; /* 0x0000580000067ab9 */
/* 0x000fe20000000a00 */
/*0150*/ ISETP.GT.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f04270 */
/*0160*/ HFMA2.MMA R6, -RZ, RZ, 0, 0 ; /* 0x00000000ff067435 */
/* 0x000fe200000001ff */
/*0170*/ MOV R14, RZ ; /* 0x000000ff000e7202 */
/* 0x000fe20000000f00 */
/*0180*/ IMAD R10, R10, c[0x0][0x170], RZ ; /* 0x00005c000a0a7a24 */
/* 0x000fc800078e02ff */
/*0190*/ IMAD.WIDE R16, R7, R16, c[0x0][0x168] ; /* 0x00005a0007107625 */
/* 0x000fcc00078e0210 */
/*01a0*/ @!P0 BRA 0x940 ; /* 0x0000079000008947 */
/* 0x000fea0003800000 */
/*01b0*/ ISETP.GT.AND P1, PT, R9, 0xc, PT ; /* 0x0000000c0900780c */
/* 0x000fe40003f24270 */
/*01c0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x80, 0x0 ; /* 0x000000000000781c */
/* 0x000fd60003f0f070 */
/*01d0*/ @!P1 BRA 0x680 ; /* 0x000004a000009947 */
/* 0x000fea0003800000 */
/*01e0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe40003f0e170 */
/*01f0*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0200*/ IMAD.WIDE R28, R4, 0x4, R16 ; /* 0x00000004041c7825 */
/* 0x000fe200078e0210 */
/*0210*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fe20008000f00 */
/*0220*/ LDG.E R25, [R16.64] ; /* 0x0000000410197981 */
/* 0x0000a8000c1e1900 */
/*0230*/ IMAD.WIDE R12, R10, 0x4, R12 ; /* 0x000000040a0c7825 */
/* 0x000fe200078e020c */
/*0240*/ LDG.E R15, [R28.64] ; /* 0x000000041c0f7981 */
/* 0x0002e8000c1e1900 */
/*0250*/ LDG.E R23, [R12.64] ; /* 0x000000040c177981 */
/* 0x000ea8000c1e1900 */
/*0260*/ LDG.E R24, [R12.64+0x4] ; /* 0x000004040c187981 */
/* 0x000ee2000c1e1900 */
/*0270*/ IMAD.WIDE R28, R4, 0x4, R28 ; /* 0x00000004041c7825 */
/* 0x002fc600078e021c */
/*0280*/ LDG.E R20, [R12.64+0x8] ; /* 0x000008040c147981 */
/* 0x000f28000c1e1900 */
/*0290*/ LDG.E R11, [R28.64] ; /* 0x000000041c0b7981 */
/* 0x000322000c1e1900 */
/*02a0*/ IMAD.WIDE R18, R4, 0x4, R28 ; /* 0x0000000404127825 */
/* 0x000fc600078e021c */
/*02b0*/ LDG.E R22, [R12.64+0xc] ; /* 0x00000c040c167981 */
/* 0x000f66000c1e1900 */
/*02c0*/ IMAD.WIDE R26, R4.reuse, 0x4, R18 ; /* 0x00000004041a7825 */
/* 0x040fe200078e0212 */
/*02d0*/ LDG.E R21, [R18.64] ; /* 0x0000000412157981 */
/* 0x000368000c1e1900 */
/*02e0*/ LDG.E R16, [R12.64+0x10] ; /* 0x000010040c107981 */
/* 0x001f68000c1e1900 */
/*02f0*/ LDG.E R17, [R26.64] ; /* 0x000000041a117981 */
/* 0x000164000c1e1900 */
/*0300*/ IMAD.WIDE R26, R4, 0x4, R26 ; /* 0x00000004041a7825 */
/* 0x001fcc00078e021a */
/*0310*/ IMAD.WIDE R28, R4, 0x4, R26 ; /* 0x00000004041c7825 */
/* 0x002fcc00078e021a */
/*0320*/ IMAD.WIDE R18, R4, 0x4, R28 ; /* 0x0000000404127825 */
/* 0x000fc800078e021c */
/*0330*/ FFMA R23, R25, R23, R14 ; /* 0x0000001719177223 */
/* 0x004fe4000000000e */
/*0340*/ LDG.E R14, [R26.64] ; /* 0x000000041a0e7981 */
/* 0x000aa4000c1e1900 */
/*0350*/ FFMA R23, R15, R24, R23 ; /* 0x000000180f177223 */
/* 0x008fe40000000017 */
/*0360*/ LDG.E R15, [R12.64+0x14] ; /* 0x000014040c0f7981 */
/* 0x000ea8000c1e1900 */
/*0370*/ LDG.E R24, [R28.64] ; /* 0x000000041c187981 */
/* 0x0000e2000c1e1900 */
/*0380*/ FFMA R23, R11, R20, R23 ; /* 0x000000140b177223 */
/* 0x010fc60000000017 */
/*0390*/ LDG.E R25, [R12.64+0x18] ; /* 0x000018040c197981 */
/* 0x000ee8000c1e1900 */
/*03a0*/ LDG.E R20, [R18.64] ; /* 0x0000000412147981 */
/* 0x000322000c1e1900 */
/*03b0*/ FFMA R27, R21, R22, R23 ; /* 0x00000016151b7223 */
/* 0x020fc60000000017 */
/*03c0*/ LDG.E R11, [R12.64+0x1c] ; /* 0x00001c040c0b7981 */
/* 0x000f28000c1e1900 */
/*03d0*/ LDG.E R26, [R12.64+0x20] ; /* 0x000020040c1a7981 */
/* 0x000f62000c1e1900 */
/*03e0*/ IMAD.WIDE R18, R4, 0x4, R18 ; /* 0x0000000404127825 */
/* 0x002fca00078e0212 */
/*03f0*/ LDG.E R21, [R18.64] ; /* 0x0000000412157981 */
/* 0x000562000c1e1900 */
/*0400*/ IMAD.WIDE R22, R4, 0x4, R18 ; /* 0x0000000404167825 */
/* 0x000fc800078e0212 */
/*0410*/ FFMA R29, R17, R16, R27 ; /* 0x00000010111d7223 */
/* 0x001fe2000000001b */
/*0420*/ LDG.E R28, [R22.64] ; /* 0x00000004161c7981 */
/* 0x000168000c1e1900 */
/*0430*/ LDG.E R27, [R12.64+0x24] ; /* 0x000024040c1b7981 */
/* 0x000f62000c1e1900 */
/*0440*/ IMAD.WIDE R16, R4, 0x4, R22 ; /* 0x0000000404107825 */
/* 0x000fc800078e0216 */
/*0450*/ FFMA R18, R14, R15, R29 ; /* 0x0000000f0e127223 */
/* 0x004fe4000000001d */
/*0460*/ IMAD.WIDE R14, R4, 0x4, R16 ; /* 0x00000004040e7825 */
/* 0x000fe200078e0210 */
/*0470*/ LDG.E R29, [R16.64] ; /* 0x00000004101d7981 */
/* 0x0002a6000c1e1900 */
/*0480*/ FFMA R25, R24, R25, R18 ; /* 0x0000001918197223 */
/* 0x008fe40000000012 */
/*0490*/ IMAD.WIDE R18, R4, 0x4, R14 ; /* 0x0000000404127825 */
/* 0x000fe200078e020e */
/*04a0*/ LDG.E R24, [R12.64+0x28] ; /* 0x000028040c187981 */
/* 0x000ea8000c1e1900 */
/*04b0*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x0006a2000c1e1900 */
/*04c0*/ FFMA R20, R20, R11, R25 ; /* 0x0000000b14147223 */
/* 0x010fc40000000019 */
/*04d0*/ IMAD.WIDE R22, R4.reuse, 0x4, R18 ; /* 0x0000000404167825 */
/* 0x041fe200078e0212 */
/*04e0*/ LDG.E R25, [R12.64+0x2c] ; /* 0x00002c040c197981 */
/* 0x000f28000c1e1900 */
/*04f0*/ LDG.E R11, [R18.64] ; /* 0x00000004120b7981 */
/* 0x000122000c1e1900 */
/*0500*/ IMAD.WIDE R16, R4, 0x4, R22 ; /* 0x0000000404107825 */
/* 0x002fc800078e0216 */
/*0510*/ FFMA R20, R21, R26, R20 ; /* 0x0000001a15147223 */
/* 0x020fe20000000014 */
/*0520*/ LDG.E R15, [R16.64] ; /* 0x00000004100f7981 */
/* 0x0082e8000c1e1900 */
/*0530*/ LDG.E R18, [R12.64+0x30] ; /* 0x000030040c127981 */
/* 0x001f68000c1e1900 */
/*0540*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */
/* 0x0000e4000c1e1900 */
/*0550*/ FFMA R22, R28, R27, R20 ; /* 0x0000001b1c167223 */
/* 0x001fc40000000014 */
/*0560*/ LDG.E R27, [R12.64+0x34] ; /* 0x000034040c1b7981 */
/* 0x000ee2000c1e1900 */
/*0570*/ IMAD.WIDE R20, R4, 0x4, R16 ; /* 0x0000000404147825 */
/* 0x000fc600078e0210 */
/*0580*/ LDG.E R28, [R12.64+0x38] ; /* 0x000038040c1c7981 */
/* 0x000ee8000c1e1900 */
/*0590*/ LDG.E R16, [R12.64+0x3c] ; /* 0x00003c040c107981 */
/* 0x002ee8000c1e1900 */
/*05a0*/ LDG.E R19, [R20.64] ; /* 0x0000000414137981 */
/* 0x000ee2000c1e1900 */
/*05b0*/ IADD3 R9, R9, -0x10, RZ ; /* 0xfffffff009097810 */
/* 0x000fc80007ffe0ff */
/*05c0*/ ISETP.GT.AND P1, PT, R9, 0xc, PT ; /* 0x0000000c0900780c */
/* 0x000fe20003f24270 */
/*05d0*/ UIADD3 UR6, UP0, UR6, 0x40, URZ ; /* 0x0000004006067890 */
/* 0x000fe2000ff1e03f */
/*05e0*/ IADD3 R6, R6, 0x10, RZ ; /* 0x0000001006067810 */
/* 0x000fc60007ffe0ff */
/*05f0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0600*/ FFMA R24, R29, R24, R22 ; /* 0x000000181d187223 */
/* 0x004fc80000000016 */
/*0610*/ FFMA R14, R14, R25, R24 ; /* 0x000000190e0e7223 */
/* 0x010fc80000000018 */
/*0620*/ FFMA R11, R11, R18, R14 ; /* 0x000000120b0b7223 */
/* 0x020fc8000000000e */
/*0630*/ FFMA R11, R26, R27, R11 ; /* 0x0000001b1a0b7223 */
/* 0x008fc8000000000b */
/*0640*/ FFMA R11, R15, R28, R11 ; /* 0x0000001c0f0b7223 */
/* 0x000fc8000000000b */
/*0650*/ FFMA R14, R19, R16, R11 ; /* 0x00000010130e7223 */
/* 0x000fe4000000000b */
/*0660*/ IMAD.WIDE R16, R4, 0x4, R20 ; /* 0x0000000404107825 */
/* 0x000fe200078e0214 */
/*0670*/ @P1 BRA 0x1f0 ; /* 0xfffffb7000001947 */
/* 0x000fea000383ffff */
/*0680*/ ISETP.GT.AND P1, PT, R9, 0x4, PT ; /* 0x000000040900780c */
/* 0x000fda0003f24270 */
/*0690*/ @!P1 BRA 0x920 ; /* 0x0000028000009947 */
/* 0x000fea0003800000 */
/*06a0*/ MOV R20, UR6 ; /* 0x0000000600147c02 */
/* 0x000fe40008000f00 */
/*06b0*/ MOV R21, UR7 ; /* 0x0000000700157c02 */
/* 0x000fe20008000f00 */
/*06c0*/ IMAD.WIDE R22, R4, 0x4, R16 ; /* 0x0000000404167825 */
/* 0x000fe200078e0210 */
/*06d0*/ LDG.E R11, [R16.64] ; /* 0x00000004100b7981 */
/* 0x0000a6000c1e1900 */
/*06e0*/ IMAD.WIDE R20, R10, 0x4, R20 ; /* 0x000000040a147825 */
/* 0x000fe200078e0214 */
/*06f0*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */
/* 0x0002e8000c1e1900 */
/*0700*/ LDG.E R15, [R20.64] ; /* 0x00000004140f7981 */
/* 0x000ea8000c1e1900 */
/*0710*/ LDG.E R27, [R20.64+0x4] ; /* 0x00000404141b7981 */
/* 0x000ee2000c1e1900 */
/*0720*/ IMAD.WIDE R12, R4, 0x4, R22 ; /* 0x00000004040c7825 */
/* 0x000fc600078e0216 */
/*0730*/ LDG.E R29, [R20.64+0x8] ; /* 0x00000804141d7981 */
/* 0x000f26000c1e1900 */
/*0740*/ IMAD.WIDE R16, R4.reuse, 0x4, R12 ; /* 0x0000000404107825 */
/* 0x041fe200078e020c */
/*0750*/ LDG.E R22, [R20.64+0x10] ; /* 0x0000100414167981 */
/* 0x002f68000c1e1900 */
/*0760*/ LDG.E R12, [R12.64] ; /* 0x000000040c0c7981 */
/* 0x000122000c1e1900 */
/*0770*/ IMAD.WIDE R18, R4, 0x4, R16 ; /* 0x0000000404127825 */
/* 0x000fc600078e0210 */
/*0780*/ LDG.E R28, [R16.64] ; /* 0x00000004101c7981 */
/* 0x000326000c1e1900 */
/*0790*/ IMAD.WIDE R24, R4, 0x4, R18 ; /* 0x0000000404187825 */
/* 0x000fe200078e0212 */
/*07a0*/ LDG.E R13, [R18.64] ; /* 0x00000004120d7981 */
/* 0x001168000c1e1900 */
/*07b0*/ LDG.E R19, [R20.64+0xc] ; /* 0x00000c0414137981 */
/* 0x001f68000c1e1900 */
/*07c0*/ LDG.E R18, [R20.64+0x18] ; /* 0x0000180414127981 */
/* 0x000f62000c1e1900 */
/*07d0*/ FFMA R23, R11, R15, R14 ; /* 0x0000000f0b177223 */
/* 0x004fc4000000000e */
/*07e0*/ IMAD.WIDE R14, R4.reuse, 0x4, R24 ; /* 0x00000004040e7825 */
/* 0x040fe200078e0218 */
/*07f0*/ LDG.E R11, [R24.64] ; /* 0x00000004180b7981 */
/* 0x0006aa000c1e1900 */
/*0800*/ IMAD.WIDE R16, R4, 0x4, R14 ; /* 0x0000000404107825 */
/* 0x002fc800078e020e */
/*0810*/ FFMA R24, R26, R27, R23 ; /* 0x0000001b1a187223 */
/* 0x008fe40000000017 */
/*0820*/ LDG.E R26, [R20.64+0x14] ; /* 0x00001404141a7981 */
/* 0x000ea8000c1e1900 */
/*0830*/ LDG.E R23, [R14.64] ; /* 0x000000040e177981 */
/* 0x0000e8000c1e1900 */
/*0840*/ LDG.E R27, [R16.64] ; /* 0x00000004101b7981 */
/* 0x0002e8000c1e1900 */
/*0850*/ LDG.E R14, [R20.64+0x1c] ; /* 0x00001c04140e7981 */
/* 0x001ee2000c1e1900 */
/*0860*/ FFMA R12, R12, R29, R24 ; /* 0x0000001d0c0c7223 */
/* 0x010fc80000000018 */
/*0870*/ FFMA R12, R28, R19, R12 ; /* 0x000000131c0c7223 */
/* 0x020fc8000000000c */
/*0880*/ FFMA R12, R13, R22, R12 ; /* 0x000000160d0c7223 */
/* 0x000fe2000000000c */
/*0890*/ UIADD3 UR6, UP0, UR6, 0x20, URZ ; /* 0x0000002006067890 */
/* 0x000fe2000ff1e03f */
/*08a0*/ PLOP3.LUT P0, PT, PT, PT, PT, 0x8, 0x0 ; /* 0x000000000000781c */
/* 0x000fe20003f0e170 */
/*08b0*/ IMAD.WIDE R16, R4, 0x4, R16 ; /* 0x0000000404107825 */
/* 0x002fe200078e0210 */
/*08c0*/ IADD3 R6, R6, 0x8, RZ ; /* 0x0000000806067810 */
/* 0x000fe40007ffe0ff */
/*08d0*/ IADD3 R9, R9, -0x8, RZ ; /* 0xfffffff809097810 */
/* 0x000fe20007ffe0ff */
/*08e0*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*08f0*/ FFMA R11, R11, R26, R12 ; /* 0x0000001a0b0b7223 */
/* 0x004fc8000000000c */
/*0900*/ FFMA R11, R23, R18, R11 ; /* 0x00000012170b7223 */
/* 0x008fc8000000000b */
/*0910*/ FFMA R14, R27, R14, R11 ; /* 0x0000000e1b0e7223 */
/* 0x000fe4000000000b */
/*0920*/ ISETP.NE.OR P0, PT, R9, RZ, P0 ; /* 0x000000ff0900720c */
/* 0x000fda0000705670 */
/*0930*/ @!P0 BRA 0xad0 ; /* 0x0000019000008947 */
/* 0x000fea0003800000 */
/*0940*/ MOV R12, UR6 ; /* 0x00000006000c7c02 */
/* 0x000fe20008000f00 */
/*0950*/ IMAD.WIDE R18, R4, 0x4, R16 ; /* 0x0000000404127825 */
/* 0x000fe200078e0210 */
/*0960*/ MOV R13, UR7 ; /* 0x00000007000d7c02 */
/* 0x000fe20008000f00 */
/*0970*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ea8000c1e1900 */
/*0980*/ IMAD.WIDE R12, R10, 0x4, R12 ; /* 0x000000040a0c7825 */
/* 0x000fc800078e020c */
/*0990*/ IMAD.WIDE R20, R4.reuse, 0x4, R18 ; /* 0x0000000404147825 */
/* 0x040fe200078e0212 */
/*09a0*/ LDG.E R11, [R12.64] ; /* 0x000000040c0b7981 */
/* 0x000ea8000c1e1900 */
/*09b0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ee2000c1e1900 */
/*09c0*/ IMAD.WIDE R22, R4, 0x4, R20 ; /* 0x0000000404167825 */
/* 0x000fc600078e0214 */
/*09d0*/ LDG.E R15, [R12.64+0x4] ; /* 0x000004040c0f7981 */
/* 0x000ee8000c1e1900 */
/*09e0*/ LDG.E R24, [R20.64] ; /* 0x0000000414187981 */
/* 0x000f28000c1e1900 */
/*09f0*/ LDG.E R25, [R12.64+0x8] ; /* 0x000008040c197981 */
/* 0x000f28000c1e1900 */
/*0a00*/ LDG.E R27, [R12.64+0xc] ; /* 0x00000c040c1b7981 */
/* 0x000f68000c1e1900 */
/*0a10*/ LDG.E R26, [R22.64] ; /* 0x00000004161a7981 */
/* 0x000f62000c1e1900 */
/*0a20*/ IADD3 R9, R9, -0x4, RZ ; /* 0xfffffffc09097810 */
/* 0x000fc80007ffe0ff */
/*0a30*/ ISETP.NE.AND P0, PT, R9, RZ, PT ; /* 0x000000ff0900720c */
/* 0x000fe20003f05270 */
/*0a40*/ UIADD3 UR6, UP0, UR6, 0x10, URZ ; /* 0x0000001006067890 */
/* 0x000fe2000ff1e03f */
/*0a50*/ IADD3 R6, R6, 0x4, RZ ; /* 0x0000000406067810 */
/* 0x000fc60007ffe0ff */
/*0a60*/ UIADD3.X UR7, URZ, UR7, URZ, UP0, !UPT ; /* 0x000000073f077290 */
/* 0x000fe200087fe43f */
/*0a70*/ FFMA R11, R17, R11, R14 ; /* 0x0000000b110b7223 */
/* 0x004fc8000000000e */
/*0a80*/ FFMA R11, R18, R15, R11 ; /* 0x0000000f120b7223 */
/* 0x008fe4000000000b */
/*0a90*/ IMAD.WIDE R16, R4, 0x4, R22 ; /* 0x0000000404107825 */
/* 0x000fc800078e0216 */
/*0aa0*/ FFMA R11, R24, R25, R11 ; /* 0x00000019180b7223 */
/* 0x010fc8000000000b */
/*0ab0*/ FFMA R14, R26, R27, R11 ; /* 0x0000001b1a0e7223 */
/* 0x020fe2000000000b */
/*0ac0*/ @P0 BRA 0x940 ; /* 0xfffffe7000000947 */
/* 0x000fea000383ffff */
/*0ad0*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fda0003f05270 */
/*0ae0*/ @!P0 BRA 0xbf0 ; /* 0x0000010000008947 */
/* 0x000fea0003800000 */
/*0af0*/ HFMA2.MMA R12, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0c7435 */
/* 0x000fe200000001ff */
/*0b00*/ LEA R9, R2, R5, 0x4 ; /* 0x0000000502097211 */
/* 0x006fe200078e20ff */
/*0b10*/ IMAD R3, R6, c[0x0][0x170], R3 ; /* 0x00005c0006037a24 */
/* 0x000fc800078e0203 */
/*0b20*/ IMAD R10, R9, c[0x0][0x170], R6 ; /* 0x00005c00090a7a24 */
/* 0x000fe200078e0206 */
/*0b30*/ LEA R3, R0, R3, 0x4 ; /* 0x0000000300037211 */
/* 0x000fc600078e20ff */
/*0b40*/ IMAD.WIDE R10, R10, R12, c[0x0][0x160] ; /* 0x000058000a0a7625 */
/* 0x000fc800078e020c */
/*0b50*/ IMAD.WIDE R12, R3, R12, c[0x0][0x168] ; /* 0x00005a00030c7625 */
/* 0x000fca00078e020c */
/*0b60*/ LDG.E R3, [R12.64] ; /* 0x000000040c037981 */
/* 0x0000a8000c1e1900 */
/*0b70*/ LDG.E R0, [R10.64] ; /* 0x000000040a007981 */
/* 0x0002a2000c1e1900 */
/*0b80*/ IADD3 R8, R8, -0x1, RZ ; /* 0xffffffff08087810 */
/* 0x000fc80007ffe0ff */
/*0b90*/ ISETP.NE.AND P0, PT, R8, RZ, PT ; /* 0x000000ff0800720c */
/* 0x000fe20003f05270 */
/*0ba0*/ IMAD.WIDE R12, R4, 0x4, R12 ; /* 0x00000004040c7825 */
/* 0x001fe200078e020c */
/*0bb0*/ IADD3 R10, P1, R10, 0x4, RZ ; /* 0x000000040a0a7810 */
/* 0x002fc80007f3e0ff */
/*0bc0*/ IADD3.X R11, RZ, R11, RZ, P1, !PT ; /* 0x0000000bff0b7210 */
/* 0x000fe20000ffe4ff */
/*0bd0*/ FFMA R14, R3, R0, R14 ; /* 0x00000000030e7223 */
/* 0x004fcc000000000e */
/*0be0*/ @P0 BRA 0xb60 ; /* 0xffffff7000000947 */
/* 0x000fea000383ffff */
/*0bf0*/ SHF.L.U32 R4, R4, 0x4, RZ ; /* 0x0000000404047819 */
/* 0x000fe200000006ff */
/*0c00*/ IMAD R5, R5, c[0x0][0x170], RZ ; /* 0x00005c0005057a24 */
/* 0x004fe200078e02ff */
/*0c10*/ MOV R3, 0x4 ; /* 0x0000000400037802 */
/* 0x000fc60000000f00 */
/*0c20*/ IMAD R4, R4, R2, R5 ; /* 0x0000000204047224 */
/* 0x002fca00078e0205 */
/*0c30*/ IADD3 R2, R7, R4, RZ ; /* 0x0000000407027210 */
/* 0x000fca0007ffe0ff */
/*0c40*/ IMAD.WIDE R2, R2, R3, c[0x0][0x178] ; /* 0x00005e0002027625 */
/* 0x000fca00078e0203 */
/*0c50*/ STG.E [R2.64], R14 ; /* 0x0000000e02007986 */
/* 0x000fe2000c101904 */
/*0c60*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0c70*/ BRA 0xc70; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0c80*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0c90*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ca0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cb0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cc0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cd0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0ce0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0cf0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z7gpuMultPfS_iS_
.globl _Z7gpuMultPfS_iS_
.p2align 8
.type _Z7gpuMultPfS_iS_,@function
_Z7gpuMultPfS_iS_:
s_load_b32 s3, s[0:1], 0x10
v_bfe_u32 v1, v0, 10, 10
v_and_b32_e32 v0, 0x3ff, v0
s_lshl_b32 s2, s14, 4
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_add_nc_u32_e32 v5, s2, v0
s_waitcnt lgkmcnt(0)
v_mul_lo_u32 v4, v1, s3
s_mul_i32 s4, s3, s15
s_delay_alu instid0(SALU_CYCLE_1)
s_lshl_b32 s2, s4, 4
s_cmp_lt_i32 s3, 1
s_cbranch_scc1 .LBB0_3
s_load_b128 s[4:7], s[0:1], 0x0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v0, s2, v4
v_mov_b32_e32 v6, 0
v_mov_b32_e32 v2, v5
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s4, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
s_mov_b32 s4, s3
.p2align 6
.LBB0_2:
v_ashrrev_i32_e32 v3, 31, v2
s_add_i32 s4, s4, -1
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
s_cmp_eq_u32 s4, 0
v_lshlrev_b64 v[7:8], 2, v[2:3]
v_add_nc_u32_e32 v2, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s6, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s7, v8, vcc_lo
global_load_b32 v3, v[0:1], off
global_load_b32 v7, v[7:8], off
v_add_co_u32 v0, vcc_lo, v0, 4
v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
s_waitcnt vmcnt(0)
v_fmac_f32_e32 v6, v3, v7
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_4
.LBB0_3:
v_mov_b32_e32 v6, 0
.LBB0_4:
s_load_b64 s[0:1], s[0:1], 0x18
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v0, v5, v4, s2
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v6, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z7gpuMultPfS_iS_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 32
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z7gpuMultPfS_iS_, .Lfunc_end0-_Z7gpuMultPfS_iS_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 32
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z7gpuMultPfS_iS_
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z7gpuMultPfS_iS_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00086c40_00000000-6_mult.cudafe1.cpp"
.text
#APP
.globl _ZSt21ios_base_library_initv
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB3804:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3804:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7cpuMultPfS_iS_
.type _Z7cpuMultPfS_iS_, @function
_Z7cpuMultPfS_iS_:
.LFB3800:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L11
pushq %r12
.cfi_def_cfa_offset 16
.cfi_offset 12, -16
pushq %rbp
.cfi_def_cfa_offset 24
.cfi_offset 6, -24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset 3, -32
movq %rsi, %rbx
movl %edx, %ebp
movq %rcx, %r9
movslq %edx, %r11
leaq 0(,%r11,4), %rcx
movq %rdi, %r10
leaq (%rdi,%rcx), %rsi
movl $0, %r12d
.L5:
movq %rbx, %r8
movl $0, %edi
.L8:
movq %r8, %rdx
movq %r10, %rax
pxor %xmm1, %xmm1
.L6:
movss (%rax), %xmm0
mulss (%rdx), %xmm0
addss %xmm0, %xmm1
addq $4, %rax
addq %rcx, %rdx
cmpq %rsi, %rax
jne .L6
movss %xmm1, (%r9,%rdi,4)
addq $1, %rdi
addq $4, %r8
cmpq %r11, %rdi
jne .L8
addl $1, %r12d
addq %rcx, %r9
addq %rcx, %r10
addq %rcx, %rsi
cmpl %r12d, %ebp
jne .L5
popq %rbx
.cfi_def_cfa_offset 24
popq %rbp
.cfi_def_cfa_offset 16
popq %r12
.cfi_def_cfa_offset 8
ret
.L11:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
ret
.cfi_endproc
.LFE3800:
.size _Z7cpuMultPfS_iS_, .-_Z7cpuMultPfS_iS_
.globl _Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_
.type _Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_, @function
_Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_:
.LFB3826:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %rcx, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movq %rsp, %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L18
.L14:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L19
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L18:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z7gpuMultPfS_iS_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L14
.L19:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3826:
.size _Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_, .-_Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_
.globl _Z7gpuMultPfS_iS_
.type _Z7gpuMultPfS_iS_, @function
_Z7gpuMultPfS_iS_:
.LFB3827:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3827:
.size _Z7gpuMultPfS_iS_, .-_Z7gpuMultPfS_iS_
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "Time CPU: "
.LC4:
.string " ms\n"
.LC5:
.string "Time GPU: "
.text
.globl main
.type main, @function
main:
.LFB3801:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
subq $80, %rsp
.cfi_def_cfa_offset 128
movq %fs:40, %rax
movq %rax, 72(%rsp)
xorl %eax, %eax
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbp
movl $4194304, %edi
call _Znam@PLT
movq %rax, %rbx
movl $4194304, %edi
call _Znam@PLT
movq %rax, %r12
movq %rbp, %rdi
movq %rbx, %rsi
movl $0, %ecx
.L23:
movl $0, %eax
.L24:
leal (%rcx,%rax), %edx
pxor %xmm0, %xmm0
cvtsi2ssl %edx, %xmm0
movss %xmm0, (%rdi,%rax,4)
movss %xmm0, (%rsi,%rax,4)
addq $1, %rax
cmpq $1024, %rax
jne .L24
addl $1024, %ecx
addq $4096, %rdi
addq $4096, %rsi
cmpl $1048576, %ecx
jne .L23
call clock@PLT
movq %rax, %r13
movq %r12, %rcx
movl $1024, %edx
movq %rbx, %rsi
movq %rbp, %rdi
call _Z7cpuMultPfS_iS_
call clock@PLT
movq %rax, %r14
leaq .LC1(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
movq %r14, %rax
subq %r13, %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
divsd .LC2(%rip), %xmm0
mulsd .LC3(%rip), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq $0, 8(%rsp)
movq $0, 16(%rsp)
movq $0, 24(%rsp)
leaq 8(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 16(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
leaq 24(%rsp), %rdi
movl $4194304, %esi
call cudaMalloc@PLT
movl $1, 56(%rsp)
movl $64, 60(%rsp)
movl $64, 64(%rsp)
movl $1, 68(%rsp)
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
leaq 40(%rsp), %rdi
call cudaEventCreate@PLT
movl $0x00000000, 4(%rsp)
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl $1, %ecx
movl $4194304, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $4194304, %edx
movq %rbx, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl $16, 48(%rsp)
movl $16, 52(%rsp)
movl 56(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 48(%rsp), %rdx
movq 60(%rsp), %rdi
movl 68(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L30
.L26:
movl $2, %ecx
movl $4194304, %edx
movq 24(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl $0, %esi
movq 40(%rsp), %rdi
call cudaEventRecord@PLT
movq 40(%rsp), %rdi
call cudaEventSynchronize@PLT
leaq 4(%rsp), %rdi
movq 40(%rsp), %rdx
movq 32(%rsp), %rsi
call cudaEventElapsedTime@PLT
leaq .LC5(%rip), %rsi
leaq _ZSt4cout(%rip), %rdi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq %rax, %rdi
pxor %xmm0, %xmm0
cvtss2sd 4(%rsp), %xmm0
call _ZNSo9_M_insertIdEERSoT_@PLT
movq %rax, %rdi
leaq .LC4(%rip), %rsi
call _ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@PLT
movq 32(%rsp), %rdi
call cudaEventDestroy@PLT
movq 40(%rsp), %rdi
call cudaEventDestroy@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call cudaFree@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movl $4, %esi
movq %rbp, %rdi
call _ZdlPvm@PLT
movl $4, %esi
movq %rbx, %rdi
call _ZdlPvm@PLT
movl $4, %esi
movq %r12, %rdi
call _ZdlPvm@PLT
movq 72(%rsp), %rax
subq %fs:40, %rax
jne .L31
movl $0, %eax
addq $80, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L30:
.cfi_restore_state
movq 24(%rsp), %rcx
movl $1024, %edx
movq 16(%rsp), %rsi
movq 8(%rsp), %rdi
call _Z31__device_stub__Z7gpuMultPfS_iS_PfS_iS_
jmp .L26
.L31:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE3801:
.size main, .-main
.section .rodata.str1.1
.LC6:
.string "_Z7gpuMultPfS_iS_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB3829:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC6(%rip), %rdx
movq %rdx, %rcx
leaq _Z7gpuMultPfS_iS_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE3829:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC2:
.long 0
.long 1093567616
.align 8
.LC3:
.long 0
.long 1083129856
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "mult.hip"
# Start of file scope inline assembly
.globl _ZSt21ios_base_library_initv
# End of file scope inline assembly
.globl _Z22__device_stub__gpuMultPfS_iS_ # -- Begin function _Z22__device_stub__gpuMultPfS_iS_
.p2align 4, 0x90
.type _Z22__device_stub__gpuMultPfS_iS_,@function
_Z22__device_stub__gpuMultPfS_iS_: # @_Z22__device_stub__gpuMultPfS_iS_
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 4(%rsp)
movq %rcx, 56(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 4(%rsp), %rax
movq %rax, 96(%rsp)
leaq 56(%rsp), %rax
movq %rax, 104(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z7gpuMultPfS_iS_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z22__device_stub__gpuMultPfS_iS_, .Lfunc_end0-_Z22__device_stub__gpuMultPfS_iS_
.cfi_endproc
# -- End function
.globl _Z7cpuMultPfS_iS_ # -- Begin function _Z7cpuMultPfS_iS_
.p2align 4, 0x90
.type _Z7cpuMultPfS_iS_,@function
_Z7cpuMultPfS_iS_: # @_Z7cpuMultPfS_iS_
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB1_8
# %bb.1: # %.preheader29.lr.ph
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl %edx, %eax
leaq (,%rax,4), %r8
xorl %r9d, %r9d
xorl %r10d, %r10d
.p2align 4, 0x90
.LBB1_2: # %.preheader29
# =>This Loop Header: Depth=1
# Child Loop BB1_3 Depth 2
# Child Loop BB1_4 Depth 3
movl %r9d, %r11d
leaq (%rdi,%r11,4), %r11
movq %r10, %rbx
imulq %rax, %rbx
leaq (%rcx,%rbx,4), %rbx
movq %rsi, %r14
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_3: # %.preheader
# Parent Loop BB1_2 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB1_4 Depth 3
xorps %xmm0, %xmm0
movq %r14, %r12
xorl %r13d, %r13d
.p2align 4, 0x90
.LBB1_4: # Parent Loop BB1_2 Depth=1
# Parent Loop BB1_3 Depth=2
# => This Inner Loop Header: Depth=3
movss (%r11,%r13,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%r12), %xmm1
addss %xmm1, %xmm0
incq %r13
addq %r8, %r12
cmpq %r13, %rax
jne .LBB1_4
# %bb.5: # %._crit_edge
# in Loop: Header=BB1_3 Depth=2
movss %xmm0, (%rbx,%r15,4)
incq %r15
addq $4, %r14
cmpq %rax, %r15
jne .LBB1_3
# %bb.6: # %._crit_edge33
# in Loop: Header=BB1_2 Depth=1
incq %r10
addl %edx, %r9d
cmpq %rax, %r10
jne .LBB1_2
# %bb.7:
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r12
.cfi_restore %r13
.cfi_restore %r14
.cfi_restore %r15
.LBB1_8: # %._crit_edge35
retq
.Lfunc_end1:
.size _Z7cpuMultPfS_iS_, .Lfunc_end1-_Z7cpuMultPfS_iS_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI2_0:
.quad 0x412e848000000000 # double 1.0E+6
.LCPI2_1:
.quad 0x408f400000000000 # double 1000
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r13
.cfi_def_cfa_offset 32
pushq %r12
.cfi_def_cfa_offset 40
pushq %rbx
.cfi_def_cfa_offset 48
subq $160, %rsp
.cfi_def_cfa_offset 208
.cfi_offset %rbx, -48
.cfi_offset %r12, -40
.cfi_offset %r13, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %rbx
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r14
movl $4194304, %edi # imm = 0x400000
callq _Znam
movq %rax, %r15
xorl %eax, %eax
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB2_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB2_2 Depth 2
movl $1024, %edx # imm = 0x400
movq %rax, %rsi
.p2align 4, 0x90
.LBB2_2: # Parent Loop BB2_1 Depth=1
# => This Inner Loop Header: Depth=2
xorps %xmm0, %xmm0
cvtsi2ss %esi, %xmm0
movss %xmm0, (%rbx,%rsi,4)
movss %xmm0, (%r14,%rsi,4)
incq %rsi
decq %rdx
jne .LBB2_2
# %bb.3: # in Loop: Header=BB2_1 Depth=1
incq %rcx
addq $1024, %rax # imm = 0x400
cmpq $1024, %rcx # imm = 0x400
jne .LBB2_1
# %bb.4:
xorl %r13d, %r13d
callq clock
movq %rax, %r12
movq %rbx, %rax
.p2align 4, 0x90
.LBB2_5: # %.preheader29.i
# =>This Loop Header: Depth=1
# Child Loop BB2_6 Depth 2
# Child Loop BB2_7 Depth 3
movq %r13, %rcx
shlq $12, %rcx
addq %r15, %rcx
movq %r14, %rdx
xorl %esi, %esi
.p2align 4, 0x90
.LBB2_6: # %.preheader.i
# Parent Loop BB2_5 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB2_7 Depth 3
xorps %xmm0, %xmm0
movq %rdx, %rdi
xorl %r8d, %r8d
.p2align 4, 0x90
.LBB2_7: # Parent Loop BB2_5 Depth=1
# Parent Loop BB2_6 Depth=2
# => This Inner Loop Header: Depth=3
movss (%rax,%r8,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
mulss (%rdi), %xmm1
addss %xmm1, %xmm0
incq %r8
addq $4096, %rdi # imm = 0x1000
cmpq $1024, %r8 # imm = 0x400
jne .LBB2_7
# %bb.8: # %._crit_edge.i
# in Loop: Header=BB2_6 Depth=2
movss %xmm0, (%rcx,%rsi,4)
incq %rsi
addq $4, %rdx
cmpq $1024, %rsi # imm = 0x400
jne .LBB2_6
# %bb.9: # %._crit_edge33.i
# in Loop: Header=BB2_5 Depth=1
incq %r13
addq $4096, %rax # imm = 0x1000
cmpq $1024, %r13 # imm = 0x400
jne .LBB2_5
# %bb.10: # %_Z7cpuMultPfS_iS_.exit
callq clock
movq %rax, %r13
movl $_ZSt4cout, %edi
movl $.L.str, %esi
movl $10, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
subq %r12, %r13
xorps %xmm0, %xmm0
cvtsi2sd %r13, %xmm0
divsd .LCPI2_0(%rip), %xmm0
mulsd .LCPI2_1(%rip), %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.1, %esi
movl $4, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq $0, 32(%rsp)
movq $0, 24(%rsp)
movq $0, 16(%rsp)
leaq 32(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 24(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 16(%rsp), %rdi
movl $4194304, %esi # imm = 0x400000
callq hipMalloc
leaq 40(%rsp), %rdi
callq hipEventCreate
leaq 8(%rsp), %rdi
callq hipEventCreate
movl $0, 4(%rsp)
movq 40(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 32(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 24(%rsp), %rdi
movl $4194304, %edx # imm = 0x400000
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movabsq $274877907008, %rdi # imm = 0x4000000040
movabsq $68719476752, %rdx # imm = 0x1000000010
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB2_12
# %bb.11:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq 16(%rsp), %rdx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movl $1024, 52(%rsp) # imm = 0x400
movq %rdx, 104(%rsp)
leaq 120(%rsp), %rax
movq %rax, 128(%rsp)
leaq 112(%rsp), %rax
movq %rax, 136(%rsp)
leaq 52(%rsp), %rax
movq %rax, 144(%rsp)
leaq 104(%rsp), %rax
movq %rax, 152(%rsp)
leaq 88(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 88(%rsp), %rsi
movl 96(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 128(%rsp), %r9
movl $_Z7gpuMultPfS_iS_, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB2_12:
movq 16(%rsp), %rsi
movl $4194304, %edx # imm = 0x400000
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
movq 8(%rsp), %rdi
callq hipEventSynchronize
movq 40(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 4(%rsp), %rdi
callq hipEventElapsedTime
movl $_ZSt4cout, %edi
movl $.L.str.2, %esi
movl $10, %edx
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movss 4(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $_ZSt4cout, %edi
callq _ZNSo9_M_insertIdEERSoT_
movl $.L.str.1, %esi
movl $4, %edx
movq %rax, %rdi
callq _ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l
movq 40(%rsp), %rdi
callq hipEventDestroy
movq 8(%rsp), %rdi
callq hipEventDestroy
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq _ZdlPv
movq %r14, %rdi
callq _ZdlPv
movq %r15, %rdi
callq _ZdlPv
xorl %eax, %eax
addq $160, %rsp
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.Lfunc_end2:
.size main, .Lfunc_end2-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB3_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB3_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z7gpuMultPfS_iS_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end3:
.size __hip_module_ctor, .Lfunc_end3-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB4_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB4_2:
retq
.Lfunc_end4:
.size __hip_module_dtor, .Lfunc_end4-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z7gpuMultPfS_iS_,@object # @_Z7gpuMultPfS_iS_
.section .rodata,"a",@progbits
.globl _Z7gpuMultPfS_iS_
.p2align 3, 0x0
_Z7gpuMultPfS_iS_:
.quad _Z22__device_stub__gpuMultPfS_iS_
.size _Z7gpuMultPfS_iS_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Time CPU: "
.size .L.str, 11
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz " ms\n"
.size .L.str.1, 5
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "Time GPU: "
.size .L.str.2, 11
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z7gpuMultPfS_iS_"
.size .L__unnamed_1, 18
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z22__device_stub__gpuMultPfS_iS_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z7gpuMultPfS_iS_
.addrsig_sym _ZSt4cout
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(int *a, int *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// c[id]=0;
// Make sure we do not go out of bounds
if (id < n)
*c+= a[id];
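// NOTE: this unsynchronized read-modify-write of *c is a data race when more than one thread runs; a correct sum would need atomicAdd(c, a[id])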
// printf("\n%d", c[id]);
}
int main( int argc, char* argv[] )
{
// Size of vectors
// int n = 100000;
int n=5;
const int size = n * sizeof(int);
// Host input vectors
int *h_a;
// double *h_b;
//Host output vector
int *h_c;
// Device input vectors
int *d_a;
//double *d_b;
//Device output vector
int *d_c;
int dev=0;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
//h_a = (int*)malloc(bytes);
//h_b = (double*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
// cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
printf("Input array");
// Initialize vectors on host
/*for( i = 0; i < n; i++ ) {
// h_a[i] = sin(i)*sin(i);
//printf("\n",i);
h_a[i]=i;
//printf("\n%d", h_a[i]);
//h_b[i]=i;
//h_b[i] = cos(i)*cos(i);
}*/
int a[]= {0, 1, 2, 3, 4};
cudaMalloc(&h_a, size);
// Copy host vectors to device
cudaMemcpy( h_a, a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_c, &dev, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 2;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a,d_c,n);
int result;
// Copy array back to host
cudaMemcpy( &result,d_c, sizeof(int), cudaMemcpyDeviceToHost );
// Print the value the kernel accumulated into *d_c (intended to be the sum of a[0..n-1])
double sum = 0;
//for(i=0; i<n; i++)
// sum += h_c[i];
printf("final result: %d\n",result );
// vecdev<<<gridSize, blockSize>>>(d_a,d_c, n);
// Release device memory
cudaFree(d_a);
//cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
//free(h_b);
free(h_c);
return 0;
} | code for sm_80
Function : _Z6vecAddPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0070*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */
/* 0x000fe200078e00ff */
/*0080*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */
/* 0x000fe20000000f00 */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*00a0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ea6000c1e1900 */
/*00b0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*00c0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea4000c1e1900 */
/*00d0*/ IMAD.IADD R7, R0, 0x1, R3 ; /* 0x0000000100077824 */
/* 0x004fca00078e0203 */
/*00e0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
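The vecAdd kernel in this record updates *c with a plain read-modify-write: the SASS above loads *c and a[id] with LDG.E, adds them with IMAD.IADD, and stores the result back with STG.E (instructions 0x00a0 through 0x00e0), so concurrent threads can lose updates. As an illustration only, a race-free variant would funnel the accumulation through atomicAdd; the kernel name vecAddAtomic below is invented for this sketch and is not the kernel that produced the listing above.

__global__ void vecAddAtomic(int *a, int *c, int n)
{
    // Same indexing as vecAdd: one element per thread
    int id = blockIdx.x*blockDim.x + threadIdx.x;
    if (id < n)
        atomicAdd(c, a[id]);   // hardware-serialized add into *c, so no lost updates
}

Compiled for sm_80, the atomic update would typically appear as a single RED.E.ADD (or ATOMG) instruction in place of the LDG.E/STG.E pair seen in the listing.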
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(int *a, int *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// c[id]=0;
// Make sure we do not go out of bounds
if (id < n)
*c+= a[id];
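// NOTE: this unsynchronized read-modify-write of *c is a data race when more than one thread runs; a correct sum would need atomicAdd(c, a[id])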
// printf("\n%d", c[id]);
}
int main( int argc, char* argv[] )
{
// Size of vectors
// int n = 100000;
int n=5;
const int size = n * sizeof(int);
// Host input vectors
int *h_a;
// double *h_b;
//Host output vector
int *h_c;
// Device input vectors
int *d_a;
//double *d_b;
//Device output vector
int *d_c;
int dev=0;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
//h_a = (int*)malloc(bytes);
//h_b = (double*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
// cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
printf("Input array");
// Initialize vectors on host
/*for( i = 0; i < n; i++ ) {
// h_a[i] = sin(i)*sin(i);
//printf("\n",i);
h_a[i]=i;
//printf("\n%d", h_a[i]);
//h_b[i]=i;
//h_b[i] = cos(i)*cos(i);
}*/
int a[]= {0, 1, 2, 3, 4};
cudaMalloc(&h_a, size);
// Copy host vectors to device
cudaMemcpy( h_a, a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_c, &dev, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 2;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a,d_c,n);
int result;
// Copy array back to host
cudaMemcpy( &result,d_c, sizeof(int), cudaMemcpyDeviceToHost );
// Print the value the kernel accumulated into *d_c (intended to be the sum of a[0..n-1])
double sum = 0;
//for(i=0; i<n; i++)
// sum += h_c[i];
printf("final result: %d\n",result );
// vecdev<<<gridSize, blockSize>>>(d_a,d_c, n);
// Release device memory
cudaFree(d_a);
//cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
//free(h_b);
free(h_c);
return 0;
} | .file "tmpxft_000488c0_00000000-6_cuda_deviationvalue.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z28__device_stub__Z6vecAddPiS_iPiS_i
.type _Z28__device_stub__Z6vecAddPiS_iPiS_i, @function
_Z28__device_stub__Z6vecAddPiS_iPiS_i:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6vecAddPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z28__device_stub__Z6vecAddPiS_iPiS_i, .-_Z28__device_stub__Z6vecAddPiS_iPiS_i
.globl _Z6vecAddPiS_i
.type _Z6vecAddPiS_i, @function
_Z6vecAddPiS_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z6vecAddPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6vecAddPiS_i, .-_Z6vecAddPiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Input array"
.LC1:
.string "final result: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $0, 12(%rsp)
leaq 24(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, 64(%rsp)
movl $1, 68(%rsp)
movl $2, 72(%rsp)
movl $3, 76(%rsp)
movl $4, 80(%rsp)
leaq 16(%rsp), %rdi
movl $20, %esi
call cudaMalloc@PLT
leaq 64(%rsp), %rsi
movl $1, %ecx
movl $40, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 12(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $3, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 52(%rsp), %rdx
movl $1, %ecx
movq 40(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
leaq 52(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
movl 52(%rsp), %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call free@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L16
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl $5, %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z28__device_stub__Z6vecAddPiS_iPiS_i
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z6vecAddPiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z6vecAddPiS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
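A note on the row above: the generated host assembly is a faithful translation of the source (for example, the 40-byte copy shows up as movl $40, %edx and the literal "final result: %f\n" is emitted verbatim), but the source itself carries several host-side bugs. The input data is copied into h_a, a cudaMalloc'd buffer, while the kernel is launched with the never-initialized d_a; bytes is computed as n*sizeof(double) (40 bytes), so the copy reads past the end of the 20-byte array a; result is an int printed with %f; and free() is called on the device pointer h_a. The sketch below is a minimal corrected host path under those observations; it assumes the vecAdd kernel from the row above and is illustrative only, not a replacement for the dataset row.

// Sketch: corrected host path for the vecAdd example above (kernel unchanged).
#include <cuda_runtime.h>
#include <stdio.h>

int main(void)
{
    const int n = 5;
    const size_t bytes = n * sizeof(int);          // 20 bytes, matching the int buffers

    int a[] = {0, 1, 2, 3, 4};
    int result = 0;

    int *d_a = NULL, *d_c = NULL;
    cudaMalloc(&d_a, bytes);                       // device input vector
    cudaMalloc(&d_c, sizeof(int));                 // single accumulator cell

    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);            // fill the buffer the kernel reads
    cudaMemcpy(d_c, &result, sizeof(int), cudaMemcpyHostToDevice);

    const int blockSize = 2;
    const int gridSize = (n + blockSize - 1) / blockSize;
    vecAdd<<<gridSize, blockSize>>>(d_a, d_c, n);

    cudaMemcpy(&result, d_c, sizeof(int), cudaMemcpyDeviceToHost);
    printf("final result: %d\n", result);          // %d, since result is an int

    cudaFree(d_a);
    cudaFree(d_c);
    return 0;
}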
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(int *a, int *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// c[id]=0;
// Make sure we do not go out of bounds
if (id < n)
*c+= a[id];
// printf("\n%d", c[id]);
}
int main( int argc, char* argv[] )
{
// Size of vectors
// int n = 100000;
int n=5;
const int size = n * sizeof(int);
// Host input vectors
int *h_a;
// double *h_b;
//Host output vector
int *h_c;
// Device input vectors
int *d_a;
//double *d_b;
//Device output vector
int *d_c;
int dev=0;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
//h_a = (int*)malloc(bytes);
//h_b = (double*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
// cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
printf("Input array");
// Initialize vectors on host
/*for( i = 0; i < n; i++ ) {
// h_a[i] = sin(i)*sin(i);
//printf("\n",i);
h_a[i]=i;
//printf("\n%d", h_a[i]);
//h_b[i]=i;
//h_b[i] = cos(i)*cos(i);
}*/
int a[]= {0, 1, 2, 3, 4};
cudaMalloc(&h_a, size);
// Copy host vectors to device
cudaMemcpy( h_a, a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_c, &dev, sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 2;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a,d_c,n);
int result;
// Copy array back to host
cudaMemcpy( &result,d_c, sizeof(int), cudaMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
//for(i=0; i<n; i++)
// sum += h_c[i];
printf("final result: %f\n",result );
// vecdev<<<gridSize, blockSize>>>(d_a,d_c, n);
// Release device memory
cudaFree(d_a);
//cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
//free(h_b);
free(h_c);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(int *a, int *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// c[id]=0;
// Make sure we do not go out of bounds
if (id < n)
*c+= a[id];
// printf("\n%d", c[id]);
}
int main( int argc, char* argv[] )
{
// Size of vectors
// int n = 100000;
int n=5;
const int size = n * sizeof(int);
// Host input vectors
int *h_a;
// double *h_b;
//Host output vector
int *h_c;
// Device input vectors
int *d_a;
//double *d_b;
//Device output vector
int *d_c;
int dev=0;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
//h_a = (int*)malloc(bytes);
//h_b = (double*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
// cudaMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
printf("Input array");
// Initialize vectors on host
/*for( i = 0; i < n; i++ ) {
// h_a[i] = sin(i)*sin(i);
//printf("\n",i);
h_a[i]=i;
//printf("\n%d", h_a[i]);
//h_b[i]=i;
//h_b[i] = cos(i)*cos(i);
}*/
int a[]= {0, 1, 2, 3, 4};
hipMalloc(&h_a, size);
// Copy host vectors to device
hipMemcpy( h_a, a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_c, &dev, sizeof(int), hipMemcpyHostToDevice);
// cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 2;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a,d_c,n);
int result;
// Copy array back to host
hipMemcpy( &result,d_c, sizeof(int), hipMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
//for(i=0; i<n; i++)
// sum += h_c[i];
printf("final result: %f\n",result );
// vecdev<<<gridSize, blockSize>>>(d_a,d_c, n);
// Release device memory
hipFree(d_a);
//cudaFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
//free(h_b);
free(h_c);
return 0;
} |
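A separate issue lives in the kernel itself, in both the CUDA input and the HIP output of this row: every thread with id < n performs *c += a[id] on the same location, which is an unsynchronized read-modify-write and therefore a data race whenever more than one thread passes the guard. A minimal hedged fix that keeps the signature, valid under both toolchains, is to make the accumulation atomic:

// Sketch: race-free variant of the kernel above; atomicAdd(int*, int) exists in both CUDA and HIP.
__global__ void vecAdd(int *a, int *c, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
        atomicAdd(c, a[id]);   // serialize the read-modify-write on *c
}

A shared-memory block reduction would scale better for large n, but atomicAdd is the smallest change that preserves the example's shape.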
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(int *a, int *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// c[id]=0;
// Make sure we do not go out of bounds
if (id < n)
*c+= a[id];
// printf("\n%d", c[id]);
}
int main( int argc, char* argv[] )
{
// Size of vectors
// int n = 100000;
int n=5;
const int size = n * sizeof(int);
// Host input vectors
int *h_a;
// double *h_b;
//Host output vector
int *h_c;
// Device input vectors
int *d_a;
//double *d_b;
//Device output vector
int *d_c;
int dev=0;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
//h_a = (int*)malloc(bytes);
//h_b = (double*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
// cudaMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
printf("Input array");
// Initialize vectors on host
/*for( i = 0; i < n; i++ ) {
// h_a[i] = sin(i)*sin(i);
//printf("\n",i);
h_a[i]=i;
//printf("\n%d", h_a[i]);
//h_b[i]=i;
//h_b[i] = cos(i)*cos(i);
}*/
int a[]= {0, 1, 2, 3, 4};
hipMalloc(&h_a, size);
// Copy host vectors to device
hipMemcpy( h_a, a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_c, &dev, sizeof(int), hipMemcpyHostToDevice);
// cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 2;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a,d_c,n);
int result;
// Copy array back to host
hipMemcpy( &result,d_c, sizeof(int), hipMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
//for(i=0; i<n; i++)
// sum += h_c[i];
printf("final result: %f\n",result );
// vecdev<<<gridSize, blockSize>>>(d_a,d_c, n);
// Release device memory
hipFree(d_a);
//cudaFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
//free(h_b);
free(h_c);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6vecAddPiS_i
.globl _Z6vecAddPiS_i
.p2align 8
.type _Z6vecAddPiS_i,@function
_Z6vecAddPiS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_load_b32 s0, s[2:3], 0x0
global_load_b32 v0, v[0:1], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_dual_mov_b32 v1, 0 :: v_dual_add_nc_u32 v0, s0, v0
global_store_b32 v1, v0, s[2:3]
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6vecAddPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6vecAddPiS_i, .Lfunc_end0-_Z6vecAddPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6vecAddPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6vecAddPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
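The gfx1100 listing above is compiled for 32-wide wavefronts (.amdhsa_wavefront_size32 1 and wavefront_size: 32 in the metadata), unlike the 64-wide waves of older GCN targets. Ported CUDA code that hard-codes 32 happens to match here, but querying the width is safer; a small hedged host-side check:

// Sketch: query the wavefront/warp width at runtime instead of assuming 32.
#include <hip/hip_runtime.h>
#include <cstdio>

int main()
{
    hipDeviceProp_t prop;
    if (hipGetDeviceProperties(&prop, 0) != hipSuccess)
        return 1;
    std::printf("device 0: %s, wavefront size %d\n", prop.name, prop.warpSize);
    return 0;
}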
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(int *a, int *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// c[id]=0;
// Make sure we do not go out of bounds
if (id < n)
*c+= a[id];
// printf("\n%d", c[id]);
}
int main( int argc, char* argv[] )
{
// Size of vectors
// int n = 100000;
int n=5;
const int size = n * sizeof(int);
// Host input vectors
int *h_a;
// double *h_b;
//Host output vector
int *h_c;
// Device input vectors
int *d_a;
//double *d_b;
//Device output vector
int *d_c;
int dev=0;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
//h_a = (int*)malloc(bytes);
//h_b = (double*)malloc(bytes);
h_c = (int*)malloc(bytes);
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
// cudaMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
printf("Input array");
// Initialize vectors on host
/*for( i = 0; i < n; i++ ) {
// h_a[i] = sin(i)*sin(i);
//printf("\n",i);
h_a[i]=i;
//printf("\n%d", h_a[i]);
//h_b[i]=i;
//h_b[i] = cos(i)*cos(i);
}*/
int a[]= {0, 1, 2, 3, 4};
hipMalloc(&h_a, size);
// Copy host vectors to device
hipMemcpy( h_a, a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_c, &dev, sizeof(int), hipMemcpyHostToDevice);
// cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 2;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a,d_c,n);
int result;
// Copy array back to host
hipMemcpy( &result,d_c, sizeof(int), hipMemcpyDeviceToHost );
// Sum up vector c and print result divided by n, this should equal 1 within error
double sum = 0;
//for(i=0; i<n; i++)
// sum += h_c[i];
printf("final result: %f\n",result );
// vecdev<<<gridSize, blockSize>>>(d_a,d_c, n);
// Release device memory
hipFree(d_a);
//cudaFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
//free(h_b);
free(h_c);
return 0;
} | .text
.file "cuda_deviationvalue.hip"
.globl _Z21__device_stub__vecAddPiS_i # -- Begin function _Z21__device_stub__vecAddPiS_i
.p2align 4, 0x90
.type _Z21__device_stub__vecAddPiS_i,@function
_Z21__device_stub__vecAddPiS_i: # @_Z21__device_stub__vecAddPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6vecAddPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z21__device_stub__vecAddPiS_i, .Lfunc_end0-_Z21__device_stub__vecAddPiS_i
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function main
.LCPI1_0:
.long 0 # 0x0
.long 1 # 0x1
.long 2 # 0x2
.long 3 # 0x3
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl $0, 8(%rsp)
leaq 16(%rsp), %rdi
movl $40, %esi
callq hipMalloc
movq %rsp, %rdi
movl $40, %esi
callq hipMalloc
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movaps .LCPI1_0(%rip), %xmm0 # xmm0 = [0,1,2,3]
movaps %xmm0, 128(%rsp)
movl $4, 144(%rsp)
leaq 24(%rsp), %rdi
movl $20, %esi
callq hipMalloc
movq 24(%rsp), %rdi
leaq 128(%rsp), %rsi
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
leaq 8(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967298, %rdx # imm = 0x100000002
leaq 1(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movl $5, 12(%rsp)
leaq 120(%rsp), %rax
movq %rax, 32(%rsp)
leaq 112(%rsp), %rax
movq %rax, 40(%rsp)
leaq 12(%rsp), %rax
movq %rax, 48(%rsp)
leaq 96(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 32(%rsp), %r9
movl $_Z6vecAddPiS_i, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq (%rsp), %rsi
leaq 32(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl 32(%rsp), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movq 16(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq free
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6vecAddPiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6vecAddPiS_i,@object # @_Z6vecAddPiS_i
.section .rodata,"a",@progbits
.globl _Z6vecAddPiS_i
.p2align 3, 0x0
_Z6vecAddPiS_i:
.quad _Z21__device_stub__vecAddPiS_i
.size _Z6vecAddPiS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Input array"
.size .L.str, 12
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "final result: %f\n"
.size .L.str.1, 18
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6vecAddPiS_i"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__vecAddPiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6vecAddPiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
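In the host assembly above, the vecAdd<<<gridSize, blockSize>>>(d_a, d_c, n) launch is lowered to a __hipPushCallConfiguration / __hipPopCallConfiguration pair around hipLaunchKernel, mirroring the __cudaPushCallConfiguration / cudaLaunchKernel pattern in the NVCC output earlier in this table. The same launch can be spelled without chevrons through HIP's documented macro; a hedged equivalent of that one call site (it assumes the surrounding main() from the row above, with its vecAdd, d_a, d_c, gridSize and blockSize in scope):

// Sketch: chevron-free launch equivalent to vecAdd<<<gridSize, blockSize>>>(d_a, d_c, n).
hipLaunchKernelGGL(vecAdd,
                   dim3(gridSize), dim3(blockSize),
                   0 /* dynamic LDS bytes */, 0 /* default stream */,
                   d_a, d_c, n);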
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6vecAddPiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R2, SR_CTAID.X ; /* 0x0000000000027919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R2, R2, c[0x0][0x0], R3 ; /* 0x0000000002027a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; /* 0x00005c0002007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R3, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff037435 */
/* 0x000fe200000001ff */
/*0070*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x168] ; /* 0x00005a00ff047624 */
/* 0x000fe200078e00ff */
/*0080*/ MOV R5, c[0x0][0x16c] ; /* 0x00005b0000057a02 */
/* 0x000fe20000000f00 */
/*0090*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fc80000000a00 */
/*00a0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ea6000c1e1900 */
/*00b0*/ IMAD.WIDE R2, R2, R3, c[0x0][0x160] ; /* 0x0000580002027625 */
/* 0x000fcc00078e0203 */
/*00c0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea4000c1e1900 */
/*00d0*/ IMAD.IADD R7, R0, 0x1, R3 ; /* 0x0000000100077824 */
/* 0x004fca00078e0203 */
/*00e0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00f0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0100*/ BRA 0x100; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6vecAddPiS_i
.globl _Z6vecAddPiS_i
.p2align 8
.type _Z6vecAddPiS_i,@function
_Z6vecAddPiS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v0, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_load_b32 s0, s[2:3], 0x0
global_load_b32 v0, v[0:1], off
s_waitcnt vmcnt(0) lgkmcnt(0)
v_dual_mov_b32 v1, 0 :: v_dual_add_nc_u32 v0, s0, v0
global_store_b32 v1, v0, s[2:3]
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6vecAddPiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 3
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6vecAddPiS_i, .Lfunc_end0-_Z6vecAddPiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6vecAddPiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6vecAddPiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 3
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
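Both ISA listings in this row implement the same three steps of the vecAdd kernel; only the guarding mechanism differs. The sm_80 code predicates an early exit on a predicate register, while the gfx1100 code folds the comparison into the exec mask and branches over the body when no lane survives. A hedged source-to-instruction mapping, with instruction names taken from the listings above:

// Sketch: the kernel annotated with where each step lands in the two listings above.
__global__ void vecAdd(int *a, int *c, int n)
{
    // id = blockIdx.x * blockDim.x + threadIdx.x
    //   sm_80  : S2R SR_CTAID.X, S2R SR_TID.X, IMAD R2, R2, c[0x0][0x0], R3
    //   gfx1100: v_mad_u64_u32 v[1:2], null, s15, blockDim.x, v[0:1]
    int id = blockIdx.x * blockDim.x + threadIdx.x;

    // bounds guard: id < n
    //   sm_80  : ISETP.GE.AND P0, PT, R2, c[0x0][0x170], PT ; @P0 EXIT
    //   gfx1100: v_cmpx_gt_i32 s3, v1 (updates exec) ; s_cbranch_execz .LBB0_2
    if (id < n)
        // load *c and a[id], add, store back to *c
        //   sm_80  : LDG.E (*c), IMAD.WIDE + LDG.E (a[id]), IMAD.IADD, STG.E
        //   gfx1100: s_load_b32 (*c has a uniform address, so a scalar load),
        //            global_load_b32 (a[id]), v_dual_add_nc_u32, global_store_b32
        *c += a[id];
}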
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000488c0_00000000-6_cuda_deviationvalue.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z28__device_stub__Z6vecAddPiS_iPiS_i
.type _Z28__device_stub__Z6vecAddPiS_iPiS_i, @function
_Z28__device_stub__Z6vecAddPiS_iPiS_i:
.LFB2082:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6vecAddPiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z28__device_stub__Z6vecAddPiS_iPiS_i, .-_Z28__device_stub__Z6vecAddPiS_iPiS_i
.globl _Z6vecAddPiS_i
.type _Z6vecAddPiS_i, @function
_Z6vecAddPiS_i:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z28__device_stub__Z6vecAddPiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z6vecAddPiS_i, .-_Z6vecAddPiS_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "Input array"
.LC1:
.string "final result: %f\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %fs:40, %rax
movq %rax, 88(%rsp)
xorl %eax, %eax
movl $0, 12(%rsp)
leaq 24(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $40, %esi
call cudaMalloc@PLT
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, 64(%rsp)
movl $1, 68(%rsp)
movl $2, 72(%rsp)
movl $3, 76(%rsp)
movl $4, 80(%rsp)
leaq 16(%rsp), %rdi
movl $20, %esi
call cudaMalloc@PLT
leaq 64(%rsp), %rsi
movl $1, %ecx
movl $40, %edx
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
leaq 12(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 32(%rsp), %rdi
call cudaMemcpy@PLT
movl $2, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $3, 40(%rsp)
movl $1, 44(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 52(%rsp), %rdx
movl $1, %ecx
movq 40(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L15
.L12:
leaq 52(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 32(%rsp), %rsi
call cudaMemcpy@PLT
movl 52(%rsp), %edx
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq 16(%rsp), %rdi
call free@PLT
movq 88(%rsp), %rax
subq %fs:40, %rax
jne .L16
movl $0, %eax
addq $104, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L15:
.cfi_restore_state
movl $5, %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z28__device_stub__Z6vecAddPiS_iPiS_i
jmp .L12
.L16:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC2:
.string "_Z6vecAddPiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC2(%rip), %rdx
movq %rdx, %rcx
leaq _Z6vecAddPiS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cuda_deviationvalue.hip"
.globl _Z21__device_stub__vecAddPiS_i # -- Begin function _Z21__device_stub__vecAddPiS_i
.p2align 4, 0x90
.type _Z21__device_stub__vecAddPiS_i,@function
_Z21__device_stub__vecAddPiS_i: # @_Z21__device_stub__vecAddPiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6vecAddPiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z21__device_stub__vecAddPiS_i, .Lfunc_end0-_Z21__device_stub__vecAddPiS_i
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function main
.LCPI1_0:
.long 0 # 0x0
.long 1 # 0x1
.long 2 # 0x2
.long 3 # 0x3
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movl $0, 8(%rsp)
leaq 16(%rsp), %rdi
movl $40, %esi
callq hipMalloc
movq %rsp, %rdi
movl $40, %esi
callq hipMalloc
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movaps .LCPI1_0(%rip), %xmm0 # xmm0 = [0,1,2,3]
movaps %xmm0, 128(%rsp)
movl $4, 144(%rsp)
leaq 24(%rsp), %rdi
movl $20, %esi
callq hipMalloc
movq 24(%rsp), %rdi
leaq 128(%rsp), %rsi
movl $40, %edx
movl $1, %ecx
callq hipMemcpy
movq (%rsp), %rdi
leaq 8(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
movabsq $4294967298, %rdx # imm = 0x100000002
leaq 1(%rdx), %rdi
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.1:
movq 16(%rsp), %rax
movq (%rsp), %rcx
movq %rax, 120(%rsp)
movq %rcx, 112(%rsp)
movl $5, 12(%rsp)
leaq 120(%rsp), %rax
movq %rax, 32(%rsp)
leaq 112(%rsp), %rax
movq %rax, 40(%rsp)
leaq 12(%rsp), %rax
movq %rax, 48(%rsp)
leaq 96(%rsp), %rdi
leaq 80(%rsp), %rsi
leaq 72(%rsp), %rdx
leaq 64(%rsp), %rcx
callq __hipPopCallConfiguration
movq 96(%rsp), %rsi
movl 104(%rsp), %edx
movq 80(%rsp), %rcx
movl 88(%rsp), %r8d
leaq 32(%rsp), %r9
movl $_Z6vecAddPiS_i, %edi
pushq 64(%rsp)
.cfi_adjust_cfa_offset 8
pushq 80(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB1_2:
movq (%rsp), %rsi
leaq 32(%rsp), %rdi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
movl 32(%rsp), %esi
movl $.L.str.1, %edi
xorl %eax, %eax
callq printf
movq 16(%rsp), %rdi
callq hipFree
movq (%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq free
xorl %eax, %eax
addq $152, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6vecAddPiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z6vecAddPiS_i,@object # @_Z6vecAddPiS_i
.section .rodata,"a",@progbits
.globl _Z6vecAddPiS_i
.p2align 3, 0x0
_Z6vecAddPiS_i:
.quad _Z21__device_stub__vecAddPiS_i
.size _Z6vecAddPiS_i, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "Input array"
.size .L.str, 12
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "final result: %f\n"
.size .L.str.1, 18
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z6vecAddPiS_i"
.size .L__unnamed_1, 15
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z21__device_stub__vecAddPiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z6vecAddPiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
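One property shared by the NVCC input and the HIP output in this row: none of the allocation or copy calls in main() has its return value checked (only the push-call-configuration result is tested, to decide whether to launch). A common practice when hand-editing such ports is to wrap every runtime call; a hedged helper, using only documented HIP API (hipError_t, hipGetErrorString), where the macro name HIP_CHECK is a local convention and not part of the HIP API:

// Sketch: minimal error checking for the HIP runtime calls used above.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

#define HIP_CHECK(expr)                                                     \
    do {                                                                    \
        hipError_t err_ = (expr);                                           \
        if (err_ != hipSuccess) {                                           \
            std::fprintf(stderr, "%s failed: %s\n", #expr,                  \
                         hipGetErrorString(err_));                          \
            std::exit(EXIT_FAILURE);                                        \
        }                                                                   \
    } while (0)

// Usage: HIP_CHECK(hipMalloc(&d_a, bytes));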
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include "includes.h"
__global__ void one_channel_mul_kernel(const float *data_l, const float *data_r, float *result, int channel_total, int total)
{
int idx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
int one_ch_idx = idx % (2 * channel_total);
if (idx / 2 < total) {
result[idx] = data_l[idx] * data_r[one_ch_idx] - data_l[idx + 1] * data_r[one_ch_idx + 1];
result[idx + 1] = data_l[idx] * data_r[one_ch_idx + 1] + data_l[idx + 1] * data_r[one_ch_idx];
}
} | code for sm_80
Function : _Z22one_channel_mul_kernelPKfS0_Pfii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc800078e0203 */
/*0040*/ IMAD.SHL.U32 R0, R0, 0x2, RZ ; /* 0x0000000200007824 */
/* 0x000fca00078e00ff */
/*0050*/ SHF.R.S32.HI R2, RZ, 0x1, R0 ; /* 0x00000001ff027819 */
/* 0x000fc80000011400 */
/*0060*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x17c], PT ; /* 0x00005f0002007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ MOV R4, c[0x0][0x178] ; /* 0x00005e0000047a02 */
/* 0x000fe20000000f00 */
/*0090*/ HFMA2.MMA R11, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0b7435 */
/* 0x000fe200000001ff */
/*00a0*/ IABS R8, R0 ; /* 0x0000000000087213 */
/* 0x000fe20000000000 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ ISETP.GE.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f46270 */
/*00d0*/ IMAD.SHL.U32 R4, R4, 0x2, RZ ; /* 0x0000000204047824 */
/* 0x000fca00078e00ff */
/*00e0*/ IABS R6, R4.reuse ; /* 0x0000000400067213 */
/* 0x080fe40000000000 */
/*00f0*/ IABS R9, R4 ; /* 0x0000000400097213 */
/* 0x000fe40000000000 */
/*0100*/ I2F.RP R5, R6 ; /* 0x0000000600057306 */
/* 0x000e300000209400 */
/*0110*/ MUFU.RCP R5, R5 ; /* 0x0000000500057308 */
/* 0x001e240000001000 */
/*0120*/ IADD3 R2, R5, 0xffffffe, RZ ; /* 0x0ffffffe05027810 */
/* 0x001fc40007ffe0ff */
/*0130*/ IADD3 R5, RZ, -R9, RZ ; /* 0x80000009ff057210 */
/* 0x000fc80007ffe0ff */
/*0140*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*0150*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fe200078e00ff */
/*0160*/ IADD3 R7, RZ, -R3, RZ ; /* 0x80000003ff077210 */
/* 0x002fca0007ffe0ff */
/*0170*/ IMAD R7, R7, R6, RZ ; /* 0x0000000607077224 */
/* 0x000fc800078e02ff */
/*0180*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fcc00078e0002 */
/*0190*/ IMAD.HI.U32 R3, R3, R8, RZ ; /* 0x0000000803037227 */
/* 0x000fc800078e00ff */
/*01a0*/ IMAD R3, R3, R5, R8 ; /* 0x0000000503037224 */
/* 0x000fca00078e0208 */
/*01b0*/ ISETP.GT.U32.AND P0, PT, R6, R3, PT ; /* 0x000000030600720c */
/* 0x000fda0003f04070 */
/*01c0*/ @!P0 IMAD.IADD R3, R3, 0x1, -R6 ; /* 0x0000000103038824 */
/* 0x000fe200078e0a06 */
/*01d0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fc80003f05270 */
/*01e0*/ ISETP.GT.U32.AND P1, PT, R6, R3, PT ; /* 0x000000030600720c */
/* 0x000fda0003f24070 */
/*01f0*/ @!P1 IADD3 R3, R3, -R6, RZ ; /* 0x8000000603039210 */
/* 0x000fca0007ffe0ff */
/*0200*/ IMAD.MOV.U32 R5, RZ, RZ, R3 ; /* 0x000000ffff057224 */
/* 0x000fe400078e0003 */
/*0210*/ IMAD.WIDE R2, R0, R11, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc600078e020b */
/*0220*/ @!P2 IADD3 R5, -R5, RZ, RZ ; /* 0x000000ff0505a210 */
/* 0x000fe40007ffe1ff */
/*0230*/ @!P0 LOP3.LUT R5, RZ, R4, RZ, 0x33, !PT ; /* 0x00000004ff058212 */
/* 0x000fe200078e33ff */
/*0240*/ LDG.E R7, [R2.64+0x4] ; /* 0x0000040402077981 */
/* 0x000ea8000c1e1900 */
/*0250*/ IMAD.WIDE R4, R5, R11, c[0x0][0x168] ; /* 0x00005a0005047625 */
/* 0x000fe200078e020b */
/*0260*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000ee8000c1e1900 */
/*0270*/ LDG.E R6, [R4.64+0x4] ; /* 0x0000040404067981 */
/* 0x000ea8000c1e1900 */
/*0280*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ee2000c1e1900 */
/*0290*/ FMUL R10, R6, R7 ; /* 0x00000007060a7220 */
/* 0x004fc40000400000 */
/*02a0*/ IMAD.WIDE R6, R0, R11, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e020b */
/*02b0*/ FFMA R9, R8, R9, -R10 ; /* 0x0000000908097223 */
/* 0x008fca000000080a */
/*02c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe8000c101904 */
/*02d0*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea8000c1e1900 */
/*02e0*/ LDG.E R13, [R2.64+0x4] ; /* 0x00000404020d7981 */
/* 0x000ea8000c1e1900 */
/*02f0*/ LDG.E R0, [R4.64+0x4] ; /* 0x0000040404007981 */
/* 0x000ee8000c1e1900 */
/*0300*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */
/* 0x000ee2000c1e1900 */
/*0310*/ FMUL R8, R8, R13 ; /* 0x0000000d08087220 */
/* 0x004fc80000400000 */
/*0320*/ FFMA R11, R0, R11, R8 ; /* 0x0000000b000b7223 */
/* 0x008fca0000000008 */
/*0330*/ STG.E [R6.64+0x4], R11 ; /* 0x0000040b06007986 */
/* 0x000fe2000c101904 */
/*0340*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0350*/ BRA 0x350; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
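The one_channel_mul_kernel in this row is an element-wise complex multiplication over interleaved (re, im) float pairs, with the right-hand operand indexed modulo one channel; the two stores compute (ac - bd) and (ad + bc). A hedged restatement with float2 makes the formula explicit (it assumes the buffers can be reinterpreted as float2 pairs, keeps one complex element per thread, and the _f2 name is only for illustration):

// Sketch: the same per-element complex product, written with float2 for clarity.
__global__ void one_channel_mul_kernel_f2(const float2 *l, const float2 *r,
                                          float2 *out, int channel_total, int total)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // one complex element per thread
    if (i < total) {
        float2 a = l[i];
        float2 b = r[i % channel_total];             // wrap within one channel
        out[i] = make_float2(a.x * b.x - a.y * b.y,  // real part
                             a.x * b.y + a.y * b.x); // imaginary part
    }
}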
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include "includes.h"
__global__ void one_channel_mul_kernel(const float *data_l, const float *data_r, float *result, int channel_total, int total)
{
int idx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
int one_ch_idx = idx % (2 * channel_total);
if (idx / 2 < total) {
result[idx] = data_l[idx] * data_r[one_ch_idx] - data_l[idx + 1] * data_r[one_ch_idx + 1];
result[idx + 1] = data_l[idx] * data_r[one_ch_idx + 1] + data_l[idx + 1] * data_r[one_ch_idx];
}
} | .file "tmpxft_000eb132_00000000-6_one_channel_mul_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z50__device_stub__Z22one_channel_mul_kernelPKfS0_PfiiPKfS0_Pfii
.type _Z50__device_stub__Z22one_channel_mul_kernelPKfS0_PfiiPKfS0_Pfii, @function
_Z50__device_stub__Z22one_channel_mul_kernelPKfS0_PfiiPKfS0_Pfii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z22one_channel_mul_kernelPKfS0_Pfii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z50__device_stub__Z22one_channel_mul_kernelPKfS0_PfiiPKfS0_Pfii, .-_Z50__device_stub__Z22one_channel_mul_kernelPKfS0_PfiiPKfS0_Pfii
.globl _Z22one_channel_mul_kernelPKfS0_Pfii
.type _Z22one_channel_mul_kernelPKfS0_Pfii, @function
_Z22one_channel_mul_kernelPKfS0_Pfii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z50__device_stub__Z22one_channel_mul_kernelPKfS0_PfiiPKfS0_Pfii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z22one_channel_mul_kernelPKfS0_Pfii, .-_Z22one_channel_mul_kernelPKfS0_Pfii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z22one_channel_mul_kernelPKfS0_Pfii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z22one_channel_mul_kernelPKfS0_Pfii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
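In the host assembly above, the mangled symbol _Z22one_channel_mul_kernelPKfS0_Pfii is only a thin wrapper: it calls the generated __device_stub__, which gathers the address of each of the five arguments into an array and forwards it, together with the launch configuration recovered by __cudaPopCallConfiguration, to cudaLaunchKernel. The same array-of-argument-pointers shape can be used directly from user code; a hedged call-site sketch (grid, block and the argument variables are placeholders, not definitions from the row):

// Sketch: launching the kernel through the same runtime entry point the stub uses.
// Equivalent to one_channel_mul_kernel<<<grid, block>>>(l, r, out, ch, total).
void *args[] = { &l, &r, &out, &ch, &total };        // addresses of the arguments, in order
cudaLaunchKernel((const void *)one_channel_mul_kernel,
                 grid, block, args, /*sharedMem=*/0, /*stream=*/0);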
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include "includes.h"
__global__ void one_channel_mul_kernel(const float *data_l, const float *data_r, float *result, int channel_total, int total)
{
int idx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
int one_ch_idx = idx % (2 * channel_total);
if (idx / 2 < total) {
result[idx] = data_l[idx] * data_r[one_ch_idx] - data_l[idx + 1] * data_r[one_ch_idx + 1];
result[idx + 1] = data_l[idx] * data_r[one_ch_idx + 1] + data_l[idx + 1] * data_r[one_ch_idx];
}
} | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void one_channel_mul_kernel(const float *data_l, const float *data_r, float *result, int channel_total, int total)
{
int idx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
int one_ch_idx = idx % (2 * channel_total);
if (idx / 2 < total) {
result[idx] = data_l[idx] * data_r[one_ch_idx] - data_l[idx + 1] * data_r[one_ch_idx + 1];
result[idx + 1] = data_l[idx] * data_r[one_ch_idx + 1] + data_l[idx + 1] * data_r[one_ch_idx];
}
} |
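The only textual change in this CUDA-to-HIP row is the added #include <hip/hip_runtime.h>: hipcc accepts the chevron launch syntax and the built-in index variables unchanged, but it does not implicitly pull in the runtime header the way nvcc pulls in cuda_runtime.h for .cu files. When a single source must build under both compilers, the include can be guarded; a hedged sketch (it assumes hipcc predefines __HIPCC__, which current ROCm toolchains do):

// Sketch: keeping one source buildable with both nvcc and hipcc.
#ifdef __HIPCC__
#include <hip/hip_runtime.h>   // HIP needs the runtime header explicitly
#endif
// nvcc implicitly includes cuda_runtime.h for .cu files, so no extra header is needed there.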
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void one_channel_mul_kernel(const float *data_l, const float *data_r, float *result, int channel_total, int total)
{
int idx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
int one_ch_idx = idx % (2 * channel_total);
if (idx / 2 < total) {
result[idx] = data_l[idx] * data_r[one_ch_idx] - data_l[idx + 1] * data_r[one_ch_idx + 1];
result[idx + 1] = data_l[idx] * data_r[one_ch_idx + 1] + data_l[idx + 1] * data_r[one_ch_idx];
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z22one_channel_mul_kernelPKfS0_Pfii
.globl _Z22one_channel_mul_kernelPKfS0_Pfii
.p2align 8
.type _Z22one_channel_mul_kernelPKfS0_Pfii,@function
_Z22one_channel_mul_kernelPKfS0_Pfii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_bfe_i32 v0, v1, 0, 31
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s3, v0
s_cbranch_execz .LBB0_2
s_clause 0x2
s_load_b32 s2, s[0:1], 0x18
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_lshl_b32 s3, s2, 1
s_bfe_i32 s2, s2, 0x1001e
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s3, s3, s2
s_xor_b32 s2, s3, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cvt_f32_u32_e32 v0, s2
s_sub_i32 s3, 0, s2
v_rcp_iflag_f32_e32 v0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cvt_u32_f32_e32 v2, v0
v_lshlrev_b32_e32 v0, 1, v1
v_mul_lo_u32 v1, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v0
v_add_nc_u32_e32 v4, v0, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v1, v2, v1
v_xor_b32_e32 v4, v4, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, v2, v1
v_mul_hi_u32 v1, v4, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, v1, s2
v_sub_nc_u32_e32 v1, v4, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v2, s2, v1
v_cmp_le_u32_e32 vcc_lo, s2, v1
v_cndmask_b32_e32 v1, v1, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v2, s2, v1
v_cmp_le_u32_e32 vcc_lo, s2, v1
v_cndmask_b32_e32 v1, v1, v2, vcc_lo
v_or_b32_e32 v2, 1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v1, v1, v3
v_sub_nc_u32_e32 v4, v1, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v3, 31, v2
v_ashrrev_i32_e32 v1, 31, v0
v_or_b32_e32 v6, 1, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_ashrrev_i32_e32 v7, 31, v6
v_add_co_u32 v8, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v9, vcc_lo, s5, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[6:7], 2, v[6:7]
v_add_co_u32 v10, vcc_lo, s4, v0
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_add_co_ci_u32_e32 v11, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
global_load_b32 v12, v[8:9], off
global_load_b32 v13, v[6:7], off
global_load_b32 v14, v[10:11], off
global_load_b32 v15, v[4:5], off
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(2)
v_mul_f32_e32 v12, v12, v13
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_fma_f32 v12, v14, v15, -v12
global_store_b32 v[0:1], v12, off
global_load_b32 v0, v[8:9], off
global_load_b32 v1, v[4:5], off
global_load_b32 v4, v[10:11], off
global_load_b32 v5, v[6:7], off
s_waitcnt vmcnt(2)
v_mul_f32_e32 v6, v0, v1
v_add_co_u32 v0, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v3, vcc_lo
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_3)
v_fmac_f32_e32 v6, v4, v5
global_store_b32 v[0:1], v6, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z22one_channel_mul_kernelPKfS0_Pfii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 16
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z22one_channel_mul_kernelPKfS0_Pfii, .Lfunc_end0-_Z22one_channel_mul_kernelPKfS0_Pfii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z22one_channel_mul_kernelPKfS0_Pfii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z22one_channel_mul_kernelPKfS0_Pfii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 16
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include "includes.h"
__global__ void one_channel_mul_kernel(const float *data_l, const float *data_r, float *result, int channel_total, int total)
{
int idx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
int one_ch_idx = idx % (2 * channel_total);
if (idx / 2 < total) {
result[idx] = data_l[idx] * data_r[one_ch_idx] - data_l[idx + 1] * data_r[one_ch_idx + 1];
result[idx + 1] = data_l[idx] * data_r[one_ch_idx + 1] + data_l[idx + 1] * data_r[one_ch_idx];
}
} | .text
.file "one_channel_mul_kernel.hip"
.globl _Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii # -- Begin function _Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii
.p2align 4, 0x90
.type _Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii,@function
_Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii: # @_Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z22one_channel_mul_kernelPKfS0_Pfii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii, .Lfunc_end0-_Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z22one_channel_mul_kernelPKfS0_Pfii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z22one_channel_mul_kernelPKfS0_Pfii,@object # @_Z22one_channel_mul_kernelPKfS0_Pfii
.section .rodata,"a",@progbits
.globl _Z22one_channel_mul_kernelPKfS0_Pfii
.p2align 3, 0x0
_Z22one_channel_mul_kernelPKfS0_Pfii:
.quad _Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii
.size _Z22one_channel_mul_kernelPKfS0_Pfii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z22one_channel_mul_kernelPKfS0_Pfii"
.size .L__unnamed_1, 37
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z22one_channel_mul_kernelPKfS0_Pfii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z22one_channel_mul_kernelPKfS0_Pfii
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_CTAID.X ; /* 0x0000000000007919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R0, R0, c[0x0][0x0], R3 ; /* 0x0000000000007a24 */
/* 0x001fc800078e0203 */
/*0040*/ IMAD.SHL.U32 R0, R0, 0x2, RZ ; /* 0x0000000200007824 */
/* 0x000fca00078e00ff */
/*0050*/ SHF.R.S32.HI R2, RZ, 0x1, R0 ; /* 0x00000001ff027819 */
/* 0x000fc80000011400 */
/*0060*/ ISETP.GE.AND P0, PT, R2, c[0x0][0x17c], PT ; /* 0x00005f0002007a0c */
/* 0x000fda0003f06270 */
/*0070*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0080*/ MOV R4, c[0x0][0x178] ; /* 0x00005e0000047a02 */
/* 0x000fe20000000f00 */
/*0090*/ HFMA2.MMA R11, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0b7435 */
/* 0x000fe200000001ff */
/*00a0*/ IABS R8, R0 ; /* 0x0000000000087213 */
/* 0x000fe20000000000 */
/*00b0*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fe20000000a00 */
/*00c0*/ ISETP.GE.AND P2, PT, R0, RZ, PT ; /* 0x000000ff0000720c */
/* 0x000fe20003f46270 */
/*00d0*/ IMAD.SHL.U32 R4, R4, 0x2, RZ ; /* 0x0000000204047824 */
/* 0x000fca00078e00ff */
/*00e0*/ IABS R6, R4.reuse ; /* 0x0000000400067213 */
/* 0x080fe40000000000 */
/*00f0*/ IABS R9, R4 ; /* 0x0000000400097213 */
/* 0x000fe40000000000 */
/*0100*/ I2F.RP R5, R6 ; /* 0x0000000600057306 */
/* 0x000e300000209400 */
/*0110*/ MUFU.RCP R5, R5 ; /* 0x0000000500057308 */
/* 0x001e240000001000 */
/*0120*/ IADD3 R2, R5, 0xffffffe, RZ ; /* 0x0ffffffe05027810 */
/* 0x001fc40007ffe0ff */
/*0130*/ IADD3 R5, RZ, -R9, RZ ; /* 0x80000009ff057210 */
/* 0x000fc80007ffe0ff */
/*0140*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*0150*/ IMAD.MOV.U32 R2, RZ, RZ, RZ ; /* 0x000000ffff027224 */
/* 0x001fe200078e00ff */
/*0160*/ IADD3 R7, RZ, -R3, RZ ; /* 0x80000003ff077210 */
/* 0x002fca0007ffe0ff */
/*0170*/ IMAD R7, R7, R6, RZ ; /* 0x0000000607077224 */
/* 0x000fc800078e02ff */
/*0180*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fcc00078e0002 */
/*0190*/ IMAD.HI.U32 R3, R3, R8, RZ ; /* 0x0000000803037227 */
/* 0x000fc800078e00ff */
/*01a0*/ IMAD R3, R3, R5, R8 ; /* 0x0000000503037224 */
/* 0x000fca00078e0208 */
/*01b0*/ ISETP.GT.U32.AND P0, PT, R6, R3, PT ; /* 0x000000030600720c */
/* 0x000fda0003f04070 */
/*01c0*/ @!P0 IMAD.IADD R3, R3, 0x1, -R6 ; /* 0x0000000103038824 */
/* 0x000fe200078e0a06 */
/*01d0*/ ISETP.NE.AND P0, PT, R4, RZ, PT ; /* 0x000000ff0400720c */
/* 0x000fc80003f05270 */
/*01e0*/ ISETP.GT.U32.AND P1, PT, R6, R3, PT ; /* 0x000000030600720c */
/* 0x000fda0003f24070 */
/*01f0*/ @!P1 IADD3 R3, R3, -R6, RZ ; /* 0x8000000603039210 */
/* 0x000fca0007ffe0ff */
/*0200*/ IMAD.MOV.U32 R5, RZ, RZ, R3 ; /* 0x000000ffff057224 */
/* 0x000fe400078e0003 */
/*0210*/ IMAD.WIDE R2, R0, R11, c[0x0][0x160] ; /* 0x0000580000027625 */
/* 0x000fc600078e020b */
/*0220*/ @!P2 IADD3 R5, -R5, RZ, RZ ; /* 0x000000ff0505a210 */
/* 0x000fe40007ffe1ff */
/*0230*/ @!P0 LOP3.LUT R5, RZ, R4, RZ, 0x33, !PT ; /* 0x00000004ff058212 */
/* 0x000fe200078e33ff */
/*0240*/ LDG.E R7, [R2.64+0x4] ; /* 0x0000040402077981 */
/* 0x000ea8000c1e1900 */
/*0250*/ IMAD.WIDE R4, R5, R11, c[0x0][0x168] ; /* 0x00005a0005047625 */
/* 0x000fe200078e020b */
/*0260*/ LDG.E R9, [R2.64] ; /* 0x0000000402097981 */
/* 0x000ee8000c1e1900 */
/*0270*/ LDG.E R6, [R4.64+0x4] ; /* 0x0000040404067981 */
/* 0x000ea8000c1e1900 */
/*0280*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ee2000c1e1900 */
/*0290*/ FMUL R10, R6, R7 ; /* 0x00000007060a7220 */
/* 0x004fc40000400000 */
/*02a0*/ IMAD.WIDE R6, R0, R11, c[0x0][0x170] ; /* 0x00005c0000067625 */
/* 0x000fc800078e020b */
/*02b0*/ FFMA R9, R8, R9, -R10 ; /* 0x0000000908097223 */
/* 0x008fca000000080a */
/*02c0*/ STG.E [R6.64], R9 ; /* 0x0000000906007986 */
/* 0x000fe8000c101904 */
/*02d0*/ LDG.E R8, [R4.64] ; /* 0x0000000404087981 */
/* 0x000ea8000c1e1900 */
/*02e0*/ LDG.E R13, [R2.64+0x4] ; /* 0x00000404020d7981 */
/* 0x000ea8000c1e1900 */
/*02f0*/ LDG.E R0, [R4.64+0x4] ; /* 0x0000040404007981 */
/* 0x000ee8000c1e1900 */
/*0300*/ LDG.E R11, [R2.64] ; /* 0x00000004020b7981 */
/* 0x000ee2000c1e1900 */
/*0310*/ FMUL R8, R8, R13 ; /* 0x0000000d08087220 */
/* 0x004fc80000400000 */
/*0320*/ FFMA R11, R0, R11, R8 ; /* 0x0000000b000b7223 */
/* 0x008fca0000000008 */
/*0330*/ STG.E [R6.64+0x4], R11 ; /* 0x0000040b06007986 */
/* 0x000fe2000c101904 */
/*0340*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0350*/ BRA 0x350; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0360*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0370*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0380*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0390*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z22one_channel_mul_kernelPKfS0_Pfii
.globl _Z22one_channel_mul_kernelPKfS0_Pfii
.p2align 8
.type _Z22one_channel_mul_kernelPKfS0_Pfii,@function
_Z22one_channel_mul_kernelPKfS0_Pfii:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x1c
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_bfe_i32 v0, v1, 0, 31
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_gt_i32_e64 s3, v0
s_cbranch_execz .LBB0_2
s_clause 0x2
s_load_b32 s2, s[0:1], 0x18
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_lshl_b32 s3, s2, 1
s_bfe_i32 s2, s2, 0x1001e
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_add_i32 s3, s3, s2
s_xor_b32 s2, s3, s2
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_cvt_f32_u32_e32 v0, s2
s_sub_i32 s3, 0, s2
v_rcp_iflag_f32_e32 v0, v0
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v0, 0x4f7ffffe, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_cvt_u32_f32_e32 v2, v0
v_lshlrev_b32_e32 v0, 1, v1
v_mul_lo_u32 v1, s3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v3, 31, v0
v_add_nc_u32_e32 v4, v0, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_mul_hi_u32 v1, v2, v1
v_xor_b32_e32 v4, v4, v3
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, v2, v1
v_mul_hi_u32 v1, v4, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v1, v1, s2
v_sub_nc_u32_e32 v1, v4, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v2, s2, v1
v_cmp_le_u32_e32 vcc_lo, s2, v1
v_cndmask_b32_e32 v1, v1, v2, vcc_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v2, s2, v1
v_cmp_le_u32_e32 vcc_lo, s2, v1
v_cndmask_b32_e32 v1, v1, v2, vcc_lo
v_or_b32_e32 v2, 1, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_xor_b32_e32 v1, v1, v3
v_sub_nc_u32_e32 v4, v1, v3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v3, 31, v2
v_ashrrev_i32_e32 v1, 31, v0
v_or_b32_e32 v6, 1, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[2:3], 2, v[2:3]
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_ashrrev_i32_e32 v7, 31, v6
v_add_co_u32 v8, vcc_lo, s4, v2
v_add_co_ci_u32_e32 v9, vcc_lo, s5, v3, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[6:7], 2, v[6:7]
v_add_co_u32 v10, vcc_lo, s4, v0
v_lshlrev_b64 v[4:5], 2, v[4:5]
v_add_co_ci_u32_e32 v11, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v6, vcc_lo, s6, v6
v_add_co_ci_u32_e32 v7, vcc_lo, s7, v7, vcc_lo
s_delay_alu instid0(VALU_DEP_4)
v_add_co_u32 v4, vcc_lo, s6, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s7, v5, vcc_lo
global_load_b32 v12, v[8:9], off
global_load_b32 v13, v[6:7], off
global_load_b32 v14, v[10:11], off
global_load_b32 v15, v[4:5], off
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
s_waitcnt vmcnt(2)
v_mul_f32_e32 v12, v12, v13
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1)
v_fma_f32 v12, v14, v15, -v12
global_store_b32 v[0:1], v12, off
global_load_b32 v0, v[8:9], off
global_load_b32 v1, v[4:5], off
global_load_b32 v4, v[10:11], off
global_load_b32 v5, v[6:7], off
s_waitcnt vmcnt(2)
v_mul_f32_e32 v6, v0, v1
v_add_co_u32 v0, vcc_lo, s0, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v3, vcc_lo
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_3)
v_fmac_f32_e32 v6, v4, v5
global_store_b32 v[0:1], v6, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z22one_channel_mul_kernelPKfS0_Pfii
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 16
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z22one_channel_mul_kernelPKfS0_Pfii, .Lfunc_end0-_Z22one_channel_mul_kernelPKfS0_Pfii
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 28
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z22one_channel_mul_kernelPKfS0_Pfii
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z22one_channel_mul_kernelPKfS0_Pfii.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 16
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_000eb132_00000000-6_one_channel_mul_kernel.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2029:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2029:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z50__device_stub__Z22one_channel_mul_kernelPKfS0_PfiiPKfS0_Pfii
.type _Z50__device_stub__Z22one_channel_mul_kernelPKfS0_PfiiPKfS0_Pfii, @function
_Z50__device_stub__Z22one_channel_mul_kernelPKfS0_PfiiPKfS0_Pfii:
.LFB2051:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movq %rsp, %rax
movq %rax, 128(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z22one_channel_mul_kernelPKfS0_Pfii(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2051:
.size _Z50__device_stub__Z22one_channel_mul_kernelPKfS0_PfiiPKfS0_Pfii, .-_Z50__device_stub__Z22one_channel_mul_kernelPKfS0_PfiiPKfS0_Pfii
.globl _Z22one_channel_mul_kernelPKfS0_Pfii
.type _Z22one_channel_mul_kernelPKfS0_Pfii, @function
_Z22one_channel_mul_kernelPKfS0_Pfii:
.LFB2052:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z50__device_stub__Z22one_channel_mul_kernelPKfS0_PfiiPKfS0_Pfii
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2052:
.size _Z22one_channel_mul_kernelPKfS0_Pfii, .-_Z22one_channel_mul_kernelPKfS0_Pfii
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "_Z22one_channel_mul_kernelPKfS0_Pfii"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2054:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z22one_channel_mul_kernelPKfS0_Pfii(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2054:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "one_channel_mul_kernel.hip"
.globl _Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii # -- Begin function _Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii
.p2align 4, 0x90
.type _Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii,@function
_Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii: # @_Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii
.cfi_startproc
# %bb.0:
subq $120, %rsp
.cfi_def_cfa_offset 128
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movq %rdx, 56(%rsp)
movl %ecx, 4(%rsp)
movl %r8d, (%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 56(%rsp), %rax
movq %rax, 96(%rsp)
leaq 4(%rsp), %rax
movq %rax, 104(%rsp)
movq %rsp, %rax
movq %rax, 112(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z22one_channel_mul_kernelPKfS0_Pfii, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $136, %rsp
.cfi_adjust_cfa_offset -136
retq
.Lfunc_end0:
.size _Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii, .Lfunc_end0-_Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB1_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB1_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z22one_channel_mul_kernelPKfS0_Pfii, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end1:
.size __hip_module_ctor, .Lfunc_end1-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB2_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB2_2:
retq
.Lfunc_end2:
.size __hip_module_dtor, .Lfunc_end2-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z22one_channel_mul_kernelPKfS0_Pfii,@object # @_Z22one_channel_mul_kernelPKfS0_Pfii
.section .rodata,"a",@progbits
.globl _Z22one_channel_mul_kernelPKfS0_Pfii
.p2align 3, 0x0
_Z22one_channel_mul_kernelPKfS0_Pfii:
.quad _Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii
.size _Z22one_channel_mul_kernelPKfS0_Pfii, 8
.type .L__unnamed_1,@object # @0
.section .rodata.str1.1,"aMS",@progbits,1
.L__unnamed_1:
.asciz "_Z22one_channel_mul_kernelPKfS0_Pfii"
.size .L__unnamed_1, 37
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z37__device_stub__one_channel_mul_kernelPKfS0_Pfii
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z22one_channel_mul_kernelPKfS0_Pfii
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
#include <cuda.h>
#include <stdlib.h>
#define IMAGE_HEIGHT 521
#define IMAGE_WIDTH 428
__global__
void blur(int *d_R, int *d_G, int *d_B, int *d_Rnew, int *d_Gnew, int *d_Bnew)
{
// Get the X and y coords of the pixel for this thread
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// Stop the thread if it is not part of the image
if (x >= IMAGE_WIDTH || y >= IMAGE_HEIGHT) {
return;
}
// Apply the box blur
if (y != 0 && y != (IMAGE_HEIGHT-1) && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/4;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/4;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/4;
}
else if (y == 0 && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (y == (IMAGE_HEIGHT-1) && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (x == 0 && y != 0 && y != (IMAGE_HEIGHT-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1])/3;
}
else if (x == (IMAGE_WIDTH-1) && y != 0 && y != (IMAGE_HEIGHT-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (y==0 &&x==0){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*(y+1)) + x])/2;
}
else if (y==0 &&x==(IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x-1]+d_R[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x-1]+d_G[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x-1]+d_B[(IMAGE_WIDTH*(y+1)) + x])/2;
}
else if (y==(IMAGE_HEIGHT-1) &&x==0){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*(y-1)) + x])/2;
}
else if (y==(IMAGE_HEIGHT-1) &&x==(IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x-1]+d_R[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x-1]+d_G[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x-1]+d_B[(IMAGE_WIDTH*(y-1)) + x])/2;
}
}
int main (int argc, const char * argv[]) {
struct timeval tim;
gettimeofday(&tim, NULL);
static int const maxlen = 200, rowsize = 521, colsize = 428, linelen = 12;
char str[maxlen], lines[5][maxlen];
FILE *fp, *fout;
int nlines = 0;
unsigned int h1, h2, h3;
char *sptr;
int R[rowsize][colsize], G[rowsize][colsize], B[rowsize][colsize];
int row = 0, col = 0, nblurs, lineno=0, k;
fp = fopen("David.ps", "r");
while(! feof(fp))
{
fscanf(fp, "\n%[^\n]", str);
if (nlines < 5) {strcpy((char *)lines[nlines++],(char *)str);}
else{
for (sptr=&str[0];*sptr != '\0';sptr+=6){
sscanf(sptr,"%2x",&h1);
sscanf(sptr+2,"%2x",&h2);
sscanf(sptr+4,"%2x",&h3);
if (col==colsize){
col = 0;
row++;
}
if (row < rowsize) {
R[row][col] = h1;
G[row][col] = h2;
B[row][col] = h3;
}
col++;
}
}
}
fclose(fp);
// Number of blur iterations
// nblurs = atoi(argv[1]); // Get iterations from argument
nblurs = 20;
// Start the timer
double t1=tim.tv_sec+(tim.tv_usec/1000000.0);
// The size of the 1D arrays for the GPU
int size = sizeof(int) * IMAGE_WIDTH * IMAGE_HEIGHT;
// Initialise the arrays to hold the flattened image
int *h_R, *h_G, *h_B;
h_R = (int *)malloc(size);
h_G = (int *)malloc(size);
h_B = (int *)malloc(size);
// Create pointers to GPU array locations
int *d_R, *d_G, *d_B, *d_Rnew, *d_Gnew, *d_Bnew;
// Define how many threads per block
int numBlocksY = ceil(IMAGE_HEIGHT/16.0);
int numBlocksX = ceil(IMAGE_WIDTH/16.0);
dim3 dimBlock(numBlocksX,numBlocksY);
// Define how many blocks per grid
dim3 dimGrid(16, 16);
// Allocate GPU mem for the 1D arrays
cudaMalloc((void **)&d_R, size);
cudaMalloc((void **)&d_G, size);
cudaMalloc((void **)&d_B, size);
cudaMalloc((void **)&d_Rnew, size);
cudaMalloc((void **)&d_Bnew, size);
cudaMalloc((void **)&d_Gnew, size);
// Flatten the 2D arrays to make them easier to handle with CUDA
for (int row=0;row<IMAGE_HEIGHT;row++){
for (int col=0;col<IMAGE_WIDTH;col++){
h_R[IMAGE_WIDTH*row+col] = R[row][col];
h_G[IMAGE_WIDTH*row+col] = G[row][col];
h_B[IMAGE_WIDTH*row+col] = B[row][col];
}
}
// Copy these arrays to the GPU
cudaMemcpy(d_R, h_R, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_G, h_G, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Start the blur loop
for(k=0;k<nblurs;k++){
// Punch it Chewie
blur<<<dimGrid, dimBlock>>>(d_R, d_G, d_B, d_Rnew, d_Gnew, d_Bnew);
// Copy the modified values to the original locations ready for a new iteration
cudaMemcpy(d_R, d_Rnew, size, cudaMemcpyDeviceToDevice);
cudaMemcpy(d_G, d_Gnew, size, cudaMemcpyDeviceToDevice);
cudaMemcpy(d_B, d_Bnew, size, cudaMemcpyDeviceToDevice);
}
// Copy the data off the GPU
cudaMemcpy(h_R, d_Rnew, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_G, d_Gnew, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_B, d_Bnew, size, cudaMemcpyDeviceToHost);
// Convert the 1D arrays back into 2D
for (int row=0;row<IMAGE_HEIGHT;row++){
for (int col=0;col<IMAGE_WIDTH;col++){
R[row][col] = h_R[IMAGE_WIDTH*row+col];
G[row][col] = h_G[IMAGE_WIDTH*row+col];
B[row][col] = h_B[IMAGE_WIDTH*row+col];
}
}
// Free up the allocated memory
cudaFree(d_R); cudaFree(d_G); cudaFree(d_B);
cudaFree(d_Rnew); cudaFree(d_Gnew); cudaFree(d_Bnew);
free(h_R); free(h_G); free(h_B);
fout= fopen("DavidBlur.ps", "w");
for (k=0;k<nlines;k++) fprintf(fout,"\n%s", lines[k]);
fprintf(fout,"\n");
for(row=0;row<rowsize;row++){
for (col=0;col<colsize;col++){
fprintf(fout,"%02x%02x%02x",R[row][col],G[row][col],B[row][col]);
lineno++;
if (lineno==linelen){
fprintf(fout,"\n");
lineno = 0;
}
}
}
fclose(fout);
gettimeofday(&tim, NULL);
double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
printf("%.6lf seconds elapsed\n", t2-t1);
return 0;
} | .file "tmpxft_0016cd1d_00000000-6_blur.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_
.type _Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_, @function
_Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z4blurPiS_S_S_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_, .-_Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_
.globl _Z4blurPiS_S_S_S_S_
.type _Z4blurPiS_S_S_S_S_, @function
_Z4blurPiS_S_S_S_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z4blurPiS_S_S_S_S_, .-_Z4blurPiS_S_S_S_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "r"
.LC1:
.string "David.ps"
.LC2:
.string "\n%[^\n]"
.LC3:
.string "%2x"
.LC5:
.string "w"
.LC6:
.string "DavidBlur.ps"
.LC7:
.string "\n%s"
.LC8:
.string "\n"
.LC9:
.string "%02x%02x%02x"
.LC10:
.string "%.6lf seconds elapsed\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
leaq -2674688(%rsp), %r11
.cfi_def_cfa 11, 2674744
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $2536, %rsp
.cfi_def_cfa_offset 2677280
movq %fs:40, %rax
movq %rax, 2677208(%rsp)
xorl %eax, %eax
leaq 128(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
leaq .LC0(%rip), %rsi
leaq .LC1(%rip), %rdi
call fopen@PLT
movq %rax, %r14
movl $0, %r12d
movl $0, %r15d
movl $0, %ebx
leaq 2677008(%rsp), %rax
movq %rax, 8(%rsp)
leaq 144(%rsp), %rax
movq %rax, 24(%rsp)
leaq .LC3(%rip), %r13
.L12:
movq %r14, %rdi
call feof@PLT
movl %eax, 20(%rsp)
testl %eax, %eax
jne .L43
movq 8(%rsp), %rdx
leaq .LC2(%rip), %rsi
movq %r14, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
cmpl $4, %ebx
jg .L44
leal 1(%rbx), %ebp
movslq %ebx, %rbx
leaq (%rbx,%rbx,4), %rax
leaq (%rax,%rax,4), %rax
salq $3, %rax
movl $1000, %edx
cmpq %rdx, %rax
cmovnb %rax, %rdx
subq %rax, %rdx
movq 24(%rsp), %rsi
leaq (%rsi,%rax), %rdi
movq 8(%rsp), %rsi
call __strcpy_chk@PLT
movl %ebp, %ebx
jmp .L12
.L44:
leaq 2677008(%rsp), %rbp
cmpb $0, 2677008(%rsp)
jne .L14
jmp .L12
.L45:
addl $1, %r15d
movl 20(%rsp), %r12d
jmp .L16
.L17:
addl $1, %r12d
addq $6, %rbp
cmpb $0, 0(%rbp)
je .L12
.L14:
leaq 44(%rsp), %rdx
movq %r13, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_sscanf@PLT
leaq 48(%rsp), %rdx
leaq 2(%rbp), %rdi
movq %r13, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
leaq 52(%rsp), %rdx
leaq 4(%rbp), %rdi
movq %r13, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
cmpl $428, %r12d
je .L45
.L16:
cmpl $520, %r15d
jg .L17
movslq %r12d, %rdx
movslq %r15d, %rax
imulq $428, %rax, %rax
addq %rdx, %rax
movl 44(%rsp), %edx
movl %edx, 1152(%rsp,%rax,4)
movl 48(%rsp), %edx
movl %edx, 893104(%rsp,%rax,4)
movl 52(%rsp), %edx
movl %edx, 1785056(%rsp,%rax,4)
jmp .L17
.L43:
movq %r14, %rdi
call fclose@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 136(%rsp), %xmm0
divsd .LC4(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq 128(%rsp), %xmm1
addsd %xmm1, %xmm0
movsd %xmm0, 8(%rsp)
movl $891952, %edi
call malloc@PLT
movq %rax, %r13
movl $891952, %edi
call malloc@PLT
movq %rax, %r12
movl $891952, %edi
call malloc@PLT
movq %rax, %rbp
movl $27, 104(%rsp)
movl $33, 108(%rsp)
movl $1, 112(%rsp)
movl $16, 116(%rsp)
movl $16, 120(%rsp)
movl $1, 124(%rsp)
leaq 56(%rsp), %rdi
movl $891952, %esi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movl $891952, %esi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movl $891952, %esi
call cudaMalloc@PLT
leaq 80(%rsp), %rdi
movl $891952, %esi
call cudaMalloc@PLT
leaq 96(%rsp), %rdi
movl $891952, %esi
call cudaMalloc@PLT
leaq 88(%rsp), %rdi
movl $891952, %esi
call cudaMalloc@PLT
movl $1712, %ecx
.L19:
leaq -1712(%rcx), %rax
.L20:
movl 1152(%rsp,%rax), %edx
movl %edx, 0(%r13,%rax)
movl 893104(%rsp,%rax), %edx
movl %edx, (%r12,%rax)
movl 1785056(%rsp,%rax), %edx
movl %edx, 0(%rbp,%rax)
addq $4, %rax
cmpq %rcx, %rax
jne .L20
addq $1712, %rcx
cmpq $893664, %rcx
jne .L19
movl $1, %ecx
movl $891952, %edx
movq %r13, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $891952, %edx
movq %r12, %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $891952, %edx
movq %rbp, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl $20, %r14d
jmp .L23
.L22:
movl $3, %ecx
movl $891952, %edx
movq 80(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $3, %ecx
movl $891952, %edx
movq 88(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $3, %ecx
movl $891952, %edx
movq 96(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
subl $1, %r14d
je .L46
.L23:
movl 112(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 104(%rsp), %rdx
movq 116(%rsp), %rdi
movl 124(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L22
movq 96(%rsp), %r9
movq 88(%rsp), %r8
movq 80(%rsp), %rcx
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_
jmp .L22
.L46:
movl $2, %ecx
movl $891952, %edx
movq 80(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movl $891952, %edx
movq 88(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movl $891952, %edx
movq 96(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl $1712, %ecx
.L24:
leaq -1712(%rcx), %rax
.L25:
movl 0(%r13,%rax), %edx
movl %edx, 1152(%rsp,%rax)
movl (%r12,%rax), %edx
movl %edx, 893104(%rsp,%rax)
movl 0(%rbp,%rax), %edx
movl %edx, 1785056(%rsp,%rax)
addq $4, %rax
cmpq %rcx, %rax
jne .L25
addq $1712, %rcx
cmpq $893664, %rcx
jne .L24
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq 96(%rsp), %rdi
call cudaFree@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
leaq .LC5(%rip), %rsi
leaq .LC6(%rip), %rdi
call fopen@PLT
movq %rax, %r12
testl %ebx, %ebx
jle .L27
leaq 144(%rsp), %rbp
movslq %ebx, %rbx
leaq (%rbx,%rbx,4), %rax
leaq (%rax,%rax,4), %rax
leaq 0(%rbp,%rax,8), %r13
leaq .LC7(%rip), %rbx
.L28:
movq %rbp, %rcx
movq %rbx, %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
addq $200, %rbp
cmpq %rbp, %r13
jne .L28
.L27:
leaq .LC8(%rip), %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1712, %r13d
movl $0, %ebp
leaq .LC9(%rip), %r14
leaq .LC8(%rip), %r15
jmp .L29
.L30:
addq $4, %rbx
cmpq %rbx, %r13
je .L47
.L31:
movl 1152(%rsp,%rbx), %ecx
movl 1785056(%rsp,%rbx), %r9d
movl 893104(%rsp,%rbx), %r8d
movq %r14, %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
addl $1, %ebp
cmpl $12, %ebp
jne .L30
movq %r15, %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %ebp
jmp .L30
.L47:
addq $1712, %r13
cmpq $893664, %r13
je .L32
.L29:
leaq -1712(%r13), %rbx
jmp .L31
.L32:
movq %r12, %rdi
call fclose@PLT
leaq 128(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 136(%rsp), %xmm0
divsd .LC4(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq 128(%rsp), %xmm1
addsd %xmm1, %xmm0
subsd 8(%rsp), %xmm0
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 2677208(%rsp), %rax
subq %fs:40, %rax
jne .L48
movl $0, %eax
addq $2677224, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC11:
.string "_Z4blurPiS_S_S_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z4blurPiS_S_S_S_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC4:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
#include <cuda.h>
#include <stdlib.h>
#define IMAGE_HEIGHT 521
#define IMAGE_WIDTH 428
__global__
void blur(int *d_R, int *d_G, int *d_B, int *d_Rnew, int *d_Gnew, int *d_Bnew)
{
// Get the X and y coords of the pixel for this thread
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// Stop the thread if it is not part of the image
if (x >= IMAGE_WIDTH || y >= IMAGE_HEIGHT) {
return;
}
// Apply the box blur
if (y != 0 && y != (IMAGE_HEIGHT-1) && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/4;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/4;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/4;
}
else if (y == 0 && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (y == (IMAGE_HEIGHT-1) && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (x == 0 && y != 0 && y != (IMAGE_HEIGHT-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1])/3;
}
else if (x == (IMAGE_WIDTH-1) && y != 0 && y != (IMAGE_HEIGHT-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (y==0 &&x==0){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*(y+1)) + x])/2;
}
else if (y==0 &&x==(IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x-1]+d_R[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x-1]+d_G[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x-1]+d_B[(IMAGE_WIDTH*(y+1)) + x])/2;
}
else if (y==(IMAGE_HEIGHT-1) &&x==0){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*(y-1)) + x])/2;
}
else if (y==(IMAGE_HEIGHT-1) &&x==(IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x-1]+d_R[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x-1]+d_G[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x-1]+d_B[(IMAGE_WIDTH*(y-1)) + x])/2;
}
}
int main (int argc, const char * argv[]) {
struct timeval tim;
gettimeofday(&tim, NULL);
static int const maxlen = 200, rowsize = 521, colsize = 428, linelen = 12;
char str[maxlen], lines[5][maxlen];
FILE *fp, *fout;
int nlines = 0;
unsigned int h1, h2, h3;
char *sptr;
int R[rowsize][colsize], G[rowsize][colsize], B[rowsize][colsize];
int row = 0, col = 0, nblurs, lineno=0, k;
fp = fopen("David.ps", "r");
while(! feof(fp))
{
fscanf(fp, "\n%[^\n]", str);
if (nlines < 5) {strcpy((char *)lines[nlines++],(char *)str);}
else{
for (sptr=&str[0];*sptr != '\0';sptr+=6){
sscanf(sptr,"%2x",&h1);
sscanf(sptr+2,"%2x",&h2);
sscanf(sptr+4,"%2x",&h3);
if (col==colsize){
col = 0;
row++;
}
if (row < rowsize) {
R[row][col] = h1;
G[row][col] = h2;
B[row][col] = h3;
}
col++;
}
}
}
fclose(fp);
// Number of blur iterations
// nblurs = atoi(argv[1]); // Get iterations from argument
nblurs = 20;
// Start the timer
double t1=tim.tv_sec+(tim.tv_usec/1000000.0);
// The size of the 1D arrays for the GPU
int size = sizeof(int) * IMAGE_WIDTH * IMAGE_HEIGHT;
// Initialise the arrays to hold the flattened image
int *h_R, *h_G, *h_B;
h_R = (int *)malloc(size);
h_G = (int *)malloc(size);
h_B = (int *)malloc(size);
// Create pointers to GPU array locations
int *d_R, *d_G, *d_B, *d_Rnew, *d_Gnew, *d_Bnew;
// Define how many threads per block
int numBlocksY = ceil(IMAGE_HEIGHT/16.0);
int numBlocksX = ceil(IMAGE_WIDTH/16.0);
dim3 dimBlock(numBlocksX,numBlocksY);
// Define how many blocks per grid
dim3 dimGrid(16, 16);
// Allocate GPU mem for the 1D arrays
cudaMalloc((void **)&d_R, size);
cudaMalloc((void **)&d_G, size);
cudaMalloc((void **)&d_B, size);
cudaMalloc((void **)&d_Rnew, size);
cudaMalloc((void **)&d_Bnew, size);
cudaMalloc((void **)&d_Gnew, size);
// Flatten the 2D arrays to make them easier to handle with CUDA
for (int row=0;row<IMAGE_HEIGHT;row++){
for (int col=0;col<IMAGE_WIDTH;col++){
h_R[IMAGE_WIDTH*row+col] = R[row][col];
h_G[IMAGE_WIDTH*row+col] = G[row][col];
h_B[IMAGE_WIDTH*row+col] = B[row][col];
}
}
// Copy these arrays to the GPU
cudaMemcpy(d_R, h_R, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_G, h_G, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Start the blur loop
for(k=0;k<nblurs;k++){
// Punch it Chewie
blur<<<dimGrid, dimBlock>>>(d_R, d_G, d_B, d_Rnew, d_Gnew, d_Bnew);
// Copy the modified values to the original locations ready for a new iteration
cudaMemcpy(d_R, d_Rnew, size, cudaMemcpyDeviceToDevice);
cudaMemcpy(d_G, d_Gnew, size, cudaMemcpyDeviceToDevice);
cudaMemcpy(d_B, d_Bnew, size, cudaMemcpyDeviceToDevice);
}
// Copy the data off the GPU
cudaMemcpy(h_R, d_Rnew, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_G, d_Gnew, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_B, d_Bnew, size, cudaMemcpyDeviceToHost);
// Convert the 1D arrays back into 2D
for (int row=0;row<IMAGE_HEIGHT;row++){
for (int col=0;col<IMAGE_WIDTH;col++){
R[row][col] = h_R[IMAGE_WIDTH*row+col];
G[row][col] = h_G[IMAGE_WIDTH*row+col];
B[row][col] = h_B[IMAGE_WIDTH*row+col];
}
}
// Free up the allocated memory
cudaFree(d_R); cudaFree(d_G); cudaFree(d_B);
cudaFree(d_Rnew); cudaFree(d_Gnew); cudaFree(d_Bnew);
free(h_R); free(h_G); free(h_B);
fout= fopen("DavidBlur.ps", "w");
for (k=0;k<nlines;k++) fprintf(fout,"\n%s", lines[k]);
fprintf(fout,"\n");
for(row=0;row<rowsize;row++){
for (col=0;col<colsize;col++){
fprintf(fout,"%02x%02x%02x",R[row][col],G[row][col],B[row][col]);
lineno++;
if (lineno==linelen){
fprintf(fout,"\n");
lineno = 0;
}
}
}
fclose(fout);
gettimeofday(&tim, NULL);
double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
printf("%.6lf seconds elapsed\n", t2-t1);
return 0;
} | #include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#define IMAGE_HEIGHT 521
#define IMAGE_WIDTH 428
__global__
void blur(int *d_R, int *d_G, int *d_B, int *d_Rnew, int *d_Gnew, int *d_Bnew)
{
// Get the X and y coords of the pixel for this thread
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// Stop the thread if it is not part of the image
if (x >= IMAGE_WIDTH || y >= IMAGE_HEIGHT) {
return;
}
// Apply the box blur
if (y != 0 && y != (IMAGE_HEIGHT-1) && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/4;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/4;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/4;
}
else if (y == 0 && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (y == (IMAGE_HEIGHT-1) && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (x == 0 && y != 0 && y != (IMAGE_HEIGHT-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1])/3;
}
else if (x == (IMAGE_WIDTH-1) && y != 0 && y != (IMAGE_HEIGHT-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (y==0 &&x==0){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*(y+1)) + x])/2;
}
else if (y==0 &&x==(IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x-1]+d_R[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x-1]+d_G[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x-1]+d_B[(IMAGE_WIDTH*(y+1)) + x])/2;
}
else if (y==(IMAGE_HEIGHT-1) &&x==0){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*(y-1)) + x])/2;
}
else if (y==(IMAGE_HEIGHT-1) &&x==(IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x-1]+d_R[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x-1]+d_G[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x-1]+d_B[(IMAGE_WIDTH*(y-1)) + x])/2;
}
}
int main (int argc, const char * argv[]) {
struct timeval tim;
gettimeofday(&tim, NULL);
static int const maxlen = 200, rowsize = 521, colsize = 428, linelen = 12;
char str[maxlen], lines[5][maxlen];
FILE *fp, *fout;
int nlines = 0;
unsigned int h1, h2, h3;
char *sptr;
int R[rowsize][colsize], G[rowsize][colsize], B[rowsize][colsize];
int row = 0, col = 0, nblurs, lineno=0, k;
fp = fopen("David.ps", "r");
while(! feof(fp))
{
fscanf(fp, "\n%[^\n]", str);
if (nlines < 5) {strcpy((char *)lines[nlines++],(char *)str);}
else{
for (sptr=&str[0];*sptr != '\0';sptr+=6){
sscanf(sptr,"%2x",&h1);
sscanf(sptr+2,"%2x",&h2);
sscanf(sptr+4,"%2x",&h3);
if (col==colsize){
col = 0;
row++;
}
if (row < rowsize) {
R[row][col] = h1;
G[row][col] = h2;
B[row][col] = h3;
}
col++;
}
}
}
fclose(fp);
// Number of blur iterations
// nblurs = atoi(argv[1]); // Get iterations from argument
nblurs = 20;
// Start the timer
double t1=tim.tv_sec+(tim.tv_usec/1000000.0);
// The size of the 1D arrays for the GPU
int size = sizeof(int) * IMAGE_WIDTH * IMAGE_HEIGHT;
// Initialise the arrays to hold the flattened image
int *h_R, *h_G, *h_B;
h_R = (int *)malloc(size);
h_G = (int *)malloc(size);
h_B = (int *)malloc(size);
// Create pointers to GPU array locations
int *d_R, *d_G, *d_B, *d_Rnew, *d_Gnew, *d_Bnew;
// Define how many threads per block
int numBlocksY = ceil(IMAGE_HEIGHT/16.0);
int numBlocksX = ceil(IMAGE_WIDTH/16.0);
dim3 dimBlock(numBlocksX,numBlocksY);
// Define how many blocks per grid
dim3 dimGrid(16, 16);
// Allocate GPU mem for the 1D arrays
hipMalloc((void **)&d_R, size);
hipMalloc((void **)&d_G, size);
hipMalloc((void **)&d_B, size);
hipMalloc((void **)&d_Rnew, size);
hipMalloc((void **)&d_Bnew, size);
hipMalloc((void **)&d_Gnew, size);
// Flatten the 2D arrays to make them easier to handle with CUDA
for (int row=0;row<IMAGE_HEIGHT;row++){
for (int col=0;col<IMAGE_WIDTH;col++){
h_R[IMAGE_WIDTH*row+col] = R[row][col];
h_G[IMAGE_WIDTH*row+col] = G[row][col];
h_B[IMAGE_WIDTH*row+col] = B[row][col];
}
}
// Copy these arrays to the GPU
hipMemcpy(d_R, h_R, size, hipMemcpyHostToDevice);
hipMemcpy(d_G, h_G, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Start the blur loop
for(k=0;k<nblurs;k++){
// Punch it Chewie
blur<<<dimGrid, dimBlock>>>(d_R, d_G, d_B, d_Rnew, d_Gnew, d_Bnew);
// Copy the modified values to the original locations ready for a new iteration
hipMemcpy(d_R, d_Rnew, size, hipMemcpyDeviceToDevice);
hipMemcpy(d_G, d_Gnew, size, hipMemcpyDeviceToDevice);
hipMemcpy(d_B, d_Bnew, size, hipMemcpyDeviceToDevice);
}
// Copy the data off the GPU
hipMemcpy(h_R, d_Rnew, size, hipMemcpyDeviceToHost);
hipMemcpy(h_G, d_Gnew, size, hipMemcpyDeviceToHost);
hipMemcpy(h_B, d_Bnew, size, hipMemcpyDeviceToHost);
// Convert the 1D arrays back into 2D
for (int row=0;row<IMAGE_HEIGHT;row++){
for (int col=0;col<IMAGE_WIDTH;col++){
R[row][col] = h_R[IMAGE_WIDTH*row+col];
G[row][col] = h_G[IMAGE_WIDTH*row+col];
B[row][col] = h_B[IMAGE_WIDTH*row+col];
}
}
// Free up the allocated memory
hipFree(d_R); hipFree(d_G); hipFree(d_B);
hipFree(d_Rnew); hipFree(d_Gnew); hipFree(d_Bnew);
free(h_R); free(h_G); free(h_B);
fout= fopen("DavidBlur.ps", "w");
for (k=0;k<nlines;k++) fprintf(fout,"\n%s", lines[k]);
fprintf(fout,"\n");
for(row=0;row<rowsize;row++){
for (col=0;col<colsize;col++){
fprintf(fout,"%02x%02x%02x",R[row][col],G[row][col],B[row][col]);
lineno++;
if (lineno==linelen){
fprintf(fout,"\n");
lineno = 0;
}
}
}
fclose(fout);
gettimeofday(&tim, NULL);
double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
printf("%.6lf seconds elapsed\n", t2-t1);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#define IMAGE_HEIGHT 521
#define IMAGE_WIDTH 428
__global__
void blur(int *d_R, int *d_G, int *d_B, int *d_Rnew, int *d_Gnew, int *d_Bnew)
{
// Get the X and y coords of the pixel for this thread
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// Stop the thread if it is not part of the image
if (x >= IMAGE_WIDTH || y >= IMAGE_HEIGHT) {
return;
}
// Apply the box blur
if (y != 0 && y != (IMAGE_HEIGHT-1) && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/4;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/4;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/4;
}
else if (y == 0 && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (y == (IMAGE_HEIGHT-1) && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (x == 0 && y != 0 && y != (IMAGE_HEIGHT-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1])/3;
}
else if (x == (IMAGE_WIDTH-1) && y != 0 && y != (IMAGE_HEIGHT-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (y==0 &&x==0){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*(y+1)) + x])/2;
}
else if (y==0 &&x==(IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x-1]+d_R[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x-1]+d_G[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x-1]+d_B[(IMAGE_WIDTH*(y+1)) + x])/2;
}
else if (y==(IMAGE_HEIGHT-1) &&x==0){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*(y-1)) + x])/2;
}
else if (y==(IMAGE_HEIGHT-1) &&x==(IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x-1]+d_R[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x-1]+d_G[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x-1]+d_B[(IMAGE_WIDTH*(y-1)) + x])/2;
}
}
int main (int argc, const char * argv[]) {
struct timeval tim;
gettimeofday(&tim, NULL);
static int const maxlen = 200, rowsize = 521, colsize = 428, linelen = 12;
char str[maxlen], lines[5][maxlen];
FILE *fp, *fout;
int nlines = 0;
unsigned int h1, h2, h3;
char *sptr;
int R[rowsize][colsize], G[rowsize][colsize], B[rowsize][colsize];
int row = 0, col = 0, nblurs, lineno=0, k;
fp = fopen("David.ps", "r");
while(! feof(fp))
{
fscanf(fp, "\n%[^\n]", str);
if (nlines < 5) {strcpy((char *)lines[nlines++],(char *)str);}
else{
for (sptr=&str[0];*sptr != '\0';sptr+=6){
sscanf(sptr,"%2x",&h1);
sscanf(sptr+2,"%2x",&h2);
sscanf(sptr+4,"%2x",&h3);
if (col==colsize){
col = 0;
row++;
}
if (row < rowsize) {
R[row][col] = h1;
G[row][col] = h2;
B[row][col] = h3;
}
col++;
}
}
}
fclose(fp);
// Number of blur iterations
// nblurs = atoi(argv[1]); // Get iterations from argument
nblurs = 20;
// Start the timer
double t1=tim.tv_sec+(tim.tv_usec/1000000.0);
// The size of the 1D arrays for the GPU
int size = sizeof(int) * IMAGE_WIDTH * IMAGE_HEIGHT;
// Initialise the arrays to hold the flattened image
int *h_R, *h_G, *h_B;
h_R = (int *)malloc(size);
h_G = (int *)malloc(size);
h_B = (int *)malloc(size);
// Create pointers to GPU array locations
int *d_R, *d_G, *d_B, *d_Rnew, *d_Gnew, *d_Bnew;
// Define how many threads per block
int numBlocksY = ceil(IMAGE_HEIGHT/16.0);
int numBlocksX = ceil(IMAGE_WIDTH/16.0);
dim3 dimBlock(numBlocksX,numBlocksY);
// Define how many blocks per grid
dim3 dimGrid(16, 16);
// Allocate GPU mem for the 1D arrays
hipMalloc((void **)&d_R, size);
hipMalloc((void **)&d_G, size);
hipMalloc((void **)&d_B, size);
hipMalloc((void **)&d_Rnew, size);
hipMalloc((void **)&d_Bnew, size);
hipMalloc((void **)&d_Gnew, size);
// Flatten the 2D arrays to make them easier to handle with CUDA
for (int row=0;row<IMAGE_HEIGHT;row++){
for (int col=0;col<IMAGE_WIDTH;col++){
h_R[IMAGE_WIDTH*row+col] = R[row][col];
h_G[IMAGE_WIDTH*row+col] = G[row][col];
h_B[IMAGE_WIDTH*row+col] = B[row][col];
}
}
// Copy these arrays to the GPU
hipMemcpy(d_R, h_R, size, hipMemcpyHostToDevice);
hipMemcpy(d_G, h_G, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Start the blur loop
for(k=0;k<nblurs;k++){
// Punch it Chewie
blur<<<dimGrid, dimBlock>>>(d_R, d_G, d_B, d_Rnew, d_Gnew, d_Bnew);
// Copy the modified values to the original locations ready for a new iteration
hipMemcpy(d_R, d_Rnew, size, hipMemcpyDeviceToDevice);
hipMemcpy(d_G, d_Gnew, size, hipMemcpyDeviceToDevice);
hipMemcpy(d_B, d_Bnew, size, hipMemcpyDeviceToDevice);
}
// Copy the data off the GPU
hipMemcpy(h_R, d_Rnew, size, hipMemcpyDeviceToHost);
hipMemcpy(h_G, d_Gnew, size, hipMemcpyDeviceToHost);
hipMemcpy(h_B, d_Bnew, size, hipMemcpyDeviceToHost);
// Convert the 1D arrays back into 2D
for (int row=0;row<IMAGE_HEIGHT;row++){
for (int col=0;col<IMAGE_WIDTH;col++){
R[row][col] = h_R[IMAGE_WIDTH*row+col];
G[row][col] = h_G[IMAGE_WIDTH*row+col];
B[row][col] = h_B[IMAGE_WIDTH*row+col];
}
}
// Free up the allocated memory
hipFree(d_R); hipFree(d_G); hipFree(d_B);
hipFree(d_Rnew); hipFree(d_Gnew); hipFree(d_Bnew);
free(h_R); free(h_G); free(h_B);
fout= fopen("DavidBlur.ps", "w");
for (k=0;k<nlines;k++) fprintf(fout,"\n%s", lines[k]);
fprintf(fout,"\n");
for(row=0;row<rowsize;row++){
for (col=0;col<colsize;col++){
fprintf(fout,"%02x%02x%02x",R[row][col],G[row][col],B[row][col]);
lineno++;
if (lineno==linelen){
fprintf(fout,"\n");
lineno = 0;
}
}
}
fclose(fout);
gettimeofday(&tim, NULL);
double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
printf("%.6lf seconds elapsed\n", t2-t1);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z4blurPiS_S_S_S_S_
.globl _Z4blurPiS_S_S_S_S_
.p2align 8
.type _Z4blurPiS_S_S_S_S_,@function
_Z4blurPiS_S_S_S_S_:
s_load_b32 s2, s[0:1], 0x3c
v_and_b32_e32 v2, 0x3ff, v0
v_bfe_u32 v4, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_and_b32 s3, s2, 0xffff
s_lshr_b32 s2, s2, 16
v_mad_u64_u32 v[0:1], null, s14, s3, v[2:3]
v_mad_u64_u32 v[2:3], null, s15, s2, v[4:5]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_gt_i32_e32 vcc_lo, 0x1ac, v0
v_cmp_gt_i32_e64 s2, 0x209, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s2, vcc_lo, s2
s_and_saveexec_b32 s3, s2
s_cbranch_execz .LBB0_47
s_clause 0x1
s_load_b256 s[8:15], s[0:1], 0x0
s_load_b128 s[16:19], s[0:1], 0x20
v_cmp_eq_u32_e32 vcc_lo, 0, v2
v_cmp_eq_u32_e64 s0, 0x208, v2
v_cmp_eq_u32_e64 s1, 0, v0
v_cmp_eq_u32_e64 s2, 0x1ab, v0
s_delay_alu instid0(VALU_DEP_3)
s_or_b32 s0, vcc_lo, s0
s_delay_alu instid0(VALU_DEP_2) | instid1(SALU_CYCLE_1)
s_or_b32 s0, s1, s0
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
s_or_b32 s0, s2, s0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s1, s0
s_xor_b32 s5, exec_lo, s1
s_cbranch_execz .LBB0_45
v_cmp_eq_u32_e32 vcc_lo, 0, v2
s_mov_b32 s0, -1
s_and_saveexec_b32 s2, vcc_lo
s_cbranch_execz .LBB0_8
s_mov_b32 s1, -1
s_mov_b32 s3, exec_lo
v_cmpx_gt_i32_e32 0x1ab, v0
s_cbranch_execz .LBB0_7
v_cmp_eq_u32_e64 s0, 0, v0
s_mov_b32 s4, exec_lo
v_cmpx_ne_u32_e32 0, v0
s_cbranch_execz .LBB0_6
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[0:1]
v_add_co_u32 v1, s1, v3, 0x6b0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v13, s1, 0, v4, s1
v_add_co_u32 v14, s1, v3, 4
v_add_co_ci_u32_e64 v15, s1, 0, v4, s1
s_waitcnt lgkmcnt(0)
v_add_co_u32 v5, s1, s8, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v6, s1, s9, v13, s1
v_add_co_u32 v16, s1, v3, -4
v_add_co_ci_u32_e64 v17, s1, -1, v4, s1
v_add_co_u32 v7, s1, s8, v14
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e64 v8, s1, s9, v15, s1
v_add_co_u32 v9, s1, s8, v16
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s1, s9, v17, s1
s_clause 0x2
global_load_b32 v5, v[5:6], off
global_load_b32 v6, v[7:8], off
global_load_b32 v7, v[9:10], off
s_waitcnt vmcnt(0)
v_add3_u32 v5, v6, v5, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_hi_i32 v11, v5, 0x55555556
v_add_co_u32 v5, s1, s14, v3
v_add_co_ci_u32_e64 v6, s1, s15, v4, s1
v_add_co_u32 v7, s1, s10, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v8, s1, s11, v13, s1
v_lshrrev_b32_e32 v12, 31, v11
v_add_co_u32 v9, s1, s10, v14
v_add_co_ci_u32_e64 v10, s1, s11, v15, s1
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v18, v11, v12
v_add_co_u32 v11, s1, s10, v16
v_add_co_ci_u32_e64 v12, s1, s11, v17, s1
global_store_b32 v[5:6], v18, off
s_clause 0x2
global_load_b32 v5, v[7:8], off
global_load_b32 v6, v[9:10], off
global_load_b32 v7, v[11:12], off
s_waitcnt vmcnt(0)
v_add3_u32 v5, v6, v5, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_hi_i32 v11, v5, 0x55555556
v_add_co_u32 v5, s1, s16, v3
v_add_co_ci_u32_e64 v6, s1, s17, v4, s1
v_add_co_u32 v7, s1, s12, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v8, s1, s13, v13, s1
v_lshrrev_b32_e32 v1, 31, v11
v_add_co_u32 v9, s1, s12, v14
v_add_co_ci_u32_e64 v10, s1, s13, v15, s1
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, v11, v1
v_add_co_u32 v11, s1, s12, v16
v_add_co_ci_u32_e64 v12, s1, s13, v17, s1
global_store_b32 v[5:6], v1, off
s_clause 0x2
global_load_b32 v1, v[7:8], off
global_load_b32 v5, v[9:10], off
global_load_b32 v6, v[11:12], off
v_add_co_u32 v3, s1, s18, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s1, s19, v4, s1
s_waitcnt vmcnt(0)
v_add3_u32 v1, v5, v1, v6
v_mul_hi_i32 v1, v1, 0x55555556
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v5, 31, v1
v_add_nc_u32_e32 v1, v1, v5
global_store_b32 v[3:4], v1, off
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s1, s0, exec_lo
.LBB0_7:
s_or_b32 exec_lo, exec_lo, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s0, s1, exec_lo
.LBB0_8:
s_or_b32 exec_lo, exec_lo, s2
s_and_saveexec_b32 s6, s0
s_cbranch_execz .LBB0_44
v_cmp_eq_u32_e64 s0, 0x208, v2
s_mov_b32 s1, -1
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s3, s0
s_cbranch_execz .LBB0_15
s_mov_b32 s2, -1
s_mov_b32 s4, exec_lo
v_cmpx_gt_i32_e32 0x1ab, v0
s_cbranch_execz .LBB0_14
v_cmp_eq_u32_e64 s1, 0, v0
s_mov_b32 s7, exec_lo
v_cmpx_ne_u32_e32 0, v0
s_cbranch_execz .LBB0_13
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[0:1]
v_add_co_u32 v1, s2, v3, 0xd8ed0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v11, s2, 0, v4, s2
v_add_co_u32 v12, s2, v3, 0xd9584
v_add_co_ci_u32_e64 v13, s2, 0, v4, s2
s_waitcnt lgkmcnt(0)
v_add_co_u32 v5, s2, s8, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v6, s2, s9, v11, s2
v_add_co_u32 v14, s2, v3, 0xd957c
v_add_co_ci_u32_e64 v15, s2, 0, v4, s2
v_add_co_u32 v7, s2, s8, v12
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_ci_u32_e64 v8, s2, s9, v13, s2
v_add_co_u32 v9, s2, s8, v14
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s2, s9, v15, s2
s_clause 0x2
global_load_b32 v5, v[5:6], off
global_load_b32 v6, v[7:8], off
global_load_b32 v7, v[9:10], off
v_add_co_u32 v16, s2, v3, 0xd9580
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e64 v17, s2, 0, v4, s2
v_add_co_u32 v3, s2, s14, v16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s2, s15, v17, s2
s_waitcnt vmcnt(0)
v_add3_u32 v5, v6, v5, v7
v_mul_hi_i32 v9, v5, 0x55555556
v_add_co_u32 v5, s2, s10, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v6, s2, s11, v11, s2
v_add_co_u32 v7, s2, s10, v12
v_add_co_ci_u32_e64 v8, s2, s11, v13, s2
v_lshrrev_b32_e32 v10, 31, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v18, v9, v10
v_add_co_u32 v9, s2, s10, v14
v_add_co_ci_u32_e64 v10, s2, s11, v15, s2
global_store_b32 v[3:4], v18, off
s_clause 0x2
global_load_b32 v3, v[5:6], off
global_load_b32 v4, v[7:8], off
global_load_b32 v5, v[9:10], off
s_waitcnt vmcnt(0)
v_add3_u32 v3, v4, v3, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_hi_i32 v9, v3, 0x55555556
v_add_co_u32 v3, s2, s16, v16
v_add_co_ci_u32_e64 v4, s2, s17, v17, s2
v_add_co_u32 v5, s2, s12, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v6, s2, s13, v11, s2
v_lshrrev_b32_e32 v1, 31, v9
v_add_co_u32 v7, s2, s12, v12
v_add_co_ci_u32_e64 v8, s2, s13, v13, s2
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, v9, v1
v_add_co_u32 v9, s2, s12, v14
v_add_co_ci_u32_e64 v10, s2, s13, v15, s2
global_store_b32 v[3:4], v1, off
s_clause 0x2
global_load_b32 v1, v[5:6], off
global_load_b32 v3, v[7:8], off
global_load_b32 v4, v[9:10], off
s_waitcnt vmcnt(0)
v_add3_u32 v1, v3, v1, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_i32 v1, v1, 0x55555556
v_lshrrev_b32_e32 v3, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, v1, v3
v_add_co_u32 v3, s2, s18, v16
v_add_co_ci_u32_e64 v4, s2, s19, v17, s2
global_store_b32 v[3:4], v1, off
.LBB0_13:
s_or_b32 exec_lo, exec_lo, s7
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s2, s1, exec_lo
.LBB0_14:
s_or_b32 exec_lo, exec_lo, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s1, s2, exec_lo
.LBB0_15:
s_or_b32 exec_lo, exec_lo, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s1
s_cbranch_execz .LBB0_44
v_cmp_eq_u32_e64 s1, 0, v0
s_mov_b32 s2, -1
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s4, s1
s_cbranch_execz .LBB0_22
s_mov_b32 s3, -1
s_mov_b32 s7, exec_lo
v_cmpx_gt_i32_e32 0x208, v2
s_cbranch_execz .LBB0_21
v_cmp_eq_u32_e64 s2, 0, v2
s_mov_b32 s20, exec_lo
v_cmpx_ne_u32_e32 0, v2
s_cbranch_execz .LBB0_20
v_mul_lo_u32 v3, v2, 0x1ac
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v4, 0x1ac, v3
v_add_nc_u32_e32 v6, 0xfffffe54, v3
v_or_b32_e32 v8, 1, v3
v_ashrrev_i32_e32 v5, 31, v4
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_ashrrev_i32_e32 v7, 31, v6
v_ashrrev_i32_e32 v9, 31, v8
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[10:11], 2, v[4:5]
v_lshlrev_b64 v[5:6], 2, v[6:7]
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[7:8], 2, v[8:9]
v_ashrrev_i32_e32 v4, 31, v3
s_waitcnt lgkmcnt(0)
v_add_co_u32 v12, s3, s8, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v13, s3, s9, v11, s3
v_add_co_u32 v14, s3, s8, v5
v_add_co_ci_u32_e64 v15, s3, s9, v6, s3
v_add_co_u32 v16, s3, s8, v7
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v17, s3, s9, v8, s3
s_clause 0x2
global_load_b32 v1, v[12:13], off
global_load_b32 v9, v[14:15], off
global_load_b32 v12, v[16:17], off
v_lshlrev_b64 v[3:4], 2, v[3:4]
s_waitcnt vmcnt(0)
v_add3_u32 v1, v9, v1, v12
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v12, s3, s14, v3
v_add_co_ci_u32_e64 v13, s3, s15, v4, s3
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_hi_i32 v1, v1, 0x55555556
v_add_co_u32 v14, s3, s10, v10
v_add_co_ci_u32_e64 v15, s3, s11, v11, s3
v_add_co_u32 v16, s3, s10, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v17, s3, s11, v6, s3
v_lshrrev_b32_e32 v9, 31, v1
v_add_co_u32 v18, s3, s10, v7
v_add_co_ci_u32_e64 v19, s3, s11, v8, s3
s_delay_alu instid0(VALU_DEP_3)
v_add_nc_u32_e32 v1, v1, v9
global_store_b32 v[12:13], v1, off
s_clause 0x2
global_load_b32 v1, v[14:15], off
global_load_b32 v9, v[16:17], off
global_load_b32 v12, v[18:19], off
s_waitcnt vmcnt(0)
v_add3_u32 v1, v9, v1, v12
v_add_co_u32 v12, s3, s16, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v13, s3, s17, v4, s3
v_mul_hi_i32 v1, v1, 0x55555556
v_add_co_u32 v9, s3, s12, v10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s3, s13, v11, s3
v_add_co_u32 v5, s3, s12, v5
v_add_co_ci_u32_e64 v6, s3, s13, v6, s3
v_lshrrev_b32_e32 v11, 31, v1
v_add_co_u32 v7, s3, s12, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_ci_u32_e64 v8, s3, s13, v8, s3
v_add_nc_u32_e32 v1, v1, v11
v_add_co_u32 v3, s3, s18, v3
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s3, s19, v4, s3
global_store_b32 v[12:13], v1, off
s_clause 0x2
global_load_b32 v1, v[9:10], off
global_load_b32 v5, v[5:6], off
global_load_b32 v6, v[7:8], off
s_waitcnt vmcnt(0)
v_add3_u32 v1, v5, v1, v6
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_i32 v1, v1, 0x55555556
v_lshrrev_b32_e32 v5, 31, v1
s_delay_alu instid0(VALU_DEP_1)
v_add_nc_u32_e32 v1, v1, v5
global_store_b32 v[3:4], v1, off
.LBB0_20:
s_or_b32 exec_lo, exec_lo, s20
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s3, s2, exec_lo
.LBB0_21:
s_or_b32 exec_lo, exec_lo, s7
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s2, s3, exec_lo
.LBB0_22:
s_or_b32 exec_lo, exec_lo, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s2
s_cbranch_execz .LBB0_44
v_cmp_eq_u32_e64 s2, 0x1ab, v0
s_mov_b32 s3, -1
s_delay_alu instid0(VALU_DEP_1)
s_and_saveexec_b32 s7, s2
s_cbranch_execz .LBB0_29
s_mov_b32 s4, -1
s_mov_b32 s20, exec_lo
v_cmpx_gt_i32_e32 0x208, v2
s_cbranch_execz .LBB0_28
v_cmp_eq_u32_e64 s3, 0, v2
s_mov_b32 s21, exec_lo
v_cmpx_ne_u32_e32 0, v2
s_cbranch_execz .LBB0_27
v_mul_lo_u32 v3, v2, 0x1ac
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v5, 0x357, v3
v_ashrrev_i32_e32 v4, 31, v3
v_add_nc_u32_e32 v7, -1, v3
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[3:4], 2, v[3:4]
v_ashrrev_i32_e32 v8, 31, v7
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[5:6], 2, v[5:6]
v_lshlrev_b64 v[7:8], 2, v[7:8]
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v1, s4, v3, 0x6a8
v_add_co_ci_u32_e64 v15, s4, 0, v4, s4
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v9, s4, s8, v5
v_add_co_ci_u32_e64 v10, s4, s9, v6, s4
v_add_co_u32 v11, s4, s8, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v12, s4, s9, v8, s4
v_add_co_u32 v13, s4, s8, v1
v_add_co_ci_u32_e64 v14, s4, s9, v15, s4
s_clause 0x2
global_load_b32 v9, v[9:10], off
global_load_b32 v10, v[11:12], off
global_load_b32 v11, v[13:14], off
v_add_co_u32 v16, s4, v3, 0x6ac
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e64 v17, s4, 0, v4, s4
v_add_co_u32 v3, s4, s14, v16
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s4, s15, v17, s4
s_waitcnt vmcnt(0)
v_add3_u32 v9, v10, v9, v11
v_mul_hi_i32 v13, v9, 0x55555556
v_add_co_u32 v9, s4, s10, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v10, s4, s11, v6, s4
v_add_co_u32 v11, s4, s10, v7
v_add_co_ci_u32_e64 v12, s4, s11, v8, s4
v_lshrrev_b32_e32 v14, 31, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v18, v13, v14
v_add_co_u32 v13, s4, s10, v1
v_add_co_ci_u32_e64 v14, s4, s11, v15, s4
global_store_b32 v[3:4], v18, off
s_clause 0x2
global_load_b32 v3, v[9:10], off
global_load_b32 v4, v[11:12], off
global_load_b32 v9, v[13:14], off
s_waitcnt vmcnt(0)
v_add3_u32 v3, v4, v3, v9
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mul_hi_i32 v9, v3, 0x55555556
v_add_co_u32 v3, s4, s16, v16
v_add_co_ci_u32_e64 v4, s4, s17, v17, s4
v_add_co_u32 v5, s4, s12, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v6, s4, s13, v6, s4
v_lshrrev_b32_e32 v10, 31, v9
v_add_co_u32 v7, s4, s12, v7
v_add_co_ci_u32_e64 v8, s4, s13, v8, s4
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v11, v9, v10
v_add_co_u32 v9, s4, s12, v1
v_add_co_ci_u32_e64 v10, s4, s13, v15, s4
global_store_b32 v[3:4], v11, off
s_clause 0x2
global_load_b32 v1, v[5:6], off
global_load_b32 v3, v[7:8], off
global_load_b32 v4, v[9:10], off
s_waitcnt vmcnt(0)
v_add3_u32 v1, v3, v1, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_hi_i32 v1, v1, 0x55555556
v_lshrrev_b32_e32 v3, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, v1, v3
v_add_co_u32 v3, s4, s18, v16
v_add_co_ci_u32_e64 v4, s4, s19, v17, s4
global_store_b32 v[3:4], v1, off
.LBB0_27:
s_or_b32 exec_lo, exec_lo, s21
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s4, s3, exec_lo
.LBB0_28:
s_or_b32 exec_lo, exec_lo, s20
s_delay_alu instid0(SALU_CYCLE_1)
s_or_not1_b32 s3, s4, exec_lo
.LBB0_29:
s_or_b32 exec_lo, exec_lo, s7
s_delay_alu instid0(SALU_CYCLE_1)
s_and_b32 exec_lo, exec_lo, s3
s_cbranch_execz .LBB0_44
v_or_b32_e32 v0, v2, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cmp_ne_u32_e64 s3, 0, v0
s_and_saveexec_b32 s4, s3
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s3, exec_lo, s4
s_cbranch_execz .LBB0_42
s_and_b32 s4, s2, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_xor_b32 s4, s4, -1
s_and_saveexec_b32 s7, s4
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s4, exec_lo, s7
s_cbranch_execz .LBB0_39
s_and_b32 s1, s1, s0
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_xor_b32 s1, s1, -1
s_and_saveexec_b32 s7, s1
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s1, exec_lo, s7
s_cbranch_execz .LBB0_36
s_and_b32 s2, s2, s0
s_delay_alu instid0(SALU_CYCLE_1)
s_and_saveexec_b32 s0, s2
s_cbranch_execz .LBB0_35
v_mov_b32_e32 v0, 0xd9000
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[8:9] offset:3112
global_load_b32 v2, v0, s[8:9] offset:1404
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v2, 31, v1
v_add_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 1, v1
global_store_b32 v0, v1, s[14:15] offset:3116
s_clause 0x1
global_load_b32 v1, v0, s[10:11] offset:3112
global_load_b32 v2, v0, s[10:11] offset:1404
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v2, 31, v1
v_add_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 1, v1
global_store_b32 v0, v1, s[16:17] offset:3116
s_clause 0x1
global_load_b32 v1, v0, s[12:13] offset:3112
global_load_b32 v2, v0, s[12:13] offset:1404
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v2, 31, v1
v_add_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 1, v1
global_store_b32 v0, v1, s[18:19] offset:3116
.LBB0_35:
s_or_b32 exec_lo, exec_lo, s0
.LBB0_36:
s_and_not1_saveexec_b32 s0, s1
s_cbranch_execz .LBB0_38
v_mov_b32_e32 v0, 0xd9000
v_mov_b32_e32 v1, 0xd8000
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v2, v0, s[8:9] offset:1412
global_load_b32 v3, v1, s[8:9] offset:3792
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v3, 31, v2
v_add_nc_u32_e32 v2, v2, v3
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 1, v2
global_store_b32 v0, v2, s[14:15] offset:1408
s_clause 0x1
global_load_b32 v2, v0, s[10:11] offset:1412
global_load_b32 v3, v1, s[10:11] offset:3792
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v3, 31, v2
v_add_nc_u32_e32 v2, v2, v3
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v2, 1, v2
global_store_b32 v0, v2, s[16:17] offset:1408
s_clause 0x1
global_load_b32 v2, v0, s[12:13] offset:1412
global_load_b32 v1, v1, s[12:13] offset:3792
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v2, 31, v1
v_add_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 1, v1
global_store_b32 v0, v1, s[18:19] offset:1408
.LBB0_38:
s_or_b32 exec_lo, exec_lo, s0
.LBB0_39:
s_and_not1_saveexec_b32 s0, s4
s_cbranch_execz .LBB0_41
v_mov_b32_e32 v0, 0
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[8:9] offset:1704
global_load_b32 v2, v0, s[8:9] offset:3420
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v2, 31, v1
v_add_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 1, v1
global_store_b32 v0, v1, s[14:15] offset:1708
s_clause 0x1
global_load_b32 v1, v0, s[10:11] offset:1704
global_load_b32 v2, v0, s[10:11] offset:3420
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v2, 31, v1
v_add_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 1, v1
global_store_b32 v0, v1, s[16:17] offset:1708
s_clause 0x1
global_load_b32 v1, v0, s[12:13] offset:1704
global_load_b32 v2, v0, s[12:13] offset:3420
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v2, 31, v1
v_add_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 1, v1
global_store_b32 v0, v1, s[18:19] offset:1708
.LBB0_41:
s_or_b32 exec_lo, exec_lo, s0
.LBB0_42:
s_and_not1_saveexec_b32 s0, s3
s_cbranch_execz .LBB0_44
v_mov_b32_e32 v0, 0
s_waitcnt lgkmcnt(0)
s_clause 0x1
global_load_b32 v1, v0, s[8:9] offset:4
global_load_b32 v2, v0, s[8:9] offset:1712
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v2, 31, v1
v_add_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 1, v1
global_store_b32 v0, v1, s[14:15]
s_clause 0x1
global_load_b32 v1, v0, s[10:11] offset:4
global_load_b32 v2, v0, s[10:11] offset:1712
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v2, 31, v1
v_add_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 1, v1
global_store_b32 v0, v1, s[16:17]
s_clause 0x1
global_load_b32 v1, v0, s[12:13] offset:4
global_load_b32 v2, v0, s[12:13] offset:1712
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v1, v2, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v2, 31, v1
v_add_nc_u32_e32 v1, v1, v2
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 1, v1
global_store_b32 v0, v1, s[18:19]
.LBB0_44:
s_or_b32 exec_lo, exec_lo, s6
.LBB0_45:
s_and_not1_saveexec_b32 s0, s5
s_cbranch_execz .LBB0_47
v_mad_u64_u32 v[3:4], null, v2, 0x1ac, v[0:1]
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_add_nc_u32_e32 v0, 0x1ac, v3
v_ashrrev_i32_e32 v4, 31, v3
v_add_nc_u32_e32 v5, 0xfffffe54, v3
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_lshlrev_b64 v[2:3], 2, v[3:4]
v_ashrrev_i32_e32 v6, 31, v5
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[0:1], 2, v[0:1]
v_lshlrev_b64 v[4:5], 2, v[5:6]
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_add_co_u32 v16, vcc_lo, v2, 4
v_add_co_ci_u32_e32 v17, vcc_lo, 0, v3, vcc_lo
s_waitcnt lgkmcnt(0)
v_add_co_u32 v6, vcc_lo, s8, v0
v_add_co_ci_u32_e32 v7, vcc_lo, s9, v1, vcc_lo
v_add_co_u32 v8, vcc_lo, s8, v4
v_add_co_ci_u32_e32 v9, vcc_lo, s9, v5, vcc_lo
v_add_co_u32 v18, vcc_lo, v2, -4
v_add_co_ci_u32_e32 v19, vcc_lo, -1, v3, vcc_lo
v_add_co_u32 v10, vcc_lo, s8, v16
v_add_co_ci_u32_e32 v11, vcc_lo, s9, v17, vcc_lo
s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v12, vcc_lo, s8, v18
v_add_co_ci_u32_e32 v13, vcc_lo, s9, v19, vcc_lo
s_clause 0x3
global_load_b32 v6, v[6:7], off
global_load_b32 v7, v[8:9], off
global_load_b32 v8, v[10:11], off
global_load_b32 v9, v[12:13], off
s_waitcnt vmcnt(2)
v_add_nc_u32_e32 v6, v7, v6
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_add3_u32 v12, v6, v8, v9
v_add_co_u32 v6, vcc_lo, s14, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s15, v3, vcc_lo
v_ashrrev_i32_e32 v10, 31, v12
v_add_co_u32 v8, vcc_lo, s10, v0
v_add_co_ci_u32_e32 v9, vcc_lo, s11, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_lshrrev_b32_e32 v13, 30, v10
v_add_co_u32 v10, vcc_lo, s10, v4
v_add_co_ci_u32_e32 v11, vcc_lo, s11, v5, vcc_lo
v_add_nc_u32_e32 v14, v12, v13
v_add_co_u32 v12, vcc_lo, s10, v16
v_add_co_ci_u32_e32 v13, vcc_lo, s11, v17, vcc_lo
s_delay_alu instid0(VALU_DEP_3)
v_ashrrev_i32_e32 v20, 2, v14
v_add_co_u32 v14, vcc_lo, s10, v18
v_add_co_ci_u32_e32 v15, vcc_lo, s11, v19, vcc_lo
global_store_b32 v[6:7], v20, off
s_clause 0x3
global_load_b32 v6, v[8:9], off
global_load_b32 v7, v[10:11], off
global_load_b32 v8, v[12:13], off
global_load_b32 v9, v[14:15], off
s_waitcnt vmcnt(2)
v_add_nc_u32_e32 v6, v7, v6
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_add3_u32 v8, v6, v8, v9
v_add_co_u32 v6, vcc_lo, s16, v2
v_add_co_ci_u32_e32 v7, vcc_lo, s17, v3, vcc_lo
v_ashrrev_i32_e32 v9, 31, v8
v_add_co_u32 v0, vcc_lo, s12, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s13, v1, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_lshrrev_b32_e32 v9, 30, v9
v_add_co_u32 v4, vcc_lo, s12, v4
v_add_co_ci_u32_e32 v5, vcc_lo, s13, v5, vcc_lo
v_add_nc_u32_e32 v10, v8, v9
v_add_co_u32 v8, vcc_lo, s12, v16
v_add_co_ci_u32_e32 v9, vcc_lo, s13, v17, vcc_lo
s_delay_alu instid0(VALU_DEP_3)
v_ashrrev_i32_e32 v12, 2, v10
v_add_co_u32 v10, vcc_lo, s12, v18
v_add_co_ci_u32_e32 v11, vcc_lo, s13, v19, vcc_lo
global_store_b32 v[6:7], v12, off
s_clause 0x3
global_load_b32 v0, v[0:1], off
global_load_b32 v1, v[4:5], off
global_load_b32 v4, v[8:9], off
global_load_b32 v5, v[10:11], off
s_waitcnt vmcnt(2)
v_add_nc_u32_e32 v0, v1, v0
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add3_u32 v0, v0, v4, v5
v_ashrrev_i32_e32 v1, 31, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshrrev_b32_e32 v1, 30, v1
v_add_nc_u32_e32 v0, v0, v1
s_delay_alu instid0(VALU_DEP_1)
v_ashrrev_i32_e32 v4, 2, v0
v_add_co_u32 v0, vcc_lo, s18, v2
v_add_co_ci_u32_e32 v1, vcc_lo, s19, v3, vcc_lo
global_store_b32 v[0:1], v4, off
.LBB0_47:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z4blurPiS_S_S_S_S_
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 304
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 21
.amdhsa_next_free_sgpr 22
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z4blurPiS_S_S_S_S_, .Lfunc_end0-_Z4blurPiS_S_S_S_S_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 40
.size: 8
.value_kind: global_buffer
- .offset: 48
.size: 4
.value_kind: hidden_block_count_x
- .offset: 52
.size: 4
.value_kind: hidden_block_count_y
- .offset: 56
.size: 4
.value_kind: hidden_block_count_z
- .offset: 60
.size: 2
.value_kind: hidden_group_size_x
- .offset: 62
.size: 2
.value_kind: hidden_group_size_y
- .offset: 64
.size: 2
.value_kind: hidden_group_size_z
- .offset: 66
.size: 2
.value_kind: hidden_remainder_x
- .offset: 68
.size: 2
.value_kind: hidden_remainder_y
- .offset: 70
.size: 2
.value_kind: hidden_remainder_z
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 104
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 112
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 304
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z4blurPiS_S_S_S_S_
.private_segment_fixed_size: 0
.sgpr_count: 24
.sgpr_spill_count: 0
.symbol: _Z4blurPiS_S_S_S_S_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 21
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#define IMAGE_HEIGHT 521
#define IMAGE_WIDTH 428
__global__
void blur(int *d_R, int *d_G, int *d_B, int *d_Rnew, int *d_Gnew, int *d_Bnew)
{
// Get the X and y coords of the pixel for this thread
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
// Stop the thread if it is not part of the image
if (x >= IMAGE_WIDTH || y >= IMAGE_HEIGHT) {
return;
}
// Apply the box blur
if (y != 0 && y != (IMAGE_HEIGHT-1) && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/4;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/4;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/4;
}
else if (y == 0 && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (y == (IMAGE_HEIGHT-1) && x != 0 && x != (IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (x == 0 && y != 0 && y != (IMAGE_HEIGHT-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x+1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x+1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x+1])/3;
}
else if (x == (IMAGE_WIDTH-1) && y != 0 && y != (IMAGE_HEIGHT-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*(y+1)) + x]+d_R[(IMAGE_WIDTH*(y-1)) + x]+d_R[(IMAGE_WIDTH*y) + x-1])/3;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*(y+1)) + x]+d_G[(IMAGE_WIDTH*(y-1)) + x]+d_G[(IMAGE_WIDTH*y) + x-1])/3;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*(y+1)) + x]+d_B[(IMAGE_WIDTH*(y-1)) + x]+d_B[(IMAGE_WIDTH*y) + x-1])/3;
}
else if (y==0 &&x==0){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*(y+1)) + x])/2;
}
else if (y==0 &&x==(IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x-1]+d_R[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x-1]+d_G[(IMAGE_WIDTH*(y+1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x-1]+d_B[(IMAGE_WIDTH*(y+1)) + x])/2;
}
else if (y==(IMAGE_HEIGHT-1) &&x==0){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x+1]+d_R[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x+1]+d_G[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x+1]+d_B[(IMAGE_WIDTH*(y-1)) + x])/2;
}
else if (y==(IMAGE_HEIGHT-1) &&x==(IMAGE_WIDTH-1)){
d_Rnew[(IMAGE_WIDTH*y) + x] = (d_R[(IMAGE_WIDTH*y) + x-1]+d_R[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Gnew[(IMAGE_WIDTH*y) + x] = (d_G[(IMAGE_WIDTH*y) + x-1]+d_G[(IMAGE_WIDTH*(y-1)) + x])/2;
d_Bnew[(IMAGE_WIDTH*y) + x] = (d_B[(IMAGE_WIDTH*y) + x-1]+d_B[(IMAGE_WIDTH*(y-1)) + x])/2;
}
}
int main (int argc, const char * argv[]) {
struct timeval tim;
gettimeofday(&tim, NULL);
static int const maxlen = 200, rowsize = 521, colsize = 428, linelen = 12;
char str[maxlen], lines[5][maxlen];
FILE *fp, *fout;
int nlines = 0;
unsigned int h1, h2, h3;
char *sptr;
int R[rowsize][colsize], G[rowsize][colsize], B[rowsize][colsize];
int row = 0, col = 0, nblurs, lineno=0, k;
fp = fopen("David.ps", "r");
while(! feof(fp))
{
fscanf(fp, "\n%[^\n]", str);
if (nlines < 5) {strcpy((char *)lines[nlines++],(char *)str);}
else{
for (sptr=&str[0];*sptr != '\0';sptr+=6){
sscanf(sptr,"%2x",&h1);
sscanf(sptr+2,"%2x",&h2);
sscanf(sptr+4,"%2x",&h3);
if (col==colsize){
col = 0;
row++;
}
if (row < rowsize) {
R[row][col] = h1;
G[row][col] = h2;
B[row][col] = h3;
}
col++;
}
}
}
fclose(fp);
// Number of blur iterations
// nblurs = atoi(argv[1]); // Get iterations from argument
nblurs = 20;
// Start the timer
double t1=tim.tv_sec+(tim.tv_usec/1000000.0);
// The size of the 1D arrays for the GPU
int size = sizeof(int) * IMAGE_WIDTH * IMAGE_HEIGHT;
// Initialise the arrays to hold the flattened image
int *h_R, *h_G, *h_B;
h_R = (int *)malloc(size);
h_G = (int *)malloc(size);
h_B = (int *)malloc(size);
// Create pointers to GPU array locations
int *d_R, *d_G, *d_B, *d_Rnew, *d_Gnew, *d_Bnew;
// Define how many threads per block
int numBlocksY = ceil(IMAGE_HEIGHT/16.0);
int numBlocksX = ceil(IMAGE_WIDTH/16.0);
dim3 dimBlock(numBlocksX,numBlocksY);
// Define how many blocks per grid
dim3 dimGrid(16, 16);
// Allocate GPU mem for the 1D arrays
hipMalloc((void **)&d_R, size);
hipMalloc((void **)&d_G, size);
hipMalloc((void **)&d_B, size);
hipMalloc((void **)&d_Rnew, size);
hipMalloc((void **)&d_Bnew, size);
hipMalloc((void **)&d_Gnew, size);
// Flatten the 2D arrays to make them easier to handle with CUDA
for (int row=0;row<IMAGE_HEIGHT;row++){
for (int col=0;col<IMAGE_WIDTH;col++){
h_R[IMAGE_WIDTH*row+col] = R[row][col];
h_G[IMAGE_WIDTH*row+col] = G[row][col];
h_B[IMAGE_WIDTH*row+col] = B[row][col];
}
}
// Copy these arrays to the GPU
hipMemcpy(d_R, h_R, size, hipMemcpyHostToDevice);
hipMemcpy(d_G, h_G, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// Start the blur loop
for(k=0;k<nblurs;k++){
// Punch it Chewie
blur<<<dimGrid, dimBlock>>>(d_R, d_G, d_B, d_Rnew, d_Gnew, d_Bnew);
// Copy the modified values to the original locations ready for a new iteration
hipMemcpy(d_R, d_Rnew, size, hipMemcpyDeviceToDevice);
hipMemcpy(d_G, d_Gnew, size, hipMemcpyDeviceToDevice);
hipMemcpy(d_B, d_Bnew, size, hipMemcpyDeviceToDevice);
}
// Copy the data off the GPU
hipMemcpy(h_R, d_Rnew, size, hipMemcpyDeviceToHost);
hipMemcpy(h_G, d_Gnew, size, hipMemcpyDeviceToHost);
hipMemcpy(h_B, d_Bnew, size, hipMemcpyDeviceToHost);
// Convert the 1D arrays back into 2D
for (int row=0;row<IMAGE_HEIGHT;row++){
for (int col=0;col<IMAGE_WIDTH;col++){
R[row][col] = h_R[IMAGE_WIDTH*row+col];
G[row][col] = h_G[IMAGE_WIDTH*row+col];
B[row][col] = h_B[IMAGE_WIDTH*row+col];
}
}
// Free up the allocated memory
hipFree(d_R); hipFree(d_G); hipFree(d_B);
hipFree(d_Rnew); hipFree(d_Gnew); hipFree(d_Bnew);
free(h_R); free(h_G); free(h_B);
fout= fopen("DavidBlur.ps", "w");
for (k=0;k<nlines;k++) fprintf(fout,"\n%s", lines[k]);
fprintf(fout,"\n");
for(row=0;row<rowsize;row++){
for (col=0;col<colsize;col++){
fprintf(fout,"%02x%02x%02x",R[row][col],G[row][col],B[row][col]);
lineno++;
if (lineno==linelen){
fprintf(fout,"\n");
lineno = 0;
}
}
}
fclose(fout);
gettimeofday(&tim, NULL);
double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
printf("%.6lf seconds elapsed\n", t2-t1);
return 0;
} | .text
.file "blur.hip"
.globl _Z19__device_stub__blurPiS_S_S_S_S_ # -- Begin function _Z19__device_stub__blurPiS_S_S_S_S_
.p2align 4, 0x90
.type _Z19__device_stub__blurPiS_S_S_S_S_,@function
_Z19__device_stub__blurPiS_S_S_S_S_: # @_Z19__device_stub__blurPiS_S_S_S_S_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z4blurPiS_S_S_S_S_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z19__device_stub__blurPiS_S_S_S_S_, .Lfunc_end0-_Z19__device_stub__blurPiS_S_S_S_S_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $2677352, %rsp # imm = 0x28DA68
.cfi_def_cfa_offset 2677408
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
xorl %eax, %eax
movq %rax, (%rsp) # 8-byte Spill
leaq 80(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movl $.L.str, %edi
movl $.L.str.1, %esi
callq fopen
movq %rax, %r15
movq %rax, %rdi
callq feof
testl %eax, %eax
je .LBB1_15
.LBB1_1: # %._crit_edge
movq %r15, %rdi
callq fclose
movq 80(%rsp), %rax
movq %rax, 56(%rsp) # 8-byte Spill
movq 88(%rsp), %rax
movq %rax, 120(%rsp) # 8-byte Spill
movl $891952, %edi # imm = 0xD9C30
callq malloc
movq %rax, %rbx
movl $891952, %edi # imm = 0xD9C30
callq malloc
movq %rax, %r14
movl $891952, %edi # imm = 0xD9C30
callq malloc
movq %rax, %r15
leaq 48(%rsp), %rdi
movl $891952, %esi # imm = 0xD9C30
callq hipMalloc
leaq 40(%rsp), %rdi
movl $891952, %esi # imm = 0xD9C30
callq hipMalloc
leaq 32(%rsp), %rdi
movl $891952, %esi # imm = 0xD9C30
callq hipMalloc
leaq 24(%rsp), %rdi
movl $891952, %esi # imm = 0xD9C30
callq hipMalloc
leaq 8(%rsp), %rdi
movl $891952, %esi # imm = 0xD9C30
callq hipMalloc
leaq 16(%rsp), %rdi
movl $891952, %esi # imm = 0xD9C30
callq hipMalloc
leaq 1785392(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movq %rbx, %rdi
callq memcpy@PLT
leaq 893440(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movq %r14, %rdi
callq memcpy@PLT
leaq 1488(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movq %r15, %rdi
callq memcpy@PLT
movq 48(%rsp), %rdi
movl $891952, %edx # imm = 0xD9C30
movq %rbx, 112(%rsp) # 8-byte Spill
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
movl $891952, %edx # imm = 0xD9C30
movq %r14, 104(%rsp) # 8-byte Spill
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
movl $891952, %edx # imm = 0xD9C30
movq %r15, 96(%rsp) # 8-byte Spill
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movl $20, %ebx
movabsq $68719476752, %r12 # imm = 0x1000000010
movabsq $141733920795, %r13 # imm = 0x210000001B
leaq 136(%rsp), %r14
leaq 128(%rsp), %r15
leaq 224(%rsp), %rbp
jmp .LBB1_3
.p2align 4, 0x90
.LBB1_2: # in Loop: Header=BB1_3 Depth=1
movq 48(%rsp), %rdi
movq 24(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movl $3, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
movq 16(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movl $3, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
movq 8(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movl $3, %ecx
callq hipMemcpy
decl %ebx
je .LBB1_5
.LBB1_3: # =>This Inner Loop Header: Depth=1
movq %r12, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.4: # in Loop: Header=BB1_3 Depth=1
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
movq 8(%rsp), %r8
movq %rax, 216(%rsp)
movq %rcx, 208(%rsp)
movq %rdx, 200(%rsp)
movq %rsi, 192(%rsp)
movq %rdi, 184(%rsp)
movq %r8, 176(%rsp)
leaq 216(%rsp), %rax
movq %rax, 224(%rsp)
leaq 208(%rsp), %rax
movq %rax, 232(%rsp)
leaq 200(%rsp), %rax
movq %rax, 240(%rsp)
leaq 192(%rsp), %rax
movq %rax, 248(%rsp)
leaq 184(%rsp), %rax
movq %rax, 256(%rsp)
leaq 176(%rsp), %rax
movq %rax, 264(%rsp)
leaq 160(%rsp), %rdi
leaq 144(%rsp), %rsi
movq %r14, %rdx
movq %r15, %rcx
callq __hipPopCallConfiguration
movq 160(%rsp), %rsi
movl 168(%rsp), %edx
movq 144(%rsp), %rcx
movl 152(%rsp), %r8d
movl $_Z4blurPiS_S_S_S_S_, %edi
movq %rbp, %r9
pushq 128(%rsp)
.cfi_adjust_cfa_offset 8
pushq 144(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB1_2
.LBB1_5:
movq 24(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movq 112(%rsp), %rbx # 8-byte Reload
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movq 104(%rsp), %r14 # 8-byte Reload
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movq 96(%rsp), %r15 # 8-byte Reload
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
leaq 1785392(%rsp), %r12
movl $891952, %edx # imm = 0xD9C30
movq %r12, %rdi
movq %rbx, %rsi
callq memcpy@PLT
leaq 893440(%rsp), %r13
movl $891952, %edx # imm = 0xD9C30
movq %r13, %rdi
movq %r14, %rsi
callq memcpy@PLT
leaq 1488(%rsp), %rbp
movl $891952, %edx # imm = 0xD9C30
movq %rbp, %rdi
movq %r15, %rsi
callq memcpy@PLT
movq 48(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movl $.L.str.4, %edi
movl $.L.str.5, %esi
callq fopen
movq %rax, %rbx
movq (%rsp), %rax # 8-byte Reload
testl %eax, %eax
jle .LBB1_8
# %bb.6: # %.lr.ph164.preheader
movl %eax, %r15d
leaq 480(%rsp), %r14
.p2align 4, 0x90
.LBB1_7: # %.lr.ph164
# =>This Inner Loop Header: Depth=1
movl $.L.str.6, %esi
movq %rbx, %rdi
movq %r14, %rdx
xorl %eax, %eax
callq fprintf
addq $200, %r14
decq %r15
jne .LBB1_7
.LBB1_8: # %._crit_edge165
movl $10, %edi
movq %rbx, %rsi
callq fputc@PLT
xorl %eax, %eax
xorl %r15d, %r15d
jmp .LBB1_10
.p2align 4, 0x90
.LBB1_9: # in Loop: Header=BB1_10 Depth=1
movq (%rsp), %rax # 8-byte Reload
incq %rax
addq $1712, %r12 # imm = 0x6B0
addq $1712, %r13 # imm = 0x6B0
addq $1712, %rbp # imm = 0x6B0
cmpq $521, %rax # imm = 0x209
je .LBB1_14
.LBB1_10: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_12 Depth 2
movq %rax, (%rsp) # 8-byte Spill
xorl %r14d, %r14d
jmp .LBB1_12
.p2align 4, 0x90
.LBB1_11: # in Loop: Header=BB1_12 Depth=2
incq %r14
cmpq $428, %r14 # imm = 0x1AC
je .LBB1_9
.LBB1_12: # Parent Loop BB1_10 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r12,%r14,4), %edx
movl (%r13,%r14,4), %ecx
movl (%rbp,%r14,4), %r8d
movl $.L.str.8, %esi
movq %rbx, %rdi
xorl %eax, %eax
callq fprintf
incl %r15d
cmpl $12, %r15d
jne .LBB1_11
# %bb.13: # in Loop: Header=BB1_12 Depth=2
movl $10, %edi
movq %rbx, %rsi
callq fputc@PLT
xorl %r15d, %r15d
jmp .LBB1_11
.LBB1_14:
cvtsi2sdq 56(%rsp), %xmm0 # 8-byte Folded Reload
cvtsi2sdq 120(%rsp), %xmm1 # 8-byte Folded Reload
divsd .LCPI1_0(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, (%rsp) # 8-byte Spill
movq %rbx, %rdi
callq fclose
leaq 80(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 80(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 88(%rsp), %xmm0
divsd .LCPI1_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd (%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.9, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $2677352, %rsp # imm = 0x28DA68
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_15: # %.lr.ph155.preheader
.cfi_def_cfa_offset 2677408
leaq 272(%rsp), %rbp
leaq 68(%rsp), %r13
xorl %eax, %eax
movq %rax, (%rsp) # 8-byte Spill
xorl %r14d, %r14d
xorl %ebx, %ebx
movq %r15, 56(%rsp) # 8-byte Spill
jmp .LBB1_19
.p2align 4, 0x90
.LBB1_25: # in Loop: Header=BB1_19 Depth=1
movq 56(%rsp), %r15 # 8-byte Reload
.LBB1_18: # %.loopexit
# in Loop: Header=BB1_19 Depth=1
movq %r15, %rdi
callq feof
testl %eax, %eax
leaq 272(%rsp), %rbp
jne .LBB1_1
.LBB1_19: # %.lr.ph155
# =>This Loop Header: Depth=1
# Child Loop BB1_23 Depth 2
movl $.L.str.2, %esi
movq %r15, %r12
movq %r15, %rdi
movq %rbp, %rdx
xorl %eax, %eax
callq __isoc23_fscanf
movq (%rsp), %rcx # 8-byte Reload
cmpl $5, %ecx
jge .LBB1_20
# %bb.16: # in Loop: Header=BB1_19 Depth=1
movslq %ecx, %rax
incl %ecx
movq %rcx, (%rsp) # 8-byte Spill
imulq $200, %rax, %rax
leaq (%rsp,%rax), %rdi
addq $480, %rdi # imm = 0x1E0
movq %rbp, %rsi
callq strcpy
jmp .LBB1_17
.p2align 4, 0x90
.LBB1_20: # %.preheader145
# in Loop: Header=BB1_19 Depth=1
cmpb $0, 272(%rsp)
je .LBB1_17
# %bb.21: # %.lr.ph.preheader
# in Loop: Header=BB1_19 Depth=1
leaq 272(%rsp), %rbp
leaq 72(%rsp), %r12
jmp .LBB1_23
.p2align 4, 0x90
.LBB1_22: # in Loop: Header=BB1_23 Depth=2
incl %ebx
cmpb $0, 2(%rbp)
leaq 2(%rbp), %rbp
je .LBB1_25
.LBB1_23: # %.lr.ph
# Parent Loop BB1_19 Depth=1
# => This Inner Loop Header: Depth=2
movl %r14d, %r15d
movl $.L.str.3, %esi
movq %rbp, %rdi
leaq 76(%rsp), %rdx
xorl %eax, %eax
callq __isoc23_sscanf
leaq 2(%rbp), %rdi
movl $.L.str.3, %esi
movq %r12, %rdx
xorl %eax, %eax
callq __isoc23_sscanf
addq $4, %rbp
movl $.L.str.3, %esi
movq %rbp, %rdi
movq %r13, %rdx
xorl %eax, %eax
callq __isoc23_sscanf
xorl %r14d, %r14d
cmpl $428, %ebx # imm = 0x1AC
sete %r14b
movl $0, %eax
cmovel %eax, %ebx
addl %r15d, %r14d
cmpl $520, %r14d # imm = 0x208
jg .LBB1_22
# %bb.24: # in Loop: Header=BB1_23 Depth=2
movl 76(%rsp), %eax
movslq %r14d, %rcx
movslq %ebx, %rdx
imulq $1712, %rcx, %rcx # imm = 0x6B0
leaq (%rsp,%rcx), %rsi
addq $1785392, %rsi # imm = 0x1B3E30
movl %eax, (%rsi,%rdx,4)
movl 72(%rsp), %eax
leaq 893440(%rsp,%rcx), %rsi
movl %eax, (%rsi,%rdx,4)
movl 68(%rsp), %eax
addq %rsp, %rcx
addq $1488, %rcx # imm = 0x5D0
movl %eax, (%rcx,%rdx,4)
jmp .LBB1_22
.p2align 4, 0x90
.LBB1_17: # %.loopexit
# in Loop: Header=BB1_19 Depth=1
movq %r12, %r15
jmp .LBB1_18
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4blurPiS_S_S_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4blurPiS_S_S_S_S_,@object # @_Z4blurPiS_S_S_S_S_
.section .rodata,"a",@progbits
.globl _Z4blurPiS_S_S_S_S_
.p2align 3, 0x0
_Z4blurPiS_S_S_S_S_:
.quad _Z19__device_stub__blurPiS_S_S_S_S_
.size _Z4blurPiS_S_S_S_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "David.ps"
.size .L.str, 9
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "r"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\n%[^\n]"
.size .L.str.2, 7
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%2x"
.size .L.str.3, 4
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "DavidBlur.ps"
.size .L.str.4, 13
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "w"
.size .L.str.5, 2
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\n%s"
.size .L.str.6, 4
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%02x%02x%02x"
.size .L.str.8, 13
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "%.6lf seconds elapsed\n"
.size .L.str.9, 23
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4blurPiS_S_S_S_S_"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__blurPiS_S_S_S_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4blurPiS_S_S_S_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_0016cd1d_00000000-6_blur.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2060:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_
.type _Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_, @function
_Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_:
.LFB2082:
.cfi_startproc
endbr64
subq $184, %rsp
.cfi_def_cfa_offset 192
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %r9, (%rsp)
movq %fs:40, %rax
movq %rax, 168(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movq %rsp, %rax
movq %rax, 152(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 168(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $184, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 200
pushq 56(%rsp)
.cfi_def_cfa_offset 208
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z4blurPiS_S_S_S_S_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2082:
.size _Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_, .-_Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_
.globl _Z4blurPiS_S_S_S_S_
.type _Z4blurPiS_S_S_S_S_, @function
_Z4blurPiS_S_S_S_S_:
.LFB2083:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2083:
.size _Z4blurPiS_S_S_S_S_, .-_Z4blurPiS_S_S_S_S_
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "r"
.LC1:
.string "David.ps"
.LC2:
.string "\n%[^\n]"
.LC3:
.string "%2x"
.LC5:
.string "w"
.LC6:
.string "DavidBlur.ps"
.LC7:
.string "\n%s"
.LC8:
.string "\n"
.LC9:
.string "%02x%02x%02x"
.LC10:
.string "%.6lf seconds elapsed\n"
.text
.globl main
.type main, @function
main:
.LFB2057:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
leaq -2674688(%rsp), %r11
.cfi_def_cfa 11, 2674744
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $2536, %rsp
.cfi_def_cfa_offset 2677280
movq %fs:40, %rax
movq %rax, 2677208(%rsp)
xorl %eax, %eax
leaq 128(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
leaq .LC0(%rip), %rsi
leaq .LC1(%rip), %rdi
call fopen@PLT
movq %rax, %r14
movl $0, %r12d
movl $0, %r15d
movl $0, %ebx
leaq 2677008(%rsp), %rax
movq %rax, 8(%rsp)
leaq 144(%rsp), %rax
movq %rax, 24(%rsp)
leaq .LC3(%rip), %r13
.L12:
movq %r14, %rdi
call feof@PLT
movl %eax, 20(%rsp)
testl %eax, %eax
jne .L43
movq 8(%rsp), %rdx
leaq .LC2(%rip), %rsi
movq %r14, %rdi
movl $0, %eax
call __isoc23_fscanf@PLT
cmpl $4, %ebx
jg .L44
leal 1(%rbx), %ebp
movslq %ebx, %rbx
leaq (%rbx,%rbx,4), %rax
leaq (%rax,%rax,4), %rax
salq $3, %rax
movl $1000, %edx
cmpq %rdx, %rax
cmovnb %rax, %rdx
subq %rax, %rdx
movq 24(%rsp), %rsi
leaq (%rsi,%rax), %rdi
movq 8(%rsp), %rsi
call __strcpy_chk@PLT
movl %ebp, %ebx
jmp .L12
.L44:
leaq 2677008(%rsp), %rbp
cmpb $0, 2677008(%rsp)
jne .L14
jmp .L12
.L45:
addl $1, %r15d
movl 20(%rsp), %r12d
jmp .L16
.L17:
addl $1, %r12d
addq $6, %rbp
cmpb $0, 0(%rbp)
je .L12
.L14:
leaq 44(%rsp), %rdx
movq %r13, %rsi
movq %rbp, %rdi
movl $0, %eax
call __isoc23_sscanf@PLT
leaq 48(%rsp), %rdx
leaq 2(%rbp), %rdi
movq %r13, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
leaq 52(%rsp), %rdx
leaq 4(%rbp), %rdi
movq %r13, %rsi
movl $0, %eax
call __isoc23_sscanf@PLT
cmpl $428, %r12d
je .L45
.L16:
cmpl $520, %r15d
jg .L17
movslq %r12d, %rdx
movslq %r15d, %rax
imulq $428, %rax, %rax
addq %rdx, %rax
movl 44(%rsp), %edx
movl %edx, 1152(%rsp,%rax,4)
movl 48(%rsp), %edx
movl %edx, 893104(%rsp,%rax,4)
movl 52(%rsp), %edx
movl %edx, 1785056(%rsp,%rax,4)
jmp .L17
.L43:
movq %r14, %rdi
call fclose@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 136(%rsp), %xmm0
divsd .LC4(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq 128(%rsp), %xmm1
addsd %xmm1, %xmm0
movsd %xmm0, 8(%rsp)
movl $891952, %edi
call malloc@PLT
movq %rax, %r13
movl $891952, %edi
call malloc@PLT
movq %rax, %r12
movl $891952, %edi
call malloc@PLT
movq %rax, %rbp
movl $27, 104(%rsp)
movl $33, 108(%rsp)
movl $1, 112(%rsp)
movl $16, 116(%rsp)
movl $16, 120(%rsp)
movl $1, 124(%rsp)
leaq 56(%rsp), %rdi
movl $891952, %esi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movl $891952, %esi
call cudaMalloc@PLT
leaq 72(%rsp), %rdi
movl $891952, %esi
call cudaMalloc@PLT
leaq 80(%rsp), %rdi
movl $891952, %esi
call cudaMalloc@PLT
leaq 96(%rsp), %rdi
movl $891952, %esi
call cudaMalloc@PLT
leaq 88(%rsp), %rdi
movl $891952, %esi
call cudaMalloc@PLT
movl $1712, %ecx
.L19:
leaq -1712(%rcx), %rax
.L20:
movl 1152(%rsp,%rax), %edx
movl %edx, 0(%r13,%rax)
movl 893104(%rsp,%rax), %edx
movl %edx, (%r12,%rax)
movl 1785056(%rsp,%rax), %edx
movl %edx, 0(%rbp,%rax)
addq $4, %rax
cmpq %rcx, %rax
jne .L20
addq $1712, %rcx
cmpq $893664, %rcx
jne .L19
movl $1, %ecx
movl $891952, %edx
movq %r13, %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $891952, %edx
movq %r12, %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movl $891952, %edx
movq %rbp, %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
movl $20, %r14d
jmp .L23
.L22:
movl $3, %ecx
movl $891952, %edx
movq 80(%rsp), %rsi
movq 56(%rsp), %rdi
call cudaMemcpy@PLT
movl $3, %ecx
movl $891952, %edx
movq 88(%rsp), %rsi
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
movl $3, %ecx
movl $891952, %edx
movq 96(%rsp), %rsi
movq 72(%rsp), %rdi
call cudaMemcpy@PLT
subl $1, %r14d
je .L46
.L23:
movl 112(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 104(%rsp), %rdx
movq 116(%rsp), %rdi
movl 124(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L22
movq 96(%rsp), %r9
movq 88(%rsp), %r8
movq 80(%rsp), %rcx
movq 72(%rsp), %rdx
movq 64(%rsp), %rsi
movq 56(%rsp), %rdi
call _Z33__device_stub__Z4blurPiS_S_S_S_S_PiS_S_S_S_S_
jmp .L22
.L46:
movl $2, %ecx
movl $891952, %edx
movq 80(%rsp), %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movl $891952, %edx
movq 88(%rsp), %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl $2, %ecx
movl $891952, %edx
movq 96(%rsp), %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl $1712, %ecx
.L24:
leaq -1712(%rcx), %rax
.L25:
movl 0(%r13,%rax), %edx
movl %edx, 1152(%rsp,%rax)
movl (%r12,%rax), %edx
movl %edx, 893104(%rsp,%rax)
movl 0(%rbp,%rax), %edx
movl %edx, 1785056(%rsp,%rax)
addq $4, %rax
cmpq %rcx, %rax
jne .L25
addq $1712, %rcx
cmpq $893664, %rcx
jne .L24
movq 56(%rsp), %rdi
call cudaFree@PLT
movq 64(%rsp), %rdi
call cudaFree@PLT
movq 72(%rsp), %rdi
call cudaFree@PLT
movq 80(%rsp), %rdi
call cudaFree@PLT
movq 88(%rsp), %rdi
call cudaFree@PLT
movq 96(%rsp), %rdi
call cudaFree@PLT
movq %r13, %rdi
call free@PLT
movq %r12, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
leaq .LC5(%rip), %rsi
leaq .LC6(%rip), %rdi
call fopen@PLT
movq %rax, %r12
testl %ebx, %ebx
jle .L27
leaq 144(%rsp), %rbp
movslq %ebx, %rbx
leaq (%rbx,%rbx,4), %rax
leaq (%rax,%rax,4), %rax
leaq 0(%rbp,%rax,8), %r13
leaq .LC7(%rip), %rbx
.L28:
movq %rbp, %rcx
movq %rbx, %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
addq $200, %rbp
cmpq %rbp, %r13
jne .L28
.L27:
leaq .LC8(%rip), %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1712, %r13d
movl $0, %ebp
leaq .LC9(%rip), %r14
leaq .LC8(%rip), %r15
jmp .L29
.L30:
addq $4, %rbx
cmpq %rbx, %r13
je .L47
.L31:
movl 1152(%rsp,%rbx), %ecx
movl 1785056(%rsp,%rbx), %r9d
movl 893104(%rsp,%rbx), %r8d
movq %r14, %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
addl $1, %ebp
cmpl $12, %ebp
jne .L30
movq %r15, %rdx
movl $2, %esi
movq %r12, %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $0, %ebp
jmp .L30
.L47:
addq $1712, %r13
cmpq $893664, %r13
je .L32
.L29:
leaq -1712(%r13), %rbx
jmp .L31
.L32:
movq %r12, %rdi
call fclose@PLT
leaq 128(%rsp), %rdi
movl $0, %esi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 136(%rsp), %xmm0
divsd .LC4(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq 128(%rsp), %xmm1
addsd %xmm1, %xmm0
subsd 8(%rsp), %xmm0
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 2677208(%rsp), %rax
subq %fs:40, %rax
jne .L48
movl $0, %eax
addq $2677224, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L48:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size main, .-main
.section .rodata.str1.1
.LC11:
.string "_Z4blurPiS_S_S_S_S_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2085:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC11(%rip), %rdx
movq %rdx, %rcx
leaq _Z4blurPiS_S_S_S_S_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2085:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC4:
.long 0
.long 1093567616
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "blur.hip"
.globl _Z19__device_stub__blurPiS_S_S_S_S_ # -- Begin function _Z19__device_stub__blurPiS_S_S_S_S_
.p2align 4, 0x90
.type _Z19__device_stub__blurPiS_S_S_S_S_,@function
_Z19__device_stub__blurPiS_S_S_S_S_: # @_Z19__device_stub__blurPiS_S_S_S_S_
.cfi_startproc
# %bb.0:
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
movq %r9, 48(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 48(%rsp), %rax
movq %rax, 136(%rsp)
leaq 32(%rsp), %rdi
leaq 16(%rsp), %rsi
leaq 8(%rsp), %rdx
movq %rsp, %rcx
callq __hipPopCallConfiguration
movq 32(%rsp), %rsi
movl 40(%rsp), %edx
movq 16(%rsp), %rcx
movl 24(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z4blurPiS_S_S_S_S_, %edi
pushq (%rsp)
.cfi_adjust_cfa_offset 8
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $168, %rsp
.cfi_adjust_cfa_offset -168
retq
.Lfunc_end0:
.size _Z19__device_stub__blurPiS_S_S_S_S_, .Lfunc_end0-_Z19__device_stub__blurPiS_S_S_S_S_
.cfi_endproc
# -- End function
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function main
.LCPI1_0:
.quad 0x412e848000000000 # double 1.0E+6
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $2677352, %rsp # imm = 0x28DA68
.cfi_def_cfa_offset 2677408
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
xorl %eax, %eax
movq %rax, (%rsp) # 8-byte Spill
leaq 80(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
movl $.L.str, %edi
movl $.L.str.1, %esi
callq fopen
movq %rax, %r15
movq %rax, %rdi
callq feof
testl %eax, %eax
je .LBB1_15
.LBB1_1: # %._crit_edge
movq %r15, %rdi
callq fclose
movq 80(%rsp), %rax
movq %rax, 56(%rsp) # 8-byte Spill
movq 88(%rsp), %rax
movq %rax, 120(%rsp) # 8-byte Spill
movl $891952, %edi # imm = 0xD9C30
callq malloc
movq %rax, %rbx
movl $891952, %edi # imm = 0xD9C30
callq malloc
movq %rax, %r14
movl $891952, %edi # imm = 0xD9C30
callq malloc
movq %rax, %r15
leaq 48(%rsp), %rdi
movl $891952, %esi # imm = 0xD9C30
callq hipMalloc
leaq 40(%rsp), %rdi
movl $891952, %esi # imm = 0xD9C30
callq hipMalloc
leaq 32(%rsp), %rdi
movl $891952, %esi # imm = 0xD9C30
callq hipMalloc
leaq 24(%rsp), %rdi
movl $891952, %esi # imm = 0xD9C30
callq hipMalloc
leaq 8(%rsp), %rdi
movl $891952, %esi # imm = 0xD9C30
callq hipMalloc
leaq 16(%rsp), %rdi
movl $891952, %esi # imm = 0xD9C30
callq hipMalloc
leaq 1785392(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movq %rbx, %rdi
callq memcpy@PLT
leaq 893440(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movq %r14, %rdi
callq memcpy@PLT
leaq 1488(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movq %r15, %rdi
callq memcpy@PLT
movq 48(%rsp), %rdi
movl $891952, %edx # imm = 0xD9C30
movq %rbx, 112(%rsp) # 8-byte Spill
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
movl $891952, %edx # imm = 0xD9C30
movq %r14, 104(%rsp) # 8-byte Spill
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
movl $891952, %edx # imm = 0xD9C30
movq %r15, 96(%rsp) # 8-byte Spill
movq %r15, %rsi
movl $1, %ecx
callq hipMemcpy
movl $20, %ebx
movabsq $68719476752, %r12 # imm = 0x1000000010
movabsq $141733920795, %r13 # imm = 0x210000001B
leaq 136(%rsp), %r14
leaq 128(%rsp), %r15
leaq 224(%rsp), %rbp
jmp .LBB1_3
.p2align 4, 0x90
.LBB1_2: # in Loop: Header=BB1_3 Depth=1
movq 48(%rsp), %rdi
movq 24(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movl $3, %ecx
callq hipMemcpy
movq 40(%rsp), %rdi
movq 16(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movl $3, %ecx
callq hipMemcpy
movq 32(%rsp), %rdi
movq 8(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movl $3, %ecx
callq hipMemcpy
decl %ebx
je .LBB1_5
.LBB1_3: # =>This Inner Loop Header: Depth=1
movq %r12, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_2
# %bb.4: # in Loop: Header=BB1_3 Depth=1
movq 48(%rsp), %rax
movq 40(%rsp), %rcx
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
movq 16(%rsp), %rdi
movq 8(%rsp), %r8
movq %rax, 216(%rsp)
movq %rcx, 208(%rsp)
movq %rdx, 200(%rsp)
movq %rsi, 192(%rsp)
movq %rdi, 184(%rsp)
movq %r8, 176(%rsp)
leaq 216(%rsp), %rax
movq %rax, 224(%rsp)
leaq 208(%rsp), %rax
movq %rax, 232(%rsp)
leaq 200(%rsp), %rax
movq %rax, 240(%rsp)
leaq 192(%rsp), %rax
movq %rax, 248(%rsp)
leaq 184(%rsp), %rax
movq %rax, 256(%rsp)
leaq 176(%rsp), %rax
movq %rax, 264(%rsp)
leaq 160(%rsp), %rdi
leaq 144(%rsp), %rsi
movq %r14, %rdx
movq %r15, %rcx
callq __hipPopCallConfiguration
movq 160(%rsp), %rsi
movl 168(%rsp), %edx
movq 144(%rsp), %rcx
movl 152(%rsp), %r8d
movl $_Z4blurPiS_S_S_S_S_, %edi
movq %rbp, %r9
pushq 128(%rsp)
.cfi_adjust_cfa_offset 8
pushq 144(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
jmp .LBB1_2
.LBB1_5:
movq 24(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movq 112(%rsp), %rbx # 8-byte Reload
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
movq 16(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movq 104(%rsp), %r14 # 8-byte Reload
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
movq 8(%rsp), %rsi
movl $891952, %edx # imm = 0xD9C30
movq 96(%rsp), %r15 # 8-byte Reload
movq %r15, %rdi
movl $2, %ecx
callq hipMemcpy
leaq 1785392(%rsp), %r12
movl $891952, %edx # imm = 0xD9C30
movq %r12, %rdi
movq %rbx, %rsi
callq memcpy@PLT
leaq 893440(%rsp), %r13
movl $891952, %edx # imm = 0xD9C30
movq %r13, %rdi
movq %r14, %rsi
callq memcpy@PLT
leaq 1488(%rsp), %rbp
movl $891952, %edx # imm = 0xD9C30
movq %rbp, %rdi
movq %r15, %rsi
callq memcpy@PLT
movq 48(%rsp), %rdi
callq hipFree
movq 40(%rsp), %rdi
callq hipFree
movq 32(%rsp), %rdi
callq hipFree
movq 24(%rsp), %rdi
callq hipFree
movq 16(%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq %r15, %rdi
callq free
movl $.L.str.4, %edi
movl $.L.str.5, %esi
callq fopen
movq %rax, %rbx
movq (%rsp), %rax # 8-byte Reload
testl %eax, %eax
jle .LBB1_8
# %bb.6: # %.lr.ph164.preheader
movl %eax, %r15d
leaq 480(%rsp), %r14
.p2align 4, 0x90
.LBB1_7: # %.lr.ph164
# =>This Inner Loop Header: Depth=1
movl $.L.str.6, %esi
movq %rbx, %rdi
movq %r14, %rdx
xorl %eax, %eax
callq fprintf
addq $200, %r14
decq %r15
jne .LBB1_7
.LBB1_8: # %._crit_edge165
movl $10, %edi
movq %rbx, %rsi
callq fputc@PLT
xorl %eax, %eax
xorl %r15d, %r15d
jmp .LBB1_10
.p2align 4, 0x90
.LBB1_9: # in Loop: Header=BB1_10 Depth=1
movq (%rsp), %rax # 8-byte Reload
incq %rax
addq $1712, %r12 # imm = 0x6B0
addq $1712, %r13 # imm = 0x6B0
addq $1712, %rbp # imm = 0x6B0
cmpq $521, %rax # imm = 0x209
je .LBB1_14
.LBB1_10: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB1_12 Depth 2
movq %rax, (%rsp) # 8-byte Spill
xorl %r14d, %r14d
jmp .LBB1_12
.p2align 4, 0x90
.LBB1_11: # in Loop: Header=BB1_12 Depth=2
incq %r14
cmpq $428, %r14 # imm = 0x1AC
je .LBB1_9
.LBB1_12: # Parent Loop BB1_10 Depth=1
# => This Inner Loop Header: Depth=2
movl (%r12,%r14,4), %edx
movl (%r13,%r14,4), %ecx
movl (%rbp,%r14,4), %r8d
movl $.L.str.8, %esi
movq %rbx, %rdi
xorl %eax, %eax
callq fprintf
incl %r15d
cmpl $12, %r15d
jne .LBB1_11
# %bb.13: # in Loop: Header=BB1_12 Depth=2
movl $10, %edi
movq %rbx, %rsi
callq fputc@PLT
xorl %r15d, %r15d
jmp .LBB1_11
.LBB1_14:
cvtsi2sdq 56(%rsp), %xmm0 # 8-byte Folded Reload
cvtsi2sdq 120(%rsp), %xmm1 # 8-byte Folded Reload
divsd .LCPI1_0(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, (%rsp) # 8-byte Spill
movq %rbx, %rdi
callq fclose
leaq 80(%rsp), %rdi
xorl %esi, %esi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 80(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 88(%rsp), %xmm0
divsd .LCPI1_0(%rip), %xmm0
addsd %xmm1, %xmm0
subsd (%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.9, %edi
movb $1, %al
callq printf
xorl %eax, %eax
addq $2677352, %rsp # imm = 0x28DA68
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB1_15: # %.lr.ph155.preheader
.cfi_def_cfa_offset 2677408
leaq 272(%rsp), %rbp
leaq 68(%rsp), %r13
xorl %eax, %eax
movq %rax, (%rsp) # 8-byte Spill
xorl %r14d, %r14d
xorl %ebx, %ebx
movq %r15, 56(%rsp) # 8-byte Spill
jmp .LBB1_19
.p2align 4, 0x90
.LBB1_25: # in Loop: Header=BB1_19 Depth=1
movq 56(%rsp), %r15 # 8-byte Reload
.LBB1_18: # %.loopexit
# in Loop: Header=BB1_19 Depth=1
movq %r15, %rdi
callq feof
testl %eax, %eax
leaq 272(%rsp), %rbp
jne .LBB1_1
.LBB1_19: # %.lr.ph155
# =>This Loop Header: Depth=1
# Child Loop BB1_23 Depth 2
movl $.L.str.2, %esi
movq %r15, %r12
movq %r15, %rdi
movq %rbp, %rdx
xorl %eax, %eax
callq __isoc23_fscanf
movq (%rsp), %rcx # 8-byte Reload
cmpl $5, %ecx
jge .LBB1_20
# %bb.16: # in Loop: Header=BB1_19 Depth=1
movslq %ecx, %rax
incl %ecx
movq %rcx, (%rsp) # 8-byte Spill
imulq $200, %rax, %rax
leaq (%rsp,%rax), %rdi
addq $480, %rdi # imm = 0x1E0
movq %rbp, %rsi
callq strcpy
jmp .LBB1_17
.p2align 4, 0x90
.LBB1_20: # %.preheader145
# in Loop: Header=BB1_19 Depth=1
cmpb $0, 272(%rsp)
je .LBB1_17
# %bb.21: # %.lr.ph.preheader
# in Loop: Header=BB1_19 Depth=1
leaq 272(%rsp), %rbp
leaq 72(%rsp), %r12
jmp .LBB1_23
.p2align 4, 0x90
.LBB1_22: # in Loop: Header=BB1_23 Depth=2
incl %ebx
cmpb $0, 2(%rbp)
leaq 2(%rbp), %rbp
je .LBB1_25
.LBB1_23: # %.lr.ph
# Parent Loop BB1_19 Depth=1
# => This Inner Loop Header: Depth=2
movl %r14d, %r15d
movl $.L.str.3, %esi
movq %rbp, %rdi
leaq 76(%rsp), %rdx
xorl %eax, %eax
callq __isoc23_sscanf
leaq 2(%rbp), %rdi
movl $.L.str.3, %esi
movq %r12, %rdx
xorl %eax, %eax
callq __isoc23_sscanf
addq $4, %rbp
movl $.L.str.3, %esi
movq %rbp, %rdi
movq %r13, %rdx
xorl %eax, %eax
callq __isoc23_sscanf
xorl %r14d, %r14d
cmpl $428, %ebx # imm = 0x1AC
sete %r14b
movl $0, %eax
cmovel %eax, %ebx
addl %r15d, %r14d
cmpl $520, %r14d # imm = 0x208
jg .LBB1_22
# %bb.24: # in Loop: Header=BB1_23 Depth=2
movl 76(%rsp), %eax
movslq %r14d, %rcx
movslq %ebx, %rdx
imulq $1712, %rcx, %rcx # imm = 0x6B0
leaq (%rsp,%rcx), %rsi
addq $1785392, %rsi # imm = 0x1B3E30
movl %eax, (%rsi,%rdx,4)
movl 72(%rsp), %eax
leaq 893440(%rsp,%rcx), %rsi
movl %eax, (%rsi,%rdx,4)
movl 68(%rsp), %eax
addq %rsp, %rcx
addq $1488, %rcx # imm = 0x5D0
movl %eax, (%rcx,%rdx,4)
jmp .LBB1_22
.p2align 4, 0x90
.LBB1_17: # %.loopexit
# in Loop: Header=BB1_19 Depth=1
movq %r12, %r15
jmp .LBB1_18
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z4blurPiS_S_S_S_S_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z4blurPiS_S_S_S_S_,@object # @_Z4blurPiS_S_S_S_S_
.section .rodata,"a",@progbits
.globl _Z4blurPiS_S_S_S_S_
.p2align 3, 0x0
_Z4blurPiS_S_S_S_S_:
.quad _Z19__device_stub__blurPiS_S_S_S_S_
.size _Z4blurPiS_S_S_S_S_, 8
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "David.ps"
.size .L.str, 9
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "r"
.size .L.str.1, 2
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "\n%[^\n]"
.size .L.str.2, 7
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "%2x"
.size .L.str.3, 4
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "DavidBlur.ps"
.size .L.str.4, 13
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "w"
.size .L.str.5, 2
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "\n%s"
.size .L.str.6, 4
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "%02x%02x%02x"
.size .L.str.8, 13
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "%.6lf seconds elapsed\n"
.size .L.str.9, 23
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z4blurPiS_S_S_S_S_"
.size .L__unnamed_1, 20
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z19__device_stub__blurPiS_S_S_S_S_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z4blurPiS_S_S_S_S_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <sys/time.h>
__global__ void AddArraysAtDevice(int *a, int *b, int count){
int t_id = blockIdx.x * blockDim.x + threadIdx.x;
if(t_id < count){
a[t_id] += b[t_id];
}
}
int main(){
int count = 1000;
int *h_a = (int*)malloc(sizeof(int) * 1000);
int *h_b = (int*)malloc(sizeof(int) * 1000);
for(int i=0; i<count; i++){
h_a[i] = i;
h_b[i] = count-i;
}
int *d_a, *d_b;
if(cudaMalloc(&d_a, sizeof(int)*count) != cudaSuccess){
printf("Problem with memory allocation\n");
return 0;
}
if(cudaMalloc(&d_b, sizeof(int)*count) != cudaSuccess){
printf("Problem with memory allocation\n");
return 0;
}
if(cudaMemcpy(d_a, h_a, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess){
printf("Problem with copying from host to device\n");
return 0;
}
if(cudaMemcpy(d_b, h_b, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess){
printf("Problem with copying from host to device\n");
return 0;
}
AddArraysAtDevice<<<count / 256 + 1, 256>>>(d_a, d_b, count);
if(cudaMemcpy(h_a, d_a, sizeof(int)*count, cudaMemcpyDeviceToHost) != cudaSuccess){
printf("Problem with copying from device to host\n");
return 0;
}
printf("Numbers added on GPU!!\n");
for(int i=0; i<count; i++){
printf("Num %d: %d\n", i+1, h_a[i]);
}
free(h_a);
free(h_b);
cudaFree(d_a);
cudaFree(d_b);
return 0;
} | code for sm_80
Function : _Z17AddArraysAtDevicePiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*00a0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ IADD3 R7, R0, R3, RZ ; /* 0x0000000300077210 */
/* 0x004fca0007ffe0ff */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <sys/time.h>
__global__ void AddArraysAtDevice(int *a, int *b, int count){
int t_id = blockIdx.x * blockDim.x + threadIdx.x;
if(t_id < count){
a[t_id] += b[t_id];
}
}
int main(){
int count = 1000;
int *h_a = (int*)malloc(sizeof(int) * 1000);
int *h_b = (int*)malloc(sizeof(int) * 1000);
for(int i=0; i<count; i++){
h_a[i] = i;
h_b[i] = count-i;
}
int *d_a, *d_b;
if(cudaMalloc(&d_a, sizeof(int)*count) != cudaSuccess){
printf("Problem with memory allocation\n");
return 0;
}
if(cudaMalloc(&d_b, sizeof(int)*count) != cudaSuccess){
printf("Problem with memory allocation\n");
return 0;
}
if(cudaMemcpy(d_a, h_a, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess){
printf("Problem with copying from host to device\n");
return 0;
}
if(cudaMemcpy(d_b, h_b, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess){
printf("Problem with copying from host to device\n");
return 0;
}
AddArraysAtDevice<<<count / 256 + 1, 256>>>(d_a, d_b, count);
if(cudaMemcpy(h_a, d_a, sizeof(int)*count, cudaMemcpyDeviceToHost) != cudaSuccess){
printf("Problem with copying from device to host\n");
return 0;
}
printf("Numbers added on GPU!!\n");
for(int i=0; i<count; i++){
printf("Num %d: %d\n", i+1, h_a[i]);
}
free(h_a);
free(h_b);
cudaFree(d_a);
cudaFree(d_b);
return 0;
} | .file "tmpxft_00128627_00000000-6_cudaAddMatrices.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2073:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2073:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i
.type _Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i, @function
_Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i:
.LFB2095:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17AddArraysAtDevicePiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2095:
.size _Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i, .-_Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i
.globl _Z17AddArraysAtDevicePiS_i
.type _Z17AddArraysAtDevicePiS_i, @function
_Z17AddArraysAtDevicePiS_i:
.LFB2096:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _Z17AddArraysAtDevicePiS_i, .-_Z17AddArraysAtDevicePiS_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Problem with memory allocation\n"
.align 8
.LC1:
.string "Problem with copying from host to device\n"
.align 8
.LC2:
.string "Problem with copying from device to host\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "Numbers added on GPU!!\n"
.LC4:
.string "Num %d: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2070:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $4000, %edi
call malloc@PLT
movq %rax, %rbx
movl $4000, %edi
call malloc@PLT
movq %rax, %rbp
movl $0, %eax
movl $1000, %ecx
.L12:
movl %eax, (%rbx,%rax,4)
movl %ecx, %edx
subl %eax, %edx
movl %edx, 0(%rbp,%rax,4)
addq $1, %rax
cmpq $1000, %rax
jne .L12
movq %rsp, %rdi
movl $4000, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L25
leaq 8(%rsp), %rdi
movl $4000, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L26
movl $1, %ecx
movl $4000, %edx
movq %rbx, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L27
movl $1, %ecx
movl $4000, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L28
movl $256, 28(%rsp)
movl $1, 32(%rsp)
movl $4, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L18:
movl $2, %ecx
movl $4000, %edx
movq (%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L30
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %r12d
leaq .LC4(%rip), %r13
.L20:
movl -4(%rbx,%r12,4), %ecx
movl %r12d, %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %r12
cmpq $1001, %r12
jne .L20
movq %rbx, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
jmp .L14
.L25:
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L14:
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L31
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L14
.L27:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L14
.L28:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L14
.L29:
movl $1000, %edx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i
jmp .L18
.L30:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L14
.L31:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2070:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z17AddArraysAtDevicePiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2098:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z17AddArraysAtDevicePiS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <sys/time.h>
__global__ void AddArraysAtDevice(int *a, int *b, int count){
int t_id = blockIdx.x * blockDim.x + threadIdx.x;
if(t_id < count){
a[t_id] += b[t_id];
}
}
int main(){
int count = 1000;
int *h_a = (int*)malloc(sizeof(int) * 1000);
int *h_b = (int*)malloc(sizeof(int) * 1000);
for(int i=0; i<count; i++){
h_a[i] = i;
h_b[i] = count-i;
}
int *d_a, *d_b;
if(cudaMalloc(&d_a, sizeof(int)*count) != cudaSuccess){
printf("Problem with memory allocation\n");
return 0;
}
if(cudaMalloc(&d_b, sizeof(int)*count) != cudaSuccess){
printf("Problem with memory allocation\n");
return 0;
}
if(cudaMemcpy(d_a, h_a, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess){
printf("Problem with copying from host to device\n");
return 0;
}
if(cudaMemcpy(d_b, h_b, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess){
printf("Problem with copying from host to device\n");
return 0;
}
AddArraysAtDevice<<<count / 256 + 1, 256>>>(d_a, d_b, count);
if(cudaMemcpy(h_a, d_a, sizeof(int)*count, cudaMemcpyDeviceToHost) != cudaSuccess){
printf("Problem with copying from device to host\n");
return 0;
}
printf("Numbers added on GPU!!\n");
for(int i=0; i<count; i++){
printf("Num %d: %d\n", i+1, h_a[i]);
}
free(h_a);
free(h_b);
cudaFree(d_a);
cudaFree(d_b);
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <sys/time.h>
__global__ void AddArraysAtDevice(int *a, int *b, int count){
int t_id = blockIdx.x * blockDim.x + threadIdx.x;
if(t_id < count){
a[t_id] += b[t_id];
}
}
int main(){
int count = 1000;
int *h_a = (int*)malloc(sizeof(int) * 1000);
int *h_b = (int*)malloc(sizeof(int) * 1000);
for(int i=0; i<count; i++){
h_a[i] = i;
h_b[i] = count-i;
}
int *d_a, *d_b;
if(hipMalloc(&d_a, sizeof(int)*count) != hipSuccess){
printf("Problem with memory allocation\n");
return 0;
}
if(hipMalloc(&d_b, sizeof(int)*count) != hipSuccess){
printf("Problem with memory allocation\n");
return 0;
}
if(hipMemcpy(d_a, h_a, sizeof(int)*count, hipMemcpyHostToDevice) != hipSuccess){
printf("Problem with copying from host to device\n");
return 0;
}
if(hipMemcpy(d_b, h_b, sizeof(int)*count, hipMemcpyHostToDevice) != hipSuccess){
printf("Problem with copying from host to device\n");
return 0;
}
AddArraysAtDevice<<<count / 256 + 1, 256>>>(d_a, d_b, count);
if(hipMemcpy(h_a, d_a, sizeof(int)*count, hipMemcpyDeviceToHost) != hipSuccess){
printf("Problem with copying from device to host\n");
return 0;
}
printf("Numbers added on GPU!!\n");
for(int i=0; i<count; i++){
printf("Num %d: %d\n", i+1, h_a[i]);
}
free(h_a);
free(h_b);
hipFree(d_a);
hipFree(d_b);
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <sys/time.h>
__global__ void AddArraysAtDevice(int *a, int *b, int count){
int t_id = blockIdx.x * blockDim.x + threadIdx.x;
if(t_id < count){
a[t_id] += b[t_id];
}
}
int main(){
int count = 1000;
int *h_a = (int*)malloc(sizeof(int) * 1000);
int *h_b = (int*)malloc(sizeof(int) * 1000);
for(int i=0; i<count; i++){
h_a[i] = i;
h_b[i] = count-i;
}
int *d_a, *d_b;
if(hipMalloc(&d_a, sizeof(int)*count) != hipSuccess){
printf("Problem with memory allocation\n");
return 0;
}
if(hipMalloc(&d_b, sizeof(int)*count) != hipSuccess){
printf("Problem with memory allocation\n");
return 0;
}
if(hipMemcpy(d_a, h_a, sizeof(int)*count, hipMemcpyHostToDevice) != hipSuccess){
printf("Problem with copying from host to device\n");
return 0;
}
if(hipMemcpy(d_b, h_b, sizeof(int)*count, hipMemcpyHostToDevice) != hipSuccess){
printf("Problem with copying from host to device\n");
return 0;
}
AddArraysAtDevice<<<count / 256 + 1, 256>>>(d_a, d_b, count);
if(hipMemcpy(h_a, d_a, sizeof(int)*count, hipMemcpyDeviceToHost) != hipSuccess){
printf("Problem with copying from device to host\n");
return 0;
}
printf("Numbers added on GPU!!\n");
for(int i=0; i<count; i++){
printf("Num %d: %d\n", i+1, h_a[i]);
}
free(h_a);
free(h_b);
hipFree(d_a);
hipFree(d_b);
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z17AddArraysAtDevicePiS_i
.globl _Z17AddArraysAtDevicePiS_i
.p2align 8
.type _Z17AddArraysAtDevicePiS_i,@function
_Z17AddArraysAtDevicePiS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17AddArraysAtDevicePiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z17AddArraysAtDevicePiS_i, .Lfunc_end0-_Z17AddArraysAtDevicePiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17AddArraysAtDevicePiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17AddArraysAtDevicePiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <sys/time.h>
__global__ void AddArraysAtDevice(int *a, int *b, int count){
int t_id = blockIdx.x * blockDim.x + threadIdx.x;
if(t_id < count){
a[t_id] += b[t_id];
}
}
int main(){
int count = 1000;
int *h_a = (int*)malloc(sizeof(int) * 1000);
int *h_b = (int*)malloc(sizeof(int) * 1000);
for(int i=0; i<count; i++){
h_a[i] = i;
h_b[i] = count-i;
}
int *d_a, *d_b;
if(hipMalloc(&d_a, sizeof(int)*count) != hipSuccess){
printf("Problem with memory allocation\n");
return 0;
}
if(hipMalloc(&d_b, sizeof(int)*count) != hipSuccess){
printf("Problem with memory allocation\n");
return 0;
}
if(hipMemcpy(d_a, h_a, sizeof(int)*count, hipMemcpyHostToDevice) != hipSuccess){
printf("Problem with copying from host to device\n");
return 0;
}
if(hipMemcpy(d_b, h_b, sizeof(int)*count, hipMemcpyHostToDevice) != hipSuccess){
printf("Problem with copying from host to device\n");
return 0;
}
AddArraysAtDevice<<<count / 256 + 1, 256>>>(d_a, d_b, count);
if(hipMemcpy(h_a, d_a, sizeof(int)*count, hipMemcpyDeviceToHost) != hipSuccess){
printf("Problem with copying from device to host\n");
return 0;
}
printf("Numbers added on GPU!!\n");
for(int i=0; i<count; i++){
printf("Num %d: %d\n", i+1, h_a[i]);
}
free(h_a);
free(h_b);
hipFree(d_a);
hipFree(d_b);
return 0;
} | .text
.file "cudaAddMatrices.hip"
.globl _Z32__device_stub__AddArraysAtDevicePiS_i # -- Begin function _Z32__device_stub__AddArraysAtDevicePiS_i
.p2align 4, 0x90
.type _Z32__device_stub__AddArraysAtDevicePiS_i,@function
_Z32__device_stub__AddArraysAtDevicePiS_i: # @_Z32__device_stub__AddArraysAtDevicePiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17AddArraysAtDevicePiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z32__device_stub__AddArraysAtDevicePiS_i, .Lfunc_end0-_Z32__device_stub__AddArraysAtDevicePiS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $16, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $4000, %edi # imm = 0xFA0
callq malloc
movq %rax, %rbx
movl $4000, %edi # imm = 0xFA0
callq malloc
movq %rax, %r14
movl $1000, %eax # imm = 0x3E8
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %ecx, (%rbx,%rcx,4)
movl %eax, (%r14,%rcx,4)
incq %rcx
decq %rax
jne .LBB1_1
# %bb.2:
movq %rsp, %rdi
movl $4000, %esi # imm = 0xFA0
callq hipMalloc
testl %eax, %eax
jne .LBB1_4
# %bb.3:
leaq 8(%rsp), %rdi
movl $4000, %esi # imm = 0xFA0
callq hipMalloc
testl %eax, %eax
je .LBB1_7
.LBB1_4:
movl $.Lstr.5, %edi
.LBB1_5:
callq puts@PLT
.LBB1_6:
xorl %eax, %eax
addq $16, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_7:
.cfi_def_cfa_offset 48
movq (%rsp), %rdi
movl $4000, %edx # imm = 0xFA0
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_9
# %bb.8:
movq 8(%rsp), %rdi
movl $4000, %edx # imm = 0xFA0
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB1_10
.LBB1_9:
movl $.Lstr.3, %edi
jmp .LBB1_5
.LBB1_10:
movabsq $4294967300, %rdi # imm = 0x100000004
leaq 252(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_12
# %bb.11:
movq (%rsp), %rdi
movq 8(%rsp), %rsi
movl $1000, %edx # imm = 0x3E8
callq _Z32__device_stub__AddArraysAtDevicePiS_i
.LBB1_12:
movq (%rsp), %rsi
movl $4000, %edx # imm = 0xFA0
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB1_14
# %bb.13:
movl $.Lstr.1, %edi
jmp .LBB1_5
.LBB1_14:
movl $.Lstr, %edi
callq puts@PLT
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_15: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r15,4), %edx
incq %r15
movl $.L.str.4, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
cmpq $1000, %r15 # imm = 0x3E8
jne .LBB1_15
# %bb.16:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq (%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
jmp .LBB1_6
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17AddArraysAtDevicePiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z17AddArraysAtDevicePiS_i,@object # @_Z17AddArraysAtDevicePiS_i
.section .rodata,"a",@progbits
.globl _Z17AddArraysAtDevicePiS_i
.p2align 3, 0x0
_Z17AddArraysAtDevicePiS_i:
.quad _Z32__device_stub__AddArraysAtDevicePiS_i
.size _Z17AddArraysAtDevicePiS_i, 8
.type .L.str.4,@object # @.str.4
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.4:
.asciz "Num %d: %d\n"
.size .L.str.4, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z17AddArraysAtDevicePiS_i"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Numbers added on GPU!!"
.size .Lstr, 23
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Problem with copying from device to host"
.size .Lstr.1, 41
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Problem with copying from host to device"
.size .Lstr.3, 41
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "Problem with memory allocation"
.size .Lstr.5, 31
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z32__device_stub__AddArraysAtDevicePiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z17AddArraysAtDevicePiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z17AddArraysAtDevicePiS_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06270 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE R2, R4, R5, c[0x0][0x168] ; /* 0x00005a0004027625 */
/* 0x000fc800078e0205 */
/*0090*/ IMAD.WIDE R4, R4, R5, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fe400078e0205 */
/*00a0*/ LDG.E R3, [R2.64] ; /* 0x0000000402037981 */
/* 0x000ea8000c1e1900 */
/*00b0*/ LDG.E R0, [R4.64] ; /* 0x0000000404007981 */
/* 0x000ea4000c1e1900 */
/*00c0*/ IADD3 R7, R0, R3, RZ ; /* 0x0000000300077210 */
/* 0x004fca0007ffe0ff */
/*00d0*/ STG.E [R4.64], R7 ; /* 0x0000000704007986 */
/* 0x000fe2000c101904 */
/*00e0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*00f0*/ BRA 0xf0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0100*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0110*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0120*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0130*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0140*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0150*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z17AddArraysAtDevicePiS_i
.globl _Z17AddArraysAtDevicePiS_i
.p2align 8
.type _Z17AddArraysAtDevicePiS_i,@function
_Z17AddArraysAtDevicePiS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_i32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_ashrrev_i32_e32 v2, 31, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[0:1], off
s_waitcnt vmcnt(0)
v_add_nc_u32_e32 v2, v3, v2
global_store_b32 v[0:1], v2, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z17AddArraysAtDevicePiS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z17AddArraysAtDevicePiS_i, .Lfunc_end0-_Z17AddArraysAtDevicePiS_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z17AddArraysAtDevicePiS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z17AddArraysAtDevicePiS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00128627_00000000-6_cudaAddMatrices.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2073:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2073:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i
.type _Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i, @function
_Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i:
.LFB2095:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z17AddArraysAtDevicePiS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2095:
.size _Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i, .-_Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i
.globl _Z17AddArraysAtDevicePiS_i
.type _Z17AddArraysAtDevicePiS_i, @function
_Z17AddArraysAtDevicePiS_i:
.LFB2096:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2096:
.size _Z17AddArraysAtDevicePiS_i, .-_Z17AddArraysAtDevicePiS_i
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "Problem with memory allocation\n"
.align 8
.LC1:
.string "Problem with copying from host to device\n"
.align 8
.LC2:
.string "Problem with copying from device to host\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC3:
.string "Numbers added on GPU!!\n"
.LC4:
.string "Num %d: %d\n"
.text
.globl main
.type main, @function
main:
.LFB2070:
.cfi_startproc
endbr64
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $56, %rsp
.cfi_def_cfa_offset 96
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
movl $4000, %edi
call malloc@PLT
movq %rax, %rbx
movl $4000, %edi
call malloc@PLT
movq %rax, %rbp
movl $0, %eax
movl $1000, %ecx
.L12:
movl %eax, (%rbx,%rax,4)
movl %ecx, %edx
subl %eax, %edx
movl %edx, 0(%rbp,%rax,4)
addq $1, %rax
cmpq $1000, %rax
jne .L12
movq %rsp, %rdi
movl $4000, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L25
leaq 8(%rsp), %rdi
movl $4000, %esi
call cudaMalloc@PLT
testl %eax, %eax
jne .L26
movl $1, %ecx
movl $4000, %edx
movq %rbx, %rsi
movq (%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L27
movl $1, %ecx
movl $4000, %edx
movq %rbp, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L28
movl $256, 28(%rsp)
movl $1, 32(%rsp)
movl $4, 16(%rsp)
movl $1, 20(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 28(%rsp), %rdx
movl $1, %ecx
movq 16(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L29
.L18:
movl $2, %ecx
movl $4000, %edx
movq (%rsp), %rsi
movq %rbx, %rdi
call cudaMemcpy@PLT
testl %eax, %eax
jne .L30
leaq .LC3(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %r12d
leaq .LC4(%rip), %r13
.L20:
movl -4(%rbx,%r12,4), %ecx
movl %r12d, %edx
movq %r13, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $1, %r12
cmpq $1001, %r12
jne .L20
movq %rbx, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq (%rsp), %rdi
call cudaFree@PLT
movq 8(%rsp), %rdi
call cudaFree@PLT
jmp .L14
.L25:
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L14:
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L31
movl $0, %eax
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L26:
.cfi_restore_state
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L14
.L27:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L14
.L28:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L14
.L29:
movl $1000, %edx
movq 8(%rsp), %rsi
movq (%rsp), %rdi
call _Z40__device_stub__Z17AddArraysAtDevicePiS_iPiS_i
jmp .L18
.L30:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
jmp .L14
.L31:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2070:
.size main, .-main
.section .rodata.str1.1
.LC5:
.string "_Z17AddArraysAtDevicePiS_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2098:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC5(%rip), %rdx
movq %rdx, %rcx
leaq _Z17AddArraysAtDevicePiS_i(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2098:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "cudaAddMatrices.hip"
.globl _Z32__device_stub__AddArraysAtDevicePiS_i # -- Begin function _Z32__device_stub__AddArraysAtDevicePiS_i
.p2align 4, 0x90
.type _Z32__device_stub__AddArraysAtDevicePiS_i,@function
_Z32__device_stub__AddArraysAtDevicePiS_i: # @_Z32__device_stub__AddArraysAtDevicePiS_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z17AddArraysAtDevicePiS_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end0:
.size _Z32__device_stub__AddArraysAtDevicePiS_i, .Lfunc_end0-_Z32__device_stub__AddArraysAtDevicePiS_i
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
subq $16, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movl $4000, %edi # imm = 0xFA0
callq malloc
movq %rax, %rbx
movl $4000, %edi # imm = 0xFA0
callq malloc
movq %rax, %r14
movl $1000, %eax # imm = 0x3E8
xorl %ecx, %ecx
.p2align 4, 0x90
.LBB1_1: # =>This Inner Loop Header: Depth=1
movl %ecx, (%rbx,%rcx,4)
movl %eax, (%r14,%rcx,4)
incq %rcx
decq %rax
jne .LBB1_1
# %bb.2:
movq %rsp, %rdi
movl $4000, %esi # imm = 0xFA0
callq hipMalloc
testl %eax, %eax
jne .LBB1_4
# %bb.3:
leaq 8(%rsp), %rdi
movl $4000, %esi # imm = 0xFA0
callq hipMalloc
testl %eax, %eax
je .LBB1_7
.LBB1_4:
movl $.Lstr.5, %edi
.LBB1_5:
callq puts@PLT
.LBB1_6:
xorl %eax, %eax
addq $16, %rsp
.cfi_def_cfa_offset 32
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
retq
.LBB1_7:
.cfi_def_cfa_offset 48
movq (%rsp), %rdi
movl $4000, %edx # imm = 0xFA0
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB1_9
# %bb.8:
movq 8(%rsp), %rdi
movl $4000, %edx # imm = 0xFA0
movq %r14, %rsi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB1_10
.LBB1_9:
movl $.Lstr.3, %edi
jmp .LBB1_5
.LBB1_10:
movabsq $4294967300, %rdi # imm = 0x100000004
leaq 252(%rdi), %rdx
movl $1, %esi
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB1_12
# %bb.11:
movq (%rsp), %rdi
movq 8(%rsp), %rsi
movl $1000, %edx # imm = 0x3E8
callq _Z32__device_stub__AddArraysAtDevicePiS_i
.LBB1_12:
movq (%rsp), %rsi
movl $4000, %edx # imm = 0xFA0
movq %rbx, %rdi
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
je .LBB1_14
# %bb.13:
movl $.Lstr.1, %edi
jmp .LBB1_5
.LBB1_14:
movl $.Lstr, %edi
callq puts@PLT
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_15: # =>This Inner Loop Header: Depth=1
movl (%rbx,%r15,4), %edx
incq %r15
movl $.L.str.4, %edi
movl %r15d, %esi
xorl %eax, %eax
callq printf
cmpq $1000, %r15 # imm = 0x3E8
jne .LBB1_15
# %bb.16:
movq %rbx, %rdi
callq free
movq %r14, %rdi
callq free
movq (%rsp), %rdi
callq hipFree
movq 8(%rsp), %rdi
callq hipFree
jmp .LBB1_6
.Lfunc_end1:
.size main, .Lfunc_end1-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB2_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB2_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z17AddArraysAtDevicePiS_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end2:
.size __hip_module_ctor, .Lfunc_end2-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB3_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB3_2:
retq
.Lfunc_end3:
.size __hip_module_dtor, .Lfunc_end3-__hip_module_dtor
.cfi_endproc
# -- End function
.type _Z17AddArraysAtDevicePiS_i,@object # @_Z17AddArraysAtDevicePiS_i
.section .rodata,"a",@progbits
.globl _Z17AddArraysAtDevicePiS_i
.p2align 3, 0x0
_Z17AddArraysAtDevicePiS_i:
.quad _Z32__device_stub__AddArraysAtDevicePiS_i
.size _Z17AddArraysAtDevicePiS_i, 8
.type .L.str.4,@object # @.str.4
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.4:
.asciz "Num %d: %d\n"
.size .L.str.4, 12
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z17AddArraysAtDevicePiS_i"
.size .L__unnamed_1, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Numbers added on GPU!!"
.size .Lstr, 23
.type .Lstr.1,@object # @str.1
.Lstr.1:
.asciz "Problem with copying from device to host"
.size .Lstr.1, 41
.type .Lstr.3,@object # @str.3
.Lstr.3:
.asciz "Problem with copying from host to device"
.size .Lstr.3, 41
.type .Lstr.5,@object # @str.5
.Lstr.5:
.asciz "Problem with memory allocation"
.size .Lstr.5, 31
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z32__device_stub__AddArraysAtDevicePiS_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z17AddArraysAtDevicePiS_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | // the GPU code can be found in power_gpu.cu
// jiabing jin, sept 2017
////////////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "cuda.h"
const int BLOCK_SIZE =256;
// #include "power_gpu.cu"
// Input Array Variables
float* h_MatA = NULL;
float* d_MatA = NULL;
// Output Array
float* h_VecV = NULL;
float* d_VecV = NULL;
float* h_VecW = NULL;
float* d_VecW = NULL;
float* h_NormW = NULL;
float* d_NormW = NULL;
// Variables to change
int GlobalSize = 8000; // this is the dimension of the matrix, GlobalSize*GlobalSize
int BlockSize = 256; // number of threads in each block
const float EPS = 0.000005; // tolerance of the error
int max_iteration = 100; // the maximum number of iterations
// Functions
void Cleanup(void);
void InitOne(float*, int);
void UploadArray(float*, int);
float CPUReduce(float*, int);
void Arguments(int, char**);
void checkCardVersion(void);
void ParseArguments(int, char**);
// Kernels
__global__ void Av_Product(float* , float* , float* , int );
__global__ void FindNormW(float* , float* , int );
__global__ void NormalizeW(float* ,float* , int );
__global__ void ComputeLamda( float* ,float* , float* ,int );
void CPU_AvProduct()
{
int N = GlobalSize;
int matIndex =0;
for(int i=0;i<N;i++)
{
h_VecW[i] = 0;
for(int j=0;j<N;j++)
{
matIndex = i*N + j;
h_VecW[i] += h_MatA[matIndex] * h_VecV[j];
}
}
}
void CPU_NormalizeW()
{
int N = GlobalSize;
float normW=0;
for(int i=0;i<N;i++)
normW += h_VecW[i] * h_VecW[i];
normW = sqrt(normW);
for(int i=0;i<N;i++)
h_VecV[i] = h_VecW[i]/normW;
}
float CPU_ComputeLamda()
{
int N = GlobalSize;
float lamda =0;
for(int i=0;i<N;i++)
lamda += h_VecV[i] * h_VecW[i];
return lamda;
}
void RunCPUPowerMethod()
{
printf("*************************************\n");
float oldLamda =0;
float lamda=0;
//AvProduct
CPU_AvProduct();
//power loop
for (int i=0;i<max_iteration;i++)
{
CPU_NormalizeW();
CPU_AvProduct();
lamda= CPU_ComputeLamda();
printf("CPU lamda at %d: %f \n", i, lamda);
// If residual is less than epsilon, break
if(abs(oldLamda - lamda) < EPS)
break;
oldLamda = lamda;
}
printf("*************************************\n");
}
__global__ void Av_Product(float* g_MatA, float* g_VecV, float* g_VecW, int N)
{
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int aBegin = N * BLOCK_SIZE * bx;
int aEnd = aBegin + N - 1;
int step = BLOCK_SIZE;
int bBegin = 0;//BLOCK_SIZE * bx;
int bIndex=0;
int aIndex =0;
float Csub = 0;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += step, b += step)
{
__shared__ float As[BLOCK_SIZE];
__shared__ float bs[BLOCK_SIZE];
aIndex = a + tx;
if( aIndex < N*N)
As[tx] = g_MatA[aIndex];
else
As[tx] = 0;
bIndex = b+tx;
if(bIndex<N)
bs[tx] = g_VecV[bIndex];
else
bs[tx] = 0;
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[k] * bs[k];
}//}
__syncthreads();
}
g_VecW[ BLOCK_SIZE * bx + tx] = Csub;
}
__global__ void ComputeLamda( float* g_VecV, float* g_VecW, float * g_Lamda,int N)
{
// shared memory size declared at kernel launch
extern __shared__ float sdataVW[];
unsigned int tid = threadIdx.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
// For thread ids greater than data space
if (globalid < N) {
sdataVW[tid] = g_VecV[globalid] * g_VecW[globalid];
}
else {
sdataVW[tid] = 0; // Case of extra threads above N
}
// each thread loads one element from global to shared mem
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) {
if (tid < s) {
sdataVW[tid] = sdataVW[tid] + sdataVW[tid+ s];
}
__syncthreads();
}
// atomic operations:
if (tid == 0) atomicAdd(g_Lamda,sdataVW[0]);
}
__global__ void FindNormW(float* g_VecW, float * g_NormW, int N)
{
// shared memory size declared at kernel launch
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
// For thread ids greater than data space
if (globalid < N) {
sdata[tid] = g_VecW[globalid];
}
else {
sdata[tid] = 0; // Case of extra threads above N
}
// each thread loads one element from global to shared mem
__syncthreads();
sdata[tid] = sdata[tid] * sdata[tid];
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) {
if (tid < s) {
sdata[tid] = sdata[tid] + sdata[tid+ s];
}
__syncthreads();
}
// atomic operations:
if (tid == 0) atomicAdd(g_NormW,sdata[0]);
}
__global__ void NormalizeW(float* g_VecW, float * g_NormW, float* g_VecV, int N)
{
// shared memory size declared at kernel launch
extern __shared__ float sNormData[];
unsigned int tid = threadIdx.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid==0) sNormData[0] = g_NormW[0];
__syncthreads();
// For thread ids greater than data space
if (globalid < N) {
g_VecV[globalid] = g_VecW[globalid]/sNormData[0];
}
}
// Host code
int main(int argc, char** argv)
{
struct timespec t_start,t_end,t_start2;
double runtime;
double Av_runtime = 0.0;
double Find_runtime = 0.0;
double Norm_runtime = 0.0;
double Compute_runtime = 0.0;
ParseArguments(argc, argv);
int N = GlobalSize;
printf("Matrix size %d X %d \n", N, N);
size_t vec_size = N * sizeof(float);
size_t mat_size = N * N * sizeof(float);
size_t norm_size = sizeof(float);
// Allocate normalized value in host memory
h_NormW = (float*)malloc(norm_size);
// Allocate input matrix in host memory
h_MatA = (float*)malloc(mat_size);
// Allocate initial vector V in host memory
h_VecV = (float*)malloc(vec_size);
// Allocate W vector for computations
h_VecW = (float*)malloc(vec_size);
// Initialize input matrix
UploadArray(h_MatA, N);
InitOne(h_VecV,N);
printf("Power method in CPU starts\n");
clock_gettime(CLOCK_REALTIME,&t_start);
RunCPUPowerMethod(); // the lamda is already solved here
clock_gettime(CLOCK_REALTIME,&t_end);
runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec);
printf("CPU: run time = %f secs.\n",runtime);
printf("Power method in CPU is finished\n");
/////////////////////////////////////////////////
// This is the starting points of GPU
printf("Power method in GPU starts\n");
checkCardVersion();
// Re-initialize the starting vector V for the GPU run
InitOne(h_VecV,N);
clock_gettime(CLOCK_REALTIME,&t_start); // Here I start to count
// Set the kernel arguments
int threadsPerBlock = BlockSize;
int sharedMemSize = threadsPerBlock * sizeof(float); // shared memory size per block
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
/* Host variables for convergence */
float OldLamda = 0;
float temp = 0;
/* Device variable for convergence */
float* Lamda = 0;
// Allocate matrix and vectors in device memory
cudaMalloc((void**)&d_MatA, mat_size);
cudaMalloc((void**)&d_VecV, vec_size);
cudaMalloc((void**)&d_VecW, vec_size); // This vector is only used by the device
cudaMalloc((void**)&d_NormW, norm_size);
/* Allocate memory for device lamda */
cudaMalloc((void**)&Lamda, sizeof(float));
//Copy from host memory to device memory
cudaMemcpy(d_MatA, h_MatA, mat_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_VecV, h_VecV, vec_size, cudaMemcpyHostToDevice);
cudaMemcpy(Lamda, &OldLamda, sizeof(float), cudaMemcpyHostToDevice);
// cutilCheckError(cutStopTimer(timer_mem));
//Power method loops
clock_gettime(CLOCK_REALTIME,&t_start2);
/* First matrix vector multiplication */
Av_Product<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_MatA, d_VecV, d_VecW, N);
/* Measure time for one matrix vector multiplication */
clock_gettime(CLOCK_REALTIME,&t_end);
Av_runtime += Av_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
cudaThreadSynchronize(); // Needed: acts as a barrier to synchronize all threads
// This part is the main code of the iteration process for the Power Method in GPU.
// Please finish this part based on the given code. Do not forget to call
// cudaThreadSynchronize() after each kernel launch in CUDA to synchronize the threads
////////////////////////////////////////////
// /// // // // // // //
// //
//power loop
for (int i=0;i<max_iteration;i++)
{
/* Measure time for creating the normalized vector */
clock_gettime(CLOCK_REALTIME,&t_start2);
FindNormW<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecW, d_NormW, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Find_runtime += Find_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
cudaThreadSynchronize();
/* Measure time for normalizing the vector */
clock_gettime(CLOCK_REALTIME,&t_start2);
NormalizeW<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecW, d_NormW, d_VecV, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Norm_runtime += Norm_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
cudaThreadSynchronize();
/* Measure time for matrix vector multiplication */
clock_gettime(CLOCK_REALTIME,&t_start2);
Av_Product<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_MatA, d_VecV, d_VecW, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Av_runtime += Av_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
cudaThreadSynchronize();
/* Measure time computing the Lamda */
clock_gettime(CLOCK_REALTIME,&t_start2);
ComputeLamda<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecV, d_VecW, Lamda, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Compute_runtime += Compute_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
cudaThreadSynchronize();
// If residual is less than epsilon, break
cudaMemcpy(&OldLamda, Lamda, sizeof(float), cudaMemcpyDeviceToHost);
printf("GPU lamda at %d: %f \n", i, OldLamda);
if(abs(OldLamda - temp) < EPS){
break;
}
temp = OldLamda;
}
// //
// //
// //
// //
// //
// //
// /// // /// // // // // // // // //
clock_gettime(CLOCK_REALTIME,&t_end);
runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec);
printf("GPU: run time = %f secs.\n",runtime);
printf("GPU: Av time = %f secs.\n",Av_runtime);
printf("GPU: compute time = %f secs.\n",Compute_runtime);
printf("GPU: find time = %f secs.\n",Find_runtime);
printf("GPU: norm time = %f secs.\n",Norm_runtime);
// printf("Overall CPU Execution Time: %f (ms) \n", cutGetTimerValue(timer_CPU));
if (Lamda)
cudaFree(Lamda);
Cleanup();
}
void Cleanup(void)
{
// Free device memory
if (d_MatA)
cudaFree(d_MatA);
if (d_VecV)
cudaFree(d_VecV);
if (d_VecW)
cudaFree(d_VecW);
if (d_NormW)
cudaFree(d_NormW);
// Free host memory
if (h_MatA)
free(h_MatA);
if (h_VecV)
free(h_VecV);
if (h_VecW)
free(h_VecW);
if (h_NormW)
free(h_NormW);
exit(0);
}
// Initializes the vector to zeros, with the first element set to one.
void InitOne(float* data, int n)
{
for (int i = 0; i < n; i++)
data[i] = 0;
data[0]=1;
}
void UploadArray(float* data, int n)
{
int total = n*n;
int value=1;
for (int i = 0; i < total; i++)
{
data[i] = (int) (rand() % (int)(101));//1;//value;
value ++; if(value>n) value =1;
// data[i] = 1;
}
}
// Obtain program arguments
void Arguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0)
{
GlobalSize = atoi(argv[i+1]);
i = i + 1;
}
if (strcmp(argv[i], "--max_iteration") == 0 || strcmp(argv[i], "-max_iteration") == 0)
{
max_iteration = atoi(argv[i+1]);
i = i + 1;
}
}
}
void checkCardVersion()
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("This GPU has major architecture %d, minor %d \n",prop.major,prop.minor);
if(prop.major < 2)
{
fprintf(stderr,"Need compute capability 2 or higher.\n");
exit(1);
}
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i) {
if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0) {
GlobalSize = atoi(argv[i+1]);
i = i + 1;
}
// if (strcmp(argv[i], "--blocksize") == 0 || strcmp(argv[i], "-blocksize") == 0) {
// BlockSize = atoi(argv[i+1]);
// i = i + 1;
// }
}
} | .file "tmpxft_001a01db_00000000-6_power_gpu.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2070:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2070:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z13CPU_AvProductv
.type _Z13CPU_AvProductv, @function
_Z13CPU_AvProductv:
.LFB2057:
.cfi_startproc
endbr64
movl GlobalSize(%rip), %eax
testl %eax, %eax
jle .L3
cltq
leaq 0(,%rax,4), %rdi
movl $0, %r8d
movl $0, %esi
.L6:
movq h_VecW(%rip), %rax
movl $0x00000000, (%rax,%rsi)
movl $0, %eax
.L5:
movq %rsi, %rdx
addq h_VecW(%rip), %rdx
movq %rax, %rcx
addq h_MatA(%rip), %rcx
movss (%rcx,%r8), %xmm0
movq h_VecV(%rip), %rcx
mulss (%rcx,%rax), %xmm0
addss (%rdx), %xmm0
movss %xmm0, (%rdx)
addq $4, %rax
cmpq %rdi, %rax
jne .L5
addq $4, %rsi
addq %rdi, %r8
cmpq %rdi, %rsi
jne .L6
.L3:
ret
.cfi_endproc
.LFE2057:
.size _Z13CPU_AvProductv, .-_Z13CPU_AvProductv
.globl _Z14CPU_NormalizeWv
.type _Z14CPU_NormalizeWv, @function
_Z14CPU_NormalizeWv:
.LFB2058:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
movl GlobalSize(%rip), %ebx
testl %ebx, %ebx
jle .L17
movq h_VecW(%rip), %rax
movslq %ebx, %rdx
leaq (%rax,%rdx,4), %rdx
pxor %xmm0, %xmm0
.L11:
movss (%rax), %xmm1
mulss %xmm1, %xmm1
addss %xmm1, %xmm0
addq $4, %rax
cmpq %rdx, %rax
jne .L11
pxor %xmm1, %xmm1
ucomiss %xmm0, %xmm1
ja .L22
.L10:
sqrtss %xmm0, %xmm0
testl %ebx, %ebx
jle .L9
.L14:
movslq %ebx, %rcx
salq $2, %rcx
movl $0, %eax
.L16:
movq h_VecW(%rip), %rdx
movss (%rdx,%rax), %xmm1
divss %xmm0, %xmm1
movq h_VecV(%rip), %rdx
movss %xmm1, (%rdx,%rax)
addq $4, %rax
cmpq %rax, %rcx
jne .L16
.L9:
popq %rbx
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L17:
.cfi_restore_state
pxor %xmm0, %xmm0
jmp .L10
.L22:
call sqrtf@PLT
jmp .L14
.cfi_endproc
.LFE2058:
.size _Z14CPU_NormalizeWv, .-_Z14CPU_NormalizeWv
.globl _Z16CPU_ComputeLamdav
.type _Z16CPU_ComputeLamdav, @function
_Z16CPU_ComputeLamdav:
.LFB2059:
.cfi_startproc
endbr64
movl GlobalSize(%rip), %edx
testl %edx, %edx
jle .L26
movq h_VecV(%rip), %rsi
movq h_VecW(%rip), %rcx
movslq %edx, %rdx
salq $2, %rdx
movl $0, %eax
pxor %xmm1, %xmm1
.L25:
movss (%rsi,%rax), %xmm0
mulss (%rcx,%rax), %xmm0
addss %xmm0, %xmm1
addq $4, %rax
cmpq %rdx, %rax
jne .L25
.L23:
movaps %xmm1, %xmm0
ret
.L26:
pxor %xmm1, %xmm1
jmp .L23
.cfi_endproc
.LFE2059:
.size _Z16CPU_ComputeLamdav, .-_Z16CPU_ComputeLamdav
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC1:
.string "*************************************\n"
.section .rodata.str1.1,"aMS",@progbits,1
.LC2:
.string "CPU lamda at %d: %f \n"
.text
.globl _Z17RunCPUPowerMethodv
.type _Z17RunCPUPowerMethodv, @function
_Z17RunCPUPowerMethodv:
.LFB2060:
.cfi_startproc
endbr64
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $24, %rsp
.cfi_def_cfa_offset 48
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z13CPU_AvProductv
cmpl $0, max_iteration(%rip)
jle .L29
movl $0, %ebx
movl $0x00000000, 8(%rsp)
leaq .LC2(%rip), %rbp
.L30:
call _Z14CPU_NormalizeWv
call _Z13CPU_AvProductv
movss 8(%rsp), %xmm2
movss %xmm2, 12(%rsp)
call _Z16CPU_ComputeLamdav
movss %xmm0, 8(%rsp)
cvtss2sd %xmm0, %xmm0
movl %ebx, %edx
movq %rbp, %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movss 12(%rsp), %xmm2
subss 8(%rsp), %xmm2
movaps %xmm2, %xmm0
andps .LC3(%rip), %xmm0
movss .LC4(%rip), %xmm3
comiss %xmm0, %xmm3
ja .L29
addl $1, %ebx
cmpl %ebx, max_iteration(%rip)
jg .L30
.L29:
leaq .LC1(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addq $24, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2060:
.size _Z17RunCPUPowerMethodv, .-_Z17RunCPUPowerMethodv
.globl _Z7Cleanupv
.type _Z7Cleanupv, @function
_Z7Cleanupv:
.LFB2062:
.cfi_startproc
endbr64
pushq %rax
.cfi_def_cfa_offset 16
popq %rax
.cfi_def_cfa_offset 8
subq $8, %rsp
.cfi_def_cfa_offset 16
movq d_MatA(%rip), %rdi
testq %rdi, %rdi
je .L34
call cudaFree@PLT
.L34:
movq d_VecV(%rip), %rdi
testq %rdi, %rdi
je .L35
call cudaFree@PLT
.L35:
movq d_VecW(%rip), %rdi
testq %rdi, %rdi
je .L36
call cudaFree@PLT
.L36:
movq d_NormW(%rip), %rdi
testq %rdi, %rdi
je .L37
call cudaFree@PLT
.L37:
movq h_MatA(%rip), %rdi
testq %rdi, %rdi
je .L38
call free@PLT
.L38:
movq h_VecV(%rip), %rdi
testq %rdi, %rdi
je .L39
call free@PLT
.L39:
movq h_VecW(%rip), %rdi
testq %rdi, %rdi
je .L40
call free@PLT
.L40:
movq h_NormW(%rip), %rdi
testq %rdi, %rdi
je .L41
call free@PLT
.L41:
movl $0, %edi
call exit@PLT
.cfi_endproc
.LFE2062:
.size _Z7Cleanupv, .-_Z7Cleanupv
.globl _Z7InitOnePfi
.type _Z7InitOnePfi, @function
_Z7InitOnePfi:
.LFB2063:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L44
movq %rdi, %rax
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rdx
.L45:
movl $0x00000000, (%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L45
.L44:
movl $0x3f800000, (%rdi)
ret
.cfi_endproc
.LFE2063:
.size _Z7InitOnePfi, .-_Z7InitOnePfi
.globl _Z11UploadArrayPfi
.type _Z11UploadArrayPfi, @function
_Z11UploadArrayPfi:
.LFB2064:
.cfi_startproc
endbr64
movl %esi, %eax
imull %esi, %eax
testl %eax, %eax
jle .L54
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
movl %esi, %r12d
movq %rdi, %rbp
cltq
leaq (%rdi,%rax,4), %r14
movl $1, %ebx
movl $1, %r13d
.L50:
call rand@PLT
movslq %eax, %rdx
imulq $680390859, %rdx, %rdx
sarq $36, %rdx
movl %eax, %ecx
sarl $31, %ecx
subl %ecx, %edx
imull $101, %edx, %edx
subl %edx, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
movss %xmm0, 0(%rbp)
addl $1, %ebx
cmpl %ebx, %r12d
cmovl %r13d, %ebx
addq $4, %rbp
cmpq %r14, %rbp
jne .L50
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L54:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
ret
.cfi_endproc
.LFE2064:
.size _Z11UploadArrayPfi, .-_Z11UploadArrayPfi
.section .rodata.str1.1
.LC6:
.string "--size"
.LC7:
.string "-size"
.LC8:
.string "--max_iteration"
.LC9:
.string "-max_iteration"
.text
.globl _Z9ArgumentsiPPc
.type _Z9ArgumentsiPPc, @function
_Z9ArgumentsiPPc:
.LFB2065:
.cfi_startproc
endbr64
testl %edi, %edi
jle .L66
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movl %edi, %r13d
movq %rsi, %rbp
movl $0, %ebx
leaq .LC6(%rip), %r14
jmp .L63
.L61:
movq 8(%rbp,%r15), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, max_iteration(%rip)
addl $1, %ebx
.L62:
addl $1, %ebx
cmpl %ebx, %r13d
jle .L69
.L63:
movslq %ebx, %rax
leaq 0(,%rax,8), %r15
movq 0(%rbp,%rax,8), %r12
movq %r14, %rsi
movq %r12, %rdi
call strcmp@PLT
testl %eax, %eax
je .L59
leaq .LC7(%rip), %rsi
movq %r12, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L60
.L59:
movq 8(%rbp,%r15), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, GlobalSize(%rip)
addl $1, %ebx
.L60:
movslq %ebx, %rax
leaq 0(,%rax,8), %r15
movq 0(%rbp,%rax,8), %r12
leaq .LC8(%rip), %rsi
movq %r12, %rdi
call strcmp@PLT
testl %eax, %eax
je .L61
leaq .LC9(%rip), %rsi
movq %r12, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L62
jmp .L61
.L69:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L66:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2065:
.size _Z9ArgumentsiPPc, .-_Z9ArgumentsiPPc
.section .rodata.str1.8
.align 8
.LC10:
.string "This GPU has major architecture %d, minor %d \n"
.align 8
.LC11:
.string "Need compute capability 2 or higher.\n"
.text
.globl _Z16checkCardVersionv
.type _Z16checkCardVersionv, @function
_Z16checkCardVersionv:
.LFB2066:
.cfi_startproc
endbr64
subq $1048, %rsp
.cfi_def_cfa_offset 1056
movq %fs:40, %rax
movq %rax, 1032(%rsp)
xorl %eax, %eax
movq %rsp, %rdi
movl $0, %esi
call cudaGetDeviceProperties_v2@PLT
movl 364(%rsp), %ecx
movl 360(%rsp), %edx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
cmpl $1, 360(%rsp)
jle .L74
movq 1032(%rsp), %rax
subq %fs:40, %rax
jne .L75
addq $1048, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L74:
.cfi_restore_state
leaq .LC11(%rip), %rdx
movl $2, %esi
movq stderr(%rip), %rdi
movl $0, %eax
call __fprintf_chk@PLT
movl $1, %edi
call exit@PLT
.L75:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2066:
.size _Z16checkCardVersionv, .-_Z16checkCardVersionv
.globl _Z14ParseArgumentsiPPc
.type _Z14ParseArgumentsiPPc, @function
_Z14ParseArgumentsiPPc:
.LFB2067:
.cfi_startproc
endbr64
testl %edi, %edi
jle .L83
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $8, %rsp
.cfi_def_cfa_offset 64
movl %edi, %r14d
movq %rsi, %r12
movl $0, %ebx
leaq .LC6(%rip), %r15
jmp .L80
.L78:
movq 8(%r12,%r13), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, GlobalSize(%rip)
addl $1, %ebx
.L79:
addl $1, %ebx
cmpl %ebx, %r14d
jle .L86
.L80:
movslq %ebx, %rax
leaq 0(,%rax,8), %r13
movq (%r12,%rax,8), %rbp
movq %r15, %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
je .L78
leaq .LC7(%rip), %rsi
movq %rbp, %rdi
call strcmp@PLT
testl %eax, %eax
jne .L79
jmp .L78
.L86:
addq $8, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L83:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
.cfi_restore 14
.cfi_restore 15
ret
.cfi_endproc
.LFE2067:
.size _Z14ParseArgumentsiPPc, .-_Z14ParseArgumentsiPPc
.globl _Z35__device_stub__Z10Av_ProductPfS_S_iPfS_S_i
.type _Z35__device_stub__Z10Av_ProductPfS_S_iPfS_S_i, @function
_Z35__device_stub__Z10Av_ProductPfS_S_iPfS_S_i:
.LFB2092:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L91
.L87:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L92
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L91:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10Av_ProductPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L87
.L92:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2092:
.size _Z35__device_stub__Z10Av_ProductPfS_S_iPfS_S_i, .-_Z35__device_stub__Z10Av_ProductPfS_S_iPfS_S_i
.globl _Z10Av_ProductPfS_S_i
.type _Z10Av_ProductPfS_S_i, @function
_Z10Av_ProductPfS_S_i:
.LFB2093:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z10Av_ProductPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2093:
.size _Z10Av_ProductPfS_S_i, .-_Z10Av_ProductPfS_S_i
.globl _Z37__device_stub__Z12ComputeLamdaPfS_S_iPfS_S_i
.type _Z37__device_stub__Z12ComputeLamdaPfS_S_iPfS_S_i, @function
_Z37__device_stub__Z12ComputeLamdaPfS_S_iPfS_S_i:
.LFB2094:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L99
.L95:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L100
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L99:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z12ComputeLamdaPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L95
.L100:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2094:
.size _Z37__device_stub__Z12ComputeLamdaPfS_S_iPfS_S_i, .-_Z37__device_stub__Z12ComputeLamdaPfS_S_iPfS_S_i
.globl _Z12ComputeLamdaPfS_S_i
.type _Z12ComputeLamdaPfS_S_i, @function
_Z12ComputeLamdaPfS_S_i:
.LFB2095:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z37__device_stub__Z12ComputeLamdaPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2095:
.size _Z12ComputeLamdaPfS_S_i, .-_Z12ComputeLamdaPfS_S_i
.globl _Z31__device_stub__Z9FindNormWPfS_iPfS_i
.type _Z31__device_stub__Z9FindNormWPfS_iPfS_i, @function
_Z31__device_stub__Z9FindNormWPfS_iPfS_i:
.LFB2096:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L107
.L103:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L108
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L107:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z9FindNormWPfS_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L103
.L108:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2096:
.size _Z31__device_stub__Z9FindNormWPfS_iPfS_i, .-_Z31__device_stub__Z9FindNormWPfS_iPfS_i
.globl _Z9FindNormWPfS_i
.type _Z9FindNormWPfS_i, @function
_Z9FindNormWPfS_i:
.LFB2097:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z9FindNormWPfS_iPfS_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2097:
.size _Z9FindNormWPfS_i, .-_Z9FindNormWPfS_i
.globl _Z35__device_stub__Z10NormalizeWPfS_S_iPfS_S_i
.type _Z35__device_stub__Z10NormalizeWPfS_S_iPfS_S_i, @function
_Z35__device_stub__Z10NormalizeWPfS_S_iPfS_S_i:
.LFB2098:
.cfi_startproc
endbr64
subq $152, %rsp
.cfi_def_cfa_offset 160
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movq %rdx, 8(%rsp)
movl %ecx, 4(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 8(%rsp), %rax
movq %rax, 112(%rsp)
leaq 4(%rsp), %rax
movq %rax, 120(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L115
.L111:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L116
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L115:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 168
pushq 40(%rsp)
.cfi_def_cfa_offset 176
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z10NormalizeWPfS_S_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 160
jmp .L111
.L116:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2098:
.size _Z35__device_stub__Z10NormalizeWPfS_S_iPfS_S_i, .-_Z35__device_stub__Z10NormalizeWPfS_S_iPfS_S_i
.globl _Z10NormalizeWPfS_S_i
.type _Z10NormalizeWPfS_S_i, @function
_Z10NormalizeWPfS_S_i:
.LFB2099:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z35__device_stub__Z10NormalizeWPfS_S_iPfS_S_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2099:
.size _Z10NormalizeWPfS_S_i, .-_Z10NormalizeWPfS_S_i
.section .rodata.str1.1
.LC13:
.string "Matrix size %d X %d \n"
.LC14:
.string "Power method in CPU starts\n"
.LC16:
.string "CPU: run time = %f secs.\n"
.section .rodata.str1.8
.align 8
.LC17:
.string "Power method in CPU is finished\n"
.section .rodata.str1.1
.LC18:
.string "Power method in GPU starts\n"
.LC19:
.string "GPU lamda at %d: %f \n"
.LC20:
.string "GPU: run time = %f secs.\n"
.LC21:
.string "GPU: Av time = %f secs.\n"
.LC22:
.string "GPU: compute time = %f secs.\n"
.LC23:
.string "GPU: find time = %f secs.\n"
.LC24:
.string "GPU: norm time = %f secs.\n"
.text
.globl main
.type main, @function
main:
.LFB2061:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $168, %rsp
.cfi_def_cfa_offset 224
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
call _Z14ParseArgumentsiPPc
movl GlobalSize(%rip), %r15d
movl %r15d, 44(%rsp)
movl %r15d, %ecx
movl %r15d, %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movslq %r15d, %r12
salq $2, %r12
movl %r15d, %r13d
imull %r15d, %r13d
movslq %r13d, %r13
salq $2, %r13
movl $4, %edi
call malloc@PLT
movq %rax, h_NormW(%rip)
movq %r13, %rdi
call malloc@PLT
movq %rax, %rbx
movq %rax, h_MatA(%rip)
movq %r12, %rdi
call malloc@PLT
movq %rax, h_VecV(%rip)
movq %r12, %rdi
call malloc@PLT
movq %rax, h_VecW(%rip)
movl %r15d, %esi
movq %rbx, %rdi
call _Z11UploadArrayPfi
movl %r15d, %esi
movq h_VecV(%rip), %rdi
call _Z7InitOnePfi
leaq .LC14(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq 96(%rsp), %rbx
movq %rbx, %rsi
movl $0, %edi
call clock_gettime@PLT
call _Z17RunCPUPowerMethodv
leaq 112(%rsp), %rsi
movl $0, %edi
call clock_gettime@PLT
movq 120(%rsp), %rax
subq 104(%rsp), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
mulsd .LC15(%rip), %xmm0
movq 112(%rsp), %rax
subq 96(%rsp), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
addsd %xmm1, %xmm0
leaq .LC16(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq .LC17(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
leaq .LC18(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
call _Z16checkCardVersionv
movl %r15d, %esi
movq h_VecV(%rip), %rdi
call _Z7InitOnePfi
movq %rbx, %rsi
movl $0, %edi
call clock_gettime@PLT
movl BlockSize(%rip), %ebx
leal -1(%r15,%rbx), %eax
cltd
idivl %ebx
movl %eax, %ebp
movl $0x00000000, 60(%rsp)
movq $0, 64(%rsp)
movq %r13, %rsi
leaq d_MatA(%rip), %rdi
call cudaMalloc@PLT
movq %r12, %rsi
leaq d_VecV(%rip), %rdi
call cudaMalloc@PLT
movq %r12, %rsi
leaq d_VecW(%rip), %rdi
call cudaMalloc@PLT
movl $4, %esi
leaq d_NormW(%rip), %rdi
call cudaMalloc@PLT
leaq 64(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl $1, %ecx
movq %r13, %rdx
movq h_MatA(%rip), %rsi
movq d_MatA(%rip), %rdi
call cudaMemcpy@PLT
movl $1, %ecx
movq %r12, %rdx
movq h_VecV(%rip), %rsi
movq d_VecV(%rip), %rdi
call cudaMemcpy@PLT
leaq 60(%rsp), %rsi
movl $1, %ecx
movl $4, %edx
movq 64(%rsp), %rdi
call cudaMemcpy@PLT
leaq 128(%rsp), %rsi
movl $0, %edi
call clock_gettime@PLT
leal 0(,%rbx,4), %r12d
movslq %r12d, %r12
movl %ebx, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl %ebp, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $0, %r9d
movq %r12, %r8
movq 84(%rsp), %rdx
movl $1, %ecx
movq 72(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L133
.L120:
leaq 112(%rsp), %rsi
movl $0, %edi
call clock_gettime@PLT
movq 112(%rsp), %rax
subq 128(%rsp), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
pxor %xmm2, %xmm2
addsd %xmm2, %xmm0
movq 120(%rsp), %rax
subq 136(%rsp), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
mulsd .LC15(%rip), %xmm1
addsd %xmm1, %xmm0
addsd %xmm2, %xmm0
movsd %xmm0, 8(%rsp)
call cudaThreadSynchronize@PLT
cmpl $0, max_iteration(%rip)
jle .L130
movl $0, %r15d
movl $0x00000000, 40(%rsp)
movq $0x000000000, 32(%rsp)
movq $0x000000000, 24(%rsp)
movq $0x000000000, 16(%rsp)
leaq 128(%rsp), %r14
leaq 112(%rsp), %r13
jmp .L126
.L133:
movl 44(%rsp), %ecx
movq d_VecW(%rip), %rdx
movq d_VecV(%rip), %rsi
movq d_MatA(%rip), %rdi
call _Z35__device_stub__Z10Av_ProductPfS_S_iPfS_S_i
jmp .L120
.L134:
movl 44(%rsp), %edx
movq d_NormW(%rip), %rsi
movq d_VecW(%rip), %rdi
call _Z31__device_stub__Z9FindNormWPfS_iPfS_i
jmp .L122
.L135:
movl 44(%rsp), %ecx
movq d_VecV(%rip), %rdx
movq d_NormW(%rip), %rsi
movq d_VecW(%rip), %rdi
call _Z35__device_stub__Z10NormalizeWPfS_S_iPfS_S_i
jmp .L123
.L136:
movl 44(%rsp), %ecx
movq d_VecW(%rip), %rdx
movq d_VecV(%rip), %rsi
movq d_MatA(%rip), %rdi
call _Z35__device_stub__Z10Av_ProductPfS_S_iPfS_S_i
jmp .L124
.L125:
movq %r13, %rsi
movl $0, %edi
call clock_gettime@PLT
movq 112(%rsp), %rax
subq 128(%rsp), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
movsd 32(%rsp), %xmm6
addsd %xmm6, %xmm0
movq 120(%rsp), %rax
subq 136(%rsp), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
mulsd .LC15(%rip), %xmm1
addsd %xmm1, %xmm0
addsd %xmm6, %xmm0
movsd %xmm0, 32(%rsp)
call cudaThreadSynchronize@PLT
leaq 60(%rsp), %rdi
movl $2, %ecx
movl $4, %edx
movq 64(%rsp), %rsi
call cudaMemcpy@PLT
pxor %xmm0, %xmm0
cvtss2sd 60(%rsp), %xmm0
movl %r15d, %edx
leaq .LC19(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movss 60(%rsp), %xmm1
movaps %xmm1, %xmm0
subss 40(%rsp), %xmm0
andps .LC3(%rip), %xmm0
movss .LC4(%rip), %xmm2
comiss %xmm0, %xmm2
ja .L121
addl $1, %r15d
cmpl %r15d, max_iteration(%rip)
jle .L121
movss %xmm1, 40(%rsp)
.L126:
movq %r14, %rsi
movl $0, %edi
call clock_gettime@PLT
movl %ebx, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl %ebp, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $0, %r9d
movq %r12, %r8
movq 84(%rsp), %rdx
movl $1, %ecx
movq 72(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L134
.L122:
movq %r13, %rsi
movl $0, %edi
call clock_gettime@PLT
movq 112(%rsp), %rax
subq 128(%rsp), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
movsd 16(%rsp), %xmm3
addsd %xmm3, %xmm0
movq 120(%rsp), %rax
subq 136(%rsp), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
mulsd .LC15(%rip), %xmm1
addsd %xmm1, %xmm0
addsd %xmm3, %xmm0
movsd %xmm0, 16(%rsp)
call cudaThreadSynchronize@PLT
movq %r14, %rsi
movl $0, %edi
call clock_gettime@PLT
movl %ebx, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl %ebp, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $0, %r9d
movq %r12, %r8
movq 84(%rsp), %rdx
movl $1, %ecx
movq 72(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L135
.L123:
movq %r13, %rsi
movl $0, %edi
call clock_gettime@PLT
movq 112(%rsp), %rax
subq 128(%rsp), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
movsd 24(%rsp), %xmm4
addsd %xmm4, %xmm0
movq 120(%rsp), %rax
subq 136(%rsp), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
mulsd .LC15(%rip), %xmm1
addsd %xmm1, %xmm0
addsd %xmm4, %xmm0
movsd %xmm0, 24(%rsp)
call cudaThreadSynchronize@PLT
movq %r14, %rsi
movl $0, %edi
call clock_gettime@PLT
movl %ebx, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl %ebp, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $0, %r9d
movq %r12, %r8
movq 84(%rsp), %rdx
movl $1, %ecx
movq 72(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L136
.L124:
movq %r13, %rsi
movl $0, %edi
call clock_gettime@PLT
movq 112(%rsp), %rax
subq 128(%rsp), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
movsd 8(%rsp), %xmm5
addsd %xmm5, %xmm0
movq 120(%rsp), %rax
subq 136(%rsp), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
mulsd .LC15(%rip), %xmm1
addsd %xmm1, %xmm0
addsd %xmm5, %xmm0
movsd %xmm0, 8(%rsp)
call cudaThreadSynchronize@PLT
movq %r14, %rsi
movl $0, %edi
call clock_gettime@PLT
movl %ebx, 84(%rsp)
movl $1, 88(%rsp)
movl $1, 92(%rsp)
movl %ebp, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $0, %r9d
movq %r12, %r8
movq 84(%rsp), %rdx
movl $1, %ecx
movq 72(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
jne .L125
movl 44(%rsp), %ecx
movq 64(%rsp), %rdx
movq d_VecW(%rip), %rsi
movq d_VecV(%rip), %rdi
call _Z37__device_stub__Z12ComputeLamdaPfS_S_iPfS_S_i
jmp .L125
.L130:
movq $0x000000000, 32(%rsp)
movq $0x000000000, 24(%rsp)
movq $0x000000000, 16(%rsp)
.L121:
leaq 112(%rsp), %rsi
movl $0, %edi
call clock_gettime@PLT
movq 120(%rsp), %rax
subq 104(%rsp), %rax
pxor %xmm0, %xmm0
cvtsi2sdq %rax, %xmm0
mulsd .LC15(%rip), %xmm0
movq 112(%rsp), %rax
subq 96(%rsp), %rax
pxor %xmm1, %xmm1
cvtsi2sdq %rax, %xmm1
addsd %xmm1, %xmm0
leaq .LC20(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movsd 8(%rsp), %xmm0
leaq .LC21(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movsd 32(%rsp), %xmm0
leaq .LC22(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movsd 16(%rsp), %xmm0
leaq .LC23(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movsd 24(%rsp), %xmm0
leaq .LC24(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movq 64(%rsp), %rdi
testq %rdi, %rdi
je .L127
call cudaFree@PLT
.L127:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L137
call _Z7Cleanupv
.L137:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2061:
.size main, .-main
.section .rodata.str1.1
.LC25:
.string "_Z10NormalizeWPfS_S_i"
.LC26:
.string "_Z9FindNormWPfS_i"
.LC27:
.string "_Z12ComputeLamdaPfS_S_i"
.LC28:
.string "_Z10Av_ProductPfS_S_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2101:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC25(%rip), %rdx
movq %rdx, %rcx
leaq _Z10NormalizeWPfS_S_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC26(%rip), %rdx
movq %rdx, %rcx
leaq _Z9FindNormWPfS_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC27(%rip), %rdx
movq %rdx, %rcx
leaq _Z12ComputeLamdaPfS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC28(%rip), %rdx
movq %rdx, %rcx
leaq _Z10Av_ProductPfS_S_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2101:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.globl max_iteration
.data
.align 4
.type max_iteration, @object
.size max_iteration, 4
max_iteration:
.long 100
.globl BlockSize
.align 4
.type BlockSize, @object
.size BlockSize, 4
BlockSize:
.long 256
.globl GlobalSize
.align 4
.type GlobalSize, @object
.size GlobalSize, 4
GlobalSize:
.long 8000
.globl d_NormW
.bss
.align 8
.type d_NormW, @object
.size d_NormW, 8
d_NormW:
.zero 8
.globl h_NormW
.align 8
.type h_NormW, @object
.size h_NormW, 8
h_NormW:
.zero 8
.globl d_VecW
.align 8
.type d_VecW, @object
.size d_VecW, 8
d_VecW:
.zero 8
.globl h_VecW
.align 8
.type h_VecW, @object
.size h_VecW, 8
h_VecW:
.zero 8
.globl d_VecV
.align 8
.type d_VecV, @object
.size d_VecV, 8
d_VecV:
.zero 8
.globl h_VecV
.align 8
.type h_VecV, @object
.size h_VecV, 8
h_VecV:
.zero 8
.globl d_MatA
.align 8
.type d_MatA, @object
.size d_MatA, 8
d_MatA:
.zero 8
.globl h_MatA
.align 8
.type h_MatA, @object
.size h_MatA, 8
h_MatA:
.zero 8
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC3:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC4:
.long 916964780
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC15:
.long -400107883
.long 1041313291
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | // the GPU code can be found in power_gpu.cu
// jiabing jin, sept 2017
////////////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "cuda.h"
const int BLOCK_SIZE =256;
// #include "power_gpu.cu"
// Input Array Variables
float* h_MatA = NULL;
float* d_MatA = NULL;
// Output Array
float* h_VecV = NULL;
float* d_VecV = NULL;
float* h_VecW = NULL;
float* d_VecW = NULL;
float* h_NormW = NULL;
float* d_NormW = NULL;
// Variables to change
int GlobalSize = 8000; // this is the dimension of the matrix, GlobalSize*GlobalSize
int BlockSize = 256; // number of threads in each block
const float EPS = 0.000005; // tolerance of the error
int max_iteration = 100; // the maximum iteration steps
// Functions
void Cleanup(void);
void InitOne(float*, int);
void UploadArray(float*, int);
float CPUReduce(float*, int);
void Arguments(int, char**);
void checkCardVersion(void);
void ParseArguments(int, char**);
// Kernels
__global__ void Av_Product(float* , float* , float* , int );
__global__ void FindNormW(float* , float* , int );
__global__ void NormalizeW(float* ,float* , int );
__global__ void ComputeLamda( float* ,float* , float* ,int );
void CPU_AvProduct()
{
int N = GlobalSize;
int matIndex =0;
for(int i=0;i<N;i++)
{
h_VecW[i] = 0;
for(int j=0;j<N;j++)
{
matIndex = i*N + j;
h_VecW[i] += h_MatA[matIndex] * h_VecV[j];
}
}
}
void CPU_NormalizeW()
{
int N = GlobalSize;
float normW=0;
for(int i=0;i<N;i++)
normW += h_VecW[i] * h_VecW[i];
normW = sqrt(normW);
for(int i=0;i<N;i++)
h_VecV[i] = h_VecW[i]/normW;
}
float CPU_ComputeLamda()
{
int N = GlobalSize;
float lamda =0;
for(int i=0;i<N;i++)
lamda += h_VecV[i] * h_VecW[i];
return lamda;
}
void RunCPUPowerMethod()
{
printf("*************************************\n");
float oldLamda =0;
float lamda=0;
//AvProduct
CPU_AvProduct();
//power loop
for (int i=0;i<max_iteration;i++)
{
CPU_NormalizeW();
CPU_AvProduct();
lamda= CPU_ComputeLamda();
printf("CPU lamda at %d: %f \n", i, lamda);
// If residual is less than epsilon, break
if(abs(oldLamda - lamda) < EPS)
break;
oldLamda = lamda;
}
printf("*************************************\n");
}
__global__ void Av_Product(float* g_MatA, float* g_VecV, float* g_VecW, int N)
{
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int aBegin = N * BLOCK_SIZE * bx;
int aEnd = aBegin + N - 1;
int step = BLOCK_SIZE;
int bBegin = 0;//BLOCK_SIZE * bx;
int bIndex=0;
int aIndex =0;
float Csub = 0;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += step, b += step)
{
__shared__ float As[BLOCK_SIZE];
__shared__ float bs[BLOCK_SIZE];
aIndex = a + tx;
if( aIndex < N*N)
As[tx] = g_MatA[aIndex];
else
As[tx] = 0;
bIndex = b+tx;
if(bIndex<N)
bs[tx] = g_VecV[bIndex];
else
bs[tx] = 0;
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[k] * bs[k];
}//}
__syncthreads();
}
g_VecW[ BLOCK_SIZE * bx + tx] = Csub;
}
__global__ void ComputeLamda( float* g_VecV, float* g_VecW, float * g_Lamda,int N)
{
// shared memory size declared at kernel launch
extern __shared__ float sdataVW[];
unsigned int tid = threadIdx.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
// For thread ids greater than data space
if (globalid < N) {
sdataVW[tid] = g_VecV[globalid] * g_VecW[globalid];
}
else {
sdataVW[tid] = 0; // Case of extra threads above N
}
// each thread loads one element from global to shared mem
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) {
if (tid < s) {
sdataVW[tid] = sdataVW[tid] + sdataVW[tid+ s];
}
__syncthreads();
}
// atomic operations:
if (tid == 0) atomicAdd(g_Lamda,sdataVW[0]);
}
__global__ void FindNormW(float* g_VecW, float * g_NormW, int N)
{
// shared memory size declared at kernel launch
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
// For thread ids greater than data space
if (globalid < N) {
sdata[tid] = g_VecW[globalid];
}
else {
sdata[tid] = 0; // Case of extra threads above N
}
// each thread loads one element from global to shared mem
__syncthreads();
sdata[tid] = sdata[tid] * sdata[tid];
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) {
if (tid < s) {
sdata[tid] = sdata[tid] + sdata[tid+ s];
}
__syncthreads();
}
// atomic operations:
if (tid == 0) atomicAdd(g_NormW,sdata[0]);
}
__global__ void NormalizeW(float* g_VecW, float * g_NormW, float* g_VecV, int N)
{
// shared memory size declared at kernel launch
extern __shared__ float sNormData[];
unsigned int tid = threadIdx.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid==0) sNormData[0] = g_NormW[0];
__syncthreads();
// For thread ids greater than data space
if (globalid < N) {
g_VecV[globalid] = g_VecW[globalid]/sNormData[0];
}
}
// Host code
int main(int argc, char** argv)
{
struct timespec t_start,t_end,t_start2;
double runtime;
double Av_runtime = 0.0;
double Find_runtime = 0.0;
double Norm_runtime = 0.0;
double Compute_runtime = 0.0;
ParseArguments(argc, argv);
int N = GlobalSize;
printf("Matrix size %d X %d \n", N, N);
size_t vec_size = N * sizeof(float);
size_t mat_size = N * N * sizeof(float);
size_t norm_size = sizeof(float);
// Allocate normalized value in host memory
h_NormW = (float*)malloc(norm_size);
// Allocate input matrix in host memory
h_MatA = (float*)malloc(mat_size);
// Allocate initial vector V in host memory
h_VecV = (float*)malloc(vec_size);
// Allocate W vector for computations
h_VecW = (float*)malloc(vec_size);
// Initialize input matrix
UploadArray(h_MatA, N);
InitOne(h_VecV,N);
printf("Power method in CPU starts\n");
clock_gettime(CLOCK_REALTIME,&t_start);
RunCPUPowerMethod(); // the lamda is already solved here
clock_gettime(CLOCK_REALTIME,&t_end);
runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec);
printf("CPU: run time = %f secs.\n",runtime);
printf("Power method in CPU is finished\n");
/////////////////////////////////////////////////
// This is the starting point of the GPU part
printf("Power method in GPU starts\n");
checkCardVersion();
// Re-initialize the vector V for the GPU run
InitOne(h_VecV,N);
clock_gettime(CLOCK_REALTIME,&t_start); // Here I start to count
// Set the kernel arguments
int threadsPerBlock = BlockSize;
int sharedMemSize = threadsPerBlock * sizeof(float); // shared memory per block, in bytes
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
/* Host variables for convergence */
float OldLamda = 0;
float temp = 0;
/* Device variable for convergence */
float* Lamda = 0;
// Allocate matrix and vectors in device memory
cudaMalloc((void**)&d_MatA, mat_size);
cudaMalloc((void**)&d_VecV, vec_size);
cudaMalloc((void**)&d_VecW, vec_size); // This vector is only used by the device
cudaMalloc((void**)&d_NormW, norm_size);
/* Allocate memory for device lamda */
cudaMalloc((void**)&Lamda, sizeof(float));
//Copy from host memory to device memory
cudaMemcpy(d_MatA, h_MatA, mat_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_VecV, h_VecV, vec_size, cudaMemcpyHostToDevice);
cudaMemcpy(Lamda, &OldLamda, sizeof(float), cudaMemcpyHostToDevice);
// cutilCheckError(cutStopTimer(timer_mem));
//Power method loops
clock_gettime(CLOCK_REALTIME,&t_start2);
/* First matrix vector multiplication */
Av_Product<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_MatA, d_VecV, d_VecW, N);
/* Measure time for one matrix vector multiplication */
clock_gettime(CLOCK_REALTIME,&t_end);
Av_runtime += Av_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
cudaThreadSynchronize(); // Needed, acts as a barrier to synchronize all threads
// This part is the main code of the iteration process for the Power Method in GPU.
// Please finish this part based on the given code. Do not forget the call to
// cudaThreadSynchronize() after launching each kernel in CUDA to synchronize the threads
////////////////////////////////////////////
// /// // // // // // //
// //
//power loop
for (int i=0;i<max_iteration;i++)
{
/* Measure time for creating the normalized vector */
clock_gettime(CLOCK_REALTIME,&t_start2);
FindNormW<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecW, d_NormW, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Find_runtime += Find_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
cudaThreadSynchronize();
/* Measure time for normalizing the vector */
clock_gettime(CLOCK_REALTIME,&t_start2);
NormalizeW<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecW, d_NormW, d_VecV, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Norm_runtime += Norm_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
cudaThreadSynchronize();
/* Measure time for matrix vector multiplication */
clock_gettime(CLOCK_REALTIME,&t_start2);
Av_Product<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_MatA, d_VecV, d_VecW, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Av_runtime += Av_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
cudaThreadSynchronize();
/* Measure time computing the Lamda */
clock_gettime(CLOCK_REALTIME,&t_start2);
ComputeLamda<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecV, d_VecW, Lamda, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Compute_runtime += Compute_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
cudaThreadSynchronize();
// If residual is less than epsilon, break
cudaMemcpy(&OldLamda, Lamda, sizeof(float), cudaMemcpyDeviceToHost);
printf("GPU lamda at %d: %f \n", i, OldLamda);
if(abs(OldLamda - temp) < EPS){
break;
}
temp = OldLamda;
}
// //
// //
// //
// //
// //
// //
// /// // /// // // // // // // // //
clock_gettime(CLOCK_REALTIME,&t_end);
runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec);
printf("GPU: run time = %f secs.\n",runtime);
printf("GPU: Av time = %f secs.\n",Av_runtime);
printf("GPU: compute time = %f secs.\n",Compute_runtime);
printf("GPU: find time = %f secs.\n",Find_runtime);
printf("GPU: norm time = %f secs.\n",Norm_runtime);
// printf("Overall CPU Execution Time: %f (ms) \n", cutGetTimerValue(timer_CPU));
if (Lamda)
cudaFree(Lamda);
Cleanup();
}
void Cleanup(void)
{
// Free device memory
if (d_MatA)
cudaFree(d_MatA);
if (d_VecV)
cudaFree(d_VecV);
if (d_VecW)
cudaFree(d_VecW);
if (d_NormW)
cudaFree(d_NormW);
// Free host memory
if (h_MatA)
free(h_MatA);
if (h_VecV)
free(h_VecV);
if (h_VecW)
free(h_VecW);
if (h_NormW)
free(h_NormW);
exit(0);
}
// Initializes the array to zero, with the first element set to one.
void InitOne(float* data, int n)
{
for (int i = 0; i < n; i++)
data[i] = 0;
data[0]=1;
}
void UploadArray(float* data, int n)
{
int total = n*n;
int value=1;
for (int i = 0; i < total; i++)
{
data[i] = (int) (rand() % (int)(101));//1;//value;
value ++; if(value>n) value =1;
// data[i] = 1;
}
}
// Obtain program arguments
void Arguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0)
{
GlobalSize = atoi(argv[i+1]);
i = i + 1;
}
if (strcmp(argv[i], "--max_iteration") == 0 || strcmp(argv[i], "-max_iteration") == 0)
{
max_iteration = atoi(argv[i+1]);
i = i + 1;
}
}
}
void checkCardVersion()
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("This GPU has major architecture %d, minor %d \n",prop.major,prop.minor);
if(prop.major < 2)
{
fprintf(stderr,"Need compute capability 2 or higher.\n");
exit(1);
}
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i) {
if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0) {
GlobalSize = atoi(argv[i+1]);
i = i + 1;
}
// if (strcmp(argv[i], "--blocksize") == 0 || strcmp(argv[i], "-blocksize") == 0) {
// BlockSize = atoi(argv[i+1]);
// i = i + 1;
// }
}
} | // the GPU code can be found in power_gpu.cu
// jiabing jin, sept 2017
////////////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "hip/hip_runtime.h"
const int BLOCK_SIZE =256;
// #include "power_gpu.cu"
// Input Array Variables
float* h_MatA = NULL;
float* d_MatA = NULL;
// Output Array
float* h_VecV = NULL;
float* d_VecV = NULL;
float* h_VecW = NULL;
float* d_VecW = NULL;
float* h_NormW = NULL;
float* d_NormW = NULL;
// Variables to change
int GlobalSize = 8000; // this is the dimension of the matrix, GlobalSize*GlobalSize
int BlockSize = 256; // number of threads in each block
const float EPS = 0.000005; // tolerance of the error
int max_iteration = 100; // the maximum iteration steps
// Functions
void Cleanup(void);
void InitOne(float*, int);
void UploadArray(float*, int);
float CPUReduce(float*, int);
void Arguments(int, char**);
void checkCardVersion(void);
void ParseArguments(int, char**);
// Kernels
__global__ void Av_Product(float* , float* , float* , int );
__global__ void FindNormW(float* , float* , int );
__global__ void NormalizeW(float* ,float* , int );
__global__ void ComputeLamda( float* ,float* , float* ,int );
void CPU_AvProduct()
{
int N = GlobalSize;
int matIndex =0;
for(int i=0;i<N;i++)
{
h_VecW[i] = 0;
for(int j=0;j<N;j++)
{
matIndex = i*N + j;
h_VecW[i] += h_MatA[matIndex] * h_VecV[j];
}
}
}
void CPU_NormalizeW()
{
int N = GlobalSize;
float normW=0;
for(int i=0;i<N;i++)
normW += h_VecW[i] * h_VecW[i];
normW = sqrt(normW);
for(int i=0;i<N;i++)
h_VecV[i] = h_VecW[i]/normW;
}
float CPU_ComputeLamda()
{
int N = GlobalSize;
float lamda =0;
for(int i=0;i<N;i++)
lamda += h_VecV[i] * h_VecW[i];
return lamda;
}
void RunCPUPowerMethod()
{
printf("*************************************\n");
float oldLamda =0;
float lamda=0;
//AvProduct
CPU_AvProduct();
//power loop
for (int i=0;i<max_iteration;i++)
{
CPU_NormalizeW();
CPU_AvProduct();
lamda= CPU_ComputeLamda();
printf("CPU lamda at %d: %f \n", i, lamda);
// If residual is less than epsilon, break
if(abs(oldLamda - lamda) < EPS)
break;
oldLamda = lamda;
}
printf("*************************************\n");
}
__global__ void Av_Product(float* g_MatA, float* g_VecV, float* g_VecW, int N)
{
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int aBegin = N * BLOCK_SIZE * bx;
int aEnd = aBegin + N - 1;
int step = BLOCK_SIZE;
int bBegin = 0;//BLOCK_SIZE * bx;
int bIndex=0;
int aIndex =0;
float Csub = 0;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += step, b += step)
{
__shared__ float As[BLOCK_SIZE];
__shared__ float bs[BLOCK_SIZE];
aIndex = a + tx;
if( aIndex < N*N)
As[tx] = g_MatA[aIndex];
else
As[tx] = 0;
bIndex = b+tx;
if(bIndex<N)
bs[tx] = g_VecV[bIndex];
else
bs[tx] = 0;
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[k] * bs[k];
}//}
__syncthreads();
}
g_VecW[ BLOCK_SIZE * bx + tx] = Csub;
}
__global__ void ComputeLamda( float* g_VecV, float* g_VecW, float * g_Lamda,int N)
{
// shared memory size declared at kernel launch
extern __shared__ float sdataVW[];
unsigned int tid = threadIdx.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
// For thread ids greater than data space
if (globalid < N) {
sdataVW[tid] = g_VecV[globalid] * g_VecW[globalid];
}
else {
sdataVW[tid] = 0; // Case of extra threads above N
}
// each thread loads one element from global to shared mem
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) {
if (tid < s) {
sdataVW[tid] = sdataVW[tid] + sdataVW[tid+ s];
}
__syncthreads();
}
// atomic operations:
if (tid == 0) atomicAdd(g_Lamda,sdataVW[0]);
}
__global__ void FindNormW(float* g_VecW, float * g_NormW, int N)
{
// shared memory size declared at kernel launch
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
// For thread ids greater than data space
if (globalid < N) {
sdata[tid] = g_VecW[globalid];
}
else {
sdata[tid] = 0; // Case of extra threads above N
}
// each thread loads one element from global to shared mem
__syncthreads();
sdata[tid] = sdata[tid] * sdata[tid];
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) {
if (tid < s) {
sdata[tid] = sdata[tid] + sdata[tid+ s];
}
__syncthreads();
}
// atomic operations:
if (tid == 0) atomicAdd(g_NormW,sdata[0]);
}
__global__ void NormalizeW(float* g_VecW, float * g_NormW, float* g_VecV, int N)
{
// shared memory size declared at kernel launch
extern __shared__ float sNormData[];
unsigned int tid = threadIdx.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid==0) sNormData[0] = g_NormW[0];
__syncthreads();
// For thread ids greater than data space
if (globalid < N) {
g_VecV[globalid] = g_VecW[globalid]/sNormData[0];
}
}
// Host code
int main(int argc, char** argv)
{
struct timespec t_start,t_end,t_start2;
double runtime;
double Av_runtime = 0.0;
double Find_runtime = 0.0;
double Norm_runtime = 0.0;
double Compute_runtime = 0.0;
ParseArguments(argc, argv);
int N = GlobalSize;
printf("Matrix size %d X %d \n", N, N);
size_t vec_size = N * sizeof(float);
size_t mat_size = N * N * sizeof(float);
size_t norm_size = sizeof(float);
// Allocate normalized value in host memory
h_NormW = (float*)malloc(norm_size);
// Allocate input matrix in host memory
h_MatA = (float*)malloc(mat_size);
// Allocate initial vector V in host memory
h_VecV = (float*)malloc(vec_size);
// Allocate W vector for computations
h_VecW = (float*)malloc(vec_size);
// Initialize input matrix
UploadArray(h_MatA, N);
InitOne(h_VecV,N);
printf("Power method in CPU starts\n");
clock_gettime(CLOCK_REALTIME,&t_start);
RunCPUPowerMethod(); // the lamda is already solved here
clock_gettime(CLOCK_REALTIME,&t_end);
runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec);
printf("CPU: run time = %f secs.\n",runtime);
printf("Power method in CPU is finished\n");
/////////////////////////////////////////////////
// This is the starting point of the GPU part
printf("Power method in GPU starts\n");
checkCardVersion();
// Re-initialize the vector V for the GPU run
InitOne(h_VecV,N);
clock_gettime(CLOCK_REALTIME,&t_start); // Here I start to count
// Set the kernel arguments
int threadsPerBlock = BlockSize;
int sharedMemSize = threadsPerBlock * sizeof(float); // shared memory per block, in bytes
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
/* Host variables for convergence */
float OldLamda = 0;
float temp = 0;
/* Device variable for convergence */
float* Lamda = 0;
// Allocate matrix and vectors in device memory
hipMalloc((void**)&d_MatA, mat_size);
hipMalloc((void**)&d_VecV, vec_size);
hipMalloc((void**)&d_VecW, vec_size); // This vector is only used by the device
hipMalloc((void**)&d_NormW, norm_size);
/* Allocate memory for device lamda */
hipMalloc((void**)&Lamda, sizeof(float));
//Copy from host memory to device memory
hipMemcpy(d_MatA, h_MatA, mat_size, hipMemcpyHostToDevice);
hipMemcpy(d_VecV, h_VecV, vec_size, hipMemcpyHostToDevice);
hipMemcpy(Lamda, &OldLamda, sizeof(float), hipMemcpyHostToDevice);
// cutilCheckError(cutStopTimer(timer_mem));
//Power method loops
clock_gettime(CLOCK_REALTIME,&t_start2);
/* First matrix vector multiplication */
Av_Product<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_MatA, d_VecV, d_VecW, N);
/* Measure time for one matrix vector multiplication */
clock_gettime(CLOCK_REALTIME,&t_end);
Av_runtime += Av_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
hipDeviceSynchronize(); // Needed, acts as a barrier to synchronize all threads
// This part is the main code of the iteration process for the Power Method in GPU.
// Please finish this part based on the given code. Do not forget the call to
// cudaThreadSynchronize() after launching each kernel in CUDA to synchronize the threads
////////////////////////////////////////////
// /// // // // // // //
// //
//power loop
for (int i=0;i<max_iteration;i++)
{
/* Measure time for creating the normalized vector */
clock_gettime(CLOCK_REALTIME,&t_start2);
FindNormW<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecW, d_NormW, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Find_runtime += Find_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
hipDeviceSynchronize();
/* Measure time for normalizing the vector */
clock_gettime(CLOCK_REALTIME,&t_start2);
NormalizeW<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecW, d_NormW, d_VecV, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Norm_runtime += Norm_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
hipDeviceSynchronize();
/* Measure time for matrix vector multiplication */
clock_gettime(CLOCK_REALTIME,&t_start2);
Av_Product<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_MatA, d_VecV, d_VecW, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Av_runtime += Av_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
hipDeviceSynchronize();
/* Measure time computing the Lamda */
clock_gettime(CLOCK_REALTIME,&t_start2);
ComputeLamda<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecV, d_VecW, Lamda, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Compute_runtime += Compute_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
hipDeviceSynchronize();
// If residual is less than epsilon, break
hipMemcpy(&OldLamda, Lamda, sizeof(float), hipMemcpyDeviceToHost);
printf("GPU lamda at %d: %f \n", i, OldLamda);
if(abs(OldLamda - temp) < EPS){
break;
}
temp = OldLamda;
}
// //
// //
// //
// //
// //
// //
// /// // /// // // // // // // // //
clock_gettime(CLOCK_REALTIME,&t_end);
runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec);
printf("GPU: run time = %f secs.\n",runtime);
printf("GPU: Av time = %f secs.\n",Av_runtime);
printf("GPU: compute time = %f secs.\n",Compute_runtime);
printf("GPU: find time = %f secs.\n",Find_runtime);
printf("GPU: norm time = %f secs.\n",Norm_runtime);
// printf("Overall CPU Execution Time: %f (ms) \n", cutGetTimerValue(timer_CPU));
if (Lamda)
hipFree(Lamda);
Cleanup();
}
void Cleanup(void)
{
// Free device memory
if (d_MatA)
hipFree(d_MatA);
if (d_VecV)
hipFree(d_VecV);
if (d_VecW)
hipFree(d_VecW);
if (d_NormW)
hipFree(d_NormW);
// Free host memory
if (h_MatA)
free(h_MatA);
if (h_VecV)
free(h_VecV);
if (h_VecW)
free(h_VecW);
if (h_NormW)
free(h_NormW);
exit(0);
}
// Initializes the array to zero, with the first element set to one.
void InitOne(float* data, int n)
{
for (int i = 0; i < n; i++)
data[i] = 0;
data[0]=1;
}
void UploadArray(float* data, int n)
{
int total = n*n;
int value=1;
for (int i = 0; i < total; i++)
{
data[i] = (int) (rand() % (int)(101));//1;//value;
value ++; if(value>n) value =1;
// data[i] = 1;
}
}
// Obtain program arguments
void Arguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0)
{
GlobalSize = atoi(argv[i+1]);
i = i + 1;
}
if (strcmp(argv[i], "--max_iteration") == 0 || strcmp(argv[i], "-max_iteration") == 0)
{
max_iteration = atoi(argv[i+1]);
i = i + 1;
}
}
}
void checkCardVersion()
{
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("This GPU has major architecture %d, minor %d \n",prop.major,prop.minor);
if(prop.major < 2)
{
fprintf(stderr,"Need compute capability 2 or higher.\n");
exit(1);
}
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i) {
if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0) {
GlobalSize = atoi(argv[i+1]);
i = i + 1;
}
// if (strcmp(argv[i], "--blocksize") == 0 || strcmp(argv[i], "-blocksize") == 0) {
// BlockSize = atoi(argv[i+1]);
// i = i + 1;
// }
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | // the GPU code can be found in power_gpu.cu
// jiabing jin, sept 2017
////////////////////////////////////////////
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "hip/hip_runtime.h"
const int BLOCK_SIZE =256;
// #include "power_gpu.cu"
// Input Array Variables
float* h_MatA = NULL;
float* d_MatA = NULL;
// Output Array
float* h_VecV = NULL;
float* d_VecV = NULL;
float* h_VecW = NULL;
float* d_VecW = NULL;
float* h_NormW = NULL;
float* d_NormW = NULL;
// Variables to change
int GlobalSize = 8000; // this is the dimension of the matrix, GlobalSize*GlobalSize
int BlockSize = 256; // number of threads in each block
const float EPS = 0.000005; // tolerance of the error
int max_iteration = 100; // the maximum iteration steps
// Functions
void Cleanup(void);
void InitOne(float*, int);
void UploadArray(float*, int);
float CPUReduce(float*, int);
void Arguments(int, char**);
void checkCardVersion(void);
void ParseArguments(int, char**);
// Kernels
__global__ void Av_Product(float* , float* , float* , int );
__global__ void FindNormW(float* , float* , int );
__global__ void NormalizeW(float* ,float* , int );
__global__ void ComputeLamda( float* ,float* , float* ,int );
void CPU_AvProduct()
{
int N = GlobalSize;
int matIndex =0;
for(int i=0;i<N;i++)
{
h_VecW[i] = 0;
for(int j=0;j<N;j++)
{
matIndex = i*N + j;
h_VecW[i] += h_MatA[matIndex] * h_VecV[j];
}
}
}
void CPU_NormalizeW()
{
int N = GlobalSize;
float normW=0;
for(int i=0;i<N;i++)
normW += h_VecW[i] * h_VecW[i];
normW = sqrt(normW);
for(int i=0;i<N;i++)
h_VecV[i] = h_VecW[i]/normW;
}
float CPU_ComputeLamda()
{
int N = GlobalSize;
float lamda =0;
for(int i=0;i<N;i++)
lamda += h_VecV[i] * h_VecW[i];
return lamda;
}
void RunCPUPowerMethod()
{
printf("*************************************\n");
float oldLamda =0;
float lamda=0;
//AvProduct
CPU_AvProduct();
//power loop
for (int i=0;i<max_iteration;i++)
{
CPU_NormalizeW();
CPU_AvProduct();
lamda= CPU_ComputeLamda();
printf("CPU lamda at %d: %f \n", i, lamda);
// If residual is less than epsilon, break
if(abs(oldLamda - lamda) < EPS)
break;
oldLamda = lamda;
}
printf("*************************************\n");
}
__global__ void Av_Product(float* g_MatA, float* g_VecV, float* g_VecW, int N)
{
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int aBegin = N * BLOCK_SIZE * bx;
int aEnd = aBegin + N - 1;
int step = BLOCK_SIZE;
int bBegin = 0;//BLOCK_SIZE * bx;
int bIndex=0;
int aIndex =0;
float Csub = 0;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += step, b += step)
{
__shared__ float As[BLOCK_SIZE];
__shared__ float bs[BLOCK_SIZE];
aIndex = a + tx;
if( aIndex < N*N)
As[tx] = g_MatA[aIndex];
else
As[tx] = 0;
bIndex = b+tx;
if(bIndex<N)
bs[tx] = g_VecV[bIndex];
else
bs[tx] = 0;
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[k] * bs[k];
}//}
__syncthreads();
}
g_VecW[ BLOCK_SIZE * bx + tx] = Csub;
}
__global__ void ComputeLamda( float* g_VecV, float* g_VecW, float * g_Lamda,int N)
{
// shared memory size declared at kernel launch
extern __shared__ float sdataVW[];
unsigned int tid = threadIdx.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
// For thread ids greater than data space
if (globalid < N) {
sdataVW[tid] = g_VecV[globalid] * g_VecW[globalid];
}
else {
sdataVW[tid] = 0; // Case of extra threads above N
}
// each thread loads one element from global to shared mem
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) {
if (tid < s) {
sdataVW[tid] = sdataVW[tid] + sdataVW[tid+ s];
}
__syncthreads();
}
// atomic operations:
if (tid == 0) atomicAdd(g_Lamda,sdataVW[0]);
}
__global__ void FindNormW(float* g_VecW, float * g_NormW, int N)
{
// shared memory size declared at kernel launch
extern __shared__ float sdata[];
unsigned int tid = threadIdx.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
// For thread ids greater than data space
if (globalid < N) {
sdata[tid] = g_VecW[globalid];
}
else {
sdata[tid] = 0; // Case of extra threads above N
}
// each thread loads one element from global to shared mem
__syncthreads();
sdata[tid] = sdata[tid] * sdata[tid];
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x / 2; s > 0; s = s >> 1) {
if (tid < s) {
sdata[tid] = sdata[tid] + sdata[tid+ s];
}
__syncthreads();
}
// atomic operations:
if (tid == 0) atomicAdd(g_NormW,sdata[0]);
}
__global__ void NormalizeW(float* g_VecW, float * g_NormW, float* g_VecV, int N)
{
// shared memory size declared at kernel launch
extern __shared__ float sNormData[];
unsigned int tid = threadIdx.x;
unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x;
if(tid==0) sNormData[0] = g_NormW[0];
__syncthreads();
// For thread ids greater than data space
if (globalid < N) {
g_VecV[globalid] = g_VecW[globalid]/sNormData[0];
}
}
// Host code
int main(int argc, char** argv)
{
struct timespec t_start,t_end,t_start2;
double runtime;
double Av_runtime = 0.0;
double Find_runtime = 0.0;
double Norm_runtime = 0.0;
double Compute_runtime = 0.0;
ParseArguments(argc, argv);
int N = GlobalSize;
printf("Matrix size %d X %d \n", N, N);
size_t vec_size = N * sizeof(float);
size_t mat_size = N * N * sizeof(float);
size_t norm_size = sizeof(float);
// Allocate normalized value in host memory
h_NormW = (float*)malloc(norm_size);
// Allocate input matrix in host memory
h_MatA = (float*)malloc(mat_size);
// Allocate initial vector V in host memory
h_VecV = (float*)malloc(vec_size);
// Allocate W vector for computations
h_VecW = (float*)malloc(vec_size);
// Initialize input matrix
UploadArray(h_MatA, N);
InitOne(h_VecV,N);
printf("Power method in CPU starts\n");
clock_gettime(CLOCK_REALTIME,&t_start);
RunCPUPowerMethod(); // the lamda is already solved here
clock_gettime(CLOCK_REALTIME,&t_end);
runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec);
printf("CPU: run time = %f secs.\n",runtime);
printf("Power method in CPU is finished\n");
/////////////////////////////////////////////////
// This is the starting point of the GPU part
printf("Power method in GPU starts\n");
checkCardVersion();
// Re-initialize the vector V for the GPU run
InitOne(h_VecV,N);
clock_gettime(CLOCK_REALTIME,&t_start); // Here I start to count
// Set the kernel arguments
int threadsPerBlock = BlockSize;
int sharedMemSize = threadsPerBlock * sizeof(float); // shared memory per block, in bytes
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
/* Host variables for convergence */
float OldLamda = 0;
float temp = 0;
/* Device variable for convergence */
float* Lamda = 0;
// Allocate matrix and vectors in device memory
hipMalloc((void**)&d_MatA, mat_size);
hipMalloc((void**)&d_VecV, vec_size);
hipMalloc((void**)&d_VecW, vec_size); // This vector is only used by the device
hipMalloc((void**)&d_NormW, norm_size);
/* Allocate memory for device lamda */
hipMalloc((void**)&Lamda, sizeof(float));
//Copy from host memory to device memory
hipMemcpy(d_MatA, h_MatA, mat_size, hipMemcpyHostToDevice);
hipMemcpy(d_VecV, h_VecV, vec_size, hipMemcpyHostToDevice);
hipMemcpy(Lamda, &OldLamda, sizeof(float), hipMemcpyHostToDevice);
// cutilCheckError(cutStopTimer(timer_mem));
//Power method loops
clock_gettime(CLOCK_REALTIME,&t_start2);
/* First matrix vector multiplication */
Av_Product<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_MatA, d_VecV, d_VecW, N);
/* Measure time for one matrix vector multiplication */
clock_gettime(CLOCK_REALTIME,&t_end);
Av_runtime += Av_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
hipDeviceSynchronize(); // Needed, acts as a barrier to synchronize all threads
// This part is the main code of the iteration process for the Power Method in GPU.
// Please finish this part based on the given code. Do not forget the call to
// cudaThreadSynchronize() after launching each kernel in CUDA to synchronize the threads
////////////////////////////////////////////
// /// // // // // // //
// //
//power loop
for (int i=0;i<max_iteration;i++)
{
/* Measure time for creating the normalized vector */
clock_gettime(CLOCK_REALTIME,&t_start2);
FindNormW<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecW, d_NormW, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Find_runtime += Find_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
hipDeviceSynchronize();
/* Measure time for normalizing the vector */
clock_gettime(CLOCK_REALTIME,&t_start2);
NormalizeW<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecW, d_NormW, d_VecV, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Norm_runtime += Norm_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
hipDeviceSynchronize();
/* Measure time for matrix vector multiplication */
clock_gettime(CLOCK_REALTIME,&t_start2);
Av_Product<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_MatA, d_VecV, d_VecW, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Av_runtime += Av_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
hipDeviceSynchronize();
/* Measure time computing the Lamda */
clock_gettime(CLOCK_REALTIME,&t_start2);
ComputeLamda<<<blocksPerGrid, threadsPerBlock, sharedMemSize>>>(d_VecV, d_VecW, Lamda, N);
clock_gettime(CLOCK_REALTIME,&t_end);
Compute_runtime += Compute_runtime + (t_end.tv_sec - t_start2.tv_sec) + 1e-9*(t_end.tv_nsec - t_start2.tv_nsec);
hipDeviceSynchronize();
// If residual is less than epsilon, break
hipMemcpy(&OldLamda, Lamda, sizeof(float), hipMemcpyDeviceToHost);
printf("GPU lamda at %d: %f \n", i, OldLamda);
if(abs(OldLamda - temp) < EPS){
break;
}
temp = OldLamda;
}
// //
// //
// //
// //
// //
// //
// /// // /// // // // // // // // //
clock_gettime(CLOCK_REALTIME,&t_end);
runtime = (t_end.tv_sec - t_start.tv_sec) + 1e-9*(t_end.tv_nsec - t_start.tv_nsec);
printf("GPU: run time = %f secs.\n",runtime);
printf("GPU: Av time = %f secs.\n",Av_runtime);
printf("GPU: compute time = %f secs.\n",Compute_runtime);
printf("GPU: find time = %f secs.\n",Find_runtime);
printf("GPU: norm time = %f secs.\n",Norm_runtime);
// printf("Overall CPU Execution Time: %f (ms) \n", cutGetTimerValue(timer_CPU));
if (Lamda)
hipFree(Lamda);
Cleanup();
}
void Cleanup(void)
{
// Free device memory
if (d_MatA)
hipFree(d_MatA);
if (d_VecV)
hipFree(d_VecV);
if (d_VecW)
hipFree(d_VecW);
if (d_NormW)
hipFree(d_NormW);
// Free host memory
if (h_MatA)
free(h_MatA);
if (h_VecV)
free(h_VecV);
if (h_VecW)
free(h_VecW);
if (h_NormW)
free(h_NormW);
exit(0);
}
// Initializes the array to zero, with the first element set to one.
void InitOne(float* data, int n)
{
for (int i = 0; i < n; i++)
data[i] = 0;
data[0]=1;
}
void UploadArray(float* data, int n)
{
int total = n*n;
int value=1;
for (int i = 0; i < total; i++)
{
data[i] = (int) (rand() % (int)(101));//1;//value;
value ++; if(value>n) value =1;
// data[i] = 1;
}
}
// Obtain program arguments
void Arguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0)
{
GlobalSize = atoi(argv[i+1]);
i = i + 1;
}
if (strcmp(argv[i], "--max_iteration") == 0 || strcmp(argv[i], "-max_iteration") == 0)
{
max_iteration = atoi(argv[i+1]);
i = i + 1;
}
}
}
void checkCardVersion()
{
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("This GPU has major architecture %d, minor %d \n",prop.major,prop.minor);
if(prop.major < 2)
{
fprintf(stderr,"Need compute capability 2 or higher.\n");
exit(1);
}
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i) {
if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0) {
GlobalSize = atoi(argv[i+1]);
i = i + 1;
}
// if (strcmp(argv[i], "--blocksize") == 0 || strcmp(argv[i], "-blocksize") == 0) {
// BlockSize = atoi(argv[i+1]);
// i = i + 1;
// }
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z10Av_ProductPfS_S_i
.globl _Z10Av_ProductPfS_S_i
.p2align 8
.type _Z10Av_ProductPfS_S_i,@function
_Z10Av_ProductPfS_S_i:
s_load_b32 s2, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_cmp_lt_i32 s2, 1
s_cbranch_scc1 .LBB0_13
s_load_b128 s[4:7], s[0:1], 0x0
v_dual_mov_b32 v5, 0 :: v_dual_lshlrev_b32 v6, 2, v0
s_mul_i32 s3, s2, s15
s_mul_i32 s9, s2, s2
s_delay_alu instid0(VALU_DEP_1)
v_dual_mov_b32 v2, 0 :: v_dual_add_nc_u32 v7, 0x400, v6
s_lshl_b32 s3, s3, 8
s_mov_b32 s10, 0
s_add_i32 s8, s3, s2
.LBB0_2:
v_add_nc_u32_e32 v3, s3, v0
s_mov_b32 s11, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_le_i32_e64 s9, v3
s_xor_b32 s11, exec_lo, s11
s_cbranch_execz .LBB0_4
ds_store_b32 v6, v2
.LBB0_4:
s_and_not1_saveexec_b32 s11, s11
s_cbranch_execz .LBB0_6
v_ashrrev_i32_e32 v4, 31, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[3:4]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v4, vcc_lo
global_load_b32 v1, v[3:4], off
s_waitcnt vmcnt(0)
ds_store_b32 v6, v1
.LBB0_6:
s_or_b32 exec_lo, exec_lo, s11
v_add_nc_u32_e32 v1, s10, v0
s_mov_b32 s11, exec_lo
s_delay_alu instid0(VALU_DEP_1)
v_cmpx_le_i32_e64 s2, v1
s_xor_b32 s11, exec_lo, s11
s_cbranch_execz .LBB0_8
ds_store_b32 v7, v2
.LBB0_8:
s_and_not1_saveexec_b32 s11, s11
s_cbranch_execz .LBB0_10
v_lshlrev_b64 v[3:4], 2, v[1:2]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v3, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v4, vcc_lo, s7, v4, vcc_lo
global_load_b32 v1, v[3:4], off
s_waitcnt vmcnt(0)
ds_store_b32 v7, v1
.LBB0_10:
s_or_b32 exec_lo, exec_lo, s11
s_mov_b32 s11, 0
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
.LBB0_11:
v_mov_b32_e32 v1, s11
s_add_i32 s11, s11, 4
s_delay_alu instid0(SALU_CYCLE_1)
s_cmpk_eq_i32 s11, 0x400
ds_load_2addr_stride64_b32 v[3:4], v1 offset1:4
s_waitcnt lgkmcnt(0)
v_fmac_f32_e32 v5, v3, v4
s_cbranch_scc0 .LBB0_11
s_addk_i32 s3, 0x100
s_addk_i32 s10, 0x100
s_cmp_ge_i32 s3, s8
s_barrier
buffer_gl0_inv
s_cbranch_scc0 .LBB0_2
s_branch .LBB0_14
.LBB0_13:
v_mov_b32_e32 v5, 0
.LBB0_14:
s_load_b64 s[0:1], s[0:1], 0x10
v_lshl_add_u32 v0, s15, 8, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_ashrrev_i32_e32 v1, 31, v0
v_lshlrev_b64 v[0:1], 2, v[0:1]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
global_store_b32 v[0:1], v5, off
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10Av_ProductPfS_S_i
.amdhsa_group_segment_fixed_size 2048
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 28
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 8
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z10Av_ProductPfS_S_i, .Lfunc_end0-_Z10Av_ProductPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z12ComputeLamdaPfS_S_i
.globl _Z12ComputeLamdaPfS_S_i
.p2align 8
.type _Z12ComputeLamdaPfS_S_i,@function
_Z12ComputeLamdaPfS_S_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x2c
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_mov_b32_e32 v2, 0
v_cmp_gt_u32_e32 vcc_lo, s3, v1
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB1_2
s_load_b128 s[4:7], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v3, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v4, vcc_lo, s5, v2, vcc_lo
v_add_co_u32 v1, vcc_lo, s6, v1
v_add_co_ci_u32_e32 v2, vcc_lo, s7, v2, vcc_lo
global_load_b32 v3, v[3:4], off
global_load_b32 v1, v[1:2], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v2, v3, v1
.LBB1_2:
s_or_b32 exec_lo, exec_lo, s3
v_lshl_add_u32 v1, v0, 2, 0
s_cmp_lt_u32 s2, 2
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB1_4
.p2align 6
.LBB1_3:
s_or_b32 exec_lo, exec_lo, s4
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s2, 4
s_mov_b32 s2, s3
.LBB1_4:
buffer_gl0_inv
s_cbranch_scc1 .LBB1_7
s_lshr_b32 s3, s2, 1
s_mov_b32 s4, exec_lo
v_cmpx_gt_u32_e64 s3, v0
s_cbranch_execz .LBB1_3
v_add_nc_u32_e32 v2, s3, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v2, v2, 2, 0
ds_load_b32 v3, v1
ds_load_b32 v2, v2
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB1_3
.LBB1_7:
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB1_11
s_mov_b32 s2, exec_lo
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mbcnt_lo_u32_b32 v0, s2, 0
v_cmp_eq_u32_e32 vcc_lo, 0, v0
s_and_b32 s3, exec_lo, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 exec_lo, s3
s_cbranch_execz .LBB1_11
s_load_b64 s[0:1], s[0:1], 0x10
s_bcnt1_i32_b32 s2, s2
v_mov_b32_e32 v3, 0
v_cvt_f32_ubyte0_e32 v1, s2
s_mov_b32 s2, 0
s_waitcnt lgkmcnt(0)
s_load_b32 s3, s[0:1], 0x0
v_mov_b32_e32 v0, 0
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
v_dual_mul_f32 v2, v0, v1 :: v_dual_mov_b32 v1, s3
.LBB1_10:
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v0, v1, v2
global_atomic_cmpswap_b32 v0, v3, v[0:1], s[0:1] glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v0, v1
v_mov_b32_e32 v1, v0
s_or_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB1_10
.LBB1_11:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z12ComputeLamdaPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 5
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z12ComputeLamdaPfS_S_i, .Lfunc_end1-_Z12ComputeLamdaPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z9FindNormWPfS_i
.globl _Z9FindNormWPfS_i
.p2align 8
.type _Z9FindNormWPfS_i,@function
_Z9FindNormWPfS_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
v_mov_b32_e32 v2, 0
v_cmp_gt_u32_e32 vcc_lo, s3, v1
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB2_2
s_load_b64 s[4:5], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[1:2], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v1, vcc_lo, s4, v1
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v2, vcc_lo, s5, v2, vcc_lo
global_load_b32 v2, v[1:2], off
.LBB2_2:
s_or_b32 exec_lo, exec_lo, s3
v_lshl_add_u32 v1, v0, 2, 0
s_cmp_lt_u32 s2, 2
s_waitcnt vmcnt(0)
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
ds_load_b32 v2, v1
s_waitcnt lgkmcnt(0)
v_mul_f32_e32 v2, v2, v2
ds_store_b32 v1, v2
s_waitcnt lgkmcnt(0)
s_barrier
s_branch .LBB2_4
.p2align 6
.LBB2_3:
s_or_b32 exec_lo, exec_lo, s4
s_waitcnt lgkmcnt(0)
s_barrier
s_cmp_lt_u32 s2, 4
s_mov_b32 s2, s3
.LBB2_4:
buffer_gl0_inv
s_cbranch_scc1 .LBB2_7
s_lshr_b32 s3, s2, 1
s_mov_b32 s4, exec_lo
v_cmpx_gt_u32_e64 s3, v0
s_cbranch_execz .LBB2_3
v_add_nc_u32_e32 v2, s3, v0
s_delay_alu instid0(VALU_DEP_1)
v_lshl_add_u32 v2, v2, 2, 0
ds_load_b32 v3, v1
ds_load_b32 v2, v2
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v2, v3, v2
ds_store_b32 v1, v2
s_branch .LBB2_3
.LBB2_7:
s_mov_b32 s2, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB2_11
s_mov_b32 s2, exec_lo
s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mbcnt_lo_u32_b32 v0, s2, 0
v_cmp_eq_u32_e32 vcc_lo, 0, v0
s_and_b32 s3, exec_lo, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_mov_b32 exec_lo, s3
s_cbranch_execz .LBB2_11
s_load_b64 s[0:1], s[0:1], 0x8
s_bcnt1_i32_b32 s2, s2
v_mov_b32_e32 v3, 0
v_cvt_f32_ubyte0_e32 v1, s2
s_mov_b32 s2, 0
s_waitcnt lgkmcnt(0)
s_load_b32 s3, s[0:1], 0x0
v_mov_b32_e32 v0, 0
ds_load_b32 v0, v0
s_waitcnt lgkmcnt(0)
v_dual_mul_f32 v2, v0, v1 :: v_dual_mov_b32 v1, s3
.LBB2_10:
s_delay_alu instid0(VALU_DEP_1)
v_add_f32_e32 v0, v1, v2
global_atomic_cmpswap_b32 v0, v3, v[0:1], s[0:1] glc
s_waitcnt vmcnt(0)
v_cmp_eq_u32_e32 vcc_lo, v0, v1
v_mov_b32_e32 v1, v0
s_or_b32 s2, vcc_lo, s2
s_delay_alu instid0(SALU_CYCLE_1)
s_and_not1_b32 exec_lo, exec_lo, s2
s_cbranch_execnz .LBB2_10
.LBB2_11:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z9FindNormWPfS_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 4
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end2:
.size _Z9FindNormWPfS_i, .Lfunc_end2-_Z9FindNormWPfS_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z10NormalizeWPfS_S_i
.globl _Z10NormalizeWPfS_S_i
.p2align 8
.type _Z10NormalizeWPfS_S_i,@function
_Z10NormalizeWPfS_S_i:
s_load_b32 s2, s[0:1], 0x2c
s_mov_b32 s3, exec_lo
v_cmpx_eq_u32_e32 0, v0
s_cbranch_execz .LBB3_2
s_load_b64 s[4:5], s[0:1], 0x8
s_waitcnt lgkmcnt(0)
s_load_b32 s4, s[4:5], 0x0
s_waitcnt lgkmcnt(0)
v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s4
ds_store_b32 v1, v2
.LBB3_2:
s_or_b32 exec_lo, exec_lo, s3
s_load_b32 s3, s[0:1], 0x18
s_waitcnt lgkmcnt(0)
s_and_b32 s2, 0xffff, s2
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_barrier
buffer_gl0_inv
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB3_4
s_load_b64 s[2:3], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_load_b64 s[0:1], s[0:1], 0x10
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s2, v0
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s3, v1, vcc_lo
global_load_b32 v2, v[2:3], off
v_mov_b32_e32 v3, 0
ds_load_b32 v3, v3
s_waitcnt vmcnt(0) lgkmcnt(0)
v_div_scale_f32 v4, null, v3, v3, v2
v_div_scale_f32 v7, vcc_lo, v2, v3, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_1)
v_rcp_f32_e32 v5, v4
s_waitcnt_depctr 0xfff
v_fma_f32 v6, -v4, v5, 1.0
v_fmac_f32_e32 v5, v6, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_f32_e32 v6, v7, v5
v_fma_f32 v8, -v4, v6, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fmac_f32_e32 v6, v8, v5
v_fma_f32 v4, -v4, v6, v7
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
v_div_fmas_f32 v4, v4, v5, v6
v_add_co_u32 v0, vcc_lo, s0, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
v_div_fixup_f32 v2, v4, v3, v2
global_store_b32 v[0:1], v2, off
.LBB3_4:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z10NormalizeWPfS_S_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 288
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 9
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end3:
.size _Z10NormalizeWPfS_S_i, .Lfunc_end3-_Z10NormalizeWPfS_S_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
.group_segment_fixed_size: 2048
.kernarg_segment_align: 8
.kernarg_segment_size: 28
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10Av_ProductPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10Av_ProductPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 8
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
- .offset: 152
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z12ComputeLamdaPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z12ComputeLamdaPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 5
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
- .offset: 144
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z9FindNormWPfS_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z9FindNormWPfS_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 4
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 4
.value_kind: hidden_block_count_x
- .offset: 36
.size: 4
.value_kind: hidden_block_count_y
- .offset: 40
.size: 4
.value_kind: hidden_block_count_z
- .offset: 44
.size: 2
.value_kind: hidden_group_size_x
- .offset: 46
.size: 2
.value_kind: hidden_group_size_y
- .offset: 48
.size: 2
.value_kind: hidden_group_size_z
- .offset: 50
.size: 2
.value_kind: hidden_remainder_x
- .offset: 52
.size: 2
.value_kind: hidden_remainder_y
- .offset: 54
.size: 2
.value_kind: hidden_remainder_z
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 96
.size: 2
.value_kind: hidden_grid_dims
- .offset: 152
.size: 4
.value_kind: hidden_dynamic_lds_size
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 288
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z10NormalizeWPfS_S_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z10NormalizeWPfS_S_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 9
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define LEN 1<<22
double seconds(){
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}
struct InnerArray{
float x[LEN];
float y[LEN];
};
void initialInnerArray(InnerArray *ip, int size){
for (int i = 0; i < size; i++){
ip->x[i] = (float)(rand() & 0xFF)/100.0f;
ip->y[i] = (float)(rand() & 0xFF)/100.0f;
}
return;
}
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n){
for (int idx = 0; idx < n; idx ++){
C->x[idx] = A->x[idx] + 10.f;
C->y[idx] = A->y[idx] + 20.f;
}
return;
}
void testInnerArrayHost2(InnerArray *A,InnerArray *C, const int n){
// used to verify that ip->x[i] is equal to (*ip).x[i]
for (int idx = 0;idx < n; idx++){
(*C).x[idx] = (*A).x[idx] + 10.f;
(*C).y[idx] = (*A).y[idx] + 20.f;
}
return;
}
void printfHostResult(InnerArray *C, const int n){
for (int idx = 0; idx < n; idx ++){
printf("printout idx %d: x %f y %f \n",idx,C->x[idx],C->y[idx]);
}
return;
}
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N){
double epsilon = 1.0e-8;
bool match = 1;
for(int i=0; i<N; i++){
if (abs(hostRef->x[i] - gpuRef->x[i])>epsilon){
match = 0;
printf("different on x %dth element: host %f gpu %f \n",i,hostRef->x[i],gpuRef->x[i]);
break;
}
if (abs(hostRef->y[i] - gpuRef->y[i])>epsilon){
match = 0;
printf("different on y %dth element: host %f gpu %f \n",i,hostRef->y[i],gpuRef->y[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerArray(InnerArray *data, InnerArray *result, const int n){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.0f;
tmpy += 20.0f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
__global__ void warmup(InnerArray *data, InnerArray *result, const int n){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.0f;
tmpy += 20.0f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
int main(int argc, char ** argv){
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,dev);
printf("%s test struct of array at ",argv[0]);
printf("device %d: %s \n\n",dev,deviceProp.name);
cudaSetDevice(dev);
// allocate host memory
int nElem = LEN;
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
InnerArray *hostRef2 = (InnerArray *)malloc(nBytes);
// initialize host array
initialInnerArray(h_A,nElem);
testInnerArrayHost(h_A,hostRef,nElem);
testInnerArrayHost(h_A,hostRef2,nElem);
checkInnerArray(hostRef,hostRef2,nElem);
// allocate memory on device
InnerArray *d_A,*d_C;
cudaMalloc((InnerArray**)&d_A,nBytes);
cudaMalloc((InnerArray**)&d_C,nBytes);
// copy data from host to device
cudaMemcpy(d_A,h_A,nBytes,cudaMemcpyHostToDevice);
// set up blocksize
int blocksize = 128;
if (argc>1) blocksize = atoi(argv[1]);
// execution config
dim3 block (blocksize,1);
dim3 grid((nElem+block.x-1)/block.x,1);
// kernel 1
double iStart = seconds();
warmup<<<grid,block>>>(d_A,d_C,nElem);
cudaDeviceSynchronize();
double iElaps = seconds() - iStart;
printf("warmup <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
// kernel 2
iStart = seconds();
testInnerArray<<<grid,block>>>(d_A,d_C,nElem);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
printf("innerarray <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
checkInnerArray(hostRef,gpuRef,nElem);
cudaGetLastError();
// free memory
cudaFree(d_A);
cudaFree(d_C);
free(h_A);
free(hostRef);
free(hostRef2);
free(gpuRef);
cudaDeviceReset();
return EXIT_SUCCESS;
} | code for sm_80
Function : _Z6warmupP10InnerArrayS0_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE.U32 R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fca00078e0005 */
/*0090*/ IADD3 R6, P0, R2, 0x1000000, RZ ; /* 0x0100000002067810 */
/* 0x000fe40007f1e0ff */
/*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea4000c1e1900 */
/*00b0*/ IADD3.X R7, RZ, R3, RZ, P0, !PT ; /* 0x00000003ff077210 */
/* 0x000fca00007fe4ff */
/*00c0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ee2000c1e1900 */
/*00d0*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fca00078e0005 */
/*00e0*/ IADD3 R8, P0, R4, 0x1000000, RZ ; /* 0x0100000004087810 */
/* 0x000fc80007f1e0ff */
/*00f0*/ IADD3.X R9, RZ, R5, RZ, P0, !PT ; /* 0x00000005ff097210 */
/* 0x000fe200007fe4ff */
/*0100*/ FADD R11, R2, 10 ; /* 0x41200000020b7421 */
/* 0x004fe40000000000 */
/*0110*/ FADD R13, R6, 20 ; /* 0x41a00000060d7421 */
/* 0x008fc60000000000 */
/*0120*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */
/* 0x000fe8000c101904 */
/*0130*/ STG.E [R8.64], R13 ; /* 0x0000000d08007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z14testInnerArrayP10InnerArrayS0_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE.U32 R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fca00078e0005 */
/*0090*/ IADD3 R6, P0, R2, 0x1000000, RZ ; /* 0x0100000002067810 */
/* 0x000fe40007f1e0ff */
/*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea4000c1e1900 */
/*00b0*/ IADD3.X R7, RZ, R3, RZ, P0, !PT ; /* 0x00000003ff077210 */
/* 0x000fca00007fe4ff */
/*00c0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ee2000c1e1900 */
/*00d0*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fca00078e0005 */
/*00e0*/ IADD3 R8, P0, R4, 0x1000000, RZ ; /* 0x0100000004087810 */
/* 0x000fc80007f1e0ff */
/*00f0*/ IADD3.X R9, RZ, R5, RZ, P0, !PT ; /* 0x00000005ff097210 */
/* 0x000fe200007fe4ff */
/*0100*/ FADD R11, R2, 10 ; /* 0x41200000020b7421 */
/* 0x004fe40000000000 */
/*0110*/ FADD R13, R6, 20 ; /* 0x41a00000060d7421 */
/* 0x008fc60000000000 */
/*0120*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */
/* 0x000fe8000c101904 */
/*0130*/ STG.E [R8.64], R13 ; /* 0x0000000d08007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define LEN 1<<22
double seconds(){
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}
struct InnerArray{
float x[LEN];
float y[LEN];
};
void initialInnerArray(InnerArray *ip, int size){
for (int i = 0; i < size; i++){
ip->x[i] = (float)(rand() & 0xFF)/100.0f;
ip->y[i] = (float)(rand() & 0xFF)/100.0f;
}
return;
}
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n){
for (int idx = 0; idx < n; idx ++){
C->x[idx] = A->x[idx] + 10.f;
C->y[idx] = A->y[idx] + 20.f;
}
return;
}
void testInnerArrayHost2(InnerArray *A,InnerArray *C, const int n){
// used to verify that ip->x[i] is equal to (*ip).x[i]
for (int idx = 0;idx < n; idx++){
(*C).x[idx] = (*A).x[idx] + 10.f;
(*C).y[idx] = (*A).y[idx] + 20.f;
}
return;
}
void printfHostResult(InnerArray *C, const int n){
for (int idx = 0; idx < n; idx ++){
printf("printout idx %d: x %f y %f \n",idx,C->x[idx],C->y[idx]);
}
return;
}
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N){
double epsilon = 1.0e-8;
bool match = 1;
for(int i=0; i<N; i++){
if (abs(hostRef->x[i] - gpuRef->x[i])>epsilon){
match = 0;
printf("different on x %dth element: host %f gpu %f \n",i,hostRef->x[i],gpuRef->x[i]);
break;
}
if (abs(hostRef->y[i] - gpuRef->y[i])>epsilon){
match = 0;
printf("different on y %dth element: host %f gpu %f \n",i,hostRef->y[i],gpuRef->y[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerArray(InnerArray *data, InnerArray *result, const int n){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.0f;
tmpy += 20.0f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
__global__ void warmup(InnerArray *data, InnerArray *result, const int n){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.0f;
tmpy += 20.0f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
int main(int argc, char ** argv){
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,dev);
printf("%s test struct of array at ",argv[0]);
printf("device %d: %s \n\n",dev,deviceProp.name);
cudaSetDevice(dev);
// allocate host memory
int nElem = LEN;
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
InnerArray *hostRef2 = (InnerArray *)malloc(nBytes);
// initialize host array
initialInnerArray(h_A,nElem);
testInnerArrayHost(h_A,hostRef,nElem);
testInnerArrayHost(h_A,hostRef2,nElem);
checkInnerArray(hostRef,hostRef2,nElem);
// allocate memory on device
InnerArray *d_A,*d_C;
cudaMalloc((InnerArray**)&d_A,nBytes);
cudaMalloc((InnerArray**)&d_C,nBytes);
// copy data from host to device
cudaMemcpy(d_A,h_A,nBytes,cudaMemcpyHostToDevice);
// set up blocksize
int blocksize = 128;
if (argc>1) blocksize = atoi(argv[1]);
// execution config
dim3 block (blocksize,1);
dim3 grid((nElem+block.x-1)/block.x,1);
// kernel 1
double iStart = seconds();
warmup<<<grid,block>>>(d_A,d_C,nElem);
cudaDeviceSynchronize();
double iElaps = seconds() - iStart;
printf("warmup <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
// kernel 2
iStart = seconds();
testInnerArray<<<grid,block>>>(d_A,d_C,nElem);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
printf("innerarray <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
checkInnerArray(hostRef,gpuRef,nElem);
cudaGetLastError();
// free memory
cudaFree(d_A);
cudaFree(d_C);
free(h_A);
free(hostRef);
free(hostRef2);
free(gpuRef);
cudaDeviceReset();
return EXIT_SUCCESS;
} | .file "tmpxft_00064e86_00000000-6_simpleMathSoA.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2066:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7secondsv
.type _Z7secondsv, @function
_Z7secondsv:
.LFB2057:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rsi
leaq 16(%rsp), %rdi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 24(%rsp), %xmm0
mulsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq 16(%rsp), %xmm1
addsd %xmm1, %xmm0
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z7secondsv, .-_Z7secondsv
.globl _Z17initialInnerArrayP10InnerArrayi
.type _Z17initialInnerArrayP10InnerArrayi, @function
_Z17initialInnerArrayP10InnerArrayi:
.LFB2058:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L12
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L9:
call rand@PLT
movzbl %al, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC1(%rip), %xmm0
movss %xmm0, (%rbx)
call rand@PLT
movzbl %al, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC1(%rip), %xmm0
movss %xmm0, 16777216(%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L9
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE2058:
.size _Z17initialInnerArrayP10InnerArrayi, .-_Z17initialInnerArrayP10InnerArrayi
.globl _Z18testInnerArrayHostP10InnerArrayS0_i
.type _Z18testInnerArrayHostP10InnerArrayS0_i, @function
_Z18testInnerArrayHostP10InnerArrayS0_i:
.LFB2059:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L15
movslq %edx, %rdx
salq $2, %rdx
movl $0, %eax
movss .LC2(%rip), %xmm2
movss .LC3(%rip), %xmm1
.L17:
movaps %xmm2, %xmm0
addss (%rdi,%rax), %xmm0
movss %xmm0, (%rsi,%rax)
movaps %xmm1, %xmm0
addss 16777216(%rdi,%rax), %xmm0
movss %xmm0, 16777216(%rsi,%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L17
.L15:
ret
.cfi_endproc
.LFE2059:
.size _Z18testInnerArrayHostP10InnerArrayS0_i, .-_Z18testInnerArrayHostP10InnerArrayS0_i
.globl _Z19testInnerArrayHost2P10InnerArrayS0_i
.type _Z19testInnerArrayHost2P10InnerArrayS0_i, @function
_Z19testInnerArrayHost2P10InnerArrayS0_i:
.LFB2060:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L19
movslq %edx, %rdx
salq $2, %rdx
movl $0, %eax
movss .LC2(%rip), %xmm2
movss .LC3(%rip), %xmm1
.L21:
movaps %xmm2, %xmm0
addss (%rdi,%rax), %xmm0
movss %xmm0, (%rsi,%rax)
movaps %xmm1, %xmm0
addss 16777216(%rdi,%rax), %xmm0
movss %xmm0, 16777216(%rsi,%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L21
.L19:
ret
.cfi_endproc
.LFE2060:
.size _Z19testInnerArrayHost2P10InnerArrayS0_i, .-_Z19testInnerArrayHost2P10InnerArrayS0_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "printout idx %d: x %f y %f \n"
.text
.globl _Z16printfHostResultP10InnerArrayi
.type _Z16printfHostResultP10InnerArrayi, @function
_Z16printfHostResultP10InnerArrayi:
.LFB2061:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L28
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbp
movslq %esi, %r12
movl $0, %ebx
leaq .LC4(%rip), %r13
.L25:
pxor %xmm0, %xmm0
cvtss2sd 0(%rbp,%rbx,4), %xmm0
pxor %xmm1, %xmm1
cvtss2sd 16777216(%rbp,%rbx,4), %xmm1
movl %ebx, %edx
movq %r13, %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq %r12, %rbx
jne .L25
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
ret
.cfi_endproc
.LFE2061:
.size _Z16printfHostResultP10InnerArrayi, .-_Z16printfHostResultP10InnerArrayi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC7:
.string "different on x %dth element: host %f gpu %f \n"
.align 8
.LC8:
.string "different on y %dth element: host %f gpu %f \n"
.section .rodata.str1.1
.LC9:
.string "Arrays do not match.\n\n"
.text
.globl _Z15checkInnerArrayP10InnerArrayS0_i
.type _Z15checkInnerArrayP10InnerArrayS0_i, @function
_Z15checkInnerArrayP10InnerArrayS0_i:
.LFB2062:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L45
subq $8, %rsp
.cfi_def_cfa_offset 16
movq %rdi, %rcx
movslq %edx, %rdx
movl $0, %eax
movss .LC5(%rip), %xmm4
movsd .LC6(%rip), %xmm3
movaps %xmm4, %xmm6
movapd %xmm3, %xmm5
.L38:
movss (%rcx,%rax,4), %xmm0
movss (%rsi,%rax,4), %xmm1
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm4, %xmm2
cvtss2sd %xmm2, %xmm2
comisd %xmm3, %xmm2
ja .L48
movss 16777216(%rcx,%rax,4), %xmm0
movss 16777216(%rsi,%rax,4), %xmm1
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm6, %xmm2
cvtss2sd %xmm2, %xmm2
comisd %xmm5, %xmm2
ja .L49
addq $1, %rax
cmpq %rdx, %rax
jne .L38
jmp .L31
.L48:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl %eax, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
.L35:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L31:
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L49:
.cfi_restore_state
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl %eax, %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
jmp .L35
.L45:
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _Z15checkInnerArrayP10InnerArrayS0_i, .-_Z15checkInnerArrayP10InnerArrayS0_i
.globl _Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i
.type _Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i, @function
_Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i:
.LFB2088:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L54
.L50:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L55
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L54:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14testInnerArrayP10InnerArrayS0_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L50
.L55:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i, .-_Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i
.globl _Z14testInnerArrayP10InnerArrayS0_i
.type _Z14testInnerArrayP10InnerArrayS0_i, @function
_Z14testInnerArrayP10InnerArrayS0_i:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z14testInnerArrayP10InnerArrayS0_i, .-_Z14testInnerArrayP10InnerArrayS0_i
.globl _Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i
.type _Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i, @function
_Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i:
.LFB2090:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L62
.L58:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L63
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L62:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6warmupP10InnerArrayS0_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L58
.L63:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2090:
.size _Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i, .-_Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i
.globl _Z6warmupP10InnerArrayS0_i
.type _Z6warmupP10InnerArrayS0_i, @function
_Z6warmupP10InnerArrayS0_i:
.LFB2091:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _Z6warmupP10InnerArrayS0_i, .-_Z6warmupP10InnerArrayS0_i
.section .rodata.str1.1
.LC10:
.string "%s test struct of array at "
.LC11:
.string "device %d: %s \n\n"
.section .rodata.str1.8
.align 8
.LC12:
.string "warmup <<<%3d,%3d>>> elapsed %f sec \n"
.align 8
.LC13:
.string "innerarray <<<%3d,%3d>>> elapsed %f sec \n"
.text
.globl main
.type main, @function
main:
.LFB2063:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1112, %rsp
.cfi_def_cfa_offset 1168
movl %edi, %r15d
movq %rsi, %r12
movq %fs:40, %rax
movq %rax, 1096(%rsp)
xorl %eax, %eax
leaq 64(%rsp), %rbx
movl $0, %esi
movq %rbx, %rdi
call cudaGetDeviceProperties_v2@PLT
movq (%r12), %rdx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rcx
movl $0, %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edi
call cudaSetDevice@PLT
movl $33554432, %edi
call malloc@PLT
movq %rax, %rbx
movl $33554432, %edi
call malloc@PLT
movq %rax, %rbp
movl $33554432, %edi
call malloc@PLT
movq %rax, (%rsp)
movl $33554432, %edi
call malloc@PLT
movq %rax, %r13
movl $4194304, %esi
movq %rbx, %rdi
call _Z17initialInnerArrayP10InnerArrayi
movl $4194304, %edx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z18testInnerArrayHostP10InnerArrayS0_i
movl $4194304, %edx
movq %r13, %rsi
movq %rbx, %rdi
call _Z18testInnerArrayHostP10InnerArrayS0_i
movl $4194304, %edx
movq %r13, %rsi
movq %rbp, %rdi
call _Z15checkInnerArrayP10InnerArrayS0_i
leaq 24(%rsp), %rdi
movl $33554432, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $33554432, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $33554432, %edx
movq %rbx, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $128, %r14d
cmpl $1, %r15d
jg .L73
.L67:
movl %r14d, %r15d
movl $1, 44(%rsp)
movl $1, 48(%rsp)
leal 4194303(%r14), %eax
movl $0, %edx
divl %r14d
movl %eax, %r12d
movl $1, 56(%rsp)
movl $1, 60(%rsp)
call _Z7secondsv
movsd %xmm0, 8(%rsp)
movl %r12d, 52(%rsp)
movl %r14d, 40(%rsp)
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 52(%rsp), %rdi
movl 60(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L74
.L68:
call cudaDeviceSynchronize@PLT
call _Z7secondsv
subsd 8(%rsp), %xmm0
movl %r15d, %ecx
movl %r12d, %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
call _Z7secondsv
movsd %xmm0, 8(%rsp)
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 52(%rsp), %rdi
movl 60(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L75
.L69:
call cudaDeviceSynchronize@PLT
call _Z7secondsv
subsd 8(%rsp), %xmm0
movl %r15d, %ecx
movl %r12d, %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movl $33554432, %edx
movq 32(%rsp), %rsi
movq (%rsp), %r14
movq %r14, %rdi
call cudaMemcpy@PLT
movl $4194304, %edx
movq %r14, %rsi
movq %rbp, %rdi
call _Z15checkInnerArrayP10InnerArrayS0_i
call cudaGetLastError@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq %rbx, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %r14, %rdi
call free@PLT
call cudaDeviceReset@PLT
movq 1096(%rsp), %rax
subq %fs:40, %rax
jne .L76
movl $0, %eax
addq $1112, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L73:
.cfi_restore_state
movq 8(%r12), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r14d
jmp .L67
.L74:
movl $4194304, %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i
jmp .L68
.L75:
movl $4194304, %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i
jmp .L69
.L76:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size main, .-main
.section .rodata.str1.1
.LC14:
.string "_Z6warmupP10InnerArrayS0_i"
.section .rodata.str1.8
.align 8
.LC15:
.string "_Z14testInnerArrayP10InnerArrayS0_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2093:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z6warmupP10InnerArrayS0_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z14testInnerArrayP10InnerArrayS0_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2093:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -1598689907
.long 1051772663
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1120403456
.align 4
.LC2:
.long 1092616192
.align 4
.LC3:
.long 1101004800
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC5:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC6:
.long -500134854
.long 1044740494
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define LEN 1<<22
double seconds(){
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}
struct InnerArray{
float x[LEN];
float y[LEN];
};
void initialInnerArray(InnerArray *ip, int size){
for (int i = 0; i < size; i++){
ip->x[i] = (float)(rand() & 0xFF)/100.0f;
ip->y[i] = (float)(rand() & 0xFF)/100.0f;
}
return;
}
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n){
for (int idx = 0; idx < n; idx ++){
C->x[idx] = A->x[idx] + 10.f;
C->y[idx] = A->y[idx] + 20.f;
}
return;
}
void testInnerArrayHost2(InnerArray *A,InnerArray *C, const int n){
// used to verify that ip->x[i] is equal to (*ip).x[i]
for (int idx = 0;idx < n; idx++){
(*C).x[idx] = (*A).x[idx] + 10.f;
(*C).y[idx] = (*A).y[idx] + 20.f;
}
return;
}
void printfHostResult(InnerArray *C, const int n){
for (int idx = 0; idx < n; idx ++){
printf("printout idx %d: x %f y %f \n",idx,C->x[idx],C->y[idx]);
}
return;
}
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N){
double epsilon = 1.0e-8;
bool match = 1;
for(int i=0; i<N; i++){
if (abs(hostRef->x[i] - gpuRef->x[i])>epsilon){
match = 0;
printf("different on x %dth element: host %f gpu %f \n",i,hostRef->x[i],gpuRef->x[i]);
break;
}
if (abs(hostRef->y[i] - gpuRef->y[i])>epsilon){
match = 0;
printf("different on y %dth element: host %f gpu %f \n",i,hostRef->y[i],gpuRef->y[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerArray(InnerArray *data, InnerArray *result, const int n){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.0f;
tmpy += 20.0f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
__global__ void warmup(InnerArray *data, InnerArray *result, const int n){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.0f;
tmpy += 20.0f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
int main(int argc, char ** argv){
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,dev);
printf("%s test struct of array at ",argv[0]);
printf("device %d: %s \n\n",dev,deviceProp.name);
cudaSetDevice(dev);
// allocate host memory
int nElem = LEN;
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
InnerArray *hostRef2 = (InnerArray *)malloc(nBytes);
// initialize host array
initialInnerArray(h_A,nElem);
testInnerArrayHost(h_A,hostRef,nElem);
testInnerArrayHost(h_A,hostRef2,nElem);
checkInnerArray(hostRef,hostRef2,nElem);
// allocate memory on device
InnerArray *d_A,*d_C;
cudaMalloc((InnerArray**)&d_A,nBytes);
cudaMalloc((InnerArray**)&d_C,nBytes);
// copy data from host to device
cudaMemcpy(d_A,h_A,nBytes,cudaMemcpyHostToDevice);
// set up blocksize
int blocksize = 128;
if (argc>1) blocksize = atoi(argv[1]);
// execution config
dim3 block (blocksize,1);
dim3 grid((nElem+block.x-1)/block.x,1);
// kernel 1
double iStart = seconds();
warmup<<<grid,block>>>(d_A,d_C,nElem);
cudaDeviceSynchronize();
double iElaps = seconds() - iStart;
printf("warmup <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
// kernel 2
iStart = seconds();
testInnerArray<<<grid,block>>>(d_A,d_C,nElem);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
printf("innerarray <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
checkInnerArray(hostRef,gpuRef,nElem);
cudaGetLastError();
// free memory
cudaFree(d_A);
cudaFree(d_C);
free(h_A);
free(hostRef);
free(hostRef2);
free(gpuRef);
cudaDeviceReset();
return EXIT_SUCCESS;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define LEN 1<<22
double seconds(){
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}
struct InnerArray{
float x[LEN];
float y[LEN];
};
void initialInnerArray(InnerArray *ip, int size){
for (int i = 0; i < size; i++){
ip->x[i] = (float)(rand() & 0xFF)/100.0f;
ip->y[i] = (float)(rand() & 0xFF)/100.0f;
}
return;
}
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n){
for (int idx = 0; idx < n; idx ++){
C->x[idx] = A->x[idx] + 10.f;
C->y[idx] = A->y[idx] + 20.f;
}
return;
}
void testInnerArrayHost2(InnerArray *A,InnerArray *C, const int n){
// used to verify that ip->x[i] is equal to (*ip).x[i]
for (int idx = 0;idx < n; idx++){
(*C).x[idx] = (*A).x[idx] + 10.f;
(*C).y[idx] = (*A).y[idx] + 20.f;
}
return;
}
void printfHostResult(InnerArray *C, const int n){
for (int idx = 0; idx < n; idx ++){
printf("printout idx %d: x %f y %f \n",idx,C->x[idx],C->y[idx]);
}
return;
}
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N){
double epsilon = 1.0e-8;
bool match = 1;
for(int i=0; i<N; i++){
if (abs(hostRef->x[i] - gpuRef->x[i])>epsilon){
match = 0;
printf("different on x %dth element: host %f gpu %f \n",i,hostRef->x[i],gpuRef->x[i]);
break;
}
if (abs(hostRef->y[i] - gpuRef->y[i])>epsilon){
match = 0;
printf("different on y %dth element: host %f gpu %f \n",i,hostRef->y[i],gpuRef->y[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerArray(InnerArray *data, InnerArray *result, const int n){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.0f;
tmpy += 20.0f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
__global__ void warmup(InnerArray *data, InnerArray *result, const int n){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.0f;
tmpy += 20.0f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
int main(int argc, char ** argv){
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp,dev);
printf("%s test struct of array at ",argv[0]);
printf("device %d: %s \n\n",dev,deviceProp.name);
hipSetDevice(dev);
// allocate host memory
int nElem = LEN;
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
InnerArray *hostRef2 = (InnerArray *)malloc(nBytes);
// initialize host array
initialInnerArray(h_A,nElem);
testInnerArrayHost(h_A,hostRef,nElem);
testInnerArrayHost(h_A,hostRef2,nElem);
checkInnerArray(hostRef,hostRef2,nElem);
// allocate memory on device
InnerArray *d_A,*d_C;
hipMalloc((InnerArray**)&d_A,nBytes);
hipMalloc((InnerArray**)&d_C,nBytes);
// copy data from host to device
hipMemcpy(d_A,h_A,nBytes,hipMemcpyHostToDevice);
// set up blocksize
int blocksize = 128;
if (argc>1) blocksize = atoi(argv[1]);
// execution config
dim3 block (blocksize,1);
dim3 grid((nElem+block.x-1)/block.x,1);
// kernel 1
double iStart = seconds();
warmup<<<grid,block>>>(d_A,d_C,nElem);
hipDeviceSynchronize();
double iElaps = seconds() - iStart;
printf("warmup <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
// kernel 2
iStart = seconds();
testInnerArray<<<grid,block>>>(d_A,d_C,nElem);
hipDeviceSynchronize();
iElaps = seconds() - iStart;
printf("innerarray <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
hipMemcpy(gpuRef,d_C,nBytes,hipMemcpyDeviceToHost);
checkInnerArray(hostRef,gpuRef,nElem);
hipGetLastError();
// free memory
hipFree(d_A);
hipFree(d_C);
free(h_A);
free(hostRef);
free(hostRef2);
free(gpuRef);
hipDeviceReset();
return EXIT_SUCCESS;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define LEN 1<<22
double seconds(){
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}
struct InnerArray{
float x[LEN];
float y[LEN];
};
void initialInnerArray(InnerArray *ip, int size){
for (int i = 0; i < size; i++){
ip->x[i] = (float)(rand() & 0xFF)/100.0f;
ip->y[i] = (float)(rand() & 0xFF)/100.0f;
}
return;
}
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n){
for (int idx = 0; idx < n; idx ++){
C->x[idx] = A->x[idx] + 10.f;
C->y[idx] = A->y[idx] + 20.f;
}
return;
}
void testInnerArrayHost2(InnerArray *A,InnerArray *C, const int n){
// used to verify that ip->x[i] is equal to (*ip).x[i]
for (int idx = 0;idx < n; idx++){
(*C).x[idx] = (*A).x[idx] + 10.f;
(*C).y[idx] = (*A).y[idx] + 20.f;
}
return;
}
void printfHostResult(InnerArray *C, const int n){
for (int idx = 0; idx < n; idx ++){
printf("printout idx %d: x %f y %f \n",idx,C->x[idx],C->y[idx]);
}
return;
}
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N){
double epsilon = 1.0e-8;
bool match = 1;
for(int i=0; i<N; i++){
if (abs(hostRef->x[i] - gpuRef->x[i])>epsilon){
match = 0;
printf("different on x %dth element: host %f gpu %f \n",i,hostRef->x[i],gpuRef->x[i]);
break;
}
if (abs(hostRef->y[i] - gpuRef->y[i])>epsilon){
match = 0;
printf("different on y %dth element: host %f gpu %f \n",i,hostRef->y[i],gpuRef->y[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerArray(InnerArray *data, InnerArray *result, const int n){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.0f;
tmpy += 20.0f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
__global__ void warmup(InnerArray *data, InnerArray *result, const int n){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.0f;
tmpy += 20.0f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
int main(int argc, char ** argv){
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp,dev);
printf("%s test struct of array at ",argv[0]);
printf("device %d: %s \n\n",dev,deviceProp.name);
hipSetDevice(dev);
// allocate host memory
int nElem = LEN;
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
InnerArray *hostRef2 = (InnerArray *)malloc(nBytes);
// initialize host array
initialInnerArray(h_A,nElem);
testInnerArrayHost(h_A,hostRef,nElem);
testInnerArrayHost(h_A,hostRef2,nElem);
checkInnerArray(hostRef,hostRef2,nElem);
// allocate memory on device
InnerArray *d_A,*d_C;
hipMalloc((InnerArray**)&d_A,nBytes);
hipMalloc((InnerArray**)&d_C,nBytes);
// copy data from host to device
hipMemcpy(d_A,h_A,nBytes,hipMemcpyHostToDevice);
// set up blocksize
int blocksize = 128;
if (argc>1) blocksize = atoi(argv[1]);
// execution config
dim3 block (blocksize,1);
dim3 grid((nElem+block.x-1)/block.x,1);
// kernel 1
double iStart = seconds();
warmup<<<grid,block>>>(d_A,d_C,nElem);
hipDeviceSynchronize();
double iElaps = seconds() - iStart;
printf("warmup <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
// kernel 2
iStart = seconds();
testInnerArray<<<grid,block>>>(d_A,d_C,nElem);
hipDeviceSynchronize();
iElaps = seconds() - iStart;
printf("innerarray <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
hipMemcpy(gpuRef,d_C,nBytes,hipMemcpyDeviceToHost);
checkInnerArray(hostRef,gpuRef,nElem);
hipGetLastError();
// free memory
hipFree(d_A);
hipFree(d_C);
free(h_A);
free(hostRef);
free(hostRef2);
free(gpuRef);
hipDeviceReset();
return EXIT_SUCCESS;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14testInnerArrayP10InnerArrayS0_i
.globl _Z14testInnerArrayP10InnerArrayS0_i
.p2align 8
.type _Z14testInnerArrayP10InnerArrayS0_i,@function
_Z14testInnerArrayP10InnerArrayS0_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, 0x1000000, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v3, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
s_clause 0x1
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(1)
v_add_f32_e32 v4, 0x41200000, v2
s_waitcnt vmcnt(0)
v_add_f32_e32 v5, 0x41a00000, v3
v_add_co_u32 v2, vcc_lo, 0x1000000, v0
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v1, vcc_lo
s_clause 0x1
global_store_b32 v[0:1], v4, off
global_store_b32 v[2:3], v5, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14testInnerArrayP10InnerArrayS0_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14testInnerArrayP10InnerArrayS0_i, .Lfunc_end0-_Z14testInnerArrayP10InnerArrayS0_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z6warmupP10InnerArrayS0_i
.globl _Z6warmupP10InnerArrayS0_i
.p2align 8
.type _Z6warmupP10InnerArrayS0_i,@function
_Z6warmupP10InnerArrayS0_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB1_2
s_load_b128 s[0:3], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, 0x1000000, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v3, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
s_clause 0x1
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(1)
v_add_f32_e32 v4, 0x41200000, v2
s_waitcnt vmcnt(0)
v_add_f32_e32 v5, 0x41a00000, v3
v_add_co_u32 v2, vcc_lo, 0x1000000, v0
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v1, vcc_lo
s_clause 0x1
global_store_b32 v[0:1], v4, off
global_store_b32 v[2:3], v5, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6warmupP10InnerArrayS0_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z6warmupP10InnerArrayS0_i, .Lfunc_end1-_Z6warmupP10InnerArrayS0_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14testInnerArrayP10InnerArrayS0_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14testInnerArrayP10InnerArrayS0_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6warmupP10InnerArrayS0_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6warmupP10InnerArrayS0_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define LEN (1<<22)
double seconds(){
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}
struct InnerArray{
float x[LEN];
float y[LEN];
};
void initialInnerArray(InnerArray *ip, int size){
for (int i = 0; i < size; i++){
ip->x[i] = (float)(rand() & 0xFF)/100.0f;
ip->y[i] = (float)(rand() & 0xFF)/100.0f;
}
return;
}
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n){
for (int idx = 0; idx < n; idx ++){
C->x[idx] = A->x[idx] + 10.f;
C->y[idx] = A->y[idx] + 20.f;
}
return;
}
void testInnerArrayHost2(InnerArray *A,InnerArray *C, const int n){
// used to verify that ip->x[i] is equivalent to (*ip).x[i]
for (int idx = 0;idx < n; idx++){
(*C).x[idx] = (*A).x[idx] + 10.f;
(*C).y[idx] = (*A).y[idx] + 20.f;
}
return;
}
void printfHostResult(InnerArray *C, const int n){
for (int idx = 0; idx < n; idx ++){
printf("printout idx %d: x %f y %f \n",idx,C->x[idx],C->y[idx]);
}
return;
}
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N){
double epsilon = 1.0e-8;
bool match = 1;
for(int i=0; i<N; i++){
if (abs(hostRef->x[i] - gpuRef->x[i])>epsilon){
match = 0;
printf("different on x %dth element: host %f gpu %f \n",i,hostRef->x[i],gpuRef->x[i]);
break;
}
if (abs(hostRef->y[i] - gpuRef->y[i])>epsilon){
match = 0;
printf("different on y %dth element: host %f gpu %f \n",i,hostRef->y[i],gpuRef->y[i]);
break;
}
}
if (!match) printf("Arrays do not match.\n\n");
}
__global__ void testInnerArray(InnerArray *data, InnerArray *result, const int n){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.0f;
tmpy += 20.0f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
__global__ void warmup(InnerArray *data, InnerArray *result, const int n){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float tmpx = data->x[i];
float tmpy = data->y[i];
tmpx += 10.0f;
tmpy += 20.0f;
result->x[i] = tmpx;
result->y[i] = tmpy;
}
}
int main(int argc, char ** argv){
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp,dev);
printf("%s test struct of array at ",argv[0]);
printf("device %d: %s \n\n",dev,deviceProp.name);
hipSetDevice(dev);
// allocate host memory
int nElem = LEN;
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
InnerArray *hostRef2 = (InnerArray *)malloc(nBytes);
// initialize host array
initialInnerArray(h_A,nElem);
testInnerArrayHost(h_A,hostRef,nElem);
testInnerArrayHost(h_A,hostRef2,nElem);
checkInnerArray(hostRef,hostRef2,nElem);
// allocate memory on device
InnerArray *d_A,*d_C;
hipMalloc((InnerArray**)&d_A,nBytes);
hipMalloc((InnerArray**)&d_C,nBytes);
// copy data from host to device
hipMemcpy(d_A,h_A,nBytes,hipMemcpyHostToDevice);
// set up blocksize
int blocksize = 128;
if (argc>1) blocksize = atoi(argv[1]);
// execution config
dim3 block (blocksize,1);
dim3 grid((nElem+block.x-1)/block.x,1);
// kernel 1
double iStart = seconds();
warmup<<<grid,block>>>(d_A,d_C,nElem);
hipDeviceSynchronize();
double iElaps = seconds() - iStart;
printf("warmup <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
// kernel 2
iStart = seconds();
testInnerArray<<<grid,block>>>(d_A,d_C,nElem);
hipDeviceSynchronize();
iElaps = seconds() - iStart;
printf("innerarray <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
hipMemcpy(gpuRef,d_C,nBytes,hipMemcpyDeviceToHost);
checkInnerArray(hostRef,gpuRef,nElem);
hipGetLastError();
// free device and host memory
hipFree(d_A);
hipFree(d_C);
free(h_A);
free(hostRef);
free(hostRef2);
free(gpuRef);
hipDeviceReset();
return EXIT_SUCCESS;
} | .text
.file "simpleMathSoA.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z7secondsv
.LCPI0_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z7secondsv
.p2align 4, 0x90
.type _Z7secondsv,@function
_Z7secondsv: # @_Z7secondsv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %rsp, %rdi
leaq 16(%rsp), %rsi
callq gettimeofday
cvtsi2sdq (%rsp), %xmm1
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z7secondsv, .Lfunc_end0-_Z7secondsv
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z17initialInnerArrayP10InnerArrayi
.LCPI1_0:
.long 0x42c80000 # float 100
.text
.globl _Z17initialInnerArrayP10InnerArrayi
.p2align 4, 0x90
.type _Z17initialInnerArrayP10InnerArrayi,@function
_Z17initialInnerArrayP10InnerArrayi: # @_Z17initialInnerArrayP10InnerArrayi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
divss %xmm1, %xmm0
movss %xmm0, (%rbx,%r15,4)
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI1_0(%rip), %xmm0
movss %xmm0, 16777216(%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z17initialInnerArrayP10InnerArrayi, .Lfunc_end1-_Z17initialInnerArrayP10InnerArrayi
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z18testInnerArrayHostP10InnerArrayS0_i
.LCPI2_0:
.long 0x41200000 # float 10
.LCPI2_1:
.long 0x41a00000 # float 20
.text
.globl _Z18testInnerArrayHostP10InnerArrayS0_i
.p2align 4, 0x90
.type _Z18testInnerArrayHostP10InnerArrayS0_i,@function
_Z18testInnerArrayHostP10InnerArrayS0_i: # @_Z18testInnerArrayHostP10InnerArrayS0_i
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB2_3
# %bb.1: # %.lr.ph.preheader
movl %edx, %eax
xorl %ecx, %ecx
movss .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss .LCPI2_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rdi,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm0, %xmm2
movss %xmm2, (%rsi,%rcx,4)
movss 16777216(%rdi,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm1, %xmm2
movss %xmm2, 16777216(%rsi,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB2_2
.LBB2_3: # %._crit_edge
retq
.Lfunc_end2:
.size _Z18testInnerArrayHostP10InnerArrayS0_i, .Lfunc_end2-_Z18testInnerArrayHostP10InnerArrayS0_i
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z19testInnerArrayHost2P10InnerArrayS0_i
.LCPI3_0:
.long 0x41200000 # float 10
.LCPI3_1:
.long 0x41a00000 # float 20
.text
.globl _Z19testInnerArrayHost2P10InnerArrayS0_i
.p2align 4, 0x90
.type _Z19testInnerArrayHost2P10InnerArrayS0_i,@function
_Z19testInnerArrayHost2P10InnerArrayS0_i: # @_Z19testInnerArrayHost2P10InnerArrayS0_i
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
movl %edx, %eax
xorl %ecx, %ecx
movss .LCPI3_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss .LCPI3_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rdi,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm0, %xmm2
movss %xmm2, (%rsi,%rcx,4)
movss 16777216(%rdi,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm1, %xmm2
movss %xmm2, 16777216(%rsi,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB3_2
.LBB3_3: # %._crit_edge
retq
.Lfunc_end3:
.size _Z19testInnerArrayHost2P10InnerArrayS0_i, .Lfunc_end3-_Z19testInnerArrayHost2P10InnerArrayS0_i
.cfi_endproc
# -- End function
.globl _Z16printfHostResultP10InnerArrayi # -- Begin function _Z16printfHostResultP10InnerArrayi
.p2align 4, 0x90
.type _Z16printfHostResultP10InnerArrayi,@function
_Z16printfHostResultP10InnerArrayi: # @_Z16printfHostResultP10InnerArrayi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB4_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r15d
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB4_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movss 16777216(%rbx,%r14,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
movl $.L.str, %edi
movl %r14d, %esi
movb $2, %al
callq printf
incq %r14
cmpq %r14, %r15
jne .LBB4_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB4_4: # %._crit_edge
retq
.Lfunc_end4:
.size _Z16printfHostResultP10InnerArrayi, .Lfunc_end4-_Z16printfHostResultP10InnerArrayi
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function _Z15checkInnerArrayP10InnerArrayS0_i
.LCPI5_0:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI5_1:
.quad 0x3e45798ee2308c3a # double 1.0E-8
.text
.globl _Z15checkInnerArrayP10InnerArrayS0_i
.p2align 4, 0x90
.type _Z15checkInnerArrayP10InnerArrayS0_i,@function
_Z15checkInnerArrayP10InnerArrayS0_i: # @_Z15checkInnerArrayP10InnerArrayS0_i
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB5_8
# %bb.1: # %.lr.ph.preheader
movq %rsi, %rax
movl %edx, %ecx
xorl %esi, %esi
movaps .LCPI5_0(%rip), %xmm2 # xmm2 = [NaN,NaN,NaN,NaN]
movsd .LCPI5_1(%rip), %xmm3 # xmm3 = mem[0],zero
.p2align 4, 0x90
.LBB5_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rdi,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%rax,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm0, %xmm4
subss %xmm1, %xmm4
andps %xmm2, %xmm4
cvtss2sd %xmm4, %xmm4
ucomisd %xmm3, %xmm4
ja .LBB5_3
# %bb.4: # in Loop: Header=BB5_2 Depth=1
movss 16777216(%rdi,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 16777216(%rax,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm0, %xmm4
subss %xmm1, %xmm4
andps %xmm2, %xmm4
cvtss2sd %xmm4, %xmm4
ucomisd %xmm3, %xmm4
ja .LBB5_5
# %bb.7: # in Loop: Header=BB5_2 Depth=1
incq %rsi
cmpq %rsi, %rcx
jne .LBB5_2
.LBB5_8: # %.critedge
retq
.LBB5_3:
movl $.L.str.1, %edi
jmp .LBB5_6
.LBB5_5:
movl $.L.str.2, %edi
.LBB5_6:
pushq %rax
.cfi_def_cfa_offset 16
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
# kill: def $esi killed $esi killed $rsi
movb $2, %al
callq printf
movl $.Lstr, %edi
popq %rax
.cfi_def_cfa_offset 8
jmp puts@PLT # TAILCALL
.Lfunc_end5:
.size _Z15checkInnerArrayP10InnerArrayS0_i, .Lfunc_end5-_Z15checkInnerArrayP10InnerArrayS0_i
.cfi_endproc
# -- End function
.globl _Z29__device_stub__testInnerArrayP10InnerArrayS0_i # -- Begin function _Z29__device_stub__testInnerArrayP10InnerArrayS0_i
.p2align 4, 0x90
.type _Z29__device_stub__testInnerArrayP10InnerArrayS0_i,@function
_Z29__device_stub__testInnerArrayP10InnerArrayS0_i: # @_Z29__device_stub__testInnerArrayP10InnerArrayS0_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14testInnerArrayP10InnerArrayS0_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end6:
.size _Z29__device_stub__testInnerArrayP10InnerArrayS0_i, .Lfunc_end6-_Z29__device_stub__testInnerArrayP10InnerArrayS0_i
.cfi_endproc
# -- End function
.globl _Z21__device_stub__warmupP10InnerArrayS0_i # -- Begin function _Z21__device_stub__warmupP10InnerArrayS0_i
.p2align 4, 0x90
.type _Z21__device_stub__warmupP10InnerArrayS0_i,@function
_Z21__device_stub__warmupP10InnerArrayS0_i: # @_Z21__device_stub__warmupP10InnerArrayS0_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6warmupP10InnerArrayS0_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end7:
.size _Z21__device_stub__warmupP10InnerArrayS0_i, .Lfunc_end7-_Z21__device_stub__warmupP10InnerArrayS0_i
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI8_0:
.long 0x42c80000 # float 100
.LCPI8_1:
.long 0x41200000 # float 10
.LCPI8_2:
.long 0x41a00000 # float 20
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI8_3:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI8_4:
.quad 0x3e45798ee2308c3a # double 1.0E-8
.LCPI8_5:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1608, %rsp # imm = 0x648
.cfi_def_cfa_offset 1664
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r13
movl %edi, 56(%rsp) # 4-byte Spill
leaq 136(%rsp), %rbx
movq %rbx, %rdi
xorl %esi, %esi
callq hipGetDevicePropertiesR0600
movq (%r13), %rsi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movl $.L.str.5, %edi
xorl %esi, %esi
movq %rbx, %rdx
xorl %eax, %eax
callq printf
xorl %edi, %edi
callq hipSetDevice
movl $33554432, %edi # imm = 0x2000000
callq malloc
movq %rax, %rbx
movl $33554432, %edi # imm = 0x2000000
callq malloc
movq %rax, %r15
movl $33554432, %edi # imm = 0x2000000
callq malloc
movq %rax, %r14
movl $33554432, %edi # imm = 0x2000000
callq malloc
movq %rax, %r12
movl $4194304, %ebp # imm = 0x400000
.p2align 4, 0x90
.LBB8_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss .LCPI8_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
divss %xmm1, %xmm0
movss %xmm0, -16777216(%rbx,%rbp,4)
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI8_0(%rip), %xmm0
movss %xmm0, (%rbx,%rbp,4)
incq %rbp
cmpq $8388608, %rbp # imm = 0x800000
jne .LBB8_1
# %bb.2: # %.lr.ph.i56.preheader
movl $4194304, %eax # imm = 0x400000
movss .LCPI8_1(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss .LCPI8_2(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB8_3: # %.lr.ph.i56
# =>This Inner Loop Header: Depth=1
movss -16777216(%rbx,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm0, %xmm2
movss %xmm2, -16777216(%r15,%rax,4)
movss (%rbx,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm1, %xmm2
movss %xmm2, (%r15,%rax,4)
incq %rax
cmpq $8388608, %rax # imm = 0x800000
jne .LBB8_3
# %bb.4: # %.lr.ph.i60.preheader
movl $4194304, %eax # imm = 0x400000
.p2align 4, 0x90
.LBB8_5: # %.lr.ph.i60
# =>This Inner Loop Header: Depth=1
movss -16777216(%rbx,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm0, %xmm2
movss %xmm2, -16777216(%r12,%rax,4)
movss (%rbx,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm1, %xmm2
movss %xmm2, (%r12,%rax,4)
incq %rax
cmpq $8388608, %rax # imm = 0x800000
jne .LBB8_5
# %bb.6: # %.lr.ph.i65.preheader
xorl %esi, %esi
movaps .LCPI8_3(%rip), %xmm3 # xmm3 = [NaN,NaN,NaN,NaN]
movsd .LCPI8_4(%rip), %xmm4 # xmm4 = mem[0],zero
.p2align 4, 0x90
.LBB8_7: # %.lr.ph.i65
# =>This Inner Loop Header: Depth=1
movss (%r15,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%r12,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm3, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm4, %xmm2
ja .LBB8_8
# %bb.9: # in Loop: Header=BB8_7 Depth=1
movss 16777216(%r15,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 16777216(%r12,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm3, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm4, %xmm2
ja .LBB8_10
# %bb.26: # in Loop: Header=BB8_7 Depth=1
incq %rsi
cmpq $4194304, %rsi # imm = 0x400000
jne .LBB8_7
jmp .LBB8_12
.LBB8_8:
movl $.L.str.1, %edi
jmp .LBB8_11
.LBB8_10:
movl $.L.str.2, %edi
.LBB8_11:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
# kill: def $esi killed $esi killed $rsi
movb $2, %al
callq printf
movl $.Lstr, %edi
callq puts@PLT
.LBB8_12: # %_Z15checkInnerArrayP10InnerArrayS0_i.exit
movabsq $4294967296, %rbp # imm = 0x100000000
leaq 72(%rsp), %rdi
movl $33554432, %esi # imm = 0x2000000
callq hipMalloc
leaq 64(%rsp), %rdi
movl $33554432, %esi # imm = 0x2000000
callq hipMalloc
movq 72(%rsp), %rdi
movl $33554432, %edx # imm = 0x2000000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
cmpl $2, 56(%rsp) # 4-byte Folded Reload
jl .LBB8_13
# %bb.14:
movq 8(%r13), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, %r13d
movq %rbp, %rcx
orq %rbp, %r13
jmp .LBB8_15
.LBB8_13:
movq %rbp, %rcx
leaq 128(%rbp), %r13
.LBB8_15:
leal 4194303(%r13), %eax
xorl %edx, %edx
divl %r13d
movl %eax, %ebp
movq %rbp, 56(%rsp) # 8-byte Spill
orq %rcx, %rbp
leaq 16(%rsp), %rdi
leaq 40(%rsp), %rsi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 16(%rsp), %xmm0
xorps %xmm1, %xmm1
cvtsi2sdq 24(%rsp), %xmm1
mulsd .LCPI8_5(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, 80(%rsp) # 8-byte Spill
movq %rbp, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB8_17
# %bb.16:
movq 72(%rsp), %rax
movq 64(%rsp), %rcx
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
movl $4194304, 12(%rsp) # imm = 0x400000
leaq 128(%rsp), %rax
movq %rax, 16(%rsp)
leaq 120(%rsp), %rax
movq %rax, 24(%rsp)
leaq 12(%rsp), %rax
movq %rax, 32(%rsp)
leaq 40(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z6warmupP10InnerArrayS0_i, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB8_17:
callq hipDeviceSynchronize
leaq 16(%rsp), %rdi
leaq 40(%rsp), %rsi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 16(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 24(%rsp), %xmm0
mulsd .LCPI8_5(%rip), %xmm0
addsd %xmm1, %xmm0
subsd 80(%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.6, %edi
movq 56(%rsp), %rsi # 8-byte Reload
# kill: def $esi killed $esi killed $rsi
movl %r13d, %edx
movb $1, %al
callq printf
leaq 16(%rsp), %rdi
leaq 40(%rsp), %rsi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 16(%rsp), %xmm0
xorps %xmm1, %xmm1
cvtsi2sdq 24(%rsp), %xmm1
mulsd .LCPI8_5(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, 80(%rsp) # 8-byte Spill
movq %rbp, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB8_19
# %bb.18:
movq 72(%rsp), %rax
movq 64(%rsp), %rcx
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
movl $4194304, 12(%rsp) # imm = 0x400000
leaq 128(%rsp), %rax
movq %rax, 16(%rsp)
leaq 120(%rsp), %rax
movq %rax, 24(%rsp)
leaq 12(%rsp), %rax
movq %rax, 32(%rsp)
leaq 40(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z14testInnerArrayP10InnerArrayS0_i, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB8_19:
callq hipDeviceSynchronize
leaq 16(%rsp), %rdi
leaq 40(%rsp), %rsi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 16(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 24(%rsp), %xmm0
mulsd .LCPI8_5(%rip), %xmm0
addsd %xmm1, %xmm0
subsd 80(%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.7, %edi
movq 56(%rsp), %rsi # 8-byte Reload
# kill: def $esi killed $esi killed $rsi
movl %r13d, %edx
movb $1, %al
callq printf
movq 64(%rsp), %rsi
movl $33554432, %edx # imm = 0x2000000
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %esi, %esi
movaps .LCPI8_3(%rip), %xmm3 # xmm3 = [NaN,NaN,NaN,NaN]
movsd .LCPI8_4(%rip), %xmm4 # xmm4 = mem[0],zero
.p2align 4, 0x90
.LBB8_20: # %.lr.ph.i75
# =>This Inner Loop Header: Depth=1
movss (%r15,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%r14,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm3, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm4, %xmm2
ja .LBB8_21
# %bb.22: # in Loop: Header=BB8_20 Depth=1
movss 16777216(%r15,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 16777216(%r14,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm3, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm4, %xmm2
ja .LBB8_23
# %bb.27: # in Loop: Header=BB8_20 Depth=1
incq %rsi
cmpq $4194304, %rsi # imm = 0x400000
jne .LBB8_20
jmp .LBB8_25
.LBB8_21:
movl $.L.str.1, %edi
jmp .LBB8_24
.LBB8_23:
movl $.L.str.2, %edi
.LBB8_24:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
# kill: def $esi killed $esi killed $rsi
movb $2, %al
callq printf
movl $.Lstr, %edi
callq puts@PLT
.LBB8_25: # %_Z15checkInnerArrayP10InnerArrayS0_i.exit80
callq hipGetLastError
movq 72(%rsp), %rdi
callq hipFree
movq 64(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
movq %r14, %rdi
callq free
callq hipDeviceReset
xorl %eax, %eax
addq $1608, %rsp # imm = 0x648
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end8:
.size main, .Lfunc_end8-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB9_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB9_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14testInnerArrayP10InnerArrayS0_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6warmupP10InnerArrayS0_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end9:
.size __hip_module_ctor, .Lfunc_end9-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB10_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB10_2:
retq
.Lfunc_end10:
.size __hip_module_dtor, .Lfunc_end10-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "printout idx %d: x %f y %f \n"
.size .L.str, 29
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "different on x %dth element: host %f gpu %f \n"
.size .L.str.1, 46
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "different on y %dth element: host %f gpu %f \n"
.size .L.str.2, 46
.type _Z14testInnerArrayP10InnerArrayS0_i,@object # @_Z14testInnerArrayP10InnerArrayS0_i
.section .rodata,"a",@progbits
.globl _Z14testInnerArrayP10InnerArrayS0_i
.p2align 3, 0x0
_Z14testInnerArrayP10InnerArrayS0_i:
.quad _Z29__device_stub__testInnerArrayP10InnerArrayS0_i
.size _Z14testInnerArrayP10InnerArrayS0_i, 8
.type _Z6warmupP10InnerArrayS0_i,@object # @_Z6warmupP10InnerArrayS0_i
.globl _Z6warmupP10InnerArrayS0_i
.p2align 3, 0x0
_Z6warmupP10InnerArrayS0_i:
.quad _Z21__device_stub__warmupP10InnerArrayS0_i
.size _Z6warmupP10InnerArrayS0_i, 8
.type .L.str.4,@object # @.str.4
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.4:
.asciz "%s test struct of array at "
.size .L.str.4, 28
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "device %d: %s \n\n"
.size .L.str.5, 17
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "warmup <<<%3d,%3d>>> elapsed %f sec \n"
.size .L.str.6, 38
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "innerarray <<<%3d,%3d>>> elapsed %f sec \n"
.size .L.str.7, 42
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14testInnerArrayP10InnerArrayS0_i"
.size .L__unnamed_1, 36
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z6warmupP10InnerArrayS0_i"
.size .L__unnamed_2, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Arrays do not match.\n"
.size .Lstr, 22
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__testInnerArrayP10InnerArrayS0_i
.addrsig_sym _Z21__device_stub__warmupP10InnerArrayS0_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14testInnerArrayP10InnerArrayS0_i
.addrsig_sym _Z6warmupP10InnerArrayS0_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly. | code for sm_80
Function : _Z6warmupP10InnerArrayS0_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE.U32 R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fca00078e0005 */
/*0090*/ IADD3 R6, P0, R2, 0x1000000, RZ ; /* 0x0100000002067810 */
/* 0x000fe40007f1e0ff */
/*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea4000c1e1900 */
/*00b0*/ IADD3.X R7, RZ, R3, RZ, P0, !PT ; /* 0x00000003ff077210 */
/* 0x000fca00007fe4ff */
/*00c0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ee2000c1e1900 */
/*00d0*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fca00078e0005 */
/*00e0*/ IADD3 R8, P0, R4, 0x1000000, RZ ; /* 0x0100000004087810 */
/* 0x000fc80007f1e0ff */
/*00f0*/ IADD3.X R9, RZ, R5, RZ, P0, !PT ; /* 0x00000005ff097210 */
/* 0x000fe200007fe4ff */
/*0100*/ FADD R11, R2, 10 ; /* 0x41200000020b7421 */
/* 0x004fe40000000000 */
/*0110*/ FADD R13, R6, 20 ; /* 0x41a00000060d7421 */
/* 0x008fc60000000000 */
/*0120*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */
/* 0x000fe8000c101904 */
/*0130*/ STG.E [R8.64], R13 ; /* 0x0000000d08007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
..........
Function : _Z14testInnerArrayP10InnerArrayS0_i
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R4, SR_CTAID.X ; /* 0x0000000000047919 */
/* 0x000e280000002500 */
/*0020*/ S2R R3, SR_TID.X ; /* 0x0000000000037919 */
/* 0x000e240000002100 */
/*0030*/ IMAD R4, R4, c[0x0][0x0], R3 ; /* 0x0000000004047a24 */
/* 0x001fca00078e0203 */
/*0040*/ ISETP.GE.U32.AND P0, PT, R4, c[0x0][0x170], PT ; /* 0x00005c0004007a0c */
/* 0x000fda0003f06070 */
/*0050*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*0060*/ HFMA2.MMA R5, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff057435 */
/* 0x000fe200000001ff */
/*0070*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fd20000000a00 */
/*0080*/ IMAD.WIDE.U32 R2, R4, R5, c[0x0][0x160] ; /* 0x0000580004027625 */
/* 0x000fca00078e0005 */
/*0090*/ IADD3 R6, P0, R2, 0x1000000, RZ ; /* 0x0100000002067810 */
/* 0x000fe40007f1e0ff */
/*00a0*/ LDG.E R2, [R2.64] ; /* 0x0000000402027981 */
/* 0x000ea4000c1e1900 */
/*00b0*/ IADD3.X R7, RZ, R3, RZ, P0, !PT ; /* 0x00000003ff077210 */
/* 0x000fca00007fe4ff */
/*00c0*/ LDG.E R6, [R6.64] ; /* 0x0000000406067981 */
/* 0x000ee2000c1e1900 */
/*00d0*/ IMAD.WIDE.U32 R4, R4, R5, c[0x0][0x168] ; /* 0x00005a0004047625 */
/* 0x000fca00078e0005 */
/*00e0*/ IADD3 R8, P0, R4, 0x1000000, RZ ; /* 0x0100000004087810 */
/* 0x000fc80007f1e0ff */
/*00f0*/ IADD3.X R9, RZ, R5, RZ, P0, !PT ; /* 0x00000005ff097210 */
/* 0x000fe200007fe4ff */
/*0100*/ FADD R11, R2, 10 ; /* 0x41200000020b7421 */
/* 0x004fe40000000000 */
/*0110*/ FADD R13, R6, 20 ; /* 0x41a00000060d7421 */
/* 0x008fc60000000000 */
/*0120*/ STG.E [R4.64], R11 ; /* 0x0000000b04007986 */
/* 0x000fe8000c101904 */
/*0130*/ STG.E [R8.64], R13 ; /* 0x0000000d08007986 */
/* 0x000fe2000c101904 */
/*0140*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*0150*/ BRA 0x150; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0160*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0170*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0180*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0190*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*01f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z14testInnerArrayP10InnerArrayS0_i
.globl _Z14testInnerArrayP10InnerArrayS0_i
.p2align 8
.type _Z14testInnerArrayP10InnerArrayS0_i,@function
_Z14testInnerArrayP10InnerArrayS0_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB0_2
s_load_b128 s[0:3], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, 0x1000000, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v3, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
s_clause 0x1
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(1)
v_add_f32_e32 v4, 0x41200000, v2
s_waitcnt vmcnt(0)
v_add_f32_e32 v5, 0x41a00000, v3
v_add_co_u32 v2, vcc_lo, 0x1000000, v0
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v1, vcc_lo
s_clause 0x1
global_store_b32 v[0:1], v4, off
global_store_b32 v[2:3], v5, off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z14testInnerArrayP10InnerArrayS0_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z14testInnerArrayP10InnerArrayS0_i, .Lfunc_end0-_Z14testInnerArrayP10InnerArrayS0_i
.section .AMDGPU.csdata,"",@progbits
.text
.protected _Z6warmupP10InnerArrayS0_i
.globl _Z6warmupP10InnerArrayS0_i
.p2align 8
.type _Z6warmupP10InnerArrayS0_i,@function
_Z6warmupP10InnerArrayS0_i:
s_clause 0x1
s_load_b32 s2, s[0:1], 0x24
s_load_b32 s3, s[0:1], 0x10
s_waitcnt lgkmcnt(0)
s_and_b32 s2, s2, 0xffff
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[1:2], null, s15, s2, v[0:1]
s_mov_b32 s2, exec_lo
v_cmpx_gt_u32_e64 s3, v1
s_cbranch_execz .LBB1_2
s_load_b128 s[0:3], s[0:1], 0x0
v_mov_b32_e32 v2, 0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[0:1], 2, v[1:2]
s_waitcnt lgkmcnt(0)
v_add_co_u32 v2, vcc_lo, s0, v0
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
v_add_co_u32 v4, vcc_lo, 0x1000000, v2
s_delay_alu instid0(VALU_DEP_2)
v_add_co_ci_u32_e32 v5, vcc_lo, 0, v3, vcc_lo
v_add_co_u32 v0, vcc_lo, s2, v0
s_clause 0x1
global_load_b32 v2, v[2:3], off
global_load_b32 v3, v[4:5], off
v_add_co_ci_u32_e32 v1, vcc_lo, s3, v1, vcc_lo
s_waitcnt vmcnt(1)
v_add_f32_e32 v4, 0x41200000, v2
s_waitcnt vmcnt(0)
v_add_f32_e32 v5, 0x41a00000, v3
v_add_co_u32 v2, vcc_lo, 0x1000000, v0
v_add_co_ci_u32_e32 v3, vcc_lo, 0, v1, vcc_lo
s_clause 0x1
global_store_b32 v[0:1], v4, off
global_store_b32 v[2:3], v5, off
.LBB1_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6warmupP10InnerArrayS0_i
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 280
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 6
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end1:
.size _Z6warmupP10InnerArrayS0_i, .Lfunc_end1-_Z6warmupP10InnerArrayS0_i
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z14testInnerArrayP10InnerArrayS0_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z14testInnerArrayP10InnerArrayS0_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .offset: 16
.size: 4
.value_kind: by_value
- .offset: 24
.size: 4
.value_kind: hidden_block_count_x
- .offset: 28
.size: 4
.value_kind: hidden_block_count_y
- .offset: 32
.size: 4
.value_kind: hidden_block_count_z
- .offset: 36
.size: 2
.value_kind: hidden_group_size_x
- .offset: 38
.size: 2
.value_kind: hidden_group_size_y
- .offset: 40
.size: 2
.value_kind: hidden_group_size_z
- .offset: 42
.size: 2
.value_kind: hidden_remainder_x
- .offset: 44
.size: 2
.value_kind: hidden_remainder_y
- .offset: 46
.size: 2
.value_kind: hidden_remainder_z
- .offset: 64
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 72
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 88
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 280
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6warmupP10InnerArrayS0_i
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6warmupP10InnerArrayS0_i.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 6
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly. | .file "tmpxft_00064e86_00000000-6_simpleMathSoA.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2066:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2066:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z7secondsv
.type _Z7secondsv, @function
_Z7secondsv:
.LFB2057:
.cfi_startproc
endbr64
subq $56, %rsp
.cfi_def_cfa_offset 64
movq %fs:40, %rax
movq %rax, 40(%rsp)
xorl %eax, %eax
leaq 8(%rsp), %rsi
leaq 16(%rsp), %rdi
call gettimeofday@PLT
pxor %xmm0, %xmm0
cvtsi2sdq 24(%rsp), %xmm0
mulsd .LC0(%rip), %xmm0
pxor %xmm1, %xmm1
cvtsi2sdq 16(%rsp), %xmm1
addsd %xmm1, %xmm0
movq 40(%rsp), %rax
subq %fs:40, %rax
jne .L6
addq $56, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L6:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2057:
.size _Z7secondsv, .-_Z7secondsv
.globl _Z17initialInnerArrayP10InnerArrayi
.type _Z17initialInnerArrayP10InnerArrayi, @function
_Z17initialInnerArrayP10InnerArrayi:
.LFB2058:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L12
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
pushq %rbx
.cfi_def_cfa_offset 24
.cfi_offset 3, -24
subq $8, %rsp
.cfi_def_cfa_offset 32
movq %rdi, %rbx
movslq %esi, %rsi
leaq (%rdi,%rsi,4), %rbp
.L9:
call rand@PLT
movzbl %al, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC1(%rip), %xmm0
movss %xmm0, (%rbx)
call rand@PLT
movzbl %al, %eax
pxor %xmm0, %xmm0
cvtsi2ssl %eax, %xmm0
divss .LC1(%rip), %xmm0
movss %xmm0, 16777216(%rbx)
addq $4, %rbx
cmpq %rbp, %rbx
jne .L9
addq $8, %rsp
.cfi_def_cfa_offset 24
popq %rbx
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
ret
.L12:
.cfi_restore 3
.cfi_restore 6
ret
.cfi_endproc
.LFE2058:
.size _Z17initialInnerArrayP10InnerArrayi, .-_Z17initialInnerArrayP10InnerArrayi
.globl _Z18testInnerArrayHostP10InnerArrayS0_i
.type _Z18testInnerArrayHostP10InnerArrayS0_i, @function
_Z18testInnerArrayHostP10InnerArrayS0_i:
.LFB2059:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L15
movslq %edx, %rdx
salq $2, %rdx
movl $0, %eax
movss .LC2(%rip), %xmm2
movss .LC3(%rip), %xmm1
.L17:
movaps %xmm2, %xmm0
addss (%rdi,%rax), %xmm0
movss %xmm0, (%rsi,%rax)
movaps %xmm1, %xmm0
addss 16777216(%rdi,%rax), %xmm0
movss %xmm0, 16777216(%rsi,%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L17
.L15:
ret
.cfi_endproc
.LFE2059:
.size _Z18testInnerArrayHostP10InnerArrayS0_i, .-_Z18testInnerArrayHostP10InnerArrayS0_i
.globl _Z19testInnerArrayHost2P10InnerArrayS0_i
.type _Z19testInnerArrayHost2P10InnerArrayS0_i, @function
_Z19testInnerArrayHost2P10InnerArrayS0_i:
.LFB2060:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L19
movslq %edx, %rdx
salq $2, %rdx
movl $0, %eax
movss .LC2(%rip), %xmm2
movss .LC3(%rip), %xmm1
.L21:
movaps %xmm2, %xmm0
addss (%rdi,%rax), %xmm0
movss %xmm0, (%rsi,%rax)
movaps %xmm1, %xmm0
addss 16777216(%rdi,%rax), %xmm0
movss %xmm0, 16777216(%rsi,%rax)
addq $4, %rax
cmpq %rdx, %rax
jne .L21
.L19:
ret
.cfi_endproc
.LFE2060:
.size _Z19testInnerArrayHost2P10InnerArrayS0_i, .-_Z19testInnerArrayHost2P10InnerArrayS0_i
.section .rodata.str1.1,"aMS",@progbits,1
.LC4:
.string "printout idx %d: x %f y %f \n"
.text
.globl _Z16printfHostResultP10InnerArrayi
.type _Z16printfHostResultP10InnerArrayi, @function
_Z16printfHostResultP10InnerArrayi:
.LFB2061:
.cfi_startproc
endbr64
testl %esi, %esi
jle .L28
pushq %r13
.cfi_def_cfa_offset 16
.cfi_offset 13, -16
pushq %r12
.cfi_def_cfa_offset 24
.cfi_offset 12, -24
pushq %rbp
.cfi_def_cfa_offset 32
.cfi_offset 6, -32
pushq %rbx
.cfi_def_cfa_offset 40
.cfi_offset 3, -40
subq $8, %rsp
.cfi_def_cfa_offset 48
movq %rdi, %rbp
movslq %esi, %r12
movl $0, %ebx
leaq .LC4(%rip), %r13
.L25:
pxor %xmm0, %xmm0
cvtss2sd 0(%rbp,%rbx,4), %xmm0
pxor %xmm1, %xmm1
cvtss2sd 16777216(%rbp,%rbx,4), %xmm1
movl %ebx, %edx
movq %r13, %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
addq $1, %rbx
cmpq %r12, %rbx
jne .L25
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %rbp
.cfi_def_cfa_offset 24
popq %r12
.cfi_def_cfa_offset 16
popq %r13
.cfi_def_cfa_offset 8
ret
.L28:
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
.cfi_restore 13
ret
.cfi_endproc
.LFE2061:
.size _Z16printfHostResultP10InnerArrayi, .-_Z16printfHostResultP10InnerArrayi
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC7:
.string "different on x %dth element: host %f gpu %f \n"
.align 8
.LC8:
.string "different on y %dth element: host %f gpu %f \n"
.section .rodata.str1.1
.LC9:
.string "Arrays do not match.\n\n"
.text
.globl _Z15checkInnerArrayP10InnerArrayS0_i
.type _Z15checkInnerArrayP10InnerArrayS0_i, @function
_Z15checkInnerArrayP10InnerArrayS0_i:
.LFB2062:
.cfi_startproc
endbr64
testl %edx, %edx
jle .L45
subq $8, %rsp
.cfi_def_cfa_offset 16
movq %rdi, %rcx
movslq %edx, %rdx
movl $0, %eax
movss .LC5(%rip), %xmm4
movsd .LC6(%rip), %xmm3
movaps %xmm4, %xmm6
movapd %xmm3, %xmm5
.L38:
movss (%rcx,%rax,4), %xmm0
movss (%rsi,%rax,4), %xmm1
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm4, %xmm2
cvtss2sd %xmm2, %xmm2
comisd %xmm3, %xmm2
ja .L48
movss 16777216(%rcx,%rax,4), %xmm0
movss 16777216(%rsi,%rax,4), %xmm1
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm6, %xmm2
cvtss2sd %xmm2, %xmm2
comisd %xmm5, %xmm2
ja .L49
addq $1, %rax
cmpq %rdx, %rax
jne .L38
jmp .L31
.L48:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl %eax, %edx
leaq .LC7(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
.L35:
leaq .LC9(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
.L31:
addq $8, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L49:
.cfi_restore_state
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
movl %eax, %edx
leaq .LC8(%rip), %rsi
movl $2, %edi
movl $2, %eax
call __printf_chk@PLT
jmp .L35
.L45:
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2062:
.size _Z15checkInnerArrayP10InnerArrayS0_i, .-_Z15checkInnerArrayP10InnerArrayS0_i
.globl _Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i
.type _Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i, @function
_Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i:
.LFB2088:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L54
.L50:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L55
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L54:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z14testInnerArrayP10InnerArrayS0_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L50
.L55:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2088:
.size _Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i, .-_Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i
.globl _Z14testInnerArrayP10InnerArrayS0_i
.type _Z14testInnerArrayP10InnerArrayS0_i, @function
_Z14testInnerArrayP10InnerArrayS0_i:
.LFB2089:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2089:
.size _Z14testInnerArrayP10InnerArrayS0_i, .-_Z14testInnerArrayP10InnerArrayS0_i
.globl _Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i
.type _Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i, @function
_Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i:
.LFB2090:
.cfi_startproc
endbr64
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 24(%rsp)
movq %rsi, 16(%rsp)
movl %edx, 12(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
leaq 24(%rsp), %rax
movq %rax, 96(%rsp)
leaq 16(%rsp), %rax
movq %rax, 104(%rsp)
leaq 12(%rsp), %rax
movq %rax, 112(%rsp)
movl $1, 48(%rsp)
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
leaq 40(%rsp), %rcx
leaq 32(%rsp), %rdx
leaq 60(%rsp), %rsi
leaq 48(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L62
.L58:
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L63
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L62:
.cfi_restore_state
pushq 40(%rsp)
.cfi_def_cfa_offset 152
pushq 40(%rsp)
.cfi_def_cfa_offset 160
leaq 112(%rsp), %r9
movq 76(%rsp), %rcx
movl 84(%rsp), %r8d
movq 64(%rsp), %rsi
movl 72(%rsp), %edx
leaq _Z6warmupP10InnerArrayS0_i(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 144
jmp .L58
.L63:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2090:
.size _Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i, .-_Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i
.globl _Z6warmupP10InnerArrayS0_i
.type _Z6warmupP10InnerArrayS0_i, @function
_Z6warmupP10InnerArrayS0_i:
.LFB2091:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2091:
.size _Z6warmupP10InnerArrayS0_i, .-_Z6warmupP10InnerArrayS0_i
.section .rodata.str1.1
.LC10:
.string "%s test struct of array at "
.LC11:
.string "device %d: %s \n\n"
.section .rodata.str1.8
.align 8
.LC12:
.string "warmup <<<%3d,%3d>>> elapsed %f sec \n"
.align 8
.LC13:
.string "innerarray <<<%3d,%3d>>> elapsed %f sec \n"
.text
.globl main
.type main, @function
main:
.LFB2063:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $1112, %rsp
.cfi_def_cfa_offset 1168
movl %edi, %r15d
movq %rsi, %r12
movq %fs:40, %rax
movq %rax, 1096(%rsp)
xorl %eax, %eax
leaq 64(%rsp), %rbx
movl $0, %esi
movq %rbx, %rdi
call cudaGetDeviceProperties_v2@PLT
movq (%r12), %rdx
leaq .LC10(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbx, %rcx
movl $0, %edx
leaq .LC11(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $0, %edi
call cudaSetDevice@PLT
movl $33554432, %edi
call malloc@PLT
movq %rax, %rbx
movl $33554432, %edi
call malloc@PLT
movq %rax, %rbp
movl $33554432, %edi
call malloc@PLT
movq %rax, (%rsp)
movl $33554432, %edi
call malloc@PLT
movq %rax, %r13
movl $4194304, %esi
movq %rbx, %rdi
call _Z17initialInnerArrayP10InnerArrayi
movl $4194304, %edx
movq %rbp, %rsi
movq %rbx, %rdi
call _Z18testInnerArrayHostP10InnerArrayS0_i
movl $4194304, %edx
movq %r13, %rsi
movq %rbx, %rdi
call _Z18testInnerArrayHostP10InnerArrayS0_i
movl $4194304, %edx
movq %r13, %rsi
movq %rbp, %rdi
call _Z15checkInnerArrayP10InnerArrayS0_i
leaq 24(%rsp), %rdi
movl $33554432, %esi
call cudaMalloc@PLT
leaq 32(%rsp), %rdi
movl $33554432, %esi
call cudaMalloc@PLT
movl $1, %ecx
movl $33554432, %edx
movq %rbx, %rsi
movq 24(%rsp), %rdi
call cudaMemcpy@PLT
movl $128, %r14d
cmpl $1, %r15d
jg .L73
.L67:
movl %r14d, %r15d
movl $1, 44(%rsp)
movl $1, 48(%rsp)
leal 4194303(%r14), %eax
movl $0, %edx
divl %r14d
movl %eax, %r12d
movl $1, 56(%rsp)
movl $1, 60(%rsp)
call _Z7secondsv
movsd %xmm0, 8(%rsp)
movl %r12d, 52(%rsp)
movl %r14d, 40(%rsp)
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 52(%rsp), %rdi
movl 60(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L74
.L68:
call cudaDeviceSynchronize@PLT
call _Z7secondsv
subsd 8(%rsp), %xmm0
movl %r15d, %ecx
movl %r12d, %edx
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
call _Z7secondsv
movsd %xmm0, 8(%rsp)
movl 48(%rsp), %ecx
movl $0, %r9d
movl $0, %r8d
movq 40(%rsp), %rdx
movq 52(%rsp), %rdi
movl 60(%rsp), %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L75
.L69:
call cudaDeviceSynchronize@PLT
call _Z7secondsv
subsd 8(%rsp), %xmm0
movl %r15d, %ecx
movl %r12d, %edx
leaq .LC13(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
movl $2, %ecx
movl $33554432, %edx
movq 32(%rsp), %rsi
movq (%rsp), %r14
movq %r14, %rdi
call cudaMemcpy@PLT
movl $4194304, %edx
movq %r14, %rsi
movq %rbp, %rdi
call _Z15checkInnerArrayP10InnerArrayS0_i
call cudaGetLastError@PLT
movq 24(%rsp), %rdi
call cudaFree@PLT
movq 32(%rsp), %rdi
call cudaFree@PLT
movq %rbx, %rdi
call free@PLT
movq %rbp, %rdi
call free@PLT
movq %r13, %rdi
call free@PLT
movq %r14, %rdi
call free@PLT
call cudaDeviceReset@PLT
movq 1096(%rsp), %rax
subq %fs:40, %rax
jne .L76
movl $0, %eax
addq $1112, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L73:
.cfi_restore_state
movq 8(%r12), %rdi
movl $10, %edx
movl $0, %esi
call __isoc23_strtol@PLT
movl %eax, %r14d
jmp .L67
.L74:
movl $4194304, %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z40__device_stub__Z6warmupP10InnerArrayS0_iP10InnerArrayS0_i
jmp .L68
.L75:
movl $4194304, %edx
movq 32(%rsp), %rsi
movq 24(%rsp), %rdi
call _Z49__device_stub__Z14testInnerArrayP10InnerArrayS0_iP10InnerArrayS0_i
jmp .L69
.L76:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2063:
.size main, .-main
.section .rodata.str1.1
.LC14:
.string "_Z6warmupP10InnerArrayS0_i"
.section .rodata.str1.8
.align 8
.LC15:
.string "_Z14testInnerArrayP10InnerArrayS0_i"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2093:
.cfi_startproc
endbr64
pushq %rbx
.cfi_def_cfa_offset 16
.cfi_offset 3, -16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rbx
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC14(%rip), %rdx
movq %rdx, %rcx
leaq _Z6warmupP10InnerArrayS0_i(%rip), %rsi
movq %rax, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC15(%rip), %rdx
movq %rdx, %rcx
leaq _Z14testInnerArrayP10InnerArrayS0_i(%rip), %rsi
movq %rbx, %rdi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
popq %rbx
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2093:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.section .rodata.cst8,"aM",@progbits,8
.align 8
.LC0:
.long -1598689907
.long 1051772663
.section .rodata.cst4,"aM",@progbits,4
.align 4
.LC1:
.long 1120403456
.align 4
.LC2:
.long 1092616192
.align 4
.LC3:
.long 1101004800
.section .rodata.cst16,"aM",@progbits,16
.align 16
.LC5:
.long 2147483647
.long 0
.long 0
.long 0
.section .rodata.cst8
.align 8
.LC6:
.long -500134854
.long 1044740494
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "simpleMathSoA.hip"
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0 # -- Begin function _Z7secondsv
.LCPI0_0:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl _Z7secondsv
.p2align 4, 0x90
.type _Z7secondsv,@function
_Z7secondsv: # @_Z7secondsv
.cfi_startproc
# %bb.0:
subq $24, %rsp
.cfi_def_cfa_offset 32
movq %rsp, %rdi
leaq 16(%rsp), %rsi
callq gettimeofday
cvtsi2sdq (%rsp), %xmm1
cvtsi2sdq 8(%rsp), %xmm0
mulsd .LCPI0_0(%rip), %xmm0
addsd %xmm1, %xmm0
addq $24, %rsp
.cfi_def_cfa_offset 8
retq
.Lfunc_end0:
.size _Z7secondsv, .Lfunc_end0-_Z7secondsv
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z17initialInnerArrayP10InnerArrayi
.LCPI1_0:
.long 0x42c80000 # float 100
.text
.globl _Z17initialInnerArrayP10InnerArrayi
.p2align 4, 0x90
.type _Z17initialInnerArrayP10InnerArrayi,@function
_Z17initialInnerArrayP10InnerArrayi: # @_Z17initialInnerArrayP10InnerArrayi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB1_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r14d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB1_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss .LCPI1_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
divss %xmm1, %xmm0
movss %xmm0, (%rbx,%r15,4)
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI1_0(%rip), %xmm0
movss %xmm0, 16777216(%rbx,%r15,4)
incq %r15
cmpq %r15, %r14
jne .LBB1_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB1_4: # %._crit_edge
retq
.Lfunc_end1:
.size _Z17initialInnerArrayP10InnerArrayi, .Lfunc_end1-_Z17initialInnerArrayP10InnerArrayi
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z18testInnerArrayHostP10InnerArrayS0_i
.LCPI2_0:
.long 0x41200000 # float 10
.LCPI2_1:
.long 0x41a00000 # float 20
.text
.globl _Z18testInnerArrayHostP10InnerArrayS0_i
.p2align 4, 0x90
.type _Z18testInnerArrayHostP10InnerArrayS0_i,@function
_Z18testInnerArrayHostP10InnerArrayS0_i: # @_Z18testInnerArrayHostP10InnerArrayS0_i
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB2_3
# %bb.1: # %.lr.ph.preheader
movl %edx, %eax
xorl %ecx, %ecx
movss .LCPI2_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss .LCPI2_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB2_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rdi,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm0, %xmm2
movss %xmm2, (%rsi,%rcx,4)
movss 16777216(%rdi,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm1, %xmm2
movss %xmm2, 16777216(%rsi,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB2_2
.LBB2_3: # %._crit_edge
retq
.Lfunc_end2:
.size _Z18testInnerArrayHostP10InnerArrayS0_i, .Lfunc_end2-_Z18testInnerArrayHostP10InnerArrayS0_i
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function _Z19testInnerArrayHost2P10InnerArrayS0_i
.LCPI3_0:
.long 0x41200000 # float 10
.LCPI3_1:
.long 0x41a00000 # float 20
.text
.globl _Z19testInnerArrayHost2P10InnerArrayS0_i
.p2align 4, 0x90
.type _Z19testInnerArrayHost2P10InnerArrayS0_i,@function
_Z19testInnerArrayHost2P10InnerArrayS0_i: # @_Z19testInnerArrayHost2P10InnerArrayS0_i
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB3_3
# %bb.1: # %.lr.ph.preheader
movl %edx, %eax
xorl %ecx, %ecx
movss .LCPI3_0(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss .LCPI3_1(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB3_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rdi,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm0, %xmm2
movss %xmm2, (%rsi,%rcx,4)
movss 16777216(%rdi,%rcx,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm1, %xmm2
movss %xmm2, 16777216(%rsi,%rcx,4)
incq %rcx
cmpq %rcx, %rax
jne .LBB3_2
.LBB3_3: # %._crit_edge
retq
.Lfunc_end3:
.size _Z19testInnerArrayHost2P10InnerArrayS0_i, .Lfunc_end3-_Z19testInnerArrayHost2P10InnerArrayS0_i
.cfi_endproc
# -- End function
.globl _Z16printfHostResultP10InnerArrayi # -- Begin function _Z16printfHostResultP10InnerArrayi
.p2align 4, 0x90
.type _Z16printfHostResultP10InnerArrayi,@function
_Z16printfHostResultP10InnerArrayi: # @_Z16printfHostResultP10InnerArrayi
.cfi_startproc
# %bb.0:
testl %esi, %esi
jle .LBB4_4
# %bb.1: # %.lr.ph.preheader
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %rbx
.cfi_def_cfa_offset 32
.cfi_offset %rbx, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rdi, %rbx
movl %esi, %r15d
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB4_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rbx,%r14,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movss 16777216(%rbx,%r14,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
cvtss2sd %xmm1, %xmm1
movl $.L.str, %edi
movl %r14d, %esi
movb $2, %al
callq printf
incq %r14
cmpq %r14, %r15
jne .LBB4_2
# %bb.3:
popq %rbx
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
.cfi_restore %rbx
.cfi_restore %r14
.cfi_restore %r15
.LBB4_4: # %._crit_edge
retq
.Lfunc_end4:
.size _Z16printfHostResultP10InnerArrayi, .Lfunc_end4-_Z16printfHostResultP10InnerArrayi
.cfi_endproc
# -- End function
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0 # -- Begin function _Z15checkInnerArrayP10InnerArrayS0_i
.LCPI5_0:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI5_1:
.quad 0x3e45798ee2308c3a # double 1.0E-8
.text
.globl _Z15checkInnerArrayP10InnerArrayS0_i
.p2align 4, 0x90
.type _Z15checkInnerArrayP10InnerArrayS0_i,@function
_Z15checkInnerArrayP10InnerArrayS0_i: # @_Z15checkInnerArrayP10InnerArrayS0_i
.cfi_startproc
# %bb.0:
testl %edx, %edx
jle .LBB5_8
# %bb.1: # %.lr.ph.preheader
movq %rsi, %rax
movl %edx, %ecx
xorl %esi, %esi
movaps .LCPI5_0(%rip), %xmm2 # xmm2 = [NaN,NaN,NaN,NaN]
movsd .LCPI5_1(%rip), %xmm3 # xmm3 = mem[0],zero
.p2align 4, 0x90
.LBB5_2: # %.lr.ph
# =>This Inner Loop Header: Depth=1
movss (%rdi,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%rax,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm0, %xmm4
subss %xmm1, %xmm4
andps %xmm2, %xmm4
cvtss2sd %xmm4, %xmm4
ucomisd %xmm3, %xmm4
ja .LBB5_3
# %bb.4: # in Loop: Header=BB5_2 Depth=1
movss 16777216(%rdi,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 16777216(%rax,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm0, %xmm4
subss %xmm1, %xmm4
andps %xmm2, %xmm4
cvtss2sd %xmm4, %xmm4
ucomisd %xmm3, %xmm4
ja .LBB5_5
# %bb.7: # in Loop: Header=BB5_2 Depth=1
incq %rsi
cmpq %rsi, %rcx
jne .LBB5_2
.LBB5_8: # %.critedge
retq
.LBB5_3:
movl $.L.str.1, %edi
jmp .LBB5_6
.LBB5_5:
movl $.L.str.2, %edi
.LBB5_6:
pushq %rax
.cfi_def_cfa_offset 16
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
# kill: def $esi killed $esi killed $rsi
movb $2, %al
callq printf
movl $.Lstr, %edi
popq %rax
.cfi_def_cfa_offset 8
jmp puts@PLT # TAILCALL
.Lfunc_end5:
.size _Z15checkInnerArrayP10InnerArrayS0_i, .Lfunc_end5-_Z15checkInnerArrayP10InnerArrayS0_i
.cfi_endproc
# -- End function
.globl _Z29__device_stub__testInnerArrayP10InnerArrayS0_i # -- Begin function _Z29__device_stub__testInnerArrayP10InnerArrayS0_i
.p2align 4, 0x90
.type _Z29__device_stub__testInnerArrayP10InnerArrayS0_i,@function
_Z29__device_stub__testInnerArrayP10InnerArrayS0_i: # @_Z29__device_stub__testInnerArrayP10InnerArrayS0_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z14testInnerArrayP10InnerArrayS0_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end6:
.size _Z29__device_stub__testInnerArrayP10InnerArrayS0_i, .Lfunc_end6-_Z29__device_stub__testInnerArrayP10InnerArrayS0_i
.cfi_endproc
# -- End function
.globl _Z21__device_stub__warmupP10InnerArrayS0_i # -- Begin function _Z21__device_stub__warmupP10InnerArrayS0_i
.p2align 4, 0x90
.type _Z21__device_stub__warmupP10InnerArrayS0_i,@function
_Z21__device_stub__warmupP10InnerArrayS0_i: # @_Z21__device_stub__warmupP10InnerArrayS0_i
.cfi_startproc
# %bb.0:
subq $104, %rsp
.cfi_def_cfa_offset 112
movq %rdi, 72(%rsp)
movq %rsi, 64(%rsp)
movl %edx, 12(%rsp)
leaq 72(%rsp), %rax
movq %rax, 80(%rsp)
leaq 64(%rsp), %rax
movq %rax, 88(%rsp)
leaq 12(%rsp), %rax
movq %rax, 96(%rsp)
leaq 48(%rsp), %rdi
leaq 32(%rsp), %rsi
leaq 24(%rsp), %rdx
leaq 16(%rsp), %rcx
callq __hipPopCallConfiguration
movq 48(%rsp), %rsi
movl 56(%rsp), %edx
movq 32(%rsp), %rcx
movl 40(%rsp), %r8d
leaq 80(%rsp), %r9
movl $_Z6warmupP10InnerArrayS0_i, %edi
pushq 16(%rsp)
.cfi_adjust_cfa_offset 8
pushq 32(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $120, %rsp
.cfi_adjust_cfa_offset -120
retq
.Lfunc_end7:
.size _Z21__device_stub__warmupP10InnerArrayS0_i, .Lfunc_end7-_Z21__device_stub__warmupP10InnerArrayS0_i
.cfi_endproc
# -- End function
.section .rodata.cst4,"aM",@progbits,4
.p2align 2, 0x0 # -- Begin function main
.LCPI8_0:
.long 0x42c80000 # float 100
.LCPI8_1:
.long 0x41200000 # float 10
.LCPI8_2:
.long 0x41a00000 # float 20
.section .rodata.cst16,"aM",@progbits,16
.p2align 4, 0x0
.LCPI8_3:
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.long 0x7fffffff # float NaN
.section .rodata.cst8,"aM",@progbits,8
.p2align 3, 0x0
.LCPI8_4:
.quad 0x3e45798ee2308c3a # double 1.0E-8
.LCPI8_5:
.quad 0x3eb0c6f7a0b5ed8d # double 9.9999999999999995E-7
.text
.globl main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $1608, %rsp # imm = 0x648
.cfi_def_cfa_offset 1664
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %r13
movl %edi, 56(%rsp) # 4-byte Spill
leaq 136(%rsp), %rbx
movq %rbx, %rdi
xorl %esi, %esi
callq hipGetDevicePropertiesR0600
movq (%r13), %rsi
movl $.L.str.4, %edi
xorl %eax, %eax
callq printf
movl $.L.str.5, %edi
xorl %esi, %esi
movq %rbx, %rdx
xorl %eax, %eax
callq printf
xorl %edi, %edi
callq hipSetDevice
movl $33554432, %edi # imm = 0x2000000
callq malloc
movq %rax, %rbx
movl $33554432, %edi # imm = 0x2000000
callq malloc
movq %rax, %r15
movl $33554432, %edi # imm = 0x2000000
callq malloc
movq %rax, %r14
movl $33554432, %edi # imm = 0x2000000
callq malloc
movq %rax, %r12
movl $4194304, %ebp # imm = 0x400000
.p2align 4, 0x90
.LBB8_1: # %.lr.ph.i
# =>This Inner Loop Header: Depth=1
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
movss .LCPI8_0(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
divss %xmm1, %xmm0
movss %xmm0, -16777216(%rbx,%rbp,4)
callq rand
movzbl %al, %eax
xorps %xmm0, %xmm0
cvtsi2ss %eax, %xmm0
divss .LCPI8_0(%rip), %xmm0
movss %xmm0, (%rbx,%rbp,4)
incq %rbp
cmpq $8388608, %rbp # imm = 0x800000
jne .LBB8_1
# %bb.2: # %.lr.ph.i56.preheader
movl $4194304, %eax # imm = 0x400000
movss .LCPI8_1(%rip), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss .LCPI8_2(%rip), %xmm1 # xmm1 = mem[0],zero,zero,zero
.p2align 4, 0x90
.LBB8_3: # %.lr.ph.i56
# =>This Inner Loop Header: Depth=1
movss -16777216(%rbx,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm0, %xmm2
movss %xmm2, -16777216(%r15,%rax,4)
movss (%rbx,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm1, %xmm2
movss %xmm2, (%r15,%rax,4)
incq %rax
cmpq $8388608, %rax # imm = 0x800000
jne .LBB8_3
# %bb.4: # %.lr.ph.i60.preheader
movl $4194304, %eax # imm = 0x400000
.p2align 4, 0x90
.LBB8_5: # %.lr.ph.i60
# =>This Inner Loop Header: Depth=1
movss -16777216(%rbx,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm0, %xmm2
movss %xmm2, -16777216(%r12,%rax,4)
movss (%rbx,%rax,4), %xmm2 # xmm2 = mem[0],zero,zero,zero
addss %xmm1, %xmm2
movss %xmm2, (%r12,%rax,4)
incq %rax
cmpq $8388608, %rax # imm = 0x800000
jne .LBB8_5
# %bb.6: # %.lr.ph.i65.preheader
xorl %esi, %esi
movaps .LCPI8_3(%rip), %xmm3 # xmm3 = [NaN,NaN,NaN,NaN]
movsd .LCPI8_4(%rip), %xmm4 # xmm4 = mem[0],zero
.p2align 4, 0x90
.LBB8_7: # %.lr.ph.i65
# =>This Inner Loop Header: Depth=1
movss (%r15,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%r12,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm3, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm4, %xmm2
ja .LBB8_8
# %bb.9: # in Loop: Header=BB8_7 Depth=1
movss 16777216(%r15,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 16777216(%r12,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm3, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm4, %xmm2
ja .LBB8_10
# %bb.26: # in Loop: Header=BB8_7 Depth=1
incq %rsi
cmpq $4194304, %rsi # imm = 0x400000
jne .LBB8_7
jmp .LBB8_12
.LBB8_8:
movl $.L.str.1, %edi
jmp .LBB8_11
.LBB8_10:
movl $.L.str.2, %edi
.LBB8_11:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
# kill: def $esi killed $esi killed $rsi
movb $2, %al
callq printf
movl $.Lstr, %edi
callq puts@PLT
.LBB8_12: # %_Z15checkInnerArrayP10InnerArrayS0_i.exit
movabsq $4294967296, %rbp # imm = 0x100000000
leaq 72(%rsp), %rdi
movl $33554432, %esi # imm = 0x2000000
callq hipMalloc
leaq 64(%rsp), %rdi
movl $33554432, %esi # imm = 0x2000000
callq hipMalloc
movq 72(%rsp), %rdi
movl $33554432, %edx # imm = 0x2000000
movq %rbx, %rsi
movl $1, %ecx
callq hipMemcpy
cmpl $2, 56(%rsp) # 4-byte Folded Reload
jl .LBB8_13
# %bb.14:
movq 8(%r13), %rdi
xorl %esi, %esi
movl $10, %edx
callq __isoc23_strtol
movl %eax, %r13d
movq %rbp, %rcx
orq %rbp, %r13
jmp .LBB8_15
.LBB8_13:
movq %rbp, %rcx
leaq 128(%rbp), %r13
.LBB8_15:
leal 4194303(%r13), %eax
xorl %edx, %edx
divl %r13d
movl %eax, %ebp
movq %rbp, 56(%rsp) # 8-byte Spill
orq %rcx, %rbp
leaq 16(%rsp), %rdi
leaq 40(%rsp), %rsi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 16(%rsp), %xmm0
xorps %xmm1, %xmm1
cvtsi2sdq 24(%rsp), %xmm1
mulsd .LCPI8_5(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, 80(%rsp) # 8-byte Spill
movq %rbp, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB8_17
# %bb.16:
movq 72(%rsp), %rax
movq 64(%rsp), %rcx
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
movl $4194304, 12(%rsp) # imm = 0x400000
leaq 128(%rsp), %rax
movq %rax, 16(%rsp)
leaq 120(%rsp), %rax
movq %rax, 24(%rsp)
leaq 12(%rsp), %rax
movq %rax, 32(%rsp)
leaq 40(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z6warmupP10InnerArrayS0_i, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB8_17:
callq hipDeviceSynchronize
leaq 16(%rsp), %rdi
leaq 40(%rsp), %rsi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 16(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 24(%rsp), %xmm0
mulsd .LCPI8_5(%rip), %xmm0
addsd %xmm1, %xmm0
subsd 80(%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.6, %edi
movq 56(%rsp), %rsi # 8-byte Reload
# kill: def $esi killed $esi killed $rsi
movl %r13d, %edx
movb $1, %al
callq printf
leaq 16(%rsp), %rdi
leaq 40(%rsp), %rsi
callq gettimeofday
xorps %xmm0, %xmm0
cvtsi2sdq 16(%rsp), %xmm0
xorps %xmm1, %xmm1
cvtsi2sdq 24(%rsp), %xmm1
mulsd .LCPI8_5(%rip), %xmm1
addsd %xmm0, %xmm1
movsd %xmm1, 80(%rsp) # 8-byte Spill
movq %rbp, %rdi
movl $1, %esi
movq %r13, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB8_19
# %bb.18:
movq 72(%rsp), %rax
movq 64(%rsp), %rcx
movq %rax, 128(%rsp)
movq %rcx, 120(%rsp)
movl $4194304, 12(%rsp) # imm = 0x400000
leaq 128(%rsp), %rax
movq %rax, 16(%rsp)
leaq 120(%rsp), %rax
movq %rax, 24(%rsp)
leaq 12(%rsp), %rax
movq %rax, 32(%rsp)
leaq 40(%rsp), %rdi
leaq 104(%rsp), %rsi
leaq 96(%rsp), %rdx
leaq 88(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 104(%rsp), %rcx
movl 112(%rsp), %r8d
leaq 16(%rsp), %r9
movl $_Z14testInnerArrayP10InnerArrayS0_i, %edi
pushq 88(%rsp)
.cfi_adjust_cfa_offset 8
pushq 104(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB8_19:
callq hipDeviceSynchronize
leaq 16(%rsp), %rdi
leaq 40(%rsp), %rsi
callq gettimeofday
xorps %xmm1, %xmm1
cvtsi2sdq 16(%rsp), %xmm1
xorps %xmm0, %xmm0
cvtsi2sdq 24(%rsp), %xmm0
mulsd .LCPI8_5(%rip), %xmm0
addsd %xmm1, %xmm0
subsd 80(%rsp), %xmm0 # 8-byte Folded Reload
movl $.L.str.7, %edi
movq 56(%rsp), %rsi # 8-byte Reload
# kill: def $esi killed $esi killed $rsi
movl %r13d, %edx
movb $1, %al
callq printf
movq 64(%rsp), %rsi
movl $33554432, %edx # imm = 0x2000000
movq %r14, %rdi
movl $2, %ecx
callq hipMemcpy
xorl %esi, %esi
movaps .LCPI8_3(%rip), %xmm3 # xmm3 = [NaN,NaN,NaN,NaN]
movsd .LCPI8_4(%rip), %xmm4 # xmm4 = mem[0],zero
.p2align 4, 0x90
.LBB8_20: # %.lr.ph.i75
# =>This Inner Loop Header: Depth=1
movss (%r15,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss (%r14,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm3, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm4, %xmm2
ja .LBB8_21
# %bb.22: # in Loop: Header=BB8_20 Depth=1
movss 16777216(%r15,%rsi,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
movss 16777216(%r14,%rsi,4), %xmm1 # xmm1 = mem[0],zero,zero,zero
movaps %xmm0, %xmm2
subss %xmm1, %xmm2
andps %xmm3, %xmm2
cvtss2sd %xmm2, %xmm2
ucomisd %xmm4, %xmm2
ja .LBB8_23
# %bb.27: # in Loop: Header=BB8_20 Depth=1
incq %rsi
cmpq $4194304, %rsi # imm = 0x400000
jne .LBB8_20
jmp .LBB8_25
.LBB8_21:
movl $.L.str.1, %edi
jmp .LBB8_24
.LBB8_23:
movl $.L.str.2, %edi
.LBB8_24:
cvtss2sd %xmm0, %xmm0
cvtss2sd %xmm1, %xmm1
# kill: def $esi killed $esi killed $rsi
movb $2, %al
callq printf
movl $.Lstr, %edi
callq puts@PLT
.LBB8_25: # %_Z15checkInnerArrayP10InnerArrayS0_i.exit80
callq hipGetLastError
movq 72(%rsp), %rdi
callq hipFree
movq 64(%rsp), %rdi
callq hipFree
movq %rbx, %rdi
callq free
movq %r15, %rdi
callq free
movq %r12, %rdi
callq free
movq %r14, %rdi
callq free
callq hipDeviceReset
xorl %eax, %eax
addq $1608, %rsp # imm = 0x648
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end8:
.size main, .Lfunc_end8-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
pushq %rbx
.cfi_def_cfa_offset 16
subq $32, %rsp
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -16
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB9_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB9_2:
movq __hip_gpubin_handle(%rip), %rbx
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z14testInnerArrayP10InnerArrayS0_i, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z6warmupP10InnerArrayS0_i, %esi
movl $.L__unnamed_2, %edx
movl $.L__unnamed_2, %ecx
movq %rbx, %rdi
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $32, %rsp
.cfi_def_cfa_offset 16
popq %rbx
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end9:
.size __hip_module_ctor, .Lfunc_end9-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB10_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB10_2:
retq
.Lfunc_end10:
.size __hip_module_dtor, .Lfunc_end10-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "printout idx %d: x %f y %f \n"
.size .L.str, 29
.type .L.str.1,@object # @.str.1
.L.str.1:
.asciz "different on x %dth element: host %f gpu %f \n"
.size .L.str.1, 46
.type .L.str.2,@object # @.str.2
.L.str.2:
.asciz "different on y %dth element: host %f gpu %f \n"
.size .L.str.2, 46
.type _Z14testInnerArrayP10InnerArrayS0_i,@object # @_Z14testInnerArrayP10InnerArrayS0_i
.section .rodata,"a",@progbits
.globl _Z14testInnerArrayP10InnerArrayS0_i
.p2align 3, 0x0
_Z14testInnerArrayP10InnerArrayS0_i:
.quad _Z29__device_stub__testInnerArrayP10InnerArrayS0_i
.size _Z14testInnerArrayP10InnerArrayS0_i, 8
.type _Z6warmupP10InnerArrayS0_i,@object # @_Z6warmupP10InnerArrayS0_i
.globl _Z6warmupP10InnerArrayS0_i
.p2align 3, 0x0
_Z6warmupP10InnerArrayS0_i:
.quad _Z21__device_stub__warmupP10InnerArrayS0_i
.size _Z6warmupP10InnerArrayS0_i, 8
.type .L.str.4,@object # @.str.4
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.4:
.asciz "%s test struct of array at "
.size .L.str.4, 28
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "device %d: %s \n\n"
.size .L.str.5, 17
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "warmup <<<%3d,%3d>>> elapsed %f sec \n"
.size .L.str.6, 38
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "innerarray <<<%3d,%3d>>> elapsed %f sec \n"
.size .L.str.7, 42
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z14testInnerArrayP10InnerArrayS0_i"
.size .L__unnamed_1, 36
.type .L__unnamed_2,@object # @1
.L__unnamed_2:
.asciz "_Z6warmupP10InnerArrayS0_i"
.size .L__unnamed_2, 27
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "Arrays do not match.\n"
.size .Lstr, 22
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z29__device_stub__testInnerArrayP10InnerArrayS0_i
.addrsig_sym _Z21__device_stub__warmupP10InnerArrayS0_i
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z14testInnerArrayP10InnerArrayS0_i
.addrsig_sym _Z6warmupP10InnerArrayS0_i
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
// Function that catches a CUDA error and reports the file and line where it occurred
void testCUDA(cudaError_t error, const char *file, int line) {
if (error != cudaSuccess) {
printf("There is an error in file %s at line %d\n", file, line);
exit(EXIT_FAILURE);
}
}
// Has to be defined in the compilation in order to get the correct value of the
// macros __FILE__ and __LINE__
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
// Global variables
#define d 10
#define N 10
#define P 55
#define NUMBER_BLOCKS 512
#define THREADS_PER_BLOCKS 4*d
// Function that performs the product of Jacobi rotations
/* This function will be performed N times by N different blocks */
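// Thread layout, as read from the code below: threads work in groups of four per rotation p.
// Lane k = threadIdx.x % 4 multiplies J[block_j + 4*p + k] by an element of row pos_i[p]
// (lanes 0 and 2) or row pos_j[p] (lanes 1 and 3) of this block's d x d matrix in A, at
// column floorf(threadIdx.x / 4), and stores the product in the shared array temp.
// After __syncthreads(), the even lanes add the two adjacent products and write the sum
// to row pos_i[p] (lane 0) or row pos_j[p] (lane 2) of out.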
__global__ void Compute_all_rotations(float *J, float *A, float *out, const int *pos_i, const int *pos_j){
__shared__ float temp[4 * d] ; // variable that is to be shared by the threads in the block
int block_j = blockDim.x * 4 * P ;
int idx_J;
int idx_A;
int idx_out;
for (int p=0 ; p<P ; p++) {
// index = threadIdx.x + blockIdx.x*blockDim.x ;
// Perform multiplications
if (threadIdx.x % 4 == 0 ) {
idx_J = 0 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 1 ) {
idx_J = 1 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2 ) {
idx_J = 2 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 3 ) {
idx_J = 3 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
temp[threadIdx.x] = J[idx_J] * A[idx_A] ;
__syncthreads(); // synchronize threads
// Perform additions
if (threadIdx.x % 2 == 0){
if (threadIdx.x % 4 == 0){
idx_out = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2){
idx_out = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
out[idx_out] = temp[threadIdx.x] + temp[threadIdx.x + 1] ;
}
__syncthreads(); // synchronize threads
}
}
// The following function reads the data stored in CSV files and stores it in arrays
void read_file(const char * fname, float *array, int pos){
FILE *file;
char tampon[sizeof(float)];
int actuel = 0;
char c;
int count;
file = fopen (fname, "r");
while ((c = fgetc(file)) != EOF) {
if (c == ';' || c == '\n') {
array[pos + count] = atof(tampon);
actuel = 0;
memset(tampon, 0, sizeof tampon);
} else {
tampon[actuel++] = c;
}
}
printf("TEST\n");
fclose (file);
}
void get_data(float *J, float *A){
char fname[100] = {0};
for (int n = 0 ; n<N ; n++){
for (int p = 0 ; p<P ; p++){
snprintf (fname, 100, "files/%i/J_%i.txt", n, p);
read_file (fname, J, P*n*4 + 4*p);
}
snprintf (fname, 100, "files/%i/A.txt", n);
read_file (fname, A, n*d*d);
}
}
void write_result(float* out){
FILE *file;
const char* str = "; ";
char fname[100] = {0};
for (int n=0 ; n<N ; n++) {
snprintf (fname, 100, "files/%i/out.txt", n);
file = fopen(fname, "w");
for (int i=0 ; i<d ; i++) {
for (int j=0 ; j<d ; j++) {
if (j == d-1) {
str = "\n";
}
fprintf(file, "%f %s", out[n*d*d + i*d +j], str);
str = "; ";
}
}
fclose(file);
}
}
void positions(int* pos_i, int* pos_j){
int shift = 0;
for(int i=0 ; i<P ; i++){
pos_i[i] = floor((i + shift) / d);
pos_j[i] = (i + shift) % d;
if((i + shift) % d == d-1){
shift++;
}
}
}
int main(){
// Properties of our GPUs
cudaDeviceProp prop ;
int count ;
cudaGetDeviceCount(&count) ;
for(int i=0 ; i<count ; i++) {
cudaGetDeviceProperties(&prop, i) ;
printf("Taille totale de la mémoire globale %ld\n", prop.totalGlobalMem) ;
}
// Define J A and out
float J [P*4*d*N];
float A [d*d*N];
float out [d*d*N];
get_data(J, A);
// device copies
float d_J [P*4*d*N];
float d_A [d*d*N];
float d_out [d*d*N];
int size = sizeof(float);
testCUDA(cudaMalloc((void **)&d_J, size));
testCUDA(cudaMalloc((void **)&d_A, size));
testCUDA(cudaMalloc((void **)&d_out, size));
testCUDA(cudaMemcpy(d_A, &A, size, cudaMemcpyHostToDevice));
testCUDA(cudaMemcpy(d_J, &J, size, cudaMemcpyHostToDevice));
// Define pos_i and pos_j
int pos_i [P];
int pos_j [P];
int *d_pos_i, *d_pos_j;
size = sizeof(int);
testCUDA(cudaMalloc((void **)&d_pos_i, size));
testCUDA(cudaMalloc((void **)&d_pos_j, size));
positions(pos_i, pos_j);
testCUDA(cudaMemcpy(d_pos_i, &pos_j, size, cudaMemcpyHostToDevice));
testCUDA(cudaMemcpy(d_pos_j, &pos_i, size, cudaMemcpyHostToDevice));
// Timer definition and start
float TimerV;
cudaEvent_t start, stop;
testCUDA(cudaEventCreate(&start));
testCUDA(cudaEventCreate(&stop));
testCUDA(cudaEventRecord(start, 0));
// Computing rotations
Compute_all_rotations<<<1,1>>>(d_J, d_A, d_out, d_pos_i, d_pos_j);
// Stopping timer
testCUDA(cudaEventRecord(stop, 0));
testCUDA(cudaEventSynchronize(stop));
testCUDA(cudaEventElapsedTime(&TimerV, start, stop));
printf("Exectudtion time: %f ms\n", TimerV);
// Copying and saving result
testCUDA(cudaMemcpy(&out, d_out, sizeof(float), cudaMemcpyDeviceToHost));
write_result(out);
// Cleanup
testCUDA(cudaFree(d_A));
testCUDA(cudaFree(d_J));
testCUDA(cudaFree(d_out));
testCUDA(cudaFree(d_pos_i));
testCUDA(cudaFree(d_pos_j));
testCUDA(cudaFree(start));
testCUDA(cudaFree(stop));
return 0;
} | code for sm_80
Function : _Z21Compute_all_rotationsPfS_S_PKiS1_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ ULDC.64 UR4, c[0x0][0x178] ; /* 0x00005e0000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */
/* 0x000fe200000001ff */
/*0040*/ UIADD3 UR4, UP0, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe2000ff1e03f */
/*0050*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e620000002500 */
/*0060*/ MOV R8, 0x5 ; /* 0x0000000500087802 */
/* 0x000fe20000000f00 */
/*0070*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff047624 */
/* 0x000fe200078e00ff */
/*0080*/ UIADD3.X UR5, URZ, UR5, URZ, UP0, !UPT ; /* 0x000000053f057290 */
/* 0x000fe200087fe43f */
/*0090*/ MOV R5, c[0x0][0x184] ; /* 0x0000610000057a02 */
/* 0x000fca0000000f00 */
/*00a0*/ IMAD.U32 R3, RZ, RZ, UR5 ; /* 0x00000005ff037e24 */
/* 0x000fe2000f8e00ff */
/*00b0*/ SHF.R.U32.HI R2, RZ, 0x2, R0 ; /* 0x00000002ff027819 */
/* 0x001fe40000011600 */
/*00c0*/ LOP3.LUT R6, R0, 0x3, RZ, 0xc0, !PT ; /* 0x0000000300067812 */
/* 0x000fe200078ec0ff */
/*00d0*/ IMAD R7, R7, 0x64, RZ ; /* 0x0000006407077824 */
/* 0x002fc600078e02ff */
/*00e0*/ I2F.U32 R2, R2 ; /* 0x0000000200027306 */
/* 0x000e220000201000 */
/*00f0*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fce0003f05270 */
/*0100*/ FRND.FLOOR R10, R2 ; /* 0x00000002000a7307 */
/* 0x0010640000205000 */
/*0110*/ MOV R2, UR4 ; /* 0x0000000400027c02 */
/* 0x001fe20008000f00 */
/*0120*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fca0000000a00 */
/*0130*/ BSSY B0, 0x270 ; /* 0x0000013000007945 */
/* 0x000fe20003800000 */
/*0140*/ @!P0 BRA 0x230 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0150*/ ISETP.NE.AND P1, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fda0003f25270 */
/*0160*/ @!P1 BRA 0x1f0 ; /* 0x0000008000009947 */
/* 0x000fea0003800000 */
/*0170*/ ISETP.NE.AND P1, PT, R6, 0x2, PT ; /* 0x000000020600780c */
/* 0x000fda0003f25270 */
/*0180*/ @!P1 MOV R12, 0x4 ; /* 0x00000004000c9802 */
/* 0x000fe20000000f00 */
/*0190*/ @P1 IMAD.MOV.U32 R15, RZ, RZ, R5 ; /* 0x000000ffff0f1224 */
/* 0x000fe200078e0005 */
/*01a0*/ @P1 MOV R14, R4 ; /* 0x00000004000e1202 */
/* 0x000fe40000000f00 */
/*01b0*/ @P1 IADD3 R16, R8.reuse, -0x2, RZ ; /* 0xfffffffe08101810 */
/* 0x040fe20007ffe0ff */
/*01c0*/ @!P1 IMAD.WIDE R14, R9, R12, c[0x0][0x178] ; /* 0x00005e00090e9625 */
/* 0x000fe200078e020c */
/*01d0*/ @!P1 IADD3 R16, R8, -0x3, RZ ; /* 0xfffffffd08109810 */
/* 0x000fe20007ffe0ff */
/*01e0*/ BRA 0x260 ; /* 0x0000007000007947 */
/* 0x000fea0003800000 */
/*01f0*/ HFMA2.MMA R14, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0e7435 */
/* 0x000fe200000001ff */
/*0200*/ IADD3 R16, R8, -0x4, RZ ; /* 0xfffffffc08107810 */
/* 0x000fd20007ffe0ff */
/*0210*/ IMAD.WIDE R14, R9, R14, c[0x0][0x180] ; /* 0x00006000090e7625 */
/* 0x000fe200078e020e */
/*0220*/ BRA 0x260 ; /* 0x0000003000007947 */
/* 0x000fea0003800000 */
/*0230*/ MOV R14, 0x4 ; /* 0x00000004000e7802 */
/* 0x000fe40000000f00 */
/*0240*/ IADD3 R16, R8, -0x5, RZ ; /* 0xfffffffb08107810 */
/* 0x000fc60007ffe0ff */
/*0250*/ IMAD.WIDE R14, R9, R14, c[0x0][0x178] ; /* 0x00005e00090e7625 */
/* 0x000fc800078e020e */
/*0260*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0270*/ LDG.E R12, [R14.64] ; /* 0x000000040e0c7981 */
/* 0x000ea4000c1e1900 */
/*0280*/ IMAD R20, R12, 0xa, R7 ; /* 0x0000000a0c147824 */
/* 0x004fe200078e0207 */
/*0290*/ HFMA2.MMA R12, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0c7435 */
/* 0x000fc600000001ff */
/*02a0*/ I2F.U32 R13, R20 ; /* 0x00000014000d7306 */
/* 0x000e240000201000 */
/*02b0*/ FADD R21, R10, R13 ; /* 0x0000000d0a157221 */
/* 0x003fe40000000000 */
/*02c0*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0d7624 */
/* 0x000fc800078e00ff */
/*02d0*/ F2I.TRUNC.NTZ R19, R21 ; /* 0x0000001500137305 */
/* 0x000e22000020f100 */
/*02e0*/ IMAD R17, R13, 0xdc, R16 ; /* 0x000000dc0d117824 */
/* 0x000fc800078e0210 */
/*02f0*/ IMAD.WIDE R16, R17, R12, c[0x0][0x160] ; /* 0x0000580011107625 */
/* 0x000fcc00078e020c */
/*0300*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ea2000c1e1900 */
/*0310*/ IMAD.WIDE R18, R19, R12, c[0x0][0x168] ; /* 0x00005a0013127625 */
/* 0x001fcc00078e020c */
/*0320*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ea2000c1e1900 */
/*0330*/ LOP3.LUT P1, RZ, R0, 0x1, RZ, 0xc0, !PT ; /* 0x0000000100ff7812 */
/* 0x000fe2000782c0ff */
/*0340*/ BSSY B0, 0x4f0 ; /* 0x000001a000007945 */
/* 0x000fe20003800000 */
/*0350*/ FMUL R15, R18, R17 ; /* 0x00000011120f7220 */
/* 0x004fca0000400000 */
/*0360*/ STS [R0.X4], R15 ; /* 0x0000000f00007388 */
/* 0x0001e80000004800 */
/*0370*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0380*/ @P1 BRA 0x4e0 ; /* 0x0000015000001947 */
/* 0x000fea0003800000 */
/*0390*/ BSSY B1, 0x490 ; /* 0x000000f000017945 */
/* 0x001fe20003800000 */
/*03a0*/ @!P0 BRA 0x430 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*03b0*/ ISETP.NE.AND P2, PT, R6, 0x2, PT ; /* 0x000000020600780c */
/* 0x000fda0003f45270 */
/*03c0*/ @P2 BRA 0x480 ; /* 0x000000b000002947 */
/* 0x000fea0003800000 */
/*03d0*/ LDG.E R14, [R4.64] ; /* 0x00000004040e7981 */
/* 0x000ea4000c1e1900 */
/*03e0*/ IMAD R14, R14, 0xa, R7 ; /* 0x0000000a0e0e7824 */
/* 0x004fc800078e0207 */
/*03f0*/ I2F.U32 R11, R14 ; /* 0x0000000e000b7306 */
/* 0x000e240000201000 */
/*0400*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x001fcc0000000000 */
/*0410*/ F2I.TRUNC.NTZ R11, R11 ; /* 0x0000000b000b7305 */
/* 0x000e22000020f100 */
/*0420*/ BRA 0x480 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0430*/ LDG.E R14, [R2.64+-0x4] ; /* 0xfffffc04020e7981 */
/* 0x000ea4000c1e1900 */
/*0440*/ IMAD R14, R14, 0xa, R7 ; /* 0x0000000a0e0e7824 */
/* 0x004fc800078e0207 */
/*0450*/ I2F.U32 R11, R14 ; /* 0x0000000e000b7306 */
/* 0x000e240000201000 */
/*0460*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x001fcc0000000000 */
/*0470*/ F2I.TRUNC.NTZ R11, R11 ; /* 0x0000000b000b7305 */
/* 0x000e24000020f100 */
/*0480*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0490*/ LDS R16, [R0.X4+0x4] ; /* 0x0000040000107984 */
/* 0x000fe20000004800 */
/*04a0*/ IMAD.WIDE R14, R11, R12, c[0x0][0x170] ; /* 0x00005c000b0e7625 */
/* 0x001fc600078e020c */
/*04b0*/ LDS R17, [R0.X4] ; /* 0x0000000000117984 */
/* 0x000e240000004800 */
/*04c0*/ FADD R17, R16, R17 ; /* 0x0000001110117221 */
/* 0x001fca0000000000 */
/*04d0*/ STG.E [R14.64], R17 ; /* 0x000000110e007986 */
/* 0x0001e4000c101904 */
/*04e0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x001fea0003800000 */
/*04f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0500*/ ISETP.NE.AND P2, PT, R8, 0xdd, PT ; /* 0x000000dd0800780c */
/* 0x000fda0003f45270 */
/*0510*/ @!P2 EXIT ; /* 0x000000000000a94d */
/* 0x000fea0003800000 */
/*0520*/ IADD3 R17, R9, 0x1, RZ ; /* 0x0000000109117810 */
/* 0x000fe20007ffe0ff */
/*0530*/ BSSY B0, 0x630 ; /* 0x000000f000007945 */
/* 0x000fe20003800000 */
/*0540*/ MOV R14, R2 ; /* 0x00000002000e7202 */
/* 0x000fe40000000f00 */
/*0550*/ MOV R15, R3 ; /* 0x00000003000f7202 */
/* 0x000fe20000000f00 */
/*0560*/ IMAD.SHL.U32 R16, R17, 0x4, RZ ; /* 0x0000000411107824 */
/* 0x000fe200078e00ff */
/*0570*/ @!P0 BRA 0x620 ; /* 0x000000a000008947 */
/* 0x000fea0003800000 */
/*0580*/ ISETP.NE.AND P2, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fda0003f45270 */
/*0590*/ @!P2 BRA 0x600 ; /* 0x000000600000a947 */
/* 0x000fea0003800000 */
/*05a0*/ ISETP.NE.AND P2, PT, R6, 0x2, PT ; /* 0x000000020600780c */
/* 0x000fda0003f45270 */
/*05b0*/ @P2 IADD3 R16, R16, 0x3, RZ ; /* 0x0000000310102810 */
/* 0x000fe20007ffe0ff */
/*05c0*/ @P2 IMAD.WIDE R14, R17, R12, c[0x0][0x180] ; /* 0x00006000110e2625 */
/* 0x000fc600078e020c */
/*05d0*/ @!P2 IADD3 R16, R16, 0x2, RZ ; /* 0x000000021010a810 */
/* 0x000fe20007ffe0ff */
/*05e0*/ @!P2 IMAD.WIDE R14, R17, R12, c[0x0][0x178] ; /* 0x00005e00110ea625 */
/* 0x000fe200078e020c */
/*05f0*/ BRA 0x620 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*0600*/ IMAD.WIDE R14, R17, R12, c[0x0][0x180] ; /* 0x00006000110e7625 */
/* 0x000fe200078e020c */
/*0610*/ MOV R16, R8 ; /* 0x0000000800107202 */
/* 0x000fc60000000f00 */
/*0620*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0630*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ea4000c1e1900 */
/*0640*/ IMAD R20, R14, 0xa, R7 ; /* 0x0000000a0e147824 */
/* 0x004fc800078e0207 */
/*0650*/ I2F.U32 R17, R20 ; /* 0x0000001400117306 */
/* 0x000e240000201000 */
/*0660*/ FADD R21, R10, R17 ; /* 0x000000110a157221 */
/* 0x001fe40000000000 */
/*0670*/ IMAD R17, R13, 0xdc, R16 ; /* 0x000000dc0d117824 */
/* 0x000fc800078e0210 */
/*0680*/ F2I.TRUNC.NTZ R19, R21 ; /* 0x0000001500137305 */
/* 0x000e22000020f100 */
/*0690*/ IMAD.WIDE R16, R17, R12, c[0x0][0x160] ; /* 0x0000580011107625 */
/* 0x000fcc00078e020c */
/*06a0*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ea2000c1e1900 */
/*06b0*/ IMAD.WIDE R18, R19, R12, c[0x0][0x168] ; /* 0x00005a0013127625 */
/* 0x001fcc00078e020c */
/*06c0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ea2000c1e1900 */
/*06d0*/ BSSY B0, 0x880 ; /* 0x000001a000007945 */
/* 0x000fe20003800000 */
/*06e0*/ FMUL R13, R18, R17 ; /* 0x00000011120d7220 */
/* 0x004fca0000400000 */
/*06f0*/ STS [R0.X4], R13 ; /* 0x0000000d00007388 */
/* 0x0001e80000004800 */
/*0700*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0710*/ @P1 BRA 0x870 ; /* 0x0000015000001947 */
/* 0x000fea0003800000 */
/*0720*/ BSSY B1, 0x820 ; /* 0x000000f000017945 */
/* 0x001fe20003800000 */
/*0730*/ @!P0 BRA 0x7c0 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*0740*/ ISETP.NE.AND P1, PT, R6, 0x2, PT ; /* 0x000000020600780c */
/* 0x000fda0003f25270 */
/*0750*/ @P1 BRA 0x810 ; /* 0x000000b000001947 */
/* 0x000fea0003800000 */
/*0760*/ LDG.E R14, [R4.64+0x4] ; /* 0x00000404040e7981 */
/* 0x000ea4000c1e1900 */
/*0770*/ IMAD R14, R14, 0xa, R7 ; /* 0x0000000a0e0e7824 */
/* 0x004fc800078e0207 */
/*0780*/ I2F.U32 R11, R14 ; /* 0x0000000e000b7306 */
/* 0x000e240000201000 */
/*0790*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x001fcc0000000000 */
/*07a0*/ F2I.TRUNC.NTZ R11, R11 ; /* 0x0000000b000b7305 */
/* 0x000e22000020f100 */
/*07b0*/ BRA 0x810 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*07c0*/ LDG.E R14, [R2.64] ; /* 0x00000004020e7981 */
/* 0x000ea4000c1e1900 */
/*07d0*/ IMAD R14, R14, 0xa, R7 ; /* 0x0000000a0e0e7824 */
/* 0x004fc800078e0207 */
/*07e0*/ I2F.U32 R11, R14 ; /* 0x0000000e000b7306 */
/* 0x000e240000201000 */
/*07f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x001fcc0000000000 */
/*0800*/ F2I.TRUNC.NTZ R11, R11 ; /* 0x0000000b000b7305 */
/* 0x000e24000020f100 */
/*0810*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0820*/ LDS R14, [R0.X4+0x4] ; /* 0x00000400000e7984 */
/* 0x000fe20000004800 */
/*0830*/ IMAD.WIDE R12, R11, R12, c[0x0][0x170] ; /* 0x00005c000b0c7625 */
/* 0x001fc600078e020c */
/*0840*/ LDS R15, [R0.X4] ; /* 0x00000000000f7984 */
/* 0x000e240000004800 */
/*0850*/ FADD R15, R14, R15 ; /* 0x0000000f0e0f7221 */
/* 0x001fca0000000000 */
/*0860*/ STG.E [R12.64], R15 ; /* 0x0000000f0c007986 */
/* 0x0001e4000c101904 */
/*0870*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x001fea0003800000 */
/*0880*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0890*/ IADD3 R2, P1, R2, 0x8, RZ ; /* 0x0000000802027810 */
/* 0x000fe40007f3e0ff */
/*08a0*/ IADD3 R4, P2, R4, 0x8, RZ ; /* 0x0000000804047810 */
/* 0x000fe40007f5e0ff */
/*08b0*/ IADD3.X R3, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff037210 */
/* 0x000fe40000ffe4ff */
/*08c0*/ IADD3.X R5, RZ, R5, RZ, P2, !PT ; /* 0x00000005ff057210 */
/* 0x000fe400017fe4ff */
/*08d0*/ IADD3 R9, R9, 0x2, RZ ; /* 0x0000000209097810 */
/* 0x000fe40007ffe0ff */
/*08e0*/ IADD3 R8, R8, 0x8, RZ ; /* 0x0000000808087810 */
/* 0x000fe20007ffe0ff */
/*08f0*/ BRA 0x130 ; /* 0xfffff83000007947 */
/* 0x000fea000383ffff */
/*0900*/ BRA 0x900; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0910*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0920*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0930*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0940*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0950*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0960*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0970*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0980*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0990*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
// Function that catches a CUDA error and reports the file and line where it occurred
void testCUDA(cudaError_t error, const char *file, int line) {
if (error != cudaSuccess) {
printf("There is an error in file %s at line %d\n", file, line);
exit(EXIT_FAILURE);
}
}
// Has to be defined in the compilation in order to get the correct value of the
// macros __FILE__ and __LINE__
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
// Global variables
#define d 10
#define N 10
#define P 55
#define NUMBER_BLOCKS 512
#define THREADS_PER_BLOCKS 4*d
// Function that performs the product of Jacobi rotations
/* This function will be performed N times by N different blocks */
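// Thread layout, as read from the code below: threads work in groups of four per rotation p.
// Lane k = threadIdx.x % 4 multiplies J[block_j + 4*p + k] by an element of row pos_i[p]
// (lanes 0 and 2) or row pos_j[p] (lanes 1 and 3) of this block's d x d matrix in A, at
// column floorf(threadIdx.x / 4), and stores the product in the shared array temp.
// After __syncthreads(), the even lanes add the two adjacent products and write the sum
// to row pos_i[p] (lane 0) or row pos_j[p] (lane 2) of out.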
__global__ void Compute_all_rotations(float *J, float *A, float *out, const int *pos_i, const int *pos_j){
__shared__ float temp[4 * d] ; // variable that is to be shared by the threads in the block
int block_j = blockDim.x * 4 * P ;
int idx_J;
int idx_A;
int idx_out;
for (int p=0 ; p<P ; p++) {
// index = threadIdx.x + blockIdx.x*blockDim.x ;
// Perform multiplications
if (threadIdx.x % 4 == 0 ) {
idx_J = 0 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 1 ) {
idx_J = 1 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2 ) {
idx_J = 2 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 3 ) {
idx_J = 3 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
temp[threadIdx.x] = J[idx_J] * A[idx_A] ;
__syncthreads(); // synchronize threads
// Perform additions
if (threadIdx.x % 2 == 0){
if (threadIdx.x % 4 == 0){
idx_out = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2){
idx_out = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
out[idx_out] = temp[threadIdx.x] + temp[threadIdx.x + 1] ;
}
__syncthreads(); // synchronize threads
}
}
// The following function reads the data stored in CSV files and stores it in arrays
void read_file(const char * fname, float *array, int pos){
FILE *file;
char tampon[sizeof(float)];
int actuel = 0;
char c;
int count;
file = fopen (fname, "r");
while ((c = fgetc(file)) != EOF) {
if (c == ';' || c == '\n') {
array[pos + count] = atof(tampon);
actuel = 0;
memset(tampon, 0, sizeof tampon);
} else {
tampon[actuel++] = c;
}
}
printf("TEST\n");
fclose (file);
}
void get_data(float *J, float *A){
char fname[100] = {0};
for (int n = 0 ; n<N ; n++){
for (int p = 0 ; p<P ; p++){
snprintf (fname, 100, "files/%i/J_%i.txt", n, p);
read_file (fname, J, P*n*4 + 4*p);
}
snprintf (fname, 100, "files/%i/A.txt", n);
read_file (fname, A, n*d*d);
}
}
void write_result(float* out){
FILE *file;
const char* str = "; ";
char fname[100] = {0};
for (int n=0 ; n<N ; n++) {
snprintf (fname, 100, "files/%i/out.txt", n);
file = fopen(fname, "w");
for (int i=0 ; i<d ; i++) {
for (int j=0 ; j<d ; j++) {
if (j == d-1) {
str = "\n";
}
fprintf(file, "%f %s", out[n*d*d + i*d +j], str);
str = "; ";
}
}
fclose(file);
}
}
void positions(int* pos_i, int* pos_j){
int shift = 0;
for(int i=0 ; i<P ; i++){
pos_i[i] = floor((i + shift) / d);
pos_j[i] = (i + shift) % d;
if((i + shift) % d == d-1){
shift++;
}
}
}
int main(){
// Properties of our GPUs
cudaDeviceProp prop ;
int count ;
cudaGetDeviceCount(&count) ;
for(int i=0 ; i<count ; i++) {
cudaGetDeviceProperties(&prop, i) ;
printf("Taille totale de la mémoire globale %ld\n", prop.totalGlobalMem) ;
}
// Define J A and out
float J [P*4*d*N];
float A [d*d*N];
float out [d*d*N];
get_data(J, A);
// device copies
float d_J [P*4*d*N];
float d_A [d*d*N];
float d_out [d*d*N];
int size = sizeof(float);
testCUDA(cudaMalloc((void **)&d_J, size));
testCUDA(cudaMalloc((void **)&d_A, size));
testCUDA(cudaMalloc((void **)&d_out, size));
testCUDA(cudaMemcpy(d_A, &A, size, cudaMemcpyHostToDevice));
testCUDA(cudaMemcpy(d_J, &J, size, cudaMemcpyHostToDevice));
// Define pos_i and pos_j
int pos_i [P];
int pos_j [P];
int *d_pos_i, *d_pos_j;
size = sizeof(int);
testCUDA(cudaMalloc((void **)&d_pos_i, size));
testCUDA(cudaMalloc((void **)&d_pos_j, size));
positions(pos_i, pos_j);
testCUDA(cudaMemcpy(d_pos_i, &pos_j, size, cudaMemcpyHostToDevice));
testCUDA(cudaMemcpy(d_pos_j, &pos_i, size, cudaMemcpyHostToDevice));
// Timer definition and start
float TimerV;
cudaEvent_t start, stop;
testCUDA(cudaEventCreate(&start));
testCUDA(cudaEventCreate(&stop));
testCUDA(cudaEventRecord(start, 0));
// Computing rotations
Compute_all_rotations<<<1,1>>>(d_J, d_A, d_out, d_pos_i, d_pos_j);
// Stopping timer
testCUDA(cudaEventRecord(stop, 0));
testCUDA(cudaEventSynchronize(stop));
testCUDA(cudaEventElapsedTime(&TimerV, start, stop));
printf("Exectudtion time: %f ms\n", TimerV);
// Copying and saving result
testCUDA(cudaMemcpy(&out, d_out, sizeof(float), cudaMemcpyDeviceToHost));
write_result(out);
// Cleanup
testCUDA(cudaFree(d_A));
testCUDA(cudaFree(d_J));
testCUDA(cudaFree(d_out));
testCUDA(cudaFree(d_pos_i));
testCUDA(cudaFree(d_pos_j));
testCUDA(cudaFree(start));
testCUDA(cudaFree(stop));
return 0;
} | .file "tmpxft_0007e846_00000000-6_version1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2065:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2065:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "There is an error in file %s at line %d\n"
.text
.globl _Z8testCUDA9cudaErrorPKci
.type _Z8testCUDA9cudaErrorPKci, @function
_Z8testCUDA9cudaErrorPKci:
.LFB2057:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
subq $8, %rsp
.cfi_def_cfa_offset 16
movl %edx, %ecx
movq %rsi, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z8testCUDA9cudaErrorPKci, .-_Z8testCUDA9cudaErrorPKci
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "r"
.LC2:
.string "TEST\n"
.text
.globl _Z9read_filePKcPfi
.type _Z9read_filePKcPfi, @function
_Z9read_filePKcPfi:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, %r14
movl %edx, %r12d
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
leaq .LC1(%rip), %rsi
call fopen@PLT
movq %rax, %rbp
movl $0, %ebx
leaq 4(%rsp), %r13
movslq %r12d, %r12
movl $0, %r15d
jmp .L10
.L11:
movslq %ebx, %rdx
movb %al, 4(%rsp,%rdx)
leal 1(%rbx), %ebx
.L10:
movq %rbp, %rdi
call fgetc@PLT
cmpb $-1, %al
je .L18
cmpb $59, %al
je .L16
cmpb $10, %al
jne .L11
.L16:
movl $0, %esi
movq %r13, %rdi
call strtod@PLT
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r14,%r12,4)
movl $0, 0(%r13)
movl %r15d, %ebx
jmp .L10
.L18:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbp, %rdi
call fclose@PLT
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L19
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z9read_filePKcPfi, .-_Z9read_filePKcPfi
.section .rodata.str1.1
.LC3:
.string "files/%i/J_%i.txt"
.LC4:
.string "files/%i/A.txt"
.text
.globl _Z8get_dataPfS_
.type _Z8get_dataPfS_, @function
_Z8get_dataPfS_:
.LFB2059:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $136, %rsp
.cfi_def_cfa_offset 192
movq %rdi, %r14
movq %rsi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movq $0, 32(%rsp)
movq $0, 40(%rsp)
movq $0, 48(%rsp)
movq $0, 56(%rsp)
movq $0, 64(%rsp)
movq $0, 72(%rsp)
movq $0, 80(%rsp)
movq $0, 88(%rsp)
movq $0, 96(%rsp)
movq $0, 104(%rsp)
movl $0, 112(%rsp)
movl $0, %r15d
movl $0, 4(%rsp)
movl $0, %r13d
leaq 16(%rsp), %r12
.L21:
leal 0(,%r15,4), %ebp
movl $0, %ebx
.L22:
subq $8, %rsp
.cfi_def_cfa_offset 200
pushq %rbx
.cfi_def_cfa_offset 208
movl %r13d, %r9d
leaq .LC3(%rip), %r8
movl $100, %ecx
movl $2, %edx
movl $100, %esi
movq %r12, %rdi
movl $0, %eax
call __snprintf_chk@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
movl %ebp, %edx
movq %r14, %rsi
movq %r12, %rdi
call _Z9read_filePKcPfi
addl $1, %ebx
addl $4, %ebp
cmpl $55, %ebx
jne .L22
movl %r13d, %r9d
leaq .LC4(%rip), %r8
movl $100, %ecx
movl $2, %edx
movl $100, %esi
movq %r12, %rdi
movl $0, %eax
call __snprintf_chk@PLT
movl 4(%rsp), %ebx
movl %ebx, %edx
movq 8(%rsp), %rsi
movq %r12, %rdi
call _Z9read_filePKcPfi
addl $1, %r13d
movl %ebx, %eax
addl $100, %eax
movl %eax, 4(%rsp)
addl $55, %r15d
cmpl $10, %r13d
jne .L21
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L27
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size _Z8get_dataPfS_, .-_Z8get_dataPfS_
.section .rodata.str1.1
.LC5:
.string "files/%i/out.txt"
.LC6:
.string "w"
.LC7:
.string "; "
.LC8:
.string "%f %s"
.LC9:
.string "\n"
.text
.globl _Z12write_resultPf
.type _Z12write_resultPf, @function
_Z12write_resultPf:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $152, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movq $0, 32(%rsp)
movq $0, 40(%rsp)
movq $0, 48(%rsp)
movq $0, 56(%rsp)
movq $0, 64(%rsp)
movq $0, 72(%rsp)
movq $0, 80(%rsp)
movq $0, 88(%rsp)
movq $0, 96(%rsp)
movq $0, 104(%rsp)
movq $0, 112(%rsp)
movq $0, 120(%rsp)
movl $0, 128(%rsp)
movl $0, 24(%rsp)
movl $0, 28(%rsp)
leaq .LC7(%rip), %r14
leaq .LC8(%rip), %r12
jmp .L32
.L40:
pxor %xmm0, %xmm0
cvtss2sd 0(%r13,%rbx,4), %xmm0
movq %r14, %rcx
movq %r12, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $1, %eax
call __fprintf_chk@PLT
addq $1, %rbx
.L31:
cmpq $9, %rbx
jne .L40
movl 20(%rsp), %eax
addl $9, %eax
cltq
movq 8(%rsp), %rdi
pxor %xmm0, %xmm0
cvtss2sd (%rdi,%rax,4), %xmm0
leaq .LC9(%rip), %rcx
movq %r12, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $1, %eax
call __fprintf_chk@PLT
addl $10, %r15d
movl 24(%rsp), %eax
leal 100(%rax), %ebx
cmpl %ebx, %r15d
je .L41
.L29:
movl %r15d, 20(%rsp)
movslq %r15d, %rax
movq 8(%rsp), %rdx
leaq (%rdx,%rax,4), %r13
movl $0, %ebx
jmp .L31
.L41:
movq %rbp, %rdi
call fclose@PLT
addl $1, 28(%rsp)
movl 28(%rsp), %eax
cmpl $10, %eax
je .L42
movl %ebx, 24(%rsp)
.L32:
leaq 32(%rsp), %rbx
movl 28(%rsp), %r9d
leaq .LC5(%rip), %r8
movl $100, %ecx
movl $2, %edx
movl $100, %esi
movq %rbx, %rdi
movl $0, %eax
call __snprintf_chk@PLT
leaq .LC6(%rip), %rsi
movq %rbx, %rdi
call fopen@PLT
movq %rax, %rbp
movl 24(%rsp), %r15d
jmp .L29
.L42:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L43
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size _Z12write_resultPf, .-_Z12write_resultPf
.globl _Z9positionsPiS_
.type _Z9positionsPiS_, @function
_Z9positionsPiS_:
.LFB2061:
.cfi_startproc
endbr64
movq %rdi, %r9
movl $0, %ecx
movl $0, %edi
.L46:
leal (%rdi,%rcx), %edx
movslq %edx, %rax
imulq $1717986919, %rax, %rax
sarq $34, %rax
movl %edx, %r8d
sarl $31, %r8d
subl %r8d, %eax
movl %eax, (%r9,%rcx,4)
leal (%rax,%rax,4), %eax
addl %eax, %eax
subl %eax, %edx
movl %edx, (%rsi,%rcx,4)
cmpl $9, %edx
sete %al
movzbl %al, %eax
addl %eax, %edi
addq $1, %rcx
cmpq $55, %rcx
jne .L46
ret
.cfi_endproc
.LFE2061:
.size _Z9positionsPiS_, .-_Z9positionsPiS_
.globl _Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_
.type _Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_, @function
_Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_:
.LFB2087:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L52
.L48:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L53
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L52:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z21Compute_all_rotationsPfS_S_PKiS1_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L48
.L53:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_, .-_Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_
.globl _Z21Compute_all_rotationsPfS_S_PKiS1_
.type _Z21Compute_all_rotationsPfS_S_PKiS1_, @function
_Z21Compute_all_rotationsPfS_S_PKiS1_:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z21Compute_all_rotationsPfS_S_PKiS1_, .-_Z21Compute_all_rotationsPfS_S_PKiS1_
.section .rodata.str1.8
.align 8
.LC10:
.string "Taille totale de la m\303\251moire globale %ld\n"
.align 8
.LC11:
.string "/home/ubuntu/Datasets/stackv2/train-structured/JbRemy/GPU_programming/master/version1.cu"
.section .rodata.str1.1
.LC12:
.string "Exectudtion time: %f ms\n"
.text
.globl main
.type main, @function
main:
.LFB2062:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
leaq -192512(%rsp), %r11
.cfi_def_cfa 11, 192560
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $1056, %rsp
.cfi_def_cfa_offset 193616
movq %fs:40, %rax
movq %rax, 193560(%rsp)
xorl %eax, %eax
leaq 4(%rsp), %rdi
call cudaGetDeviceCount@PLT
cmpl $0, 4(%rsp)
jle .L57
movl $0, %ebx
leaq .LC10(%rip), %rbp
.L58:
leaq 512(%rsp), %rdi
movl %ebx, %esi
call cudaGetDeviceProperties_v2@PLT
movq 800(%rsp), %rdx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, 4(%rsp)
jg .L58
.L57:
leaq 1552(%rsp), %r14
leaq 17552(%rsp), %r12
movq %r14, %rsi
movq %r12, %rdi
call _Z8get_dataPfS_
leaq 105552(%rsp), %rbp
movl $4, %esi
movq %rbp, %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $199, %edx
leaq .LC11(%rip), %rbx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 9552(%rsp), %r13
movl $4, %esi
movq %r13, %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $200, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 13552(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $201, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movl $1, %ecx
movl $4, %edx
movq %r14, %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $203, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movl $1, %ecx
movl $4, %edx
movq %r12, %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $204, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 8(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $215, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 16(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $216, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 288(%rsp), %r12
leaq 64(%rsp), %rbp
movq %r12, %rsi
movq %rbp, %rdi
call _Z9positionsPiS_
movl $1, %ecx
movl $4, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $220, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movl $1, %ecx
movl $4, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $221, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 24(%rsp), %rdi
call cudaEventCreate@PLT
movl %eax, %edi
movl $227, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
movl %eax, %edi
movl $228, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movl %eax, %edi
movl $229, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 52(%rsp), %rdx
movl $1, %ecx
movq 40(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L63
.L59:
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl %eax, %edi
movl $237, %edx
leaq .LC11(%rip), %rbx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq 32(%rsp), %rdi
call cudaEventSynchronize@PLT
movl %eax, %edi
movl $238, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 52(%rsp), %rdi
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl %eax, %edi
movl $239, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
pxor %xmm0, %xmm0
cvtss2sd 52(%rsp), %xmm0
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq 13552(%rsp), %rbp
leaq 5552(%rsp), %r12
movl $2, %ecx
movl $4, %edx
movq %rbp, %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $244, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq %r12, %rdi
call _Z12write_resultPf
leaq 9552(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $249, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 105552(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $250, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq %rbp, %rdi
call cudaFree@PLT
movl %eax, %edi
movl $251, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq 8(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $252, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq 16(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $253, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq 24(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $254, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq 32(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $255, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq 193560(%rsp), %rax
subq %fs:40, %rax
jne .L64
movl $0, %eax
addq $193568, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L63:
.cfi_restore_state
leaq 13552(%rsp), %rdx
movq %r13, %rsi
leaq 105552(%rsp), %rdi
movq 16(%rsp), %r8
movq 8(%rsp), %rcx
call _Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_
jmp .L59
.L64:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2062:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC13:
.string "_Z21Compute_all_rotationsPfS_S_PKiS1_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z21Compute_all_rotationsPfS_S_PKiS1_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
// Function that catches errors
// and reports the file and line where they occurred
void testCUDA(cudaError_t error, const char *file, int line) {
if (error != cudaSuccess) {
printf("There is an error in file %s at line %d\n", file, line);
exit(EXIT_FAILURE);
}
}
// Has to be defined as a macro so that __FILE__ and __LINE__ expand to the
// location of the call site
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
// Global variables
#define d 10
#define N 10
#define P 55
#define NUMBER_BLOCKS 512
#define THREADS_PER_BLOCKS 4*d
// Function that performs the product of Jacobi rotations
/* This function will be performed N times by N different blocks */
__global__ void Compute_all_rotations(float *J, float *A, float *out, const int *pos_i, const int *pos_j){
__shared__ float temp[4 * d] ; // variable that is to be shared by the threads in the block
int block_j = blockDim.x * 4 * P ;
int idx_J;
int idx_A;
int idx_out;
for (int p=0 ; p<P ; p++) {
// index = threadIdx.x + blockIdx.x*blockDim.x ;
// Perform multiplications
if (threadIdx.x % 4 == 0 ) {
idx_J = 0 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 1 ) {
idx_J = 1 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2 ) {
idx_J = 2 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 3 ) {
idx_J = 3 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
temp[threadIdx.x] = J[idx_J] * A[idx_A] ;
__syncthreads(); // synchronize threads
// Perform additions
if (threadIdx.x % 2 == 0){
if (threadIdx.x % 4 == 0){
idx_out = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2){
idx_out = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
out[idx_out] = temp[threadIdx.x] + temp[threadIdx.x + 1] ;
}
__syncthreads(); // synchronize threads
}
}
// The following function reads the data stored in CSV files and stores it in arrays
void read_file(const char * fname, float *array, int pos){
FILE *file;
char tampon[sizeof(float)];
int actuel = 0;
char c;
int count;
file = fopen (fname, "r");
while ((c = fgetc(file)) != EOF) {
if (c == ';' || c == '\n') {
array[pos + count] = atof(tampon);
actuel = 0;
memset(tampon, 0, sizeof tampon);
} else {
tampon[actuel++] = c;
}
}
printf("TEST\n");
fclose (file);
}
void get_data(float *J, float *A){
char fname[100] = {0};
for (int n = 0 ; n<N ; n++){
for (int p = 0 ; p<P ; p++){
snprintf (fname, 100, "files/%i/J_%i.txt", n, p);
read_file (fname, J, P*n*4 + 4*p);
}
snprintf (fname, 100, "files/%i/A.txt", n);
read_file (fname, A, n*d*d);
}
}
void write_result(float* out){
FILE *file;
const char* str = "; ";
char fname[100] = {0};
for (int n=0 ; n<N ; n++) {
snprintf (fname, 100, "files/%i/out.txt", n);
file = fopen(fname, "w");
for (int i=0 ; i<d ; i++) {
for (int j=0 ; j<d ; j++) {
if (j == d-1) {
str = "\n";
}
fprintf(file, "%f %s", out[n*d*d + i*d +j], str);
str = "; ";
}
}
fclose(file);
}
}
void positions(int* pos_i, int* pos_j){
int shift = 0;
for(int i=0 ; i<P ; i++){
pos_i[i] = floor((i + shift) / d);
pos_j[i] = (i + shift) % d;
if((i + shift) % d == d-1){
shift++;
}
}
}
int main(){
// Properties of our GPUs
cudaDeviceProp prop ;
int count ;
cudaGetDeviceCount(&count) ;
for(int i=0 ; i<count ; i++) {
cudaGetDeviceProperties(&prop, i) ;
printf("Taille totale de la mémoire globale %ld\n", prop.totalGlobalMem) ;
}
// Define J, A and out
float J [P*4*d*N];
float A [d*d*N];
float out [d*d*N];
get_data(J, A);
// device copies
float d_J [P*4*d*N];
float d_A [d*d*N];
float d_out [d*d*N];
int size = sizeof(float);
testCUDA(cudaMalloc((void **)&d_J, size));
testCUDA(cudaMalloc((void **)&d_A, size));
testCUDA(cudaMalloc((void **)&d_out, size));
testCUDA(cudaMemcpy(d_A, &A, size, cudaMemcpyHostToDevice));
testCUDA(cudaMemcpy(d_J, &J, size, cudaMemcpyHostToDevice));
// Define pos_i and pos_j
int pos_i [P];
int pos_j [P];
int *d_pos_i, *d_pos_j;
size = sizeof(int);
testCUDA(cudaMalloc((void **)&d_pos_i, size));
testCUDA(cudaMalloc((void **)&d_pos_j, size));
positions(pos_i, pos_j);
testCUDA(cudaMemcpy(d_pos_i, &pos_j, size, cudaMemcpyHostToDevice));
testCUDA(cudaMemcpy(d_pos_j, &pos_i, size, cudaMemcpyHostToDevice));
// Timer definition and start
float TimerV;
cudaEvent_t start, stop;
testCUDA(cudaEventCreate(&start));
testCUDA(cudaEventCreate(&stop));
testCUDA(cudaEventRecord(start, 0));
// Computing rotations
Compute_all_rotations<<<1,1>>>(d_J, d_A, d_out, d_pos_i, d_pos_j);
// Stopping timer
testCUDA(cudaEventRecord(stop, 0));
testCUDA(cudaEventSynchronize(stop));
testCUDA(cudaEventElapsedTime(&TimerV, start, stop));
printf("Exectudtion time: %f ms\n", TimerV);
// Copying and saving result
testCUDA(cudaMemcpy(&out, d_out, sizeof(float), cudaMemcpyDeviceToHost));
write_result(out);
// Cleanup
testCUDA(cudaFree(d_A));
testCUDA(cudaFree(d_J));
testCUDA(cudaFree(d_out));
testCUDA(cudaFree(d_pos_i));
testCUDA(cudaFree(d_pos_j));
testCUDA(cudaFree(start));
testCUDA(cudaFree(stop));
return 0;
} | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <hip/device_functions.h>
// Function that catches errors
// and reports the file and line where they occurred
void testCUDA(hipError_t error, const char *file, int line) {
if (error != hipSuccess) {
printf("There is an error in file %s at line %d\n", file, line);
exit(EXIT_FAILURE);
}
}
// Has to be defined as a macro so that __FILE__ and __LINE__ expand to the
// location of the call site
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
// Global variables
#define d 10
#define N 10
#define P 55
#define NUMBER_BLOCKS 512
#define THREADS_PER_BLOCKS 4*d
// Function that performs the product of Jacobi rotations
/* This function will be performed N times by N different blocks */
__global__ void Compute_all_rotations(float *J, float *A, float *out, const int *pos_i, const int *pos_j){
__shared__ float temp[4 * d] ; // variable that is to be shared by the threads in the block
int block_j = blockDim.x * 4 * P ;
int idx_J;
int idx_A;
int idx_out;
for (int p=0 ; p<P ; p++) {
// index = threadIdx.x + blockIdx.x*blockDim.x ;
// Perform multiplications
if (threadIdx.x % 4 == 0 ) {
idx_J = 0 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 1 ) {
idx_J = 1 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2 ) {
idx_J = 2 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 3 ) {
idx_J = 3 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
temp[threadIdx.x] = J[idx_J] * A[idx_A] ;
__syncthreads(); // synchronize threads
// Perform additions
if (threadIdx.x % 2 == 0){
if (threadIdx.x % 4 == 0){
idx_out = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2){
idx_out = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
out[idx_out] = temp[threadIdx.x] + temp[threadIdx.x + 1] ;
}
__syncthreads(); // synchronize threads
}
}
// The following function reads the data stored in CSV files and stores it in arrays
void read_file(const char * fname, float *array, int pos){
FILE *file;
char tampon[sizeof(float)];
int actuel = 0;
char c;
int count;
file = fopen (fname, "r");
while ((c = fgetc(file)) != EOF) {
if (c == ';' || c == '\n') {
array[pos + count] = atof(tampon);
actuel = 0;
memset(tampon, 0, sizeof tampon);
} else {
tampon[actuel++] = c;
}
}
printf("TEST\n");
fclose (file);
}
void get_data(float *J, float *A){
char fname[100] = {0};
for (int n = 0 ; n<N ; n++){
for (int p = 0 ; p<P ; p++){
snprintf (fname, 100, "files/%i/J_%i.txt", n, p);
read_file (fname, J, P*n*4 + 4*p);
}
snprintf (fname, 100, "files/%i/A.txt", n);
read_file (fname, A, n*d*d);
}
}
void write_result(float* out){
FILE *file;
const char* str = "; ";
char fname[100] = {0};
for (int n=0 ; n<N ; n++) {
snprintf (fname, 100, "files/%i/out.txt", n);
file = fopen(fname, "w");
for (int i=0 ; i<d ; i++) {
for (int j=0 ; j<d ; j++) {
if (j == d-1) {
str = "\n";
}
fprintf(file, "%f %s", out[n*d*d + i*d +j], str);
str = "; ";
}
}
fclose(file);
}
}
void positions(int* pos_i, int* pos_j){
int shift = 0;
for(int i=0 ; i<P ; i++){
pos_i[i] = floor((i + shift) / d);
pos_j[i] = (i + shift) % d;
if((i + shift) % d == d-1){
shift++;
}
}
}
int main(){
// Properties of our GPUs
hipDeviceProp_t prop ;
int count ;
hipGetDeviceCount(&count) ;
for(int i=0 ; i<count ; i++) {
hipGetDeviceProperties(&prop, i) ;
printf("Taille totale de la mémoire globale %ld\n", prop.totalGlobalMem) ;
}
// Define J, A and out
float J [P*4*d*N];
float A [d*d*N];
float out [d*d*N];
get_data(J, A);
// device copies
float d_J [P*4*d*N];
float d_A [d*d*N];
float d_out [d*d*N];
int size = sizeof(float);
testCUDA(hipMalloc((void **)&d_J, size));
testCUDA(hipMalloc((void **)&d_A, size));
testCUDA(hipMalloc((void **)&d_out, size));
testCUDA(hipMemcpy(d_A, &A, size, hipMemcpyHostToDevice));
testCUDA(hipMemcpy(d_J, &J, size, hipMemcpyHostToDevice));
// Define pos_i and pos_j
int pos_i [P];
int pos_j [P];
int *d_pos_i, *d_pos_j;
size = sizeof(int);
testCUDA(hipMalloc((void **)&d_pos_i, size));
testCUDA(hipMalloc((void **)&d_pos_j, size));
positions(pos_i, pos_j);
testCUDA(hipMemcpy(d_pos_i, &pos_j, size, hipMemcpyHostToDevice));
testCUDA(hipMemcpy(d_pos_j, &pos_i, size, hipMemcpyHostToDevice));
// Timer definition and start
float TimerV;
hipEvent_t start, stop;
testCUDA(hipEventCreate(&start));
testCUDA(hipEventCreate(&stop));
testCUDA(hipEventRecord(start, 0));
// Computing rotations
Compute_all_rotations<<<1,1>>>(d_J, d_A, d_out, d_pos_i, d_pos_j);
// Stopping timer
testCUDA(hipEventRecord(stop, 0));
testCUDA(hipEventSynchronize(stop));
testCUDA(hipEventElapsedTime(&TimerV, start, stop));
printf("Exectudtion time: %f ms\n", TimerV);
// Copying and saving result
testCUDA(hipMemcpy(&out, d_out, sizeof(float), hipMemcpyDeviceToHost));
write_result(out);
// Cleanup
testCUDA(hipFree(d_A));
testCUDA(hipFree(d_J));
testCUDA(hipFree(d_out));
testCUDA(hipFree(d_pos_i));
testCUDA(hipFree(d_pos_j));
testCUDA(hipFree(start));
testCUDA(hipFree(stop));
return 0;
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <hip/device_functions.h>
// Function that catches errors
// and reports the file and line where they occurred
void testCUDA(hipError_t error, const char *file, int line) {
if (error != hipSuccess) {
printf("There is an error in file %s at line %d\n", file, line);
exit(EXIT_FAILURE);
}
}
// Has to be defined as a macro so that __FILE__ and __LINE__ expand to the
// location of the call site
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
// Global variables
#define d 10
#define N 10
#define P 55
#define NUMBER_BLOCKS 512
#define THREADS_PER_BLOCKS 4*d
// Function that performs the product of Jacobi rotations
/* This function will be performed N times by N different blocks */
__global__ void Compute_all_rotations(float *J, float *A, float *out, const int *pos_i, const int *pos_j){
__shared__ float temp[4 * d] ; // variable that is to be shared by the threads in the block
int block_j = blockDim.x * 4 * P ;
int idx_J;
int idx_A;
int idx_out;
for (int p=0 ; p<P ; p++) {
// index = threadIdx.x + blockIdx.x*blockDim.x ;
// Perform multiplications
if (threadIdx.x % 4 == 0 ) {
idx_J = 0 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 1 ) {
idx_J = 1 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2 ) {
idx_J = 2 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 3 ) {
idx_J = 3 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
temp[threadIdx.x] = J[idx_J] * A[idx_A] ;
__syncthreads(); // synchronize threads
// Perform additions
if (threadIdx.x % 2 == 0){
if (threadIdx.x % 4 == 0){
idx_out = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2){
idx_out = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
out[idx_out] = temp[threadIdx.x] + temp[threadIdx.x + 1] ;
}
__syncthreads(); // synchronize threads
}
}
// The following function reads the data stored in CSV files and stores it in arrays
void read_file(const char * fname, float *array, int pos){
FILE *file;
char tampon[sizeof(float)];
int actuel = 0;
char c;
int count;
file = fopen (fname, "r");
while ((c = fgetc(file)) != EOF) {
if (c == ';' || c == '\n') {
array[pos + count] = atof(tampon);
actuel = 0;
memset(tampon, 0, sizeof tampon);
} else {
tampon[actuel++] = c;
}
}
printf("TEST\n");
fclose (file);
}
void get_data(float *J, float *A){
char fname[100] = {0};
for (int n = 0 ; n<N ; n++){
for (int p = 0 ; p<P ; p++){
snprintf (fname, 100, "files/%i/J_%i.txt", n, p);
read_file (fname, J, P*n*4 + 4*p);
}
snprintf (fname, 100, "files/%i/A.txt", n);
read_file (fname, A, n*d*d);
}
}
void write_result(float* out){
FILE *file;
const char* str = "; ";
char fname[100] = {0};
for (int n=0 ; n<N ; n++) {
snprintf (fname, 100, "files/%i/out.txt", n);
file = fopen(fname, "w");
for (int i=0 ; i<d ; i++) {
for (int j=0 ; j<d ; j++) {
if (j == d-1) {
str = "\n";
}
fprintf(file, "%f %s", out[n*d*d + i*d +j], str);
str = "; ";
}
}
fclose(file);
}
}
void positions(int* pos_i, int* pos_j){
int shift = 0;
for(int i=0 ; i<P ; i++){
pos_i[i] = floor((i + shift) / d);
pos_j[i] = (i + shift) % d;
if((i + shift) % d == d-1){
shift++;
}
}
}
int main(){
// Properties of our GPUs
hipDeviceProp_t prop ;
int count ;
hipGetDeviceCount(&count) ;
for(int i=0 ; i<count ; i++) {
hipGetDeviceProperties(&prop, i) ;
printf("Taille totale de la mémoire globale %ld\n", prop.totalGlobalMem) ;
}
// Define J, A and out
float J [P*4*d*N];
float A [d*d*N];
float out [d*d*N];
get_data(J, A);
// device copies
float d_J [P*4*d*N];
float d_A [d*d*N];
float d_out [d*d*N];
int size = sizeof(float);
testCUDA(hipMalloc((void **)&d_J, size));
testCUDA(hipMalloc((void **)&d_A, size));
testCUDA(hipMalloc((void **)&d_out, size));
testCUDA(hipMemcpy(d_A, &A, size, hipMemcpyHostToDevice));
testCUDA(hipMemcpy(d_J, &J, size, hipMemcpyHostToDevice));
// Define pos_i and pos_j
int pos_i [P];
int pos_j [P];
int *d_pos_i, *d_pos_j;
size = sizeof(int);
testCUDA(hipMalloc((void **)&d_pos_i, size));
testCUDA(hipMalloc((void **)&d_pos_j, size));
positions(pos_i, pos_j);
testCUDA(hipMemcpy(d_pos_i, &pos_j, size, hipMemcpyHostToDevice));
testCUDA(hipMemcpy(d_pos_j, &pos_i, size, hipMemcpyHostToDevice));
// Timer definition and start
float TimerV;
hipEvent_t start, stop;
testCUDA(hipEventCreate(&start));
testCUDA(hipEventCreate(&stop));
testCUDA(hipEventRecord(start, 0));
// Computing rotations
Compute_all_rotations<<<1,1>>>(d_J, d_A, d_out, d_pos_i, d_pos_j);
// Stopping timer
testCUDA(hipEventRecord(stop, 0));
testCUDA(hipEventSynchronize(stop));
testCUDA(hipEventElapsedTime(&TimerV, start, stop));
printf("Exectudtion time: %f ms\n", TimerV);
// Copying and saving result
testCUDA(hipMemcpy(&out, d_out, sizeof(float), hipMemcpyDeviceToHost));
write_result(out);
// Cleanup
testCUDA(hipFree(d_A));
testCUDA(hipFree(d_J));
testCUDA(hipFree(d_out));
testCUDA(hipFree(d_pos_i));
testCUDA(hipFree(d_pos_j));
testCUDA(hipFree(start));
testCUDA(hipFree(stop));
return 0;
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z21Compute_all_rotationsPfS_S_PKiS1_
.globl _Z21Compute_all_rotationsPfS_S_PKiS1_
.p2align 8
.type _Z21Compute_all_rotationsPfS_S_PKiS1_,@function
_Z21Compute_all_rotationsPfS_S_PKiS1_:
s_clause 0x2
s_load_b32 s12, s[0:1], 0x34
s_load_b256 s[4:11], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x20
s_mul_i32 s0, s15, 0x64
v_and_b32_e32 v5, 3, v0
v_dual_mov_b32 v1, s0 :: v_dual_and_b32 v2, 1, v0
v_lshlrev_b32_e32 v6, 2, v0
v_lshrrev_b32_e32 v3, 2, v0
s_mov_b32 s14, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_eq_u32_e64 s0, 0, v2
v_add_nc_u32_e32 v0, 4, v6
s_delay_alu instid0(VALU_DEP_3)
v_cvt_f32_ubyte0_e32 v7, v3
s_waitcnt lgkmcnt(0)
s_and_b32 s16, s12, 0xffff
v_mov_b32_e32 v2, s11
v_mov_b32_e32 v4, s10
v_cmp_eq_u32_e64 s1, 0, v5
v_cmp_ne_u32_e32 vcc_lo, 0, v5
s_mulk_i32 s16, 0xdc
s_mov_b64 s[12:13], 0
s_or_b32 s17, s16, 3
v_cndmask_b32_e64 v8, s3, v2, s1
v_cndmask_b32_e64 v9, s2, v4, s1
s_or_b32 s18, s16, 2
s_or_b32 s19, s16, 1
s_branch .LBB0_3
.LBB0_1:
s_or_b32 exec_lo, exec_lo, s22
ds_load_b32 v10, v6
ds_load_b32 v11, v0
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[2:3]
v_add_co_u32 v3, s1, s8, v3
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s1, s9, v4, s1
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v10, v10, v11
global_store_b32 v[3:4], v10, off
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s20
s_add_i32 s14, s14, 1
s_add_u32 s12, s12, 4
s_addc_u32 s13, s13, 0
s_cmpk_eq_i32 s12, 0xdc
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_20
.LBB0_3:
s_and_saveexec_b32 s1, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s20, exec_lo, s1
s_cbranch_execz .LBB0_11
v_cmp_lt_i32_e64 s1, 2, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s21, s1
s_xor_b32 s1, exec_lo, s21
s_delay_alu instid0(SALU_CYCLE_1)
s_or_saveexec_b32 s21, s1
v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v11, s17
v_dual_mov_b32 v10, s15 :: v_dual_mov_b32 v3, s2
v_mov_b32_e32 v12, s12
s_xor_b32 exec_lo, exec_lo, s21
s_cbranch_execz .LBB0_10
v_cmp_ne_u32_e64 s1, 2, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s23, s1
s_xor_b32 s1, exec_lo, s23
s_lshl_b32 s22, s14, 2
s_or_saveexec_b32 s1, s1
v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v11, s19
v_dual_mov_b32 v10, s15 :: v_dual_mov_b32 v3, s2
v_mov_b32_e32 v12, s22
s_xor_b32 exec_lo, exec_lo, s1
s_lshl_b32 s22, s14, 2
v_dual_mov_b32 v10, s15 :: v_dual_mov_b32 v3, s10
v_dual_mov_b32 v4, s11 :: v_dual_mov_b32 v11, s18
v_mov_b32_e32 v12, s22
s_or_b32 exec_lo, exec_lo, s1
.LBB0_10:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s21
.LBB0_11:
s_and_not1_saveexec_b32 s1, s20
v_dual_mov_b32 v12, s16 :: v_dual_mov_b32 v11, s12
v_dual_mov_b32 v3, s10 :: v_dual_mov_b32 v4, s11
v_mov_b32_e32 v10, s15
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v3, s1, v3, s12
v_add_co_ci_u32_e64 v4, s1, s13, v4, s1
global_load_b32 v4, v[3:4], off
v_mul_lo_u32 v3, v10, 0x64
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[13:14], null, v4, 10, v[3:4]
v_cvt_f32_u32_e32 v3, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_f32_e32 v4, v7, v3
v_add_nc_u32_e32 v3, v11, v12
v_cvt_i32_f32_e32 v10, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v11, 31, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[3:4]
v_lshlrev_b64 v[10:11], 2, v[10:11]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v3, s1, s4, v3
v_add_co_ci_u32_e64 v4, s1, s5, v4, s1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v10, s1, s6, v10
v_add_co_ci_u32_e64 v11, s1, s7, v11, s1
global_load_b32 v3, v[3:4], off
global_load_b32 v4, v[10:11], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v3, v3, v4
ds_store_b32 v6, v3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s20, s0
s_cbranch_execz .LBB0_2
s_mov_b32 s21, 0
s_mov_b32 s22, exec_lo
v_cmpx_lt_i32_e32 1, v5
s_xor_b32 s22, exec_lo, s22
v_cmp_eq_u32_e64 s1, 2, v5
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 s21, s1, exec_lo
s_and_not1_saveexec_b32 s22, s22
v_cmp_eq_u32_e64 s1, 0, v5
s_and_not1_b32 s21, s21, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s1, s1, exec_lo
s_or_b32 s21, s21, s1
s_or_b32 exec_lo, exec_lo, s22
s_and_saveexec_b32 s22, s21
s_cbranch_execz .LBB0_1
v_add_co_u32 v2, s1, v9, s12
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s1, s13, v8, s1
global_load_b32 v4, v[2:3], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[2:3], null, v4, 10, v[1:2]
v_cvt_f32_u32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v2, v7, v2
v_cvt_i32_f32_e32 v2, v2
s_branch .LBB0_1
.LBB0_20:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21Compute_all_rotationsPfS_S_PKiS1_
.amdhsa_group_segment_fixed_size 160
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 24
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z21Compute_all_rotationsPfS_S_PKiS1_, .Lfunc_end0-_Z21Compute_all_rotationsPfS_S_PKiS1_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 160
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21Compute_all_rotationsPfS_S_PKiS1_
.private_segment_fixed_size: 0
.sgpr_count: 26
.sgpr_spill_count: 0
.symbol: _Z21Compute_all_rotationsPfS_S_PKiS1_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting HIP source code to AMD host assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <hip/device_functions.h>
// Function that catches errors
// and reports the file and line where they occurred
void testCUDA(hipError_t error, const char *file, int line) {
if (error != hipSuccess) {
printf("There is an error in file %s at line %d\n", file, line);
exit(EXIT_FAILURE);
}
}
// Has to be defined as a macro so that __FILE__ and __LINE__ expand to the
// location of the call site
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
// Global variables
#define d 10
#define N 10
#define P 55
#define NUMBER_BLOCKS 512
#define THREADS_PER_BLOCKS 4*d
// Function that performs the product of Jacobi rotations
/* This function will be performed N times by N different blocks */
__global__ void Compute_all_rotations(float *J, float *A, float *out, const int *pos_i, const int *pos_j){
__shared__ float temp[4 * d] ; // variable that is to be shared by the threads in the block
int block_j = blockDim.x * 4 * P ;
int idx_J;
int idx_A;
int idx_out;
for (int p=0 ; p<P ; p++) {
// index = threadIdx.x + blockIdx.x*blockDim.x ;
// Perform multiplications
if (threadIdx.x % 4 == 0 ) {
idx_J = 0 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 1 ) {
idx_J = 1 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2 ) {
idx_J = 2 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 3 ) {
idx_J = 3 + 4 * p + block_j;
idx_A = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
temp[threadIdx.x] = J[idx_J] * A[idx_A] ;
__syncthreads(); // synchronize threads
// Perform additions
if (threadIdx.x % 2 == 0){
if (threadIdx.x % 4 == 0){
idx_out = blockIdx.x * d * d + d * pos_i[p] + floorf(threadIdx.x / 4);
}
else if (threadIdx.x % 4 == 2){
idx_out = blockIdx.x * d * d + d * pos_j[p] + floorf(threadIdx.x / 4);
}
out[idx_out] = temp[threadIdx.x] + temp[threadIdx.x + 1] ;
}
__syncthreads(); // synchronize threads
}
}
// The following function reads the data stored in CSV files and stores it in arrays
void read_file(const char * fname, float *array, int pos){
FILE *file;
char tampon[sizeof(float)];
int actuel = 0;
char c;
int count;
file = fopen (fname, "r");
while ((c = fgetc(file)) != EOF) {
if (c == ';' || c == '\n') {
array[pos + count] = atof(tampon);
actuel = 0;
memset(tampon, 0, sizeof tampon);
} else {
tampon[actuel++] = c;
}
}
printf("TEST\n");
fclose (file);
}
void get_data(float *J, float *A){
char fname[100] = {0};
for (int n = 0 ; n<N ; n++){
for (int p = 0 ; p<P ; p++){
snprintf (fname, 100, "files/%i/J_%i.txt", n, p);
read_file (fname, J, P*n*4 + 4*p);
}
snprintf (fname, 100, "files/%i/A.txt", n);
read_file (fname, A, n*d*d);
}
}
void write_result(float* out){
FILE *file;
const char* str = "; ";
char fname[100] = {0};
for (int n=0 ; n<N ; n++) {
snprintf (fname, 100, "files/%i/out.txt", n);
file = fopen(fname, "w");
for (int i=0 ; i<d ; i++) {
for (int j=0 ; j<d ; j++) {
if (j == d-1) {
str = "\n";
}
fprintf(file, "%f %s", out[n*d*d + i*d +j], str);
str = "; ";
}
}
fclose(file);
}
}
void positions(int* pos_i, int* pos_j){
int shift = 0;
for(int i=0 ; i<P ; i++){
pos_i[i] = floor((i + shift) / d);
pos_j[i] = (i + shift) % d;
if((i + shift) % d == d-1){
shift++;
}
}
}
int main(){
// Properties of our GPUs
hipDeviceProp_t prop ;
int count ;
hipGetDeviceCount(&count) ;
for(int i=0 ; i<count ; i++) {
hipGetDeviceProperties(&prop, i) ;
printf("Taille totale de la mémoire globale %ld\n", prop.totalGlobalMem) ;
}
// Define J, A and out
float J [P*4*d*N];
float A [d*d*N];
float out [d*d*N];
get_data(J, A);
// device copies
float d_J [P*4*d*N];
float d_A [d*d*N];
float d_out [d*d*N];
int size = sizeof(float);
testCUDA(hipMalloc((void **)&d_J, size));
testCUDA(hipMalloc((void **)&d_A, size));
testCUDA(hipMalloc((void **)&d_out, size));
testCUDA(hipMemcpy(d_A, &A, size, hipMemcpyHostToDevice));
testCUDA(hipMemcpy(d_J, &J, size, hipMemcpyHostToDevice));
// Define pos_i and pos_j
int pos_i [P];
int pos_j [P];
int *d_pos_i, *d_pos_j;
size = sizeof(int);
testCUDA(hipMalloc((void **)&d_pos_i, size));
testCUDA(hipMalloc((void **)&d_pos_j, size));
positions(pos_i, pos_j);
testCUDA(hipMemcpy(d_pos_i, &pos_j, size, hipMemcpyHostToDevice));
testCUDA(hipMemcpy(d_pos_j, &pos_i, size, hipMemcpyHostToDevice));
// Timer definition and start
float TimerV;
hipEvent_t start, stop;
testCUDA(hipEventCreate(&start));
testCUDA(hipEventCreate(&stop));
testCUDA(hipEventRecord(start, 0));
// Computing rotations
Compute_all_rotations<<<1,1>>>(d_J, d_A, d_out, d_pos_i, d_pos_j);
// Stopping timer
testCUDA(hipEventRecord(stop, 0));
testCUDA(hipEventSynchronize(stop));
testCUDA(hipEventElapsedTime(&TimerV, start, stop));
printf("Exectudtion time: %f ms\n", TimerV);
// Copying and saving result
testCUDA(hipMemcpy(&out, d_out, sizeof(float), hipMemcpyDeviceToHost));
write_result(out);
// Cleanup
testCUDA(hipFree(d_A));
testCUDA(hipFree(d_J));
testCUDA(hipFree(d_out));
testCUDA(hipFree(d_pos_i));
testCUDA(hipFree(d_pos_j));
testCUDA(hipFree(start));
testCUDA(hipFree(stop));
return 0;
} | .text
.file "version1.hip"
.globl _Z8testCUDA10hipError_tPKci # -- Begin function _Z8testCUDA10hipError_tPKci
.p2align 4, 0x90
.type _Z8testCUDA10hipError_tPKci,@function
_Z8testCUDA10hipError_tPKci: # @_Z8testCUDA10hipError_tPKci
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB0_2
# %bb.1:
retq
.LBB0_2:
pushq %rax
.cfi_def_cfa_offset 16
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end0:
.size _Z8testCUDA10hipError_tPKci, .Lfunc_end0-_Z8testCUDA10hipError_tPKci
.cfi_endproc
# -- End function
.globl _Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_ # -- Begin function _Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_
.p2align 4, 0x90
.type _Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_,@function
_Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_: # @_Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z21Compute_all_rotationsPfS_S_PKiS1_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end1:
.size _Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_, .Lfunc_end1-_Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_
.cfi_endproc
# -- End function
.globl _Z9read_filePKcPfi # -- Begin function _Z9read_filePKcPfi
.p2align 4, 0x90
.type _Z9read_filePKcPfi,@function
_Z9read_filePKcPfi: # @_Z9read_filePKcPfi
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rbx
movl $.L.str.1, %esi
callq fopen
movq %rax, %r14
xorl %r12d, %r12d
leaq 4(%rsp), %r15
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_6: # in Loop: Header=BB2_1 Depth=1
xorl %r12d, %r12d
movq %r15, %rdi
xorl %esi, %esi
callq strtod
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%rbx)
movl $0, 4(%rsp)
.LBB2_1: # =>This Inner Loop Header: Depth=1
movq %r14, %rdi
callq fgetc
cmpb $10, %al
je .LBB2_6
# %bb.2: # in Loop: Header=BB2_1 Depth=1
movzbl %al, %ecx
cmpl $59, %ecx
je .LBB2_6
# %bb.3: # in Loop: Header=BB2_1 Depth=1
cmpl $255, %ecx
je .LBB2_5
# %bb.4: # in Loop: Header=BB2_1 Depth=1
movslq %r12d, %rcx
incl %r12d
movb %al, 4(%rsp,%rcx)
jmp .LBB2_1
.LBB2_5:
movl $.Lstr, %edi
callq puts@PLT
movq %r14, %rdi
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
jmp fclose # TAILCALL
.Lfunc_end2:
.size _Z9read_filePKcPfi, .Lfunc_end2-_Z9read_filePKcPfi
.cfi_endproc
# -- End function
.globl _Z8get_dataPfS_ # -- Begin function _Z8get_dataPfS_
.p2align 4, 0x90
.type _Z8get_dataPfS_,@function
_Z8get_dataPfS_: # @_Z8get_dataPfS_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movq %rdi, %r14
xorps %xmm0, %xmm0
movaps %xmm0, 96(%rsp)
movaps %xmm0, 80(%rsp)
movaps %xmm0, 64(%rsp)
movaps %xmm0, 48(%rsp)
movaps %xmm0, 32(%rsp)
movaps %xmm0, 16(%rsp)
movl $0, 112(%rsp)
movl $0, 8(%rsp) # 4-byte Folded Spill
leaq 12(%rsp), %r12
jmp .LBB3_1
.p2align 4, 0x90
.LBB3_13: # %_Z9read_filePKcPfi.exit
# in Loop: Header=BB3_1 Depth=1
movl $.Lstr, %edi
callq puts@PLT
movq %r13, %rdi
callq fclose
movl 8(%rsp), %eax # 4-byte Reload
incl %eax
movl %eax, 8(%rsp) # 4-byte Spill
cmpl $10, %eax
je .LBB3_14
.LBB3_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_2 Depth 2
# Child Loop BB3_3 Depth 3
# Child Loop BB3_9 Depth 2
xorl %r13d, %r13d
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_7: # %_Z9read_filePKcPfi.exit16
# in Loop: Header=BB3_2 Depth=2
movl $.Lstr, %edi
callq puts@PLT
movq %r15, %rdi
callq fclose
incl %r13d
cmpl $55, %r13d
je .LBB3_8
.LBB3_2: # Parent Loop BB3_1 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_3 Depth 3
xorl %ebp, %ebp
movl $100, %esi
movl $.L.str.3, %edx
leaq 16(%rsp), %r15
movq %r15, %rdi
movl 8(%rsp), %ecx # 4-byte Reload
movl %r13d, %r8d
xorl %eax, %eax
callq snprintf
movl $.L.str.1, %esi
movq %r15, %rdi
callq fopen
movq %rax, %r15
jmp .LBB3_3
.p2align 4, 0x90
.LBB3_15: # in Loop: Header=BB3_3 Depth=3
xorl %ebp, %ebp
movq %r12, %rdi
xorl %esi, %esi
callq strtod
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r14)
movl $0, 12(%rsp)
.LBB3_3: # Parent Loop BB3_1 Depth=1
# Parent Loop BB3_2 Depth=2
# => This Inner Loop Header: Depth=3
movq %r15, %rdi
callq fgetc
cmpb $10, %al
je .LBB3_15
# %bb.4: # in Loop: Header=BB3_3 Depth=3
movzbl %al, %ecx
cmpl $59, %ecx
je .LBB3_15
# %bb.5: # in Loop: Header=BB3_3 Depth=3
cmpl $255, %ecx
je .LBB3_7
# %bb.6: # in Loop: Header=BB3_3 Depth=3
movslq %ebp, %rcx
incl %ebp
movb %al, 12(%rsp,%rcx)
jmp .LBB3_3
.p2align 4, 0x90
.LBB3_8: # in Loop: Header=BB3_1 Depth=1
xorl %r15d, %r15d
movl $100, %esi
movl $.L.str.4, %edx
leaq 16(%rsp), %r13
movq %r13, %rdi
movl 8(%rsp), %ecx # 4-byte Reload
xorl %eax, %eax
callq snprintf
movl $.L.str.1, %esi
movq %r13, %rdi
callq fopen
movq %rax, %r13
jmp .LBB3_9
.p2align 4, 0x90
.LBB3_16: # in Loop: Header=BB3_9 Depth=2
xorl %r15d, %r15d
movq %r12, %rdi
xorl %esi, %esi
callq strtod
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%rbx)
movl $0, 12(%rsp)
.LBB3_9: # Parent Loop BB3_1 Depth=1
# => This Inner Loop Header: Depth=2
movq %r13, %rdi
callq fgetc
cmpb $10, %al
je .LBB3_16
# %bb.10: # in Loop: Header=BB3_9 Depth=2
movzbl %al, %ecx
cmpl $59, %ecx
je .LBB3_16
# %bb.11: # in Loop: Header=BB3_9 Depth=2
cmpl $255, %ecx
je .LBB3_13
# %bb.12: # in Loop: Header=BB3_9 Depth=2
movslq %r15d, %rcx
incl %r15d
movb %al, 12(%rsp,%rcx)
jmp .LBB3_9
.LBB3_14:
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z8get_dataPfS_, .Lfunc_end3-_Z8get_dataPfS_
.cfi_endproc
# -- End function
.globl _Z12write_resultPf # -- Begin function _Z12write_resultPf
.p2align 4, 0x90
.type _Z12write_resultPf,@function
_Z12write_resultPf: # @_Z12write_resultPf
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, %rbx
xorps %xmm0, %xmm0
movaps %xmm0, 96(%rsp)
movaps %xmm0, 80(%rsp)
movaps %xmm0, 64(%rsp)
movaps %xmm0, 48(%rsp)
movaps %xmm0, 32(%rsp)
movaps %xmm0, 16(%rsp)
movl $0, 112(%rsp)
movl $.L.str.8, %r13d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB4_1: # =>This Loop Header: Depth=1
# Child Loop BB4_2 Depth 2
# Child Loop BB4_3 Depth 3
movl $100, %esi
movl $.L.str.6, %edx
leaq 16(%rsp), %r14
movq %r14, %rdi
movl %r15d, %ecx
xorl %eax, %eax
callq snprintf
movl $.L.str.7, %esi
movq %r14, %rdi
callq fopen
movq %rax, %r12
movq %rbx, 8(%rsp) # 8-byte Spill
movq %rbx, %rbp
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB4_2: # %.preheader
# Parent Loop BB4_1 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB4_3 Depth 3
movq $-10, %rax
movq $-10, %rbx
.p2align 4, 0x90
.LBB4_3: # Parent Loop BB4_1 Depth=1
# Parent Loop BB4_2 Depth=2
# => This Inner Loop Header: Depth=3
incq %rbx
movl $.L.str.5, %edx
cmoveq %r13, %rdx
movss 40(%rbp,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.9, %esi
movq %r12, %rdi
movb $1, %al
callq fprintf
movq %rbx, %rax
testq %rbx, %rbx
jne .LBB4_3
# %bb.4: # in Loop: Header=BB4_2 Depth=2
incq %r14
addq $40, %rbp
cmpq $10, %r14
jne .LBB4_2
# %bb.5: # in Loop: Header=BB4_1 Depth=1
movq %r12, %rdi
callq fclose
incq %r15
movq 8(%rsp), %rbx # 8-byte Reload
addq $400, %rbx # imm = 0x190
cmpq $10, %r15
jne .LBB4_1
# %bb.6:
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z12write_resultPf, .Lfunc_end4-_Z12write_resultPf
.cfi_endproc
# -- End function
.globl _Z9positionsPiS_ # -- Begin function _Z9positionsPiS_
.p2align 4, 0x90
.type _Z9positionsPiS_,@function
_Z9positionsPiS_: # @_Z9positionsPiS_
.cfi_startproc
# %bb.0:
xorl %eax, %eax
movl $3435973837, %ecx # imm = 0xCCCCCCCD
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_1: # =>This Inner Loop Header: Depth=1
leal (%rdx,%rax), %r8d
imulq %rcx, %r8
shrq $35, %r8
movl %r8d, (%rdi,%rax,4)
addl %r8d, %r8d
leal (%r8,%r8,4), %r8d
movl %edx, %r9d
subl %r8d, %r9d
addl %eax, %r9d
subl %edx, %r8d
addl $9, %r8d
movl %r9d, (%rsi,%rax,4)
xorl %r9d, %r9d
cmpl %eax, %r8d
sete %r9b
addl %edx, %r9d
incq %rax
movl %r9d, %edx
cmpq $55, %rax
jne .LBB5_1
# %bb.2:
retq
.Lfunc_end5:
.size _Z9positionsPiS_, .Lfunc_end5-_Z9positionsPiS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $194056, %rsp # imm = 0x2F608
.cfi_def_cfa_offset 194096
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 4(%rsp)
jle .LBB6_3
# %bb.1: # %.lr.ph
leaq 4576(%rsp), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB6_2: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movq 4864(%rsp), %rsi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
incl %ebp
cmpl 4(%rsp), %ebp
jl .LBB6_2
.LBB6_3: # %._crit_edge
leaq 106048(%rsp), %rdi
leaq 14048(%rsp), %rsi
callq _Z8get_dataPfS_
leaq 18048(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB6_4
# %bb.6: # %_Z8testCUDA10hipError_tPKci.exit
leaq 10048(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB6_7
# %bb.8: # %_Z8testCUDA10hipError_tPKci.exit15
leaq 6048(%rsp), %rbx
movl $4, %esi
movq %rbx, %rdi
callq hipMalloc
testl %eax, %eax
jne .LBB6_9
# %bb.10: # %_Z8testCUDA10hipError_tPKci.exit17
leaq 10048(%rsp), %r14
leaq 14048(%rsp), %rsi
movl $4, %edx
movq %r14, %rdi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB6_11
# %bb.12: # %_Z8testCUDA10hipError_tPKci.exit19
leaq 18048(%rsp), %r15
leaq 106048(%rsp), %rsi
movl $4, %edx
movq %r15, %rdi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB6_13
# %bb.14: # %_Z8testCUDA10hipError_tPKci.exit21
leaq 32(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB6_15
# %bb.16: # %_Z8testCUDA10hipError_tPKci.exit23
leaq 24(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB6_54
# %bb.17: # %_Z8testCUDA10hipError_tPKci.exit25.preheader
xorl %eax, %eax
movl $3435973837, %ecx # imm = 0xCCCCCCCD
xorl %edx, %edx
.p2align 4, 0x90
.LBB6_18: # %_Z8testCUDA10hipError_tPKci.exit25
# =>This Inner Loop Header: Depth=1
leal (%rdx,%rax), %esi
imulq %rcx, %rsi
shrq $35, %rsi
movl %esi, 352(%rsp,%rax,4)
addl %esi, %esi
leal (%rsi,%rsi,4), %esi
movl %edx, %edi
subl %esi, %edi
addl %eax, %edi
movl %edi, 128(%rsp,%rax,4)
xorl %esi, %esi
cmpl $9, %edi
sete %sil
addl %edx, %esi
incq %rax
movl %esi, %edx
cmpq $55, %rax
jne .LBB6_18
# %bb.19: # %_Z9positionsPiS_.exit
movq 32(%rsp), %rdi
leaq 128(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB6_20
# %bb.21: # %_Z8testCUDA10hipError_tPKci.exit27
movq 24(%rsp), %rdi
leaq 352(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB6_22
# %bb.23: # %_Z8testCUDA10hipError_tPKci.exit29
leaq 16(%rsp), %rdi
callq hipEventCreate
testl %eax, %eax
jne .LBB6_24
# %bb.25: # %_Z8testCUDA10hipError_tPKci.exit31
leaq 8(%rsp), %rdi
callq hipEventCreate
testl %eax, %eax
jne .LBB6_26
# %bb.27: # %_Z8testCUDA10hipError_tPKci.exit33
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
testl %eax, %eax
jne .LBB6_28
# %bb.29: # %_Z8testCUDA10hipError_tPKci.exit35
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_31
# %bb.30:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq %r15, 120(%rsp)
movq %r14, 112(%rsp)
movq %rbx, 104(%rsp)
movq %rax, 96(%rsp)
movq %rcx, 88(%rsp)
leaq 120(%rsp), %rax
movq %rax, 576(%rsp)
leaq 112(%rsp), %rax
movq %rax, 584(%rsp)
leaq 104(%rsp), %rax
movq %rax, 592(%rsp)
leaq 96(%rsp), %rax
movq %rax, 600(%rsp)
leaq 88(%rsp), %rax
movq %rax, 608(%rsp)
leaq 40(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 576(%rsp), %r9
movl $_Z21Compute_all_rotationsPfS_S_PKiS1_, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_31:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
testl %eax, %eax
jne .LBB6_32
# %bb.33: # %_Z8testCUDA10hipError_tPKci.exit37
movq 8(%rsp), %rdi
callq hipEventSynchronize
testl %eax, %eax
jne .LBB6_34
# %bb.35: # %_Z8testCUDA10hipError_tPKci.exit39
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 40(%rsp), %rdi
callq hipEventElapsedTime
testl %eax, %eax
jne .LBB6_36
# %bb.37: # %_Z8testCUDA10hipError_tPKci.exit41
movss 40(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.12, %edi
movb $1, %al
callq printf
leaq 576(%rsp), %rdi
leaq 6048(%rsp), %rsi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB6_38
# %bb.39: # %_Z8testCUDA10hipError_tPKci.exit43
leaq 576(%rsp), %rdi
callq _Z12write_resultPf
leaq 10048(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_40
# %bb.41: # %_Z8testCUDA10hipError_tPKci.exit45
leaq 18048(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_42
# %bb.43: # %_Z8testCUDA10hipError_tPKci.exit47
leaq 6048(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_44
# %bb.45: # %_Z8testCUDA10hipError_tPKci.exit49
movq 32(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_46
# %bb.47: # %_Z8testCUDA10hipError_tPKci.exit51
movq 24(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_48
# %bb.49: # %_Z8testCUDA10hipError_tPKci.exit53
movq 16(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_50
# %bb.51: # %_Z8testCUDA10hipError_tPKci.exit55
movq 8(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_52
# %bb.53: # %_Z8testCUDA10hipError_tPKci.exit57
xorl %eax, %eax
addq $194056, %rsp # imm = 0x2F608
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB6_4:
.cfi_def_cfa_offset 194096
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $201, %edx
jmp .LBB6_5
.LBB6_7:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $202, %edx
jmp .LBB6_5
.LBB6_9:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $203, %edx
jmp .LBB6_5
.LBB6_11:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $205, %edx
jmp .LBB6_5
.LBB6_13:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $206, %edx
jmp .LBB6_5
.LBB6_15:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $217, %edx
jmp .LBB6_5
.LBB6_54:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $218, %edx
jmp .LBB6_5
.LBB6_20:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $222, %edx
jmp .LBB6_5
.LBB6_22:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $223, %edx
jmp .LBB6_5
.LBB6_24:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $229, %edx
jmp .LBB6_5
.LBB6_26:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $230, %edx
jmp .LBB6_5
.LBB6_28:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $231, %edx
jmp .LBB6_5
.LBB6_32:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $239, %edx
jmp .LBB6_5
.LBB6_34:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $240, %edx
jmp .LBB6_5
.LBB6_36:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $241, %edx
jmp .LBB6_5
.LBB6_38:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $246, %edx
jmp .LBB6_5
.LBB6_40:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $251, %edx
jmp .LBB6_5
.LBB6_42:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $252, %edx
jmp .LBB6_5
.LBB6_44:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $253, %edx
jmp .LBB6_5
.LBB6_46:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $254, %edx
jmp .LBB6_5
.LBB6_48:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $255, %edx
jmp .LBB6_5
.LBB6_50:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $256, %edx # imm = 0x100
jmp .LBB6_5
.LBB6_52:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $257, %edx # imm = 0x101
.LBB6_5:
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end6:
.size main, .Lfunc_end6-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21Compute_all_rotationsPfS_S_PKiS1_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB8_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB8_2:
retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "There is an error in file %s at line %d\n"
.size .L.str, 41
.type _Z21Compute_all_rotationsPfS_S_PKiS1_,@object # @_Z21Compute_all_rotationsPfS_S_PKiS1_
.section .rodata,"a",@progbits
.globl _Z21Compute_all_rotationsPfS_S_PKiS1_
.p2align 3, 0x0
_Z21Compute_all_rotationsPfS_S_PKiS1_:
.quad _Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_
.size _Z21Compute_all_rotationsPfS_S_PKiS1_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "r"
.size .L.str.1, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "files/%i/J_%i.txt"
.size .L.str.3, 18
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "files/%i/A.txt"
.size .L.str.4, 15
.type .L.str.5,@object # @.str.5
.L.str.5:
	.asciz	"; "
.size .L.str.5, 3
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "files/%i/out.txt"
.size .L.str.6, 17
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "w"
.size .L.str.7, 2
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "\n"
.size .L.str.8, 2
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "%f %s"
.size .L.str.9, 6
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Taille totale de la m\303\251moire globale %ld\n"
.size .L.str.10, 42
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/JbRemy/GPU_programming/master/version1.hip"
.size .L.str.11, 100
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "Exectudtion time: %f ms\n"
.size .L.str.12, 25
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z21Compute_all_rotationsPfS_S_PKiS1_"
.size .L__unnamed_1, 38
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "TEST"
.size .Lstr, 5
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z21Compute_all_rotationsPfS_S_PKiS1_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting from CUDA device assembly to AMD device assembly.

		code for sm_80
Function : _Z21Compute_all_rotationsPfS_S_PKiS1_
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ MOV R1, c[0x0][0x28] ; /* 0x00000a0000017a02 */
/* 0x000fe40000000f00 */
/*0010*/ S2R R0, SR_TID.X ; /* 0x0000000000007919 */
/* 0x000e220000002100 */
/*0020*/ ULDC.64 UR4, c[0x0][0x178] ; /* 0x00005e0000047ab9 */
/* 0x000fe20000000a00 */
/*0030*/ HFMA2.MMA R9, -RZ, RZ, 0, 0 ; /* 0x00000000ff097435 */
/* 0x000fe200000001ff */
/*0040*/ UIADD3 UR4, UP0, UR4, 0x4, URZ ; /* 0x0000000404047890 */
/* 0x000fe2000ff1e03f */
/*0050*/ S2R R7, SR_CTAID.X ; /* 0x0000000000077919 */
/* 0x000e620000002500 */
/*0060*/ MOV R8, 0x5 ; /* 0x0000000500087802 */
/* 0x000fe20000000f00 */
/*0070*/ IMAD.MOV.U32 R4, RZ, RZ, c[0x0][0x180] ; /* 0x00006000ff047624 */
/* 0x000fe200078e00ff */
/*0080*/ UIADD3.X UR5, URZ, UR5, URZ, UP0, !UPT ; /* 0x000000053f057290 */
/* 0x000fe200087fe43f */
/*0090*/ MOV R5, c[0x0][0x184] ; /* 0x0000610000057a02 */
/* 0x000fca0000000f00 */
/*00a0*/ IMAD.U32 R3, RZ, RZ, UR5 ; /* 0x00000005ff037e24 */
/* 0x000fe2000f8e00ff */
/*00b0*/ SHF.R.U32.HI R2, RZ, 0x2, R0 ; /* 0x00000002ff027819 */
/* 0x001fe40000011600 */
/*00c0*/ LOP3.LUT R6, R0, 0x3, RZ, 0xc0, !PT ; /* 0x0000000300067812 */
/* 0x000fe200078ec0ff */
/*00d0*/ IMAD R7, R7, 0x64, RZ ; /* 0x0000006407077824 */
/* 0x002fc600078e02ff */
/*00e0*/ I2F.U32 R2, R2 ; /* 0x0000000200027306 */
/* 0x000e220000201000 */
/*00f0*/ ISETP.NE.AND P0, PT, R6, RZ, PT ; /* 0x000000ff0600720c */
/* 0x000fce0003f05270 */
/*0100*/ FRND.FLOOR R10, R2 ; /* 0x00000002000a7307 */
/* 0x0010640000205000 */
/*0110*/ MOV R2, UR4 ; /* 0x0000000400027c02 */
/* 0x001fe20008000f00 */
/*0120*/ ULDC.64 UR4, c[0x0][0x118] ; /* 0x0000460000047ab9 */
/* 0x000fca0000000a00 */
/*0130*/ BSSY B0, 0x270 ; /* 0x0000013000007945 */
/* 0x000fe20003800000 */
/*0140*/ @!P0 BRA 0x230 ; /* 0x000000e000008947 */
/* 0x000fea0003800000 */
/*0150*/ ISETP.NE.AND P1, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fda0003f25270 */
/*0160*/ @!P1 BRA 0x1f0 ; /* 0x0000008000009947 */
/* 0x000fea0003800000 */
/*0170*/ ISETP.NE.AND P1, PT, R6, 0x2, PT ; /* 0x000000020600780c */
/* 0x000fda0003f25270 */
/*0180*/ @!P1 MOV R12, 0x4 ; /* 0x00000004000c9802 */
/* 0x000fe20000000f00 */
/*0190*/ @P1 IMAD.MOV.U32 R15, RZ, RZ, R5 ; /* 0x000000ffff0f1224 */
/* 0x000fe200078e0005 */
/*01a0*/ @P1 MOV R14, R4 ; /* 0x00000004000e1202 */
/* 0x000fe40000000f00 */
/*01b0*/ @P1 IADD3 R16, R8.reuse, -0x2, RZ ; /* 0xfffffffe08101810 */
/* 0x040fe20007ffe0ff */
/*01c0*/ @!P1 IMAD.WIDE R14, R9, R12, c[0x0][0x178] ; /* 0x00005e00090e9625 */
/* 0x000fe200078e020c */
/*01d0*/ @!P1 IADD3 R16, R8, -0x3, RZ ; /* 0xfffffffd08109810 */
/* 0x000fe20007ffe0ff */
/*01e0*/ BRA 0x260 ; /* 0x0000007000007947 */
/* 0x000fea0003800000 */
/*01f0*/ HFMA2.MMA R14, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0e7435 */
/* 0x000fe200000001ff */
/*0200*/ IADD3 R16, R8, -0x4, RZ ; /* 0xfffffffc08107810 */
/* 0x000fd20007ffe0ff */
/*0210*/ IMAD.WIDE R14, R9, R14, c[0x0][0x180] ; /* 0x00006000090e7625 */
/* 0x000fe200078e020e */
/*0220*/ BRA 0x260 ; /* 0x0000003000007947 */
/* 0x000fea0003800000 */
/*0230*/ MOV R14, 0x4 ; /* 0x00000004000e7802 */
/* 0x000fe40000000f00 */
/*0240*/ IADD3 R16, R8, -0x5, RZ ; /* 0xfffffffb08107810 */
/* 0x000fc60007ffe0ff */
/*0250*/ IMAD.WIDE R14, R9, R14, c[0x0][0x178] ; /* 0x00005e00090e7625 */
/* 0x000fc800078e020e */
/*0260*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0270*/ LDG.E R12, [R14.64] ; /* 0x000000040e0c7981 */
/* 0x000ea4000c1e1900 */
/*0280*/ IMAD R20, R12, 0xa, R7 ; /* 0x0000000a0c147824 */
/* 0x004fe200078e0207 */
/*0290*/ HFMA2.MMA R12, -RZ, RZ, 0, 2.384185791015625e-07 ; /* 0x00000004ff0c7435 */
/* 0x000fc600000001ff */
/*02a0*/ I2F.U32 R13, R20 ; /* 0x00000014000d7306 */
/* 0x000e240000201000 */
/*02b0*/ FADD R21, R10, R13 ; /* 0x0000000d0a157221 */
/* 0x003fe40000000000 */
/*02c0*/ IMAD.MOV.U32 R13, RZ, RZ, c[0x0][0x0] ; /* 0x00000000ff0d7624 */
/* 0x000fc800078e00ff */
/*02d0*/ F2I.TRUNC.NTZ R19, R21 ; /* 0x0000001500137305 */
/* 0x000e22000020f100 */
/*02e0*/ IMAD R17, R13, 0xdc, R16 ; /* 0x000000dc0d117824 */
/* 0x000fc800078e0210 */
/*02f0*/ IMAD.WIDE R16, R17, R12, c[0x0][0x160] ; /* 0x0000580011107625 */
/* 0x000fcc00078e020c */
/*0300*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ea2000c1e1900 */
/*0310*/ IMAD.WIDE R18, R19, R12, c[0x0][0x168] ; /* 0x00005a0013127625 */
/* 0x001fcc00078e020c */
/*0320*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ea2000c1e1900 */
/*0330*/ LOP3.LUT P1, RZ, R0, 0x1, RZ, 0xc0, !PT ; /* 0x0000000100ff7812 */
/* 0x000fe2000782c0ff */
/*0340*/ BSSY B0, 0x4f0 ; /* 0x000001a000007945 */
/* 0x000fe20003800000 */
/*0350*/ FMUL R15, R18, R17 ; /* 0x00000011120f7220 */
/* 0x004fca0000400000 */
/*0360*/ STS [R0.X4], R15 ; /* 0x0000000f00007388 */
/* 0x0001e80000004800 */
/*0370*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0380*/ @P1 BRA 0x4e0 ; /* 0x0000015000001947 */
/* 0x000fea0003800000 */
/*0390*/ BSSY B1, 0x490 ; /* 0x000000f000017945 */
/* 0x001fe20003800000 */
/*03a0*/ @!P0 BRA 0x430 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*03b0*/ ISETP.NE.AND P2, PT, R6, 0x2, PT ; /* 0x000000020600780c */
/* 0x000fda0003f45270 */
/*03c0*/ @P2 BRA 0x480 ; /* 0x000000b000002947 */
/* 0x000fea0003800000 */
/*03d0*/ LDG.E R14, [R4.64] ; /* 0x00000004040e7981 */
/* 0x000ea4000c1e1900 */
/*03e0*/ IMAD R14, R14, 0xa, R7 ; /* 0x0000000a0e0e7824 */
/* 0x004fc800078e0207 */
/*03f0*/ I2F.U32 R11, R14 ; /* 0x0000000e000b7306 */
/* 0x000e240000201000 */
/*0400*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x001fcc0000000000 */
/*0410*/ F2I.TRUNC.NTZ R11, R11 ; /* 0x0000000b000b7305 */
/* 0x000e22000020f100 */
/*0420*/ BRA 0x480 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*0430*/ LDG.E R14, [R2.64+-0x4] ; /* 0xfffffc04020e7981 */
/* 0x000ea4000c1e1900 */
/*0440*/ IMAD R14, R14, 0xa, R7 ; /* 0x0000000a0e0e7824 */
/* 0x004fc800078e0207 */
/*0450*/ I2F.U32 R11, R14 ; /* 0x0000000e000b7306 */
/* 0x000e240000201000 */
/*0460*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x001fcc0000000000 */
/*0470*/ F2I.TRUNC.NTZ R11, R11 ; /* 0x0000000b000b7305 */
/* 0x000e24000020f100 */
/*0480*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0490*/ LDS R16, [R0.X4+0x4] ; /* 0x0000040000107984 */
/* 0x000fe20000004800 */
/*04a0*/ IMAD.WIDE R14, R11, R12, c[0x0][0x170] ; /* 0x00005c000b0e7625 */
/* 0x001fc600078e020c */
/*04b0*/ LDS R17, [R0.X4] ; /* 0x0000000000117984 */
/* 0x000e240000004800 */
/*04c0*/ FADD R17, R16, R17 ; /* 0x0000001110117221 */
/* 0x001fca0000000000 */
/*04d0*/ STG.E [R14.64], R17 ; /* 0x000000110e007986 */
/* 0x0001e4000c101904 */
/*04e0*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x001fea0003800000 */
/*04f0*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0500*/ ISETP.NE.AND P2, PT, R8, 0xdd, PT ; /* 0x000000dd0800780c */
/* 0x000fda0003f45270 */
/*0510*/ @!P2 EXIT ; /* 0x000000000000a94d */
/* 0x000fea0003800000 */
/*0520*/ IADD3 R17, R9, 0x1, RZ ; /* 0x0000000109117810 */
/* 0x000fe20007ffe0ff */
/*0530*/ BSSY B0, 0x630 ; /* 0x000000f000007945 */
/* 0x000fe20003800000 */
/*0540*/ MOV R14, R2 ; /* 0x00000002000e7202 */
/* 0x000fe40000000f00 */
/*0550*/ MOV R15, R3 ; /* 0x00000003000f7202 */
/* 0x000fe20000000f00 */
/*0560*/ IMAD.SHL.U32 R16, R17, 0x4, RZ ; /* 0x0000000411107824 */
/* 0x000fe200078e00ff */
/*0570*/ @!P0 BRA 0x620 ; /* 0x000000a000008947 */
/* 0x000fea0003800000 */
/*0580*/ ISETP.NE.AND P2, PT, R6, 0x1, PT ; /* 0x000000010600780c */
/* 0x000fda0003f45270 */
/*0590*/ @!P2 BRA 0x600 ; /* 0x000000600000a947 */
/* 0x000fea0003800000 */
/*05a0*/ ISETP.NE.AND P2, PT, R6, 0x2, PT ; /* 0x000000020600780c */
/* 0x000fda0003f45270 */
/*05b0*/ @P2 IADD3 R16, R16, 0x3, RZ ; /* 0x0000000310102810 */
/* 0x000fe20007ffe0ff */
/*05c0*/ @P2 IMAD.WIDE R14, R17, R12, c[0x0][0x180] ; /* 0x00006000110e2625 */
/* 0x000fc600078e020c */
/*05d0*/ @!P2 IADD3 R16, R16, 0x2, RZ ; /* 0x000000021010a810 */
/* 0x000fe20007ffe0ff */
/*05e0*/ @!P2 IMAD.WIDE R14, R17, R12, c[0x0][0x178] ; /* 0x00005e00110ea625 */
/* 0x000fe200078e020c */
/*05f0*/ BRA 0x620 ; /* 0x0000002000007947 */
/* 0x000fea0003800000 */
/*0600*/ IMAD.WIDE R14, R17, R12, c[0x0][0x180] ; /* 0x00006000110e7625 */
/* 0x000fe200078e020c */
/*0610*/ MOV R16, R8 ; /* 0x0000000800107202 */
/* 0x000fc60000000f00 */
/*0620*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x000fea0003800000 */
/*0630*/ LDG.E R14, [R14.64] ; /* 0x000000040e0e7981 */
/* 0x000ea4000c1e1900 */
/*0640*/ IMAD R20, R14, 0xa, R7 ; /* 0x0000000a0e147824 */
/* 0x004fc800078e0207 */
/*0650*/ I2F.U32 R17, R20 ; /* 0x0000001400117306 */
/* 0x000e240000201000 */
/*0660*/ FADD R21, R10, R17 ; /* 0x000000110a157221 */
/* 0x001fe40000000000 */
/*0670*/ IMAD R17, R13, 0xdc, R16 ; /* 0x000000dc0d117824 */
/* 0x000fc800078e0210 */
/*0680*/ F2I.TRUNC.NTZ R19, R21 ; /* 0x0000001500137305 */
/* 0x000e22000020f100 */
/*0690*/ IMAD.WIDE R16, R17, R12, c[0x0][0x160] ; /* 0x0000580011107625 */
/* 0x000fcc00078e020c */
/*06a0*/ LDG.E R17, [R16.64] ; /* 0x0000000410117981 */
/* 0x000ea2000c1e1900 */
/*06b0*/ IMAD.WIDE R18, R19, R12, c[0x0][0x168] ; /* 0x00005a0013127625 */
/* 0x001fcc00078e020c */
/*06c0*/ LDG.E R18, [R18.64] ; /* 0x0000000412127981 */
/* 0x000ea2000c1e1900 */
/*06d0*/ BSSY B0, 0x880 ; /* 0x000001a000007945 */
/* 0x000fe20003800000 */
/*06e0*/ FMUL R13, R18, R17 ; /* 0x00000011120d7220 */
/* 0x004fca0000400000 */
/*06f0*/ STS [R0.X4], R13 ; /* 0x0000000d00007388 */
/* 0x0001e80000004800 */
/*0700*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fec0000010000 */
/*0710*/ @P1 BRA 0x870 ; /* 0x0000015000001947 */
/* 0x000fea0003800000 */
/*0720*/ BSSY B1, 0x820 ; /* 0x000000f000017945 */
/* 0x001fe20003800000 */
/*0730*/ @!P0 BRA 0x7c0 ; /* 0x0000008000008947 */
/* 0x000fea0003800000 */
/*0740*/ ISETP.NE.AND P1, PT, R6, 0x2, PT ; /* 0x000000020600780c */
/* 0x000fda0003f25270 */
/*0750*/ @P1 BRA 0x810 ; /* 0x000000b000001947 */
/* 0x000fea0003800000 */
/*0760*/ LDG.E R14, [R4.64+0x4] ; /* 0x00000404040e7981 */
/* 0x000ea4000c1e1900 */
/*0770*/ IMAD R14, R14, 0xa, R7 ; /* 0x0000000a0e0e7824 */
/* 0x004fc800078e0207 */
/*0780*/ I2F.U32 R11, R14 ; /* 0x0000000e000b7306 */
/* 0x000e240000201000 */
/*0790*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x001fcc0000000000 */
/*07a0*/ F2I.TRUNC.NTZ R11, R11 ; /* 0x0000000b000b7305 */
/* 0x000e22000020f100 */
/*07b0*/ BRA 0x810 ; /* 0x0000005000007947 */
/* 0x000fea0003800000 */
/*07c0*/ LDG.E R14, [R2.64] ; /* 0x00000004020e7981 */
/* 0x000ea4000c1e1900 */
/*07d0*/ IMAD R14, R14, 0xa, R7 ; /* 0x0000000a0e0e7824 */
/* 0x004fc800078e0207 */
/*07e0*/ I2F.U32 R11, R14 ; /* 0x0000000e000b7306 */
/* 0x000e240000201000 */
/*07f0*/ FADD R11, R10, R11 ; /* 0x0000000b0a0b7221 */
/* 0x001fcc0000000000 */
/*0800*/ F2I.TRUNC.NTZ R11, R11 ; /* 0x0000000b000b7305 */
/* 0x000e24000020f100 */
/*0810*/ BSYNC B1 ; /* 0x0000000000017941 */
/* 0x000fea0003800000 */
/*0820*/ LDS R14, [R0.X4+0x4] ; /* 0x00000400000e7984 */
/* 0x000fe20000004800 */
/*0830*/ IMAD.WIDE R12, R11, R12, c[0x0][0x170] ; /* 0x00005c000b0c7625 */
/* 0x001fc600078e020c */
/*0840*/ LDS R15, [R0.X4] ; /* 0x00000000000f7984 */
/* 0x000e240000004800 */
/*0850*/ FADD R15, R14, R15 ; /* 0x0000000f0e0f7221 */
/* 0x001fca0000000000 */
/*0860*/ STG.E [R12.64], R15 ; /* 0x0000000f0c007986 */
/* 0x0001e4000c101904 */
/*0870*/ BSYNC B0 ; /* 0x0000000000007941 */
/* 0x001fea0003800000 */
/*0880*/ BAR.SYNC.DEFER_BLOCKING 0x0 ; /* 0x0000000000007b1d */
/* 0x000fe20000010000 */
/*0890*/ IADD3 R2, P1, R2, 0x8, RZ ; /* 0x0000000802027810 */
/* 0x000fe40007f3e0ff */
/*08a0*/ IADD3 R4, P2, R4, 0x8, RZ ; /* 0x0000000804047810 */
/* 0x000fe40007f5e0ff */
/*08b0*/ IADD3.X R3, RZ, R3, RZ, P1, !PT ; /* 0x00000003ff037210 */
/* 0x000fe40000ffe4ff */
/*08c0*/ IADD3.X R5, RZ, R5, RZ, P2, !PT ; /* 0x00000005ff057210 */
/* 0x000fe400017fe4ff */
/*08d0*/ IADD3 R9, R9, 0x2, RZ ; /* 0x0000000209097810 */
/* 0x000fe40007ffe0ff */
/*08e0*/ IADD3 R8, R8, 0x8, RZ ; /* 0x0000000808087810 */
/* 0x000fe20007ffe0ff */
/*08f0*/ BRA 0x130 ; /* 0xfffff83000007947 */
/* 0x000fea000383ffff */
/*0900*/ BRA 0x900; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*0910*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0920*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0930*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0940*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0950*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0960*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0970*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0980*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0990*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09a0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09b0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09c0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*09f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z21Compute_all_rotationsPfS_S_PKiS1_
.globl _Z21Compute_all_rotationsPfS_S_PKiS1_
.p2align 8
.type _Z21Compute_all_rotationsPfS_S_PKiS1_,@function
_Z21Compute_all_rotationsPfS_S_PKiS1_:
s_clause 0x2
s_load_b32 s12, s[0:1], 0x34
s_load_b256 s[4:11], s[0:1], 0x0
s_load_b64 s[2:3], s[0:1], 0x20
s_mul_i32 s0, s15, 0x64
v_and_b32_e32 v5, 3, v0
v_dual_mov_b32 v1, s0 :: v_dual_and_b32 v2, 1, v0
v_lshlrev_b32_e32 v6, 2, v0
v_lshrrev_b32_e32 v3, 2, v0
s_mov_b32 s14, 0
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
v_cmp_eq_u32_e64 s0, 0, v2
v_add_nc_u32_e32 v0, 4, v6
s_delay_alu instid0(VALU_DEP_3)
v_cvt_f32_ubyte0_e32 v7, v3
s_waitcnt lgkmcnt(0)
s_and_b32 s16, s12, 0xffff
v_mov_b32_e32 v2, s11
v_mov_b32_e32 v4, s10
v_cmp_eq_u32_e64 s1, 0, v5
v_cmp_ne_u32_e32 vcc_lo, 0, v5
s_mulk_i32 s16, 0xdc
s_mov_b64 s[12:13], 0
s_or_b32 s17, s16, 3
v_cndmask_b32_e64 v8, s3, v2, s1
v_cndmask_b32_e64 v9, s2, v4, s1
s_or_b32 s18, s16, 2
s_or_b32 s19, s16, 1
s_branch .LBB0_3
.LBB0_1:
s_or_b32 exec_lo, exec_lo, s22
ds_load_b32 v10, v6
ds_load_b32 v11, v0
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_lshlrev_b64 v[3:4], 2, v[2:3]
v_add_co_u32 v3, s1, s8, v3
s_delay_alu instid0(VALU_DEP_1)
v_add_co_ci_u32_e64 v4, s1, s9, v4, s1
s_waitcnt lgkmcnt(0)
v_add_f32_e32 v10, v10, v11
global_store_b32 v[3:4], v10, off
.LBB0_2:
s_or_b32 exec_lo, exec_lo, s20
s_add_i32 s14, s14, 1
s_add_u32 s12, s12, 4
s_addc_u32 s13, s13, 0
s_cmpk_eq_i32 s12, 0xdc
s_waitcnt_vscnt null, 0x0
s_barrier
buffer_gl0_inv
s_cbranch_scc1 .LBB0_20
.LBB0_3:
s_and_saveexec_b32 s1, vcc_lo
s_delay_alu instid0(SALU_CYCLE_1)
s_xor_b32 s20, exec_lo, s1
s_cbranch_execz .LBB0_11
v_cmp_lt_i32_e64 s1, 2, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s21, s1
s_xor_b32 s1, exec_lo, s21
s_delay_alu instid0(SALU_CYCLE_1)
s_or_saveexec_b32 s21, s1
v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v11, s17
v_dual_mov_b32 v10, s15 :: v_dual_mov_b32 v3, s2
v_mov_b32_e32 v12, s12
s_xor_b32 exec_lo, exec_lo, s21
s_cbranch_execz .LBB0_10
v_cmp_ne_u32_e64 s1, 2, v5
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_saveexec_b32 s23, s1
s_xor_b32 s1, exec_lo, s23
s_lshl_b32 s22, s14, 2
s_or_saveexec_b32 s1, s1
v_dual_mov_b32 v4, s3 :: v_dual_mov_b32 v11, s19
v_dual_mov_b32 v10, s15 :: v_dual_mov_b32 v3, s2
v_mov_b32_e32 v12, s22
s_xor_b32 exec_lo, exec_lo, s1
s_lshl_b32 s22, s14, 2
v_dual_mov_b32 v10, s15 :: v_dual_mov_b32 v3, s10
v_dual_mov_b32 v4, s11 :: v_dual_mov_b32 v11, s18
v_mov_b32_e32 v12, s22
s_or_b32 exec_lo, exec_lo, s1
.LBB0_10:
s_delay_alu instid0(SALU_CYCLE_1)
s_or_b32 exec_lo, exec_lo, s21
.LBB0_11:
s_and_not1_saveexec_b32 s1, s20
v_dual_mov_b32 v12, s16 :: v_dual_mov_b32 v11, s12
v_dual_mov_b32 v3, s10 :: v_dual_mov_b32 v4, s11
v_mov_b32_e32 v10, s15
s_or_b32 exec_lo, exec_lo, s1
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v3, s1, v3, s12
v_add_co_ci_u32_e64 v4, s1, s13, v4, s1
global_load_b32 v4, v[3:4], off
v_mul_lo_u32 v3, v10, 0x64
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mad_u64_u32 v[13:14], null, v4, 10, v[3:4]
v_cvt_f32_u32_e32 v3, v13
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_f32_e32 v4, v7, v3
v_add_nc_u32_e32 v3, v11, v12
v_cvt_i32_f32_e32 v10, v4
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_ashrrev_i32_e32 v4, 31, v3
v_ashrrev_i32_e32 v11, 31, v10
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[3:4], 2, v[3:4]
v_lshlrev_b64 v[10:11], 2, v[10:11]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v3, s1, s4, v3
v_add_co_ci_u32_e64 v4, s1, s5, v4, s1
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_co_u32 v10, s1, s6, v10
v_add_co_ci_u32_e64 v11, s1, s7, v11, s1
global_load_b32 v3, v[3:4], off
global_load_b32 v4, v[10:11], off
s_waitcnt vmcnt(0)
v_mul_f32_e32 v3, v3, v4
ds_store_b32 v6, v3
s_waitcnt lgkmcnt(0)
s_barrier
buffer_gl0_inv
s_and_saveexec_b32 s20, s0
s_cbranch_execz .LBB0_2
s_mov_b32 s21, 0
s_mov_b32 s22, exec_lo
v_cmpx_lt_i32_e32 1, v5
s_xor_b32 s22, exec_lo, s22
v_cmp_eq_u32_e64 s1, 2, v5
s_delay_alu instid0(VALU_DEP_1)
s_and_b32 s21, s1, exec_lo
s_and_not1_saveexec_b32 s22, s22
v_cmp_eq_u32_e64 s1, 0, v5
s_and_not1_b32 s21, s21, exec_lo
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
s_and_b32 s1, s1, exec_lo
s_or_b32 s21, s21, s1
s_or_b32 exec_lo, exec_lo, s22
s_and_saveexec_b32 s22, s21
s_cbranch_execz .LBB0_1
v_add_co_u32 v2, s1, v9, s12
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
v_add_co_ci_u32_e64 v3, s1, s13, v8, s1
global_load_b32 v4, v[2:3], off
s_waitcnt vmcnt(0)
v_mad_u64_u32 v[2:3], null, v4, 10, v[1:2]
v_cvt_f32_u32_e32 v2, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_f32_e32 v2, v7, v2
v_cvt_i32_f32_e32 v2, v2
s_branch .LBB0_1
.LBB0_20:
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z21Compute_all_rotationsPfS_S_PKiS1_
.amdhsa_group_segment_fixed_size 160
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 15
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 0
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 0
.amdhsa_next_free_vgpr 15
.amdhsa_next_free_sgpr 24
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z21Compute_all_rotationsPfS_S_PKiS1_, .Lfunc_end0-_Z21Compute_all_rotationsPfS_S_PKiS1_
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 24
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 32
.size: 8
.value_kind: global_buffer
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 160
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z21Compute_all_rotationsPfS_S_PKiS1_
.private_segment_fixed_size: 0
.sgpr_count: 26
.sgpr_spill_count: 0
.symbol: _Z21Compute_all_rotationsPfS_S_PKiS1_.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 15
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |
You are a helpful coding assistant specialized in converting from CUDA host assembly to AMD host assembly.

	.file	"tmpxft_0007e846_00000000-6_version1.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2065:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2065:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.section .rodata.str1.8,"aMS",@progbits,1
.align 8
.LC0:
.string "There is an error in file %s at line %d\n"
.text
.globl _Z8testCUDA9cudaErrorPKci
.type _Z8testCUDA9cudaErrorPKci, @function
_Z8testCUDA9cudaErrorPKci:
.LFB2057:
.cfi_startproc
endbr64
testl %edi, %edi
jne .L8
ret
.L8:
subq $8, %rsp
.cfi_def_cfa_offset 16
movl %edx, %ecx
movq %rsi, %rdx
leaq .LC0(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movl $1, %edi
call exit@PLT
.cfi_endproc
.LFE2057:
.size _Z8testCUDA9cudaErrorPKci, .-_Z8testCUDA9cudaErrorPKci
.section .rodata.str1.1,"aMS",@progbits,1
.LC1:
.string "r"
.LC2:
.string "TEST\n"
.text
.globl _Z9read_filePKcPfi
.type _Z9read_filePKcPfi, @function
_Z9read_filePKcPfi:
.LFB2058:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $24, %rsp
.cfi_def_cfa_offset 80
movq %rsi, %r14
movl %edx, %r12d
movq %fs:40, %rax
movq %rax, 8(%rsp)
xorl %eax, %eax
leaq .LC1(%rip), %rsi
call fopen@PLT
movq %rax, %rbp
movl $0, %ebx
leaq 4(%rsp), %r13
movslq %r12d, %r12
movl $0, %r15d
jmp .L10
.L11:
movslq %ebx, %rdx
movb %al, 4(%rsp,%rdx)
leal 1(%rbx), %ebx
.L10:
movq %rbp, %rdi
call fgetc@PLT
cmpb $-1, %al
je .L18
cmpb $59, %al
je .L16
cmpb $10, %al
jne .L11
.L16:
movl $0, %esi
movq %r13, %rdi
call strtod@PLT
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r14,%r12,4)
movl $0, 0(%r13)
movl %r15d, %ebx
jmp .L10
.L18:
leaq .LC2(%rip), %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
movq %rbp, %rdi
call fclose@PLT
movq 8(%rsp), %rax
subq %fs:40, %rax
jne .L19
addq $24, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L19:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2058:
.size _Z9read_filePKcPfi, .-_Z9read_filePKcPfi
.section .rodata.str1.1
.LC3:
.string "files/%i/J_%i.txt"
.LC4:
.string "files/%i/A.txt"
.text
.globl _Z8get_dataPfS_
.type _Z8get_dataPfS_, @function
_Z8get_dataPfS_:
.LFB2059:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $136, %rsp
.cfi_def_cfa_offset 192
movq %rdi, %r14
movq %rsi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 120(%rsp)
xorl %eax, %eax
movq $0, 16(%rsp)
movq $0, 24(%rsp)
movq $0, 32(%rsp)
movq $0, 40(%rsp)
movq $0, 48(%rsp)
movq $0, 56(%rsp)
movq $0, 64(%rsp)
movq $0, 72(%rsp)
movq $0, 80(%rsp)
movq $0, 88(%rsp)
movq $0, 96(%rsp)
movq $0, 104(%rsp)
movl $0, 112(%rsp)
movl $0, %r15d
movl $0, 4(%rsp)
movl $0, %r13d
leaq 16(%rsp), %r12
.L21:
leal 0(,%r15,4), %ebp
movl $0, %ebx
.L22:
subq $8, %rsp
.cfi_def_cfa_offset 200
pushq %rbx
.cfi_def_cfa_offset 208
movl %r13d, %r9d
leaq .LC3(%rip), %r8
movl $100, %ecx
movl $2, %edx
movl $100, %esi
movq %r12, %rdi
movl $0, %eax
call __snprintf_chk@PLT
addq $16, %rsp
.cfi_def_cfa_offset 192
movl %ebp, %edx
movq %r14, %rsi
movq %r12, %rdi
call _Z9read_filePKcPfi
addl $1, %ebx
addl $4, %ebp
cmpl $55, %ebx
jne .L22
movl %r13d, %r9d
leaq .LC4(%rip), %r8
movl $100, %ecx
movl $2, %edx
movl $100, %esi
movq %r12, %rdi
movl $0, %eax
call __snprintf_chk@PLT
movl 4(%rsp), %ebx
movl %ebx, %edx
movq 8(%rsp), %rsi
movq %r12, %rdi
call _Z9read_filePKcPfi
addl $1, %r13d
movl %ebx, %eax
addl $100, %eax
movl %eax, 4(%rsp)
addl $55, %r15d
cmpl $10, %r13d
jne .L21
movq 120(%rsp), %rax
subq %fs:40, %rax
jne .L27
addq $136, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L27:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2059:
.size _Z8get_dataPfS_, .-_Z8get_dataPfS_
.section .rodata.str1.1
.LC5:
.string "files/%i/out.txt"
.LC6:
.string "w"
.LC7:
.string "; "
.LC8:
.string "%f %s"
.LC9:
.string "\n"
.text
.globl _Z12write_resultPf
.type _Z12write_resultPf, @function
_Z12write_resultPf:
.LFB2060:
.cfi_startproc
endbr64
pushq %r15
.cfi_def_cfa_offset 16
.cfi_offset 15, -16
pushq %r14
.cfi_def_cfa_offset 24
.cfi_offset 14, -24
pushq %r13
.cfi_def_cfa_offset 32
.cfi_offset 13, -32
pushq %r12
.cfi_def_cfa_offset 40
.cfi_offset 12, -40
pushq %rbp
.cfi_def_cfa_offset 48
.cfi_offset 6, -48
pushq %rbx
.cfi_def_cfa_offset 56
.cfi_offset 3, -56
subq $152, %rsp
.cfi_def_cfa_offset 208
movq %rdi, 8(%rsp)
movq %fs:40, %rax
movq %rax, 136(%rsp)
xorl %eax, %eax
movq $0, 32(%rsp)
movq $0, 40(%rsp)
movq $0, 48(%rsp)
movq $0, 56(%rsp)
movq $0, 64(%rsp)
movq $0, 72(%rsp)
movq $0, 80(%rsp)
movq $0, 88(%rsp)
movq $0, 96(%rsp)
movq $0, 104(%rsp)
movq $0, 112(%rsp)
movq $0, 120(%rsp)
movl $0, 128(%rsp)
movl $0, 24(%rsp)
movl $0, 28(%rsp)
leaq .LC7(%rip), %r14
leaq .LC8(%rip), %r12
jmp .L32
.L40:
pxor %xmm0, %xmm0
cvtss2sd 0(%r13,%rbx,4), %xmm0
movq %r14, %rcx
movq %r12, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $1, %eax
call __fprintf_chk@PLT
addq $1, %rbx
.L31:
cmpq $9, %rbx
jne .L40
movl 20(%rsp), %eax
addl $9, %eax
cltq
movq 8(%rsp), %rdi
pxor %xmm0, %xmm0
cvtss2sd (%rdi,%rax,4), %xmm0
leaq .LC9(%rip), %rcx
movq %r12, %rdx
movl $2, %esi
movq %rbp, %rdi
movl $1, %eax
call __fprintf_chk@PLT
addl $10, %r15d
movl 24(%rsp), %eax
leal 100(%rax), %ebx
cmpl %ebx, %r15d
je .L41
.L29:
movl %r15d, 20(%rsp)
movslq %r15d, %rax
movq 8(%rsp), %rdx
leaq (%rdx,%rax,4), %r13
movl $0, %ebx
jmp .L31
.L41:
movq %rbp, %rdi
call fclose@PLT
addl $1, 28(%rsp)
movl 28(%rsp), %eax
cmpl $10, %eax
je .L42
movl %ebx, 24(%rsp)
.L32:
leaq 32(%rsp), %rbx
movl 28(%rsp), %r9d
leaq .LC5(%rip), %r8
movl $100, %ecx
movl $2, %edx
movl $100, %esi
movq %rbx, %rdi
movl $0, %eax
call __snprintf_chk@PLT
leaq .LC6(%rip), %rsi
movq %rbx, %rdi
call fopen@PLT
movq %rax, %rbp
movl 24(%rsp), %r15d
jmp .L29
.L42:
movq 136(%rsp), %rax
subq %fs:40, %rax
jne .L43
addq $152, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %rbp
.cfi_def_cfa_offset 40
popq %r12
.cfi_def_cfa_offset 32
popq %r13
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
ret
.L43:
.cfi_restore_state
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2060:
.size _Z12write_resultPf, .-_Z12write_resultPf
.globl _Z9positionsPiS_
.type _Z9positionsPiS_, @function
_Z9positionsPiS_:
.LFB2061:
.cfi_startproc
endbr64
movq %rdi, %r9
movl $0, %ecx
movl $0, %edi
.L46:
leal (%rdi,%rcx), %edx
movslq %edx, %rax
imulq $1717986919, %rax, %rax
sarq $34, %rax
movl %edx, %r8d
sarl $31, %r8d
subl %r8d, %eax
movl %eax, (%r9,%rcx,4)
leal (%rax,%rax,4), %eax
addl %eax, %eax
subl %eax, %edx
movl %edx, (%rsi,%rcx,4)
cmpl $9, %edx
sete %al
movzbl %al, %eax
addl %eax, %edi
addq $1, %rcx
cmpq $55, %rcx
jne .L46
ret
.cfi_endproc
.LFE2061:
.size _Z9positionsPiS_, .-_Z9positionsPiS_
.globl _Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_
.type _Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_, @function
_Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_:
.LFB2087:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movq %rcx, 16(%rsp)
movq %r8, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 16(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L52
.L48:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L53
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L52:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z21Compute_all_rotationsPfS_S_PKiS1_(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L48
.L53:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2087:
.size _Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_, .-_Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_
.globl _Z21Compute_all_rotationsPfS_S_PKiS1_
.type _Z21Compute_all_rotationsPfS_S_PKiS1_, @function
_Z21Compute_all_rotationsPfS_S_PKiS1_:
.LFB2088:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2088:
.size _Z21Compute_all_rotationsPfS_S_PKiS1_, .-_Z21Compute_all_rotationsPfS_S_PKiS1_
.section .rodata.str1.8
.align 8
.LC10:
.string "Taille totale de la m\303\251moire globale %ld\n"
.align 8
.LC11:
.string "/home/ubuntu/Datasets/stackv2/train-structured/JbRemy/GPU_programming/master/version1.cu"
.section .rodata.str1.1
.LC12:
.string "Exectudtion time: %f ms\n"
.text
.globl main
.type main, @function
main:
.LFB2062:
.cfi_startproc
endbr64
pushq %r14
.cfi_def_cfa_offset 16
.cfi_offset 14, -16
pushq %r13
.cfi_def_cfa_offset 24
.cfi_offset 13, -24
pushq %r12
.cfi_def_cfa_offset 32
.cfi_offset 12, -32
pushq %rbp
.cfi_def_cfa_offset 40
.cfi_offset 6, -40
pushq %rbx
.cfi_def_cfa_offset 48
.cfi_offset 3, -48
leaq -192512(%rsp), %r11
.cfi_def_cfa 11, 192560
.LPSRL0:
subq $4096, %rsp
orq $0, (%rsp)
cmpq %r11, %rsp
jne .LPSRL0
.cfi_def_cfa_register 7
subq $1056, %rsp
.cfi_def_cfa_offset 193616
movq %fs:40, %rax
movq %rax, 193560(%rsp)
xorl %eax, %eax
leaq 4(%rsp), %rdi
call cudaGetDeviceCount@PLT
cmpl $0, 4(%rsp)
jle .L57
movl $0, %ebx
leaq .LC10(%rip), %rbp
.L58:
leaq 512(%rsp), %rdi
movl %ebx, %esi
call cudaGetDeviceProperties_v2@PLT
movq 800(%rsp), %rdx
movq %rbp, %rsi
movl $2, %edi
movl $0, %eax
call __printf_chk@PLT
addl $1, %ebx
cmpl %ebx, 4(%rsp)
jg .L58
.L57:
leaq 1552(%rsp), %r14
leaq 17552(%rsp), %r12
movq %r14, %rsi
movq %r12, %rdi
call _Z8get_dataPfS_
leaq 105552(%rsp), %rbp
movl $4, %esi
movq %rbp, %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $199, %edx
leaq .LC11(%rip), %rbx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 9552(%rsp), %r13
movl $4, %esi
movq %r13, %rdi
call cudaMalloc@PLT
movl %eax, %edi
movl $200, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 13552(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $201, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movl $1, %ecx
movl $4, %edx
movq %r14, %rsi
movq %r13, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $203, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movl $1, %ecx
movl $4, %edx
movq %r12, %rsi
movq %rbp, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $204, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 8(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $215, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 16(%rsp), %rdi
movl $4, %esi
call cudaMalloc@PLT
movl %eax, %edi
movl $216, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 288(%rsp), %r12
leaq 64(%rsp), %rbp
movq %r12, %rsi
movq %rbp, %rdi
call _Z9positionsPiS_
movl $1, %ecx
movl $4, %edx
movq %r12, %rsi
movq 8(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $220, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movl $1, %ecx
movl $4, %edx
movq %rbp, %rsi
movq 16(%rsp), %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $221, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 24(%rsp), %rdi
call cudaEventCreate@PLT
movl %eax, %edi
movl $227, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 32(%rsp), %rdi
call cudaEventCreate@PLT
movl %eax, %edi
movl $228, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movl $0, %esi
movq 24(%rsp), %rdi
call cudaEventRecord@PLT
movl %eax, %edi
movl $229, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movl $1, 52(%rsp)
movl $1, 56(%rsp)
movl $1, 60(%rsp)
movl $1, 40(%rsp)
movl $1, 44(%rsp)
movl $1, 48(%rsp)
movl $0, %r9d
movl $0, %r8d
movq 52(%rsp), %rdx
movl $1, %ecx
movq 40(%rsp), %rdi
movl $1, %esi
call __cudaPushCallConfiguration@PLT
testl %eax, %eax
je .L63
.L59:
movl $0, %esi
movq 32(%rsp), %rdi
call cudaEventRecord@PLT
movl %eax, %edi
movl $237, %edx
leaq .LC11(%rip), %rbx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq 32(%rsp), %rdi
call cudaEventSynchronize@PLT
movl %eax, %edi
movl $238, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 52(%rsp), %rdi
movq 32(%rsp), %rdx
movq 24(%rsp), %rsi
call cudaEventElapsedTime@PLT
movl %eax, %edi
movl $239, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
pxor %xmm0, %xmm0
cvtss2sd 52(%rsp), %xmm0
leaq .LC12(%rip), %rsi
movl $2, %edi
movl $1, %eax
call __printf_chk@PLT
leaq 13552(%rsp), %rbp
leaq 5552(%rsp), %r12
movl $2, %ecx
movl $4, %edx
movq %rbp, %rsi
movq %r12, %rdi
call cudaMemcpy@PLT
movl %eax, %edi
movl $244, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq %r12, %rdi
call _Z12write_resultPf
leaq 9552(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $249, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
leaq 105552(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $250, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq %rbp, %rdi
call cudaFree@PLT
movl %eax, %edi
movl $251, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq 8(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $252, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq 16(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $253, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq 24(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $254, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq 32(%rsp), %rdi
call cudaFree@PLT
movl %eax, %edi
movl $255, %edx
movq %rbx, %rsi
call _Z8testCUDA9cudaErrorPKci
movq 193560(%rsp), %rax
subq %fs:40, %rax
jne .L64
movl $0, %eax
addq $193568, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 48
popq %rbx
.cfi_def_cfa_offset 40
popq %rbp
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r13
.cfi_def_cfa_offset 16
popq %r14
.cfi_def_cfa_offset 8
ret
.L63:
.cfi_restore_state
leaq 13552(%rsp), %rdx
movq %r13, %rsi
leaq 105552(%rsp), %rdi
movq 16(%rsp), %r8
movq 8(%rsp), %rcx
call _Z51__device_stub__Z21Compute_all_rotationsPfS_S_PKiS1_PfS_S_PKiS1_
jmp .L59
.L64:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2062:
.size main, .-main
.section .rodata.str1.8
.align 8
.LC13:
.string "_Z21Compute_all_rotationsPfS_S_PKiS1_"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2090:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC13(%rip), %rdx
movq %rdx, %rcx
leaq _Z21Compute_all_rotationsPfS_S_PKiS1_(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2090:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: | .text
.file "version1.hip"
.globl _Z8testCUDA10hipError_tPKci # -- Begin function _Z8testCUDA10hipError_tPKci
.p2align 4, 0x90
.type _Z8testCUDA10hipError_tPKci,@function
_Z8testCUDA10hipError_tPKci: # @_Z8testCUDA10hipError_tPKci
.cfi_startproc
# %bb.0:
testl %edi, %edi
jne .LBB0_2
# %bb.1:
retq
.LBB0_2:
pushq %rax
.cfi_def_cfa_offset 16
movl $.L.str, %edi
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end0:
.size _Z8testCUDA10hipError_tPKci, .Lfunc_end0-_Z8testCUDA10hipError_tPKci
.cfi_endproc
# -- End function
.globl _Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_ # -- Begin function _Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_
.p2align 4, 0x90
.type _Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_,@function
_Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_: # @_Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_
.cfi_startproc
# %bb.0:
subq $136, %rsp
.cfi_def_cfa_offset 144
movq %rdi, 88(%rsp)
movq %rsi, 80(%rsp)
movq %rdx, 72(%rsp)
movq %rcx, 64(%rsp)
movq %r8, 56(%rsp)
leaq 88(%rsp), %rax
movq %rax, 96(%rsp)
leaq 80(%rsp), %rax
movq %rax, 104(%rsp)
leaq 72(%rsp), %rax
movq %rax, 112(%rsp)
leaq 64(%rsp), %rax
movq %rax, 120(%rsp)
leaq 56(%rsp), %rax
movq %rax, 128(%rsp)
leaq 40(%rsp), %rdi
leaq 24(%rsp), %rsi
leaq 16(%rsp), %rdx
leaq 8(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 24(%rsp), %rcx
movl 32(%rsp), %r8d
leaq 96(%rsp), %r9
movl $_Z21Compute_all_rotationsPfS_S_PKiS1_, %edi
pushq 8(%rsp)
.cfi_adjust_cfa_offset 8
pushq 24(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $152, %rsp
.cfi_adjust_cfa_offset -152
retq
.Lfunc_end1:
.size _Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_, .Lfunc_end1-_Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_
.cfi_endproc
# -- End function
.globl _Z9read_filePKcPfi # -- Begin function _Z9read_filePKcPfi
.p2align 4, 0x90
.type _Z9read_filePKcPfi,@function
_Z9read_filePKcPfi: # @_Z9read_filePKcPfi
.cfi_startproc
# %bb.0:
pushq %r15
.cfi_def_cfa_offset 16
pushq %r14
.cfi_def_cfa_offset 24
pushq %r12
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
pushq %rax
.cfi_def_cfa_offset 48
.cfi_offset %rbx, -40
.cfi_offset %r12, -32
.cfi_offset %r14, -24
.cfi_offset %r15, -16
movq %rsi, %rbx
movl $.L.str.1, %esi
callq fopen
movq %rax, %r14
xorl %r12d, %r12d
leaq 4(%rsp), %r15
jmp .LBB2_1
.p2align 4, 0x90
.LBB2_6: # in Loop: Header=BB2_1 Depth=1
xorl %r12d, %r12d
movq %r15, %rdi
xorl %esi, %esi
callq strtod
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%rbx)
movl $0, 4(%rsp)
.LBB2_1: # =>This Inner Loop Header: Depth=1
movq %r14, %rdi
callq fgetc
cmpb $10, %al
je .LBB2_6
# %bb.2: # in Loop: Header=BB2_1 Depth=1
movzbl %al, %ecx
cmpl $59, %ecx
je .LBB2_6
# %bb.3: # in Loop: Header=BB2_1 Depth=1
cmpl $255, %ecx
je .LBB2_5
# %bb.4: # in Loop: Header=BB2_1 Depth=1
movslq %r12d, %rcx
incl %r12d
movb %al, 4(%rsp,%rcx)
jmp .LBB2_1
.LBB2_5:
movl $.Lstr, %edi
callq puts@PLT
movq %r14, %rdi
addq $8, %rsp
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r12
.cfi_def_cfa_offset 24
popq %r14
.cfi_def_cfa_offset 16
popq %r15
.cfi_def_cfa_offset 8
jmp fclose # TAILCALL
.Lfunc_end2:
.size _Z9read_filePKcPfi, .Lfunc_end2-_Z9read_filePKcPfi
.cfi_endproc
# -- End function
.globl _Z8get_dataPfS_ # -- Begin function _Z8get_dataPfS_
.p2align 4, 0x90
.type _Z8get_dataPfS_,@function
_Z8get_dataPfS_: # @_Z8get_dataPfS_
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rsi, %rbx
movq %rdi, %r14
xorps %xmm0, %xmm0
movaps %xmm0, 96(%rsp)
movaps %xmm0, 80(%rsp)
movaps %xmm0, 64(%rsp)
movaps %xmm0, 48(%rsp)
movaps %xmm0, 32(%rsp)
movaps %xmm0, 16(%rsp)
movl $0, 112(%rsp)
movl $0, 8(%rsp) # 4-byte Folded Spill
leaq 12(%rsp), %r12
jmp .LBB3_1
.p2align 4, 0x90
.LBB3_13: # %_Z9read_filePKcPfi.exit
# in Loop: Header=BB3_1 Depth=1
movl $.Lstr, %edi
callq puts@PLT
movq %r13, %rdi
callq fclose
movl 8(%rsp), %eax # 4-byte Reload
incl %eax
movl %eax, 8(%rsp) # 4-byte Spill
cmpl $10, %eax
je .LBB3_14
.LBB3_1: # %.preheader
# =>This Loop Header: Depth=1
# Child Loop BB3_2 Depth 2
# Child Loop BB3_3 Depth 3
# Child Loop BB3_9 Depth 2
xorl %r13d, %r13d
jmp .LBB3_2
.p2align 4, 0x90
.LBB3_7: # %_Z9read_filePKcPfi.exit16
# in Loop: Header=BB3_2 Depth=2
movl $.Lstr, %edi
callq puts@PLT
movq %r15, %rdi
callq fclose
incl %r13d
cmpl $55, %r13d
je .LBB3_8
.LBB3_2: # Parent Loop BB3_1 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB3_3 Depth 3
xorl %ebp, %ebp
movl $100, %esi
movl $.L.str.3, %edx
leaq 16(%rsp), %r15
movq %r15, %rdi
movl 8(%rsp), %ecx # 4-byte Reload
movl %r13d, %r8d
xorl %eax, %eax
callq snprintf
movl $.L.str.1, %esi
movq %r15, %rdi
callq fopen
movq %rax, %r15
jmp .LBB3_3
.p2align 4, 0x90
.LBB3_15: # in Loop: Header=BB3_3 Depth=3
xorl %ebp, %ebp
movq %r12, %rdi
xorl %esi, %esi
callq strtod
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%r14)
movl $0, 12(%rsp)
.LBB3_3: # Parent Loop BB3_1 Depth=1
# Parent Loop BB3_2 Depth=2
# => This Inner Loop Header: Depth=3
movq %r15, %rdi
callq fgetc
cmpb $10, %al
je .LBB3_15
# %bb.4: # in Loop: Header=BB3_3 Depth=3
movzbl %al, %ecx
cmpl $59, %ecx
je .LBB3_15
# %bb.5: # in Loop: Header=BB3_3 Depth=3
cmpl $255, %ecx
je .LBB3_7
# %bb.6: # in Loop: Header=BB3_3 Depth=3
movslq %ebp, %rcx
incl %ebp
movb %al, 12(%rsp,%rcx)
jmp .LBB3_3
.p2align 4, 0x90
.LBB3_8: # in Loop: Header=BB3_1 Depth=1
xorl %r15d, %r15d
movl $100, %esi
movl $.L.str.4, %edx
leaq 16(%rsp), %r13
movq %r13, %rdi
movl 8(%rsp), %ecx # 4-byte Reload
xorl %eax, %eax
callq snprintf
movl $.L.str.1, %esi
movq %r13, %rdi
callq fopen
movq %rax, %r13
jmp .LBB3_9
.p2align 4, 0x90
.LBB3_16: # in Loop: Header=BB3_9 Depth=2
xorl %r15d, %r15d
movq %r12, %rdi
xorl %esi, %esi
callq strtod
cvtsd2ss %xmm0, %xmm0
movss %xmm0, (%rbx)
movl $0, 12(%rsp)
.LBB3_9: # Parent Loop BB3_1 Depth=1
# => This Inner Loop Header: Depth=2
movq %r13, %rdi
callq fgetc
cmpb $10, %al
je .LBB3_16
# %bb.10: # in Loop: Header=BB3_9 Depth=2
movzbl %al, %ecx
cmpl $59, %ecx
je .LBB3_16
# %bb.11: # in Loop: Header=BB3_9 Depth=2
cmpl $255, %ecx
je .LBB3_13
# %bb.12: # in Loop: Header=BB3_9 Depth=2
movslq %r15d, %rcx
incl %r15d
movb %al, 12(%rsp,%rcx)
jmp .LBB3_9
.LBB3_14:
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end3:
.size _Z8get_dataPfS_, .Lfunc_end3-_Z8get_dataPfS_
.cfi_endproc
# -- End function
.globl _Z12write_resultPf # -- Begin function _Z12write_resultPf
.p2align 4, 0x90
.type _Z12write_resultPf,@function
_Z12write_resultPf: # @_Z12write_resultPf
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %r13
.cfi_def_cfa_offset 40
pushq %r12
.cfi_def_cfa_offset 48
pushq %rbx
.cfi_def_cfa_offset 56
subq $120, %rsp
.cfi_def_cfa_offset 176
.cfi_offset %rbx, -56
.cfi_offset %r12, -48
.cfi_offset %r13, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
movq %rdi, %rbx
xorps %xmm0, %xmm0
movaps %xmm0, 96(%rsp)
movaps %xmm0, 80(%rsp)
movaps %xmm0, 64(%rsp)
movaps %xmm0, 48(%rsp)
movaps %xmm0, 32(%rsp)
movaps %xmm0, 16(%rsp)
movl $0, 112(%rsp)
movl $.L.str.8, %r13d
xorl %r15d, %r15d
.p2align 4, 0x90
.LBB4_1: # =>This Loop Header: Depth=1
# Child Loop BB4_2 Depth 2
# Child Loop BB4_3 Depth 3
movl $100, %esi
movl $.L.str.6, %edx
leaq 16(%rsp), %r14
movq %r14, %rdi
movl %r15d, %ecx
xorl %eax, %eax
callq snprintf
movl $.L.str.7, %esi
movq %r14, %rdi
callq fopen
movq %rax, %r12
movq %rbx, 8(%rsp) # 8-byte Spill
movq %rbx, %rbp
xorl %r14d, %r14d
.p2align 4, 0x90
.LBB4_2: # %.preheader
# Parent Loop BB4_1 Depth=1
# => This Loop Header: Depth=2
# Child Loop BB4_3 Depth 3
movq $-10, %rax
movq $-10, %rbx
.p2align 4, 0x90
.LBB4_3: # Parent Loop BB4_1 Depth=1
# Parent Loop BB4_2 Depth=2
# => This Inner Loop Header: Depth=3
incq %rbx
movl $.L.str.5, %edx
cmoveq %r13, %rdx
movss 40(%rbp,%rax,4), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.9, %esi
movq %r12, %rdi
movb $1, %al
callq fprintf
movq %rbx, %rax
testq %rbx, %rbx
jne .LBB4_3
# %bb.4: # in Loop: Header=BB4_2 Depth=2
incq %r14
addq $40, %rbp
cmpq $10, %r14
jne .LBB4_2
# %bb.5: # in Loop: Header=BB4_1 Depth=1
movq %r12, %rdi
callq fclose
incq %r15
movq 8(%rsp), %rbx # 8-byte Reload
addq $400, %rbx # imm = 0x190
cmpq $10, %r15
jne .LBB4_1
# %bb.6:
addq $120, %rsp
.cfi_def_cfa_offset 56
popq %rbx
.cfi_def_cfa_offset 48
popq %r12
.cfi_def_cfa_offset 40
popq %r13
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.Lfunc_end4:
.size _Z12write_resultPf, .Lfunc_end4-_Z12write_resultPf
.cfi_endproc
# -- End function
.globl _Z9positionsPiS_ # -- Begin function _Z9positionsPiS_
.p2align 4, 0x90
.type _Z9positionsPiS_,@function
_Z9positionsPiS_: # @_Z9positionsPiS_
.cfi_startproc
# %bb.0:
xorl %eax, %eax
movl $3435973837, %ecx # imm = 0xCCCCCCCD
xorl %edx, %edx
.p2align 4, 0x90
.LBB5_1: # =>This Inner Loop Header: Depth=1
leal (%rdx,%rax), %r8d
imulq %rcx, %r8
shrq $35, %r8
movl %r8d, (%rdi,%rax,4)
addl %r8d, %r8d
leal (%r8,%r8,4), %r8d
movl %edx, %r9d
subl %r8d, %r9d
addl %eax, %r9d
subl %edx, %r8d
addl $9, %r8d
movl %r9d, (%rsi,%rax,4)
xorl %r9d, %r9d
cmpl %eax, %r8d
sete %r9b
addl %edx, %r9d
incq %rax
movl %r9d, %edx
cmpq $55, %rax
jne .LBB5_1
# %bb.2:
retq
.Lfunc_end5:
.size _Z9positionsPiS_, .Lfunc_end5-_Z9positionsPiS_
.cfi_endproc
# -- End function
.globl main # -- Begin function main
.p2align 4, 0x90
.type main,@function
main: # @main
.cfi_startproc
# %bb.0:
pushq %rbp
.cfi_def_cfa_offset 16
pushq %r15
.cfi_def_cfa_offset 24
pushq %r14
.cfi_def_cfa_offset 32
pushq %rbx
.cfi_def_cfa_offset 40
subq $194056, %rsp # imm = 0x2F608
.cfi_def_cfa_offset 194096
.cfi_offset %rbx, -40
.cfi_offset %r14, -32
.cfi_offset %r15, -24
.cfi_offset %rbp, -16
leaq 4(%rsp), %rdi
callq hipGetDeviceCount
cmpl $0, 4(%rsp)
jle .LBB6_3
# %bb.1: # %.lr.ph
leaq 4576(%rsp), %rbx
xorl %ebp, %ebp
.p2align 4, 0x90
.LBB6_2: # =>This Inner Loop Header: Depth=1
movq %rbx, %rdi
movl %ebp, %esi
callq hipGetDevicePropertiesR0600
movq 4864(%rsp), %rsi
movl $.L.str.10, %edi
xorl %eax, %eax
callq printf
incl %ebp
cmpl 4(%rsp), %ebp
jl .LBB6_2
.LBB6_3: # %._crit_edge
leaq 106048(%rsp), %rdi
leaq 14048(%rsp), %rsi
callq _Z8get_dataPfS_
leaq 18048(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB6_4
# %bb.6: # %_Z8testCUDA10hipError_tPKci.exit
leaq 10048(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB6_7
# %bb.8: # %_Z8testCUDA10hipError_tPKci.exit15
leaq 6048(%rsp), %rbx
movl $4, %esi
movq %rbx, %rdi
callq hipMalloc
testl %eax, %eax
jne .LBB6_9
# %bb.10: # %_Z8testCUDA10hipError_tPKci.exit17
leaq 10048(%rsp), %r14
leaq 14048(%rsp), %rsi
movl $4, %edx
movq %r14, %rdi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB6_11
# %bb.12: # %_Z8testCUDA10hipError_tPKci.exit19
leaq 18048(%rsp), %r15
leaq 106048(%rsp), %rsi
movl $4, %edx
movq %r15, %rdi
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB6_13
# %bb.14: # %_Z8testCUDA10hipError_tPKci.exit21
leaq 32(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB6_15
# %bb.16: # %_Z8testCUDA10hipError_tPKci.exit23
leaq 24(%rsp), %rdi
movl $4, %esi
callq hipMalloc
testl %eax, %eax
jne .LBB6_54
# %bb.17: # %_Z8testCUDA10hipError_tPKci.exit25.preheader
xorl %eax, %eax
movl $3435973837, %ecx # imm = 0xCCCCCCCD
xorl %edx, %edx
.p2align 4, 0x90
.LBB6_18: # %_Z8testCUDA10hipError_tPKci.exit25
# =>This Inner Loop Header: Depth=1
leal (%rdx,%rax), %esi
imulq %rcx, %rsi
shrq $35, %rsi
movl %esi, 352(%rsp,%rax,4)
addl %esi, %esi
leal (%rsi,%rsi,4), %esi
movl %edx, %edi
subl %esi, %edi
addl %eax, %edi
movl %edi, 128(%rsp,%rax,4)
xorl %esi, %esi
cmpl $9, %edi
sete %sil
addl %edx, %esi
incq %rax
movl %esi, %edx
cmpq $55, %rax
jne .LBB6_18
# %bb.19: # %_Z9positionsPiS_.exit
movq 32(%rsp), %rdi
leaq 128(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB6_20
# %bb.21: # %_Z8testCUDA10hipError_tPKci.exit27
movq 24(%rsp), %rdi
leaq 352(%rsp), %rsi
movl $4, %edx
movl $1, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB6_22
# %bb.23: # %_Z8testCUDA10hipError_tPKci.exit29
leaq 16(%rsp), %rdi
callq hipEventCreate
testl %eax, %eax
jne .LBB6_24
# %bb.25: # %_Z8testCUDA10hipError_tPKci.exit31
leaq 8(%rsp), %rdi
callq hipEventCreate
testl %eax, %eax
jne .LBB6_26
# %bb.27: # %_Z8testCUDA10hipError_tPKci.exit33
movq 16(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
testl %eax, %eax
jne .LBB6_28
# %bb.29: # %_Z8testCUDA10hipError_tPKci.exit35
movabsq $4294967297, %rdi # imm = 0x100000001
movl $1, %esi
movq %rdi, %rdx
movl $1, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq __hipPushCallConfiguration
testl %eax, %eax
jne .LBB6_31
# %bb.30:
movq 32(%rsp), %rax
movq 24(%rsp), %rcx
movq %r15, 120(%rsp)
movq %r14, 112(%rsp)
movq %rbx, 104(%rsp)
movq %rax, 96(%rsp)
movq %rcx, 88(%rsp)
leaq 120(%rsp), %rax
movq %rax, 576(%rsp)
leaq 112(%rsp), %rax
movq %rax, 584(%rsp)
leaq 104(%rsp), %rax
movq %rax, 592(%rsp)
leaq 96(%rsp), %rax
movq %rax, 600(%rsp)
leaq 88(%rsp), %rax
movq %rax, 608(%rsp)
leaq 40(%rsp), %rdi
leaq 72(%rsp), %rsi
leaq 64(%rsp), %rdx
leaq 56(%rsp), %rcx
callq __hipPopCallConfiguration
movq 40(%rsp), %rsi
movl 48(%rsp), %edx
movq 72(%rsp), %rcx
movl 80(%rsp), %r8d
leaq 576(%rsp), %r9
movl $_Z21Compute_all_rotationsPfS_S_PKiS1_, %edi
pushq 56(%rsp)
.cfi_adjust_cfa_offset 8
pushq 72(%rsp)
.cfi_adjust_cfa_offset 8
callq hipLaunchKernel
addq $16, %rsp
.cfi_adjust_cfa_offset -16
.LBB6_31:
movq 8(%rsp), %rdi
xorl %esi, %esi
callq hipEventRecord
testl %eax, %eax
jne .LBB6_32
# %bb.33: # %_Z8testCUDA10hipError_tPKci.exit37
movq 8(%rsp), %rdi
callq hipEventSynchronize
testl %eax, %eax
jne .LBB6_34
# %bb.35: # %_Z8testCUDA10hipError_tPKci.exit39
movq 16(%rsp), %rsi
movq 8(%rsp), %rdx
leaq 40(%rsp), %rdi
callq hipEventElapsedTime
testl %eax, %eax
jne .LBB6_36
# %bb.37: # %_Z8testCUDA10hipError_tPKci.exit41
movss 40(%rsp), %xmm0 # xmm0 = mem[0],zero,zero,zero
cvtss2sd %xmm0, %xmm0
movl $.L.str.12, %edi
movb $1, %al
callq printf
leaq 576(%rsp), %rdi
leaq 6048(%rsp), %rsi
movl $4, %edx
movl $2, %ecx
callq hipMemcpy
testl %eax, %eax
jne .LBB6_38
# %bb.39: # %_Z8testCUDA10hipError_tPKci.exit43
leaq 576(%rsp), %rdi
callq _Z12write_resultPf
leaq 10048(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_40
# %bb.41: # %_Z8testCUDA10hipError_tPKci.exit45
leaq 18048(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_42
# %bb.43: # %_Z8testCUDA10hipError_tPKci.exit47
leaq 6048(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_44
# %bb.45: # %_Z8testCUDA10hipError_tPKci.exit49
movq 32(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_46
# %bb.47: # %_Z8testCUDA10hipError_tPKci.exit51
movq 24(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_48
# %bb.49: # %_Z8testCUDA10hipError_tPKci.exit53
movq 16(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_50
# %bb.51: # %_Z8testCUDA10hipError_tPKci.exit55
movq 8(%rsp), %rdi
callq hipFree
testl %eax, %eax
jne .LBB6_52
# %bb.53: # %_Z8testCUDA10hipError_tPKci.exit57
xorl %eax, %eax
addq $194056, %rsp # imm = 0x2F608
.cfi_def_cfa_offset 40
popq %rbx
.cfi_def_cfa_offset 32
popq %r14
.cfi_def_cfa_offset 24
popq %r15
.cfi_def_cfa_offset 16
popq %rbp
.cfi_def_cfa_offset 8
retq
.LBB6_4:
.cfi_def_cfa_offset 194096
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $201, %edx
jmp .LBB6_5
.LBB6_7:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $202, %edx
jmp .LBB6_5
.LBB6_9:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $203, %edx
jmp .LBB6_5
.LBB6_11:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $205, %edx
jmp .LBB6_5
.LBB6_13:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $206, %edx
jmp .LBB6_5
.LBB6_15:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $217, %edx
jmp .LBB6_5
.LBB6_54:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $218, %edx
jmp .LBB6_5
.LBB6_20:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $222, %edx
jmp .LBB6_5
.LBB6_22:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $223, %edx
jmp .LBB6_5
.LBB6_24:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $229, %edx
jmp .LBB6_5
.LBB6_26:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $230, %edx
jmp .LBB6_5
.LBB6_28:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $231, %edx
jmp .LBB6_5
.LBB6_32:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $239, %edx
jmp .LBB6_5
.LBB6_34:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $240, %edx
jmp .LBB6_5
.LBB6_36:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $241, %edx
jmp .LBB6_5
.LBB6_38:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $246, %edx
jmp .LBB6_5
.LBB6_40:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $251, %edx
jmp .LBB6_5
.LBB6_42:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $252, %edx
jmp .LBB6_5
.LBB6_44:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $253, %edx
jmp .LBB6_5
.LBB6_46:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $254, %edx
jmp .LBB6_5
.LBB6_48:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $255, %edx
jmp .LBB6_5
.LBB6_50:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $256, %edx # imm = 0x100
jmp .LBB6_5
.LBB6_52:
movl $.L.str, %edi
movl $.L.str.11, %esi
movl $257, %edx # imm = 0x101
.LBB6_5:
xorl %eax, %eax
callq printf
movl $1, %edi
callq exit
.Lfunc_end6:
.size main, .Lfunc_end6-main
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_ctor
.type __hip_module_ctor,@function
__hip_module_ctor: # @__hip_module_ctor
.cfi_startproc
# %bb.0:
subq $40, %rsp
.cfi_def_cfa_offset 48
cmpq $0, __hip_gpubin_handle(%rip)
jne .LBB7_2
# %bb.1:
movl $__hip_fatbin_wrapper, %edi
callq __hipRegisterFatBinary
movq %rax, __hip_gpubin_handle(%rip)
.LBB7_2:
movq __hip_gpubin_handle(%rip), %rdi
xorps %xmm0, %xmm0
movups %xmm0, 16(%rsp)
movups %xmm0, (%rsp)
movl $_Z21Compute_all_rotationsPfS_S_PKiS1_, %esi
movl $.L__unnamed_1, %edx
movl $.L__unnamed_1, %ecx
movl $-1, %r8d
xorl %r9d, %r9d
callq __hipRegisterFunction
movl $__hip_module_dtor, %edi
addq $40, %rsp
.cfi_def_cfa_offset 8
jmp atexit # TAILCALL
.Lfunc_end7:
.size __hip_module_ctor, .Lfunc_end7-__hip_module_ctor
.cfi_endproc
# -- End function
.p2align 4, 0x90 # -- Begin function __hip_module_dtor
.type __hip_module_dtor,@function
__hip_module_dtor: # @__hip_module_dtor
.cfi_startproc
# %bb.0:
movq __hip_gpubin_handle(%rip), %rdi
testq %rdi, %rdi
je .LBB8_2
# %bb.1:
pushq %rax
.cfi_def_cfa_offset 16
callq __hipUnregisterFatBinary
movq $0, __hip_gpubin_handle(%rip)
addq $8, %rsp
.cfi_def_cfa_offset 8
.LBB8_2:
retq
.Lfunc_end8:
.size __hip_module_dtor, .Lfunc_end8-__hip_module_dtor
.cfi_endproc
# -- End function
.type .L.str,@object # @.str
.section .rodata.str1.1,"aMS",@progbits,1
.L.str:
.asciz "There is an error in file %s at line %d\n"
.size .L.str, 41
.type _Z21Compute_all_rotationsPfS_S_PKiS1_,@object # @_Z21Compute_all_rotationsPfS_S_PKiS1_
.section .rodata,"a",@progbits
.globl _Z21Compute_all_rotationsPfS_S_PKiS1_
.p2align 3, 0x0
_Z21Compute_all_rotationsPfS_S_PKiS1_:
.quad _Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_
.size _Z21Compute_all_rotationsPfS_S_PKiS1_, 8
.type .L.str.1,@object # @.str.1
.section .rodata.str1.1,"aMS",@progbits,1
.L.str.1:
.asciz "r"
.size .L.str.1, 2
.type .L.str.3,@object # @.str.3
.L.str.3:
.asciz "files/%i/J_%i.txt"
.size .L.str.3, 18
.type .L.str.4,@object # @.str.4
.L.str.4:
.asciz "files/%i/A.txt"
.size .L.str.4, 15
.type .L.str.5,@object # @.str.5
.L.str.5:
.asciz "
.size .L.str.5, 3
.type .L.str.6,@object # @.str.6
.L.str.6:
.asciz "files/%i/out.txt"
.size .L.str.6, 17
.type .L.str.7,@object # @.str.7
.L.str.7:
.asciz "w"
.size .L.str.7, 2
.type .L.str.8,@object # @.str.8
.L.str.8:
.asciz "\n"
.size .L.str.8, 2
.type .L.str.9,@object # @.str.9
.L.str.9:
.asciz "%f %s"
.size .L.str.9, 6
.type .L.str.10,@object # @.str.10
.L.str.10:
.asciz "Taille totale de la m\303\251moire globale %ld\n"
.size .L.str.10, 42
.type .L.str.11,@object # @.str.11
.L.str.11:
.asciz "/home/ubuntu/Datasets/stackv2/train-structured-repos-hip/JbRemy/GPU_programming/master/version1.hip"
.size .L.str.11, 100
.type .L.str.12,@object # @.str.12
.L.str.12:
.asciz "Exectudtion time: %f ms\n"
.size .L.str.12, 25
.type .L__unnamed_1,@object # @0
.L__unnamed_1:
.asciz "_Z21Compute_all_rotationsPfS_S_PKiS1_"
.size .L__unnamed_1, 38
.type __hip_fatbin_wrapper,@object # @__hip_fatbin_wrapper
.section .hipFatBinSegment,"a",@progbits
.p2align 3, 0x0
__hip_fatbin_wrapper:
.long 1212764230 # 0x48495046
.long 1 # 0x1
.quad __hip_fatbin
.quad 0
.size __hip_fatbin_wrapper, 24
.type __hip_gpubin_handle,@object # @__hip_gpubin_handle
.local __hip_gpubin_handle
.comm __hip_gpubin_handle,8,8
.section .init_array,"aw",@init_array
.p2align 3, 0x0
.quad __hip_module_ctor
.type __hip_cuid_,@object # @__hip_cuid_
.bss
.globl __hip_cuid_
__hip_cuid_:
.byte 0 # 0x0
.size __hip_cuid_, 1
.type .Lstr,@object # @str
.section .rodata.str1.1,"aMS",@progbits,1
.Lstr:
.asciz "TEST"
.size .Lstr, 5
.section ".linker-options","e",@llvm_linker_options
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym _Z36__device_stub__Compute_all_rotationsPfS_S_PKiS1_
.addrsig_sym __hip_module_ctor
.addrsig_sym __hip_module_dtor
.addrsig_sym _Z21Compute_all_rotationsPfS_S_PKiS1_
.addrsig_sym __hip_fatbin
.addrsig_sym __hip_fatbin_wrapper
.addrsig_sym __hip_cuid_ |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA device assembly. | #include <stdio.h>
__global__ void jacobi(double * uold, double * unew, double * f, int N, double lambda2){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index < N*N){
int M = N+2;
int i = index + M + 1 + 2 * (index / N);
unew[i] = 0.25 * (uold[i-1] + uold[i+1] + uold[i-M] + uold[i+M] + lambda2*f[i]);
}
} | code for sm_80
Function : _Z6jacobiPdS_S_id
.headerflags @"EF_CUDA_TEXMODE_UNIFIED EF_CUDA_64BIT_ADDRESS EF_CUDA_SM80 EF_CUDA_VIRTUAL_SM(EF_CUDA_SM80)"
/*0000*/ IMAD.MOV.U32 R1, RZ, RZ, c[0x0][0x28] ; /* 0x00000a00ff017624 */
/* 0x000fe400078e00ff */
/*0010*/ S2R R0, SR_CTAID.Y ; /* 0x0000000000007919 */
/* 0x000e220000002600 */
/*0020*/ ULDC UR5, c[0x0][0x178] ; /* 0x00005e0000057ab9 */
/* 0x000fe40000000800 */
/*0030*/ UIMAD UR4, UR5, UR5, URZ ; /* 0x00000005050472a4 */
/* 0x000fe2000f8e023f */
/*0040*/ S2R R3, SR_CTAID.X ; /* 0x0000000000037919 */
/* 0x000e280000002500 */
/*0050*/ S2R R5, SR_TID.Y ; /* 0x0000000000057919 */
/* 0x000e680000002200 */
/*0060*/ S2R R7, SR_TID.X ; /* 0x0000000000077919 */
/* 0x000ea20000002100 */
/*0070*/ IMAD R0, R0, c[0x0][0xc], R3 ; /* 0x0000030000007a24 */
/* 0x001fc800078e0203 */
/*0080*/ IMAD R0, R0, c[0x0][0x4], R5 ; /* 0x0000010000007a24 */
/* 0x002fc800078e0205 */
/*0090*/ IMAD R0, R0, c[0x0][0x0], R7 ; /* 0x0000000000007a24 */
/* 0x004fca00078e0207 */
/*00a0*/ ISETP.GE.AND P0, PT, R0, UR4, PT ; /* 0x0000000400007c0c */
/* 0x000fda000bf06270 */
/*00b0*/ @P0 EXIT ; /* 0x000000000000094d */
/* 0x000fea0003800000 */
/*00c0*/ IABS R5, c[0x0][0x178] ; /* 0x00005e0000057a13 */
/* 0x000fe20000000000 */
/*00d0*/ UIADD3 UR4, UR5, 0x2, URZ ; /* 0x0000000205047890 */
/* 0x000fe4000fffe03f */
/*00e0*/ ULDC.64 UR6, c[0x0][0x118] ; /* 0x0000460000067ab9 */
/* 0x000fe20000000a00 */
/*00f0*/ I2F.RP R4, R5 ; /* 0x0000000500047306 */
/* 0x000e300000209400 */
/*0100*/ MUFU.RCP R4, R4 ; /* 0x0000000400047308 */
/* 0x001e240000001000 */
/*0110*/ IADD3 R2, R4, 0xffffffe, RZ ; /* 0x0ffffffe04027810 */
/* 0x001fcc0007ffe0ff */
/*0120*/ F2I.FTZ.U32.TRUNC.NTZ R3, R2 ; /* 0x0000000200037305 */
/* 0x000064000021f000 */
/*0130*/ HFMA2.MMA R2, -RZ, RZ, 0, 0 ; /* 0x00000000ff027435 */
/* 0x001fe200000001ff */
/*0140*/ IMAD.MOV R6, RZ, RZ, -R3 ; /* 0x000000ffff067224 */
/* 0x002fc800078e0a03 */
/*0150*/ IMAD R7, R6, R5, RZ ; /* 0x0000000506077224 */
/* 0x000fe200078e02ff */
/*0160*/ IABS R6, R0 ; /* 0x0000000000067213 */
/* 0x000fc80000000000 */
/*0170*/ IMAD.HI.U32 R3, R3, R7, R2 ; /* 0x0000000703037227 */
/* 0x000fe200078e0002 */
/*0180*/ LOP3.LUT R2, R0.reuse, c[0x0][0x178], RZ, 0x3c, !PT ; /* 0x00005e0000027a12 */
/* 0x040fe400078e3cff */
/*0190*/ IADD3 R0, R0, UR4, RZ ; /* 0x0000000400007c10 */
/* 0x000fe4000fffe0ff */
/*01a0*/ ISETP.GE.AND P1, PT, R2, RZ, PT ; /* 0x000000ff0200720c */
/* 0x000fe20003f26270 */
/*01b0*/ IMAD.HI.U32 R3, R3, R6, RZ ; /* 0x0000000603037227 */
/* 0x000fc800078e00ff */
/*01c0*/ IMAD.MOV R4, RZ, RZ, -R3 ; /* 0x000000ffff047224 */
/* 0x000fc800078e0a03 */
/*01d0*/ IMAD R4, R5, R4, R6 ; /* 0x0000000405047224 */
/* 0x000fca00078e0206 */
/*01e0*/ ISETP.GT.U32.AND P2, PT, R5, R4, PT ; /* 0x000000040500720c */
/* 0x000fda0003f44070 */
/*01f0*/ @!P2 IMAD.IADD R4, R4, 0x1, -R5 ; /* 0x000000010404a824 */
/* 0x000fe200078e0a05 */
/*0200*/ @!P2 IADD3 R3, R3, 0x1, RZ ; /* 0x000000010303a810 */
/* 0x000fe40007ffe0ff */
/*0210*/ ISETP.NE.AND P2, PT, RZ, c[0x0][0x178], PT ; /* 0x00005e00ff007a0c */
/* 0x000fe40003f45270 */
/*0220*/ ISETP.GE.U32.AND P0, PT, R4, R5, PT ; /* 0x000000050400720c */
/* 0x000fda0003f06070 */
/*0230*/ @P0 IADD3 R3, R3, 0x1, RZ ; /* 0x0000000103030810 */
/* 0x000fc80007ffe0ff */
/*0240*/ @!P1 IADD3 R3, -R3, RZ, RZ ; /* 0x000000ff03039210 */
/* 0x000fe40007ffe1ff */
/*0250*/ @!P2 LOP3.LUT R3, RZ, c[0x0][0x178], RZ, 0x33, !PT ; /* 0x00005e00ff03aa12 */
/* 0x000fca00078e33ff */
/*0260*/ IMAD R4, R3, 0x2, R0 ; /* 0x0000000203047824 */
/* 0x000fe200078e0200 */
/*0270*/ MOV R3, 0x8 ; /* 0x0000000800037802 */
/* 0x000fc80000000f00 */
/*0280*/ IADD3 R0, R4.reuse, 0x1, RZ ; /* 0x0000000104007810 */
/* 0x040fe20007ffe0ff */
/*0290*/ IMAD.WIDE R4, R4, R3, c[0x0][0x160] ; /* 0x0000580004047625 */
/* 0x000fc600078e0203 */
/*02a0*/ IADD3 R10, R0.reuse, -UR4, RZ ; /* 0x80000004000a7c10 */
/* 0x040fe4000fffe0ff */
/*02b0*/ LDG.E.64 R6, [R4.64+0x10] ; /* 0x0000100604067981 */
/* 0x000ea2000c1e1b00 */
/*02c0*/ IADD3 R12, R0, UR4, RZ ; /* 0x00000004000c7c10 */
/* 0x000fe4000fffe0ff */
/*02d0*/ IMAD.WIDE R10, R10, R3.reuse, c[0x0][0x160] ; /* 0x000058000a0a7625 */
/* 0x080fe200078e0203 */
/*02e0*/ LDG.E.64 R8, [R4.64] ; /* 0x0000000604087981 */
/* 0x000ea6000c1e1b00 */
/*02f0*/ IMAD.WIDE R12, R12, R3.reuse, c[0x0][0x160] ; /* 0x000058000c0c7625 */
/* 0x080fe400078e0203 */
/*0300*/ LDG.E.64 R10, [R10.64] ; /* 0x000000060a0a7981 */
/* 0x000ee4000c1e1b00 */
/*0310*/ IMAD.WIDE R14, R0, R3, c[0x0][0x170] ; /* 0x00005c00000e7625 */
/* 0x000fc400078e0203 */
/*0320*/ LDG.E.64 R12, [R12.64] ; /* 0x000000060c0c7981 */
/* 0x000f28000c1e1b00 */
/*0330*/ LDG.E.64 R14, [R14.64] ; /* 0x000000060e0e7981 */
/* 0x000f62000c1e1b00 */
/*0340*/ IMAD.WIDE R2, R0, R3, c[0x0][0x168] ; /* 0x00005a0000027625 */
/* 0x000fe200078e0203 */
/*0350*/ DADD R6, R6, R8 ; /* 0x0000000006067229 */
/* 0x004ecc0000000008 */
/*0360*/ DADD R6, R6, R10 ; /* 0x0000000006067229 */
/* 0x008f0c000000000a */
/*0370*/ DADD R6, R6, R12 ; /* 0x0000000006067229 */
/* 0x010f4c000000000c */
/*0380*/ DFMA R6, R14, c[0x0][0x180], R6 ; /* 0x000060000e067a2b */
/* 0x020e0c0000000006 */
/*0390*/ DMUL R6, R6, 0.25 ; /* 0x3fd0000006067828 */
/* 0x001e0e0000000000 */
/*03a0*/ STG.E.64 [R2.64], R6 ; /* 0x0000000602007986 */
/* 0x001fe2000c101b06 */
/*03b0*/ EXIT ; /* 0x000000000000794d */
/* 0x000fea0003800000 */
/*03c0*/ BRA 0x3c0; /* 0xfffffff000007947 */
/* 0x000fc0000383ffff */
/*03d0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03e0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*03f0*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0400*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0410*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0420*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0430*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0440*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0450*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0460*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
/*0470*/ NOP; /* 0x0000000000007918 */
/* 0x000fc00000000000 */
.......... |
You are a helpful coding assistant specialized in converting CUDA source code to CUDA host assembly. | #include <stdio.h>
__global__ void jacobi(double * uold, double * unew, double * f, int N, double lambda2){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index < N*N){
int M = N+2;
int i = index + M + 1 + 2 * (index / N);
unew[i] = 0.25 * (uold[i-1] + uold[i+1] + uold[i-M] + uold[i+M] + lambda2*f[i]);
}
} | .file "tmpxft_0014925a_00000000-6_jacobi.cudafe1.cpp"
.text
#APP
#NO_APP
.type _ZL26__cudaUnregisterBinaryUtilv, @function
_ZL26__cudaUnregisterBinaryUtilv:
.LFB2059:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaUnregisterFatBinary@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2059:
.size _ZL26__cudaUnregisterBinaryUtilv, .-_ZL26__cudaUnregisterBinaryUtilv
.globl _Z31__device_stub__Z6jacobiPdS_S_idPdS_S_id
.type _Z31__device_stub__Z6jacobiPdS_S_idPdS_S_id, @function
_Z31__device_stub__Z6jacobiPdS_S_idPdS_S_id:
.LFB2081:
.cfi_startproc
endbr64
subq $168, %rsp
.cfi_def_cfa_offset 176
movq %rdi, 40(%rsp)
movq %rsi, 32(%rsp)
movq %rdx, 24(%rsp)
movl %ecx, 20(%rsp)
movsd %xmm0, 8(%rsp)
movq %fs:40, %rax
movq %rax, 152(%rsp)
xorl %eax, %eax
leaq 40(%rsp), %rax
movq %rax, 112(%rsp)
leaq 32(%rsp), %rax
movq %rax, 120(%rsp)
leaq 24(%rsp), %rax
movq %rax, 128(%rsp)
leaq 20(%rsp), %rax
movq %rax, 136(%rsp)
leaq 8(%rsp), %rax
movq %rax, 144(%rsp)
movl $1, 64(%rsp)
movl $1, 68(%rsp)
movl $1, 72(%rsp)
movl $1, 76(%rsp)
movl $1, 80(%rsp)
movl $1, 84(%rsp)
leaq 56(%rsp), %rcx
leaq 48(%rsp), %rdx
leaq 76(%rsp), %rsi
leaq 64(%rsp), %rdi
call __cudaPopCallConfiguration@PLT
testl %eax, %eax
je .L7
.L3:
movq 152(%rsp), %rax
subq %fs:40, %rax
jne .L8
addq $168, %rsp
.cfi_remember_state
.cfi_def_cfa_offset 8
ret
.L7:
.cfi_restore_state
pushq 56(%rsp)
.cfi_def_cfa_offset 184
pushq 56(%rsp)
.cfi_def_cfa_offset 192
leaq 128(%rsp), %r9
movq 92(%rsp), %rcx
movl 100(%rsp), %r8d
movq 80(%rsp), %rsi
movl 88(%rsp), %edx
leaq _Z6jacobiPdS_S_id(%rip), %rdi
call cudaLaunchKernel@PLT
addq $16, %rsp
.cfi_def_cfa_offset 176
jmp .L3
.L8:
call __stack_chk_fail@PLT
.cfi_endproc
.LFE2081:
.size _Z31__device_stub__Z6jacobiPdS_S_idPdS_S_id, .-_Z31__device_stub__Z6jacobiPdS_S_idPdS_S_id
.globl _Z6jacobiPdS_S_id
.type _Z6jacobiPdS_S_id, @function
_Z6jacobiPdS_S_id:
.LFB2082:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
call _Z31__device_stub__Z6jacobiPdS_S_idPdS_S_id
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2082:
.size _Z6jacobiPdS_S_id, .-_Z6jacobiPdS_S_id
.section .rodata.str1.1,"aMS",@progbits,1
.LC0:
.string "_Z6jacobiPdS_S_id"
.text
.type _ZL24__sti____cudaRegisterAllv, @function
_ZL24__sti____cudaRegisterAllv:
.LFB2084:
.cfi_startproc
endbr64
subq $8, %rsp
.cfi_def_cfa_offset 16
leaq _ZL15__fatDeviceText(%rip), %rdi
call __cudaRegisterFatBinary@PLT
movq %rax, %rdi
movq %rax, _ZL20__cudaFatCubinHandle(%rip)
pushq $0
.cfi_def_cfa_offset 24
pushq $0
.cfi_def_cfa_offset 32
pushq $0
.cfi_def_cfa_offset 40
pushq $0
.cfi_def_cfa_offset 48
movl $0, %r9d
movl $-1, %r8d
leaq .LC0(%rip), %rdx
movq %rdx, %rcx
leaq _Z6jacobiPdS_S_id(%rip), %rsi
call __cudaRegisterFunction@PLT
addq $32, %rsp
.cfi_def_cfa_offset 16
movq _ZL20__cudaFatCubinHandle(%rip), %rdi
call __cudaRegisterFatBinaryEnd@PLT
leaq _ZL26__cudaUnregisterBinaryUtilv(%rip), %rdi
call atexit@PLT
addq $8, %rsp
.cfi_def_cfa_offset 8
ret
.cfi_endproc
.LFE2084:
.size _ZL24__sti____cudaRegisterAllv, .-_ZL24__sti____cudaRegisterAllv
.section .init_array,"aw"
.align 8
.quad _ZL24__sti____cudaRegisterAllv
.section .nvFatBinSegment,"aw"
.align 8
.type _ZL15__fatDeviceText, @object
.size _ZL15__fatDeviceText, 24
_ZL15__fatDeviceText:
.long 1180844977
.long 1
.quad fatbinData
.quad 0
.local _ZL20__cudaFatCubinHandle
.comm _ZL20__cudaFatCubinHandle,8,8
.ident "GCC: (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0"
.section .note.GNU-stack,"",@progbits
.section .note.gnu.property,"a"
.align 8
.long 1f - 0f
.long 4f - 1f
.long 5
0:
.string "GNU"
1:
.align 8
.long 0xc0000002
.long 3f - 2f
2:
.long 0x3
3:
.align 8
4: |
You are a helpful coding assistant specialized in converting CUDA source code to HIP/AMD source code. | #include <stdio.h>
__global__ void jacobi(double * uold, double * unew, double * f, int N, double lambda2){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index < N*N){
int M = N+2;
int i = index + M + 1 + 2 * (index / N);
unew[i] = 0.25 * (uold[i-1] + uold[i+1] + uold[i-M] + uold[i+M] + lambda2*f[i]);
}
} | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void jacobi(double * uold, double * unew, double * f, int N, double lambda2){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index < N*N){
int M = N+2;
int i = index + M + 1 + 2 * (index / N);
unew[i] = 0.25 * (uold[i-1] + uold[i+1] + uold[i-M] + uold[i+M] + lambda2*f[i]);
}
} |
You are a helpful coding assistant specialized in converting HIP source code to AMD device assembly. | #include <hip/hip_runtime.h>
#include <stdio.h>
__global__ void jacobi(double * uold, double * unew, double * f, int N, double lambda2){
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(index < N*N){
int M = N+2;
int i = index + M + 1 + 2 * (index / N);
unew[i] = 0.25 * (uold[i-1] + uold[i+1] + uold[i-M] + uold[i+M] + lambda2*f[i]);
}
} | .text
.amdgcn_target "amdgcn-amd-amdhsa--gfx1100"
.protected _Z6jacobiPdS_S_id
.globl _Z6jacobiPdS_S_id
.p2align 8
.type _Z6jacobiPdS_S_id,@function
_Z6jacobiPdS_S_id:
s_clause 0x2
s_load_b32 s3, s[0:1], 0x28
s_load_b32 s4, s[0:1], 0x34
s_load_b32 s2, s[0:1], 0x18
v_bfe_u32 v1, v0, 10, 10
s_waitcnt lgkmcnt(0)
s_mul_i32 s3, s3, s15
s_lshr_b32 s5, s4, 16
s_add_i32 s3, s3, s14
s_delay_alu instid0(SALU_CYCLE_1)
v_mad_u64_u32 v[2:3], null, s3, s5, v[1:2]
v_and_b32_e32 v3, 0x3ff, v0
s_and_b32 s3, s4, 0xffff
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_mad_u64_u32 v[0:1], null, v2, s3, v[3:4]
s_mul_i32 s3, s2, s2
s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
v_cmp_gt_i32_e32 vcc_lo, s3, v0
s_and_saveexec_b32 s3, vcc_lo
s_cbranch_execz .LBB0_2
s_ashr_i32 s3, s2, 31
v_ashrrev_i32_e32 v3, 31, v0
s_add_i32 s4, s2, s3
s_add_i32 s8, s2, 2
s_xor_b32 s4, s4, s3
s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(VALU_DEP_3)
v_cvt_f32_u32_e32 v1, s4
s_sub_i32 s5, 0, s4
v_add_nc_u32_e32 v4, v0, v3
v_add_nc_u32_e32 v0, s8, v0
v_rcp_iflag_f32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_4) | instid1(VALU_DEP_1)
v_xor_b32_e32 v4, v4, v3
v_xor_b32_e32 v3, s3, v3
s_load_b64 s[2:3], s[0:1], 0x10
s_waitcnt_depctr 0xfff
v_mul_f32_e32 v1, 0x4f7ffffe, v1
v_cvt_u32_f32_e32 v1, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v2, s5, v1
v_mul_hi_u32 v2, v1, v2
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_add_nc_u32_e32 v1, v1, v2
v_mul_hi_u32 v1, v4, v1
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_mul_lo_u32 v2, v1, s4
v_sub_nc_u32_e32 v2, v4, v2
v_add_nc_u32_e32 v4, 1, v1
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_subrev_nc_u32_e32 v5, s4, v2
v_cmp_le_u32_e32 vcc_lo, s4, v2
v_dual_cndmask_b32 v2, v2, v5 :: v_dual_cndmask_b32 v1, v1, v4
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
v_cmp_le_u32_e32 vcc_lo, s4, v2
v_add_nc_u32_e32 v4, 1, v1
s_clause 0x1
s_load_b128 s[4:7], s[0:1], 0x0
s_load_b64 s[0:1], s[0:1], 0x20
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_cndmask_b32_e32 v1, v1, v4, vcc_lo
v_xor_b32_e32 v1, v1, v3
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_sub_nc_u32_e32 v1, v1, v3
v_lshl_add_u32 v0, v1, 1, v0
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
v_add_nc_u32_e32 v2, 1, v0
v_ashrrev_i32_e32 v1, 31, v0
v_ashrrev_i32_e32 v3, 31, v2
s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_4)
v_lshlrev_b64 v[0:1], 3, v[0:1]
v_subrev_nc_u32_e32 v7, s8, v2
v_add_nc_u32_e32 v9, s8, v2
v_lshlrev_b64 v[3:4], 3, v[2:3]
s_waitcnt lgkmcnt(0)
s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_3)
v_add_co_u32 v0, vcc_lo, s4, v0
v_add_co_ci_u32_e32 v1, vcc_lo, s5, v1, vcc_lo
v_add_co_u32 v5, vcc_lo, s4, v3
s_delay_alu instid0(VALU_DEP_4)
v_add_co_ci_u32_e32 v6, vcc_lo, s5, v4, vcc_lo
s_clause 0x1
global_load_b64 v[0:1], v[0:1], off
global_load_b64 v[5:6], v[5:6], off offset:8
v_ashrrev_i32_e32 v8, 31, v7
v_ashrrev_i32_e32 v10, 31, v9
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
v_lshlrev_b64 v[7:8], 3, v[7:8]
v_lshlrev_b64 v[9:10], 3, v[9:10]
s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
v_add_co_u32 v7, vcc_lo, s4, v7
v_add_co_ci_u32_e32 v8, vcc_lo, s5, v8, vcc_lo
s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_4)
v_add_co_u32 v9, vcc_lo, s4, v9
v_add_co_ci_u32_e32 v10, vcc_lo, s5, v10, vcc_lo
global_load_b64 v[7:8], v[7:8], off
v_add_co_u32 v11, vcc_lo, s2, v3
global_load_b64 v[9:10], v[9:10], off
v_add_co_ci_u32_e32 v12, vcc_lo, s3, v4, vcc_lo
v_add_co_u32 v2, vcc_lo, s6, v3
v_add_co_ci_u32_e32 v3, vcc_lo, s7, v4, vcc_lo
global_load_b64 v[11:12], v[11:12], off
s_waitcnt vmcnt(3)
v_add_f64 v[0:1], v[0:1], v[5:6]
s_waitcnt vmcnt(2)
s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
v_add_f64 v[0:1], v[0:1], v[7:8]
s_waitcnt vmcnt(1)
v_add_f64 v[0:1], v[0:1], v[9:10]
s_waitcnt vmcnt(0)
s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
v_fma_f64 v[0:1], v[11:12], s[0:1], v[0:1]
v_ldexp_f64 v[0:1], v[0:1], -2
global_store_b64 v[2:3], v[0:1], off
.LBB0_2:
s_nop 0
s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
s_endpgm
.section .rodata,"a",@progbits
.p2align 6, 0x0
.amdhsa_kernel _Z6jacobiPdS_S_id
.amdhsa_group_segment_fixed_size 0
.amdhsa_private_segment_fixed_size 0
.amdhsa_kernarg_size 296
.amdhsa_user_sgpr_count 14
.amdhsa_user_sgpr_dispatch_ptr 0
.amdhsa_user_sgpr_queue_ptr 0
.amdhsa_user_sgpr_kernarg_segment_ptr 1
.amdhsa_user_sgpr_dispatch_id 0
.amdhsa_user_sgpr_private_segment_size 0
.amdhsa_wavefront_size32 1
.amdhsa_uses_dynamic_stack 0
.amdhsa_enable_private_segment 0
.amdhsa_system_sgpr_workgroup_id_x 1
.amdhsa_system_sgpr_workgroup_id_y 1
.amdhsa_system_sgpr_workgroup_id_z 0
.amdhsa_system_sgpr_workgroup_info 0
.amdhsa_system_vgpr_workitem_id 1
.amdhsa_next_free_vgpr 13
.amdhsa_next_free_sgpr 16
.amdhsa_float_round_mode_32 0
.amdhsa_float_round_mode_16_64 0
.amdhsa_float_denorm_mode_32 3
.amdhsa_float_denorm_mode_16_64 3
.amdhsa_dx10_clamp 1
.amdhsa_ieee_mode 1
.amdhsa_fp16_overflow 0
.amdhsa_workgroup_processor_mode 1
.amdhsa_memory_ordered 1
.amdhsa_forward_progress 0
.amdhsa_shared_vgpr_count 0
.amdhsa_exception_fp_ieee_invalid_op 0
.amdhsa_exception_fp_denorm_src 0
.amdhsa_exception_fp_ieee_div_zero 0
.amdhsa_exception_fp_ieee_overflow 0
.amdhsa_exception_fp_ieee_underflow 0
.amdhsa_exception_fp_ieee_inexact 0
.amdhsa_exception_int_div_zero 0
.end_amdhsa_kernel
.text
.Lfunc_end0:
.size _Z6jacobiPdS_S_id, .Lfunc_end0-_Z6jacobiPdS_S_id
.section .AMDGPU.csdata,"",@progbits
.text
.p2alignl 7, 3214868480
.fill 96, 4, 3214868480
.type __hip_cuid_,@object
.section .bss,"aw",@nobits
.globl __hip_cuid_
__hip_cuid_:
.byte 0
.size __hip_cuid_, 1
.ident "AMD clang version 18.0.0git (https://github.com/RadeonOpenCompute/llvm-project roc-6.3.2 25012 e5bf7e55c91490b07c49d8960fa7983d864936c4)"
.section ".note.GNU-stack","",@progbits
.addrsig
.addrsig_sym __hip_cuid_
.amdgpu_metadata
---
amdhsa.kernels:
- .args:
- .address_space: global
.offset: 0
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 8
.size: 8
.value_kind: global_buffer
- .address_space: global
.offset: 16
.size: 8
.value_kind: global_buffer
- .offset: 24
.size: 4
.value_kind: by_value
- .offset: 32
.size: 8
.value_kind: by_value
- .offset: 40
.size: 4
.value_kind: hidden_block_count_x
- .offset: 44
.size: 4
.value_kind: hidden_block_count_y
- .offset: 48
.size: 4
.value_kind: hidden_block_count_z
- .offset: 52
.size: 2
.value_kind: hidden_group_size_x
- .offset: 54
.size: 2
.value_kind: hidden_group_size_y
- .offset: 56
.size: 2
.value_kind: hidden_group_size_z
- .offset: 58
.size: 2
.value_kind: hidden_remainder_x
- .offset: 60
.size: 2
.value_kind: hidden_remainder_y
- .offset: 62
.size: 2
.value_kind: hidden_remainder_z
- .offset: 80
.size: 8
.value_kind: hidden_global_offset_x
- .offset: 88
.size: 8
.value_kind: hidden_global_offset_y
- .offset: 96
.size: 8
.value_kind: hidden_global_offset_z
- .offset: 104
.size: 2
.value_kind: hidden_grid_dims
.group_segment_fixed_size: 0
.kernarg_segment_align: 8
.kernarg_segment_size: 296
.language: OpenCL C
.language_version:
- 2
- 0
.max_flat_workgroup_size: 1024
.name: _Z6jacobiPdS_S_id
.private_segment_fixed_size: 0
.sgpr_count: 18
.sgpr_spill_count: 0
.symbol: _Z6jacobiPdS_S_id.kd
.uniform_work_group_size: 1
.uses_dynamic_stack: false
.vgpr_count: 13
.vgpr_spill_count: 0
.wavefront_size: 32
.workgroup_processor_mode: 1
amdhsa.target: amdgcn-amd-amdhsa--gfx1100
amdhsa.version:
- 1
- 2
...
.end_amdgpu_metadata |