|
| 1 | +; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py |
| 2 | +; RUN: llc -global-isel -amdgpu-fixed-function-abi -stop-after=irtranslator -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope %s |
| 3 | + |
|   | +; Verifies GlobalISel IRTranslator lowering (RUN stops after irtranslator) of an
|   | +; indirect call in an amdgpu_kernel: the callee pointer %fptr arrives as a kernel
|   | +; argument, is G_LOADed from the kernarg segment (see the G_LOAD of the
|   | +; llvm.amdgcn.kernarg.segment.ptr intrinsic result below), and is called via
|   | +; SI_CALL with the full fixed-function-ABI implicit register operands
|   | +; ($sgpr0..$sgpr14, $vgpr31 workitem-ID pack built from the G_SHL/G_OR chain).
|   | +; CHECK lines are autogenerated by update_mir_test_checks.py — do not hand-edit;
|   | +; regenerate with that script if the expected MIR changes.
| 4 | +define amdgpu_kernel void @test_indirect_call_sgpr_ptr(void()* %fptr) {
| 5 | +  ; CHECK-LABEL: name: test_indirect_call_sgpr_ptr
| 6 | +  ; CHECK: bb.1 (%ir-block.0):
| 7 | +  ; CHECK: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr0, $vgpr1, $vgpr2, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
| 8 | +  ; CHECK: [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr2
| 9 | +  ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr1
| 10 | +  ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr0
| 11 | +  ; CHECK: [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr16
| 12 | +  ; CHECK: [[COPY4:%[0-9]+]]:sgpr_32 = COPY $sgpr15
| 13 | +  ; CHECK: [[COPY5:%[0-9]+]]:sgpr_32 = COPY $sgpr14
| 14 | +  ; CHECK: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
| 15 | +  ; CHECK: [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
| 16 | +  ; CHECK: [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
| 17 | +  ; CHECK: [[COPY9:%[0-9]+]]:_(p4) = COPY $sgpr8_sgpr9
| 18 | +  ; CHECK: [[INT:%[0-9]+]]:_(p4) = G_INTRINSIC intrinsic(@llvm.amdgcn.kernarg.segment.ptr)
| 19 | +  ; CHECK: [[LOAD:%[0-9]+]]:sreg_64(p0) = G_LOAD [[INT]](p4) :: (dereferenceable invariant load 8 from %ir.fptr.kernarg.offset.cast, align 16, addrspace 4)
| 20 | +  ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $scc
| 21 | +  ; CHECK: [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY8]]
| 22 | +  ; CHECK: [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
| 23 | +  ; CHECK: [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY9]](p4)
| 24 | +  ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
| 25 | +  ; CHECK: [[PTR_ADD:%[0-9]+]]:_(p4) = G_PTR_ADD [[COPY12]], [[C]](s64)
| 26 | +  ; CHECK: [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY6]]
| 27 | +  ; CHECK: [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY5]]
| 28 | +  ; CHECK: [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY4]]
| 29 | +  ; CHECK: [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
| 30 | +  ; CHECK: [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
| 31 | +  ; CHECK: [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
| 32 | +  ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 10
| 33 | +  ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY18]], [[C1]](s32)
| 34 | +  ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY17]], [[SHL]]
| 35 | +  ; CHECK: [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
| 36 | +  ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
| 37 | +  ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY19]], [[C2]](s32)
| 38 | +  ; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[OR]], [[SHL1]]
| 39 | +  ; CHECK: [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $private_rsrc_reg
| 40 | +  ; CHECK: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
| 41 | +  ; CHECK: $sgpr4_sgpr5 = COPY [[COPY10]](p4)
| 42 | +  ; CHECK: $sgpr6_sgpr7 = COPY [[COPY11]](p4)
| 43 | +  ; CHECK: $sgpr8_sgpr9 = COPY [[PTR_ADD]](p4)
| 44 | +  ; CHECK: $sgpr10_sgpr11 = COPY [[COPY13]](s64)
| 45 | +  ; CHECK: $sgpr12 = COPY [[COPY14]](s32)
| 46 | +  ; CHECK: $sgpr13 = COPY [[COPY15]](s32)
| 47 | +  ; CHECK: $sgpr14 = COPY [[COPY16]](s32)
| 48 | +  ; CHECK: $vgpr31 = COPY [[OR1]](s32)
| 49 | +  ; CHECK: $sgpr30_sgpr31 = SI_CALL [[LOAD]](p0), 0, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
| 50 | +  ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $scc
| 51 | +  ; CHECK: S_ENDPGM 0
|   | +  ; The IR under test: an indirect call through the %fptr kernel argument,
|   | +  ; which the checks above show being loaded from the kernarg segment and
|   | +  ; dispatched via SI_CALL.
| 52 | +  call void %fptr()
| 53 | +  ret void
| 54 | +}
0 commit comments