@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -global-isel -mtriple=amdgcn -mcpu=fiji -O0 -stop-after=irtranslator -o - %s | FileCheck %s
+; RUN: llc -global-isel -mtriple=amdgcn -mcpu=gfx940 -O0 -stop-after=irtranslator -o - %s | FileCheck %s

define float @test_atomicrmw_fadd(ptr addrspace(3) %addr) {
  ; CHECK-LABEL: name: test_atomicrmw_fadd
@@ -34,20 +34,172 @@ define float @test_atomicrmw_fsub(ptr addrspace(3) %addr) {
  ; CHECK-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[LOAD]](s32), %bb.1, %13(s32), %bb.2
  ; CHECK-NEXT: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[PHI1]], [[C]]
  ; CHECK-NEXT: [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[PHI1]], [[FSUB]] :: (load store seq_cst seq_cst (s32) on %ir.addr, addrspace 3)
-  ; CHECK-NEXT: [[INTRINSIC:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
-  ; CHECK-NEXT: [[INTRINSIC_W_SIDE_EFFECTS:%[0-9]+]]:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INTRINSIC]](s64)
-  ; CHECK-NEXT: G_BRCOND [[INTRINSIC_W_SIDE_EFFECTS]](s1), %bb.3
+  ; CHECK-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
+  ; CHECK-NEXT: [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INT]](s64)
+  ; CHECK-NEXT: G_BRCOND [[INT1]](s1), %bb.3
  ; CHECK-NEXT: G_BR %bb.2
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: bb.3.atomicrmw.end:
  ; CHECK-NEXT: [[PHI2:%[0-9]+]]:_(s32) = G_PHI [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32), %bb.2
-  ; CHECK-NEXT: [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INTRINSIC]](s64), %bb.2
+  ; CHECK-NEXT: [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INT]](s64), %bb.2
  ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
  ; CHECK-NEXT: $vgpr0 = COPY [[PHI2]](s32)
  ; CHECK-NEXT: SI_RETURN implicit $vgpr0
  %oldval = atomicrmw fsub ptr addrspace(3) %addr, float 1.0 seq_cst
  ret float %oldval
}

+define <2 x half> @test_atomicrmw_fadd_vector(ptr addrspace(3) %addr) {
+  ; CHECK-LABEL: name: test_atomicrmw_fadd_vector
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT: successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: liveins: $vgpr0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+  ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+  ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
+  ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p3) :: (load (<2 x s16>) from %ir.addr, addrspace 3)
+  ; CHECK-NEXT: G_BR %bb.2
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.atomicrmw.start:
+  ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: [[PHI:%[0-9]+]]:_(s64) = G_PHI %19(s64), %bb.2, [[C1]](s64), %bb.1
+  ; CHECK-NEXT: [[PHI1:%[0-9]+]]:_(<2 x s16>) = G_PHI [[LOAD]](<2 x s16>), %bb.1, %18(<2 x s16>), %bb.2
+  ; CHECK-NEXT: [[FADD:%[0-9]+]]:_(<2 x s16>) = G_FADD [[PHI1]], [[BUILD_VECTOR]]
+  ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[FADD]](<2 x s16>)
+  ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[PHI1]](<2 x s16>)
+  ; CHECK-NEXT: [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[BITCAST1]], [[BITCAST]] :: (load store seq_cst seq_cst (s32) on %ir.addr, addrspace 3)
+  ; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
+  ; CHECK-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
+  ; CHECK-NEXT: [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INT]](s64)
+  ; CHECK-NEXT: G_BRCOND [[INT1]](s1), %bb.3
+  ; CHECK-NEXT: G_BR %bb.2
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.3.atomicrmw.end:
+  ; CHECK-NEXT: [[PHI2:%[0-9]+]]:_(<2 x s16>) = G_PHI [[BITCAST2]](<2 x s16>), %bb.2
+  ; CHECK-NEXT: [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INT]](s64), %bb.2
+  ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
+  ; CHECK-NEXT: $vgpr0 = COPY [[PHI2]](<2 x s16>)
+  ; CHECK-NEXT: SI_RETURN implicit $vgpr0
+  %oldval = atomicrmw fadd ptr addrspace(3) %addr, <2 x half> <half 1.0, half 1.0> seq_cst
+  ret <2 x half> %oldval
+}
+
+define <2 x half> @test_atomicrmw_fsub_vector(ptr addrspace(3) %addr) {
+  ; CHECK-LABEL: name: test_atomicrmw_fsub_vector
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT: successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: liveins: $vgpr0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+  ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+  ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
+  ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p3) :: (load (<2 x s16>) from %ir.addr, addrspace 3)
+  ; CHECK-NEXT: G_BR %bb.2
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.atomicrmw.start:
+  ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: [[PHI:%[0-9]+]]:_(s64) = G_PHI %19(s64), %bb.2, [[C1]](s64), %bb.1
+  ; CHECK-NEXT: [[PHI1:%[0-9]+]]:_(<2 x s16>) = G_PHI [[LOAD]](<2 x s16>), %bb.1, %18(<2 x s16>), %bb.2
+  ; CHECK-NEXT: [[FSUB:%[0-9]+]]:_(<2 x s16>) = G_FSUB [[PHI1]], [[BUILD_VECTOR]]
+  ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[FSUB]](<2 x s16>)
+  ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[PHI1]](<2 x s16>)
+  ; CHECK-NEXT: [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[BITCAST1]], [[BITCAST]] :: (load store seq_cst seq_cst (s32) on %ir.addr, addrspace 3)
+  ; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
+  ; CHECK-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
+  ; CHECK-NEXT: [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INT]](s64)
+  ; CHECK-NEXT: G_BRCOND [[INT1]](s1), %bb.3
+  ; CHECK-NEXT: G_BR %bb.2
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.3.atomicrmw.end:
+  ; CHECK-NEXT: [[PHI2:%[0-9]+]]:_(<2 x s16>) = G_PHI [[BITCAST2]](<2 x s16>), %bb.2
+  ; CHECK-NEXT: [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INT]](s64), %bb.2
+  ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
+  ; CHECK-NEXT: $vgpr0 = COPY [[PHI2]](<2 x s16>)
+  ; CHECK-NEXT: SI_RETURN implicit $vgpr0
+  %oldval = atomicrmw fsub ptr addrspace(3) %addr, <2 x half> <half 1.0, half 1.0> seq_cst
+  ret <2 x half> %oldval
+}
+
+define <2 x half> @test_atomicrmw_fmin_vector(ptr addrspace(3) %addr) {
+  ; CHECK-LABEL: name: test_atomicrmw_fmin_vector
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT: successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: liveins: $vgpr0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+  ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+  ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
+  ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p3) :: (load (<2 x s16>) from %ir.addr, addrspace 3)
+  ; CHECK-NEXT: G_BR %bb.2
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.atomicrmw.start:
+  ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: [[PHI:%[0-9]+]]:_(s64) = G_PHI %19(s64), %bb.2, [[C1]](s64), %bb.1
+  ; CHECK-NEXT: [[PHI1:%[0-9]+]]:_(<2 x s16>) = G_PHI [[LOAD]](<2 x s16>), %bb.1, %18(<2 x s16>), %bb.2
+  ; CHECK-NEXT: [[FMINNUM:%[0-9]+]]:_(<2 x s16>) = G_FMINNUM [[PHI1]], [[BUILD_VECTOR]]
+  ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[FMINNUM]](<2 x s16>)
+  ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[PHI1]](<2 x s16>)
+  ; CHECK-NEXT: [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[BITCAST1]], [[BITCAST]] :: (load store seq_cst seq_cst (s32) on %ir.addr, addrspace 3)
+  ; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
+  ; CHECK-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
+  ; CHECK-NEXT: [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INT]](s64)
+  ; CHECK-NEXT: G_BRCOND [[INT1]](s1), %bb.3
+  ; CHECK-NEXT: G_BR %bb.2
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.3.atomicrmw.end:
+  ; CHECK-NEXT: [[PHI2:%[0-9]+]]:_(<2 x s16>) = G_PHI [[BITCAST2]](<2 x s16>), %bb.2
+  ; CHECK-NEXT: [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INT]](s64), %bb.2
+  ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
+  ; CHECK-NEXT: $vgpr0 = COPY [[PHI2]](<2 x s16>)
+  ; CHECK-NEXT: SI_RETURN implicit $vgpr0
+  %oldval = atomicrmw fmin ptr addrspace(3) %addr, <2 x half> <half 1.0, half 1.0> seq_cst
+  ret <2 x half> %oldval
+}
+
+define <2 x half> @test_atomicrmw_fmax_vector(ptr addrspace(3) %addr) {
+  ; CHECK-LABEL: name: test_atomicrmw_fmax_vector
+  ; CHECK: bb.1 (%ir-block.0):
+  ; CHECK-NEXT: successors: %bb.2(0x80000000)
+  ; CHECK-NEXT: liveins: $vgpr0
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
+  ; CHECK-NEXT: [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH3C00
+  ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[C]](s16), [[C]](s16)
+  ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[COPY]](p3) :: (load (<2 x s16>) from %ir.addr, addrspace 3)
+  ; CHECK-NEXT: G_BR %bb.2
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.2.atomicrmw.start:
+  ; CHECK-NEXT: successors: %bb.3(0x40000000), %bb.2(0x40000000)
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: [[PHI:%[0-9]+]]:_(s64) = G_PHI %19(s64), %bb.2, [[C1]](s64), %bb.1
+  ; CHECK-NEXT: [[PHI1:%[0-9]+]]:_(<2 x s16>) = G_PHI [[LOAD]](<2 x s16>), %bb.1, %18(<2 x s16>), %bb.2
+  ; CHECK-NEXT: [[FMAXNUM:%[0-9]+]]:_(<2 x s16>) = G_FMAXNUM [[PHI1]], [[BUILD_VECTOR]]
+  ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[FMAXNUM]](<2 x s16>)
+  ; CHECK-NEXT: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[PHI1]](<2 x s16>)
+  ; CHECK-NEXT: [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[BITCAST1]], [[BITCAST]] :: (load store seq_cst seq_cst (s32) on %ir.addr, addrspace 3)
+  ; CHECK-NEXT: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[ATOMIC_CMPXCHG_WITH_SUCCESS]](s32)
+  ; CHECK-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
+  ; CHECK-NEXT: [[INT1:%[0-9]+]]:_(s1) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.loop), [[INT]](s64)
+  ; CHECK-NEXT: G_BRCOND [[INT1]](s1), %bb.3
+  ; CHECK-NEXT: G_BR %bb.2
+  ; CHECK-NEXT: {{ $}}
+  ; CHECK-NEXT: bb.3.atomicrmw.end:
+  ; CHECK-NEXT: [[PHI2:%[0-9]+]]:_(<2 x s16>) = G_PHI [[BITCAST2]](<2 x s16>), %bb.2
+  ; CHECK-NEXT: [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INT]](s64), %bb.2
+  ; CHECK-NEXT: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
+  ; CHECK-NEXT: $vgpr0 = COPY [[PHI2]](<2 x s16>)
+  ; CHECK-NEXT: SI_RETURN implicit $vgpr0
+  %oldval = atomicrmw fmax ptr addrspace(3) %addr, <2 x half> <half 1.0, half 1.0> seq_cst
+  ret <2 x half> %oldval
+}
+
!llvm.module.flags = !{!0}
!0 = !{i32 1, !"amdhsa_code_object_version", i32 500}