Skip to content

Commit ff53d50

Browse files
authored
[RISCV] Improve legalization of e8 m8 VL>256 shuffles (#79330)
If we can't produce a large enough index vector in i8, we may need to legalize the shuffle (via scalarization — which in turn gets lowered into stack usage). This patch makes two related changes: (1) defer legalization until we actually need to generate the vrgather instruction — with the new recursive structure, this only happens when doing the fallback for one of the arms; (2) check the actual mask values for anything outside the representable range, rather than rejecting based on element count alone. Both changes are covered by recently added tests.
1 parent 5e3ae4c commit ff53d50

File tree

2 files changed

+84
-123
lines changed

2 files changed

+84
-123
lines changed

llvm/lib/Target/RISCV/RISCVISelLowering.cpp

Lines changed: 3 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -4971,8 +4971,9 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
49714971
if (SDValue V = lowerVECTOR_SHUFFLEAsRotate(SVN, DAG, Subtarget))
49724972
return V;
49734973

4974-
if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
4975-
// On such a large vector we're unable to use i8 as the index type.
4974+
if (VT.getScalarSizeInBits() == 8 &&
4975+
any_of(Mask, [&](const auto &Idx) { return Idx > 255; })) {
4976+
// On such a vector we're unable to use i8 as the index type.
49764977
// FIXME: We could promote the index to i16 and use vrgatherei16, but that
49774978
// may involve vector splitting if we're already at LMUL=8, or our
49784979
// user-supplied maximum fixed-length LMUL.
@@ -5049,14 +5050,6 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
50495050
return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);
50505051
}
50515052

5052-
if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
5053-
// On such a large vector we're unable to use i8 as the index type.
5054-
// FIXME: We could promote the index to i16 and use vrgatherei16, but that
5055-
// may involve vector splitting if we're already at LMUL=8, or our
5056-
// user-supplied maximum fixed-length LMUL.
5057-
return SDValue();
5058-
}
5059-
50605053
// As a backup, shuffles can be lowered via a vrgather instruction, possibly
50615054
// merged with a second vrgather.
50625055
SmallVector<int> ShuffleMaskLHS, ShuffleMaskRHS;

llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll

Lines changed: 81 additions & 113 deletions
Original file line numberDiff line numberDiff line change
@@ -60,53 +60,32 @@ define <512 x i8> @single_source(<512 x i8> %a) {
6060
define <512 x i8> @range_restriction(<512 x i8> %a) {
6161
; CHECK-LABEL: range_restriction:
6262
; CHECK: # %bb.0:
63-
; CHECK-NEXT: addi sp, sp, -1536
64-
; CHECK-NEXT: .cfi_def_cfa_offset 1536
65-
; CHECK-NEXT: sd ra, 1528(sp) # 8-byte Folded Spill
66-
; CHECK-NEXT: sd s0, 1520(sp) # 8-byte Folded Spill
67-
; CHECK-NEXT: .cfi_offset ra, -8
68-
; CHECK-NEXT: .cfi_offset s0, -16
69-
; CHECK-NEXT: addi s0, sp, 1536
70-
; CHECK-NEXT: .cfi_def_cfa s0, 0
71-
; CHECK-NEXT: andi sp, sp, -512
72-
; CHECK-NEXT: vmv8r.v v16, v8
7363
; CHECK-NEXT: li a0, 512
74-
; CHECK-NEXT: addi a1, sp, 512
7564
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
76-
; CHECK-NEXT: vse8.v v8, (a1)
77-
; CHECK-NEXT: lbu a0, 766(sp)
78-
; CHECK-NEXT: vmv.x.s a1, v16
79-
; CHECK-NEXT: vmv.v.x v8, a1
80-
; CHECK-NEXT: vslide1down.vx v8, v8, a0
81-
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
82-
; CHECK-NEXT: vslidedown.vi v17, v16, 5
83-
; CHECK-NEXT: vmv.x.s a0, v17
84-
; CHECK-NEXT: vmv.s.x v24, a0
85-
; CHECK-NEXT: li a0, 432
86-
; CHECK-NEXT: li a1, 431
87-
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
88-
; CHECK-NEXT: vslideup.vx v8, v24, a1
89-
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
90-
; CHECK-NEXT: vslidedown.vi v17, v16, 4
91-
; CHECK-NEXT: vmv.x.s a0, v17
92-
; CHECK-NEXT: vmv.s.x v24, a0
93-
; CHECK-NEXT: li a0, 466
94-
; CHECK-NEXT: li a1, 465
95-
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
96-
; CHECK-NEXT: vslideup.vx v8, v24, a1
97-
; CHECK-NEXT: li a0, 44
98-
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
99-
; CHECK-NEXT: vslidedown.vx v16, v16, a0
100-
; CHECK-NEXT: vmv.x.s a0, v16
101-
; CHECK-NEXT: vmv.s.x v16, a0
102-
; CHECK-NEXT: li a0, 501
103-
; CHECK-NEXT: li a1, 500
104-
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
105-
; CHECK-NEXT: vslideup.vx v8, v16, a1
106-
; CHECK-NEXT: addi sp, s0, -1536
107-
; CHECK-NEXT: ld ra, 1528(sp) # 8-byte Folded Reload
108-
; CHECK-NEXT: ld s0, 1520(sp) # 8-byte Folded Reload
109-
; CHECK-NEXT: addi sp, sp, 1536
65+
; CHECK-NEXT: vmv.v.i v16, 0
66+
; CHECK-NEXT: li a1, 254
67+
; CHECK-NEXT: vslide1down.vx v24, v16, a1
68+
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
69+
; CHECK-NEXT: vmv.v.i v16, 5
70+
; CHECK-NEXT: li a1, 432
71+
; CHECK-NEXT: li a2, 431
72+
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
73+
; CHECK-NEXT: vslideup.vx v24, v16, a2
74+
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
75+
; CHECK-NEXT: vmv.v.i v16, 4
76+
; CHECK-NEXT: li a1, 466
77+
; CHECK-NEXT: li a2, 465
78+
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
79+
; CHECK-NEXT: vslideup.vx v24, v16, a2
80+
; CHECK-NEXT: li a1, 44
81+
; CHECK-NEXT: vmv.s.x v16, a1
82+
; CHECK-NEXT: li a1, 501
83+
; CHECK-NEXT: li a2, 500
84+
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
85+
; CHECK-NEXT: vslideup.vx v24, v16, a2
86+
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
87+
; CHECK-NEXT: vrgather.vv v16, v8, v24
88+
; CHECK-NEXT: vmv.v.v v8, v16
11089
; CHECK-NEXT: ret
11190
%res = shufflevector <512 x i8> %a, <512 x i8> poison, <512 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, 
i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 5, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 4, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 44, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 254>
11291
ret <512 x i8> %res
@@ -116,87 +95,76 @@ define <512 x i8> @range_restriction(<512 x i8> %a) {
11695
define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
11796
; CHECK-LABEL: two_source:
11897
; CHECK: # %bb.0:
119-
; CHECK-NEXT: addi sp, sp, -2032
120-
; CHECK-NEXT: .cfi_def_cfa_offset 2032
121-
; CHECK-NEXT: sd ra, 2024(sp) # 8-byte Folded Spill
122-
; CHECK-NEXT: sd s0, 2016(sp) # 8-byte Folded Spill
98+
; CHECK-NEXT: addi sp, sp, -1536
99+
; CHECK-NEXT: .cfi_def_cfa_offset 1536
100+
; CHECK-NEXT: sd ra, 1528(sp) # 8-byte Folded Spill
101+
; CHECK-NEXT: sd s0, 1520(sp) # 8-byte Folded Spill
123102
; CHECK-NEXT: .cfi_offset ra, -8
124103
; CHECK-NEXT: .cfi_offset s0, -16
125-
; CHECK-NEXT: addi s0, sp, 2032
104+
; CHECK-NEXT: addi s0, sp, 1536
126105
; CHECK-NEXT: .cfi_def_cfa s0, 0
127-
; CHECK-NEXT: addi sp, sp, -16
128106
; CHECK-NEXT: andi sp, sp, -512
129107
; CHECK-NEXT: vmv8r.v v24, v8
130108
; CHECK-NEXT: li a0, 512
131-
; CHECK-NEXT: addi a1, sp, 1024
109+
; CHECK-NEXT: addi a1, sp, 512
132110
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
133111
; CHECK-NEXT: vse8.v v8, (a1)
134-
; CHECK-NEXT: addi a1, sp, 512
135-
; CHECK-NEXT: vse8.v v16, (a1)
136112
; CHECK-NEXT: vmv.x.s a1, v24
137113
; CHECK-NEXT: vmv.v.x v8, a1
138-
; CHECK-NEXT: li a1, 43
139-
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
140-
; CHECK-NEXT: vslidedown.vx v17, v16, a1
141-
; CHECK-NEXT: vmv.x.s a1, v17
142-
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
143-
; CHECK-NEXT: vslide1down.vx v8, v8, a1
144-
; CHECK-NEXT: li a0, 36
145114
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
146-
; CHECK-NEXT: vslidedown.vx v17, v16, a0
147-
; CHECK-NEXT: vmv.x.s a0, v17
148-
; CHECK-NEXT: vmv.s.x v0, a0
149-
; CHECK-NEXT: li a0, 399
150-
; CHECK-NEXT: li a1, 398
151-
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
152-
; CHECK-NEXT: vslideup.vx v8, v0, a1
153-
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
154-
; CHECK-NEXT: vslidedown.vi v17, v24, 5
155-
; CHECK-NEXT: vmv.x.s a0, v17
156-
; CHECK-NEXT: vmv.s.x v0, a0
157-
; CHECK-NEXT: li a0, 432
158-
; CHECK-NEXT: li a1, 431
159-
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
160-
; CHECK-NEXT: vslideup.vx v8, v0, a1
161-
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
162-
; CHECK-NEXT: vslidedown.vi v17, v24, 4
163-
; CHECK-NEXT: vmv.x.s a0, v17
164-
; CHECK-NEXT: vmv.s.x v24, a0
165-
; CHECK-NEXT: li a0, 466
166-
; CHECK-NEXT: li a1, 465
167-
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
168-
; CHECK-NEXT: vslideup.vx v8, v24, a1
169-
; CHECK-NEXT: li a1, 62
115+
; CHECK-NEXT: vslidedown.vi v25, v24, 5
116+
; CHECK-NEXT: vmv.x.s a1, v25
117+
; CHECK-NEXT: vmv.s.x v0, a1
118+
; CHECK-NEXT: li a1, 432
119+
; CHECK-NEXT: li a2, 431
120+
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
121+
; CHECK-NEXT: vslideup.vx v8, v0, a2
170122
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
171-
; CHECK-NEXT: vslidedown.vx v16, v16, a1
172-
; CHECK-NEXT: vmv.x.s a1, v16
173-
; CHECK-NEXT: vmv.s.x v16, a1
174-
; CHECK-NEXT: li a1, 467
123+
; CHECK-NEXT: vslidedown.vi v24, v24, 4
124+
; CHECK-NEXT: vmv.x.s a1, v24
125+
; CHECK-NEXT: vmv.s.x v24, a1
126+
; CHECK-NEXT: li a1, 466
127+
; CHECK-NEXT: li a2, 465
175128
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
176-
; CHECK-NEXT: lbu a1, 1497(sp)
177-
; CHECK-NEXT: vslideup.vx v8, v16, a0
178-
; CHECK-NEXT: vmv.s.x v16, a1
179-
; CHECK-NEXT: li a0, 478
180-
; CHECK-NEXT: li a1, 477
181-
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
182-
; CHECK-NEXT: lbu a0, 674(sp)
183-
; CHECK-NEXT: vslideup.vx v8, v16, a1
184-
; CHECK-NEXT: vmv.s.x v16, a0
185-
; CHECK-NEXT: li a0, 490
186-
; CHECK-NEXT: li a1, 489
187-
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
188-
; CHECK-NEXT: lbu a0, 1524(sp)
189-
; CHECK-NEXT: vslideup.vx v8, v16, a1
190-
; CHECK-NEXT: vmv.s.x v16, a0
191-
; CHECK-NEXT: li a0, 501
192-
; CHECK-NEXT: li a1, 500
193-
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
194-
; CHECK-NEXT: vslideup.vx v8, v16, a1
195-
; CHECK-NEXT: addi sp, s0, -2048
196-
; CHECK-NEXT: addi sp, sp, 16
197-
; CHECK-NEXT: ld ra, 2024(sp) # 8-byte Folded Reload
198-
; CHECK-NEXT: ld s0, 2016(sp) # 8-byte Folded Reload
199-
; CHECK-NEXT: addi sp, sp, 2032
129+
; CHECK-NEXT: lbu a1, 985(sp)
130+
; CHECK-NEXT: vslideup.vx v8, v24, a2
131+
; CHECK-NEXT: vmv.s.x v24, a1
132+
; CHECK-NEXT: li a1, 478
133+
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
134+
; CHECK-NEXT: lbu a1, 1012(sp)
135+
; CHECK-NEXT: li a2, 477
136+
; CHECK-NEXT: vslideup.vx v8, v24, a2
137+
; CHECK-NEXT: vmv.s.x v24, a1
138+
; CHECK-NEXT: li a1, 501
139+
; CHECK-NEXT: li a2, 500
140+
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
141+
; CHECK-NEXT: vslideup.vx v8, v24, a2
142+
; CHECK-NEXT: lui a1, 2761
143+
; CHECK-NEXT: slli a1, a1, 25
144+
; CHECK-NEXT: addi a1, a1, 501
145+
; CHECK-NEXT: slli a1, a1, 13
146+
; CHECK-NEXT: addi a1, a1, 512
147+
; CHECK-NEXT: li a2, 64
148+
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
149+
; CHECK-NEXT: vmv.v.x v24, a1
150+
; CHECK-NEXT: vsetivli zero, 8, e64, m1, ta, ma
151+
; CHECK-NEXT: vmv.v.i v0, 0
152+
; CHECK-NEXT: lui a1, 1047552
153+
; CHECK-NEXT: addiw a1, a1, 1
154+
; CHECK-NEXT: slli a1, a1, 23
155+
; CHECK-NEXT: addi a1, a1, 1
156+
; CHECK-NEXT: slli a1, a1, 18
157+
; CHECK-NEXT: vslide1down.vx v0, v0, a1
158+
; CHECK-NEXT: lui a1, 4
159+
; CHECK-NEXT: vmv.s.x v1, a1
160+
; CHECK-NEXT: vsetivli zero, 7, e64, m1, tu, ma
161+
; CHECK-NEXT: vslideup.vi v0, v1, 6
162+
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
163+
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
164+
; CHECK-NEXT: addi sp, s0, -1536
165+
; CHECK-NEXT: ld ra, 1528(sp) # 8-byte Folded Reload
166+
; CHECK-NEXT: ld s0, 1520(sp) # 8-byte Folded Reload
167+
; CHECK-NEXT: addi sp, sp, 1536
200168
; CHECK-NEXT: ret
201169
%res = shufflevector <512 x i8> %a, <512 x i8> %b, <512 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 
0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 548, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 5, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 4, i32 574, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 473, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 674, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 500, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 555>
202170
ret <512 x i8> %res

0 commit comments

Comments
 (0)