
Commit 7016f2d

[RISCV] Add coverage for vsub.vi emulation via vadd.vi
Note that we already perform the emulation for intrinsics, but not for plain IR.
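For context (not part of the commit): the RVV spec has vsub.vv and vsub.vx but no vsub.vi encoding, so a subtract of a splatted constant c can be emulated as vadd.vi with immediate -c whenever -c fits the 5-bit signed immediate range (-16..15). The sketch below is illustration only (hypothetical function name @vsub_vi_sketch, not part of the test file); it shows the plain-IR pattern this test covers, the vsub.vx sequence the SDNode path currently emits, and the vadd.vi form that, per the commit message, the intrinsic path already uses.

; Illustration only; assumes the standard simm5 range of vadd.vi (-16..15).
define <vscale x 8 x i32> @vsub_vi_sketch(<vscale x 8 x i32> %va) {
  ; Current plain-IR lowering (as checked in this test):
  ;   li a0, 1
  ;   vsetvli a1, zero, e32, m4, ta, ma
  ;   vsub.vx v8, v8, a0
  ; Possible emulation, since x - 1 == x + (-1):
  ;   vsetvli a1, zero, e32, m4, ta, ma
  ;   vadd.vi v8, v8, -1
  %vc = sub <vscale x 8 x i32> %va, splat (i32 1)
  ret <vscale x 8 x i32> %vc
}

In the diff below, the existing vsub_vx_*_0 tests bump their constant from 1 to 17 so the splat stays outside that immediate range and keeps covering vsub.vx, while the new vsub_vi_nxv8i32_* tests probe the boundary values 1, -1, 15, 16, -15, and -16.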
1 parent deb4b20 commit 7016f2d

File tree

1 file changed: +110 -44 lines changed


llvm/test/CodeGen/RISCV/rvv/vsub-sdnode.ll

Lines changed: 110 additions & 44 deletions
@@ -27,11 +27,11 @@ define <vscale x 1 x i8> @vsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
 define <vscale x 1 x i8> @vsub_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv1i8_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 1 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 1 x i8> %va, splat (i8 17)
   ret <vscale x 1 x i8> %vc
 }

@@ -71,11 +71,11 @@ define <vscale x 2 x i8> @vsub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 define <vscale x 2 x i8> @vsub_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv2i8_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 2 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 2 x i8> %va, splat (i8 17)
   ret <vscale x 2 x i8> %vc
 }

@@ -104,11 +104,11 @@ define <vscale x 4 x i8> @vsub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 define <vscale x 4 x i8> @vsub_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv4i8_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 4 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 4 x i8> %va, splat (i8 17)
   ret <vscale x 4 x i8> %vc
 }

@@ -137,11 +137,11 @@ define <vscale x 8 x i8> @vsub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 define <vscale x 8 x i8> @vsub_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv8i8_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 8 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 8 x i8> %va, splat (i8 17)
   ret <vscale x 8 x i8> %vc
 }

@@ -170,11 +170,11 @@ define <vscale x 16 x i8> @vsub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b
 define <vscale x 16 x i8> @vsub_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv16i8_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 16 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 16 x i8> %va, splat (i8 17)
   ret <vscale x 16 x i8> %vc
 }

@@ -203,11 +203,11 @@ define <vscale x 32 x i8> @vsub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b
 define <vscale x 32 x i8> @vsub_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv32i8_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 32 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 32 x i8> %va, splat (i8 17)
   ret <vscale x 32 x i8> %vc
 }

@@ -236,11 +236,11 @@ define <vscale x 64 x i8> @vsub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b
 define <vscale x 64 x i8> @vsub_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv64i8_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 64 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 64 x i8> %va, splat (i8 17)
   ret <vscale x 64 x i8> %vc
 }

@@ -269,11 +269,11 @@ define <vscale x 1 x i16> @vsub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %
 define <vscale x 1 x i16> @vsub_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv1i16_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 1 x i16> %va, splat (i16 1)
+  %vc = sub <vscale x 1 x i16> %va, splat (i16 17)
   ret <vscale x 1 x i16> %vc
 }

@@ -302,11 +302,11 @@ define <vscale x 2 x i16> @vsub_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %
 define <vscale x 2 x i16> @vsub_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv2i16_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 2 x i16> %va, splat (i16 1)
+  %vc = sub <vscale x 2 x i16> %va, splat (i16 17)
   ret <vscale x 2 x i16> %vc
 }

@@ -335,11 +335,11 @@ define <vscale x 4 x i16> @vsub_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %
 define <vscale x 4 x i16> @vsub_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv4i16_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 4 x i16> %va, splat (i16 1)
+  %vc = sub <vscale x 4 x i16> %va, splat (i16 17)
   ret <vscale x 4 x i16> %vc
 }

@@ -368,11 +368,11 @@ define <vscale x 8 x i16> @vsub_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %
 define <vscale x 8 x i16> @vsub_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv8i16_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 8 x i16> %va, splat (i16 1)
+  %vc = sub <vscale x 8 x i16> %va, splat (i16 17)
   ret <vscale x 8 x i16> %vc
 }

@@ -401,11 +401,11 @@ define <vscale x 16 x i16> @vsub_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signex
 define <vscale x 16 x i16> @vsub_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv16i16_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 16 x i16> %va, splat (i16 1)
+  %vc = sub <vscale x 16 x i16> %va, splat (i16 17)
   ret <vscale x 16 x i16> %vc
 }

@@ -434,11 +434,11 @@ define <vscale x 32 x i16> @vsub_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signex
 define <vscale x 32 x i16> @vsub_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv32i16_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 32 x i16> %va, splat (i16 1)
+  %vc = sub <vscale x 32 x i16> %va, splat (i16 17)
   ret <vscale x 32 x i16> %vc
 }

@@ -467,11 +467,11 @@ define <vscale x 1 x i32> @vsub_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %
 define <vscale x 1 x i32> @vsub_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv1i32_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 1 x i32> %va, splat (i32 1)
+  %vc = sub <vscale x 1 x i32> %va, splat (i32 17)
   ret <vscale x 1 x i32> %vc
 }

@@ -500,11 +500,11 @@ define <vscale x 2 x i32> @vsub_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %
 define <vscale x 2 x i32> @vsub_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv2i32_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 2 x i32> %va, splat (i32 1)
+  %vc = sub <vscale x 2 x i32> %va, splat (i32 17)
   ret <vscale x 2 x i32> %vc
 }

@@ -533,11 +533,11 @@ define <vscale x 4 x i32> @vsub_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %
 define <vscale x 4 x i32> @vsub_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv4i32_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 4 x i32> %va, splat (i32 1)
+  %vc = sub <vscale x 4 x i32> %va, splat (i32 17)
   ret <vscale x 4 x i32> %vc
 }

@@ -566,11 +566,11 @@ define <vscale x 8 x i32> @vsub_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %
 define <vscale x 8 x i32> @vsub_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv8i32_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 8 x i32> %va, splat (i32 1)
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 17)
   ret <vscale x 8 x i32> %vc
 }

@@ -599,11 +599,11 @@ define <vscale x 16 x i32> @vsub_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signex
 define <vscale x 16 x i32> @vsub_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv16i32_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 16 x i32> %va, splat (i32 1)
+  %vc = sub <vscale x 16 x i32> %va, splat (i32 17)
   ret <vscale x 16 x i32> %vc
 }

@@ -646,11 +646,11 @@ define <vscale x 1 x i64> @vsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 define <vscale x 1 x i64> @vsub_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vsub_vx_nxv1i64_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 1 x i64> %va, splat (i64 1)
+  %vc = sub <vscale x 1 x i64> %va, splat (i64 17)
   ret <vscale x 1 x i64> %vc
 }

@@ -693,11 +693,11 @@ define <vscale x 2 x i64> @vsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 define <vscale x 2 x i64> @vsub_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vsub_vx_nxv2i64_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 2 x i64> %va, splat (i64 1)
+  %vc = sub <vscale x 2 x i64> %va, splat (i64 17)
   ret <vscale x 2 x i64> %vc
 }

@@ -740,11 +740,11 @@ define <vscale x 4 x i64> @vsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 define <vscale x 4 x i64> @vsub_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vsub_vx_nxv4i64_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 4 x i64> %va, splat (i64 1)
+  %vc = sub <vscale x 4 x i64> %va, splat (i64 17)
   ret <vscale x 4 x i64> %vc
 }

@@ -787,11 +787,11 @@ define <vscale x 8 x i64> @vsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i64> @vsub_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vsub_vx_nxv8i64_0:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: li a0, 17
 ; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT: vsub.vx v8, v8, a0
 ; CHECK-NEXT: ret
-  %vc = sub <vscale x 8 x i64> %va, splat (i64 1)
+  %vc = sub <vscale x 8 x i64> %va, splat (i64 17)
   ret <vscale x 8 x i64> %vc
 }

@@ -850,6 +850,72 @@ define <vscale x 8 x i32> @vsub_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 sign
   ret <vscale x 8 x i32> %vc
 }

+define <vscale x 8 x i32> @vsub_vi_nxv8i32_one(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsub_vi_nxv8i32_one:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsub.vx v8, v8, a0
+; CHECK-NEXT: ret
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 1)
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vsub_vi_nxv8i32_minusone(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsub_vi_nxv8i32_minusone:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsub.vx v8, v8, a0
+; CHECK-NEXT: ret
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 -1)
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vsub_vi_nxv8i32_15(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsub_vi_nxv8i32_15:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 15
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsub.vx v8, v8, a0
+; CHECK-NEXT: ret
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 15)
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vsub_vi_nxv8i32_16(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsub_vi_nxv8i32_16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, 16
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsub.vx v8, v8, a0
+; CHECK-NEXT: ret
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 16)
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vsub_vi_nxv8i32_minus15(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsub_vi_nxv8i32_minus15:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, -15
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsub.vx v8, v8, a0
+; CHECK-NEXT: ret
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 -15)
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vsub_vi_nxv8i32_minus16(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsub_vi_nxv8i32_minus16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a0, -16
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT: vsub.vx v8, v8, a0
+; CHECK-NEXT: ret
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 -16)
+  ret <vscale x 8 x i32> %vc
+}
+
 define <vscale x 8 x i32> @vsub_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: vsub_vi_mask_nxv8i32:
 ; CHECK: # %bb.0:
