@@ -27,11 +27,11 @@ define <vscale x 1 x i8> @vsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
 define <vscale x 1 x i8> @vsub_vx_nxv1i8_0(<vscale x 1 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv1i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 1 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 1 x i8> %va, splat (i8 17)
   ret <vscale x 1 x i8> %vc
 }
 
@@ -71,11 +71,11 @@ define <vscale x 2 x i8> @vsub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
 define <vscale x 2 x i8> @vsub_vx_nxv2i8_0(<vscale x 2 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 2 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 2 x i8> %va, splat (i8 17)
   ret <vscale x 2 x i8> %vc
 }
 
@@ -104,11 +104,11 @@ define <vscale x 4 x i8> @vsub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
 define <vscale x 4 x i8> @vsub_vx_nxv4i8_0(<vscale x 4 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv4i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 4 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 4 x i8> %va, splat (i8 17)
   ret <vscale x 4 x i8> %vc
 }
 
@@ -137,11 +137,11 @@ define <vscale x 8 x i8> @vsub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
 define <vscale x 8 x i8> @vsub_vx_nxv8i8_0(<vscale x 8 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv8i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 8 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 8 x i8> %va, splat (i8 17)
   ret <vscale x 8 x i8> %vc
 }
 
@@ -170,11 +170,11 @@ define <vscale x 16 x i8> @vsub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b
 define <vscale x 16 x i8> @vsub_vx_nxv16i8_0(<vscale x 16 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv16i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 16 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 16 x i8> %va, splat (i8 17)
   ret <vscale x 16 x i8> %vc
 }
 
@@ -203,11 +203,11 @@ define <vscale x 32 x i8> @vsub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b
 define <vscale x 32 x i8> @vsub_vx_nxv32i8_0(<vscale x 32 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv32i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 32 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 32 x i8> %va, splat (i8 17)
   ret <vscale x 32 x i8> %vc
 }
 
@@ -236,11 +236,11 @@ define <vscale x 64 x i8> @vsub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b
 define <vscale x 64 x i8> @vsub_vx_nxv64i8_0(<vscale x 64 x i8> %va) {
 ; CHECK-LABEL: vsub_vx_nxv64i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 64 x i8> %va, splat (i8 1)
+  %vc = sub <vscale x 64 x i8> %va, splat (i8 17)
   ret <vscale x 64 x i8> %vc
 }
 
@@ -269,11 +269,11 @@ define <vscale x 1 x i16> @vsub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %
 define <vscale x 1 x i16> @vsub_vx_nxv1i16_0(<vscale x 1 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv1i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 1 x i16> %va, splat (i16 1)
+  %vc = sub <vscale x 1 x i16> %va, splat (i16 17)
   ret <vscale x 1 x i16> %vc
 }
 
@@ -302,11 +302,11 @@ define <vscale x 2 x i16> @vsub_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %
 define <vscale x 2 x i16> @vsub_vx_nxv2i16_0(<vscale x 2 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv2i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 2 x i16> %va, splat (i16 1)
+  %vc = sub <vscale x 2 x i16> %va, splat (i16 17)
   ret <vscale x 2 x i16> %vc
 }
 
@@ -335,11 +335,11 @@ define <vscale x 4 x i16> @vsub_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %
 define <vscale x 4 x i16> @vsub_vx_nxv4i16_0(<vscale x 4 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv4i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 4 x i16> %va, splat (i16 1)
+  %vc = sub <vscale x 4 x i16> %va, splat (i16 17)
   ret <vscale x 4 x i16> %vc
 }
 
@@ -368,11 +368,11 @@ define <vscale x 8 x i16> @vsub_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %
 define <vscale x 8 x i16> @vsub_vx_nxv8i16_0(<vscale x 8 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv8i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 8 x i16> %va, splat (i16 1)
+  %vc = sub <vscale x 8 x i16> %va, splat (i16 17)
   ret <vscale x 8 x i16> %vc
 }
 
@@ -401,11 +401,11 @@ define <vscale x 16 x i16> @vsub_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signex
 define <vscale x 16 x i16> @vsub_vx_nxv16i16_0(<vscale x 16 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv16i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 16 x i16> %va, splat (i16 1)
+  %vc = sub <vscale x 16 x i16> %va, splat (i16 17)
   ret <vscale x 16 x i16> %vc
 }
 
@@ -434,11 +434,11 @@ define <vscale x 32 x i16> @vsub_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signex
 define <vscale x 32 x i16> @vsub_vx_nxv32i16_0(<vscale x 32 x i16> %va) {
 ; CHECK-LABEL: vsub_vx_nxv32i16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 32 x i16> %va, splat (i16 1)
+  %vc = sub <vscale x 32 x i16> %va, splat (i16 17)
   ret <vscale x 32 x i16> %vc
 }
 
@@ -467,11 +467,11 @@ define <vscale x 1 x i32> @vsub_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %
 define <vscale x 1 x i32> @vsub_vx_nxv1i32_0(<vscale x 1 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv1i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 1 x i32> %va, splat (i32 1)
+  %vc = sub <vscale x 1 x i32> %va, splat (i32 17)
   ret <vscale x 1 x i32> %vc
 }
 
@@ -500,11 +500,11 @@ define <vscale x 2 x i32> @vsub_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %
 define <vscale x 2 x i32> @vsub_vx_nxv2i32_0(<vscale x 2 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv2i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 2 x i32> %va, splat (i32 1)
+  %vc = sub <vscale x 2 x i32> %va, splat (i32 17)
   ret <vscale x 2 x i32> %vc
 }
 
@@ -533,11 +533,11 @@ define <vscale x 4 x i32> @vsub_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %
 define <vscale x 4 x i32> @vsub_vx_nxv4i32_0(<vscale x 4 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 4 x i32> %va, splat (i32 1)
+  %vc = sub <vscale x 4 x i32> %va, splat (i32 17)
   ret <vscale x 4 x i32> %vc
 }
 
@@ -566,11 +566,11 @@ define <vscale x 8 x i32> @vsub_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %
 define <vscale x 8 x i32> @vsub_vx_nxv8i32_0(<vscale x 8 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 8 x i32> %va, splat (i32 1)
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 17)
   ret <vscale x 8 x i32> %vc
 }
 
@@ -599,11 +599,11 @@ define <vscale x 16 x i32> @vsub_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signex
 define <vscale x 16 x i32> @vsub_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
 ; CHECK-LABEL: vsub_vx_nxv16i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 16 x i32> %va, splat (i32 1)
+  %vc = sub <vscale x 16 x i32> %va, splat (i32 17)
   ret <vscale x 16 x i32> %vc
 }
 
@@ -646,11 +646,11 @@ define <vscale x 1 x i64> @vsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 define <vscale x 1 x i64> @vsub_vx_nxv1i64_0(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vsub_vx_nxv1i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 1 x i64> %va, splat (i64 1)
+  %vc = sub <vscale x 1 x i64> %va, splat (i64 17)
   ret <vscale x 1 x i64> %vc
 }
 
@@ -693,11 +693,11 @@ define <vscale x 2 x i64> @vsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 define <vscale x 2 x i64> @vsub_vx_nxv2i64_0(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vsub_vx_nxv2i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 2 x i64> %va, splat (i64 1)
+  %vc = sub <vscale x 2 x i64> %va, splat (i64 17)
   ret <vscale x 2 x i64> %vc
 }
 
@@ -740,11 +740,11 @@ define <vscale x 4 x i64> @vsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 define <vscale x 4 x i64> @vsub_vx_nxv4i64_0(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vsub_vx_nxv4i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 4 x i64> %va, splat (i64 1)
+  %vc = sub <vscale x 4 x i64> %va, splat (i64 17)
   ret <vscale x 4 x i64> %vc
 }
 
@@ -787,11 +787,11 @@ define <vscale x 8 x i64> @vsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 define <vscale x 8 x i64> @vsub_vx_nxv8i64_0(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vsub_vx_nxv8i64_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    li a0, 17
 ; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0
 ; CHECK-NEXT:    ret
-  %vc = sub <vscale x 8 x i64> %va, splat (i64 1)
+  %vc = sub <vscale x 8 x i64> %va, splat (i64 17)
   ret <vscale x 8 x i64> %vc
 }
 
@@ -850,6 +850,72 @@ define <vscale x 8 x i32> @vsub_vx_mask_nxv8i32(<vscale x 8 x i32> %va, i32 sign
   ret <vscale x 8 x i32> %vc
 }
 
+define <vscale x 8 x i32> @vsub_vi_nxv8i32_one(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsub_vi_nxv8i32_one:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsub.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 1)
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vsub_vi_nxv8i32_minusone(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsub_vi_nxv8i32_minusone:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, -1
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsub.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 -1)
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vsub_vi_nxv8i32_15(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsub_vi_nxv8i32_15:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 15
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsub.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 15)
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vsub_vi_nxv8i32_16(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsub_vi_nxv8i32_16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsub.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 16)
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vsub_vi_nxv8i32_minus15(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsub_vi_nxv8i32_minus15:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, -15
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsub.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 -15)
+  ret <vscale x 8 x i32> %vc
+}
+
+define <vscale x 8 x i32> @vsub_vi_nxv8i32_minus16(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: vsub_vi_nxv8i32_minus16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a0, -16
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vsub.vx v8, v8, a0
+; CHECK-NEXT:    ret
+  %vc = sub <vscale x 8 x i32> %va, splat (i32 -16)
+  ret <vscale x 8 x i32> %vc
+}
+
 define <vscale x 8 x i32> @vsub_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: vsub_vi_mask_nxv8i32:
 ; CHECK:       # %bb.0:
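
A hedged note on why the constant moved from 1 to 17, and why the new tests probe 1, -1, 15, 16, -15, and -16: RVV has no vsub.vi, so a subtraction by a splat constant C can only take an immediate form as vadd.vi with -C, whose immediate is a 5-bit signed field covering -16..15. Negating 17 falls outside that window, which keeps the li/vsub.vx checks above stable even once such a fold is in place, while the vsub_vi_nxv8i32_* functions pin both edges of the window. A minimal sketch of the in-window case under that assumption; the SKETCH prefix and function name are hypothetical, not checks from this patch:

define <vscale x 8 x i32> @vsub_vi_folds_to_vadd_sketch(<vscale x 8 x i32> %va) {
; SKETCH:       # %bb.0:
; SKETCH-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; SKETCH-NEXT:    vadd.vi v8, v8, -16
; SKETCH-NEXT:    ret
  ; Subtracting 16 is adding -16, which fits the simm5 operand of vadd.vi,
  ; so no scalar materialization (li) would be needed.
  %vc = sub <vscale x 8 x i32> %va, splat (i32 16)
  ret <vscale x 8 x i32> %vc
}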
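
The file's RUN lines sit above this excerpt (the first hunk starts at line 27). For an rvv sdnode test of this kind they are typically the pair below, an assumption about this particular file rather than something visible in the diff; the shared CHECK lines would then be regenerated with llvm/utils/update_llc_test_checks.py after editing the IR rather than written by hand:

; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
; RUN:   | FileCheck %s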