
Commit 246b57c

Fix tests in flang/test/Lower/PowerPC after splat change.
1 parent 88e9b37 commit 246b57c
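
These test updates track a change in how LLVM prints constant vector splats: a vector constant whose lanes all hold the same value now prints with the splat keyword instead of an explicit element list. A minimal before/after illustration, taken from the <4 x i32> case in the hunks below (the register names %res and %mask are illustrative, not from the commit):

  ; old textual IR: every lane of the constant is spelled out
  %mask = xor <4 x i32> %res, <i32 -1, i32 -1, i32 -1, i32 -1>
  ; new textual IR: the same all-ones constant prints as a splat
  %mask = xor <4 x i32> %res, splat (i32 -1)

The constant itself is unchanged; only the expected spelling in the FileCheck patterns needed updating.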

File tree: 6 files changed (+70 / -67 lines)

flang/test/Lower/PowerPC/ppc-vec-cmp.f90

Lines changed: 18 additions & 17 deletions
@@ -1,4 +1,5 @@
-! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
 ! REQUIRES: target=powerpc{{.*}}
 
 !----------------------
@@ -14,7 +15,7 @@ subroutine vec_cmpge_test_i8(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %[[arg2]], <2 x i64> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
 end subroutine vec_cmpge_test_i8
 
 ! CHECK-LABEL: vec_cmpge_test_i4
@@ -26,7 +27,7 @@ subroutine vec_cmpge_test_i4(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtsw(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
 end subroutine vec_cmpge_test_i4
 
 ! CHECK-LABEL: vec_cmpge_test_i2
@@ -38,7 +39,7 @@ subroutine vec_cmpge_test_i2(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtsh(<8 x i16> %[[arg2]], <8 x i16> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
 end subroutine vec_cmpge_test_i2
 
 ! CHECK-LABEL: vec_cmpge_test_i1
@@ -50,7 +51,7 @@ subroutine vec_cmpge_test_i1(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtsb(<16 x i8> %[[arg2]], <16 x i8> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
 end subroutine vec_cmpge_test_i1
 
 ! CHECK-LABEL: vec_cmpge_test_u8
@@ -62,7 +63,7 @@ subroutine vec_cmpge_test_u8(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %[[arg2]], <2 x i64> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
 end subroutine vec_cmpge_test_u8
 
 ! CHECK-LABEL: vec_cmpge_test_u4
@@ -74,7 +75,7 @@ subroutine vec_cmpge_test_u4(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtuw(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
 end subroutine vec_cmpge_test_u4
 
 ! CHECK-LABEL: vec_cmpge_test_u2
@@ -86,7 +87,7 @@ subroutine vec_cmpge_test_u2(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtuh(<8 x i16> %[[arg2]], <8 x i16> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
 end subroutine vec_cmpge_test_u2
 
 ! CHECK-LABEL: vec_cmpge_test_u1
@@ -98,7 +99,7 @@ subroutine vec_cmpge_test_u1(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtub(<16 x i8> %[[arg2]], <16 x i8> %[[arg1]])
-! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
 end subroutine vec_cmpge_test_u1
 
 subroutine vec_cmpge_test_r4(arg1, arg2)
@@ -248,7 +249,7 @@ subroutine vec_cmple_test_i8(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %[[arg1]], <2 x i64> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
 end subroutine vec_cmple_test_i8
 
 ! CHECK-LABEL: vec_cmple_test_i4
@@ -260,7 +261,7 @@ subroutine vec_cmple_test_i4(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtsw(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
 end subroutine vec_cmple_test_i4
 
 ! CHECK-LABEL: vec_cmple_test_i2
@@ -272,7 +273,7 @@ subroutine vec_cmple_test_i2(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtsh(<8 x i16> %[[arg1]], <8 x i16> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
 end subroutine vec_cmple_test_i2
 
 ! CHECK-LABEL: vec_cmple_test_i1
@@ -284,7 +285,7 @@ subroutine vec_cmple_test_i1(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtsb(<16 x i8> %[[arg1]], <16 x i8> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
 end subroutine vec_cmple_test_i1
 
 ! CHECK-LABEL: vec_cmple_test_u8
@@ -296,7 +297,7 @@ subroutine vec_cmple_test_u8(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %[[arg1]], <2 x i64> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
 end subroutine vec_cmple_test_u8
 
 ! CHECK-LABEL: vec_cmple_test_u4
@@ -308,7 +309,7 @@ subroutine vec_cmple_test_u4(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtuw(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
 end subroutine vec_cmple_test_u4
 
 ! CHECK-LABEL: vec_cmple_test_u2
@@ -320,7 +321,7 @@ subroutine vec_cmple_test_u2(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtuh(<8 x i16> %[[arg1]], <8 x i16> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
 end subroutine vec_cmple_test_u2
 
 ! CHECK-LABEL: vec_cmple_test_u1
@@ -332,7 +333,7 @@ subroutine vec_cmple_test_u1(arg1, arg2)
 ! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtub(<16 x i8> %[[arg1]], <16 x i8> %[[arg2]])
-! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
 end subroutine vec_cmple_test_u1
 
 ! CHECK-LABEL: vec_cmple_test_r4
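
Every hunk above follows the same shape: flang lowers vec_cmpge and vec_cmple as the inverse of the AltiVec greater-than compare, so the final instruction XORs the compare result with an all-ones mask, and only that mask's printed form changed. A condensed sketch of the pattern for the signed 64-bit case (register names %a, %b, %gt, %ge are illustrative):

  ; a >= b is computed as NOT (b > a)
  %gt = call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %b, <2 x i64> %a)
  %ge = xor <2 x i64> %gt, splat (i64 -1)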

flang/test/Lower/PowerPC/ppc-vec-convert.f90

Lines changed: 8 additions & 8 deletions
@@ -57,7 +57,7 @@ subroutine vec_ctf_test_i8i1(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = sitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_i8i1
 
@@ -69,7 +69,7 @@ subroutine vec_ctf_test_i8i2(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = sitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_i8i2
 
@@ -81,7 +81,7 @@ subroutine vec_ctf_test_i8i4(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = sitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_i8i4
 
@@ -93,7 +93,7 @@ subroutine vec_ctf_test_i8i8(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = sitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_i8i8
 
@@ -149,7 +149,7 @@ subroutine vec_ctf_test_u8i1(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = uitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_u8i1
 
@@ -161,7 +161,7 @@ subroutine vec_ctf_test_u8i2(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = uitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_u8i2
 
@@ -173,7 +173,7 @@ subroutine vec_ctf_test_u8i4(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = uitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_u8i4
 
@@ -185,7 +185,7 @@ subroutine vec_ctf_test_u8i8(arg1)
 
 ! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
 ! LLVMIR: %[[carg:.*]] = uitofp <2 x i64> %[[arg1]] to <2 x double>
-! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], <double 1.250000e-01, double 1.250000e-01>
+! LLVMIR: %[[r:.*]] = fmul <2 x double> %[[carg]], splat (double 1.250000e-01)
 ! LLVMIR: store <2 x double> %[[r]], ptr %{{.*}}, align 16
 end subroutine vec_ctf_test_u8i8
 
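
For reference, vec_ctf converts each integer element and scales the result by two to the power of minus its immediate argument; the 1.250000e-01 in these checks is 2^-3, so the tests presumably pass an immediate of 3, and only the splat spelling of that scale factor changed:

  %r = fmul <2 x double> %conv, splat (double 1.250000e-01)  ; 0.125 = 2^-3 (register names illustrative)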

0 commit comments