- ! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+ ! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
+ ! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
! REQUIRES: target=powerpc{{.*}}
!----------------------
@@ -14,7 +15,7 @@ subroutine vec_cmpge_test_i8(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %[[arg2]], <2 x i64> %[[arg1]])
- ! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
end subroutine vec_cmpge_test_i8
! CHECK-LABEL: vec_cmpge_test_i4
@@ -26,7 +27,7 @@ subroutine vec_cmpge_test_i4(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtsw(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]])
- ! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
end subroutine vec_cmpge_test_i4
! CHECK-LABEL: vec_cmpge_test_i2
@@ -38,7 +39,7 @@ subroutine vec_cmpge_test_i2(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtsh(<8 x i16> %[[arg2]], <8 x i16> %[[arg1]])
- ! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
end subroutine vec_cmpge_test_i2
! CHECK-LABEL: vec_cmpge_test_i1
@@ -50,7 +51,7 @@ subroutine vec_cmpge_test_i1(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtsb(<16 x i8> %[[arg2]], <16 x i8> %[[arg1]])
- ! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
end subroutine vec_cmpge_test_i1
! CHECK-LABEL: vec_cmpge_test_u8
@@ -62,7 +63,7 @@ subroutine vec_cmpge_test_u8(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %[[arg2]], <2 x i64> %[[arg1]])
- ! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
end subroutine vec_cmpge_test_u8
! CHECK-LABEL: vec_cmpge_test_u4
@@ -74,7 +75,7 @@ subroutine vec_cmpge_test_u4(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtuw(<4 x i32> %[[arg2]], <4 x i32> %[[arg1]])
- ! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
end subroutine vec_cmpge_test_u4
! CHECK-LABEL: vec_cmpge_test_u2
@@ -86,7 +87,7 @@ subroutine vec_cmpge_test_u2(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtuh(<8 x i16> %[[arg2]], <8 x i16> %[[arg1]])
- ! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
end subroutine vec_cmpge_test_u2
! CHECK-LABEL: vec_cmpge_test_u1
@@ -98,7 +99,7 @@ subroutine vec_cmpge_test_u1(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtub(<16 x i8> %[[arg2]], <16 x i8> %[[arg1]])
- ! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
end subroutine vec_cmpge_test_u1
subroutine vec_cmpge_test_r4(arg1, arg2)
@@ -248,7 +249,7 @@ subroutine vec_cmple_test_i8(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %[[arg1]], <2 x i64> %[[arg2]])
- ! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
end subroutine vec_cmple_test_i8
! CHECK-LABEL: vec_cmple_test_i4
@@ -260,7 +261,7 @@ subroutine vec_cmple_test_i4(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtsw(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]])
- ! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
end subroutine vec_cmple_test_i4
! CHECK-LABEL: vec_cmple_test_i2
@@ -272,7 +273,7 @@ subroutine vec_cmple_test_i2(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtsh(<8 x i16> %[[arg1]], <8 x i16> %[[arg2]])
- ! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
end subroutine vec_cmple_test_i2
! CHECK-LABEL: vec_cmple_test_i1
@@ -284,7 +285,7 @@ subroutine vec_cmple_test_i1(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtsb(<16 x i8> %[[arg1]], <16 x i8> %[[arg2]])
- ! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
end subroutine vec_cmple_test_i1
! CHECK-LABEL: vec_cmple_test_u8
@@ -296,7 +297,7 @@ subroutine vec_cmple_test_u8(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <2 x i64>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %[[arg1]], <2 x i64> %[[arg2]])
- ! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], <i64 -1, i64 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <2 x i64> %[[res]], splat (i64 -1)
end subroutine vec_cmple_test_u8
! CHECK-LABEL: vec_cmple_test_u4
@@ -308,7 +309,7 @@ subroutine vec_cmple_test_u4(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <4 x i32> @llvm.ppc.altivec.vcmpgtuw(<4 x i32> %[[arg1]], <4 x i32> %[[arg2]])
- ! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], <i32 -1, i32 -1, i32 -1, i32 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <4 x i32> %[[res]], splat (i32 -1)
end subroutine vec_cmple_test_u4
! CHECK-LABEL: vec_cmple_test_u2
@@ -320,7 +321,7 @@ subroutine vec_cmple_test_u2(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <8 x i16> @llvm.ppc.altivec.vcmpgtuh(<8 x i16> %[[arg1]], <8 x i16> %[[arg2]])
- ! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <8 x i16> %[[res]], splat (i16 -1)
end subroutine vec_cmple_test_u2
! CHECK-LABEL: vec_cmple_test_u1
@@ -332,7 +333,7 @@ subroutine vec_cmple_test_u1(arg1, arg2)
! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[res:.*]] = call <16 x i8> @llvm.ppc.altivec.vcmpgtub(<16 x i8> %[[arg1]], <16 x i8> %[[arg2]])
- ! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ ! LLVMIR: %{{[0-9]+}} = xor <16 x i8> %[[res]], splat (i8 -1)
end subroutine vec_cmple_test_u1
! CHECK-LABEL: vec_cmple_test_r4