-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: aarch64-registered-target
 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
-// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
-// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o /dev/null %s
+// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -o - %s >/dev/null
 #include <arm_sve.h>
 
 #ifdef SVE_OVERLOADED_FORMS
 ...
 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
 #endif
 
-// CHECK-LABEL: @test_svacge_f16(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.facge.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[TMP1]])
-// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
-//
-// CPP-CHECK-LABEL: @_Z15test_svacge_f16u10__SVBool_tu13__SVFloat16_tu13__SVFloat16_t(
-// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.facge.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]])
-// CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> [[TMP1]])
-// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
-//
 svbool_t test_svacge_f16(svbool_t pg, svfloat16_t op1, svfloat16_t op2)
 {
+  // CHECK-LABEL: test_svacge_f16
+  // CHECK: %[[PG:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.facge.nxv8f16(<vscale x 8 x i1> %[[PG]], <vscale x 8 x half> %op1, <vscale x 8 x half> %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
   return SVE_ACLE_FUNC(svacge,_f16,,)(pg, op1, op2);
 }
 
-// CHECK-LABEL: @test_svacge_f32(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.facge.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[TMP1]])
-// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
-//
-// CPP-CHECK-LABEL: @_Z15test_svacge_f32u10__SVBool_tu13__SVFloat32_tu13__SVFloat32_t(
-// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.facge.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]])
-// CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[TMP1]])
-// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
-//
 svbool_t test_svacge_f32(svbool_t pg, svfloat32_t op1, svfloat32_t op2)
 {
+  // CHECK-LABEL: test_svacge_f32
+  // CHECK: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.facge.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
   return SVE_ACLE_FUNC(svacge,_f32,,)(pg, op1, op2);
 }
 
-// CHECK-LABEL: @test_svacge_f64(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[TMP1]])
-// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
-//
-// CPP-CHECK-LABEL: @_Z15test_svacge_f64u10__SVBool_tu13__SVFloat64_tu13__SVFloat64_t(
-// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]])
-// CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[TMP1]])
-// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
-//
 svbool_t test_svacge_f64(svbool_t pg, svfloat64_t op1, svfloat64_t op2)
 {
+  // CHECK-LABEL: test_svacge_f64
+  // CHECK: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %op2)
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
   return SVE_ACLE_FUNC(svacge,_f64,,)(pg, op1, op2);
 }
 
-// CHECK-LABEL: @test_svacge_n_f32(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float [[OP2:%.*]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.facge.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[TMP2]])
-// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP3]]
-//
-// CPP-CHECK-LABEL: @_Z17test_svacge_n_f32u10__SVBool_tu13__SVFloat32_tf(
-// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float [[OP2:%.*]])
-// CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.facge.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[TMP1]])
-// CPP-CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> [[TMP2]])
-// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP3]]
-//
 svbool_t test_svacge_n_f32(svbool_t pg, svfloat32_t op1, float32_t op2)
 {
+  // CHECK-LABEL: test_svacge_n_f32
+  // CHECK-DAG: %[[PG:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> %pg)
+  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 4 x float> @llvm.aarch64.sve.dup.x.nxv4f32(float %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.facge.nxv4f32(<vscale x 4 x i1> %[[PG]], <vscale x 4 x float> %op1, <vscale x 4 x float> %[[DUP]])
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
   return SVE_ACLE_FUNC(svacge,_n_f32,,)(pg, op1, op2);
 }
 
-// CHECK-LABEL: @test_svacge_n_f64(
-// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double [[OP2:%.*]])
-// CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[TMP2]])
-// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP3]]
-//
-// CPP-CHECK-LABEL: @_Z17test_svacge_n_f64u10__SVBool_tu13__SVFloat64_td(
-// CPP-CHECK-NEXT: entry:
-// CPP-CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CPP-CHECK-NEXT: [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double [[OP2:%.*]])
-// CPP-CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[TMP1]])
-// CPP-CHECK-NEXT: [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> [[TMP2]])
-// CPP-CHECK-NEXT: ret <vscale x 16 x i1> [[TMP3]]
-//
 svbool_t test_svacge_n_f64(svbool_t pg, svfloat64_t op1, float64_t op2)
 {
+  // CHECK-LABEL: test_svacge_n_f64
+  // CHECK-DAG: %[[PG:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> %pg)
+  // CHECK-DAG: %[[DUP:.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.dup.x.nxv2f64(double %op2)
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1> %[[PG]], <vscale x 2 x double> %op1, <vscale x 2 x double> %[[DUP]])
+  // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[CAST]]
   return SVE_ACLE_FUNC(svacge,_n_f64,,)(pg, op1, op2);
 }