@@ -11,12 +11,8 @@ __INT32_TYPE__*m1(__INT32_TYPE__ i) __attribute__((alloc_align(1)));
// CHECK-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[TMP0]])
- // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
- // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
- // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
- // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
- // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
- // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+ // CHECK-NEXT: [[CASTED_ALIGN:%.*]] = zext i32 [[TMP0]] to i64
+ // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
@@ -32,12 +28,8 @@ __INT32_TYPE__ test1(__INT32_TYPE__ a) {
// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[CONV]])
- // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[CONV]] to i64
- // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
- // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
- // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
- // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
- // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+ // CHECK-NEXT: [[CASTED_ALIGN:%.*]] = zext i32 [[CONV]] to i64
+ // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
@@ -55,11 +47,7 @@ __INT32_TYPE__ *m2(__SIZE_TYPE__ i) __attribute__((alloc_align(1)));
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[CONV]])
- // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[CONV]], 1
- // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
- // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
- // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
- // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+ // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CONV]]) ]
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
@@ -75,11 +63,7 @@ __INT32_TYPE__ test3(__INT32_TYPE__ a) {
// CHECK-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[TMP0]])
- // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[TMP0]], 1
- // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
- // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
- // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
- // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+ // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[TMP0]]) ]
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
@@ -115,12 +99,8 @@ __INT32_TYPE__ *m3(struct Empty s, __int128_t i) __attribute__((alloc_align(2)))
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1
// CHECK-NEXT: [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m3(i64 [[TMP6]], i64 [[TMP8]])
- // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64
- // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
- // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
- // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
- // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
- // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+ // CHECK-NEXT: [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64
+ // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP9]]
//
@@ -157,12 +137,8 @@ __INT32_TYPE__ *m4(struct MultiArgs s, __int128_t i) __attribute__((alloc_align(
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 1
// CHECK-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m4(i64 [[TMP6]], i64 [[TMP8]], i64 [[TMP11]], i64 [[TMP13]])
- // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64
- // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
- // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
- // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
- // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
- // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
+ // CHECK-NEXT: [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64
+ // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP14]]
//
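For context, these CHECK lines verify the IR Clang emits for callers of functions declared with the alloc_align attribute, which says the value of the given parameter is the alignment of the returned pointer. A minimal sketch of the kind of source involved, reconstructed from the m1/test1 shape visible in the hunk headers (the actual test body is an assumption inferred from the CHECK lines); compiling it with something like `clang -S -emit-llvm` shows the new operand-bundle form:

/* alloc_align(1) marks the first parameter as the alignment of the
 * pointer that m1 returns. */
__INT32_TYPE__ *m1(__INT32_TYPE__ i) __attribute__((alloc_align(1)));

__INT32_TYPE__ test1(__INT32_TYPE__ a) {
  /* For this call, Clang used to expand the alignment assumption
   * into a zext/sub/ptrtoint/and/icmp sequence feeding
   * llvm.assume(i1 %maskcond). After this change it emits a single
   *   call void @llvm.assume(i1 true) [ "align"(ptr, alignment) ]
   * operand bundle, casting the alignment operand to i64 first
   * (zext for i32 arguments here, trunc for the i128 cases below). */
  return *m1(a);
}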