@@ -11,8 +11,12 @@ __INT32_TYPE__*m1(__INT32_TYPE__ i) __attribute__((alloc_align(1)));
// CHECK-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[TMP0]])
- // CHECK-NEXT: [[CASTED_ALIGN:%.*]] = zext i32 [[TMP0]] to i64
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
+ // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
+ // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+ // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+ // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+ // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+ // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
@@ -28,8 +32,12 @@ __INT32_TYPE__ test1(__INT32_TYPE__ a) {
// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[CONV]])
- // CHECK-NEXT: [[CASTED_ALIGN:%.*]] = zext i32 [[CONV]] to i64
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
+ // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[CONV]] to i64
+ // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+ // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+ // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+ // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+ // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
@@ -47,7 +55,11 @@ __INT32_TYPE__ *m2(__SIZE_TYPE__ i) __attribute__((alloc_align(1)));
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[CONV]])
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CONV]]) ]
+ // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[CONV]], 1
+ // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+ // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+ // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+ // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
@@ -63,7 +75,11 @@ __INT32_TYPE__ test3(__INT32_TYPE__ a) {
// CHECK-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[TMP0]])
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[TMP0]]) ]
+ // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[TMP0]], 1
+ // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+ // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+ // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+ // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
@@ -99,8 +115,12 @@ __INT32_TYPE__ *m3(struct Empty s, __int128_t i) __attribute__((alloc_align(2)))
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1
// CHECK-NEXT: [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m3(i64 [[TMP6]], i64 [[TMP8]])
- // CHECK-NEXT: [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
+ // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64
+ // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+ // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+ // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+ // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+ // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP9]]
//
@@ -137,8 +157,12 @@ __INT32_TYPE__ *m4(struct MultiArgs s, __int128_t i) __attribute__((alloc_align(
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 1
// CHECK-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m4(i64 [[TMP6]], i64 [[TMP8]], i64 [[TMP11]], i64 [[TMP13]])
- // CHECK-NEXT: [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64
- // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]
+ // CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64
+ // CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+ // CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+ // CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+ // CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+ // CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[CALL]], align 4
// CHECK-NEXT: ret i32 [[TMP14]]
//
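For context, a minimal C sketch of the source pattern these CHECK lines exercise, assuming the alloc_align declaration shown in the first hunk header; the test body here is a reconstruction for illustration and may differ in detail from the actual test file:

// m1 returns a pointer whose alignment equals the value of its first argument.
__INT32_TYPE__ *m1(__INT32_TYPE__ i) __attribute__((alloc_align(1)));

__INT32_TYPE__ test1(__INT32_TYPE__ a) {
  // The pointer returned by m1 is assumed to be aligned to 'a' bytes.
  // With the updated CHECK lines, the alignment assumption is expected as the
  // expanded sub/ptrtoint/and/icmp sequence feeding @llvm.assume, rather than
  // an "align" operand bundle on the assume call.
  return *m1(a);
}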