Commit 2f97ff8

[SLP] Add additional memory versioning tests.
1 parent 95ba9f9

File tree

2 files changed, +1107 −0 lines changed
Lines changed: 176 additions & 0 deletions
@@ -0,0 +1,176 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -slp-vectorizer -mtriple=arm64-apple-ios -S %s | FileCheck %s

define void @loop1(i32* %A, i32* %B, i64 %N) {
; CHECK-LABEL: @loop1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[B_GEP_0:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[IV]]
; CHECK-NEXT: [[B_0:%.*]] = load i32, i32* [[B_GEP_0]], align 4
; CHECK-NEXT: [[A_GEP_0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IV]]
; CHECK-NEXT: [[A_0:%.*]] = load i32, i32* [[A_GEP_0]], align 4
; CHECK-NEXT: [[ADD_0:%.*]] = add i32 [[A_0]], 20
; CHECK-NEXT: [[XOR_0:%.*]] = xor i32 [[ADD_0]], [[B_0]]
; CHECK-NEXT: store i32 [[XOR_0]], i32* [[A_GEP_0]], align 4
; CHECK-NEXT: [[IV_1:%.*]] = or i64 [[IV]], 1
; CHECK-NEXT: [[B_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[IV_1]]
; CHECK-NEXT: [[B_1:%.*]] = load i32, i32* [[B_GEP_1]], align 4
; CHECK-NEXT: [[A_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IV_1]]
; CHECK-NEXT: [[A_1:%.*]] = load i32, i32* [[A_GEP_1]], align 4
; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[A_1]], 20
; CHECK-NEXT: [[XOR_1:%.*]] = xor i32 [[ADD_1]], [[B_1]]
; CHECK-NEXT: store i32 [[XOR_1]], i32* [[A_GEP_1]], align 4
; CHECK-NEXT: [[IV_2:%.*]] = or i64 [[IV]], 2
; CHECK-NEXT: [[B_GEP_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[IV_2]]
; CHECK-NEXT: [[B_2:%.*]] = load i32, i32* [[B_GEP_2]], align 4
; CHECK-NEXT: [[A_GEP_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IV_2]]
; CHECK-NEXT: [[A_2:%.*]] = load i32, i32* [[A_GEP_2]], align 4
; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[A_2]], 20
; CHECK-NEXT: [[XOR_2:%.*]] = xor i32 [[ADD_2]], [[B_2]]
; CHECK-NEXT: store i32 [[XOR_2]], i32* [[A_GEP_2]], align 4
; CHECK-NEXT: [[IV_3:%.*]] = or i64 [[IV]], 3
; CHECK-NEXT: [[B_GEP_3:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[IV_3]]
; CHECK-NEXT: [[B_3:%.*]] = load i32, i32* [[B_GEP_3]], align 4
; CHECK-NEXT: [[A_GEP_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IV_3]]
; CHECK-NEXT: [[A_3:%.*]] = load i32, i32* [[A_GEP_3]], align 4
; CHECK-NEXT: [[ADD_3:%.*]] = add i32 [[A_3]], 20
; CHECK-NEXT: [[XOR_3:%.*]] = xor i32 [[ADD_3]], [[B_3]]
; CHECK-NEXT: store i32 [[XOR_3]], i32* [[A_GEP_3]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 16
; CHECK-NEXT: [[COND:%.*]] = icmp ult i64 [[IV_NEXT]], [[N:%.*]]
; CHECK-NEXT: br i1 [[COND]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %B.gep.0 = getelementptr inbounds i32, i32* %B, i64 %iv
  %B.0 = load i32, i32* %B.gep.0, align 4
  %A.gep.0 = getelementptr inbounds i32, i32* %A, i64 %iv
  %A.0 = load i32, i32* %A.gep.0, align 4
  %add.0 = add i32 %A.0, 20
  %xor.0 = xor i32 %add.0, %B.0
  store i32 %xor.0, i32* %A.gep.0, align 4
  %iv.1 = or i64 %iv, 1
  %B.gep.1 = getelementptr inbounds i32, i32* %B, i64 %iv.1
  %B.1 = load i32, i32* %B.gep.1, align 4
  %A.gep.1 = getelementptr inbounds i32, i32* %A, i64 %iv.1
  %A.1 = load i32, i32* %A.gep.1, align 4
  %add.1 = add i32 %A.1, 20
  %xor.1 = xor i32 %add.1, %B.1
  store i32 %xor.1, i32* %A.gep.1, align 4
  %iv.2 = or i64 %iv, 2
  %B.gep.2 = getelementptr inbounds i32, i32* %B, i64 %iv.2
  %B.2 = load i32, i32* %B.gep.2, align 4
  %A.gep.2 = getelementptr inbounds i32, i32* %A, i64 %iv.2
  %A.2 = load i32, i32* %A.gep.2, align 4
  %add.2 = add i32 %A.2, 20
  %xor.2 = xor i32 %add.2, %B.2
  store i32 %xor.2, i32* %A.gep.2, align 4
  %iv.3 = or i64 %iv, 3
  %B.gep.3 = getelementptr inbounds i32, i32* %B, i64 %iv.3
  %B.3 = load i32, i32* %B.gep.3, align 4
  %A.gep.3 = getelementptr inbounds i32, i32* %A, i64 %iv.3
  %A.3 = load i32, i32* %A.gep.3, align 4
  %add.3 = add i32 %A.3, 20
  %xor.3 = xor i32 %add.3, %B.3
  store i32 %xor.3, i32* %A.gep.3, align 4
  %iv.next = add nuw nsw i64 %iv, 16
  %cond = icmp ult i64 %iv.next, %N
  br i1 %cond, label %loop, label %exit

exit:
  ret void
}

define void @loop_iv_update_at_start(float* %src, float* %dst) #0 {
; CHECK-LABEL: @loop_iv_update_at_start(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[LOOP:%.*]]
; CHECK: loop:
; CHECK-NEXT: [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
; CHECK-NEXT: [[IV_NEXT]] = add i32 [[IV]], 1
; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[IV]], 2000
; CHECK-NEXT: [[SRC_GEP_0:%.*]] = getelementptr inbounds float, float* [[SRC:%.*]], i64 0
; CHECK-NEXT: [[SRC_0:%.*]] = load float, float* [[SRC_GEP_0]], align 8
; CHECK-NEXT: [[ADD_0:%.*]] = fadd float [[SRC_0]], 1.000000e+00
; CHECK-NEXT: [[MUL_0:%.*]] = fmul float [[ADD_0]], [[SRC_0]]
; CHECK-NEXT: [[DST_GEP_0:%.*]] = getelementptr inbounds float, float* [[DST:%.*]], i64 0
; CHECK-NEXT: store float [[MUL_0]], float* [[DST_GEP_0]], align 8
; CHECK-NEXT: [[SRC_GEP_1:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 1
; CHECK-NEXT: [[SRC_1:%.*]] = load float, float* [[SRC_GEP_1]], align 8
; CHECK-NEXT: [[ADD_1:%.*]] = fadd float [[SRC_1]], 1.000000e+00
; CHECK-NEXT: [[MUL_1:%.*]] = fmul float [[ADD_1]], [[SRC_1]]
; CHECK-NEXT: [[DST_GEP_1:%.*]] = getelementptr inbounds float, float* [[DST]], i64 1
; CHECK-NEXT: store float [[MUL_1]], float* [[DST_GEP_1]], align 8
; CHECK-NEXT: [[SRC_GEP_2:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 2
; CHECK-NEXT: [[SRC_2:%.*]] = load float, float* [[SRC_GEP_2]], align 8
; CHECK-NEXT: [[ADD_2:%.*]] = fadd float [[SRC_2]], 1.000000e+00
; CHECK-NEXT: [[MUL_2:%.*]] = fmul float [[ADD_2]], [[SRC_2]]
; CHECK-NEXT: [[DST_GEP_2:%.*]] = getelementptr inbounds float, float* [[DST]], i64 2
; CHECK-NEXT: store float [[MUL_2]], float* [[DST_GEP_2]], align 8
; CHECK-NEXT: [[SRC_GEP_3:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 3
; CHECK-NEXT: [[SRC_3:%.*]] = load float, float* [[SRC_GEP_3]], align 8
; CHECK-NEXT: [[ADD_3:%.*]] = fadd float [[SRC_3]], 1.000000e+00
; CHECK-NEXT: [[MUL_3:%.*]] = fmul float [[ADD_3]], [[SRC_3]]
; CHECK-NEXT: [[DST_GEP_3:%.*]] = getelementptr inbounds float, float* [[DST]], i64 3
; CHECK-NEXT: store float [[MUL_3]], float* [[DST_GEP_3]], align 8
; CHECK-NEXT: [[SRC_GEP_4:%.*]] = getelementptr inbounds float, float* [[SRC]], i64 4
; CHECK-NEXT: [[SRC_4:%.*]] = load float, float* [[SRC_GEP_4]], align 8
; CHECK-NEXT: [[ADD_4:%.*]] = fadd float [[SRC_4]], 1.000000e+00
; CHECK-NEXT: [[MUL_4:%.*]] = fmul float [[ADD_4]], [[SRC_4]]
; CHECK-NEXT: [[DST_GEP_4:%.*]] = getelementptr inbounds float, float* [[DST]], i64 4
; CHECK-NEXT: store float [[MUL_4]], float* [[DST_GEP_4]], align 8
; CHECK-NEXT: br i1 [[COND]], label [[LOOP]], label [[EXIT:%.*]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  br label %loop

loop:
  %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
  %iv.next = add i32 %iv, 1
  %cond = icmp ult i32 %iv, 2000

  %src.gep.0 = getelementptr inbounds float, float* %src, i64 0
  %src.0 = load float, float* %src.gep.0, align 8
  %add.0 = fadd float %src.0, 1.0
  %mul.0 = fmul float %add.0, %src.0
  %dst.gep.0 = getelementptr inbounds float, float* %dst, i64 0
  store float %mul.0, float* %dst.gep.0, align 8

  %src.gep.1 = getelementptr inbounds float, float* %src, i64 1
  %src.1 = load float, float* %src.gep.1, align 8
  %add.1 = fadd float %src.1, 1.0
  %mul.1 = fmul float %add.1, %src.1
  %dst.gep.1 = getelementptr inbounds float, float* %dst, i64 1
  store float %mul.1, float* %dst.gep.1, align 8
  %src.gep.2 = getelementptr inbounds float, float* %src, i64 2
  %src.2 = load float, float* %src.gep.2, align 8
  %add.2 = fadd float %src.2, 1.0
  %mul.2 = fmul float %add.2, %src.2
  %dst.gep.2 = getelementptr inbounds float, float* %dst, i64 2
  store float %mul.2, float* %dst.gep.2, align 8
  %src.gep.3 = getelementptr inbounds float, float* %src, i64 3
  %src.3 = load float, float* %src.gep.3, align 8
  %add.3 = fadd float %src.3, 1.0
  %mul.3 = fmul float %add.3, %src.3
  %dst.gep.3 = getelementptr inbounds float, float* %dst, i64 3
  store float %mul.3, float* %dst.gep.3, align 8
  %src.gep.4 = getelementptr inbounds float, float* %src, i64 4
  %src.4 = load float, float* %src.gep.4, align 8
  %add.4 = fadd float %src.4, 1.0
  %mul.4 = fmul float %add.4, %src.4
  %dst.gep.4 = getelementptr inbounds float, float* %dst, i64 4
  store float %mul.4, float* %dst.gep.4, align 8
  br i1 %cond, label %loop, label %exit

exit:
  ret void
}
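
For context on what these tests exercise: @loop1 rewrites four consecutive elements of %A per iteration using values loaded from %B, so the SLP vectorizer can only form vector loads and stores here if it can establish that %A and %B do not alias, which is what the memory-versioning support targeted by these tests is about (versioning the block with runtime no-alias checks, as the test names suggest). The C sketch below is an illustrative scalar equivalent of @loop1 written for this summary, not part of the commit; the function name and parameter types simply mirror the IR.

// Rough C-level sketch of the scalar pattern in @loop1 (illustrative only;
// the actual test is the IR above). Each iteration updates four consecutive
// elements of A using B, then steps the induction variable by 16, as in the
// IR (which forms the +1..+3 offsets with 'or' since the IV stays a multiple
// of 16). Vectorizing the four stores requires A and B not to alias.
void loop1(int *A, int *B, unsigned long long N) {
  unsigned long long i = 0;
  do {
    for (int k = 0; k < 4; ++k)
      A[i + k] = (A[i + k] + 20) ^ B[i + k];  // load B, load A, store A
    i += 16;                                  // matches the IR's add of 16
  } while (i < N);                            // do-while, like the IR's bottom test
}

@loop_iv_update_at_start follows the same theme with float data: it loads from fixed offsets 0..4 of %src and stores to the same offsets of %dst, with the induction-variable update and exit compare placed at the top of the loop body rather than at the bottom.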
