
Commit 771994b

[SLP] Fix tests checks, NFC.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@325605 91177308-0d34-0410-b5e6-96231b3b80d8
Parent: 20db096
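
These checks were regenerated mechanically rather than edited by hand, per the NOTE lines added below. A minimal sketch of the usual invocation of the update script (the --opt-binary path is an assumption about the local build tree, not part of this commit):

    # Re-run each RUN: line under the freshly built opt and rewrite the
    # test's CHECK lines to match the actual optimizer output.
    utils/update_test_checks.py --opt-binary=build/bin/opt \
        test/Transforms/SLPVectorizer/X86/addsub.ll

Because the output is machine-generated, the new checks pin the complete function body with CHECK-NEXT instead of a handpicked subset of instructions.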

5 files changed, +249 -74 lines changed
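
The regenerated assertions rely on FileCheck pattern variables: [[TMP0:%.*]] captures whatever SSA name opt actually emits at first use and requires the same name at every later use, so the tests stop depending on literal value numbers such as %5 or %6. A minimal sketch of the idiom (a hypothetical test, not taken from this commit):

    ; RUN: opt < %s -S | FileCheck %s
    define i32 @example(i32 %x) {
    ; CHECK-LABEL: @example(
    ; CHECK-NEXT: [[SUM:%.*]] = add i32 [[X:%.*]], 1
    ; CHECK-NEXT: ret i32 [[SUM]]
    ;
      %sum = add i32 %x, 1
      ret i32 %sum
    }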

test/Transforms/SLPVectorizer/X86/addsub.ll

+132 -46
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -12,14 +13,22 @@ target triple = "x86_64-unknown-linux-gnu"
 @fa = common global [4 x float] zeroinitializer, align 16
 @fd = common global [4 x float] zeroinitializer, align 16

-; CHECK-LABEL: @addsub
-; CHECK: %5 = add nsw <4 x i32> %3, %4
-; CHECK: %6 = add nsw <4 x i32> %2, %5
-; CHECK: %7 = sub nsw <4 x i32> %2, %5
-; CHECK: %8 = shufflevector <4 x i32> %6, <4 x i32> %7, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-
 ; Function Attrs: nounwind uwtable
 define void @addsub() #0 {
+; CHECK-LABEL: @addsub(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @b to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @c to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @d to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @e to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP7]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT: store <4 x i32> [[TMP8]], <4 x i32>* bitcast ([4 x i32]* @a to <4 x i32>*), align 4
+; CHECK-NEXT: ret void
+;
 entry:
   %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 0), align 4
   %1 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 0), align 4
@@ -56,14 +65,22 @@ entry:
   ret void
 }

-; CHECK-LABEL: @subadd
-; CHECK: %5 = add nsw <4 x i32> %3, %4
-; CHECK: %6 = sub nsw <4 x i32> %2, %5
-; CHECK: %7 = add nsw <4 x i32> %2, %5
-; CHECK: %8 = shufflevector <4 x i32> %6, <4 x i32> %7, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-
 ; Function Attrs: nounwind uwtable
 define void @subadd() #0 {
+; CHECK-LABEL: @subadd(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @b to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @c to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @d to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @e to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP7]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT: store <4 x i32> [[TMP8]], <4 x i32>* bitcast ([4 x i32]* @a to <4 x i32>*), align 4
+; CHECK-NEXT: ret void
+;
 entry:
   %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 0), align 4
   %1 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 0), align 4
@@ -100,12 +117,18 @@ entry:
   ret void
 }

-; CHECK-LABEL: @faddfsub
-; CHECK: %2 = fadd <4 x float> %0, %1
-; CHECK: %3 = fsub <4 x float> %0, %1
-; CHECK: %4 = shufflevector <4 x float> %2, <4 x float> %3, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
 ; Function Attrs: nounwind uwtable
 define void @faddfsub() #0 {
+; CHECK-LABEL: @faddfsub(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fb to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fc to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd <4 x float> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = fsub <4 x float> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> [[TMP3]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT: store <4 x float> [[TMP4]], <4 x float>* bitcast ([4 x float]* @fa to <4 x float>*), align 4
+; CHECK-NEXT: ret void
+;
 entry:
   %0 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
   %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
@@ -126,12 +149,18 @@ entry:
   ret void
 }

-; CHECK-LABEL: @fsubfadd
-; CHECK: %2 = fsub <4 x float> %0, %1
-; CHECK: %3 = fadd <4 x float> %0, %1
-; CHECK: %4 = shufflevector <4 x float> %2, <4 x float> %3, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
 ; Function Attrs: nounwind uwtable
 define void @fsubfadd() #0 {
+; CHECK-LABEL: @fsubfadd(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fb to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fc to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fsub <4 x float> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = fadd <4 x float> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> [[TMP3]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT: store <4 x float> [[TMP4]], <4 x float>* bitcast ([4 x float]* @fa to <4 x float>*), align 4
+; CHECK-NEXT: ret void
+;
 entry:
   %0 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
   %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
@@ -152,12 +181,28 @@ entry:
   ret void
 }

-; CHECK-LABEL: @No_faddfsub
-; CHECK-NOT: fadd <4 x float>
-; CHECK-NOT: fsub <4 x float>
-; CHECK-NOT: shufflevector
 ; Function Attrs: nounwind uwtable
 define void @No_faddfsub() #0 {
+; CHECK-LABEL: @No_faddfsub(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
+; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[TMP1]]
+; CHECK-NEXT: store float [[ADD]], float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 1), align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 1), align 4
+; CHECK-NEXT: [[ADD1:%.*]] = fadd float [[TMP2]], [[TMP3]]
+; CHECK-NEXT: store float [[ADD1]], float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 1), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 2), align 4
+; CHECK-NEXT: [[ADD2:%.*]] = fadd float [[TMP4]], [[TMP5]]
+; CHECK-NEXT: store float [[ADD2]], float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP6:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 3), align 4
+; CHECK-NEXT: [[TMP7:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 3), align 4
+; CHECK-NEXT: [[SUB:%.*]] = fsub float [[TMP6]], [[TMP7]]
+; CHECK-NEXT: store float [[SUB]], float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 3), align 4
+; CHECK-NEXT: ret void
+;
 entry:
   %0 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
   %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
@@ -184,11 +229,16 @@ entry:
 ; fc[2] = fa[2]+fb[2];
 ; fc[3] = fa[3]-fb[3];

-; CHECK-LABEL: @reorder_alt
-; CHECK: %3 = fadd <4 x float> %1, %2
-; CHECK: %4 = fsub <4 x float> %1, %2
-; CHECK: %5 = shufflevector <4 x float> %3, <4 x float> %4, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
 define void @reorder_alt() #0 {
+; CHECK-LABEL: @reorder_alt(
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fa to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fb to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP3:%.*]] = fadd <4 x float> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = fsub <4 x float> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> [[TMP4]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT: store <4 x float> [[TMP5]], <4 x float>* bitcast ([4 x float]* @fc to <4 x float>*), align 4
+; CHECK-NEXT: ret void
+;
   %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
   %2 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
   %3 = fadd float %1, %2
@@ -212,16 +262,22 @@ define void @reorder_alt() #0 {
 ; fc[0] = fa[0]+(fb[0]-fd[0]);
 ; fc[1] = fa[1]-(fb[1]+fd[1]);
 ; fc[2] = fa[2]+(fb[2]-fd[2]);
-; fc[3] = fa[3]-(fd[3]+fb[3]); //swapped fd and fb
+; fc[3] = fa[3]-(fd[3]+fb[3]); //swapped fd and fb

-; CHECK-LABEL: @reorder_alt_subTree
-; CHECK: %4 = fsub <4 x float> %3, %2
-; CHECK: %5 = fadd <4 x float> %3, %2
-; CHECK: %6 = shufflevector <4 x float> %4, <4 x float> %5, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
-; CHECK: %7 = fadd <4 x float> %1, %6
-; CHECK: %8 = fsub <4 x float> %1, %6
-; CHECK: %9 = shufflevector <4 x float> %7, <4 x float> %8, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
 define void @reorder_alt_subTree() #0 {
+; CHECK-LABEL: @reorder_alt_subTree(
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fa to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fd to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fb to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = fsub <4 x float> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = fadd <4 x float> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x float> [[TMP4]], <4 x float> [[TMP5]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT: [[TMP7:%.*]] = fadd <4 x float> [[TMP1]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = fsub <4 x float> [[TMP1]], [[TMP6]]
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x float> [[TMP7]], <4 x float> [[TMP8]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
+; CHECK-NEXT: store <4 x float> [[TMP9]], <4 x float>* bitcast ([4 x float]* @fc to <4 x float>*), align 4
+; CHECK-NEXT: ret void
+;
   %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
   %2 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
   %3 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fd, i32 0, i64 0), align 4
@@ -251,13 +307,28 @@ define void @reorder_alt_subTree() #0 {

 ; Check vectorization of following code for double data type-
 ; c[0] = (a[0]+b[0])-d[0];
-; c[1] = d[1]+(a[1]+b[1]); //swapped d[1] and (a[1]+b[1])
+; c[1] = d[1]+(a[1]+b[1]); //swapped d[1] and (a[1]+b[1])

-; CHECK-LABEL: @reorder_alt_rightsubTree
-; CHECK: fadd <2 x double>
-; CHECK: fsub <2 x double>
-; CHECK: shufflevector <2 x double>
 define void @reorder_alt_rightsubTree(double* nocapture %c, double* noalias nocapture readonly %a, double* noalias nocapture readonly %b, double* noalias nocapture readonly %d) {
+; CHECK-LABEL: @reorder_alt_rightsubTree(
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds double, double* [[D:%.*]], i64 1
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[D]] to <2 x double>*
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 1
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast double* [[A]] to <2 x double>*
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x double>, <2 x double>* [[TMP5]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, double* [[B:%.*]], i64 1
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast double* [[B]] to <2 x double>*
+; CHECK-NEXT: [[TMP9:%.*]] = load <2 x double>, <2 x double>* [[TMP8]], align 8
+; CHECK-NEXT: [[TMP10:%.*]] = fadd <2 x double> [[TMP6]], [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = fsub <2 x double> [[TMP10]], [[TMP3]]
+; CHECK-NEXT: [[TMP12:%.*]] = fadd <2 x double> [[TMP10]], [[TMP3]]
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x double> [[TMP11]], <2 x double> [[TMP12]], <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, double* [[C:%.*]], i64 1
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast double* [[C]] to <2 x double>*
+; CHECK-NEXT: store <2 x double> [[TMP13]], <2 x double>* [[TMP15]], align 8
+; CHECK-NEXT: ret void
+;
   %1 = load double, double* %a
   %2 = load double, double* %b
   %3 = fadd double %1, %2
@@ -283,13 +354,28 @@ define void @reorder_alt_rightsubTree(double* nocapture %c, double* noalias noca
 ; fc[2] = fa[2]+fb[2];
 ; fc[3] = fb[3]-fa[3];
 ; In the above code we can swap the 1st and 2nd operation as fadd is commutative
-; but not 2nd or 4th as fsub is not commutative.
+; but not 2nd or 4th as fsub is not commutative.

-; CHECK-LABEL: @no_vec_shuff_reorder
-; CHECK-NOT: fadd <4 x float>
-; CHECK-NOT: fsub <4 x float>
-; CHECK-NOT: shufflevector
 define void @no_vec_shuff_reorder() #0 {
+; CHECK-LABEL: @no_vec_shuff_reorder(
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
+; CHECK-NEXT: [[TMP3:%.*]] = fadd float [[TMP1]], [[TMP2]]
+; CHECK-NEXT: store float [[TMP3]], float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 1), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 1), align 4
+; CHECK-NEXT: [[TMP6:%.*]] = fsub float [[TMP4]], [[TMP5]]
+; CHECK-NEXT: store float [[TMP6]], float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 1), align 4
+; CHECK-NEXT: [[TMP7:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP8:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP9:%.*]] = fadd float [[TMP7]], [[TMP8]]
+; CHECK-NEXT: store float [[TMP9]], float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP10:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 3), align 4
+; CHECK-NEXT: [[TMP11:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 3), align 4
+; CHECK-NEXT: [[TMP12:%.*]] = fsub float [[TMP10]], [[TMP11]]
+; CHECK-NEXT: store float [[TMP12]], float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 3), align 4
+; CHECK-NEXT: ret void
+;
   %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
   %2 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
   %3 = fadd float %1, %2

test/Transforms/SLPVectorizer/X86/compare-reduce.ll

+31 -8
@@ -1,19 +1,42 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.7.0"

 @.str = private unnamed_addr constant [6 x i8] c"bingo\00", align 1

-;CHECK-LABEL: @reduce_compare(
-;CHECK: load <2 x double>
-;CHECK: fmul <2 x double>
-;CHECK: fmul <2 x double>
-;CHECK: fadd <2 x double>
-;CHECK: extractelement
-;CHECK: extractelement
-;CHECK: ret
 define void @reduce_compare(double* nocapture %A, i32 %n) {
+; CHECK-LABEL: @reduce_compare(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> undef, double [[CONV]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[CONV]], i32 1
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = shl nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x double>, <2 x double>* [[TMP3]], align 8
+; CHECK-NEXT: [[TMP5:%.*]] = fmul <2 x double> [[TMP1]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x double> <double 7.000000e+00, double 4.000000e+00>, [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = fadd <2 x double> <double 5.000000e+00, double 9.000000e+00>, [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x double> [[TMP7]], i32 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x double> [[TMP7]], i32 1
+; CHECK-NEXT: [[CMP11:%.*]] = fcmp ogt double [[TMP8]], [[TMP9]]
+; CHECK-NEXT: br i1 [[CMP11]], label [[IF_THEN:%.*]], label [[FOR_INC]]
+; CHECK: if.then:
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0))
+; CHECK-NEXT: br label [[FOR_INC]]
+; CHECK: for.inc:
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 100
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK: for.end:
+; CHECK-NEXT: ret void
+;
 entry:
   %conv = sitofp i32 %n to double
   br label %for.body
