; RUN: opt -passes=loop-vectorize %s -force-vector-width=1 -force-vector-interleave=2 -S -o - | FileCheck %s

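; Test that with a forced scalar VF (-force-vector-width=1) and an interleave
; count of 2, the addrspacecast, GEP and store from the loop body are
; replicated into both predicated store blocks of the interleaved loop.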
define void @foo(ptr addrspace(1) %in) {
entry:
  br label %loop

loop:
  %iter = phi i64 [ %next, %loop ], [ 0, %entry ]
  %ascast = addrspacecast ptr addrspace(1) %in to ptr
  %next = add i64 %iter, 1
  %arrayidx = getelementptr inbounds i64, ptr %ascast, i64 %next
  store i64 %next, ptr %arrayidx, align 4

; check that we find the two interleaved blocks with ascast, gep and store:
; CHECK: pred.store.if:
; CHECK: [[ID1:%.*]] = add i64 %{{.*}}, 1
; CHECK: [[AS1:%.*]] = addrspacecast ptr addrspace(1) %{{.*}} to ptr
; CHECK: [[GEP1:%.*]] = getelementptr inbounds i64, ptr [[AS1]], i64 [[ID1]]
; CHECK: store i64 [[ID1]], ptr [[GEP1]]

; CHECK: pred.store.if1:
; CHECK: [[ID2:%.*]] = add i64 %{{.*}}, 1
; CHECK: [[AS2:%.*]] = addrspacecast ptr addrspace(1) %in to ptr
; CHECK: [[GEP2:%.*]] = getelementptr inbounds i64, ptr [[AS2]], i64 [[ID2]]
; CHECK: store i64 [[ID2]], ptr [[GEP2]]

  %cmp = icmp eq i64 %next, 7
  br i1 %cmp, label %exit, label %loop

; check that we branch to the exit block
; CHECK: middle.block:
; CHECK: br i1 true, label %exit, label %scalar.ph

exit:
  ret void
; CHECK: exit:
; CHECK: ret void
}

; CHECK: !{{[0-9]*}} = !{!"llvm.loop.isvectorized", i32 1}