; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=loop-vectorize < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; The loop below carries metadata (llvm.loop.vectorize.predicate.enable) that
; requests tail-folding, but this cannot be honoured because the early exit
; requires a scalar epilogue. So we should fall back on normal vectorization
; with a scalar epilogue.
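;
; For reference, the loop under test is roughly equivalent to this C loop
; (an illustrative sketch only, not part of the test input):
;
;   int i = 0;
;   for (;;) {
;     dst[i] = src[i];   /* the copy also runs on the final, exiting iteration */
;     if (i > 30)        /* early exit taken after copying element 31 (32 elements total) */
;       break;
;     i++;
;   }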

define void @foo(ptr noalias %dst, ptr noalias readonly %src, i32 %n) #0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[TMP0]], 4
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i32 32, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP3:%.*]] = mul i32 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i32 32, [[TMP3]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[N_MOD_VF]], 0
; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i32 [[TMP3]], i32 [[N_MOD_VF]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i32 32, [[TMP5]]
; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vscale.i32()
; CHECK-NEXT: [[TMP7:%.*]] = mul i32 [[TMP6]], 4
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[INDEX]], 0
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[SRC:%.*]], i32 [[TMP8]]
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP9]], i32 0
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP10]], align 4
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i32 [[TMP8]]
; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP11]], i32 0
; CHECK-NEXT: store <vscale x 4 x i32> [[WIDE_LOAD]], ptr [[TMP12]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP7]]
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
; CHECK-NEXT: [[EXIT_EARLY_COND:%.*]] = icmp sgt i32 [[INDVARS_IV]], 30
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[SRC]], i32 [[INDVARS_IV]]
; CHECK-NEXT: [[VAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[DST]], i32 [[INDVARS_IV]]
; CHECK-NEXT: store i32 [[VAL]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: br i1 [[EXIT_EARLY_COND]], label [[FOR_END:%.*]], label [[FOR_INC]]
; CHECK: for.inc:
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i32 [[INDVARS_IV]], 1
; CHECK-NEXT: br label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i32 [ 0, %entry ], [ %indvars.iv.next, %for.inc ]
  %exit.early.cond = icmp sgt i32 %indvars.iv, 30
  %arrayidx = getelementptr inbounds i32, ptr %src, i32 %indvars.iv
  %val = load i32, ptr %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, ptr %dst, i32 %indvars.iv
  store i32 %val, ptr %arrayidx2, align 4
  br i1 %exit.early.cond, label %for.end, label %for.inc

for.inc:
  %indvars.iv.next = add nsw i32 %indvars.iv, 1
  br label %for.body, !llvm.loop !0

for.end:
  ret void
}

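; Loop metadata requesting tail-folding (predicated vectorization).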
!0 = distinct !{!0, !1}
!1 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}

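; +sve together with vscale_range(1,16) is what lets the vectorizer pick a
; scalable VF (<vscale x 4 x i32>) for this target.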
attributes #0 = { "target-features"="+sve" vscale_range(1,16) }