Skip to content

Commit 3294f86

Browse files
committed
[LoopVectorize] Add support for reverse loops in isDereferenceableAndAlignedInLoop
Currently, when we encounter a negative step in the induction variable, isDereferenceableAndAlignedInLoop bails out because the element size is signed-greater-than the step. This patch adds support for negative steps in cases where we detect that the start address for the load is of the form base + offset. In this case the address decrements in each iteration, so we need to calculate the access size differently. The motivation for this patch comes from PR #88385, where a reviewer requested reusing isDereferenceableAndAlignedInLoop — but that PR itself does not support reverse loops.
1 parent 22c593d commit 3294f86

File tree

2 files changed

+94
-60
lines changed

2 files changed

+94
-60
lines changed

llvm/lib/Analysis/Loads.cpp

Lines changed: 50 additions & 19 deletions
Original file line number · Diff line number · Diff line change
@@ -293,26 +293,65 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
293293

294294
// TODO: Handle overlapping accesses.
295295
// We should be computing AccessSize as (TC - 1) * Step + EltSize.
296-
if (EltSize.sgt(Step->getAPInt()))
296+
bool StepIsNegative = Step->getAPInt().isNegative();
297+
APInt AbsStep = Step->getAPInt().abs();
298+
if (EltSize.ugt(AbsStep))
299+
return false;
300+
301+
// For the moment, restrict ourselves to the case where the access size is a
302+
// multiple of the requested alignment and the base is aligned.
303+
// TODO: generalize if a case found which warrants
304+
if (EltSize.urem(Alignment.value()) != 0)
297305
return false;
298306

299307
// Compute the total access size for access patterns with unit stride and
300308
// patterns with gaps. For patterns with unit stride, Step and EltSize are the
301309
// same.
302310
// For patterns with gaps (i.e. non unit stride), we are
303311
// accessing EltSize bytes at every Step.
304-
APInt AccessSize = TC * Step->getAPInt();
312+
APInt AccessSize = TC * AbsStep;
305313

306314
assert(SE.isLoopInvariant(AddRec->getStart(), L) &&
307315
"implied by addrec definition");
308316
Value *Base = nullptr;
309317
if (auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart())) {
318+
if (StepIsNegative)
319+
return false;
310320
Base = StartS->getValue();
311321
} else if (auto *StartS = dyn_cast<SCEVAddExpr>(AddRec->getStart())) {
312-
// Handle (NewBase + offset) as start value.
313-
const auto *Offset = dyn_cast<SCEVConstant>(StartS->getOperand(0));
314-
const auto *NewBase = dyn_cast<SCEVUnknown>(StartS->getOperand(1));
315-
if (StartS->getNumOperands() == 2 && Offset && NewBase) {
322+
const SCEV *End = AddRec->evaluateAtIteration(
323+
SE.getConstant(StartS->getType(), TC - 1), SE);
324+
325+
// The step recurrence could be negative so it's necessary to find the min
326+
// and max accessed addresses in the loop.
327+
const SCEV *Min = SE.getUMinExpr(StartS, End);
328+
const SCEV *Max = SE.getUMaxExpr(StartS, End);
329+
if (isa<SCEVCouldNotCompute>(Min) || isa<SCEVCouldNotCompute>(Max))
330+
return false;
331+
332+
// Now calculate the total access size, which is (max - min) + element_size.
333+
const SCEV *Diff = SE.getMinusSCEV(Max, Min);
334+
if (isa<SCEVCouldNotCompute>(Diff))
335+
return false;
336+
337+
const SCEV *AS = SE.getAddExpr(
338+
Diff, SE.getConstant(Diff->getType(), EltSize.getZExtValue()));
339+
auto *ASC = dyn_cast<SCEVConstant>(AS);
340+
if (!ASC)
341+
return false;
342+
343+
if (const SCEVUnknown *NewBase = dyn_cast<SCEVUnknown>(Min)) {
344+
Base = NewBase->getValue();
345+
AccessSize = ASC->getAPInt();
346+
} else if (auto *MinAddRec = dyn_cast<SCEVAddExpr>(Min)) {
347+
if (MinAddRec->getNumOperands() != 2)
348+
return false;
349+
350+
const auto *Offset = dyn_cast<SCEVConstant>(MinAddRec->getOperand(0));
351+
const auto *NewBase = dyn_cast<SCEVUnknown>(MinAddRec->getOperand(1));
352+
if (!Offset || !NewBase)
353+
return false;
354+
316355
// The following code below assumes the offset is unsigned, but GEP
317356
// offsets are treated as signed so we can end up with a signed value
318357
// here too. For example, suppose the initial PHI value is (i8 255),
@@ -325,22 +364,14 @@ bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
325364
// TODO: generalize if a case found which warrants
326365
if (Offset->getAPInt().urem(Alignment.value()) != 0)
327366
return false;
328-
Base = NewBase->getValue();
329-
bool Overflow = false;
330-
AccessSize = AccessSize.uadd_ov(Offset->getAPInt(), Overflow);
331-
if (Overflow)
332-
return false;
333-
}
334-
}
335367

336-
if (!Base)
368+
AccessSize = ASC->getAPInt() + Offset->getAPInt();
369+
Base = NewBase->getValue();
370+
} else
371+
return false;
372+
} else
337373
return false;
338374

339-
// For the moment, restrict ourselves to the case where the access size is a
340-
// multiple of the requested alignment and the base is aligned.
341-
// TODO: generalize if a case found which warrants
342-
if (EltSize.urem(Alignment.value()) != 0)
343-
return false;
344375
return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
345376
HeaderFirstNonPHI, AC, &DT);
346377
}

llvm/test/Transforms/LoopVectorize/load-deref-pred-align.ll

Lines changed: 44 additions & 41 deletions
Original file line number · Diff line number · Diff line change
@@ -311,7 +311,7 @@ define void @test_rev_loops_deref_loads(ptr nocapture noundef writeonly %dest) {
311311
; CHECK: vector.ph:
312312
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
313313
; CHECK: vector.body:
314-
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE2:%.*]] ]
314+
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE4:%.*]] ]
315315
; CHECK-NEXT: [[OFFSET_IDX:%.*]] = sub i64 1023, [[INDEX]]
316316
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
317317
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_CMP]], i64 0, i64 [[TMP0]]
@@ -321,30 +321,33 @@ define void @test_rev_loops_deref_loads(ptr nocapture noundef writeonly %dest) {
321321
; CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <2 x i32> [[WIDE_LOAD]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
322322
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <2 x i32> [[REVERSE]], <i32 3, i32 3>
323323
; CHECK-NEXT: [[TMP5:%.*]] = xor <2 x i1> [[TMP4]], <i1 true, i1 true>
324-
; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
325-
; CHECK-NEXT: br i1 [[TMP6]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
324+
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[TMP0]]
325+
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i32, ptr [[TMP6]], i32 0
326+
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i32, ptr [[TMP7]], i32 -1
327+
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x i32>, ptr [[TMP8]], align 4
328+
; CHECK-NEXT: [[REVERSE2:%.*]] = shufflevector <2 x i32> [[WIDE_LOAD1]], <2 x i32> poison, <2 x i32> <i32 1, i32 0>
329+
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
330+
; CHECK-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
326331
; CHECK: pred.store.if:
327-
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[TMP0]]
328-
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
329-
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[TMP0]]
330-
; CHECK-NEXT: [[TMP10:%.*]] = shl nsw i32 [[TMP8]], 2
331-
; CHECK-NEXT: store i32 [[TMP10]], ptr [[TMP9]], align 4
332+
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[TMP0]]
333+
; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i32> [[REVERSE2]], i32 0
334+
; CHECK-NEXT: [[TMP12:%.*]] = shl nsw i32 [[TMP11]], 2
335+
; CHECK-NEXT: store i32 [[TMP12]], ptr [[TMP10]], align 4
332336
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
333337
; CHECK: pred.store.continue:
334-
; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
335-
; CHECK-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
336-
; CHECK: pred.store.if1:
337-
; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], -1
338-
; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[TMP12]]
339-
; CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4
340-
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[TMP12]]
341-
; CHECK-NEXT: [[TMP16:%.*]] = shl nsw i32 [[TMP14]], 2
342-
; CHECK-NEXT: store i32 [[TMP16]], ptr [[TMP15]], align 4
343-
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]]
344-
; CHECK: pred.store.continue2:
338+
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
339+
; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF3:%.*]], label [[PRED_STORE_CONTINUE4]]
340+
; CHECK: pred.store.if3:
341+
; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], -1
342+
; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[TMP14]]
343+
; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i32> [[REVERSE2]], i32 1
344+
; CHECK-NEXT: [[TMP17:%.*]] = shl nsw i32 [[TMP16]], 2
345+
; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP15]], align 4
346+
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE4]]
347+
; CHECK: pred.store.continue4:
345348
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
346-
; CHECK-NEXT: [[TMP17:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
347-
; CHECK-NEXT: br i1 [[TMP17]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
349+
; CHECK-NEXT: [[TMP18:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
350+
; CHECK-NEXT: br i1 [[TMP18]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
348351
; CHECK: middle.block:
349352
; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
350353
; CHECK: scalar.ph:
@@ -353,13 +356,13 @@ define void @test_rev_loops_deref_loads(ptr nocapture noundef writeonly %dest) {
353356
; CHECK: for.body:
354357
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
355358
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_CMP]], i64 0, i64 [[INDVARS_IV]]
356-
; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
357-
; CHECK-NEXT: [[CMP3_NOT:%.*]] = icmp eq i32 [[TMP18]], 3
359+
; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
360+
; CHECK-NEXT: [[CMP3_NOT:%.*]] = icmp eq i32 [[TMP19]], 3
358361
; CHECK-NEXT: br i1 [[CMP3_NOT]], label [[FOR_INC]], label [[IF_THEN:%.*]]
359362
; CHECK: if.then:
360363
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[INDVARS_IV]]
361-
; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
362-
; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP19]], 2
364+
; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
365+
; CHECK-NEXT: [[MUL:%.*]] = shl nsw i32 [[TMP20]], 2
363366
; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[INDVARS_IV]]
364367
; CHECK-NEXT: store i32 [[MUL]], ptr [[ARRAYIDX7]], align 4
365368
; CHECK-NEXT: br label [[FOR_INC]]
@@ -635,26 +638,26 @@ define void @test_rev_loops_strided_deref_loads(ptr nocapture noundef writeonly
635638
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <2 x i32> [[REVERSE]], <i32 3, i32 3>
636639
; CHECK-NEXT: [[TMP5:%.*]] = xor <2 x i1> [[TMP4]], <i1 true, i1 true>
637640
; CHECK-NEXT: [[TMP6:%.*]] = mul <2 x i64> [[VEC_IND]], <i64 2, i64 2>
638-
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
639-
; CHECK-NEXT: br i1 [[TMP7]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
641+
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i64> [[TMP6]], i32 0
642+
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[TMP7]]
643+
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i64> [[TMP6]], i32 1
644+
; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[TMP9]]
645+
; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP8]], align 4
646+
; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP10]], align 4
647+
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP5]], i32 0
648+
; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
640649
; CHECK: pred.store.if:
641-
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i64> [[TMP6]], i32 0
642-
; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[TMP8]]
643-
; CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP9]], align 4
644-
; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[TMP0]]
645-
; CHECK-NEXT: [[TMP12:%.*]] = shl nsw i32 [[TMP10]], 2
646-
; CHECK-NEXT: store i32 [[TMP12]], ptr [[TMP11]], align 4
650+
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[TMP0]]
651+
; CHECK-NEXT: [[TMP15:%.*]] = shl nsw i32 [[TMP11]], 2
652+
; CHECK-NEXT: store i32 [[TMP15]], ptr [[TMP14]], align 4
647653
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE]]
648654
; CHECK: pred.store.continue:
649-
; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
650-
; CHECK-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
655+
; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP5]], i32 1
656+
; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_STORE_IF1:%.*]], label [[PRED_STORE_CONTINUE2]]
651657
; CHECK: pred.store.if1:
652-
; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], -1
653-
; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i64> [[TMP6]], i32 1
654-
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_SRC]], i64 0, i64 [[TMP15]]
655-
; CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP16]], align 4
656-
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[TMP14]]
657-
; CHECK-NEXT: [[TMP19:%.*]] = shl nsw i32 [[TMP17]], 2
658+
; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[OFFSET_IDX]], -1
659+
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1024 x i32], ptr [[LOCAL_DEST]], i64 0, i64 [[TMP17]]
660+
; CHECK-NEXT: [[TMP19:%.*]] = shl nsw i32 [[TMP12]], 2
658661
; CHECK-NEXT: store i32 [[TMP19]], ptr [[TMP18]], align 4
659662
; CHECK-NEXT: br label [[PRED_STORE_CONTINUE2]]
660663
; CHECK: pred.store.continue2:

0 commit comments

Comments (0)