@@ -799,8 +799,13 @@ getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
799
799
Value *Ptr , PredicatedScalarEvolution &PSE) {
800
800
// The access function must stride over the innermost loop.
801
801
if (Lp != AR->getLoop ()) {
802
- LLVM_DEBUG (dbgs () << " LAA: Bad stride - Not striding over innermost loop "
803
- << *Ptr << " SCEV: " << *AR << " \n " );
802
+ LLVM_DEBUG ({
803
+ dbgs () << " LAA: Bad stride - Not striding over innermost loop " ;
804
+ if (Ptr )
805
+ dbgs () << *Ptr << " " ;
806
+
807
+ dbgs () << " SCEV: " << *AR << " \n " ;
808
+ });
804
809
return std::nullopt;
805
810
}
806
811
@@ -810,8 +815,12 @@ getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
810
815
// Calculate the pointer stride and check if it is constant.
811
816
const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
812
817
if (!C) {
813
- LLVM_DEBUG (dbgs () << " LAA: Bad stride - Not a constant strided " << *Ptr
814
- << " SCEV: " << *AR << " \n " );
818
+ LLVM_DEBUG ({
819
+ dbgs () << " LAA: Bad stride - Not a constant strided " ;
820
+ if (Ptr )
821
+ dbgs () << *Ptr << " " ;
822
+ dbgs () << " SCEV: " << *AR << " \n " ;
823
+ });
815
824
return std::nullopt;
816
825
}
817
826
@@ -838,7 +847,8 @@ getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
838
847
static bool isNoWrapAddRec (Value *Ptr , const SCEVAddRecExpr *AR,
839
848
PredicatedScalarEvolution &PSE, const Loop *L);
840
849
841
- // / Check whether a pointer address cannot wrap.
850
+ /// Check whether a pointer address cannot wrap. If \p Ptr is not nullptr, use
851
+ /// information from the IR pointer value to determine no-wrap.
842
852
static bool isNoWrap (PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
843
853
Value *Ptr , Type *AccessTy, const Loop *L, bool Assume,
844
854
std::optional<int64_t > Stride = std::nullopt) {
@@ -852,7 +862,7 @@ static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
852
862
// location will be larger than half the pointer index type space. In that
853
863
// case, the GEP would be poison and any memory access dependent on it would
854
864
// be immediate UB when executed.
855
- if (auto *GEP = dyn_cast <GetElementPtrInst>(Ptr );
865
+ if (auto *GEP = dyn_cast_if_present <GetElementPtrInst>(Ptr );
856
866
GEP && GEP->hasNoUnsignedSignedWrap ())
857
867
return true ;
858
868
@@ -868,6 +878,9 @@ static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
868
878
return true ;
869
879
}
870
880
881
+ if (!Ptr )
882
+ return false ;
883
+
871
884
if (Assume) {
872
885
PSE.setNoOverflow (Ptr , SCEVWrapPredicate::IncrementNUSW);
873
886
LLVM_DEBUG (dbgs () << " LAA: Pointer may wrap:\n "
@@ -1135,13 +1148,10 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1135
1148
1136
1149
// When we run after a failing dependency check we have to make sure
1137
1150
// we don't have wrapping pointers.
1138
- if (ShouldCheckWrap) {
1139
- // Skip wrap checking when translating pointers.
1140
- if (TranslatedPtrs.size () > 1 )
1141
- return false ;
1142
-
1143
- if (!isNoWrap (PSE, AR, Ptr , AccessTy, TheLoop, Assume))
1144
- return false ;
1151
+ if (ShouldCheckWrap &&
1152
+ !isNoWrap (PSE, AR, TranslatedPtrs.size () == 1 ? Ptr : nullptr , AccessTy,
1153
+ TheLoop, Assume)) {
1154
+ return false ;
1145
1155
}
1146
1156
}
1147
1157
@@ -1454,6 +1464,9 @@ static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1454
1464
if (AR->getNoWrapFlags (SCEV::NoWrapMask))
1455
1465
return true ;
1456
1466
1467
+ if (!Ptr )
1468
+ return false ;
1469
+
1457
1470
if (PSE.hasNoOverflow (Ptr , SCEVWrapPredicate::IncrementNUSW))
1458
1471
return true ;
1459
1472
0 commit comments