@@ -800,8 +800,13 @@ getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
                     Value *Ptr, PredicatedScalarEvolution &PSE) {
   // The access function must stride over the innermost loop.
   if (Lp != AR->getLoop()) {
-    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
-                      << *Ptr << " SCEV: " << *AR << "\n");
+    LLVM_DEBUG({
+      dbgs() << "LAA: Bad stride - Not striding over innermost loop ";
+      if (Ptr)
+        dbgs() << *Ptr << " ";
+
+      dbgs() << "SCEV: " << *AR << "\n";
+    });
     return std::nullopt;
   }
 
@@ -811,8 +816,12 @@ getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
   // Calculate the pointer stride and check if it is constant.
   const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
   if (!C) {
-    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
-                      << " SCEV: " << *AR << "\n");
+    LLVM_DEBUG({
+      dbgs() << "LAA: Bad stride - Not a constant strided ";
+      if (Ptr)
+        dbgs() << *Ptr << " ";
+      dbgs() << "SCEV: " << *AR << "\n";
+    });
     return std::nullopt;
   }
 
@@ -839,29 +848,29 @@ getStrideFromAddRec(const SCEVAddRecExpr *AR, const Loop *Lp, Type *AccessTy,
 static bool isNoWrapGEP(Value *Ptr, PredicatedScalarEvolution &PSE,
                         const Loop *L);
 
-/// Check whether \p AR is a non-wrapping AddRec, or if \p Ptr is a non-wrapping
-/// GEP.
+/// Check whether \p AR is a non-wrapping AddRec. If \p Ptr is not nullptr, use
+/// information from the IR pointer value to determine no-wrap.
 static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
                      Value *Ptr, Type *AccessTy, const Loop *L, bool Assume,
                      std::optional<int64_t> Stride = std::nullopt) {
   // FIXME: This should probably only return true for NUW.
   if (AR->getNoWrapFlags(SCEV::NoWrapMask))
     return true;
 
-  if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
+  if (Ptr && PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
     return true;
 
   // The address calculation must not wrap. Otherwise, a dependence could be
   // inverted.
-  if (isNoWrapGEP(Ptr, PSE, L))
+  if (Ptr && isNoWrapGEP(Ptr, PSE, L))
     return true;
 
   // An nusw getelementptr that is an AddRec cannot wrap. If it would wrap,
   // the distance between the previously accessed location and the wrapped
   // location will be larger than half the pointer index type space. In that
   // case, the GEP would be poison and any memory access dependent on it would
   // be immediate UB when executed.
-  if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
+  if (auto *GEP = dyn_cast_if_present<GetElementPtrInst>(Ptr);
       GEP && GEP->hasNoUnsignedSignedWrap())
     return true;
 
@@ -877,7 +886,7 @@ static bool isNoWrap(PredicatedScalarEvolution &PSE, const SCEVAddRecExpr *AR,
     return true;
   }
 
-  if (Assume) {
+  if (Ptr && Assume) {
     PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
     LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
                       << "LAA: Pointer: " << *Ptr << "\n"
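Aside, not part of the commit: the guard above works because dyn_cast_if_present (from llvm/Support/Casting.h) is the null-tolerant variant of dyn_cast, returning nullptr for a null input where plain dyn_cast would assert. A minimal standalone sketch of the same pattern, assuming LLVM headers are available; the isNUSWGEP wrapper is hypothetical:

#include "llvm/IR/Instructions.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Hypothetical helper mirroring the check in isNoWrap: true only when Ptr is
// a non-null GEP that carries the nusw (no-unsigned-signed-wrap) flag.
static bool isNUSWGEP(Value *Ptr) {
  // dyn_cast<GetElementPtrInst>(Ptr) would assert if Ptr were null;
  // dyn_cast_if_present simply yields nullptr in that case.
  auto *GEP = dyn_cast_if_present<GetElementPtrInst>(Ptr);
  return GEP && GEP->hasNoUnsignedSignedWrap();
}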
@@ -1144,13 +1153,10 @@ bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
 
     // When we run after a failing dependency check we have to make sure
     // we don't have wrapping pointers.
-    if (ShouldCheckWrap) {
-      // Skip wrap checking when translating pointers.
-      if (TranslatedPtrs.size() > 1)
-        return false;
-
-      if (!isNoWrap(PSE, AR, Ptr, AccessTy, TheLoop, Assume))
-        return false;
+    if (ShouldCheckWrap &&
+        !isNoWrap(PSE, AR, TranslatedPtrs.size() == 1 ? Ptr : nullptr, AccessTy,
+                  TheLoop, Assume)) {
+      return false;
     }
   }
 
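For illustration, not in the commit: with a null Ptr, isNoWrap skips every IR-pointer-based check above (hasNoOverflow, isNoWrapGEP, the nusw-GEP test, and the Assume path that would record a SCEVWrapPredicate) and can only succeed via SCEV reasoning such as the AddRec's no-wrap flags or the stride check. A hedged sketch of the resulting caller shape, using names from the diff; PtrForWrapCheck is invented here for clarity:

// When pointer translation (e.g. through a select between two pointers)
// produced more than one candidate, no single IR value describes the access,
// so pass nullptr and rely on SCEV-only no-wrap reasoning instead of bailing
// out of runtime-check creation entirely, as the old code did.
Value *PtrForWrapCheck = TranslatedPtrs.size() == 1 ? Ptr : nullptr;
if (ShouldCheckWrap &&
    !isNoWrap(PSE, AR, PtrForWrapCheck, AccessTy, TheLoop, Assume))
  return false; // may wrap: no runtime check is created for this access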