[BasicAA] Do not decompose past casts with different index width #119365

Merged: 2 commits merged on Dec 13, 2024
2 changes: 1 addition & 1 deletion clang/test/CodeGen/SystemZ/zos-mixed-ptr-sizes-malloc.c
@@ -4,7 +4,7 @@ void *__malloc31(size_t);
 
 int test_1() {
   // X64-LABEL: define {{.*}} i32 @test_1()
-  // X64: ret i32 135
+  // X64: ret i32 %add20
   int *__ptr32 a;
   int *b;
   int i;
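The updated CHECK line reflects the more conservative aliasing answer: a 32-bit (__ptr32) pointer and a default 64-bit pointer now live in index spaces of different widths, so BasicAA no longer proves them NoAlias and the result is no longer folded to the constant 135. A rough C sketch of the pattern (hypothetical, not the actual test body; the real test uses __malloc31):

/* Hypothetical sketch: the two pointers have different index widths, so
 * BasicAA now answers MayAlias and the compiler keeps the runtime add. */
int sketch(int *__ptr32 a, int *b) {
  *a = 100;
  *b = 35;        /* may clobber *a under the new conservative answer */
  return *a + *b; /* previously folded to 135; now kept as an add */
}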
65 changes: 26 additions & 39 deletions llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -494,20 +494,6 @@ static LinearExpression GetLinearExpression(
   return Val;
 }
 
-/// To ensure a pointer offset fits in an integer of size IndexSize
-/// (in bits) when that size is smaller than the maximum index size. This is
-/// an issue, for example, in particular for 32b pointers with negative indices
-/// that rely on two's complement wrap-arounds for precise alias information
-/// where the maximum index size is 64b.
-static void adjustToIndexSize(APInt &Offset, unsigned IndexSize) {
-  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
-  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
-  if (ShiftBits != 0) {
-    Offset <<= ShiftBits;
-    Offset.ashrInPlace(ShiftBits);
-  }
-}
-
 namespace {
 // A linear transformation of a Value; this class represents
 // ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
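For reference, the deleted helper sign-extended an offset from its low IndexSize bits back to the full width. An equivalent formulation in terms of APInt::trunc/sext (a sketch for illustration, not code from this patch):

#include "llvm/ADT/APInt.h"

// Sketch only: same effect as the removed shift-left / ashr-in-place pair.
// After this patch, offsets are created at the pointer's own index width,
// so no such post-hoc adjustment is needed anymore.
static void adjustToIndexSizeEquivalent(llvm::APInt &Offset, unsigned IndexSize) {
  unsigned BW = Offset.getBitWidth();
  if (IndexSize < BW)
    Offset = Offset.trunc(IndexSize).sext(BW);
}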
@@ -594,9 +580,9 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
   SearchTimes++;
   const Instruction *CxtI = dyn_cast<Instruction>(V);
 
-  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
    [Review comment, Contributor]: looks like getMaxIndexSizeInBits() is only used here
    [Reply, Contributor Author]: Dropped the API in 07aab4a.

+  unsigned IndexSize = DL.getIndexTypeSizeInBits(V->getType());
   DecomposedGEP Decomposed;
-  Decomposed.Offset = APInt(MaxIndexSize, 0);
+  Decomposed.Offset = APInt(IndexSize, 0);
   do {
     // See if this is a bitcast or GEP.
     const Operator *Op = dyn_cast<Operator>(V);
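The offset width is now taken from the index type of the pointer being decomposed rather than from the module-wide maximum. A small sketch (hypothetical datalayout string) of what DL.getIndexTypeSizeInBits returns for two address spaces:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

// Sketch: default pointers are 64-bit, addrspace(1) pointers 32-bit, so the
// decomposed offsets for values of the two pointer types have different widths.
void indexWidthSketch() {
  llvm::LLVMContext Ctx;
  llvm::DataLayout DL("p:64:64-p1:32:32");
  unsigned W0 = DL.getIndexTypeSizeInBits(llvm::PointerType::get(Ctx, 0)); // 64
  unsigned W1 = DL.getIndexTypeSizeInBits(llvm::PointerType::get(Ctx, 1)); // 32
  (void)W0;
  (void)W1;
}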
@@ -614,7 +600,14 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
 
     if (Op->getOpcode() == Instruction::BitCast ||
         Op->getOpcode() == Instruction::AddrSpaceCast) {
-      V = Op->getOperand(0);
+      Value *NewV = Op->getOperand(0);
+      // Don't look through casts between address spaces with differing index
+      // widths.
+      if (DL.getIndexTypeSizeInBits(NewV->getType()) != IndexSize) {
+        Decomposed.Base = V;
+        return Decomposed;
+      }
+      V = NewV;
       continue;
     }
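As an illustration (hypothetical module, not part of the patch): with a 64-bit default index width and a 32-bit addrspace(1) index width, decomposition of %gep now stops at %q instead of walking through the cast into the narrower index space of %p.

; Hypothetical IR: the cast crosses index widths (64 vs. 32 bits), so
; DecomposeGEPExpression returns with Base = %q rather than Base = %p.
target datalayout = "p:64:64-p1:32:32"

define i32 @sketch(ptr addrspace(1) %p) {
  %q = addrspacecast ptr addrspace(1) %p to ptr
  %gep = getelementptr i8, ptr %q, i64 4
  %v = load i32, ptr %gep
  ret i32 %v
}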

@@ -651,12 +644,8 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
 
     assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
 
-    unsigned AS = GEPOp->getPointerAddressSpace();
     // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
     gep_type_iterator GTI = gep_type_begin(GEPOp);
-    unsigned IndexSize = DL.getIndexSizeInBits(AS);
-    // Assume all GEP operands are constants until proven otherwise.
-    bool GepHasConstantOffset = true;
     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
          I != E; ++I, ++GTI) {
       const Value *Index = *I;
@@ -684,7 +673,7 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
       }
 
         Decomposed.Offset += AllocTypeSize.getFixedValue() *
-                             CIdx->getValue().sextOrTrunc(MaxIndexSize);
+                             CIdx->getValue().sextOrTrunc(IndexSize);
         continue;
       }

@@ -694,8 +683,6 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
         return Decomposed;
       }
 
-      GepHasConstantOffset = false;
-
       // If the integer type is smaller than the index size, it is implicitly
       // sign extended or truncated to index size.
       bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
@@ -710,8 +697,8 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
       // Scale by the type size.
       unsigned TypeSize = AllocTypeSize.getFixedValue();
       LE = LE.mul(APInt(IndexSize, TypeSize), NUW, NUSW);
-      Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
-      APInt Scale = LE.Scale.sext(MaxIndexSize);
+      Decomposed.Offset += LE.Offset;
+      APInt Scale = LE.Scale;
       if (!LE.IsNUW)
         Decomposed.NWFlags = Decomposed.NWFlags.withoutNoUnsignedWrap();

@@ -731,21 +718,13 @@ BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
         }
       }
 
-      // Make sure that we have a scale that makes sense for this target's
-      // index size.
-      adjustToIndexSize(Scale, IndexSize);
-
       if (!!Scale) {
         VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
                                   /* IsNegated */ false};
         Decomposed.VarIndices.push_back(Entry);
       }
     }
 
-    // Take care of wrap-arounds
-    if (GepHasConstantOffset)
-      adjustToIndexSize(Decomposed.Offset, IndexSize);
-
     // Analyze the base pointer next.
     V = GEPOp->getOperand(0);
   } while (--MaxLookup);
@@ -1084,6 +1063,14 @@ AliasResult BasicAAResult::aliasGEP(
     const GEPOperator *GEP1, LocationSize V1Size,
     const Value *V2, LocationSize V2Size,
     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
+  auto BaseObjectsAlias = [&]() {
+    AliasResult BaseAlias =
+        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
+                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
+    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
+                                             : AliasResult::MayAlias;
+  };
+
   if (!V1Size.hasValue() && !V2Size.hasValue()) {
     // TODO: This limitation exists for compile-time reasons. Relax it if we
     // can avoid exponential pathological cases.
@@ -1092,11 +1079,7 @@ AliasResult BasicAAResult::aliasGEP(
 
     // If both accesses have unknown size, we can only check whether the base
     // objects don't alias.
-    AliasResult BaseAlias =
-        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
-                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
-    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
-                                             : AliasResult::MayAlias;
+    return BaseObjectsAlias();
   }
 
   DominatorTree *DT = getDT(AAQI);
@@ -1107,6 +1090,10 @@ AliasResult BasicAAResult::aliasGEP(
   if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
     return AliasResult::MayAlias;
 
+  // Fall back to base objects if pointers have different index widths.
+  if (DecompGEP1.Offset.getBitWidth() != DecompGEP2.Offset.getBitWidth())
+    return BaseObjectsAlias();
+
   // Swap GEP1 and GEP2 if GEP2 has more variable indices.
   if (DecompGEP1.VarIndices.size() < DecompGEP2.VarIndices.size()) {
     std::swap(DecompGEP1, DecompGEP2);
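A sketch of when the two widths can differ (hypothetical IR): %gep1 is decomposed with a 64-bit offset, while decomposition of %gep2 stops at the addrspacecast and produces a 32-bit offset, so the query is answered from the underlying objects instead of the offsets.

; Hypothetical IR: DecompGEP1.Offset is 64 bits wide and DecompGEP2.Offset is
; 32 bits wide, so aliasGEP falls back to BaseObjectsAlias().
target datalayout = "p:64:64-p1:32:32"

define void @sketch(ptr %p) {
  %c = addrspacecast ptr %p to ptr addrspace(1)
  %gep1 = getelementptr i8, ptr %p, i64 8
  %gep2 = getelementptr i8, ptr addrspace(1) %c, i32 12
  store i32 1, ptr %gep1
  store i32 2, ptr addrspace(1) %gep2
  ret void
}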
3 changes: 1 addition & 2 deletions llvm/test/Analysis/BasicAA/smaller-index-size-overflow.ll
@@ -2,8 +2,7 @@
 
 target datalayout = "p1:32:32"
 
-; FIXME: This is a miscompile.
-; CHECK: NoAlias: i32 addrspace(1)* %gep1, i32 addrspace(1)* %gep2
+; CHECK: PartialAlias: i32 addrspace(1)* %gep1, i32 addrspace(1)* %gep2
define void @test(ptr addrspace(1) %p) {
  %gep1 = getelementptr i8, ptr addrspace(1) %p, i32 u0x7fffffff
  %gep2 = getelementptr i8, ptr addrspace(1) %p, i32 u0x80000001
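With a 32-bit index width the two offsets are only 2 bytes apart (u0x80000001 - u0x7fffffff = 2), so the two 4-byte accesses overlap in two bytes and PartialAlias is the correct answer. The old decomposition widened the offsets to 64 bits, where they appear about 4 GiB apart, and returned the incorrect NoAlias that the removed FIXME pointed at.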