
Commit 601e102

[CodeGen] Use LocationSize for MMO getSize (#84751)
This is part of #70452; it changes the type used in the external interface of MMO from uint64_t to LocationSize. The constructors now take a LocationSize, converting the old ~UINT64_C(0) sentinel to LocationSize::beforeOrAfterPointer(), and the getSize methods return a LocationSize. This lets us be more precise with unknown sizes instead of accidentally treating them as unsigned values, and in the future it should allow proper scalable-vector support, though none of that is included in this patch. It should mostly be NFC. GlobalISel is still expected to use the underlying LLT as it needs and is not expected to see unknown sizes for generic operations. Most of the changes are hopefully fairly mechanical: adding a lot of getValue() calls and protecting them with hasValue() where needed.
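In practice the call-site migration follows a small pattern: guard with hasValue() (and, where it matters, isScalable()) before calling getValue(). A minimal sketch of that pattern, not taken from the patch itself; the helper name is hypothetical:

#include "llvm/CodeGen/MachineMemOperand.h"
#include <cstdint>
#include <optional>
using namespace llvm;

// Hypothetical helper: return the fixed byte size of an access, or nullopt
// when the MMO reports an unknown or scalable size.
static std::optional<uint64_t> fixedSizeInBytes(const MachineMemOperand &MMO) {
  LocationSize Size = MMO.getSize();         // previously: uint64_t, with
                                             // ~UINT64_C(0) meaning "unknown"
  if (!Size.hasValue() || Size.isScalable()) // unknown extent: bail out
    return std::nullopt;
  return Size.getValue().getFixedValue();    // precise byte count
}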
1 parent 5143a12 commit 601e102


48 files changed (+277, -206 lines)

llvm/include/llvm/Analysis/MemoryLocation.h

Lines changed: 9 additions & 2 deletions
@@ -191,8 +191,14 @@ class LocationSize {
     return Value == Other.Value;
   }
 
+  bool operator==(const TypeSize &Other) const {
+    return hasValue() && getValue() == Other;
+  }
+
   bool operator!=(const LocationSize &Other) const { return !(*this == Other); }
 
+  bool operator!=(const TypeSize &Other) const { return !(*this == Other); }
+
   // Ordering operators are not provided, since it's unclear if there's only one
   // reasonable way to compare:
   // - values that don't exist against values that do, and
@@ -293,8 +299,9 @@ class MemoryLocation {
 
   // Return the exact size if the exact size is known at compiletime,
   // otherwise return MemoryLocation::UnknownSize.
-  static uint64_t getSizeOrUnknown(const TypeSize &T) {
-    return T.isScalable() ? UnknownSize : T.getFixedValue();
+  static LocationSize getSizeOrUnknown(const TypeSize &T) {
+    return T.isScalable() ? LocationSize::beforeOrAfterPointer()
+                          : LocationSize::precise(T.getFixedValue());
   }
 
   MemoryLocation() : Ptr(nullptr), Size(LocationSize::beforeOrAfterPointer()) {}

llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h

Lines changed: 3 additions & 3 deletions
@@ -647,15 +647,15 @@ bool GIMatchTableExecutor::executeMatchTable(
 
       unsigned Size = MRI.getType(MO.getReg()).getSizeInBits();
       if (MatcherOpcode == GIM_CheckMemorySizeEqualToLLT &&
-          MMO->getSizeInBits() != Size) {
+          MMO->getSizeInBits().getValue() != Size) {
         if (handleReject() == RejectAndGiveUp)
           return false;
       } else if (MatcherOpcode == GIM_CheckMemorySizeLessThanLLT &&
-                 MMO->getSizeInBits() >= Size) {
+                 MMO->getSizeInBits().getValue() >= Size) {
         if (handleReject() == RejectAndGiveUp)
           return false;
       } else if (MatcherOpcode == GIM_CheckMemorySizeGreaterThanLLT &&
-                 MMO->getSizeInBits() <= Size)
+                 MMO->getSizeInBits().getValue() <= Size)
         if (handleReject() == RejectAndGiveUp)
           return false;

llvm/include/llvm/CodeGen/GlobalISel/GenericMachineInstrs.h

Lines changed: 2 additions & 2 deletions
@@ -54,9 +54,9 @@ class GMemOperation : public GenericMachineInstr {
   bool isUnordered() const { return getMMO().isUnordered(); }
 
   /// Returns the size in bytes of the memory access.
-  uint64_t getMemSize() const { return getMMO().getSize(); }
+  LocationSize getMemSize() const { return getMMO().getSize(); }
   /// Returns the size in bits of the memory access.
-  uint64_t getMemSizeInBits() const { return getMMO().getSizeInBits(); }
+  LocationSize getMemSizeInBits() const { return getMMO().getSizeInBits(); }
 
   static bool classof(const MachineInstr *MI) {
     return GenericMachineInstr::classof(MI) && MI->hasOneMemOperand();

llvm/include/llvm/CodeGen/MachineFunction.h

Lines changed: 28 additions & 7 deletions
@@ -1026,18 +1026,27 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction {
   /// MachineMemOperands are owned by the MachineFunction and need not be
   /// explicitly deallocated.
   MachineMemOperand *getMachineMemOperand(
-      MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
+      MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
       Align base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
       const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
       AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
       AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
-
   MachineMemOperand *getMachineMemOperand(
-      MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
-      Align base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
+      MachinePointerInfo PtrInfo, MachineMemOperand::Flags F, LocationSize Size,
+      Align BaseAlignment, const AAMDNodes &AAInfo = AAMDNodes(),
       const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
       AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
       AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
+  MachineMemOperand *getMachineMemOperand(
+      MachinePointerInfo PtrInfo, MachineMemOperand::Flags F, uint64_t Size,
+      Align BaseAlignment, const AAMDNodes &AAInfo = AAMDNodes(),
+      const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
+      AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
+      AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic) {
+    return getMachineMemOperand(PtrInfo, F, LocationSize::precise(Size),
+                                BaseAlignment, AAInfo, Ranges, SSID, Ordering,
+                                FailureOrdering);
+  }
 
   /// getMachineMemOperand - Allocate a new MachineMemOperand by copying
   /// an existing one, adjusting by an offset and using the given size.
@@ -1046,9 +1055,16 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction {
   MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                           int64_t Offset, LLT Ty);
   MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
-                                          int64_t Offset, uint64_t Size) {
+                                          int64_t Offset, LocationSize Size) {
     return getMachineMemOperand(
-        MMO, Offset, Size == ~UINT64_C(0) ? LLT() : LLT::scalar(8 * Size));
+        MMO, Offset,
+        !Size.hasValue() || Size.isScalable()
+            ? LLT()
+            : LLT::scalar(8 * Size.getValue().getKnownMinValue()));
+  }
+  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
+                                          int64_t Offset, uint64_t Size) {
+    return getMachineMemOperand(MMO, Offset, LocationSize::precise(Size));
   }
 
   /// getMachineMemOperand - Allocate a new MachineMemOperand by copying
@@ -1057,10 +1073,15 @@ class LLVM_EXTERNAL_VISIBILITY MachineFunction {
   /// explicitly deallocated.
   MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                           const MachinePointerInfo &PtrInfo,
-                                          uint64_t Size);
+                                          LocationSize Size);
   MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
                                           const MachinePointerInfo &PtrInfo,
                                           LLT Ty);
+  MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO,
+                                          const MachinePointerInfo &PtrInfo,
+                                          uint64_t Size) {
+    return getMachineMemOperand(MMO, PtrInfo, LocationSize::precise(Size));
+  }
 
   /// Allocate a new MachineMemOperand by copying an existing one,
   /// replacing only AliasAnalysis information. MachineMemOperands are owned
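For existing callers, the new convenience overloads keep a plain integer size working; it is simply routed through LocationSize::precise. A hedged usage sketch (the helper name and the MOLoad/alignment values are placeholders, not from the patch):

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

MachineMemOperand *makeLoadMMO(MachineFunction &MF,
                               const MachinePointerInfo &PtrInfo) {
  // Equivalent to passing /*Size=*/8 to the uint64_t overload, which forwards
  // here via LocationSize::precise(8).
  return MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                 LocationSize::precise(8), Align(8));
}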

llvm/include/llvm/CodeGen/MachineMemOperand.h

Lines changed: 10 additions & 5 deletions
@@ -17,6 +17,7 @@
 
 #include "llvm/ADT/BitmaskEnum.h"
 #include "llvm/ADT/PointerUnion.h"
+#include "llvm/Analysis/MemoryLocation.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
 #include "llvm/CodeGenTypes/LowLevelType.h"
 #include "llvm/IR/DerivedTypes.h"
@@ -186,7 +187,7 @@ class MachineMemOperand {
   /// and atomic ordering requirements must also be specified. For cmpxchg
   /// atomic operations the atomic ordering requirements when store does not
   /// occur must also be specified.
-  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, uint64_t s,
+  MachineMemOperand(MachinePointerInfo PtrInfo, Flags flags, LocationSize TS,
                     Align a, const AAMDNodes &AAInfo = AAMDNodes(),
                     const MDNode *Ranges = nullptr,
                     SyncScope::ID SSID = SyncScope::System,
@@ -235,13 +236,17 @@ class MachineMemOperand {
   LLT getMemoryType() const { return MemoryType; }
 
   /// Return the size in bytes of the memory reference.
-  uint64_t getSize() const {
-    return MemoryType.isValid() ? MemoryType.getSizeInBytes() : ~UINT64_C(0);
+  LocationSize getSize() const {
+    return MemoryType.isValid()
+               ? LocationSize::precise(MemoryType.getSizeInBytes())
+               : LocationSize::beforeOrAfterPointer();
   }
 
   /// Return the size in bits of the memory reference.
-  uint64_t getSizeInBits() const {
-    return MemoryType.isValid() ? MemoryType.getSizeInBits() : ~UINT64_C(0);
+  LocationSize getSizeInBits() const {
+    return MemoryType.isValid()
+               ? LocationSize::precise(MemoryType.getSizeInBits())
+               : LocationSize::beforeOrAfterPointer();
   }
 
   LLT getType() const {
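With these accessors, the unknown case becomes an explicit state rather than the ~UINT64_C(0) sentinel. A small illustrative helper (not from the patch) consuming the new interface:

#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void printAccessSize(const MachineMemOperand &MMO, raw_ostream &OS) {
  LocationSize Bytes = MMO.getSize(); // precise(...) or beforeOrAfterPointer()
  if (!Bytes.hasValue()) {            // invalid memory type: size is unknown
    OS << "size unknown\n";
    return;
  }
  OS << Bytes.getValue().getKnownMinValue() << " bytes, "
     << MMO.getSizeInBits().getValue().getKnownMinValue() << " bits\n";
}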

llvm/include/llvm/CodeGen/SelectionDAG.h

Lines changed: 2 additions & 2 deletions
@@ -1299,15 +1299,15 @@ class SelectionDAG {
       EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment,
       MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
                                        MachineMemOperand::MOStore,
-      uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
+      LocationSize Size = 0, const AAMDNodes &AAInfo = AAMDNodes());
 
   inline SDValue getMemIntrinsicNode(
       unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
       EVT MemVT, MachinePointerInfo PtrInfo,
       MaybeAlign Alignment = std::nullopt,
       MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad |
                                        MachineMemOperand::MOStore,
-      uint64_t Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
+      LocationSize Size = 0, const AAMDNodes &AAInfo = AAMDNodes()) {
     // Ensure that codegen never sees alignment 0
     return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, PtrInfo,
                                Alignment.value_or(getEVTAlign(MemVT)), Flags,

llvm/lib/CodeGen/DFAPacketizer.cpp

Lines changed: 4 additions & 3 deletions
@@ -252,12 +252,13 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
 bool VLIWPacketizerList::alias(const MachineMemOperand &Op1,
                                const MachineMemOperand &Op2,
                                bool UseTBAA) const {
-  if (!Op1.getValue() || !Op2.getValue())
+  if (!Op1.getValue() || !Op2.getValue() || !Op1.getSize().hasValue() ||
+      !Op2.getSize().hasValue())
     return true;
 
   int64_t MinOffset = std::min(Op1.getOffset(), Op2.getOffset());
-  int64_t Overlapa = Op1.getSize() + Op1.getOffset() - MinOffset;
-  int64_t Overlapb = Op2.getSize() + Op2.getOffset() - MinOffset;
+  int64_t Overlapa = Op1.getSize().getValue() + Op1.getOffset() - MinOffset;
+  int64_t Overlapb = Op2.getSize().getValue() + Op2.getOffset() - MinOffset;
 
   AliasResult AAResult =
       AA->alias(MemoryLocation(Op1.getValue(), Overlapa,

llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp

Lines changed: 7 additions & 5 deletions
@@ -770,12 +770,12 @@ bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
   LLT RegTy = MRI.getType(LoadReg);
   Register PtrReg = LoadMI->getPointerReg();
   unsigned RegSize = RegTy.getSizeInBits();
-  uint64_t LoadSizeBits = LoadMI->getMemSizeInBits();
+  LocationSize LoadSizeBits = LoadMI->getMemSizeInBits();
   unsigned MaskSizeBits = MaskVal.countr_one();
 
   // The mask may not be larger than the in-memory type, as it might cover sign
   // extended bits
-  if (MaskSizeBits > LoadSizeBits)
+  if (MaskSizeBits > LoadSizeBits.getValue())
     return false;
 
   // If the mask covers the whole destination register, there's nothing to
@@ -795,7 +795,8 @@ bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI,
   // still adjust the opcode to indicate the high bit behavior.
   if (LoadMI->isSimple())
     MemDesc.MemoryTy = LLT::scalar(MaskSizeBits);
-  else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize)
+  else if (LoadSizeBits.getValue() > MaskSizeBits ||
+           LoadSizeBits.getValue() == RegSize)
     return false;
 
   // TODO: Could check if it's legal with the reduced or original memory size.
@@ -860,7 +861,8 @@ bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
   if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) {
     // If truncating more than the original extended value, abort.
     auto LoadSizeBits = LoadMI->getMemSizeInBits();
-    if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits)
+    if (TruncSrc &&
+        MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits.getValue())
       return false;
     if (LoadSizeBits == SizeInBits)
       return true;
@@ -891,7 +893,7 @@ bool CombinerHelper::matchSextInRegOfLoad(
   if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg))
     return false;
 
-  uint64_t MemBits = LoadDef->getMemSizeInBits();
+  uint64_t MemBits = LoadDef->getMemSizeInBits().getValue();
 
   // If the sign extend extends from a narrower width than the load's width,
   // then we can narrow the load width when we combine to a G_SEXTLOAD.

llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp

Lines changed: 4 additions & 3 deletions
@@ -415,7 +415,8 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
     if (DstTy.isVector())
      break;
     // Everything above the retrieved bits is zero
-    Known.Zero.setBitsFrom((*MI.memoperands_begin())->getSizeInBits());
+    Known.Zero.setBitsFrom(
+        (*MI.memoperands_begin())->getSizeInBits().getValue());
     break;
   }
   case TargetOpcode::G_ASHR: {
@@ -666,7 +667,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
 
     // e.g. i16->i32 = '17' bits known.
     const MachineMemOperand *MMO = *MI.memoperands_begin();
-    return TyBits - MMO->getSizeInBits() + 1;
+    return TyBits - MMO->getSizeInBits().getValue() + 1;
   }
   case TargetOpcode::G_ZEXTLOAD: {
     // FIXME: We need an in-memory type representation.
@@ -675,7 +676,7 @@ unsigned GISelKnownBits::computeNumSignBits(Register R,
 
     // e.g. i16->i32 = '16' bits known.
     const MachineMemOperand *MMO = *MI.memoperands_begin();
-    return TyBits - MMO->getSizeInBits();
+    return TyBits - MMO->getSizeInBits().getValue();
   }
   case TargetOpcode::G_TRUNC: {
     Register Src = MI.getOperand(1).getReg();

llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp

Lines changed: 4 additions & 4 deletions
@@ -1317,7 +1317,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
     if (DstTy.isVector())
       return UnableToLegalize;
 
-    if (8 * LoadMI.getMemSize() != DstTy.getSizeInBits()) {
+    if (8 * LoadMI.getMemSize().getValue() != DstTy.getSizeInBits()) {
       Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
       MIRBuilder.buildLoad(TmpReg, LoadMI.getPointerReg(), LoadMI.getMMO());
       MIRBuilder.buildAnyExt(DstReg, TmpReg);
@@ -1335,7 +1335,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
 
     Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
     auto &MMO = LoadMI.getMMO();
-    unsigned MemSize = MMO.getSizeInBits();
+    unsigned MemSize = MMO.getSizeInBits().getValue();
 
     if (MemSize == NarrowSize) {
       MIRBuilder.buildLoad(TmpReg, PtrReg, MMO);
@@ -1368,7 +1368,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
   if (SrcTy.isVector() && LeftoverBits != 0)
     return UnableToLegalize;
 
-  if (8 * StoreMI.getMemSize() != SrcTy.getSizeInBits()) {
+  if (8 * StoreMI.getMemSize().getValue() != SrcTy.getSizeInBits()) {
     Register TmpReg = MRI.createGenericVirtualRegister(NarrowTy);
     MIRBuilder.buildTrunc(TmpReg, SrcReg);
     MIRBuilder.buildStore(TmpReg, StoreMI.getPointerReg(), StoreMI.getMMO());
@@ -4456,7 +4456,7 @@ LegalizerHelper::reduceLoadStoreWidth(GLoadStore &LdStMI, unsigned TypeIdx,
   LLT ValTy = MRI.getType(ValReg);
 
   // FIXME: Do we need a distinct NarrowMemory legalize action?
-  if (ValTy.getSizeInBits() != 8 * LdStMI.getMemSize()) {
+  if (ValTy.getSizeInBits() != 8 * LdStMI.getMemSize().getValue()) {
     LLVM_DEBUG(dbgs() << "Can't narrow extload/truncstore\n");
     return UnableToLegalize;
   }

llvm/lib/CodeGen/GlobalISel/LoadStoreOpt.cpp

Lines changed: 5 additions & 14 deletions
@@ -117,12 +117,8 @@ bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
   if (!BasePtr0.BaseReg.isValid() || !BasePtr1.BaseReg.isValid())
     return false;
 
-  LocationSize Size1 = LdSt1->getMemSize() != MemoryLocation::UnknownSize
-                           ? LdSt1->getMemSize()
-                           : LocationSize::beforeOrAfterPointer();
-  LocationSize Size2 = LdSt2->getMemSize() != MemoryLocation::UnknownSize
-                           ? LdSt2->getMemSize()
-                           : LocationSize::beforeOrAfterPointer();
+  LocationSize Size1 = LdSt1->getMemSize();
+  LocationSize Size2 = LdSt2->getMemSize();
 
   int64_t PtrDiff;
   if (BasePtr0.BaseReg == BasePtr1.BaseReg) {
@@ -214,14 +210,9 @@ bool GISelAddressing::instMayAlias(const MachineInstr &MI,
      Offset = 0;
    }
 
-    TypeSize Size = LS->getMMO().getMemoryType().getSizeInBytes();
-    return {LS->isVolatile(),
-            LS->isAtomic(),
-            BaseReg,
-            Offset /*base offset*/,
-            Size.isScalable() ? LocationSize::beforeOrAfterPointer()
-                              : LocationSize::precise(Size),
-            &LS->getMMO()};
+    LocationSize Size = LS->getMMO().getSize();
+    return {LS->isVolatile(), LS->isAtomic(), BaseReg,
+            Offset /*base offset*/, Size, &LS->getMMO()};
   }
   // FIXME: support recognizing lifetime instructions.
   // Default.

llvm/lib/CodeGen/LiveDebugValues/InstrRefBasedImpl.cpp

Lines changed: 3 additions & 2 deletions
@@ -1356,10 +1356,11 @@ InstrRefBasedLDV::findLocationForMemOperand(const MachineInstr &MI) {
   // from the stack at some point. Happily the memory operand will tell us
   // the size written to the stack.
   auto *MemOperand = *MI.memoperands_begin();
-  unsigned SizeInBits = MemOperand->getSizeInBits();
+  LocationSize SizeInBits = MemOperand->getSizeInBits();
+  assert(SizeInBits.hasValue() && "Expected to find a valid size!");
 
   // Find that position in the stack indexes we're tracking.
-  auto IdxIt = MTracker->StackSlotIdxes.find({SizeInBits, 0});
+  auto IdxIt = MTracker->StackSlotIdxes.find({SizeInBits.getValue(), 0});
   if (IdxIt == MTracker->StackSlotIdxes.end())
     // That index is not tracked. This is suprising, and unlikely to ever
    // occur, but the safe action is to indicate the variable is optimised out.

llvm/lib/CodeGen/MIRVRegNamerUtils.cpp

Lines changed: 1 addition & 1 deletion
@@ -123,7 +123,7 @@ std::string VRegRenamer::getInstructionOpcodeHash(MachineInstr &MI) {
   llvm::transform(MI.uses(), std::back_inserter(MIOperands), GetHashableMO);
 
   for (const auto *Op : MI.memoperands()) {
-    MIOperands.push_back((unsigned)Op->getSize());
+    MIOperands.push_back((unsigned)Op->getSize().getValue());
     MIOperands.push_back((unsigned)Op->getFlags());
     MIOperands.push_back((unsigned)Op->getOffset());
     MIOperands.push_back((unsigned)Op->getSuccessOrdering());
