
[mlir][vector] Standardize base Naming Across Vector Ops (NFC) #137859


Merged
merged 2 commits on May 12, 2025
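In summary: the $source operand of vector.transfer_read / vector.transfer_write is renamed to $base, and the VectorTransferOpInterface accessor getSource() is replaced by getBase(), with a deprecated forwarding shim kept so downstream code continues to compile. A minimal sketch of a downstream call site under the new naming (the helper below is illustrative, not part of this PR):

// Hypothetical downstream helper, shown only to illustrate the rename.
#include "mlir/Interfaces/VectorInterfaces.h"

// Return the shaped (memref or tensor) type that a vector transfer op
// reads from or writes to.
static mlir::ShapedType getXferShapedType(mlir::VectorTransferOpInterface op) {
  // Pre-rename spelling: op.getSource().getType(). getSource() still
  // compiles after this PR, but emits a deprecation warning.
  return llvm::cast<mlir::ShapedType>(op.getBase().getType());
}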
6 changes: 3 additions & 3 deletions mlir/include/mlir/Dialect/Vector/IR/VectorOps.td
@@ -1273,7 +1273,7 @@ def Vector_TransferReadOp :
AttrSizedOperandSegments,
DestinationStyleOpInterface
]>,
-Arguments<(ins AnyShaped:$source,
+Arguments<(ins AnyShaped:$base,
Variadic<Index>:$indices,
AffineMapAttr:$permutation_map,
AnyType:$padding,
@@ -1522,7 +1522,7 @@ def Vector_TransferWriteOp :
DestinationStyleOpInterface
]>,
Arguments<(ins AnyVectorOfAnyRank:$valueToStore,
-AnyShaped:$source,
+AnyShaped:$base,
Variadic<Index>:$indices,
AffineMapAttr:$permutation_map,
Optional<VectorOfNonZeroRankOf<[I1]>>:$mask,
@@ -1663,7 +1663,7 @@ def Vector_TransferWriteOp :
/// ops of other dialects.
Value getValue() { return getVector(); }

-MutableOperandRange getDpsInitsMutable() { return getSourceMutable(); }
+MutableOperandRange getDpsInitsMutable() { return getBaseMutable(); }
}];

let hasFolder = 1;
10 changes: 8 additions & 2 deletions mlir/include/mlir/Interfaces/VectorInterfaces.td
@@ -111,7 +111,7 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {
TODO: Change name of operand, which is not accurate for xfer_write.
}],
/*retTy=*/"::mlir::Value",
/*methodName=*/"getSource",
/*methodName=*/"getBase",
/*args=*/(ins)
>,
InterfaceMethod<
@@ -187,6 +187,12 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {
return inBounds;
}

+/// Wrapper for getBase, which replaced getSource.
+[[deprecated("Use getBase instead!")]]
+::mlir::Value getSource() {
+  return $_op.getBase();
+}

/// Return the number of leading shaped dimensions (of the "source" operand)
/// that do not participate in the permutation map.
unsigned getLeadingShapedRank() {
@@ -203,7 +209,7 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {

/// Return the shaped type of the "source" operand value.
::mlir::ShapedType getShapedType() {
-return ::llvm::cast<::mlir::ShapedType>($_op.getSource().getType());
+return ::llvm::cast<::mlir::ShapedType>($_op.getBase().getType());
}

/// Return the number of dimensions that participate in the permutation map.
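The shim above uses the standard C++ [[deprecated]] attribute to forward old call sites to the new accessor. A self-contained sketch of the same pattern outside TableGen (the struct and member types are simplified placeholders, not MLIR code):

// Standalone illustration of the deprecation-shim pattern.
#include <cstdio>

struct TransferOpLike {
  int base = 0;
  int getBase() const { return base; }
  // The old accessor forwards to the new one and warns at compile time.
  [[deprecated("Use getBase instead!")]]
  int getSource() const { return getBase(); }
};

int main() {
  TransferOpLike op;
  std::printf("%d\n", op.getBase()); // preferred spelling, no warning
  // op.getSource() would still compile, but with a deprecation warning.
  return 0;
}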
12 changes: 6 additions & 6 deletions mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
@@ -58,7 +58,7 @@ struct TransferReadToArmSMELowering
return rewriter.notifyMatchFailure(transferReadOp,
"not a valid vector type for SME");

-if (!llvm::isa<MemRefType>(transferReadOp.getSource().getType()))
+if (!llvm::isa<MemRefType>(transferReadOp.getBase().getType()))
return rewriter.notifyMatchFailure(transferReadOp, "not a memref source");

// Out-of-bounds dims are not supported.
@@ -84,7 +84,7 @@
auto mask = transferReadOp.getMask();
auto padding = mask ? transferReadOp.getPadding() : nullptr;
rewriter.replaceOpWithNewOp<arm_sme::TileLoadOp>(
-transferReadOp, vectorType, transferReadOp.getSource(),
+transferReadOp, vectorType, transferReadOp.getBase(),
transferReadOp.getIndices(), padding, mask, layout);

return success();
@@ -128,7 +128,7 @@ struct TransferWriteToArmSMELowering
if (!arm_sme::isValidSMETileVectorType(vType))
return failure();

-if (!llvm::isa<MemRefType>(writeOp.getSource().getType()))
+if (!llvm::isa<MemRefType>(writeOp.getBase().getType()))
return failure();

// Out-of-bounds dims are not supported.
@@ -149,7 +149,7 @@
: arm_sme::TileSliceLayout::Horizontal;

rewriter.replaceOpWithNewOp<arm_sme::TileStoreOp>(
-writeOp, writeOp.getVector(), writeOp.getSource(), writeOp.getIndices(),
+writeOp, writeOp.getVector(), writeOp.getBase(), writeOp.getIndices(),
writeOp.getMask(), layout);
return success();
}
@@ -686,7 +686,7 @@ struct FoldTransferWriteOfExtractTileSlice

LogicalResult matchAndRewrite(vector::TransferWriteOp writeOp,
PatternRewriter &rewriter) const final {
-if (!isa<MemRefType>(writeOp.getSource().getType()))
+if (!isa<MemRefType>(writeOp.getBase().getType()))
return rewriter.notifyMatchFailure(writeOp, "destination not a memref");

if (writeOp.hasOutOfBoundsDim())
@@ -713,7 +713,7 @@

rewriter.replaceOpWithNewOp<arm_sme::StoreTileSliceOp>(
writeOp, extractTileSlice.getTile(),
-extractTileSlice.getTileSliceIndex(), mask, writeOp.getSource(),
+extractTileSlice.getTileSliceIndex(), mask, writeOp.getBase(),
writeOp.getIndices(), extractTileSlice.getLayout());
return success();
}
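The three ArmSME patterns above share a precondition: the transfer's base must be a memref, because tensor-level transfers cannot be lowered to tile loads/stores. A condensed sketch of that guard (the helper name is hypothetical, factored out here only for illustration):

// Illustrative guard shared by the lowering patterns above.
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/VectorInterfaces.h"

// Reject transfers whose base is a tensor; only memref-backed transfers
// can be lowered to ArmSME tile load/store ops.
static mlir::LogicalResult checkMemRefBase(mlir::VectorTransferOpInterface op,
                                           mlir::PatternRewriter &rewriter) {
  if (!llvm::isa<mlir::MemRefType>(op.getBase().getType()))
    return rewriter.notifyMatchFailure(op.getOperation(),
                                       "base is not a memref");
  return mlir::success();
}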
16 changes: 8 additions & 8 deletions mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp
@@ -486,7 +486,7 @@ struct CombineTransferReadOpTranspose final
Value result =
rewriter
.create<vector::TransferReadOp>(
-loc, resultType, transferReadOp.getSource(),
+loc, resultType, transferReadOp.getBase(),
transferReadOp.getIndices(), AffineMapAttr::get(newMap),
transferReadOp.getPadding(), transferReadOp.getMask(),
transferReadOp.getInBoundsAttr())
@@ -581,7 +581,7 @@ convertTransferReadOp(RewriterBase &rewriter, vector::TransferReadOp op,
gpu::MMAMatrixType type =
gpu::MMAMatrixType::get(op.getVectorType().getShape(), elType, fragType);
Value load = rewriter.create<gpu::SubgroupMmaLoadMatrixOp>(
-op.getLoc(), type, op.getSource(), op.getIndices(),
+op.getLoc(), type, op.getBase(), op.getIndices(),
rewriter.getIndexAttr(*stride),
isTranspose ? rewriter.getUnitAttr() : UnitAttr());
valueMapping[mappingResult] = load;
@@ -612,7 +612,7 @@ convertTransferWriteOp(RewriterBase &rewriter, vector::TransferWriteOp op,

Value matrix = it->second;
auto store = rewriter.create<gpu::SubgroupMmaStoreMatrixOp>(
-op.getLoc(), matrix, op.getSource(), op.getIndices(),
+op.getLoc(), matrix, op.getBase(), op.getIndices(),
rewriter.getIndexAttr(*stride), /*transpose=*/UnitAttr());
(void)store;

@@ -759,7 +759,7 @@ creatLdMatrixCompatibleLoads(RewriterBase &rewriter, vector::TransferReadOp op,
indices);

nvgpu::LdMatrixOp newOp = rewriter.create<nvgpu::LdMatrixOp>(
-loc, vectorType, op.getSource(), indices, *transpose, params->numTiles);
+loc, vectorType, op.getBase(), indices, *transpose, params->numTiles);
valueMapping[op] = newOp->getResult(0);
return success();
}
@@ -819,7 +819,7 @@ createNonLdMatrixLoads(RewriterBase &rewriter, vector::TransferReadOp op,
rewriter, op, *coords, {laneId, logicalValueId}, newIndices);

Value el = rewriter.create<vector::LoadOp>(loc, loadedElType,
-op.getSource(), newIndices);
+op.getBase(), newIndices);
result = rewriter.create<vector::InsertOp>(loc, el, result, i);
}
} else {
@@ -842,7 +842,7 @@ createNonLdMatrixLoads(RewriterBase &rewriter, vector::TransferReadOp op,
getXferIndices<vector::TransferReadOp>(
rewriter, op, *coords, {laneId, logicalValueId}, newIndices);
Value el = rewriter.create<memref::LoadOp>(op.getLoc(), loadedElType,
-op.getSource(), newIndices);
+op.getBase(), newIndices);
result = rewriter.create<vector::InsertOp>(
op.getLoc(), el, result, ArrayRef<int64_t>{i, innerIdx});
}
@@ -876,7 +876,7 @@ convertTransferReadToLoads(RewriterBase &rewriter, vector::TransferReadOp op,
return rewriter.notifyMatchFailure(op, "no warpMatrixInfo");

bool isLdMatrixCompatible =
-isSharedMemory(cast<MemRefType>(op.getSource().getType())) &&
+isSharedMemory(cast<MemRefType>(op.getBase().getType())) &&
nvgpu::inferTileWidthInBits(*warpMatrixInfo) == 128;

VectorType vecTy = op.getVectorType();
@@ -934,7 +934,7 @@ convertTransferWriteToStores(RewriterBase &rewriter, vector::TransferWriteOp op,
SmallVector<Value, 4> newIndices;
getXferIndices<vector::TransferWriteOp>(
rewriter, op, *coords, {laneId, logicalValueId}, newIndices);
-rewriter.create<vector::StoreOp>(loc, el, op.getSource(), newIndices);
+rewriter.create<vector::StoreOp>(loc, el, op.getBase(), newIndices);
}

LLVM_DEBUG(DBGS() << "erase: " << op << "\n");
22 changes: 10 additions & 12 deletions mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
@@ -198,8 +198,7 @@ static Value generateInBoundsCheck(
Location loc = xferOp.getLoc();
ImplicitLocOpBuilder lb(xferOp.getLoc(), b);
if (!xferOp.isDimInBounds(0) && !isBroadcast) {
-Value memrefDim =
-vector::createOrFoldDimOp(b, loc, xferOp.getSource(), *dim);
+Value memrefDim = vector::createOrFoldDimOp(b, loc, xferOp.getBase(), *dim);
AffineExpr d0, d1;
bindDims(xferOp.getContext(), d0, d1);
Value base = xferOp.getIndices()[*dim];
@@ -426,7 +425,7 @@ struct Strategy<TransferReadOp> {
auto vecType = dyn_cast<VectorType>(bufferType.getElementType());
auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr());
auto newXferOp = b.create<vector::TransferReadOp>(
-loc, vecType, xferOp.getSource(), xferIndices,
+loc, vecType, xferOp.getBase(), xferIndices,
AffineMapAttr::get(unpackedPermutationMap(b, xferOp)),
xferOp.getPadding(), Value(), inBoundsAttr);

@@ -512,7 +511,7 @@ struct Strategy<TransferWriteOp> {
Location loc = xferOp.getLoc();
auto vec = b.create<memref::LoadOp>(loc, buffer, loadIndices);
auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr());
-auto source = loopState.empty() ? xferOp.getSource() : loopState[0];
+auto source = loopState.empty() ? xferOp.getBase() : loopState[0];
Type type = isTensorOp(xferOp) ? xferOp.getShapedType() : Type();
auto newXferOp = b.create<vector::TransferWriteOp>(
loc, type, vec, source, xferIndices,
@@ -544,7 +543,7 @@ struct Strategy<TransferWriteOp> {

/// Return the initial loop state for the generated scf.for loop.
static Value initialLoopState(TransferWriteOp xferOp) {
-return isTensorOp(xferOp) ? xferOp.getSource() : Value();
+return isTensorOp(xferOp) ? xferOp.getBase() : Value();
}
};

@@ -1145,7 +1144,7 @@ struct ScalableTransposeTransferWriteConversion
ArrayRef<OpFoldResult>(*maskDims).drop_front());
}

-Value initDest = isTensorOp(writeOp) ? writeOp.getSource() : Value{};
+Value initDest = isTensorOp(writeOp) ? writeOp.getBase() : Value{};
ValueRange initLoopArgs = initDest ? initDest : ValueRange{};
auto result = rewriter.create<scf::ForOp>(
loc, lb, ub, step, initLoopArgs,
@@ -1165,7 +1164,7 @@

// Create the transfer_write for the slice.
Value dest =
-loopIterArgs.empty() ? writeOp.getSource() : loopIterArgs.front();
+loopIterArgs.empty() ? writeOp.getBase() : loopIterArgs.front();
auto newWriteOp = b.create<vector::TransferWriteOp>(
loc, sliceVec, dest, xferIndices,
ArrayRef<bool>(writeOp.getInBoundsValues()).drop_front());
@@ -1340,7 +1339,7 @@ struct UnrollTransferReadConversion

auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr());
auto newXferOp = b.create<vector::TransferReadOp>(
-loc, newXferVecType, xferOp.getSource(), xferIndices,
+loc, newXferVecType, xferOp.getBase(), xferIndices,
AffineMapAttr::get(unpackedPermutationMap(b, xferOp)),
xferOp.getPadding(), Value(), inBoundsAttr);
maybeAssignMask(b, xferOp, newXferOp, i);
@@ -1449,7 +1448,7 @@ struct UnrollTransferWriteConversion
}

int64_t dimSize = inputVectorTy.getShape()[0];
-Value source = xferOp.getSource(); // memref or tensor to be written to.
+Value source = xferOp.getBase(); // memref or tensor to be written to.
auto sourceType = isTensorOp(xferOp) ? xferOp.getShapedType() : Type();

// Generate fully unrolled loop of transfer ops.
@@ -1567,8 +1566,7 @@ struct Strategy1d<TransferReadOp> {
b, xferOp, iv, dim, TypeRange(xferOp.getVectorType()),
/*inBoundsCase=*/
[&](OpBuilder &b, Location loc) {
-Value val =
-b.create<memref::LoadOp>(loc, xferOp.getSource(), indices);
+Value val = b.create<memref::LoadOp>(loc, xferOp.getBase(), indices);
return b.create<vector::InsertElementOp>(loc, val, vec, iv);
},
/*outOfBoundsCase=*/
@@ -1599,7 +1597,7 @@ struct Strategy1d<TransferWriteOp> {
/*inBoundsCase=*/[&](OpBuilder &b, Location loc) {
auto val =
b.create<vector::ExtractElementOp>(loc, xferOp.getVector(), iv);
-b.create<memref::StoreOp>(loc, val, xferOp.getSource(), indices);
+b.create<memref::StoreOp>(loc, val, xferOp.getBase(), indices);
});
b.create<scf::YieldOp>(loc);
}
10 changes: 5 additions & 5 deletions mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
@@ -192,7 +192,7 @@ struct TransferReadLowering : public OpRewritePattern<vector::TransferReadOp> {

xegpu::CreateNdDescOp ndDesc =
createNdDescriptor(rewriter, loc, descType,
-dyn_cast<TypedValue<MemRefType>>(readOp.getSource()),
+dyn_cast<TypedValue<MemRefType>>(readOp.getBase()),
readOp.getIndices());

DenseI64ArrayAttr transposeAttr =
@@ -231,10 +231,10 @@ struct TransferWriteLowering
vecTy.getShape(), vecTy.getElementType(),
/*array_length=*/1, /*boundary_check=*/writeOp.hasOutOfBoundsDim(),
xegpu::MemorySpace::Global);
-xegpu::CreateNdDescOp ndDesc = createNdDescriptor(
-rewriter, loc, descType,
-dyn_cast<TypedValue<MemRefType>>(writeOp.getSource()),
-writeOp.getIndices());
+xegpu::CreateNdDescOp ndDesc =
+createNdDescriptor(rewriter, loc, descType,
+dyn_cast<TypedValue<MemRefType>>(writeOp.getBase()),
+writeOp.getIndices());

// By default, no specific caching policy is assigned.
xegpu::CachePolicyAttr hint = nullptr;
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp
@@ -118,7 +118,7 @@ static Value createVectorLoadForMaskedLoad(OpBuilder &builder, Location loc,
Value fill = builder.create<vector::SplatOp>(loc, unbroadcastedVectorType,
readOp.getPadding());
Value load = builder.create<vector::LoadOp>(
-loc, unbroadcastedVectorType, readOp.getSource(), readOp.getIndices());
+loc, unbroadcastedVectorType, readOp.getBase(), readOp.getIndices());
Value res = builder.create<arith::SelectOp>(loc, unbroadcastedVectorType,
readOp.getMask(), load, fill);
// Insert a broadcasting op if required.
@@ -149,7 +149,7 @@ struct TransferReadLowering final : OpRewritePattern<vector::TransferReadOp> {
}

Location loc = readOp.getLoc();
-Value src = readOp.getSource();
+Value src = readOp.getBase();

VectorType vectorType = readOp.getVectorType();
int64_t vectorSize = vectorType.getNumElements();
10 changes: 5 additions & 5 deletions mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp
@@ -315,7 +315,7 @@ struct LegalizeTransferReadOpsByDecomposition
decomposeToSMETiles(rewriter, vectorType, smeTileType, transposed)) {
auto smeMask = extractSMEMask(rewriter, loc, mask, smeTile);
auto smeRead = rewriter.create<vector::TransferReadOp>(
-loc, smeTileType, readOp.getSource(),
+loc, smeTileType, readOp.getBase(),
getSMESubTileIndices(rewriter, loc, readOp.getIndices(), smeTile),
readOp.getPermutationMapAttr(), readOp.getPadding(), smeMask,
readOp.getInBoundsAttr());
@@ -359,7 +359,7 @@ struct LegalizeTransferWriteOpsByDecomposition
auto smeTileType = getSMETileTypeForElement(vectorType.getElementType());
auto inputSMETiles = adaptor.getValueToStore();

-Value destTensorOrMemref = writeOp.getSource();
+Value destTensorOrMemref = writeOp.getBase();
for (auto [index, smeTile] : llvm::enumerate(decomposeToSMETiles(
rewriter, vectorType, smeTileType, transposed))) {
auto smeMask = extractSMEMask(rewriter, loc, mask, smeTile);
@@ -497,7 +497,7 @@ struct LegalizeMultiTileTransferWriteAsStoreLoop
auto slice =
rewriter.create<vector::ExtractOp>(loc, tile, tileSliceIndex);
rewriter.create<vector::TransferWriteOp>(
-loc, slice, writeOp.getSource(), ValueRange{storeRow, storeCol},
+loc, slice, writeOp.getBase(), ValueRange{storeRow, storeCol},
AffineMapAttr::get(writeOp.getPermutationMap().dropResult(0)),
sliceMask,
rewriter.getBoolArrayAttr(
@@ -677,7 +677,7 @@ struct LiftIllegalVectorTransposeToMemory
});
SmallVector<Value> strides(readType.getRank(), Value(one));
auto readSubview = rewriter.create<memref::SubViewOp>(
-loc, illegalRead.getSource(), illegalRead.getIndices(), readSizes,
+loc, illegalRead.getBase(), illegalRead.getIndices(), readSizes,
strides);

// Apply the transpose to all values/attributes of the transfer_read:
@@ -851,7 +851,7 @@ struct LowerIllegalTransposeStoreViaZA

// Note: We need to use `get_tile` as there's no vector-level `undef`.
Value undefTile = rewriter.create<arm_sme::GetTileOp>(loc, smeTileType);
-Value destTensorOrMemref = writeOp.getSource();
+Value destTensorOrMemref = writeOp.getBase();
auto numSlicesPerTile =
std::min(sourceType.getDimSize(0), smeTileType.getDimSize(0));
auto numSlices =