
Commit 7fd6e8b

[mlir][vector] Standardize base Naming Across Vector Ops (NFC)
This change standardizes the naming convention for the argument representing the value to read from or write to in Vector ops that interface with Tensors or MemRefs. Specifically, it ensures that all such ops use the name `base` (i.e., the base address or location to which offsets are applied).

Updated operations:

* vector.transfer_read
* vector.transfer_write

For reference, these ops already use `base`:

* vector.load, vector.store, vector.scatter, vector.gather, vector.expandload, vector.compressstore, vector.maskedstore, vector.maskedload

This is a non-functional change (NFC) and does not alter the semantics of these operations.

Implements #131602
1 parent 77f8335 commit 7fd6e8b
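For downstream users, the practical effect of the rename is that the generated accessors on vector.transfer_read / vector.transfer_write change from getSource() / getSourceMutable() to getBase() / getBaseMutable(), as the diff below shows. A minimal sketch of the kind of one-line call-site update this implies; the pattern name and surrounding logic are hypothetical and not part of this commit:

#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Hypothetical downstream pattern, shown only to illustrate the accessor
// rename. It matches transfer_read ops whose base operand is a memref.
struct ExampleTransferReadPattern
    : public OpRewritePattern<vector::TransferReadOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransferReadOp readOp,
                                PatternRewriter &rewriter) const override {
    // Previously: readOp.getSource(); after this change: readOp.getBase().
    Value base = readOp.getBase();
    if (!isa<MemRefType>(base.getType()))
      return rewriter.notifyMatchFailure(readOp, "expected a memref base");
    // A real pattern would rewrite the op here.
    return failure();
  }
};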

29 files changed: 139 additions, 140 deletions

mlir/include/mlir/Dialect/Vector/IR/VectorOps.td

Lines changed: 7 additions & 7 deletions
@@ -1273,7 +1273,7 @@ def Vector_TransferReadOp :
       AttrSizedOperandSegments,
       DestinationStyleOpInterface
     ]>,
-    Arguments<(ins AnyShaped:$source,
+    Arguments<(ins AnyShaped:$base,
                    Variadic<Index>:$indices,
                    AffineMapAttr:$permutation_map,
                    AnyType:$padding,
@@ -1470,26 +1470,26 @@ def Vector_TransferReadOp :
   let builders = [
     /// 1. Builder that sets padding to zero and an empty mask (variant with attrs).
     OpBuilder<(ins "VectorType":$vectorType,
-                   "Value":$source,
+                   "Value":$base,
                    "ValueRange":$indices,
                    "AffineMapAttr":$permutationMapAttr,
                    "ArrayAttr":$inBoundsAttr)>,
     /// 2. Builder that sets padding to zero and an empty mask (variant without attrs).
     OpBuilder<(ins "VectorType":$vectorType,
-                   "Value":$source,
+                   "Value":$base,
                    "ValueRange":$indices,
                    "AffineMap":$permutationMap,
                    CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
     /// 3. Builder that sets permutation map to 'getMinorIdentityMap'.
     OpBuilder<(ins "VectorType":$vectorType,
-                   "Value":$source,
+                   "Value":$base,
                    "ValueRange":$indices,
                    "Value":$padding,
                    CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
     /// 4. Builder that sets padding to zero and permutation map to
     /// 'getMinorIdentityMap'.
     OpBuilder<(ins "VectorType":$vectorType,
-                   "Value":$source,
+                   "Value":$base,
                    "ValueRange":$indices,
                    CArg<"std::optional<ArrayRef<bool>>", "::std::nullopt">:$inBounds)>,
   ];
@@ -1522,7 +1522,7 @@ def Vector_TransferWriteOp :
       DestinationStyleOpInterface
     ]>,
     Arguments<(ins AnyVectorOfAnyRank:$valueToStore,
-                   AnyShaped:$source,
+                   AnyShaped:$base,
                    Variadic<Index>:$indices,
                    AffineMapAttr:$permutation_map,
                    Optional<VectorOfNonZeroRankOf<[I1]>>:$mask,
@@ -1663,7 +1663,7 @@ def Vector_TransferWriteOp :
     /// ops of other dialects.
     Value getValue() { return getVector(); }

-    MutableOperandRange getDpsInitsMutable() { return getSourceMutable(); }
+    MutableOperandRange getDpsInitsMutable() { return getBaseMutable(); }
   }];

   let hasFolder = 1;

mlir/include/mlir/Interfaces/VectorInterfaces.td

Lines changed: 2 additions & 3 deletions
@@ -108,10 +108,9 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {
         on. In case of a "read" operation, that's the source from which the
         operation reads. In case of a "write" operation, that's the destination
         into which the operation writes.
-        TODO: Change name of operand, which is not accurate for xfer_write.
       }],
       /*retTy=*/"::mlir::Value",
-      /*methodName=*/"getSource",
+      /*methodName=*/"getBase",
       /*args=*/(ins)
     >,
     InterfaceMethod<
@@ -203,7 +202,7 @@ def VectorTransferOpInterface : OpInterface<"VectorTransferOpInterface"> {

     /// Return the shaped type of the "source" operand value.
     ::mlir::ShapedType getShapedType() {
-      return ::llvm::cast<::mlir::ShapedType>($_op.getSource().getType());
+      return ::llvm::cast<::mlir::ShapedType>($_op.getBase().getType());
     }

     /// Return the number of dimensions that participate in the permutation map.
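Since the interface method itself is renamed, code written against VectorTransferOpInterface (rather than the concrete ops) also switches to the new name. A small illustrative sketch under the same caveat; the helper below is hypothetical, not part of this commit:

#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"
#include "mlir/Interfaces/VectorInterfaces.h"

using namespace mlir;

// Hypothetical helper: works uniformly for transfer_read and transfer_write
// through the shared interface.
static bool xferBaseIsMemRef(Operation *op) {
  auto xferOp = dyn_cast<VectorTransferOpInterface>(op);
  if (!xferOp)
    return false;
  // getBase() replaces getSource(); getShapedType() is derived from its type.
  Value base = xferOp.getBase();
  return isa<MemRefType>(base.getType());
}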

mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp

Lines changed: 6 additions & 6 deletions
@@ -58,7 +58,7 @@ struct TransferReadToArmSMELowering
       return rewriter.notifyMatchFailure(transferReadOp,
                                          "not a valid vector type for SME");

-    if (!llvm::isa<MemRefType>(transferReadOp.getSource().getType()))
+    if (!llvm::isa<MemRefType>(transferReadOp.getBase().getType()))
       return rewriter.notifyMatchFailure(transferReadOp, "not a memref source");

     // Out-of-bounds dims are not supported.
@@ -84,7 +84,7 @@ struct TransferReadToArmSMELowering
     auto mask = transferReadOp.getMask();
     auto padding = mask ? transferReadOp.getPadding() : nullptr;
     rewriter.replaceOpWithNewOp<arm_sme::TileLoadOp>(
-        transferReadOp, vectorType, transferReadOp.getSource(),
+        transferReadOp, vectorType, transferReadOp.getBase(),
         transferReadOp.getIndices(), padding, mask, layout);

     return success();
@@ -128,7 +128,7 @@ struct TransferWriteToArmSMELowering
     if (!arm_sme::isValidSMETileVectorType(vType))
       return failure();

-    if (!llvm::isa<MemRefType>(writeOp.getSource().getType()))
+    if (!llvm::isa<MemRefType>(writeOp.getBase().getType()))
       return failure();

     // Out-of-bounds dims are not supported.
@@ -149,7 +149,7 @@ struct TransferWriteToArmSMELowering
                       : arm_sme::TileSliceLayout::Horizontal;

     rewriter.replaceOpWithNewOp<arm_sme::TileStoreOp>(
-        writeOp, writeOp.getVector(), writeOp.getSource(), writeOp.getIndices(),
+        writeOp, writeOp.getVector(), writeOp.getBase(), writeOp.getIndices(),
         writeOp.getMask(), layout);
     return success();
   }
@@ -686,7 +686,7 @@ struct FoldTransferWriteOfExtractTileSlice

   LogicalResult matchAndRewrite(vector::TransferWriteOp writeOp,
                                 PatternRewriter &rewriter) const final {
-    if (!isa<MemRefType>(writeOp.getSource().getType()))
+    if (!isa<MemRefType>(writeOp.getBase().getType()))
       return rewriter.notifyMatchFailure(writeOp, "destination not a memref");

     if (writeOp.hasOutOfBoundsDim())
@@ -713,7 +713,7 @@ struct FoldTransferWriteOfExtractTileSlice

     rewriter.replaceOpWithNewOp<arm_sme::StoreTileSliceOp>(
         writeOp, extractTileSlice.getTile(),
-        extractTileSlice.getTileSliceIndex(), mask, writeOp.getSource(),
+        extractTileSlice.getTileSliceIndex(), mask, writeOp.getBase(),
         writeOp.getIndices(), extractTileSlice.getLayout());
     return success();
   }

mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp

Lines changed: 8 additions & 8 deletions
@@ -486,7 +486,7 @@ struct CombineTransferReadOpTranspose final
     Value result =
         rewriter
             .create<vector::TransferReadOp>(
-                loc, resultType, transferReadOp.getSource(),
+                loc, resultType, transferReadOp.getBase(),
                 transferReadOp.getIndices(), AffineMapAttr::get(newMap),
                 transferReadOp.getPadding(), transferReadOp.getMask(),
                 transferReadOp.getInBoundsAttr())
@@ -581,7 +581,7 @@ convertTransferReadOp(RewriterBase &rewriter, vector::TransferReadOp op,
   gpu::MMAMatrixType type =
       gpu::MMAMatrixType::get(op.getVectorType().getShape(), elType, fragType);
   Value load = rewriter.create<gpu::SubgroupMmaLoadMatrixOp>(
-      op.getLoc(), type, op.getSource(), op.getIndices(),
+      op.getLoc(), type, op.getBase(), op.getIndices(),
       rewriter.getIndexAttr(*stride),
       isTranspose ? rewriter.getUnitAttr() : UnitAttr());
   valueMapping[mappingResult] = load;
@@ -612,7 +612,7 @@ convertTransferWriteOp(RewriterBase &rewriter, vector::TransferWriteOp op,

   Value matrix = it->second;
   auto store = rewriter.create<gpu::SubgroupMmaStoreMatrixOp>(
-      op.getLoc(), matrix, op.getSource(), op.getIndices(),
+      op.getLoc(), matrix, op.getBase(), op.getIndices(),
       rewriter.getIndexAttr(*stride), /*transpose=*/UnitAttr());
   (void)store;

@@ -759,7 +759,7 @@ creatLdMatrixCompatibleLoads(RewriterBase &rewriter, vector::TransferReadOp op,
                                   indices);

   nvgpu::LdMatrixOp newOp = rewriter.create<nvgpu::LdMatrixOp>(
-      loc, vectorType, op.getSource(), indices, *transpose, params->numTiles);
+      loc, vectorType, op.getBase(), indices, *transpose, params->numTiles);
   valueMapping[op] = newOp->getResult(0);
   return success();
 }
@@ -819,7 +819,7 @@ createNonLdMatrixLoads(RewriterBase &rewriter, vector::TransferReadOp op,
           rewriter, op, *coords, {laneId, logicalValueId}, newIndices);

       Value el = rewriter.create<vector::LoadOp>(loc, loadedElType,
-                                                 op.getSource(), newIndices);
+                                                 op.getBase(), newIndices);
       result = rewriter.create<vector::InsertOp>(loc, el, result, i);
     }
   } else {
@@ -842,7 +842,7 @@ createNonLdMatrixLoads(RewriterBase &rewriter, vector::TransferReadOp op,
         getXferIndices<vector::TransferReadOp>(
             rewriter, op, *coords, {laneId, logicalValueId}, newIndices);
         Value el = rewriter.create<memref::LoadOp>(op.getLoc(), loadedElType,
-                                                   op.getSource(), newIndices);
+                                                   op.getBase(), newIndices);
         result = rewriter.create<vector::InsertOp>(
             op.getLoc(), el, result, ArrayRef<int64_t>{i, innerIdx});
       }
@@ -876,7 +876,7 @@ convertTransferReadToLoads(RewriterBase &rewriter, vector::TransferReadOp op,
     return rewriter.notifyMatchFailure(op, "no warpMatrixInfo");

   bool isLdMatrixCompatible =
-      isSharedMemory(cast<MemRefType>(op.getSource().getType())) &&
+      isSharedMemory(cast<MemRefType>(op.getBase().getType())) &&
       nvgpu::inferTileWidthInBits(*warpMatrixInfo) == 128;

   VectorType vecTy = op.getVectorType();
@@ -934,7 +934,7 @@ convertTransferWriteToStores(RewriterBase &rewriter, vector::TransferWriteOp op,
     SmallVector<Value, 4> newIndices;
     getXferIndices<vector::TransferWriteOp>(
         rewriter, op, *coords, {laneId, logicalValueId}, newIndices);
-    rewriter.create<vector::StoreOp>(loc, el, op.getSource(), newIndices);
+    rewriter.create<vector::StoreOp>(loc, el, op.getBase(), newIndices);
   }

   LLVM_DEBUG(DBGS() << "erase: " << op << "\n");

mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp

Lines changed: 10 additions & 10 deletions
@@ -199,7 +199,7 @@ static Value generateInBoundsCheck(
   ImplicitLocOpBuilder lb(xferOp.getLoc(), b);
   if (!xferOp.isDimInBounds(0) && !isBroadcast) {
     Value memrefDim =
-        vector::createOrFoldDimOp(b, loc, xferOp.getSource(), *dim);
+        vector::createOrFoldDimOp(b, loc, xferOp.getBase(), *dim);
     AffineExpr d0, d1;
     bindDims(xferOp.getContext(), d0, d1);
     Value base = xferOp.getIndices()[*dim];
@@ -426,7 +426,7 @@ struct Strategy<TransferReadOp> {
     auto vecType = dyn_cast<VectorType>(bufferType.getElementType());
     auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr());
     auto newXferOp = b.create<vector::TransferReadOp>(
-        loc, vecType, xferOp.getSource(), xferIndices,
+        loc, vecType, xferOp.getBase(), xferIndices,
         AffineMapAttr::get(unpackedPermutationMap(b, xferOp)),
         xferOp.getPadding(), Value(), inBoundsAttr);

@@ -512,7 +512,7 @@ struct Strategy<TransferWriteOp> {
     Location loc = xferOp.getLoc();
     auto vec = b.create<memref::LoadOp>(loc, buffer, loadIndices);
     auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr());
-    auto source = loopState.empty() ? xferOp.getSource() : loopState[0];
+    auto source = loopState.empty() ? xferOp.getBase() : loopState[0];
     Type type = isTensorOp(xferOp) ? xferOp.getShapedType() : Type();
     auto newXferOp = b.create<vector::TransferWriteOp>(
         loc, type, vec, source, xferIndices,
@@ -544,7 +544,7 @@ struct Strategy<TransferWriteOp> {

   /// Return the initial loop state for the generated scf.for loop.
   static Value initialLoopState(TransferWriteOp xferOp) {
-    return isTensorOp(xferOp) ? xferOp.getSource() : Value();
+    return isTensorOp(xferOp) ? xferOp.getBase() : Value();
   }
 };

@@ -1145,7 +1145,7 @@ struct ScalableTransposeTransferWriteConversion
           ArrayRef<OpFoldResult>(*maskDims).drop_front());
     }

-    Value initDest = isTensorOp(writeOp) ? writeOp.getSource() : Value{};
+    Value initDest = isTensorOp(writeOp) ? writeOp.getBase() : Value{};
     ValueRange initLoopArgs = initDest ? initDest : ValueRange{};
     auto result = rewriter.create<scf::ForOp>(
         loc, lb, ub, step, initLoopArgs,
@@ -1165,7 +1165,7 @@ struct ScalableTransposeTransferWriteConversion

           // Create the transfer_write for the slice.
           Value dest =
-              loopIterArgs.empty() ? writeOp.getSource() : loopIterArgs.front();
+              loopIterArgs.empty() ? writeOp.getBase() : loopIterArgs.front();
           auto newWriteOp = b.create<vector::TransferWriteOp>(
               loc, sliceVec, dest, xferIndices,
               ArrayRef<bool>(writeOp.getInBoundsValues()).drop_front());
@@ -1340,7 +1340,7 @@ struct UnrollTransferReadConversion

           auto inBoundsAttr = dropFirstElem(b, xferOp.getInBoundsAttr());
           auto newXferOp = b.create<vector::TransferReadOp>(
-              loc, newXferVecType, xferOp.getSource(), xferIndices,
+              loc, newXferVecType, xferOp.getBase(), xferIndices,
               AffineMapAttr::get(unpackedPermutationMap(b, xferOp)),
               xferOp.getPadding(), Value(), inBoundsAttr);
           maybeAssignMask(b, xferOp, newXferOp, i);
@@ -1449,7 +1449,7 @@ struct UnrollTransferWriteConversion
     }

     int64_t dimSize = inputVectorTy.getShape()[0];
-    Value source = xferOp.getSource(); // memref or tensor to be written to.
+    Value source = xferOp.getBase(); // memref or tensor to be written to.
     auto sourceType = isTensorOp(xferOp) ? xferOp.getShapedType() : Type();

     // Generate fully unrolled loop of transfer ops.
@@ -1568,7 +1568,7 @@ struct Strategy1d<TransferReadOp> {
         /*inBoundsCase=*/
         [&](OpBuilder &b, Location loc) {
           Value val =
-              b.create<memref::LoadOp>(loc, xferOp.getSource(), indices);
+              b.create<memref::LoadOp>(loc, xferOp.getBase(), indices);
           return b.create<vector::InsertElementOp>(loc, val, vec, iv);
         },
         /*outOfBoundsCase=*/
@@ -1599,7 +1599,7 @@ struct Strategy1d<TransferWriteOp> {
         /*inBoundsCase=*/[&](OpBuilder &b, Location loc) {
           auto val =
               b.create<vector::ExtractElementOp>(loc, xferOp.getVector(), iv);
-          b.create<memref::StoreOp>(loc, val, xferOp.getSource(), indices);
+          b.create<memref::StoreOp>(loc, val, xferOp.getBase(), indices);
         });
     b.create<scf::YieldOp>(loc);
   }

mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp

Lines changed: 2 additions & 2 deletions
@@ -192,7 +192,7 @@ struct TransferReadLowering : public OpRewritePattern<vector::TransferReadOp> {

   xegpu::CreateNdDescOp ndDesc =
       createNdDescriptor(rewriter, loc, descType,
-                         dyn_cast<TypedValue<MemRefType>>(readOp.getSource()),
+                         dyn_cast<TypedValue<MemRefType>>(readOp.getBase()),
                          readOp.getIndices());

   DenseI64ArrayAttr transposeAttr =
@@ -233,7 +233,7 @@ struct TransferWriteLowering
                       xegpu::MemorySpace::Global);
   xegpu::CreateNdDescOp ndDesc = createNdDescriptor(
       rewriter, loc, descType,
-      dyn_cast<TypedValue<MemRefType>>(writeOp.getSource()),
+      dyn_cast<TypedValue<MemRefType>>(writeOp.getBase()),
       writeOp.getIndices());

   // By default, no specific caching policy is assigned.

mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp

Lines changed: 2 additions & 2 deletions
@@ -118,7 +118,7 @@ static Value createVectorLoadForMaskedLoad(OpBuilder &builder, Location loc,
   Value fill = builder.create<vector::SplatOp>(loc, unbroadcastedVectorType,
                                                readOp.getPadding());
   Value load = builder.create<vector::LoadOp>(
-      loc, unbroadcastedVectorType, readOp.getSource(), readOp.getIndices());
+      loc, unbroadcastedVectorType, readOp.getBase(), readOp.getIndices());
   Value res = builder.create<arith::SelectOp>(loc, unbroadcastedVectorType,
                                               readOp.getMask(), load, fill);
   // Insert a broadcasting op if required.
@@ -149,7 +149,7 @@ struct TransferReadLowering final : OpRewritePattern<vector::TransferReadOp> {
     }

     Location loc = readOp.getLoc();
-    Value src = readOp.getSource();
+    Value src = readOp.getBase();

     VectorType vectorType = readOp.getVectorType();
     int64_t vectorSize = vectorType.getNumElements();

mlir/lib/Dialect/ArmSME/Transforms/VectorLegalization.cpp

Lines changed: 5 additions & 5 deletions
@@ -315,7 +315,7 @@ struct LegalizeTransferReadOpsByDecomposition
          decomposeToSMETiles(rewriter, vectorType, smeTileType, transposed)) {
       auto smeMask = extractSMEMask(rewriter, loc, mask, smeTile);
       auto smeRead = rewriter.create<vector::TransferReadOp>(
-          loc, smeTileType, readOp.getSource(),
+          loc, smeTileType, readOp.getBase(),
           getSMESubTileIndices(rewriter, loc, readOp.getIndices(), smeTile),
           readOp.getPermutationMapAttr(), readOp.getPadding(), smeMask,
           readOp.getInBoundsAttr());
@@ -359,7 +359,7 @@ struct LegalizeTransferWriteOpsByDecomposition
     auto smeTileType = getSMETileTypeForElement(vectorType.getElementType());
     auto inputSMETiles = adaptor.getValueToStore();

-    Value destTensorOrMemref = writeOp.getSource();
+    Value destTensorOrMemref = writeOp.getBase();
     for (auto [index, smeTile] : llvm::enumerate(decomposeToSMETiles(
              rewriter, vectorType, smeTileType, transposed))) {
       auto smeMask = extractSMEMask(rewriter, loc, mask, smeTile);
@@ -497,7 +497,7 @@ struct LegalizeMultiTileTransferWriteAsStoreLoop
           auto slice =
               rewriter.create<vector::ExtractOp>(loc, tile, tileSliceIndex);
           rewriter.create<vector::TransferWriteOp>(
-              loc, slice, writeOp.getSource(), ValueRange{storeRow, storeCol},
+              loc, slice, writeOp.getBase(), ValueRange{storeRow, storeCol},
              AffineMapAttr::get(writeOp.getPermutationMap().dropResult(0)),
              sliceMask,
              rewriter.getBoolArrayAttr(
@@ -677,7 +677,7 @@ struct LiftIllegalVectorTransposeToMemory
         });
     SmallVector<Value> strides(readType.getRank(), Value(one));
     auto readSubview = rewriter.create<memref::SubViewOp>(
-        loc, illegalRead.getSource(), illegalRead.getIndices(), readSizes,
+        loc, illegalRead.getBase(), illegalRead.getIndices(), readSizes,
         strides);

     // Apply the transpose to all values/attributes of the transfer_read:
@@ -851,7 +851,7 @@ struct LowerIllegalTransposeStoreViaZA

     // Note: We need to use `get_tile` as there's no vector-level `undef`.
     Value undefTile = rewriter.create<arm_sme::GetTileOp>(loc, smeTileType);
-    Value destTensorOrMemref = writeOp.getSource();
+    Value destTensorOrMemref = writeOp.getBase();
     auto numSlicesPerTile =
         std::min(sourceType.getDimSize(0), smeTileType.getDimSize(0));
     auto numSlices =
