Skip to content

Commit 11f54be

Browse files
committed
fixup! [mlir][linalg] Add masked vectorisation for depthwise convolutions
Address Diego's comments, move to vector utils
1 parent 5d87b89 commit 11f54be

File tree

3 files changed

+38
-24
lines changed

3 files changed

+38
-24
lines changed

mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,11 +10,14 @@
1010
#define MLIR_DIALECT_VECTOR_UTILS_VECTORUTILS_H_
1111

1212
#include "mlir/Dialect/Utils/IndexingUtils.h"
13+
#include "mlir/Dialect/MemRef/IR/MemRef.h"
14+
#include "mlir/Dialect/Tensor/IR/Tensor.h"
1315
#include "mlir/Dialect/Vector/IR/VectorOps.h"
1416
#include "mlir/IR/BuiltinAttributes.h"
1517
#include "mlir/Support/LLVM.h"
1618

1719
#include "llvm/ADT/DenseMap.h"
20+
#include "llvm/ADT/TypeSwitch.h"
1821

1922
namespace mlir {
2023

@@ -98,6 +101,17 @@ bool isContiguousSlice(MemRefType memrefType, VectorType vectorType);
98101
std::optional<StaticTileOffsetRange>
99102
createUnrollIterator(VectorType vType, int64_t targetRank = 1);
100103

104+
/// A wrapper for getMixedSizes for vector.transfer_read and
105+
/// vector.transfer_write Ops (for source and destination, respectively).
106+
///
107+
/// Tensor and MemRef types implement their own, very similar version of
108+
/// getMixedSizes. This method will call the appropriate version (depending on
109+
/// `hasTensorSemantics`). It will also automatically extract the operand on
110+
/// which to call it (the source for "read" and the destination for "write" ops).
111+
SmallVector<OpFoldResult> getMixedSizesXfer(bool hasTensorSemantics,
112+
Operation *xfer,
113+
RewriterBase &rewriter);
114+
101115
} // namespace vector
102116

103117
/// Constructs a permutation map of invariant memref indices to vector

mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp

Lines changed: 7 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
2525
#include "mlir/Dialect/Vector/IR/VectorOps.h"
2626
#include "mlir/Dialect/Vector/Interfaces/MaskableOpInterface.h"
27+
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
2728
#include "mlir/IR/AffineExpr.h"
2829
#include "mlir/IR/Builders.h"
2930
#include "mlir/IR/BuiltinTypeInterfaces.h"
@@ -1721,9 +1722,8 @@ static LogicalResult vectorizeDynamicConvOpPrecondition(linalg::LinalgOp conv) {
17211722
}
17221723

17231724
// Support dynamic shapes in 1D depthwise convolution, but only in the
1724-
// _channel_ dimension. That's exclusively to support scalable
1725-
// vectorisation.
1726-
auto lhs = conv.getDpsInputOperand(0)->get();
1725+
// _channel_ dimension.
1726+
Value lhs = conv.getDpsInputOperand(0)->get();
17271727
ArrayRef<int64_t> lhsShape = cast<ShapedType>(lhs.getType()).getShape();
17281728
auto shapeWithoutCh = lhsShape.drop_back(1);
17291729
if (ShapedType::isDynamicShape(shapeWithoutCh)) {
@@ -3217,29 +3217,12 @@ struct Conv1DGenerator
32173217
return opToMask;
32183218
auto maskType =
32193219
VectorType::get(maskShape, rewriter.getI1Type(), scalableDims);
3220-
SmallVector<OpFoldResult> mixedSourceDims =
3221-
cast<LinalgOp>(op).hasPureTensorSemantics()
3222-
? TypeSwitch<Operation *, SmallVector<OpFoldResult>>(opToMask)
3223-
.Case<vector::TransferReadOp>([&](auto readOp) {
3224-
return tensor::getMixedSizes(rewriter, loc,
3225-
readOp.getSource());
3226-
})
3227-
.Case<vector::TransferWriteOp>([&](auto writeOp) {
3228-
return tensor::getMixedSizes(rewriter, loc,
3229-
writeOp.getOperand(1));
3230-
})
3231-
: TypeSwitch<Operation *, SmallVector<OpFoldResult>>(opToMask)
3232-
.Case<vector::TransferReadOp>([&](auto readOp) {
3233-
return memref::getMixedSizes(rewriter, loc,
3234-
readOp.getSource());
3235-
})
3236-
.Case<vector::TransferWriteOp>([&](auto writeOp) {
3237-
return memref::getMixedSizes(rewriter, loc,
3238-
writeOp.getOperand(1));
3239-
});
3220+
3221+
SmallVector<OpFoldResult> mixedDims = vector::getMixedSizesXfer(
3222+
cast<LinalgOp>(op).hasPureTensorSemantics(), opToMask, rewriter);
32403223

32413224
Value maskOp =
3242-
rewriter.create<vector::CreateMaskOp>(loc, maskType, mixedSourceDims);
3225+
rewriter.create<vector::CreateMaskOp>(loc, maskType, mixedDims);
32433226

32443227
return mlir::vector::maskOperation(rewriter, opToMask, maskOp);
32453228
};

mlir/lib/Dialect/Vector/Utils/VectorUtils.cpp

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -300,3 +300,20 @@ vector::createUnrollIterator(VectorType vType, int64_t targetRank) {
300300
shapeToUnroll = shapeToUnroll.slice(0, firstScalableDim);
301301
return StaticTileOffsetRange(shapeToUnroll, /*unrollStep=*/1);
302302
}
303+
304+
SmallVector<OpFoldResult> vector::getMixedSizesXfer(bool hasTensorSemantics,
305+
Operation *xfer,
306+
RewriterBase &rewriter) {
307+
auto loc = xfer->getLoc();
308+
309+
Value blah = TypeSwitch<Operation *, Value>(xfer)
310+
.Case<vector::TransferReadOp>(
311+
[&](auto readOp) { return readOp.getSource(); })
312+
.Case<vector::TransferWriteOp>(
313+
[&](auto writeOp) { return writeOp.getOperand(1); });
314+
315+
SmallVector<OpFoldResult> mixedSourceDims =
316+
hasTensorSemantics ? tensor::getMixedSizes(rewriter, loc, blah)
317+
: memref::getMixedSizes(rewriter, loc, blah);
318+
return mixedSourceDims;
319+
}

0 commit comments

Comments
 (0)