Skip to content

[mlir][NFC] Avoid using braced initializer lists to call a constructor. #123714

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Jan 22, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion mlir/lib/Bindings/Python/IRAttributes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -845,7 +845,7 @@ class PyDenseElementsAttribute
}
shapedType = *explicitType;
} else {
SmallVector<int64_t> shape{static_cast<int64_t>(numAttributes)};
SmallVector<int64_t> shape = {static_cast<int64_t>(numAttributes)};
shapedType = mlirRankedTensorTypeGet(
shape.size(), shape.data(),
mlirAttributeGetType(pyTryCast<PyAttribute>(attributes[0])),
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1476,7 +1476,7 @@ class MaterializeResizeBroadcast : public OpRewritePattern<tosa::ResizeOp> {
reassociationMap.push_back({});
reassociationMap.back().push_back(builder.getAffineDimExpr(3));

llvm::SmallVector<int64_t> collapseShape{batch};
llvm::SmallVector<int64_t> collapseShape = {batch};
if (inputH != 1)
collapseShape.push_back(outputH);
if (inputW != 1)
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -648,12 +648,12 @@ class FullyConnectedConverter

SmallVector<Value> filteredDims = condenseValues(dynDims);

SmallVector<int64_t> permutation{1, 0};
SmallVector<int64_t> permutation = {1, 0};
auto permutationAttr = rewriter.getI64TensorAttr(permutation);
Value permutationValue =
rewriter.create<arith::ConstantOp>(loc, permutationAttr);

SmallVector<int64_t> newWeightShape{weightShape[1], weightShape[0]};
SmallVector<int64_t> newWeightShape = {weightShape[1], weightShape[0]};
Type newWeightTy =
RankedTensorType::get(newWeightShape, weightTy.getElementType());

Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,7 @@ struct TransferReadLowering : public OpRewritePattern<vector::TransferReadOp> {
readOp, "Unsupported data type for tranposition");

// If load is transposed, get the base shape for the tensor descriptor.
SmallVector<int64_t> descShape{vecTy.getShape()};
SmallVector<int64_t> descShape(vecTy.getShape());
if (isTransposeLoad)
std::reverse(descShape.begin(), descShape.end());
auto descType = xegpu::TensorDescType::get(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -126,8 +126,8 @@ class LowerContractionToSMMLAPattern
loc, op.getResultType(), rewriter.getZeroAttr(op.getResultType()));

SmallVector<int64_t> unrolledSize = *op.getShapeForUnroll();
SmallVector<int64_t> smmlaShape{2, 8};
SmallVector<int64_t> loopOrder{0, 1};
SmallVector<int64_t> smmlaShape = {2, 8};
SmallVector<int64_t> loopOrder = {0, 1};
if (unrolledSize.size() == 3) {
smmlaShape.insert(smmlaShape.begin(), isVecmat ? 1 : 2);
loopOrder.push_back(2);
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Linalg/TransformOps/GPUHeuristics.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ transform::gpu::CopyMappingInfo::inferNumThreadsImpl(
// Scale the most minor size to account for the chosen vector size and
// maximize the number of threads without exceeding the total number of
// threads.
SmallVector<int64_t> scaledSizes{sizes};
SmallVector<int64_t> scaledSizes(sizes);
scaledSizes.back() /= desiredVectorSize;
if (scaledSizes.back() > totalNumThreads) {
LDBG("--Too few threads given the required vector size -> FAIL");
Expand Down
6 changes: 3 additions & 3 deletions mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ static bool validateFullTilesOnDims(linalg::LinalgOp linalgOp,

// Skip the batch dimension if present.
// Offset all dimensions accordingly.
SmallVector<int64_t, 3> offsetDims{dims};
SmallVector<int64_t, 3> offsetDims(dims);
for (size_t i = 0; i < offsetDims.size(); i++)
offsetDims[i] += batchDimsOffset;

Expand Down Expand Up @@ -111,10 +111,10 @@ transposePackedMatmul(RewriterBase &rewriter, linalg::LinalgOp linalgOp,

// Transpose only the dimensions that need it to conform to the provided
// transposition settings.
SmallVector<int64_t> innerPerm{0, 1};
SmallVector<int64_t> innerPerm = {0, 1};
if (isInnerTransposed != transposeInnerBlocks)
innerPerm = {1, 0};
SmallVector<int64_t> outerPerm{0, 1};
SmallVector<int64_t> outerPerm = {0, 1};
if (isOuterTransposed != transposeOuterBlocks)
outerPerm = {1, 0};

Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ FailureOr<Operation *> transposeConv2DHelper(RewriterBase &rewriter,
FHWCConvOp op) {
// Construct a permutation of the filter tensor dimensions. For a 2D
// convolution this will be known statically as [1, 2, 3, 0].
SmallVector<int64_t> filterPerm({1, 2, 3, 0});
SmallVector<int64_t> filterPerm = {1, 2, 3, 0};

// Create the type for the transposed filter tensor.
auto filter = op->getOperand(1);
Expand Down
27 changes: 13 additions & 14 deletions mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -86,8 +86,8 @@ extractConvInputSlices(RewriterBase &rewriter, Location loc, Value input,
if (isSingleChanneled) {
// Extract input slice of size {wSizeStep} @ [w + kw] for non-channeled
// convolution.
SmallVector<int64_t> sizes{wSizeStep};
SmallVector<int64_t> strides{1};
SmallVector<int64_t> sizes = {wSizeStep};
SmallVector<int64_t> strides = {1};
for (int64_t kw = 0; kw < kwSize; ++kw) {
for (int64_t w = 0; w < wSize; w += wSizeStep) {
result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
Expand All @@ -97,8 +97,8 @@ extractConvInputSlices(RewriterBase &rewriter, Location loc, Value input,
} else {
// Extract lhs slice of size {n, wSizeStep, c} @ [0, sw * w + dw * kw, 0]
// for channeled convolution.
SmallVector<int64_t> sizes{nSize, wSizeStep, cSize};
SmallVector<int64_t> strides{1, 1, 1};
SmallVector<int64_t> sizes = {nSize, wSizeStep, cSize};
SmallVector<int64_t> strides = {1, 1, 1};
for (int64_t kw = 0; kw < kwSize; ++kw) {
for (int64_t w = 0; w < wSize; w += wSizeStep) {
result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
Expand Down Expand Up @@ -135,17 +135,17 @@ extractConvResultSlices(RewriterBase &rewriter, Location loc, Value res,
SmallVector<Value> result;
if (isSingleChanneled) {
// Extract res slice: {wSizeStep} @ [w] for non-channeled convolution.
SmallVector<int64_t> sizes{wSizeStep};
SmallVector<int64_t> strides{1};
SmallVector<int64_t> sizes = {wSizeStep};
SmallVector<int64_t> strides = {1};
for (int64_t w = 0; w < wSize; w += wSizeStep) {
result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
loc, res, /*offsets=*/ArrayRef<int64_t>{w}, sizes, strides));
}
} else {
// Extract res slice: {n, wSizeStep, f} @ [0, w, 0] for channeled
// convolution.
SmallVector<int64_t> sizes{nSize, wSizeStep, fSize};
SmallVector<int64_t> strides{1, 1, 1};
SmallVector<int64_t> sizes = {nSize, wSizeStep, fSize};
SmallVector<int64_t> strides = {1, 1, 1};
for (int64_t w = 0; w < wSize; w += wSizeStep) {
result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
loc, res, /*offsets=*/ArrayRef<int64_t>{0, w, 0}, sizes, strides));
Expand All @@ -163,15 +163,15 @@ static Value insertConvResultSlices(RewriterBase &rewriter, Location loc,
if (isSingleChanneled) {
// Write back res slice: {wSizeStep} @ [w] for non-channeled convolution.
// This does not depend on kw.
SmallVector<int64_t> strides{1};
SmallVector<int64_t> strides = {1};
for (int64_t w = 0; w < wSize; w += wSizeStep) {
res = rewriter.create<vector::InsertStridedSliceOp>(
loc, resVals[w], res, /*offsets=*/ArrayRef<int64_t>{w}, strides);
}
} else {
// Write back res slice: {n, wSizeStep, f} @ [0, w, 0] for channeled
// convolution. This does not depend on kw.
SmallVector<int64_t> strides{1, 1, 1};
SmallVector<int64_t> strides = {1, 1, 1};
for (int64_t w = 0; w < wSize; w += wSizeStep) {
res = rewriter.create<vector::InsertStridedSliceOp>(
loc, resVals[w], res, /*offsets=*/ArrayRef<int64_t>{0, w, 0},
Expand Down Expand Up @@ -3505,8 +3505,8 @@ struct Conv1DGenerator
//===------------------------------------------------------------------===//
// Unroll along kw and read slices of lhs and rhs.
SmallVector<Value> lhsVals, rhsVals, resVals;
auto inOutSliceSizes = SmallVector<int64_t>{nSize, wSizeStep, cSize};
auto inOutStrides = SmallVector<int64_t>{1, 1, 1};
SmallVector<int64_t> inOutSliceSizes = {nSize, wSizeStep, cSize};
SmallVector<int64_t> inOutStrides = {1, 1, 1};

// Extract lhs slice of size {n, wSizeStep, c}
// @ [0, sw * w + dw * kw, 0].
Expand Down Expand Up @@ -3538,8 +3538,7 @@ struct Conv1DGenerator

// Note - the scalable flags are ignored as flattening combined with
// scalable vectorization is not supported.
auto inOutFlattenSliceSizes =
SmallVector<int64_t>{nSize, wSizeStep * cSize};
SmallVector<int64_t> inOutFlattenSliceSizes = {nSize, wSizeStep * cSize};
auto lhsTypeAfterFlattening =
VectorType::get(inOutFlattenSliceSizes, lhsEltType);
auto resTypeAfterFlattening =
Expand Down
6 changes: 3 additions & 3 deletions mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -740,9 +740,9 @@ static std::tuple<SmallVector<int64_t>, SmallVector<int64_t>,
SmallVector<int64_t>>
makeVectorShapes(ArrayRef<int64_t> lhs, ArrayRef<int64_t> rhs,
ArrayRef<int64_t> res) {
SmallVector<int64_t> vlhs{lhs};
SmallVector<int64_t> vrhs{rhs};
SmallVector<int64_t> vres{res};
SmallVector<int64_t> vlhs(lhs);
SmallVector<int64_t> vrhs(rhs);
SmallVector<int64_t> vres(res);
return std::make_tuple(vlhs, vrhs, vres);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -557,7 +557,7 @@ struct CastAwayConstantMaskLeadingOneDim
int64_t flatLeadingSize =
std::accumulate(dimSizes.begin(), dimSizes.begin() + dropDim + 1,
static_cast<int64_t>(1), std::multiplies<int64_t>());
SmallVector<int64_t> newDimSizes({flatLeadingSize});
SmallVector<int64_t> newDimSizes = {flatLeadingSize};
newDimSizes.append(dimSizes.begin() + dropDim + 1, dimSizes.end());

auto newMask = rewriter.create<vector::ConstantMaskOp>(
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -930,8 +930,8 @@ struct BreakDownVectorBitCast : public OpRewritePattern<vector::BitCastOp> {
loc, elemType, rewriter.getZeroAttr(elemType));
Value res = rewriter.create<SplatOp>(loc, castDstType, zero);

SmallVector<int64_t> sliceShape{castDstLastDim};
SmallVector<int64_t> strides{1};
SmallVector<int64_t> sliceShape = {castDstLastDim};
SmallVector<int64_t> strides = {1};
VectorType newCastDstType =
VectorType::get(SmallVector<int64_t>{castDstLastDim / shrinkRatio},
castDstType.getElementType());
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -66,8 +66,8 @@ Value mlir::x86vector::avx2::intrin::mm256ShufflePs(ImplicitLocOpBuilder &b,
uint8_t mask) {
uint8_t b01, b23, b45, b67;
MaskHelper::extractShuffle(mask, b01, b23, b45, b67);
SmallVector<int64_t> shuffleMask{b01, b23, b45 + 8, b67 + 8,
b01 + 4, b23 + 4, b45 + 8 + 4, b67 + 8 + 4};
SmallVector<int64_t> shuffleMask = {
b01, b23, b45 + 8, b67 + 8, b01 + 4, b23 + 4, b45 + 8 + 4, b67 + 8 + 4};
return b.create<vector::ShuffleOp>(v1, v2, shuffleMask);
}

Expand Down
Loading