
Commit 9cbc1f2

[mlir][NFC] Avoid using braced initializer lists to call a constructor. (#123714)
The LLVM style guide prefers not to use braced initializer lists to call a constructor, and prefers an equals sign before the open curly brace when a braced initializer list is used to initialize a variable. See https://llvm.org/docs/CodingStandards.html#do-not-use-braced-initializer-lists-to-call-a-constructor for more details.

The style guide does not explain the rationale well, but an article from Abseil mentions a few benefits, e.g., avoiding the most vexing parse. See https://abseil.io/tips/88 for more details.

Signed-off-by: hanhanW <[email protected]>
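As an illustration of the ambiguity the rule avoids (a minimal sketch, not part of the commit; the variable names are hypothetical):

#include "llvm/ADT/SmallVector.h"
using llvm::SmallVector;

void example() {
  // With braces directly after the type, the declaration reads like a
  // constructor call, and the two forms below are easy to confuse:
  SmallVector<int64_t> a{5}; // one element with value 5
  SmallVector<int64_t> b(5); // five value-initialized (zero) elements

  // Preferred style: the '=' makes it explicit that the braces spell out
  // the variable's contents as an initializer list.
  SmallVector<int64_t> c = {5}; // one element with value 5
}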
1 parent: 3bd8b02

File tree

13 files changed: +33 -34 lines changed


mlir/lib/Bindings/Python/IRAttributes.cpp

Lines changed: 1 addition & 1 deletion
@@ -845,7 +845,7 @@ class PyDenseElementsAttribute
       }
       shapedType = *explicitType;
     } else {
-      SmallVector<int64_t> shape{static_cast<int64_t>(numAttributes)};
+      SmallVector<int64_t> shape = {static_cast<int64_t>(numAttributes)};
       shapedType = mlirRankedTensorTypeGet(
           shape.size(), shape.data(),
           mlirAttributeGetType(pyTryCast<PyAttribute>(attributes[0])),

mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp

Lines changed: 1 addition & 1 deletion
@@ -1476,7 +1476,7 @@ class MaterializeResizeBroadcast : public OpRewritePattern<tosa::ResizeOp> {
     reassociationMap.push_back({});
     reassociationMap.back().push_back(builder.getAffineDimExpr(3));

-    llvm::SmallVector<int64_t> collapseShape{batch};
+    llvm::SmallVector<int64_t> collapseShape = {batch};
     if (inputH != 1)
       collapseShape.push_back(outputH);
     if (inputW != 1)

mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamed.cpp

Lines changed: 2 additions & 2 deletions
@@ -648,12 +648,12 @@ class FullyConnectedConverter

     SmallVector<Value> filteredDims = condenseValues(dynDims);

-    SmallVector<int64_t> permutation{1, 0};
+    SmallVector<int64_t> permutation = {1, 0};
     auto permutationAttr = rewriter.getI64TensorAttr(permutation);
     Value permutationValue =
         rewriter.create<arith::ConstantOp>(loc, permutationAttr);

-    SmallVector<int64_t> newWeightShape{weightShape[1], weightShape[0]};
+    SmallVector<int64_t> newWeightShape = {weightShape[1], weightShape[0]};
     Type newWeightTy =
         RankedTensorType::get(newWeightShape, weightTy.getElementType());

mlir/lib/Conversion/VectorToXeGPU/VectorToXeGPU.cpp

Lines changed: 1 addition & 1 deletion
@@ -182,7 +182,7 @@ struct TransferReadLowering : public OpRewritePattern<vector::TransferReadOp> {
         readOp, "Unsupported data type for tranposition");

     // If load is transposed, get the base shape for the tensor descriptor.
-    SmallVector<int64_t> descShape{vecTy.getShape()};
+    SmallVector<int64_t> descShape(vecTy.getShape());
     if (isTransposeLoad)
       std::reverse(descShape.begin(), descShape.end());
     auto descType = xegpu::TensorDescType::get(
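Note that this hunk switches to parentheses rather than to '= {...}': vecTy.getShape() returns an ArrayRef<int64_t>, so the intent is to copy a range of elements through a constructor call, not to spell out an element list. A minimal sketch of the distinction, assuming the SmallVector constructor taking an ArrayRef that recent LLVM provides (the values are hypothetical):

ArrayRef<int64_t> shape = vecTy.getShape(); // e.g. [4, 8]
SmallVector<int64_t> copied(shape);         // copies the elements: {4, 8}
SmallVector<int64_t> single = {4};          // spells out a one-element list: {4}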

mlir/lib/Dialect/ArmNeon/Transforms/LowerContractionToSMMLAPattern.cpp

Lines changed: 2 additions & 2 deletions
@@ -126,8 +126,8 @@ class LowerContractionToSMMLAPattern
         loc, op.getResultType(), rewriter.getZeroAttr(op.getResultType()));

     SmallVector<int64_t> unrolledSize = *op.getShapeForUnroll();
-    SmallVector<int64_t> smmlaShape{2, 8};
-    SmallVector<int64_t> loopOrder{0, 1};
+    SmallVector<int64_t> smmlaShape = {2, 8};
+    SmallVector<int64_t> loopOrder = {0, 1};
     if (unrolledSize.size() == 3) {
       smmlaShape.insert(smmlaShape.begin(), isVecmat ? 1 : 2);
       loopOrder.push_back(2);

mlir/lib/Dialect/Linalg/TransformOps/GPUHeuristics.cpp

Lines changed: 1 addition & 1 deletion
@@ -222,7 +222,7 @@ transform::gpu::CopyMappingInfo::inferNumThreadsImpl(
   // Scale the most minor size to account for the chosen vector size and
   // maximize the number of threads without exceeding the total number of
   // threads.
-  SmallVector<int64_t> scaledSizes{sizes};
+  SmallVector<int64_t> scaledSizes(sizes);
   scaledSizes.back() /= desiredVectorSize;
   if (scaledSizes.back() > totalNumThreads) {
     LDBG("--Too few threads given the required vector size -> FAIL");

mlir/lib/Dialect/Linalg/Transforms/BlockPackMatmul.cpp

Lines changed: 3 additions & 3 deletions
@@ -55,7 +55,7 @@ static bool validateFullTilesOnDims(linalg::LinalgOp linalgOp,

   // Skip the batch dimension if present.
   // Offset all dimensions accordingly.
-  SmallVector<int64_t, 3> offsetDims{dims};
+  SmallVector<int64_t, 3> offsetDims(dims);
   for (size_t i = 0; i < offsetDims.size(); i++)
     offsetDims[i] += batchDimsOffset;

@@ -111,10 +111,10 @@ transposePackedMatmul(RewriterBase &rewriter, linalg::LinalgOp linalgOp,

   // Transpose only the dimensions that need that to conform to the provided
   // transpotion settings.
-  SmallVector<int64_t> innerPerm{0, 1};
+  SmallVector<int64_t> innerPerm = {0, 1};
   if (isInnerTransposed != transposeInnerBlocks)
     innerPerm = {1, 0};
-  SmallVector<int64_t> outerPerm{0, 1};
+  SmallVector<int64_t> outerPerm = {0, 1};
   if (isOuterTransposed != transposeOuterBlocks)
     outerPerm = {1, 0};

mlir/lib/Dialect/Linalg/Transforms/TransposeConv2D.cpp

Lines changed: 1 addition & 1 deletion
@@ -52,7 +52,7 @@ FailureOr<Operation *> transposeConv2DHelper(RewriterBase &rewriter,
                                              FHWCConvOp op) {
   // Construct a permutation of the filter tensor dimensions. For a 2D
   // convolution this will be known statically as [1, 2, 3, 0].
-  SmallVector<int64_t> filterPerm({1, 2, 3, 0});
+  SmallVector<int64_t> filterPerm = {1, 2, 3, 0};

   // Create the type for the transposed filter tensor.
   auto filter = op->getOperand(1);

mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp

Lines changed: 13 additions & 14 deletions
@@ -86,8 +86,8 @@ extractConvInputSlices(RewriterBase &rewriter, Location loc, Value input,
   if (isSingleChanneled) {
     // Extract input slice of size {wSizeStep} @ [w + kw] for non-channeled
     // convolution.
-    SmallVector<int64_t> sizes{wSizeStep};
-    SmallVector<int64_t> strides{1};
+    SmallVector<int64_t> sizes = {wSizeStep};
+    SmallVector<int64_t> strides = {1};
     for (int64_t kw = 0; kw < kwSize; ++kw) {
       for (int64_t w = 0; w < wSize; w += wSizeStep) {
         result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
@@ -97,8 +97,8 @@ extractConvInputSlices(RewriterBase &rewriter, Location loc, Value input,
   } else {
     // Extract lhs slice of size {n, wSizeStep, c} @ [0, sw * w + dw * kw, 0]
     // for channeled convolution.
-    SmallVector<int64_t> sizes{nSize, wSizeStep, cSize};
-    SmallVector<int64_t> strides{1, 1, 1};
+    SmallVector<int64_t> sizes = {nSize, wSizeStep, cSize};
+    SmallVector<int64_t> strides = {1, 1, 1};
     for (int64_t kw = 0; kw < kwSize; ++kw) {
       for (int64_t w = 0; w < wSize; w += wSizeStep) {
         result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
@@ -135,17 +135,17 @@ extractConvResultSlices(RewriterBase &rewriter, Location loc, Value res,
   SmallVector<Value> result;
   if (isSingleChanneled) {
     // Extract res slice: {wSizeStep} @ [w] for non-channeled convolution.
-    SmallVector<int64_t> sizes{wSizeStep};
-    SmallVector<int64_t> strides{1};
+    SmallVector<int64_t> sizes = {wSizeStep};
+    SmallVector<int64_t> strides = {1};
     for (int64_t w = 0; w < wSize; w += wSizeStep) {
       result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
           loc, res, /*offsets=*/ArrayRef<int64_t>{w}, sizes, strides));
     }
   } else {
     // Extract res slice: {n, wSizeStep, f} @ [0, w, 0] for channeled
     // convolution.
-    SmallVector<int64_t> sizes{nSize, wSizeStep, fSize};
-    SmallVector<int64_t> strides{1, 1, 1};
+    SmallVector<int64_t> sizes = {nSize, wSizeStep, fSize};
+    SmallVector<int64_t> strides = {1, 1, 1};
     for (int64_t w = 0; w < wSize; w += wSizeStep) {
       result.push_back(rewriter.create<vector::ExtractStridedSliceOp>(
           loc, res, /*offsets=*/ArrayRef<int64_t>{0, w, 0}, sizes, strides));
@@ -163,15 +163,15 @@ static Value insertConvResultSlices(RewriterBase &rewriter, Location loc,
   if (isSingleChanneled) {
     // Write back res slice: {wSizeStep} @ [w] for non-channeled convolution.
     // This does not depend on kw.
-    SmallVector<int64_t> strides{1};
+    SmallVector<int64_t> strides = {1};
     for (int64_t w = 0; w < wSize; w += wSizeStep) {
       res = rewriter.create<vector::InsertStridedSliceOp>(
           loc, resVals[w], res, /*offsets=*/ArrayRef<int64_t>{w}, strides);
     }
   } else {
     // Write back res slice: {n, wSizeStep, f} @ [0, w, 0] for channeled
     // convolution. This does not depend on kw.
-    SmallVector<int64_t> strides{1, 1, 1};
+    SmallVector<int64_t> strides = {1, 1, 1};
     for (int64_t w = 0; w < wSize; w += wSizeStep) {
       res = rewriter.create<vector::InsertStridedSliceOp>(
           loc, resVals[w], res, /*offsets=*/ArrayRef<int64_t>{0, w, 0},
@@ -3505,8 +3505,8 @@ struct Conv1DGenerator
     //===------------------------------------------------------------------===//
     // Unroll along kw and read slices of lhs and rhs.
     SmallVector<Value> lhsVals, rhsVals, resVals;
-    auto inOutSliceSizes = SmallVector<int64_t>{nSize, wSizeStep, cSize};
-    auto inOutStrides = SmallVector<int64_t>{1, 1, 1};
+    SmallVector<int64_t> inOutSliceSizes = {nSize, wSizeStep, cSize};
+    SmallVector<int64_t> inOutStrides = {1, 1, 1};

     // Extract lhs slice of size {n, wSizeStep, c}
     // @ [0, sw * w + dw * kw, 0].
@@ -3538,8 +3538,7 @@ struct Conv1DGenerator

     // Note - the scalable flags are ignored as flattening combined with
     // scalable vectorization is not supported.
-    auto inOutFlattenSliceSizes =
-        SmallVector<int64_t>{nSize, wSizeStep * cSize};
+    SmallVector<int64_t> inOutFlattenSliceSizes = {nSize, wSizeStep * cSize};
     auto lhsTypeAfterFlattening =
         VectorType::get(inOutFlattenSliceSizes, lhsEltType);
     auto resTypeAfterFlattening =
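The last two hunks above also drop the 'auto var = SmallVector<int64_t>{...}' spelling. Under the guideline this form is doubly discouraged: the braces read as a constructor call on a temporary, and 'auto' hides the type. A minimal before/after sketch (hypothetical names):

auto a = SmallVector<int64_t>{1, 2}; // braced constructor call; type hidden behind auto
SmallVector<int64_t> b = {1, 2};     // preferred: explicit type, '=' plus element list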

mlir/lib/Dialect/NVGPU/TransformOps/NVGPUTransformOps.cpp

Lines changed: 3 additions & 3 deletions
@@ -740,9 +740,9 @@ static std::tuple<SmallVector<int64_t>, SmallVector<int64_t>,
                   SmallVector<int64_t>>
 makeVectorShapes(ArrayRef<int64_t> lhs, ArrayRef<int64_t> rhs,
                  ArrayRef<int64_t> res) {
-  SmallVector<int64_t> vlhs{lhs};
-  SmallVector<int64_t> vrhs{rhs};
-  SmallVector<int64_t> vres{res};
+  SmallVector<int64_t> vlhs(lhs);
+  SmallVector<int64_t> vrhs(rhs);
+  SmallVector<int64_t> vres(res);
   return std::make_tuple(vlhs, vrhs, vres);
 }

mlir/lib/Dialect/Vector/Transforms/VectorDropLeadUnitDim.cpp

Lines changed: 1 addition & 1 deletion
@@ -557,7 +557,7 @@ struct CastAwayConstantMaskLeadingOneDim
     int64_t flatLeadingSize =
         std::accumulate(dimSizes.begin(), dimSizes.begin() + dropDim + 1,
                         static_cast<int64_t>(1), std::multiplies<int64_t>());
-    SmallVector<int64_t> newDimSizes({flatLeadingSize});
+    SmallVector<int64_t> newDimSizes = {flatLeadingSize};
     newDimSizes.append(dimSizes.begin() + dropDim + 1, dimSizes.end());

     auto newMask = rewriter.create<vector::ConstantMaskOp>(

mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp

Lines changed: 2 additions & 2 deletions
@@ -930,8 +930,8 @@ struct BreakDownVectorBitCast : public OpRewritePattern<vector::BitCastOp> {
         loc, elemType, rewriter.getZeroAttr(elemType));
     Value res = rewriter.create<SplatOp>(loc, castDstType, zero);

-    SmallVector<int64_t> sliceShape{castDstLastDim};
-    SmallVector<int64_t> strides{1};
+    SmallVector<int64_t> sliceShape = {castDstLastDim};
+    SmallVector<int64_t> strides = {1};
     VectorType newCastDstType =
         VectorType::get(SmallVector<int64_t>{castDstLastDim / shrinkRatio},
                         castDstType.getElementType());

mlir/lib/Dialect/X86Vector/Transforms/AVXTranspose.cpp

Lines changed: 2 additions & 2 deletions
@@ -66,8 +66,8 @@ Value mlir::x86vector::avx2::intrin::mm256ShufflePs(ImplicitLocOpBuilder &b,
                                                     uint8_t mask) {
   uint8_t b01, b23, b45, b67;
   MaskHelper::extractShuffle(mask, b01, b23, b45, b67);
-  SmallVector<int64_t> shuffleMask{b01, b23, b45 + 8, b67 + 8,
-                                   b01 + 4, b23 + 4, b45 + 8 + 4, b67 + 8 + 4};
+  SmallVector<int64_t> shuffleMask = {
+      b01, b23, b45 + 8, b67 + 8, b01 + 4, b23 + 4, b45 + 8 + 4, b67 + 8 + 4};
   return b.create<vector::ShuffleOp>(v1, v2, shuffleMask);
 }
