
Commit d871dae

[mlir][TilingInterface] Add scf::tileUsingSCFForallOp method to tile ops implementing the interface using scf.forall. (#67083)
Similar to `scf::tileUsingSCFForOp`, which tiles operations that implement the `TilingInterface` using `scf.for` operations, this method tiles such operations using `scf.forall`. Most of the implementation is derived from the `linalg::tileToForallOp` method. Eventually that method will either be deprecated or reimplemented in terms of the method introduced here.
1 parent af3ead4 commit d871dae

4 files changed, +398 -5 lines changed

mlir/include/mlir/Dialect/SCF/Transforms/TileUsingInterface.h

Lines changed: 17 additions & 0 deletions
@@ -51,6 +51,17 @@ struct SCFTilingOptions {
     interchangeVector = llvm::to_vector(interchange);
     return *this;
   }
+
+  /// Specify mapping of loops to devices. This is only respected when the loop
+  /// constructs support such a mapping (like `scf.forall`). Will be ignored
+  /// when using loop constructs that don't support such a mapping (like
+  /// `scf.for`).
+  SmallVector<Attribute> mappingVector = {};
+  SCFTilingOptions &setMapping(ArrayRef<DeviceMappingAttrInterface> mapping) {
+    mappingVector = llvm::map_to_vector(
+        mapping, [](auto attr) -> Attribute { return attr; });
+    return *this;
+  }
 };

 /// Transformation information returned after tiling.
@@ -82,6 +93,12 @@ struct SCFTileAndFuseOptions {
   }
 };

+/// Method to tile an op that implements the `TilingInterface` using
+/// `scf.forall`.
+FailureOr<SCFTilingResult>
+tileUsingSCFForallOp(RewriterBase &rewriter, TilingInterface op,
+                     const SCFTilingOptions &options);
+
 /// Fuse the producer of the source of `candidateSliceOp` by computing the
 /// required slice of the producer in-place. Note that the method
 /// replaces the uses of `candidateSliceOp` with the tiled and fused producer

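For orientation, here is a sketch of how a caller might drive the new entry point declared above. This is not code from this commit: the driver function, the constant tile sizes, the `setTileSizeComputationFunction` setter, the GPU block-mapping attribute construction, and the `replacements` field name are assumptions inferred from the surrounding API; only `setMapping` and `tileUsingSCFForallOp` appear in the diff itself.

// Hypothetical driver (not part of this commit). Assumes `using namespace
// mlir;` and the usual SCF/GPU dialect headers.
static LogicalResult tileToForallExample(RewriterBase &rewriter,
                                         TilingInterface op) {
  MLIRContext *ctx = rewriter.getContext();
  scf::SCFTilingOptions options;
  // Constant tile sizes per loop; a tile size of 0 leaves that loop untiled
  // (setter name assumed, mirroring how the implementation below consumes
  // `tileSizeComputationFunction`).
  options.setTileSizeComputationFunction(
      [](OpBuilder &b, Operation *) -> SmallVector<OpFoldResult> {
        return {b.getIndexAttr(10), b.getIndexAttr(20)};
      });
  // Distribute the two tiled loops over GPU blocks (y, x). Per the comment on
  // `mappingVector`, this is respected by scf.forall and ignored by scf.for.
  SmallVector<DeviceMappingAttrInterface> mapping = {
      gpu::GPUBlockMappingAttr::get(ctx, gpu::Blocks::DimY),
      gpu::GPUBlockMappingAttr::get(ctx, gpu::Blocks::DimX)};
  options.setMapping(mapping);

  FailureOr<scf::SCFTilingResult> result =
      scf::tileUsingSCFForallOp(rewriter, op, options);
  if (failed(result))
    return failure();
  // Use the scf.forall results in place of the untiled op.
  rewriter.replaceOp(op, result->replacements);
  return success();
}

With tile sizes (10, 20) and this mapping, the generated loop header would look like the `scf.forall ... step (10, 20)` with `mapping = [#gpu.block<y>, #gpu.block<x>]` that the first test case further below checks for.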
mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp

Lines changed: 127 additions & 2 deletions
@@ -101,10 +101,10 @@ static bool tileDividesIterationDomain(Range loopRange) {
 /// `tileSize`, i.e., `min(tileSize, range.end() - iv)`.
 static OpFoldResult getBoundedTileSize(OpBuilder &b, Location loc,
                                        Range loopRange, Value iv,
-                                       Value tileSize) {
+                                       OpFoldResult tileSize) {
   std::optional<int64_t> ts = getConstantIntValue(tileSize);
   if (ts && ts.value() == 1)
-    return getAsOpFoldResult(tileSize);
+    return tileSize;

   if (tileDividesIterationDomain(
           Range{loopRange.offset, loopRange.size, tileSize}))
@@ -122,6 +122,19 @@ static OpFoldResult getBoundedTileSize(OpBuilder &b, Location loc,
       b, loc, minMap, SmallVector<OpFoldResult>{iv, tileSize, size});
 }

+/// Clones the operation and updates the destination if the operation
+/// implements the `DestinationStyleOpInterface`.
+static Operation *cloneOpAndUpdateDestinationArgs(RewriterBase &rewriter,
+                                                  Operation *op,
+                                                  ValueRange newDestArgs) {
+  Operation *clonedOp = rewriter.clone(*op);
+  if (auto destinationStyleOp =
+          dyn_cast<DestinationStyleOpInterface>(clonedOp)) {
+    destinationStyleOp.getDpsInitsMutable().assign(newDestArgs);
+  }
+  return clonedOp;
+}
+
 /// Generate an empty loop nest that represents the tiled loop nest shell.
 /// - `loopRanges` specifies the lb, ub and step of the untiled iteration space.
 /// - `tileSizes` is the tile sizes to use. Zero represent untiled loops.
@@ -728,6 +741,118 @@ mlir::scf::tileConsumerAndFuseProducerGreedilyUsingSCFForOp(
       getAsOperations(forLoops), replacements};
 }

+//===----------------------------------------------------------------------===//
+// tileUsingSCFForallOp implementation.
+//===----------------------------------------------------------------------===//
+
+FailureOr<scf::SCFTilingResult>
+mlir::scf::tileUsingSCFForallOp(RewriterBase &rewriter, TilingInterface op,
+                                const scf::SCFTilingOptions &options) {
+  Location loc = op->getLoc();
+  OpBuilder::InsertionGuard g(rewriter);
+
+  // 1. Get the range of loops that are represented by the operation.
+  SmallVector<Range> loopRanges = op.getIterationDomain(rewriter);
+  if (loopRanges.empty())
+    return op->emitOpError("expected non-empty loop ranges");
+  auto hasNonUnitStride = [](Range r) {
+    return !isConstantIntValue(r.stride, 1);
+  };
+  if (llvm::any_of(loopRanges, hasNonUnitStride))
+    return op->emitOpError("only stride-1 supported atm");
+
+  // 2. Get the tile sizes. A tile size of 0 means the loop is neither tiled
+  //    nor distributed. To simplify the logic below, pad the tile sizes with
+  //    0 up to loopRanges.size().
+  SmallVector<OpFoldResult> tileSizeVector =
+      options.tileSizeComputationFunction(rewriter, op);
+  tileSizeVector.resize(loopRanges.size(), rewriter.getIndexAttr(0));
+
+  // 3. Build the offsets, sizes and steps for the tiled and distributed loops.
+  SmallVector<OpFoldResult> lbs, ubs, steps;
+  for (auto [tileSize, loopRange] : llvm::zip(tileSizeVector, loopRanges)) {
+    if (isConstantIntValue(tileSize, 0))
+      continue;
+    lbs.push_back(loopRange.offset);
+    ubs.push_back(loopRange.size);
+    steps.push_back(tileSize);
+  }
+
+  // 4. Gather destination tensors.
+  SmallVector<Value> dest;
+  if (failed(tensor::getOrCreateDestinations(rewriter, loc, op, dest)))
+    return op->emitOpError("failed to get destination tensors");
+
+  // 5. Build the device mapping attribute.
+  std::optional<ArrayAttr> mappingAttr;
+  if (!options.mappingVector.empty())
+    mappingAttr = rewriter.getArrayAttr(ArrayRef(options.mappingVector));
+
+  // 6. Create the ForallOp. We don't use the lambda body-builder version
+  //    because we require the use of RewriterBase in the body, so we manually
+  //    move the insertion point to the body below.
+  auto forallOp =
+      rewriter.create<scf::ForallOp>(loc, lbs, ubs, steps, dest, mappingAttr);
+
+  // 7. Compute the tile offsets and sizes.
+  rewriter.setInsertionPoint(forallOp.getTerminator());
+  SmallVector<OpFoldResult> tiledOffsets, tiledSizes;
+  ValueRange ivs = forallOp.getInductionVars();
+  {
+    int materializedLoopNum = 0;
+    for (auto [tileSize, loopRange] : llvm::zip(tileSizeVector, loopRanges)) {
+      if (isConstantIntValue(tileSize, 0)) {
+        tiledOffsets.push_back(loopRange.offset);
+        tiledSizes.push_back(loopRange.size);
+        continue;
+      }
+      Value iv = ivs[materializedLoopNum++];
+      tiledOffsets.push_back(iv);
+      tiledSizes.push_back(
+          getBoundedTileSize(rewriter, loc, loopRange, iv, tileSize));
+    }
+  }
+
+  // 8. Tile the operation. Clone the operation to allow fixing up the
+  //    destination operands.
+  ArrayRef<BlockArgument> destBbArgs = forallOp.getOutputBlockArguments();
+  Operation *clonedOp =
+      cloneOpAndUpdateDestinationArgs(rewriter, op, destBbArgs);
+  FailureOr<TilingResult> tilingResult =
+      cast<TilingInterface>(clonedOp).getTiledImplementation(
+          rewriter, tiledOffsets, tiledSizes);
+  if (failed(tilingResult))
+    return clonedOp->emitError("failed to tile op");
+  rewriter.eraseOp(clonedOp);
+
+  // 9. Insert the partial results back into the result tensors in parallel.
+  for (auto [index, tiledValue, destBBArg] :
+       llvm::enumerate(tilingResult->tiledValues, destBbArgs)) {
+    // 9.a. Partial subset information is inserted just before the terminator.
+    rewriter.setInsertionPoint(forallOp.getTerminator());
+
+    SmallVector<OpFoldResult> resultOffsets, resultSizes;
+    if (failed(op.getResultTilePosition(rewriter, index, tiledOffsets,
+                                        tiledSizes, resultOffsets,
+                                        resultSizes))) {
+      return op->emitOpError("output offsets couldn't be calculated");
+    }
+
+    SmallVector<OpFoldResult> strides(resultSizes.size(),
+                                      rewriter.getIndexAttr(1));
+    // 9.b. Parallel insertions are inserted at the end of the combining
+    //      terminator.
+    rewriter.setInsertionPointToEnd(forallOp.getTerminator().getBody());
+    rewriter.create<tensor::ParallelInsertSliceOp>(
+        loc, tiledValue, destBBArg, resultOffsets, resultSizes, strides);
+  }
+
+  // 10. Return the tiling result.
+  return scf::SCFTilingResult{
+      tilingResult->tiledOps,
+      {forallOp.getOperation()},
+      llvm::map_to_vector(forallOp.getResults(),
+                          [](auto val) -> Value { return val; })};
+}
+
 //===----------------------------------------------------------------------===//
 // lowerToLoopsUsingSCFForOp implementation.
 //===----------------------------------------------------------------------===//
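One note on the arithmetic before the test file: `getBoundedTileSize` above emits `min(tileSize, ub - iv)` as an `affine.min` to handle possibly-partial tiles, and folds the bound away when the tile size divides the iteration domain. A standalone scalar sketch of that bound (plain C++, purely illustrative; not MLIR code from this commit):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>

// Scalar analogue of getBoundedTileSize: the extent of the tile starting at
// induction variable `iv` within the half-open domain [offset, offset + size).
static int64_t boundedTileSize(int64_t offset, int64_t size, int64_t iv,
                               int64_t tileSize) {
  assert(iv >= offset && iv < offset + size && "iv outside iteration domain");
  return std::min(tileSize, offset + size - iv);
}

int main() {
  // Domain [0, 25) tiled by 10: two full tiles, then a partial tile of 5.
  std::cout << boundedTileSize(0, 25, 0, 10) << "\n";  // 10
  std::cout << boundedTileSize(0, 25, 10, 10) << "\n"; // 10
  std::cout << boundedTileSize(0, 25, 20, 10) << "\n"; // 5
  return 0;
}

This is why, in the static-shape @multi_result test below, only the 128-sized dimension tiled by 10 gets an `affine.min`, while the 300-sized dimension tiled by 20 divides evenly and uses the constant size 20 directly.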
Lines changed: 167 additions & 0 deletions
@@ -0,0 +1,167 @@
// RUN: mlir-opt -test-tiling-interface=tile-using-scf-forall -split-input-file %s | FileCheck %s

func.func @simple_matmul(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
    %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = linalg.matmul {__internal_transform__ = "simple_gemm"}
      ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
      outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}
//  CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0] -> (10, -d0 + s0)>
//  CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0] -> (20, -d0 + s0)>
//      CHECK: func.func @simple_matmul(
// CHECK-SAME:     %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME:     %[[ARG1:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME:     %[[ARG2:[a-zA-Z0-9]+]]: tensor<?x?xf32>
//  CHECK-DAG:   %[[C0:.+]] = arith.constant 0 : index
//  CHECK-DAG:   %[[C1:.+]] = arith.constant 1 : index
//  CHECK-DAG:   %[[M:.+]] = tensor.dim %[[ARG0]], %[[C0]]
//  CHECK-DAG:   %[[K:.+]] = tensor.dim %[[ARG0]], %[[C1]]
//  CHECK-DAG:   %[[N:.+]] = tensor.dim %[[ARG1]], %[[C1]]
//      CHECK:   %[[RESULT:.+]] = scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]]) =
// CHECK-SAME:       (0, 0) to (%[[M]], %[[N]]) step (10, 20) shared_outs(%[[INIT:.+]] = %[[ARG2]])
//      CHECK:     %[[TS_Y:.+]] = affine.min #[[MAP0]](%[[IV0]])[%[[M]]]
//      CHECK:     %[[TS_X:.+]] = affine.min #[[MAP1]](%[[IV1]])[%[[N]]]
//      CHECK:     %[[LHS_TILE:.+]] = tensor.extract_slice %[[ARG0]]
// CHECK-SAME:         [%[[IV0]], 0] [%[[TS_Y]], %[[K]]] [1, 1]
//      CHECK:     %[[RHS_TILE:.+]] = tensor.extract_slice %[[ARG1]]
// CHECK-SAME:         [0, %[[IV1]]] [%[[K]], %[[TS_X]]] [1, 1]
//      CHECK:     %[[INIT_TILE:.+]] = tensor.extract_slice %[[INIT]]
// CHECK-SAME:         [%[[IV0]], %[[IV1]]] [%[[TS_Y]], %[[TS_X]]] [1, 1]
//      CHECK:     %[[GEMM_TILE:.+]] = linalg.matmul
// CHECK-SAME:         ins(%[[LHS_TILE]], %[[RHS_TILE]] :
// CHECK-SAME:         outs(%[[INIT_TILE]] :
//      CHECK:     scf.forall.in_parallel {
//      CHECK:       tensor.parallel_insert_slice %[[GEMM_TILE]] into %[[INIT]]
// CHECK-SAME:           [%[[IV0]], %[[IV1]]] [%[[TS_Y]], %[[TS_X]]] [1, 1]
//      CHECK:   mapping = [#gpu.block<y>, #gpu.block<x>]
//      CHECK:   return %[[RESULT]]

// -----

#map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d2, d0, d1)>
func.func @multi_result(%arg0 : tensor<128x200x300xf32>) -> (tensor<128x300x200xf32>, tensor<300x128x200xf32>) {
  %init0 = tensor.empty() : tensor<128x300x200xf32>
  %init1 = tensor.empty() : tensor<300x128x200xf32>
  %0:2 = linalg.generic {
      indexing_maps = [#map0, #map1, #map2],
      iterator_types = ["parallel", "parallel", "parallel"]}
      {__internal_transform__ = "parallel_generic_transpose"}
      ins(%arg0 : tensor<128x200x300xf32>)
      outs(%init0, %init1 : tensor<128x300x200xf32>, tensor<300x128x200xf32>) {
    ^bb0(%b0 : f32, %b1 : f32, %b2 : f32):
      linalg.yield %b0, %b0 : f32, f32
  } -> (tensor<128x300x200xf32>, tensor<300x128x200xf32>)
  return %0#0, %0#1 : tensor<128x300x200xf32>, tensor<300x128x200xf32>
}
//   CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0) -> (10, -d0 + 128)>
// CHECK-LABEL: func.func @multi_result(
//  CHECK-SAME:     %[[ARG0:[a-zA-Z0-9]+]]: tensor<128x200x300xf32>)
//   CHECK-DAG:   %[[INIT0:.+]] = tensor.empty()
//   CHECK-DAG:   %[[INIT1:.+]] = tensor.empty()
//       CHECK:   %[[OUTER:[a-zA-Z0-9]+]]:2 = scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]]) = (0, 0) to (128, 300) step (10, 20)
//  CHECK-SAME:       shared_outs(%[[ARG1:[a-zA-Z0-9]+]] = %[[INIT0]], %[[ARG2:[a-zA-Z0-9]+]] = %[[INIT1]])
//       CHECK:     %[[TS_Y:.+]] = affine.min #[[$MAP0]](%[[IV0]])
//       CHECK:     %[[ARG_TILE:.+]] = tensor.extract_slice %[[ARG0]]
//  CHECK-SAME:         [%[[IV0]], 0, %[[IV1]]] [%[[TS_Y]], 200, 20] [1, 1, 1]
//   CHECK-DAG:     %[[INIT0_TILE:.+]] = tensor.extract_slice %[[ARG1]]
//  CHECK-SAME:         [%[[IV0]], %[[IV1]], 0] [%[[TS_Y]], 20, 200] [1, 1, 1]
//   CHECK-DAG:     %[[INIT1_TILE:.+]] = tensor.extract_slice %[[ARG2]]
//  CHECK-SAME:         [%[[IV1]], %[[IV0]], 0] [20, %[[TS_Y]], 200] [1, 1, 1]
//       CHECK:     %[[RESULT_TILE:.+]]:2 = linalg.generic
//  CHECK-SAME:         ins(%[[ARG_TILE]] :
//  CHECK-SAME:         outs(%[[INIT0_TILE]], %[[INIT1_TILE]] :
//       CHECK:     scf.forall.in_parallel {
//   CHECK-DAG:       tensor.parallel_insert_slice %[[RESULT_TILE]]#0 into %[[ARG1]][%[[IV0]], %[[IV1]], 0] [%[[TS_Y]], 20, 200] [1, 1, 1]
//   CHECK-DAG:       tensor.parallel_insert_slice %[[RESULT_TILE]]#1 into %[[ARG2]][%[[IV1]], %[[IV0]], 0] [20, %[[TS_Y]], 200] [1, 1, 1]
//       CHECK:     }
//       CHECK:   return %[[OUTER]]#0, %[[OUTER]]#1

// -----

func.func @conv2D(%arg0 : tensor<?x?x?x?xf32>, %arg1 : tensor<?x?x?x?xf32>,
    %arg2 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
  %0 = linalg.conv_2d_nhwc_hwcf {
      strides = dense<[2, 3]> : tensor<2xi64>,
      dilation = dense<[4, 5]> : tensor<2xi64>,
      __internal_transform__ = "simple_conv"}
      ins(%arg0, %arg1 : tensor<?x?x?x?xf32>, tensor<?x?x?x?xf32>)
      outs(%arg2 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
  return %0 : tensor<?x?x?x?xf32>
}
//   CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0)[s0] -> (10, -d0 + s0)>
//   CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0)[s0] -> (20, -d0 + s0)>
//   CHECK-DAG: #[[$MAP2:.+]] = affine_map<(d0)[s0] -> (30, -d0 + s0)>
//   CHECK-DAG: #[[$MAP3:.+]] = affine_map<(d0)[s0] -> (d0 + s0 * 2 - 2)>
//   CHECK-DAG: #[[$MAP4:.+]] = affine_map<(d0)[s0] -> (d0 + s0 * 3 - 3)>
// CHECK-LABEL: func.func @conv2D(
//  CHECK-SAME:     %[[INPUT:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
//  CHECK-SAME:     %[[FILTER:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
//  CHECK-SAME:     %[[INIT:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
//   CHECK-DAG:   %[[C0:.+]] = arith.constant 0 : index
//   CHECK-DAG:   %[[C1:.+]] = arith.constant 1 : index
//   CHECK-DAG:   %[[C2:.+]] = arith.constant 2 : index
//   CHECK-DAG:   %[[C3:.+]] = arith.constant 3 : index
//   CHECK-DAG:   %[[N:.+]] = tensor.dim %[[INPUT]], %[[C0]]
//   CHECK-DAG:   %[[C:.+]] = tensor.dim %[[INPUT]], %[[C3]]
//   CHECK-DAG:   %[[P:.+]] = tensor.dim %[[FILTER]], %[[C0]]
//   CHECK-DAG:   %[[Q:.+]] = tensor.dim %[[FILTER]], %[[C1]]
//   CHECK-DAG:   %[[F:.+]] = tensor.dim %[[FILTER]], %[[C3]]
//   CHECK-DAG:   %[[R:.+]] = tensor.dim %[[INIT]], %[[C1]]
//   CHECK-DAG:   %[[S:.+]] = tensor.dim %[[INIT]], %[[C2]]
//       CHECK:   %[[RESULT:.+]] = scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]], %[[IV2:[a-zA-Z0-9]+]]) =
//  CHECK-SAME:       (0, 0, 0) to (%[[P]], %[[Q]], %[[C]]) step (10, 20, 30) shared_outs(%[[INIT0:.+]] = %[[INIT]])
//   CHECK-DAG:     %[[TS_P:.+]] = affine.min #[[$MAP0]](%[[IV0]])[%[[P]]]
//   CHECK-DAG:     %[[TS_Q:.+]] = affine.min #[[$MAP1]](%[[IV1]])[%[[Q]]]
//   CHECK-DAG:     %[[TS_C:.+]] = affine.min #[[$MAP2]](%[[IV2]])[%[[C]]]
//   CHECK-DAG:     %[[TS_H:.+]] = affine.apply #[[$MAP3]](%[[TS_P]])[%[[R]]]
//   CHECK-DAG:     %[[TS_W:.+]] = affine.apply #[[$MAP4]](%[[TS_Q]])[%[[S]]]
//   CHECK-DAG:     %[[INPUT_TILE:.+]] = tensor.extract_slice %[[INPUT]]
//  CHECK-SAME:         [0, %[[IV0]], %[[IV1]], %[[IV2]]] [%[[N]], %[[TS_H]], %[[TS_W]], %[[TS_C]]]
//   CHECK-DAG:     %[[FILTER_TILE:.+]] = tensor.extract_slice %[[FILTER]]
//  CHECK-SAME:         [%[[IV0]], %[[IV1]], %[[IV2]], 0] [%[[TS_P]], %[[TS_Q]], %[[TS_C]], %[[F]]]
//   CHECK-DAG:     %[[INIT_TILE:.+]] = tensor.extract_slice %[[INIT0]]
//  CHECK-SAME:         [0, 0, 0, 0] [%[[N]], %[[R]], %[[S]], %[[F]]]
//       CHECK:     %[[CONV_TILE:.+]] = linalg.conv_2d_nhwc_hwcf
//  CHECK-SAME:         dilation = dense<[4, 5]> : tensor<2xi64>, strides = dense<[2, 3]> : tensor<2xi64>
//  CHECK-SAME:         ins(%[[INPUT_TILE]], %[[FILTER_TILE]] :
//  CHECK-SAME:         outs(%[[INIT_TILE]] :
//       CHECK:     scf.forall.in_parallel
//       CHECK:       tensor.parallel_insert_slice %[[CONV_TILE]] into %[[INIT0]]
//  CHECK-SAME:           [0, 0, 0, 0] [%[[N]], %[[R]], %[[S]], %[[F]]] [1, 1, 1, 1]
//       CHECK:   return %[[RESULT]]

// -----

// CHECK: #[[$MAP_ADD:.+]] = affine_map<(d0, d1) -> (d0 + d1)>

func.func @indexed_semantics(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
  // Check that we correctly amend "linalg.index" results.
  %0 = linalg.generic {
      indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                       affine_map<(d0, d1) -> (d0, d1)>],
      iterator_types = ["parallel", "parallel"]}
      {__internal_transform__ = "indexed_semantics"}
      ins(%arg0: tensor<?x?xf32>)
      outs(%arg1: tensor<?x?xf32>) {
    ^bb0(%arg2: f32, %arg3: f32):
      %1 = linalg.index 0 : index
      %2 = linalg.index 1 : index
      %3 = arith.addi %1, %2 : index
      %4 = arith.index_cast %3 : index to i64
      %5 = arith.uitofp %4 : i64 to f32
      %6 = arith.addf %5, %arg2 : f32
      linalg.yield %6 : f32
  } -> (tensor<?x?xf32>)
  return %0 : tensor<?x?xf32>
}
// CHECK-LABEL: @indexed_semantics
//       CHECK:   scf.forall (%[[I0:.+]], %[[I1:.+]]) =
//       CHECK:     %[[INDEX0:.+]] = linalg.index 0
//       CHECK:     %[[INDEX0_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[INDEX0]], %[[I0]])
//       CHECK:     %[[INDEX1:.+]] = linalg.index 1
//       CHECK:     %[[INDEX1_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[INDEX1]], %[[I1]])
//       CHECK:     arith.addi %[[INDEX0_AMENDED]], %[[INDEX1_AMENDED]]
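The `__internal_transform__` string attributes in these cases act as filters for the test pass registered under `-test-tiling-interface` (presumably the fourth changed file, which this page does not show). A rough, hypothetical analogue of such a driver, and not the test pass's actual code: a rewrite pattern wrapping the new entry point might look like the following, where the filter-attribute name check and the recursion guard of dropping the attribute from the tiled ops are assumptions.

// Hypothetical rewrite pattern (not part of this commit): tiles any
// TilingInterface op carrying the filter attribute, then removes the filter
// so the pattern does not re-fire on the tiled op.
struct TileToForallPattern
    : public OpInterfaceRewritePattern<TilingInterface> {
  TileToForallPattern(MLIRContext *ctx, scf::SCFTilingOptions options)
      : OpInterfaceRewritePattern<TilingInterface>(ctx),
        options(std::move(options)) {}

  LogicalResult matchAndRewrite(TilingInterface op,
                                PatternRewriter &rewriter) const override {
    if (!op->hasAttr("__internal_transform__"))
      return failure();
    FailureOr<scf::SCFTilingResult> result =
        scf::tileUsingSCFForallOp(rewriter, op, options);
    if (failed(result))
      return failure();
    // Drop the filter from the tiled ops to avoid infinite pattern recursion.
    for (Operation *tiledOp : result->tiledOps)
      tiledOp->removeAttr("__internal_transform__");
    rewriter.replaceOp(op, result->replacements);
    return success();
  }

  scf::SCFTilingOptions options;
};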
