
Commit 1f5335c

Make index computation use divsi/remsi (#124390)
The index computation is meant to be signed. Using unsigned could lead to subtle errors. Fix places where some index math was using unsigned operations.

Signed-off-by: MaheshRavishankar <[email protected]>
1 parent 5c5bbff commit 1f5335c
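The failure mode the commit message alludes to is easy to reproduce outside MLIR. A minimal standalone C++ sketch (not part of this commit) of what happens when a possibly negative index value is divided with unsigned semantics, i.e. arith.divui/arith.remui versus arith.divsi/arith.remsi:

// Minimal sketch, not from the commit: signed vs. unsigned division on a
// 64-bit index value that happens to be negative.
#include <cstdint>
#include <cstdio>

int main() {
  int64_t size = -2; // an index value that went negative upstream
  int64_t two = 2;

  // Signed semantics (arith.divsi / arith.remsi): quotient -1, remainder 0.
  std::printf("signed:   %lld %lld\n", (long long)(size / two),
              (long long)(size % two));

  // Unsigned semantics (arith.divui / arith.remui): -2 is reinterpreted
  // as 2^64 - 2, so the quotient is 2^63 - 1 -- a subtle, huge error.
  uint64_t usize = (uint64_t)size;
  std::printf("unsigned: %llu %llu\n", (unsigned long long)(usize / 2u),
              (unsigned long long)(usize % 2u));
  return 0;
}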

8 files changed: +74 -73 lines changed

mlir/lib/Dialect/Arith/Utils/Utils.cpp

Lines changed: 1 addition & 1 deletion
@@ -69,7 +69,7 @@ mlir::inferExpandShapeOutputShape(OpBuilder &b, Location loc,
       Value indexGroupSize = cast<Value>(inputShape[inputIndex]);
       Value indexGroupStaticSizesProduct =
           b.create<arith::ConstantIndexOp>(loc, indexGroupStaticSizesProductInt);
-      Value dynamicDimSize = b.createOrFold<arith::DivUIOp>(
+      Value dynamicDimSize = b.createOrFold<arith::DivSIOp>(
           loc, indexGroupSize, indexGroupStaticSizesProduct);
       outputShapeValues.push_back(dynamicDimSize);
     }
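For context, the quotient this hunk computes is the one dynamic output dimension of an expand_shape group: the group's total size divided by the product of its static sizes. A standalone sketch under that assumption (the helper name inferDynamicDimSize is illustrative, not from the source):

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical helper (illustrative name, not from the commit): infer the
// single dynamic output dimension of an expand_shape group. The division
// must be signed, mirroring the switch from arith.divui to arith.divsi.
int64_t inferDynamicDimSize(int64_t indexGroupSize,
                            const std::vector<int64_t> &staticSizes) {
  int64_t product = 1;
  for (int64_t s : staticSizes)
    product *= s;
  return indexGroupSize / product; // signed division
}

int main() {
  // tensor<?xf32> with dim 24 expanded to [2, ?, 4] -> ? == 3.
  assert(inferDynamicDimSize(24, {2, 4}) == 3);
  return 0;
}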

mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp

Lines changed: 3 additions & 2 deletions
@@ -13,6 +13,7 @@
 #include "mlir/Dialect/Linalg/Passes.h"
 
 #include "mlir/Dialect/Affine/IR/AffineOps.h"
+#include "mlir/Dialect/Arith/IR/Arith.h"
 #include "mlir/Dialect/Arith/Utils/Utils.h"
 #include "mlir/Dialect/Linalg/IR/Linalg.h"
 #include "mlir/Dialect/Linalg/Transforms/Transforms.h"

@@ -1572,9 +1573,9 @@ void generateCollapsedIndexingRegion(Location loc, Block *block,
         rewriter.create<linalg::IndexOp>(loc, foldedDims.index());
     for (auto dim : llvm::reverse(foldedDimsRef.drop_front())) {
       indexReplacementVals[dim] =
-          rewriter.create<arith::RemUIOp>(loc, newIndexVal, loopRange[dim]);
+          rewriter.create<arith::RemSIOp>(loc, newIndexVal, loopRange[dim]);
       newIndexVal =
-          rewriter.create<arith::DivUIOp>(loc, newIndexVal, loopRange[dim]);
+          rewriter.create<arith::DivSIOp>(loc, newIndexVal, loopRange[dim]);
     }
     indexReplacementVals[foldedDims.value().front()] = newIndexVal;
   }
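The loop in this hunk delinearizes the collapsed linalg.index back into the original per-dimension indices, peeling dimensions innermost-first with a rem/div chain and assigning the leftover quotient to the outermost folded dimension. A minimal standalone C++ sketch of that arithmetic (names are illustrative, not from the source):

#include <cassert>
#include <cstdint>
#include <vector>

// Delinearize a collapsed index into per-dimension indices; the innermost
// (fastest-varying) dimension is last in `ranges`. Mirrors the rem/div
// chain generateCollapsedIndexingRegion emits, using signed semantics.
std::vector<int64_t> delinearize(int64_t linearIdx,
                                 const std::vector<int64_t> &ranges) {
  std::vector<int64_t> idxs(ranges.size());
  for (size_t i = ranges.size(); i-- > 1;) {
    idxs[i] = linearIdx % ranges[i]; // arith.remsi
    linearIdx /= ranges[i];          // arith.divsi
  }
  idxs[0] = linearIdx; // leftover goes to the outermost folded dim
  return idxs;
}

int main() {
  // Collapsing loop ranges [3, 4, 5]: linear index 53 -> (2, 2, 3),
  // since 53 == (2 * 4 + 2) * 5 + 3.
  std::vector<int64_t> r = delinearize(53, {3, 4, 5});
  assert(r[0] == 2 && r[1] == 2 && r[2] == 3);
  return 0;
}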

mlir/test/Conversion/TosaToTensor/tosa-to-tensor.mlir

Lines changed: 12 additions & 12 deletions
@@ -86,7 +86,7 @@ func.func @test_reshape_1d_down_s2s_explicit(%arg0: tensor<1xf32>) -> tensor<f32
 // CHECK: %[[C0:.*]] = arith.constant 0 : index
 // CHECK: %[[DIM:.*]] = tensor.dim %arg0, %[[C0]] : tensor<?xf32>
 // CHECK: %[[C2:.*]] = arith.constant 2 : index
-// CHECK: %[[VAL_0:.*]] = arith.divui %[[DIM]], %[[C2]] : index
+// CHECK: %[[VAL_0:.*]] = arith.divsi %[[DIM]], %[[C2]] : index
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[ARG_0]] {{\[\[}}0, 1]] output_shape [2, %[[VAL_0]]] : tensor<?xf32> into tensor<2x?xf32>
 // CHECK: return %[[EXPANDED]] : tensor<2x?xf32>
 func.func @test_reshape_1d_up_d2d_auto(%arg0: tensor<?xf32>) -> tensor<2x?xf32> {

@@ -135,7 +135,7 @@ func.func @test_reshape_2d_down_s2s_explicit(%arg0: tensor<2x3xf32>) -> tensor<6
 // CHECK: %[[C0:.*]] = arith.constant 0 : index
 // CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
 // CHECK: %[[C2:.*]] = arith.constant 2 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C2]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C2]] : index
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1]] output_shape [2, %[[DIV]]] : tensor<?xf32> into tensor<2x?xf32>
 // CHECK: return %[[EXPANDED]] : tensor<2x?xf32>
 func.func @test_reshape_2d_same_d2d_auto(%arg0: tensor<?x2xf32>) -> tensor<2x?xf32> {

@@ -189,7 +189,7 @@ func.func @test_reshape_2d_same_s2s_explicit(%arg0: tensor<3x2xf32>) -> tensor<2
 // CHECK: %[[C0:.*]] = arith.constant 0 : index
 // CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
 // CHECK: %[[C0_0:.*]] = arith.constant 0 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C0_0]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C0_0]] : index
 // CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [0, 3, %[[DIV]]] : tensor<?xf32> into tensor<0x3x?xf32>
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<0x3x?xf32> to tensor<?x?x?xf32>
 // CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>

@@ -206,7 +206,7 @@ func.func @test_reshape_3d_same_d2d_auto_empty(%arg0: tensor<3x2x?xf32>) -> tens
 // CHECK: %[[C0:.*]] = arith.constant 0 : index
 // CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
 // CHECK: %[[C8:.*]] = arith.constant 8 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C8]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C8]] : index
 // CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [2, %[[DIV]], 4] : tensor<?xf32> into tensor<2x?x4xf32>
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<2x?x4xf32> to tensor<?x?x?xf32>
 // CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>

@@ -223,7 +223,7 @@ func.func @test_reshape_3d_same_d2d_auto(%arg0: tensor<2x?x?xf32>) -> tensor<?x?
 // CHECK: %[[C0:.*]] = arith.constant 0 : index
 // CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
 // CHECK: %[[C6:.*]] = arith.constant 6 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C6]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C6]] : index
 // CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [2, 3, %[[DIV]]] : tensor<?xf32> into tensor<2x3x?xf32>
 // CHECK: return %[[VAL_1]] : tensor<2x3x?xf32>
 func.func @test_reshape_3d_same_d2d_auto_identity(%arg0: tensor<?x3x4xf32>) -> tensor<2x3x?xf32> {

@@ -239,7 +239,7 @@ func.func @test_reshape_3d_same_d2d_auto_identity(%arg0: tensor<?x3x4xf32>) -> t
 // CHECK: %[[C0:.*]] = arith.constant 0 : index
 // CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
 // CHECK: %[[C6:.*]] = arith.constant 6 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C6]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C6]] : index
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [%[[DIV]], 3, 2] : tensor<?xf32> into tensor<?x3x2xf32>
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x2xf32> to tensor<?x?x?xf32>
 // CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>

@@ -256,7 +256,7 @@ func.func @test_reshape_3d_same_d2d_explicit_empty(%arg0: tensor<3x2x?xf32>) ->
 // CHECK: %[[C0:.*]] = arith.constant 0 : index
 // CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
 // CHECK: %[[C12:.*]] = arith.constant 12 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C12]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C12]] : index
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [%[[DIV]], 3, 4] : tensor<?xf32> into tensor<?x3x4xf32>
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x4xf32> to tensor<?x?x?xf32>
 // CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>

@@ -284,7 +284,7 @@ func.func @test_reshape_3d_same_d2d_explicit_identity(%arg0: tensor<?x3x4xf32>)
 // CHECK: %[[C0:.*]] = arith.constant 0 : index
 // CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
 // CHECK: %[[C8:.*]] = arith.constant 8 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C8]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C8]] : index
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [2, %[[DIV]], 4] : tensor<?xf32> into tensor<2x?x4xf32>
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<2x?x4xf32> to tensor<2x3x4xf32>
 // CHECK: return %[[VAL_2]] : tensor<2x3x4xf32>

@@ -301,7 +301,7 @@ func.func @test_reshape_3d_same_d2s_auto(%arg0: tensor<?x?x?xf32>) -> tensor<2x3
 // CHECK: %[[C0:.*]] = arith.constant 0 : index
 // CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
 // CHECK: %[[C12:.*]] = arith.constant 12 : index
-// CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C12]] : index
+// CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C12]] : index
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [%[[DIV]], 3, 4] : tensor<?xf32> into tensor<?x3x4xf32>
 // CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x4xf32> to tensor<2x3x4xf32>
 // CHECK: return %[[VAL_2]] : tensor<2x3x4xf32>

@@ -328,7 +328,7 @@ func.func @test_reshape_3d_same_s2s_explicit_identity(%arg0: tensor<2x3x4xf32>)
 // CHECK: %[[C0:.*]] = arith.constant 0 : index
 // CHECK: %[[DIM:.*]] = tensor.dim %[[COLLAPSED]], %[[C0]] : tensor<?xf32>
 // CHECK: %[[C6:.*]] = arith.constant 6 : index
-// CHECK: %[[VAL_0:.*]] = arith.divui %[[DIM]], %[[C6]] : index
+// CHECK: %[[VAL_0:.*]] = arith.divsi %[[DIM]], %[[C6]] : index
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2, 3]] output_shape [%[[VAL_0]], 3, 2, 1] : tensor<?xf32> into tensor<?x3x2x1xf32>
 // CHECK: %[[CAST:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x2x1xf32> to tensor<1x3x2x1xf32>
 // CHECK: return %[[CAST]] : tensor<1x3x2x1xf32>

@@ -357,7 +357,7 @@ func.func @test_reshape_4d_down_d2s_explicit(%arg0: tensor<?x?x?x?xf32>) -> tens
 // CHECK: %[[C0:.*]] = arith.constant 0 : index
 // CHECK: %[[DIM:.*]] = tensor.dim %[[COLLAPSED]], %[[C0]] : tensor<?xf32>
 // CHECK: %[[C6:.*]] = arith.constant 6 : index
-// CHECK: %[[VAL_0:.*]] = arith.divui %[[DIM]], %[[C6]] : index
+// CHECK: %[[VAL_0:.*]] = arith.divsi %[[DIM]], %[[C6]] : index
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2]] output_shape [%[[VAL_0]], 2, 3] : tensor<?xf32> into tensor<?x2x3xf32>
 // CHECK: return %[[EXPANDED]] : tensor<?x2x3xf32>
 func.func @test_reshape_5d_down_d2d_auto(%arg0: tensor<?x?x?x2x3xf32>) -> tensor<?x2x3xf32> {

@@ -373,7 +373,7 @@ func.func @test_reshape_5d_down_d2d_auto(%arg0: tensor<?x?x?x2x3xf32>) -> tensor
 // CHECK: %[[C0:.*]] = arith.constant 0 : index
 // CHECK: %[[DIM:.*]] = tensor.dim %[[COLLAPSED]], %[[C0]] : tensor<?xf32>
 // CHECK: %[[C385:.*]] = arith.constant 385 : index
-// CHECK: %[[VAL_0:.*]] = arith.divui %[[DIM]], %[[C385]] : index
+// CHECK: %[[VAL_0:.*]] = arith.divsi %[[DIM]], %[[C385]] : index
 // CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2]] output_shape [%[[VAL_0]], 5, 77] : tensor<?xf32> into tensor<?x5x77xf32>
 // CHECK: return %[[EXPANDED]] : tensor<?x5x77xf32>
 func.func @test_reshape_6d_down_d2d_auto(%arg0: tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32> {

mlir/test/Dialect/Linalg/data-layout-propagation.mlir

Lines changed: 3 additions & 3 deletions
@@ -1301,7 +1301,7 @@ func.func @push_down_unpack_through_expand(%5: tensor<?x32x8x8xf32>, %dim: index
 // CHECK: %[[C32:.+]] = arith.constant 32 : index
 // CHECK: %[[C0:.+]] = arith.constant 0 : index
 // CHECK: %[[DIM0:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x32x8x8xf32>
-// CHECK: %[[SZ0:.+]] = arith.divui %[[DIM0]], %[[C32]] : index
+// CHECK: %[[SZ0:.+]] = arith.divsi %[[DIM0]], %[[C32]] : index
 // CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3], [4]] output_shape [%[[SZ0]], 32, 32, 8, 8] : tensor<?x32x8x8xf32> into tensor<?x32x32x8x8xf32>
 // CHECK: %[[DIM:.+]] = tensor.dim %[[EXPANDED]], %[[C0]] : tensor<?x32x32x8x8xf32>
 // CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x256x256xf32>

@@ -1322,7 +1322,7 @@ func.func @push_down_unpack_through_expand_empty_outer_dims_perm(%5: tensor<?x32
 // CHECK: %[[C32:.+]] = arith.constant 32 : index
 // CHECK: %[[C0:.+]] = arith.constant 0 : index
 // CHECK: %[[DIM0:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x32x8x8xf32>
-// CHECK: %[[SZ0:.+]] = arith.divui %[[DIM0]], %[[C32]] : index
+// CHECK: %[[SZ0:.+]] = arith.divsi %[[DIM0]], %[[C32]] : index
 // CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3], [4]] output_shape [%[[SZ0]], 32, 32, 8, 8] : tensor<?x32x8x8xf32> into tensor<?x32x32x8x8xf32>
 // CHECK: %[[DIM:.+]] = tensor.dim %[[EXPANDED]], %[[C0]] : tensor<?x32x32x8x8xf32>
 // CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x256x256xf32>

@@ -1373,7 +1373,7 @@ func.func @push_down_unpack_through_expand_on_outer_dims(%5: tensor<?x32x8xf32>,
 // CHECK: %[[C256:.+]] = arith.constant 256 : index
 // CHECK: %[[C0:.+]] = arith.constant 0 : index
 // CHECK: %[[DIM0:.+]] = tensor.dim %[[ARG0]], %[[C0]] : tensor<?x32x8xf32>
-// CHECK: %[[SZ0:.+]] = arith.divui %[[DIM0]], %[[C256]] : index
+// CHECK: %[[SZ0:.+]] = arith.divsi %[[DIM0]], %[[C256]] : index
 // CHECK: %[[EXPANDED:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3]] output_shape [%[[SZ0]], 256, 32, 8] : tensor<?x32x8xf32> into tensor<?x256x32x8xf32>
 // CHECK: %[[DIM:.+]] = tensor.dim %[[EXPANDED]], %[[C0]] : tensor<?x256x32x8xf32>
 // CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM]]) : tensor<?x256x256xf32>

mlir/test/Dialect/Linalg/fuse-with-reshape-by-collapsing.mlir

Lines changed: 19 additions & 19 deletions
@@ -99,14 +99,14 @@ func.func @fuse_by_collapsing_indexing_op(%arg0 : tensor<2x12x5x336x9xi32>,
 // CHECK-DAG: %[[C7:.+]] = arith.constant 7 : index
 // CHECK: %[[IV0:.+]] = linalg.index 0
 // CHECK: %[[IV1:.+]] = linalg.index 1
-// CHECK: %[[REM_IV1:.+]] = arith.remui %[[IV1]], %[[C4]]
-// CHECK: %[[DIV_IV1:.+]] = arith.divui %[[IV1]], %[[C4]]
+// CHECK: %[[REM_IV1:.+]] = arith.remsi %[[IV1]], %[[C4]]
+// CHECK: %[[DIV_IV1:.+]] = arith.divsi %[[IV1]], %[[C4]]
 // CHECK: %[[IV2:.+]] = linalg.index 2
 // CHECK: %[[IV3:.+]] = linalg.index 3
-// CHECK: %[[REM1_IV3:.+]] = arith.remui %[[IV3]], %[[C8]]
-// CHECK: %[[DIV1_IV3:.+]] = arith.divui %[[IV3]], %[[C8]]
-// CHECK: %[[REM2_IV3:.+]] = arith.remui %[[DIV1_IV3]], %[[C7]]
-// CHECK: %[[DIV2_IV3:.+]] = arith.divui %[[DIV1_IV3]], %[[C7]]
+// CHECK: %[[REM1_IV3:.+]] = arith.remsi %[[IV3]], %[[C8]]
+// CHECK: %[[DIV1_IV3:.+]] = arith.divsi %[[IV3]], %[[C8]]
+// CHECK: %[[REM2_IV3:.+]] = arith.remsi %[[DIV1_IV3]], %[[C7]]
+// CHECK: %[[DIV2_IV3:.+]] = arith.divsi %[[DIV1_IV3]], %[[C7]]
 // CHECK: %[[IV4:.+]] = linalg.index 4
 // CHECK: %[[T0:.+]] = arith.addi %[[IV0]], %[[DIV_IV1]]
 // CHECK: %[[T1:.+]] = arith.addi %[[T0]], %[[REM_IV1]]

@@ -215,13 +215,13 @@ func.func @fuse_by_collapsing_dynamic(%arg0 : tensor<?x?x?x?x?xi32>,
 // CHECK-DAG: %[[D1:.+]] = tensor.dim %[[EXPAND]], %[[C5]]
 // CHECK: linalg.generic
 // CHECK: %[[IV0:.+]] = linalg.index 1
-// CHECK: %[[REM1_IV0:.+]] = arith.remui %[[IV0]], %[[C5]]
-// CHECK: %[[DIV1_IV0:.+]] = arith.divui %[[IV0]], %[[C5]]
-// CHECK: %[[REM2_IV0:.+]] = arith.remui %[[DIV1_IV0]], %[[D1]]
-// CHECK: %[[DIV2_IV0:.+]] = arith.divui %[[DIV1_IV0]], %[[D1]]
+// CHECK: %[[REM1_IV0:.+]] = arith.remsi %[[IV0]], %[[C5]]
+// CHECK: %[[DIV1_IV0:.+]] = arith.divsi %[[IV0]], %[[C5]]
+// CHECK: %[[REM2_IV0:.+]] = arith.remsi %[[DIV1_IV0]], %[[D1]]
+// CHECK: %[[DIV2_IV0:.+]] = arith.divsi %[[DIV1_IV0]], %[[D1]]
 // CHECK: %[[IV1:.+]] = linalg.index 3
-// CHECK: %[[REM1_IV1:.+]] = arith.remui %[[IV1]], %[[D0]]
-// CHECK: %[[DIV1_IV1:.+]] = arith.divui %[[IV1]], %[[D0]]
+// CHECK: %[[REM1_IV1:.+]] = arith.remsi %[[IV1]], %[[D0]]
+// CHECK: %[[DIV1_IV1:.+]] = arith.divsi %[[IV1]], %[[D0]]
 
 // -----
 
@@ -439,7 +439,7 @@ func.func @fuse_only_one_reassociation(%arg0 : tensor<?x?xf32>, %arg1 : tensor<4
 // CHECK-SAME: outs(%[[COLLAPSE_ARG1_1]] :
 // CHECK: %[[DIM:.+]] = tensor.dim %[[GENERIC]], %[[C1]] : tensor<4x?x?xf32>
 // CHECK: %[[DIM_2:.+]] = tensor.dim %[[GENERIC]], %[[C2]] : tensor<4x?x?xf32>
-// CHECK: %[[VAL_1:.+]] = arith.divui %[[DIM_2]], %[[C8]] : index
+// CHECK: %[[VAL_1:.+]] = arith.divsi %[[DIM_2]], %[[C8]] : index
 // CHECK: %[[EXPANDED_3:.+]] = tensor.expand_shape %[[GENERIC]] {{\[\[}}0], [1], [2, 3]] output_shape [4, %[[DIM]], %[[VAL_1]], 8] : tensor<4x?x?xf32> into tensor<4x?x?x8xf32>
 // CHECK: return %[[EXPANDED_3]]

@@ -492,20 +492,20 @@ func.func @fold_non_consecutive_dims(%arg0 : tensor<?x?xi32>, %sz0: index, %sz1:
 // CHECK-SAME: outs(%[[COLLAPSE_INIT]] :
 // CHECK-NEXT: ^bb{{[0-9]}}
 // CHECK: %[[ID0:.+]] = linalg.index 0
-// CHECK-DAG: %[[T0:.+]] = arith.remui %[[ID0]], %[[C4]]
-// CHECK-DAG: %[[T1:.+]] = arith.divui %[[ID0]], %[[C4]]
+// CHECK-DAG: %[[T0:.+]] = arith.remsi %[[ID0]], %[[C4]]
+// CHECK-DAG: %[[T1:.+]] = arith.divsi %[[ID0]], %[[C4]]
 // CHECK: %[[ID1:.+]] = linalg.index 1
-// CHECK-DAG: %[[T2:.+]] = arith.remui %[[ID1]], %[[C8]]
-// CHECK-DAG: %[[T3:.+]] = arith.divui %[[ID1]], %[[C8]]
+// CHECK-DAG: %[[T2:.+]] = arith.remsi %[[ID1]], %[[C8]]
+// CHECK-DAG: %[[T3:.+]] = arith.divsi %[[ID1]], %[[C8]]
 // CHECK-DAG: %[[T4:.+]] = arith.addi %[[T1]], %[[T2]]
 // CHECK-DAG: %[[T5:.+]] = arith.addi %[[T4]], %[[T0]]
 // CHECK-DAG: %[[T6:.+]] = arith.addi %[[T5]], %[[T3]]
 // CHECK-DAG: %[[T7:.+]] = arith.index_cast %[[T6]]
 // CHECK: linalg.yield %[[T7]]
 // CHECK: %[[DIM_1:.+]] = tensor.dim %[[GENERIC]], %[[C0]] : tensor<?x?xi32>
 // CHECK: %[[DIM_2:.+]] = tensor.dim %[[GENERIC]], %[[C1]] : tensor<?x?xi32>
-// CHECK: %[[VAL_2:.+]] = arith.divui %[[DIM_1]], %[[C8]] : index
-// CHECK: %[[VAL_3:.+]] = arith.divui %[[DIM_2]], %[[C4]] : index
+// CHECK: %[[VAL_2:.+]] = arith.divsi %[[DIM_1]], %[[C8]] : index
+// CHECK: %[[VAL_3:.+]] = arith.divsi %[[DIM_2]], %[[C4]] : index
 // CHECK: %[[EXPANDED_3:.+]] = tensor.expand_shape %[[GENERIC]] {{\[\[}}0, 1], [2, 3]] output_shape [%[[VAL_2]], 8, %[[VAL_3]], 4] : tensor<?x?xi32> into tensor<?x8x?x4xi32>
 // CHECK: return %[[EXPANDED_3]]
mlir/test/Dialect/Linalg/fusion-push-reshape.mlir

Lines changed: 1 addition & 1 deletion
@@ -12,7 +12,7 @@
 // CHECK-SAME: iterator_types = ["parallel", "parallel"]}
 // CHECK-SAME: ins(%[[A]], %[[B]] : tensor<?x16xf32>, tensor<16xf32>) outs(%[[RI]] : tensor<?x16xf32>)
 // CHECK: %[[DIM:.*]] = tensor.dim %[[R]], %[[C0]] : tensor<?x16xf32>
-// CHECK: %[[VAL_1:.*]] = arith.divui %[[DIM]], %[[C112]] : index
+// CHECK: %[[VAL_1:.*]] = arith.divsi %[[DIM]], %[[C112]] : index
 // CHECK: %[[RR:.*]] = tensor.expand_shape %[[R]] {{\[\[}}0, 1], [2]] output_shape [%[[VAL_1]], 112, 16] : tensor<?x16xf32> into tensor<?x112x16xf32>
 // CHECK: return %[[RR]] : tensor<?x112x16xf32>
 func.func @reshape(%A: tensor<?x16xf32>, %B: tensor<16xf32>, %init: tensor<?x112x16xf32>, %sz0: index) -> tensor<?x112x16xf32> {
