@@ -86,7 +86,7 @@ func.func @test_reshape_1d_down_s2s_explicit(%arg0: tensor<1xf32>) -> tensor<f32
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %arg0, %[[C0]] : tensor<?xf32>
// CHECK: %[[C2:.*]] = arith.constant 2 : index
- // CHECK: %[[VAL_0:.*]] = arith.divui %[[DIM]], %[[C2]] : index
+ // CHECK: %[[VAL_0:.*]] = arith.divsi %[[DIM]], %[[C2]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[ARG_0]] {{\[\[}}0, 1]] output_shape [2, %[[VAL_0]]] : tensor<?xf32> into tensor<2x?xf32>
// CHECK: return %[[EXPANDED]] : tensor<2x?xf32>
func.func @test_reshape_1d_up_d2d_auto(%arg0: tensor<?xf32>) -> tensor<2x?xf32> {
@@ -135,7 +135,7 @@ func.func @test_reshape_2d_down_s2s_explicit(%arg0: tensor<2x3xf32>) -> tensor<6
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C2:.*]] = arith.constant 2 : index
- // CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C2]] : index
+ // CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C2]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1]] output_shape [2, %[[DIV]]] : tensor<?xf32> into tensor<2x?xf32>
// CHECK: return %[[EXPANDED]] : tensor<2x?xf32>
func.func @test_reshape_2d_same_d2d_auto(%arg0: tensor<?x2xf32>) -> tensor<2x?xf32> {
@@ -189,7 +189,7 @@ func.func @test_reshape_2d_same_s2s_explicit(%arg0: tensor<3x2xf32>) -> tensor<2
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C0_0:.*]] = arith.constant 0 : index
- // CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C0_0]] : index
+ // CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C0_0]] : index
// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [0, 3, %[[DIV]]] : tensor<?xf32> into tensor<0x3x?xf32>
// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<0x3x?xf32> to tensor<?x?x?xf32>
// CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
@@ -206,7 +206,7 @@ func.func @test_reshape_3d_same_d2d_auto_empty(%arg0: tensor<3x2x?xf32>) -> tens
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C8:.*]] = arith.constant 8 : index
- // CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C8]] : index
+ // CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C8]] : index
// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [2, %[[DIV]], 4] : tensor<?xf32> into tensor<2x?x4xf32>
// CHECK: %[[VAL_2:.*]] = tensor.cast %[[VAL_1]] : tensor<2x?x4xf32> to tensor<?x?x?xf32>
// CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
@@ -223,7 +223,7 @@ func.func @test_reshape_3d_same_d2d_auto(%arg0: tensor<2x?x?xf32>) -> tensor<?x?
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C6:.*]] = arith.constant 6 : index
- // CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C6]] : index
+ // CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C6]] : index
// CHECK: %[[VAL_1:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [2, 3, %[[DIV]]] : tensor<?xf32> into tensor<2x3x?xf32>
// CHECK: return %[[VAL_1]] : tensor<2x3x?xf32>
func.func @test_reshape_3d_same_d2d_auto_identity(%arg0: tensor<?x3x4xf32>) -> tensor<2x3x?xf32> {
@@ -239,7 +239,7 @@ func.func @test_reshape_3d_same_d2d_auto_identity(%arg0: tensor<?x3x4xf32>) -> t
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C6:.*]] = arith.constant 6 : index
- // CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C6]] : index
+ // CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C6]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [%[[DIV]], 3, 2] : tensor<?xf32> into tensor<?x3x2xf32>
// CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x2xf32> to tensor<?x?x?xf32>
// CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
@@ -256,7 +256,7 @@ func.func @test_reshape_3d_same_d2d_explicit_empty(%arg0: tensor<3x2x?xf32>) ->
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C12:.*]] = arith.constant 12 : index
- // CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C12]] : index
+ // CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C12]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [%[[DIV]], 3, 4] : tensor<?xf32> into tensor<?x3x4xf32>
// CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x4xf32> to tensor<?x?x?xf32>
// CHECK: return %[[VAL_2]] : tensor<?x?x?xf32>
@@ -284,7 +284,7 @@ func.func @test_reshape_3d_same_d2d_explicit_identity(%arg0: tensor<?x3x4xf32>)
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C8:.*]] = arith.constant 8 : index
- // CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C8]] : index
+ // CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C8]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [2, %[[DIV]], 4] : tensor<?xf32> into tensor<2x?x4xf32>
// CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<2x?x4xf32> to tensor<2x3x4xf32>
// CHECK: return %[[VAL_2]] : tensor<2x3x4xf32>
@@ -301,7 +301,7 @@ func.func @test_reshape_3d_same_d2s_auto(%arg0: tensor<?x?x?xf32>) -> tensor<2x3
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[VAL_0]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C12:.*]] = arith.constant 12 : index
- // CHECK: %[[DIV:.*]] = arith.divui %[[DIM]], %[[C12]] : index
+ // CHECK: %[[DIV:.*]] = arith.divsi %[[DIM]], %[[C12]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[VAL_0]] {{\[\[}}0, 1, 2]] output_shape [%[[DIV]], 3, 4] : tensor<?xf32> into tensor<?x3x4xf32>
// CHECK: %[[VAL_2:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x4xf32> to tensor<2x3x4xf32>
// CHECK: return %[[VAL_2]] : tensor<2x3x4xf32>
@@ -328,7 +328,7 @@ func.func @test_reshape_3d_same_s2s_explicit_identity(%arg0: tensor<2x3x4xf32>)
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[COLLAPSED]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C6:.*]] = arith.constant 6 : index
- // CHECK: %[[VAL_0:.*]] = arith.divui %[[DIM]], %[[C6]] : index
+ // CHECK: %[[VAL_0:.*]] = arith.divsi %[[DIM]], %[[C6]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2, 3]] output_shape [%[[VAL_0]], 3, 2, 1] : tensor<?xf32> into tensor<?x3x2x1xf32>
// CHECK: %[[CAST:.*]] = tensor.cast %[[EXPANDED]] : tensor<?x3x2x1xf32> to tensor<1x3x2x1xf32>
// CHECK: return %[[CAST]] : tensor<1x3x2x1xf32>
@@ -357,7 +357,7 @@ func.func @test_reshape_4d_down_d2s_explicit(%arg0: tensor<?x?x?x?xf32>) -> tens
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[COLLAPSED]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C6:.*]] = arith.constant 6 : index
- // CHECK: %[[VAL_0:.*]] = arith.divui %[[DIM]], %[[C6]] : index
+ // CHECK: %[[VAL_0:.*]] = arith.divsi %[[DIM]], %[[C6]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2]] output_shape [%[[VAL_0]], 2, 3] : tensor<?xf32> into tensor<?x2x3xf32>
// CHECK: return %[[EXPANDED]] : tensor<?x2x3xf32>
func.func @test_reshape_5d_down_d2d_auto(%arg0: tensor<?x?x?x2x3xf32>) -> tensor<?x2x3xf32> {
@@ -373,7 +373,7 @@ func.func @test_reshape_5d_down_d2d_auto(%arg0: tensor<?x?x?x2x3xf32>) -> tensor
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[DIM:.*]] = tensor.dim %[[COLLAPSED]], %[[C0]] : tensor<?xf32>
// CHECK: %[[C385:.*]] = arith.constant 385 : index
- // CHECK: %[[VAL_0:.*]] = arith.divui %[[DIM]], %[[C385]] : index
+ // CHECK: %[[VAL_0:.*]] = arith.divsi %[[DIM]], %[[C385]] : index
// CHECK: %[[EXPANDED:.*]] = tensor.expand_shape %[[COLLAPSED]] {{\[\[}}0, 1, 2]] output_shape [%[[VAL_0]], 5, 77] : tensor<?xf32> into tensor<?x5x77xf32>
// CHECK: return %[[EXPANDED]] : tensor<?x5x77xf32>
func.func @test_reshape_6d_down_d2d_auto(%arg0: tensor<1x2x?x5x7x11xf32>) -> tensor<?x5x77xf32> {