Commit 3e4a8c2
[mlir][sparse] remove most bufferization.alloc_tensor ops from sparse (#66847)
The only ones left need actual deprecation in the bufferization module.
1 parent: a009fa7

14 files changed: +77 -50 lines
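In short, sparse outputs that were previously materialized through the bufferization dialect are now created with the standard empty tensor op. A minimal before/after sketch (the #CSR encoding shown is one typical example from the tests below, not a fixed definition):

  #CSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>

  // Before: sparse output materialized via the bufferization dialect.
  %init = bufferization.alloc_tensor() : tensor<8x8xf32, #CSR>

  // After: the standard empty tensor op carries the sparse encoding instead.
  %init = tensor.empty() : tensor<8x8xf32, #CSR>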

mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp

Lines changed: 43 additions & 2 deletions
@@ -705,6 +705,7 @@ class SparseCastConverter : public OpConversionPattern<tensor::CastOp> {
 };
 
 /// Sparse codegen rule for the alloc operator.
+/// TODO(springerm): remove when bufferization.alloc_tensor is gone
 class SparseTensorAllocConverter
     : public OpConversionPattern<bufferization::AllocTensorOp> {
 public:
@@ -764,6 +765,46 @@ class SparseTensorAllocConverter
   bool enableBufferInitialization;
 };
 
+/// Sparse codegen rule for the empty tensor operator.
+/// TODO(springerm): remove when bufferization.alloc_tensor is gone
+class SparseTensorEmptyConverter : public OpConversionPattern<tensor::EmptyOp> {
+public:
+  using OpConversionPattern::OpConversionPattern;
+  SparseTensorEmptyConverter(TypeConverter &typeConverter, MLIRContext *context,
+                             bool enableInit)
+      : OpConversionPattern(typeConverter, context),
+        enableBufferInitialization(enableInit) {}
+
+  LogicalResult
+  matchAndRewrite(tensor::EmptyOp op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    const auto resType = getSparseTensorType(op);
+    if (!resType.hasEncoding())
+      return failure();
+
+    // Construct allocation for each field.
+    const Location loc = op.getLoc();
+    const Value sizeHint; // none
+    const ValueRange dynSizes = adaptor.getDynamicSizes();
+    const size_t found = dynSizes.size();
+    const int64_t expected = resType.getNumDynamicDims();
+    if (found != static_cast<size_t>(expected))
+      return rewriter.notifyMatchFailure(
+          op, llvm::formatv(
+                  "Got wrong number of dynamic sizes: Found={0}, Expected={1}",
+                  found, expected));
+    SmallVector<Value> fields;
+    createAllocFields(rewriter, loc, resType, dynSizes,
+                      enableBufferInitialization, fields, sizeHint);
+    // Replace operation with resulting memrefs.
+    rewriter.replaceOp(op, genTuple(rewriter, loc, resType, fields));
+    return success();
+  }
+
+private:
+  bool enableBufferInitialization;
+};
+
 /// Sparse codegen rule for the dealloc operator.
 class SparseTensorDeallocConverter
     : public OpConversionPattern<bufferization::DeallocTensorOp> {
@@ -1546,6 +1587,6 @@ void mlir::populateSparseTensorCodegenPatterns(
       patterns.getContext());
   patterns.add<SparseTensorDeallocConverter>(
       typeConverter, patterns.getContext(), createSparseDeallocs);
-  patterns.add<SparseTensorAllocConverter>(typeConverter, patterns.getContext(),
-                                           enableBufferInitialization);
+  patterns.add<SparseTensorAllocConverter, SparseTensorEmptyConverter>(
+      typeConverter, patterns.getContext(), enableBufferInitialization);
 }
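To make the match conditions concrete, here is a small usage sketch in MLIR (%d0 and %d1 are hypothetical index values; the #CSC case mirrors the tests below): the new SparseTensorEmptyConverter fires only when the result type carries a sparse encoding and the operand count equals the number of dynamic dimensions.

  // Matched: sparse encoding present; one dynamic size for the single '?' dim.
  %0 = tensor.empty(%arg0) : tensor<10x?xf64, #CSC>

  // Not matched: no sparse encoding on the result, so the pattern returns
  // failure() and leaves the op to other lowerings.
  %1 = tensor.empty(%d0, %d1) : tensor<?x?xf64>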

mlir/test/Dialect/SparseTensor/GPU/gpu_spgemm_lib.mlir

Lines changed: 1 addition & 2 deletions
@@ -9,7 +9,6 @@
 // CHECK: %[[VAL_2:.*]] = arith.constant 8 : index
 // CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
 // CHECK: %[[VAL_4:.*]] = arith.constant 9 : index
-// CHECK: %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<8x8xf32, #{{.*}}>
 // CHECK: %[[VAL_6:.*]] = sparse_tensor.number_of_entries %[[VAL_0]] : tensor<8x8xf32, #{{.*}}>
 // CHECK: %[[VAL_7:.*]] = sparse_tensor.number_of_entries %[[VAL_1]] : tensor<8x8xf32, #{{.*}}>
 // CHECK: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf32, #{{.*}}>
@@ -92,7 +91,7 @@
 // CHECK: }
 func.func @matmulCSR(%A: tensor<8x8xf32, #CSR>,
                      %B: tensor<8x8xf32, #CSR>) -> tensor<8x8xf32, #CSR> {
-  %init = bufferization.alloc_tensor() : tensor<8x8xf32, #CSR>
+  %init = tensor.empty() : tensor<8x8xf32, #CSR>
   %C = linalg.matmul
     ins(%A, %B: tensor<8x8xf32, #CSR>,
                 tensor<8x8xf32, #CSR>)

mlir/test/Dialect/SparseTensor/codegen.mlir

Lines changed: 5 additions & 18 deletions
@@ -329,7 +329,7 @@ func.func @sparse_dealloc_csr(%arg0: tensor<?x?xf64, #CSR>) {
 // CHECK: %[[A26:.*]] = sparse_tensor.storage_specifier.set %[[A18]] pos_mem_sz at 1 with %[[A25]] : !sparse_tensor.storage_specifier
 // CHECK: return %[[A23]], %[[A6]], %[[A8]], %[[A26]] : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 func.func @sparse_alloc_csc(%arg0: index) -> tensor<10x?xf64, #CSC> {
-  %0 = bufferization.alloc_tensor(%arg0) : tensor<10x?xf64, #CSC>
+  %0 = tensor.empty(%arg0) : tensor<10x?xf64, #CSC>
   %1 = sparse_tensor.load %0 : tensor<10x?xf64, #CSC>
   return %1 : tensor<10x?xf64, #CSC>
 }
@@ -351,24 +351,11 @@ func.func @sparse_alloc_csc(%arg0: index) -> tensor<10x?xf64, #CSC> {
 // CHECK: %[[A16:.*]] = sparse_tensor.storage_specifier.set %[[A10]] val_mem_sz with %[[A14]] : !sparse_tensor.storage_specifier
 // CHECK: return %[[A15]], %[[A16]] : memref<?xf64>, !sparse_tensor.storage_specifier
 func.func @sparse_alloc_3d() -> tensor<10x20x30xf64, #Dense3D> {
-  %0 = bufferization.alloc_tensor() : tensor<10x20x30xf64, #Dense3D>
+  %0 = tensor.empty() : tensor<10x20x30xf64, #Dense3D>
   %1 = sparse_tensor.load %0 : tensor<10x20x30xf64, #Dense3D>
   return %1 : tensor<10x20x30xf64, #Dense3D>
 }
 
-// CHECK-LABEL: func.func @sparse_alloc_coo_with_size_hint(
-// CHECK-SAME: %[[HINT:.*]]: index)
-// CHECK: %[[C2:.*]] = arith.constant 2 : index
-// CHECK: %[[M2:.*]] = arith.muli %[[HINT]], %c2 : index
-// CHECK: %[[A1:.*]] = memref.alloc() : memref<2xindex>
-// CHECK: %[[A2:.*]] = memref.alloc(%[[M2]]) : memref<?xindex>
-// CHECK: %[[A3:.*]] = memref.alloc(%[[HINT]]) : memref<?xf64>
-func.func @sparse_alloc_coo_with_size_hint(%arg0: index) -> tensor<10x20xf64, #Coo> {
-  %0 = bufferization.alloc_tensor() size_hint=%arg0 : tensor<10x20xf64, #Coo>
-  %1 = sparse_tensor.load %0 : tensor<10x20xf64, #Coo>
-  return %1 : tensor<10x20xf64, #Coo>
-}
-
 // CHECK-LABEL: func.func @sparse_expansion1()
 // CHECK: %[[A:.*]] = memref.alloc() : memref<8xf64>
 // CHECK: %[[B:.*]] = memref.alloc() : memref<8xi1>
@@ -378,7 +365,7 @@ func.func @sparse_alloc_coo_with_size_hint(%arg0: index) -> tensor<10x20xf64, #C
 // CHECK-DAG: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<8xi1>)
 // CHECK: return %[[D]] : memref<?xindex>
 func.func @sparse_expansion1() -> memref<?xindex> {
-  %0 = bufferization.alloc_tensor() : tensor<4x8xf64, #CSR>
+  %0 = tensor.empty() : tensor<4x8xf64, #CSR>
   %values, %filled, %added, %count = sparse_tensor.expand %0
     : tensor<4x8xf64, #CSR> to memref<?xf64>, memref<?xi1>, memref<?xindex>
   return %added : memref<?xindex>
@@ -393,7 +380,7 @@ func.func @sparse_expansion1() -> memref<?xindex> {
 // CHECK-DAG: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<4xi1>)
 // CHECK: return %[[D]] : memref<?xindex>
 func.func @sparse_expansion2() -> memref<?xindex> {
-  %0 = bufferization.alloc_tensor() : tensor<4x8xf64, #CSC>
+  %0 = tensor.empty() : tensor<4x8xf64, #CSC>
   %values, %filled, %added, %count = sparse_tensor.expand %0
     : tensor<4x8xf64, #CSC> to memref<?xf64>, memref<?xi1>, memref<?xindex>
   return %added : memref<?xindex>
@@ -409,7 +396,7 @@ func.func @sparse_expansion2() -> memref<?xindex> {
 // CHECK: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<?xi1>)
 // CHECK: return %[[D]] : memref<?xindex>
 func.func @sparse_expansion3(%arg0: index, %arg1: index) -> memref<?xindex> {
-  %0 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64, #CSC>
+  %0 = tensor.empty(%arg0, %arg1) : tensor<?x?xf64, #CSC>
   %values, %filled, %added, %count = sparse_tensor.expand %0
     : tensor<?x?xf64, #CSC> to memref<?xf64>, memref<?xi1>, memref<?xindex>
   return %added : memref<?xindex>
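A note on the @sparse_alloc_coo_with_size_hint test deleted above: bufferization.alloc_tensor accepts a size_hint operand, e.g.

  %0 = bufferization.alloc_tensor() size_hint=%arg0 : tensor<10x20xf64, #Coo>

while tensor.empty has no equivalent, which is presumably why this case is removed outright rather than migrated; it falls under the remaining uses that, per the commit message, still need actual deprecation in the bufferization module.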

mlir/test/Dialect/SparseTensor/codegen_buffer_initialization.mlir

Lines changed: 3 additions & 3 deletions
@@ -2,7 +2,7 @@
 
 #SV = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>
 
-// CHECK-LABEL: func.func @sparse_alloc_sparse_vector(
+// CHECK-LABEL: func.func @empty_sparse_vector(
 // CHECK-SAME: %[[VAL_0:.*]]: index) -> (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 // CHECK: %[[VAL_1:.*]] = arith.constant 1 : index
 // CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f64
@@ -24,8 +24,8 @@
 // CHECK: %[[VAL_19:.*]], %[[VAL_21:.*]] = sparse_tensor.push_back %[[VAL_17]], %[[VAL_15]], %[[VAL_3]], %[[VAL_1]] : index, memref<?xindex>, index, index
 // CHECK: %[[VAL_22:.*]] = sparse_tensor.storage_specifier.set %[[VAL_18]] pos_mem_sz at 0 with %[[VAL_21]] : !sparse_tensor.storage_specifier
 // CHECK: return %[[VAL_19]], %[[VAL_7]], %[[VAL_9]], %[[VAL_22]] : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
-func.func @sparse_alloc_sparse_vector(%arg0: index) -> tensor<?xf64, #SV> {
-  %0 = bufferization.alloc_tensor(%arg0) : tensor<?xf64, #SV>
+func.func @empty_sparse_vector(%arg0: index) -> tensor<?xf64, #SV> {
+  %0 = tensor.empty(%arg0) : tensor<?xf64, #SV>
   %1 = sparse_tensor.load %0 : tensor<?xf64, #SV>
   return %1 : tensor<?xf64, #SV>
 }

mlir/test/Dialect/SparseTensor/conversion.mlir

Lines changed: 7 additions & 7 deletions
@@ -150,7 +150,7 @@ func.func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor
 // CHECK: %[[T:.*]] = call @newSparseTensor(%[[DimSizes]], %[[LvlSizes]], %[[LvlTypes]], %[[Iota]], %[[Iota]], %{{.*}}, %{{.*}}, %{{.*}}, %[[Empty]], %[[NP]])
 // CHECK: return %[[T]] : !llvm.ptr<i8>
 func.func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #CSR> {
-  %0 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64, #CSR>
+  %0 = tensor.empty(%arg0, %arg1) : tensor<?x?xf64, #CSR>
   %1 = sparse_tensor.load %0 : tensor<?x?xf64, #CSR>
   return %1 : tensor<?x?xf64, #CSR>
 }
@@ -334,7 +334,7 @@ func.func @sparse_insert(%arg0: tensor<128xf32, #SparseVector>,
 // CHECK-DAG: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<8xi1>)
 // CHECK: return %[[D]] : memref<?xindex>
 func.func @sparse_expansion1() -> memref<?xindex> {
-  %0 = bufferization.alloc_tensor() : tensor<4x8xf64, #CSR>
+  %0 = tensor.empty() : tensor<4x8xf64, #CSR>
   %values, %filled, %added, %count = sparse_tensor.expand %0
     : tensor<4x8xf64, #CSR> to memref<?xf64>, memref<?xi1>, memref<?xindex>
   return %added : memref<?xindex>
@@ -350,7 +350,7 @@ func.func @sparse_expansion1() -> memref<?xindex> {
 // CHECK-DAG: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<4xi1>)
 // CHECK: return %[[D]] : memref<?xindex>
 func.func @sparse_expansion2() -> memref<?xindex> {
-  %0 = bufferization.alloc_tensor() : tensor<4x8xf64, #CSC>
+  %0 = tensor.empty() : tensor<4x8xf64, #CSC>
   %values, %filled, %added, %count = sparse_tensor.expand %0
     : tensor<4x8xf64, #CSC> to memref<?xf64>, memref<?xi1>, memref<?xindex>
   return %added : memref<?xindex>
@@ -367,7 +367,7 @@ func.func @sparse_expansion2() -> memref<?xindex> {
 // CHECK-DAG: linalg.fill ins(%{{.*}} : i1) outs(%[[B]] : memref<?xi1>)
 // CHECK: return %[[C]] : memref<?xindex>
 func.func @sparse_expansion3(%arg0: index, %arg1: index) -> memref<?xindex> {
-  %0 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64, #CSC>
+  %0 = tensor.empty(%arg0, %arg1) : tensor<?x?xf64, #CSC>
   %values, %filled, %added, %count = sparse_tensor.expand %0
     : tensor<?x?xf64, #CSC> to memref<?xf64>, memref<?xi1>, memref<?xindex>
   return %added : memref<?xindex>
@@ -430,12 +430,12 @@ func.func @sparse_out2(%arg0: tensor<?x?x?xf32, #SparseTensor>, %arg1: !llvm.ptr
 
 // CHECK-LABEL: func @sparse_and_dense_init(
 // CHECK: %[[S:.*]] = call @newSparseTensor
-// CHECK: %[[D:.*]] = bufferization.alloc_tensor
+// CHECK: %[[D:.*]] = tensor.empty
 // CHECK: return %[[S]], %[[D]] : !llvm.ptr<i8>, tensor<?x?xf64>
 func.func @sparse_and_dense_init(%arg0: index, %arg1: index)
     -> (tensor<?x?xf64, #CSR>, tensor<?x?xf64>) {
-  %0 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64, #CSR>
+  %0 = tensor.empty(%arg0, %arg1) : tensor<?x?xf64, #CSR>
   %1 = sparse_tensor.load %0 : tensor<?x?xf64, #CSR>
-  %2 = bufferization.alloc_tensor(%arg0, %arg1) : tensor<?x?xf64>
+  %2 = tensor.empty(%arg0, %arg1) : tensor<?x?xf64>
   return %1, %2 : tensor<?x?xf64, #CSR>, tensor<?x?xf64>
 }

mlir/test/Dialect/SparseTensor/pre_rewriting.mlir

Lines changed: 2 additions & 2 deletions
@@ -63,7 +63,7 @@ func.func @sparse_fuse_slice(%a : tensor<2x3xi64, #SortedCOO>) -> tensor<1x3xi64
 // CHECK-SAME: %[[VAL_1:.*]]: tensor<4x4xf64, #sparse_tensor.encoding<{{.*}}>>,
 // CHECK-SAME: %[[VAL_2:.*]]: tensor<4x4xf64, #sparse_tensor.encoding<{{.*}}>>) -> tensor<4x4xf64, #sparse_tensor.encoding<{{.*}}>> {
 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0.000000e+00 : f64
-// CHECK-DAG: %[[VAL_4:.*]] = bufferization.alloc_tensor() : tensor<4x4xf64, #sparse_tensor.encoding<{{.*}}>>
+// CHECK-DAG: %[[VAL_4:.*]] = tensor.empty() : tensor<4x4xf64, #sparse_tensor.encoding<{{.*}}>>
 // CHECK-NEXT: %[[VAL_5:.*]] = linalg.generic {indexing_maps = [#map, #map, #map, #map], iterator_types = ["parallel", "parallel"]}
 // CHECK-SAME: ins(%[[VAL_0]], %[[VAL_1]], %[[VAL_2]]
 // CHECK-NEXT: ^bb0(%[[VAL_6:.*]]: i1, %[[VAL_7:.*]]: f64, %[[VAL_8:.*]]: f64, %[[VAL_9:.*]]: f64):
@@ -90,7 +90,7 @@ func.func @sparse_fuse_slice(%a : tensor<2x3xi64, #SortedCOO>) -> tensor<1x3xi64
 func.func @sparse_select(%cond: tensor<4x4xi1>,
                          %arga: tensor<4x4xf64, #DCSR>,
                          %argb: tensor<4x4xf64, #DCSR>) -> tensor<4x4xf64, #DCSR> {
-  %xv = bufferization.alloc_tensor() : tensor<4x4xf64, #DCSR>
+  %xv = tensor.empty() : tensor<4x4xf64, #DCSR>
   %0 = linalg.generic #sel_trait
     ins(%cond, %arga, %argb: tensor<4x4xi1>, tensor<4x4xf64, #DCSR>, tensor<4x4xf64, #DCSR>)
     outs(%xv: tensor<4x4xf64, #DCSR>) {

mlir/test/Dialect/SparseTensor/sparse_2d.mlir

Lines changed: 2 additions & 2 deletions
@@ -1058,7 +1058,7 @@ func.func @cmp_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T
 // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 2 : index
 // CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
-// CHECK-DAG: %[[VAL_5:.*]] = bufferization.alloc_tensor() : tensor<2x3xf64, #{{.*}}>>
+// CHECK-DAG: %[[VAL_5:.*]] = tensor.empty() : tensor<2x3xf64, #{{.*}}>>
 // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<2x3xf64, #{{.*}}>> to memref<?xindex>
 // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<2x3xf64, #{{.*}}>> to memref<?xindex>
 // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<2x3xf64, #{{.*}}>> to memref<?xf64>
@@ -1142,7 +1142,7 @@ func.func @cmp_ss_ss(%arga: tensor<32x16xf32, #Tss>, %argb: tensor<32x16xf32, #T
 // CHECK: }
 func.func @sub_ss_batched(%0: tensor<2x3xf64, #BatchedVector>, %1: tensor<2x3xf64, #BatchedVector>)
     -> tensor<2x3xf64, #BatchedVector> {
-  %2 = bufferization.alloc_tensor() : tensor<2x3xf64, #BatchedVector>
+  %2 = tensor.empty() : tensor<2x3xf64, #BatchedVector>
   %3 = linalg.generic #trait2
     ins(%0, %1 : tensor<2x3xf64, #BatchedVector>, tensor<2x3xf64, #BatchedVector>)
     outs(%2 : tensor<2x3xf64, #BatchedVector>) {

mlir/test/Dialect/SparseTensor/sparse_conv_2d_slice_based.mlir

Lines changed: 2 additions & 2 deletions
@@ -19,7 +19,7 @@
 // CHECK-DAG: %[[VAL_9:.*]] = arith.constant 4 : index
 // CHECK-DAG: %[[VAL_10:.*]] = arith.constant 0 : i32
 // CHECK-DAG: %[[VAL_11:.*]] = arith.constant false
-// CHECK-DAG: %[[VAL_12:.*]] = bufferization.alloc_tensor() : tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>
+// CHECK-DAG: %[[VAL_12:.*]] = tensor.empty() : tensor<6x6xi32, #sparse_tensor.encoding<{{.*}}>>
 // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
 // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xi32, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
 // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xi32, #sparse_tensor.encoding<{{.*}}>> to memref<?xindex>
@@ -261,7 +261,7 @@
 // CHECK: }
 func.func @conv2d_all_sparse_CSR(%arg0: tensor<8x8xi32, #DCSR>,
                                  %arg1: tensor<3x3xi32>) -> tensor<6x6xi32, #DCSR> {
-  %0 = bufferization.alloc_tensor() : tensor<6x6xi32, #DCSR>
+  %0 = tensor.empty() : tensor<6x6xi32, #DCSR>
   %1 = linalg.generic {
     indexing_maps = [#map, #map1, #map2],
     iterator_types = ["parallel", "parallel", "reduction", "reduction"]}

mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir

Lines changed: 1 addition & 1 deletion
@@ -121,7 +121,7 @@
 // CHECK: }
 func.func @fill_zero_after_alloc(%arg0: tensor<100x200xf64, #DCSR>,
                                  %arg1: tensor<200x300xf64, #DCSR>) -> tensor<100x300xf64, #DCSR> {
-  %0 = bufferization.alloc_tensor() : tensor<100x300xf64, #DCSR>
+  %0 = tensor.empty() : tensor<100x300xf64, #DCSR>
   %cst = arith.constant 0.000000e+00 : f64
   %1 = linalg.fill ins(%cst : f64)
     outs(%0 : tensor<100x300xf64, #DCSR>) -> tensor<100x300xf64, #DCSR>

mlir/test/Dialect/SparseTensor/sparse_index.mlir

Lines changed: 4 additions & 4 deletions
@@ -23,7 +23,7 @@
 // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[VAL_3:.*]] = tensor.dim %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK-DAG: %[[VAL_4:.*]] = tensor.dim %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
-// CHECK-DAG: %[[VAL_5:.*]] = bufferization.alloc_tensor(%[[VAL_3]], %[[VAL_4]]) : tensor<?x?xi64, #sparse_tensor.encoding
+// CHECK-DAG: %[[VAL_5:.*]] = tensor.empty(%[[VAL_3]], %[[VAL_4]]) : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK-DAG: %[[VAL_7:.*]] = tensor.dim %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK-DAG: %[[VAL_8:.*]] = tensor.dim %[[VAL_0]], %[[VAL_2]] : tensor<?x?xi64, #sparse_tensor.encoding
@@ -52,7 +52,7 @@ func.func @dense_index(%arga: tensor<?x?xi64, #DenseMatrix>)
   %c1 = arith.constant 0 : index
   %0 = tensor.dim %arga, %c0 : tensor<?x?xi64, #DenseMatrix>
   %1 = tensor.dim %arga, %c1 : tensor<?x?xi64, #DenseMatrix>
-  %init = bufferization.alloc_tensor(%0, %1) : tensor<?x?xi64, #DenseMatrix>
+  %init = tensor.empty(%0, %1) : tensor<?x?xi64, #DenseMatrix>
   %r = linalg.generic #trait
     ins(%arga: tensor<?x?xi64, #DenseMatrix>)
     outs(%init: tensor<?x?xi64, #DenseMatrix>) {
@@ -75,7 +75,7 @@ func.func @dense_index(%arga: tensor<?x?xi64, #DenseMatrix>)
 // CHECK-DAG: %[[VAL_2:.*]] = arith.constant 1 : index
 // CHECK-DAG: %[[VAL_3:.*]] = tensor.dim %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK-DAG: %[[VAL_4:.*]] = tensor.dim %[[VAL_0]], %[[VAL_1]] : tensor<?x?xi64, #sparse_tensor.encoding
-// CHECK-DAG: %[[VAL_5:.*]] = bufferization.alloc_tensor(%[[VAL_3]], %[[VAL_4]]) : tensor<?x?xi64, #sparse_tensor.encoding
+// CHECK-DAG: %[[VAL_5:.*]] = tensor.empty(%[[VAL_3]], %[[VAL_4]]) : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK-DAG: %[[VAL_6:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK-DAG: %[[VAL_7:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<?x?xi64, #sparse_tensor.encoding
 // CHECK-DAG: %[[VAL_8:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<?x?xi64, #sparse_tensor.encoding
@@ -109,7 +109,7 @@ func.func @sparse_index(%arga: tensor<?x?xi64, #SparseMatrix>)
   %c1 = arith.constant 0 : index
   %0 = tensor.dim %arga, %c0 : tensor<?x?xi64, #SparseMatrix>
   %1 = tensor.dim %arga, %c1 : tensor<?x?xi64, #SparseMatrix>
-  %init = bufferization.alloc_tensor(%0, %1) : tensor<?x?xi64, #SparseMatrix>
+  %init = tensor.empty(%0, %1) : tensor<?x?xi64, #SparseMatrix>
   %r = linalg.generic #trait
     ins(%arga: tensor<?x?xi64, #SparseMatrix>)
     outs(%init: tensor<?x?xi64, #SparseMatrix>) {

mlir/test/Dialect/SparseTensor/sparse_matmul_codegen.mlir

Lines changed: 1 addition & 1 deletion
@@ -144,7 +144,7 @@
 // CHECK: return %[[VAL_77]]#0, %[[VAL_77]]#1, %[[VAL_77]]#2, %[[VAL_77]]#3 : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 func.func @matmul(%A: tensor<4x8xf64, #CSR>,
                   %B: tensor<8x4xf64, #CSR>) -> tensor<4x4xf64, #CSR> {
-  %C = bufferization.alloc_tensor() : tensor<4x4xf64, #CSR>
+  %C = tensor.empty() : tensor<4x4xf64, #CSR>
   %D = linalg.matmul
     ins(%A, %B: tensor<4x8xf64, #CSR>, tensor<8x4xf64, #CSR>)
     outs(%C: tensor<4x4xf64, #CSR>) -> tensor<4x4xf64, #CSR>
