
Commit 6280e23

[mlir][sparse] Print new syntax (#68130)
Printing changes from `#sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>` to `map = (d0) -> (d0 : compressed)`. Level properties (`nonunique`, `nonordered`), ELL, and dimension slices are also supported.
1 parent 7794e16 · commit 6280e23

File tree

9 files changed, +115 -64 lines


mlir/include/mlir/Dialect/SparseTensor/IR/Enums.h

Lines changed: 10 additions & 10 deletions
@@ -215,29 +215,29 @@ constexpr const char *toMLIRString(DimLevelType dlt) {
   case DimLevelType::Compressed:
     return "compressed";
   case DimLevelType::CompressedNu:
-    return "compressed_nu";
+    return "compressed(nonunique)";
   case DimLevelType::CompressedNo:
-    return "compressed_no";
+    return "compressed(nonordered)";
   case DimLevelType::CompressedNuNo:
-    return "compressed_nu_no";
+    return "compressed(nonunique, nonordered)";
   case DimLevelType::Singleton:
     return "singleton";
   case DimLevelType::SingletonNu:
-    return "singleton_nu";
+    return "singleton(nonunique)";
   case DimLevelType::SingletonNo:
-    return "singleton_no";
+    return "singleton(nonordered)";
   case DimLevelType::SingletonNuNo:
-    return "singleton_nu_no";
+    return "singleton(nonunique, nonordered)";
   case DimLevelType::LooseCompressed:
     return "loose_compressed";
   case DimLevelType::LooseCompressedNu:
-    return "loose_compressed_nu";
+    return "loose_compressed(nonunique)";
   case DimLevelType::LooseCompressedNo:
-    return "loose_compressed_no";
+    return "loose_compressed(nonordered)";
   case DimLevelType::LooseCompressedNuNo:
-    return "loose_compressed_nu_no";
+    return "loose_compressed(nonunique, nonordered)";
   case DimLevelType::TwoOutOfFour:
-    return "compressed24";
+    return "block2_4";
   }
   return "";
 }
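
A quick sanity check of the new spellings, as a minimal standalone sketch (not part of the commit; it assumes the MLIR headers and their LLVM dependencies are on the include path):

#include <cassert>
#include <string>

#include "mlir/Dialect/SparseTensor/IR/Enums.h"

using mlir::sparse_tensor::DimLevelType;
using mlir::sparse_tensor::toMLIRString;

int main() {
  // Property-free levels keep their old spelling.
  assert(std::string(toMLIRString(DimLevelType::Compressed)) == "compressed");
  // Level properties are now printed in parentheses.
  assert(std::string(toMLIRString(DimLevelType::CompressedNuNo)) ==
         "compressed(nonunique, nonordered)");
  // 2:4 sparsity is now spelled "block2_4" instead of "compressed24".
  assert(std::string(toMLIRString(DimLevelType::TwoOutOfFour)) == "block2_4");
}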

mlir/include/mlir/Dialect/SparseTensor/IR/SparseTensorAttrDefs.td

Lines changed: 8 additions & 0 deletions
@@ -422,6 +422,14 @@ def SparseTensorEncodingAttr : SparseTensor_Attr<"SparseTensorEncoding",
     std::optional<uint64_t> getStaticLvlSliceOffset(::mlir::sparse_tensor::Level lvl) const;
     std::optional<uint64_t> getStaticLvlSliceSize(::mlir::sparse_tensor::Level lvl) const;
     std::optional<uint64_t> getStaticLvlSliceStride(::mlir::sparse_tensor::Level lvl) const;
+
+    //
+    // Printing methods.
+    //
+
+    void printSymbols(AffineMap &map, AsmPrinter &printer) const;
+    void printDimensions(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::SparseTensorDimSliceAttr> dimSlices) const;
+    void printLevels(AffineMap &map, AsmPrinter &printer, ArrayRef<::mlir::sparse_tensor::DimLevelType> lvlTypes) const;
   }];
 
   let genVerifyDecl = 1;

mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp

Lines changed: 54 additions & 18 deletions
@@ -586,31 +586,67 @@ Attribute SparseTensorEncodingAttr::parse(AsmParser &parser, Type type) {
 }
 
 void SparseTensorEncodingAttr::print(AsmPrinter &printer) const {
-  // Print the struct-like storage in dictionary fashion.
-  printer << "<{ lvlTypes = [ ";
-  llvm::interleaveComma(getLvlTypes(), printer, [&](DimLevelType dlt) {
-    printer << "\"" << toMLIRString(dlt) << "\"";
-  });
-  printer << " ]";
+  auto map = static_cast<AffineMap>(getDimToLvl());
+  // Empty affine map indicates identity map
+  if (!map)
+    map = AffineMap::getMultiDimIdentityMap(getLvlTypes().size(), getContext());
+  printer << "<{ map = ";
+  printSymbols(map, printer);
+  printer << '(';
+  printDimensions(map, printer, getDimSlices());
+  printer << ") -> (";
+  printLevels(map, printer, getLvlTypes());
+  printer << ')';
   // Print remaining members only for non-default values.
-  if (!isIdentity())
-    printer << ", dimToLvl = affine_map<" << getDimToLvl() << ">";
   if (getPosWidth())
     printer << ", posWidth = " << getPosWidth();
   if (getCrdWidth())
     printer << ", crdWidth = " << getCrdWidth();
-  if (!getDimSlices().empty()) {
-    printer << ", dimSlices = [ ";
-    llvm::interleaveComma(getDimSlices(), printer,
-                          [&](SparseTensorDimSliceAttr attr) {
-                            // Calls SparseTensorDimSliceAttr::print directly to
-                            // skip mnemonic.
-                            attr.print(printer);
-                          });
-    printer << " ]";
+  printer << " }>";
+}
+
+void SparseTensorEncodingAttr::printSymbols(AffineMap &map,
+                                            AsmPrinter &printer) const {
+  if (map.getNumSymbols() == 0)
+    return;
+  printer << '[';
+  for (unsigned i = 0, n = map.getNumSymbols() - 1; i < n; i++)
+    printer << 's' << i << ", ";
+  if (map.getNumSymbols() >= 1)
+    printer << 's' << map.getNumSymbols() - 1;
+  printer << ']';
+}
+
+void SparseTensorEncodingAttr::printDimensions(
+    AffineMap &map, AsmPrinter &printer,
+    ArrayRef<SparseTensorDimSliceAttr> dimSlices) const {
+  if (!dimSlices.empty()) {
+    for (unsigned i = 0, n = map.getNumDims() - 1; i < n; i++)
+      printer << 'd' << i << " : " << dimSlices[i] << ", ";
+    if (map.getNumDims() >= 1) {
+      printer << 'd' << map.getNumDims() - 1 << " : "
+              << dimSlices[map.getNumDims() - 1];
+    }
+  } else {
+    for (unsigned i = 0, n = map.getNumDims() - 1; i < n; i++)
+      printer << 'd' << i << ", ";
+    if (map.getNumDims() >= 1)
+      printer << 'd' << map.getNumDims() - 1;
   }
+}
 
-  printer << " }>";
+void SparseTensorEncodingAttr::printLevels(
+    AffineMap &map, AsmPrinter &printer,
+    ArrayRef<DimLevelType> lvlTypes) const {
+  for (unsigned i = 0, n = map.getNumResults() - 1; i < n; i++) {
+    map.getResult(i).print(printer.getStream());
+    printer << " : " << toMLIRString(lvlTypes[i]) << ", ";
+  }
+  if (map.getNumResults() >= 1) {
+    auto lastIndex = map.getNumResults() - 1;
+    map.getResult(lastIndex).print(printer.getStream());
+    printer << " : " << toMLIRString(lvlTypes[lastIndex]);
+  }
 }
 
 LogicalResult
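
The three helpers share one pattern: elements 0 through n-2 are printed inside the loop with a trailing ", ", and the last element is printed separately so no trailing separator appears. A standalone model of that pattern (not part of the commit; a plain std::ostream stands in for AsmPrinter, and the identity dim-to-lvl case is assumed):

#include <iostream>
#include <string>
#include <vector>

// Mimics printDimensions/printLevels for an identity map:
// print d0..d(n-2) each followed by ", ", then the last one bare.
static void printDims(std::ostream &os, unsigned numDims) {
  for (unsigned i = 0; i + 1 < numDims; i++)
    os << 'd' << i << ", ";
  if (numDims >= 1)
    os << 'd' << (numDims - 1);
}

int main() {
  // Produces the CSR form: map = (d0, d1) -> (d0 : dense, d1 : compressed)
  std::vector<std::string> lvlTypes = {"dense", "compressed"};
  unsigned n = static_cast<unsigned>(lvlTypes.size());
  std::cout << "map = (";
  printDims(std::cout, n);
  std::cout << ") -> (";
  for (unsigned i = 0; i + 1 < n; i++)
    std::cout << 'd' << i << " : " << lvlTypes[i] << ", ";
  std::cout << 'd' << (n - 1) << " : " << lvlTypes[n - 1] << ")\n";
}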

mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorCodegen.cpp

Lines changed: 11 additions & 2 deletions
@@ -472,8 +472,17 @@ class SparseInsertGenerator
     llvm::raw_svector_ostream nameOstream(nameBuffer);
     nameOstream << kInsertFuncNamePrefix;
     const Level lvlRank = stt.getLvlRank();
-    for (Level l = 0; l < lvlRank; l++)
-      nameOstream << toMLIRString(stt.getLvlType(l)) << "_";
+    for (Level l = 0; l < lvlRank; l++) {
+      std::string lvlType = toMLIRString(stt.getLvlType(l));
+      // Replace/remove punctuations in level properties.
+      std::replace_if(
+          lvlType.begin(), lvlType.end(),
+          [](char c) { return c == '(' || c == ','; }, '_');
+      lvlType.erase(std::remove_if(lvlType.begin(), lvlType.end(),
+                                   [](char c) { return c == ')' || c == ' '; }),
+                    lvlType.end());
+      nameOstream << lvlType << "_";
+    }
     // Static dim sizes are used in the generated code while dynamic sizes are
     // loaded from the dimSizes buffer. This is the reason for adding the shape
     // to the function name.
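
The scrubbing is needed because the new level strings, such as "compressed(nonunique, nonordered)", contain characters that are not valid in function names. A self-contained sketch of the same transformation (not part of the commit; mangleLvlType is a hypothetical name):

#include <algorithm>
#include <iostream>
#include <string>

// As in the loop above: '(' and ',' become '_'; ')' and ' ' are dropped.
static std::string mangleLvlType(std::string lvlType) {
  std::replace_if(
      lvlType.begin(), lvlType.end(),
      [](char c) { return c == '(' || c == ','; }, '_');
  lvlType.erase(std::remove_if(lvlType.begin(), lvlType.end(),
                               [](char c) { return c == ')' || c == ' '; }),
                lvlType.end());
  return lvlType;
}

int main() {
  // Prints: compressed_nonunique_nonordered
  std::cout << mangleLvlType("compressed(nonunique, nonordered)") << "\n";
}

This is why the codegen tests below now expect names like @_insert_dense_compressed_nonordered_8_8_f64_0_0.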

mlir/test/Dialect/SparseTensor/codegen.mlir

Lines changed: 4 additions & 4 deletions
@@ -507,7 +507,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
   return %1 : tensor<8x8xf64, #CSR>
 }
 
-// CHECK-LABEL: func.func private @_insert_dense_compressed_no_8_8_f64_0_0(
+// CHECK-LABEL: func.func private @_insert_dense_compressed_nonordered_8_8_f64_0_0(
 // CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
 // CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
 // CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -533,7 +533,7 @@ func.func @sparse_compression(%tensor: tensor<8x8xf64, #CSR>,
 // CHECK: %[[A13:.*]]:4 = scf.for %[[A14:.*]] = %[[A11]] to %[[A7]] step %[[A12]] iter_args(%[[A15:.*]] = %[[A0]], %[[A16:.*]] = %[[A1]], %[[A17:.*]] = %[[A2]], %[[A18:.*]] = %[[A3]]) -> (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 // CHECK: %[[A19:.*]] = memref.load %[[A6]]{{\[}}%[[A14]]] : memref<?xindex>
 // CHECK: %[[A20:.*]] = memref.load %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
-// CHECK: %[[A21:.*]]:4 = func.call @_insert_dense_compressed_no_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
+// CHECK: %[[A21:.*]]:4 = func.call @_insert_dense_compressed_nonordered_8_8_f64_0_0(%[[A15]], %[[A16]], %[[A17]], %[[A18]], %[[A8]], %[[A19]], %[[A20]]) : (memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
 // CHECK: memref.store %[[A10]], %[[A4]]{{\[}}%[[A19]]] : memref<?xf64>
 // CHECK: memref.store %[[A9]], %[[A5]]{{\[}}%[[A19]]] : memref<?xi1>
 // CHECK: scf.yield %[[A21]]#0, %[[A21]]#1, %[[A21]]#2, %[[A21]]#3 : memref<?xindex>, memref<?xindex>, memref<?xf64>, !sparse_tensor.storage_specifier
@@ -611,7 +611,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
   return %1 : tensor<128xf64, #SparseVector>
 }
 
-// CHECK-LABEL: func.func private @_insert_compressed_nu_singleton_5_6_f64_0_0(
+// CHECK-LABEL: func.func private @_insert_compressed_nonunique_singleton_5_6_f64_0_0(
 // CHECK-SAME: %[[A1:.*0]]: memref<?xindex>,
 // CHECK-SAME: %[[A2:.*1]]: memref<?xindex>,
 // CHECK-SAME: %[[A3:.*2]]: memref<?xf64>,
@@ -627,7 +627,7 @@ func.func @sparse_insert_typed(%arg0: tensor<128xf64, #SparseVector>, %arg1: ind
 // CHECK-SAME: %[[A3:.*3]]: !sparse_tensor.storage_specifier
 // CHECK-SAME: %[[A4:.*4]]: index,
 // CHECK-SAME: %[[A5:.*5]]: f64)
-// CHECK: %[[R:.*]]:4 = call @_insert_compressed_nu_singleton_5_6_f64_0_0(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
+// CHECK: %[[R:.*]]:4 = call @_insert_compressed_nonunique_singleton_5_6_f64_0_0(%[[A0]], %[[A1]], %[[A2]], %[[A3]], %[[A4]], %[[A4]], %[[A5]])
 // CHECK: return %[[R]]#0, %[[R]]#1, %[[R]]#2, %[[R]]#3
 func.func @sparse_insert_coo(%arg0: tensor<5x6xf64, #Coo>, %arg1: index, %arg2: f64) -> tensor<5x6xf64, #Coo> {
   %0 = sparse_tensor.insert %arg2 into %arg0[%arg1, %arg1] : tensor<5x6xf64, #Coo>

mlir/test/Dialect/SparseTensor/roundtrip_encoding.mlir

Lines changed: 16 additions & 16 deletions
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -split-input-file | mlir-opt | FileCheck %s
 
 // CHECK-LABEL: func private @sparse_1d_tensor(
-// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed" ] }>>)
+// CHECK-SAME: tensor<32xf64, #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>>)
 func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>>)
 
 // -----
@@ -13,7 +13,7 @@ func.func private @sparse_1d_tensor(tensor<32xf64, #sparse_tensor.encoding<{ map
 }>
 
 // CHECK-LABEL: func private @sparse_csr(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], posWidth = 64, crdWidth = 64 }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed), posWidth = 64, crdWidth = 64 }>>)
 func.func private @sparse_csr(tensor<?x?xf32, #CSR>)
 
 // -----
@@ -23,7 +23,7 @@ func.func private @sparse_csr(tensor<?x?xf32, #CSR>)
 }>
 
 // CHECK-LABEL: func private @CSR_explicit(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : compressed) }>>
 func.func private @CSR_explicit(%arg0: tensor<?x?xf64, #CSR_explicit>) {
   return
 }
@@ -37,7 +37,7 @@ func.func private @CSR_explicit(%arg0: tensor<?x?xf64, #CSR_explicit>) {
 }>
 
 // CHECK-LABEL: func private @sparse_csc(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)> }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : dense, d0 : compressed) }>>)
 func.func private @sparse_csc(tensor<?x?xf32, #CSC>)
 
 // -----
@@ -49,7 +49,7 @@ func.func private @sparse_csc(tensor<?x?xf32, #CSC>)
 }>
 
 // CHECK-LABEL: func private @sparse_dcsc(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ], dimToLvl = affine_map<(d0, d1) -> (d1, d0)>, crdWidth = 64 }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d1 : compressed, d0 : compressed), crdWidth = 64 }>>)
 func.func private @sparse_dcsc(tensor<?x?xf32, #DCSC>)
 
 // -----
@@ -59,7 +59,7 @@ func.func private @sparse_dcsc(tensor<?x?xf32, #DCSC>)
 }>
 
 // CHECK-LABEL: func private @sparse_coo(
-// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu_no", "singleton_no" ] }>>)
+// CHECK-SAME: tensor<?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique, nonordered), d1 : singleton(nonordered)) }>>)
 func.func private @sparse_coo(tensor<?x?xf32, #COO>)
 
 // -----
@@ -69,7 +69,7 @@ func.func private @sparse_coo(tensor<?x?xf32, #COO>)
 }>
 
 // CHECK-LABEL: func private @sparse_bcoo(
-// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "loose_compressed_nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<?x?x?xf32, #sparse_tensor.encoding<{ map = (d0, d1, d2) -> (d0 : dense, d1 : loose_compressed(nonunique), d2 : singleton) }>>)
 func.func private @sparse_bcoo(tensor<?x?x?xf32, #BCOO>)
 
 // -----
@@ -79,7 +79,7 @@ func.func private @sparse_bcoo(tensor<?x?x?xf32, #BCOO>)
 }>
 
 // CHECK-LABEL: func private @sparse_sorted_coo(
-// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed_nu", "singleton" ] }>>)
+// CHECK-SAME: tensor<10x10xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton) }>>)
 func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)
 
 // -----
@@ -94,7 +94,7 @@ func.func private @sparse_sorted_coo(tensor<10x10xf64, #SortedCOO>)
 }>
 
 // CHECK-LABEL: func private @sparse_bcsr(
-// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+// CHECK-SAME: tensor<10x60xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
 func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)
 
 
@@ -105,7 +105,7 @@ func.func private @sparse_bcsr(tensor<10x60xf64, #BCSR>)
 }>
 
 // CHECK-LABEL: func private @sparse_ell(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed" ], dimToLvl = affine_map<(d0, d1)[s0] -> (d0 * (s0 * 4), d0, d1)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = [s0](d0, d1) -> (d0 * (s0 * 4) : dense, d0 : dense, d1 : compressed) }>>
 func.func private @sparse_ell(tensor<?x?xf64, #ELL>)
 
 // -----
@@ -115,7 +115,7 @@ func.func private @sparse_ell(tensor<?x?xf64, #ELL>)
 }>
 
 // CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, 4, 1), (1, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0 : #sparse_tensor<slice(1, 4, 1)>, d1 : #sparse_tensor<slice(1, 4, 2)>) -> (d0 : dense, d1 : compressed) }>>
 func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 
 // -----
@@ -125,7 +125,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 }>
 
 // CHECK-LABEL: func private @sparse_slice(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed" ], dimSlices = [ (1, ?, 1), (?, 4, 2) ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0 : #sparse_tensor<slice(1, ?, 1)>, d1 : #sparse_tensor<slice(?, 4, 2)>) -> (d0 : dense, d1 : compressed) }>>
 func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 
 // -----
@@ -138,7 +138,7 @@ func.func private @sparse_slice(tensor<?x?xf64, #CSR_SLICE>)
 }>
 
 // CHECK-LABEL: func private @sparse_2_out_of_4(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "compressed24" ] }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 : block2_4) }>>
 func.func private @sparse_2_out_of_4(tensor<?x?xf64, #NV_24>)
 
 // -----
@@ -153,7 +153,7 @@ func.func private @sparse_2_out_of_4(tensor<?x?xf64, #NV_24>)
 }>
 
 // CHECK-LABEL: func private @BCSR(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
 func.func private @BCSR(%arg0: tensor<?x?xf64, #BCSR>) {
   return
 }
@@ -174,7 +174,7 @@ func.func private @BCSR(%arg0: tensor<?x?xf64, #BCSR>) {
 }>
 
 // CHECK-LABEL: func private @BCSR_explicit(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed", "dense", "dense" ], dimToLvl = affine_map<(d0, d1) -> (d0 floordiv 2, d1 floordiv 3, d0 mod 2, d1 mod 3)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 floordiv 2 : compressed, d1 floordiv 3 : compressed, d0 mod 2 : dense, d1 mod 3 : dense) }>>
 func.func private @BCSR_explicit(%arg0: tensor<?x?xf64, #BCSR_explicit>) {
   return
 }
@@ -190,7 +190,7 @@ func.func private @BCSR_explicit(%arg0: tensor<?x?xf64, #BCSR_explicit>) {
 }>
 
 // CHECK-LABEL: func private @NV_24(
-// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ lvlTypes = [ "dense", "dense", "compressed24" ], dimToLvl = affine_map<(d0, d1) -> (d0, d1 floordiv 4, d1 mod 4)> }>>
+// CHECK-SAME: tensor<?x?xf64, #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : dense, d1 floordiv 4 : dense, d1 mod 4 : block2_4) }>>
 func.func private @NV_24(%arg0: tensor<?x?xf64, #NV_24>) {
   return
 }

mlir/test/Dialect/SparseTensor/sparse_reshape.mlir

Lines changed: 4 additions & 4 deletions
@@ -16,7 +16,7 @@
 // CHECK-ROUND: return %[[E]] : tensor<10x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 // CHECK-LABEL: func.func @sparse_expand(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -53,7 +53,7 @@ func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10x
 // CHECK-ROUND: return %[[C]] : tensor<100xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 // CHECK-LABEL: func.func @sparse_collapse(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -99,7 +99,7 @@ func.func @sparse_collapse(%arg0: tensor<10x10xf64, #SparseMatrix>) -> tensor<10
 // CHECK-ROUND: return %[[E]] : tensor<?x10xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 // CHECK-LABEL: func.func @dynamic_sparse_expand(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index
@@ -142,7 +142,7 @@ func.func @dynamic_sparse_expand(%arg0: tensor<?xf64, #SparseVector>) -> tensor<
 // CHECK-ROUND: return %[[C]] : tensor<?xf64, #sparse_tensor.encoding<{{{.*}}}>>
 //
 // CHECK-LABEL: func.func @dynamic_sparse_collapse(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
 // CHECK-DAG: %[[C1:.*]] = arith.constant 1 : index

mlir/test/Dialect/SparseTensor/sparse_tensor_reshape.mlir

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@
 #SparseMatrix = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>
 
 // CHECK: func.func @sparse_reshape(
-// CHECK-SAME: %[[S:.*]]:
+// CHECK-SAME: %[[S:.*0]]:
 // CHECK-DAG: %[[C25:.*]] = arith.constant 25 : index
 // CHECK-DAG: %[[C10:.*]] = arith.constant 10 : index
 // CHECK-DAG: %[[C0:.*]] = arith.constant 0 : index
