
Commit c43e627

Changed the phrase sparse-compiler to sparsifier in comments (#71578)
When the Powers That Be decided that the name "sparse compiler" should be changed to "sparsifier", we neglected to change some of the comments in the code; this pull request completes the name change.
1 parent cc9ad72 · commit c43e627

15 files changed (+32 −32 lines)

mlir/lib/Dialect/Linalg/Transforms/ElementwiseOpFusion.cpp

Lines changed: 1 addition & 1 deletion
@@ -1816,7 +1816,7 @@ struct RemoveOutsDependency : public OpRewritePattern<GenericOp> {
   if (!operandType)
     continue;
 
-  // If outs is sparse, leave it to the sparse compiler.
+  // If outs is sparse, leave it to the sparsifier.
   if (sparse_tensor::getSparseTensorEncoding(operandVal.getType()))
     continue;
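For readers unfamiliar with the check above: an outs operand is "sparse" when its tensor type carries a `#sparse_tensor.encoding`. The standalone sketch below is illustrative only and is not part of this commit; the kernel, trait, and all names are assumptions. It shows such an operand, which this fusion pattern skips so the sparsifier can handle it.

```mlir
#SV = #sparse_tensor.encoding<{ map = (d0) -> (d0 : compressed) }>

#trait_scale = {
  indexing_maps = [
    affine_map<(i) -> (i)>,  // a (in)
    affine_map<(i) -> (i)>   // x (out)
  ],
  iterator_types = ["parallel"]
}

// The outs operand carries a sparse encoding, so the fusion pattern above
// leaves it untouched and defers to the sparsifier.
func.func @scale(%arga: tensor<32xf64, #SV>,
                 %argx: tensor<32xf64, #SV>) -> tensor<32xf64, #SV> {
  %0 = linalg.generic #trait_scale
      ins(%arga : tensor<32xf64, #SV>)
      outs(%argx : tensor<32xf64, #SV>) {
    ^bb(%a: f64, %x: f64):
      %1 = arith.addf %a, %a : f64
      linalg.yield %1 : f64
  } -> tensor<32xf64, #SV>
  return %0 : tensor<32xf64, #SV>
}
```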

mlir/lib/Dialect/SparseTensor/Transforms/BufferizableOpInterfaceImpl.cpp

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ struct SparseBufferizableOpInterfaceExternalModel
   LogicalResult bufferize(Operation *op, RewriterBase &rewriter,
                           const BufferizationOptions &options) const {
     return op->emitError(
-        "sparse_tensor ops must be bufferized with the sparse compiler");
+        "sparse_tensor ops must be bufferized with the sparsifier");
   }
 };

mlir/lib/Dialect/SparseTensor/Transforms/SparseGPUCodegen.cpp

Lines changed: 3 additions & 3 deletions
@@ -6,7 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This is a prototype GPU codegenerator for the sparse compiler.
+// This is a prototype GPU codegenerator for the sparsifier.
 // The objective is to eventually use the right combination of
 // direct code generation and libary calls into vendor-specific
 // highly optimized sparse libraries (e.g. cuSparse for CUDA).
@@ -1227,7 +1227,7 @@ rewriteSDDMM(PatternRewriter &rewriter, linalg::GenericOp op, bool enableRT,
 //===----------------------------------------------------------------------===//
 
 /// Proof-of-concept rewriter. This rule generates a GPU implementation
-/// for each outermost forall loop generated by the sparse compiler.
+/// for each outermost forall loop generated by the sparsifier.
 /// TODO: right now works with parallelization-strategy=dense-outer-loop
 /// but give this its own flags in the future
 struct ForallRewriter : public OpRewritePattern<scf::ParallelOp> {
@@ -1239,7 +1239,7 @@ struct ForallRewriter : public OpRewritePattern<scf::ParallelOp> {
   LogicalResult matchAndRewrite(scf::ParallelOp forallOp,
                                 PatternRewriter &rewriter) const override {
     // Reject inadmissible loop form.
-    // Essentially only accept a loop, generated by the sparse compiler,
+    // Essentially only accept a loop, generated by the sparsifier,
     // of the form
     //   forall (i = 0; i < N; i++)
     // so that cyclic scheduling over the threads is easy.
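To make the admissibility comment concrete, here is a minimal hypothetical example of the outermost loop shape that ForallRewriter accepts, i.e. a unit-stride parallel loop from 0 to N. It is not taken from the sparsifier's actual output; the function name, memrefs, and body are placeholders.

```mlir
// Hypothetical stand-in for a sparsifier-generated outermost parallel loop:
//   forall (i = 0; i < N; i++) { ... }
func.func @outer_forall(%a: memref<?xf64>, %b: memref<?xf64>, %N: index) {
  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  scf.parallel (%i) = (%c0) to (%N) step (%c1) {
    // Placeholder body; the real body is produced by sparsification.
    %v = memref.load %a[%i] : memref<?xf64>
    memref.store %v, %b[%i] : memref<?xf64>
  }
  return
}
```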

mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorDescriptor.cpp

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ SparseTensorTypeToBufferConverter::SparseTensorTypeToBufferConverter() {
     if (!getSparseTensorEncoding(tp))
       // Not a sparse tensor.
       return std::nullopt;
-    // Sparse compiler knows how to cancel out these casts.
+    // Sparsifier knows how to cancel out these casts.
     return genTuple(builder, loc, tp, inputs);
   });
 }

mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorRewriting.cpp

Lines changed: 2 additions & 2 deletions
@@ -393,8 +393,8 @@ struct FuseTensorCast : public OpRewritePattern<tensor::CastOp> {
 };
 
 /// Rewrites a sequence of operations for sparse tensor selections in to
-/// semi-ring operations such that they can be compiled correctly by the sparse
-/// compiler. E.g., transforming the following sequence
+/// semi-ring operations such that they can be compiled correctly by the
+/// sparsifier. E.g., transforming the following sequence
 ///
 ///   %sel = arith.select %cond, %sp1, %sp2
 ///

mlir/lib/Dialect/SparseTensor/Transforms/SparseVectorization.cpp

Lines changed: 8 additions & 8 deletions
@@ -6,9 +6,9 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// A pass that converts loops generated by the sparse compiler into a form that
+// A pass that converts loops generated by the sparsifier into a form that
 // can exploit SIMD instructions of the target architecture. Note that this pass
-// ensures the sparse compiler can generate efficient SIMD (including ArmSVE
+// ensures the sparsifier can generate efficient SIMD (including ArmSVE
 // support) with proper separation of concerns as far as sparsification and
 // vectorization is concerned. However, this pass is not the final abstraction
 // level we want, and not the general vectorizer we want either. It forms a good
@@ -105,7 +105,7 @@ static Value genVectorInvariantValue(PatternRewriter &rewriter, VL vl,
 
 /// Generates a vectorized load lhs = a[ind[lo:hi]] or lhs = a[lo:hi],
 /// where 'lo' denotes the current index and 'hi = lo + vl - 1'. Note
-/// that the sparse compiler can only generate indirect loads in
+/// that the sparsifier can only generate indirect loads in
 /// the last index, i.e. back().
 static Value genVectorLoad(PatternRewriter &rewriter, Location loc, VL vl,
                            Value mem, ArrayRef<Value> idxs, Value vmask) {
@@ -124,7 +124,7 @@ static Value genVectorLoad(PatternRewriter &rewriter, Location loc, VL vl,
 
 /// Generates a vectorized store a[ind[lo:hi]] = rhs or a[lo:hi] = rhs
 /// where 'lo' denotes the current index and 'hi = lo + vl - 1'. Note
-/// that the sparse compiler can only generate indirect stores in
+/// that the sparsifier can only generate indirect stores in
 /// the last index, i.e. back().
 static void genVectorStore(PatternRewriter &rewriter, Location loc, Value mem,
                            ArrayRef<Value> idxs, Value vmask, Value rhs) {
@@ -219,8 +219,8 @@ static Value genVectorReducInit(PatternRewriter &rewriter, Location loc,
 /// The first call (!codegen) does the analysis. Then, on success, the second
 /// call (codegen) yields the proper vector form in the output parameter
 /// vector 'idxs'. This mechanism ensures that analysis and rewriting code
-/// stay in sync. Note that the analyis part is simple because the sparse
-/// compiler only generates relatively simple subscript expressions.
+/// stay in sync. Note that the analyis part is simple because the sparsifier
+/// only generates relatively simple subscript expressions.
 ///
 /// See https://llvm.org/docs/GetElementPtr.html for some background on
 /// the complications described below.
@@ -359,7 +359,7 @@ static bool vectorizeSubscripts(PatternRewriter &rewriter, scf::ForOp forOp,
 /// The first call (!codegen) does the analysis. Then, on success, the second
 /// call (codegen) yields the proper vector form in the output parameter 'vexp'.
 /// This mechanism ensures that analysis and rewriting code stay in sync. Note
-/// that the analyis part is simple because the sparse compiler only generates
+/// that the analyis part is simple because the sparsifier only generates
 /// relatively simple expressions inside the for-loops.
 static bool vectorizeExpr(PatternRewriter &rewriter, scf::ForOp forOp, VL vl,
                           Value exp, bool codegen, Value vmask, Value &vexp) {
@@ -616,7 +616,7 @@ struct ForOpRewriter : public OpRewritePattern<scf::ForOp> {
   LogicalResult matchAndRewrite(scf::ForOp op,
                                 PatternRewriter &rewriter) const override {
     // Check for single block, unit-stride for-loop that is generated by
-    // sparse compiler, which means no data dependence analysis is required,
+    // sparsifier, which means no data dependence analysis is required,
     // and its loop-body is very restricted in form.
     if (!op.getRegion().hasOneBlock() || !isConstantIntValue(op.getStep(), 1) ||
         !op->hasAttr(LoopEmitter::getLoopEmitterLoopAttrName()))
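As a rough illustration of the two load forms the genVectorLoad comment distinguishes, the sketch below contrasts a direct load a[lo:hi] with an indirect load a[ind[lo:hi]], the latter being the form the sparsifier emits only for the last index. This is illustrative only, not the pass's actual output; the names, vector width, and element types are assumptions.

```mlir
// Hypothetical contrast of the two vectorized load forms:
func.func @load_forms(%a: memref<?xf64>, %lo: index, %ind: vector<8xindex>,
                      %mask: vector<8xi1>, %pad: vector<8xf64>)
                      -> (vector<8xf64>, vector<8xf64>) {
  %c0 = arith.constant 0 : index
  // Direct form: lhs = a[lo:hi], a contiguous masked load.
  %direct = vector.maskedload %a[%lo], %mask, %pad
      : memref<?xf64>, vector<8xi1>, vector<8xf64> into vector<8xf64>
  // Indirect form: lhs = a[ind[lo:hi]], a gather through an index vector.
  %indirect = vector.gather %a[%c0] [%ind], %mask, %pad
      : memref<?xf64>, vector<8xindex>, vector<8xi1>, vector<8xf64> into vector<8xf64>
  return %direct, %indirect : vector<8xf64>, vector<8xf64>
}
```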

mlir/lib/Dialect/SparseTensor/Transforms/Sparsification.cpp

Lines changed: 5 additions & 5 deletions
@@ -124,7 +124,7 @@ struct AffineDimCollector : public AffineExprVisitor<AffineDimCollector> {
 } // namespace
 
 //===----------------------------------------------------------------------===//
-// Sparse compiler analysis methods.
+// Sparsifier analysis methods.
 //===----------------------------------------------------------------------===//
 
 // TODO: the "idx"-vs-"ldx" naming convention is not self-explanatory,
@@ -840,7 +840,7 @@ static bool computeIterationGraph(CodegenEnv &env, SortMask mask,
 }
 
 //===----------------------------------------------------------------------===//
-// Sparse compiler synthesis methods (statements and expressions).
+// Sparsifier synthesis methods (statements and expressions).
 //===----------------------------------------------------------------------===//
 
 /// Local bufferization of all dense and sparse data structures.
@@ -1139,7 +1139,7 @@ inline static Value genInvariantValue(CodegenEnv &env, ExprId exp) {
   return env.exp(exp).val;
 }
 
-/// Semi-ring branches are simply inlined by the sparse compiler. Prior
+/// Semi-ring branches are simply inlined by the sparsifier. Prior
 /// analysis has verified that all computations are "local" to the inlined
 /// branch or otherwise invariantly defined outside the loop nest, with the
 /// exception of index computations, which need to be relinked to actual
@@ -1562,7 +1562,7 @@ static void endIf(CodegenEnv &env, OpBuilder &builder, scf::IfOp ifOp,
 }
 
 //===----------------------------------------------------------------------===//
-// Sparse compiler synthesis methods (loop sequence).
+// Sparsifier synthesis methods (loop sequence).
 //===----------------------------------------------------------------------===//
 
 /// Starts a loop sequence at given level. Returns true if
@@ -1926,7 +1926,7 @@ static void genResult(CodegenEnv &env, RewriterBase &rewriter) {
 }
 
 //===----------------------------------------------------------------------===//
-// Sparse compiler rewriting methods.
+// Sparsifier rewriting methods.
 //===----------------------------------------------------------------------===//
 
 namespace {

mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp

Lines changed: 2 additions & 2 deletions
@@ -1219,7 +1219,7 @@ Type Merger::inferType(ExprId e, Value src) const {
   return dtp;
 }
 
-/// Ensures that sparse compiler can generate code for expression.
+/// Ensures that the sparsifier can generate code for expression.
 static bool isAdmissibleBranchExp(Operation *op, Block *block, Value v) {
   // Arguments are always admissible.
   if (isa<BlockArgument>(v))
@@ -1239,7 +1239,7 @@ static bool isAdmissibleBranchExp(Operation *op, Block *block, Value v) {
   return true;
 }
 
-/// Ensures that sparse compiler can generate code for branch.
+/// Ensures that the sparsifier can generate code for branch.
 static bool isAdmissibleBranch(Operation *op, Region &region) {
   if (region.empty())
     return true;

mlir/test/Dialect/SparseTensor/one_shot_bufferize_invalid.mlir

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@
 }>
 
 func.func @sparse_tensor_op(%arg0: tensor<64xf32, #SparseVector>) -> tensor<64xf32, #SparseVector> {
-  // expected-error @below{{sparse_tensor ops must be bufferized with the sparse compiler}}
+  // expected-error @below{{sparse_tensor ops must be bufferized with the sparsifier}}
   // expected-error @below{{failed to bufferize op}}
   %0 = sparse_tensor.convert %arg0 : tensor<64xf32, #SparseVector> to tensor<64xf32, #SparseVector>
   return %0 : tensor<64xf32, #SparseVector>

mlir/test/Dialect/SparseTensor/rejected.mlir

Lines changed: 2 additions & 2 deletions
@@ -1,7 +1,7 @@
 // RUN: mlir-opt %s -sparsification | FileCheck %s
 
 
-// The file contains examples that will be rejected by sparse compiler
+// The file contains examples that will be rejected by sparsifier
 // (we expect the linalg.generic unchanged).
 #SparseVector = #sparse_tensor.encoding<{map = (d0) -> (d0 : compressed)}>
 
@@ -29,7 +29,7 @@ func.func @sparse_reduction_subi(%argx: tensor<i32>,
     ins(%arga: tensor<?xi32, #SparseVector>)
     outs(%argx: tensor<i32>) {
     ^bb(%a: i32, %x: i32):
-      // NOTE: `subi %a, %x` is the reason why the program is rejected by the sparse compiler.
+      // NOTE: `subi %a, %x` is the reason why the program is rejected by the sparsifier.
       // It is because we do not allow `-outTensor` in reduction loops as it creates cyclic
       // dependences.
       %t = arith.subi %a, %x: i32

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_quantized_matmul.mlir

Lines changed: 1 addition & 1 deletion
@@ -33,7 +33,7 @@
 #DCSR = #sparse_tensor.encoding<{ map = (d0, d1) -> (d0 : compressed, d1 : compressed) }>
 
 // An example of a quantized sparse matmul. With the zero offset for the
-// sparse input, the sparse compiler generates very efficient code for the
+// sparse input, the sparsifier generates very efficient code for the
 //   x(i,j) += (ext(a(i,k)) - 2) * ext(b(k,j))
 // operation.
 module {

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_reductions_min.mlir

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ module {
 
   // Regular MIN reduction: stored i32 elements AND implicit zeros.
   // Note that dealing with the implicit zeros is taken care of
-  // by the sparse compiler to preserve semantics of the "original".
+  // by the sparsifier to preserve semantics of the "original".
   func.func @min2(%arga: tensor<32xi32, #SV>, %argx: tensor<i32>) -> tensor<i32> {
     %c = tensor.extract %argx[] : tensor<i32>
     %0 = linalg.generic #trait_reduction

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_storage.mlir

Lines changed: 1 addition & 1 deletion
@@ -68,7 +68,7 @@
 module {
   //
   // Main driver that initializes a sparse tensor and inspects the sparse
-  // storage schemes in detail. Note that users of the MLIR sparse compiler
+  // storage schemes in detail. Note that users of the MLIR sparsifier
   // are typically not concerned with such details, but the test ensures
   // everything is working "under the hood".
   //

mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_unary.mlir

Lines changed: 1 addition & 1 deletion
@@ -74,7 +74,7 @@ module {
   }
 
   // Invert the structure of a sparse vector, where missing values are
-  // filled with 1. For a dense output, the sparse compiler initializes
+  // filled with 1. For a dense output, the sparsifier initializes
   // the buffer to all zero at all other places.
   func.func @vector_complement_dense(%arga: tensor<?xf64, #SparseVector>) -> tensor<?xi32> {
     %c = arith.constant 0 : index

mlir/test/Integration/Dialect/SparseTensor/python/tools/sparse_compiler.py

Lines changed: 2 additions & 2 deletions
@@ -2,7 +2,7 @@
 # See https://llvm.org/LICENSE.txt for license information.
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
-# This file contains the sparse compiler class.
+# This file contains the SparseCompiler class.
 
 from mlir import execution_engine
 from mlir import ir
@@ -24,7 +24,7 @@ def __call__(self, module: ir.Module):
         self.compile(module)
 
     def compile(self, module: ir.Module):
-        """Compiles the module by invoking the sparse compiler pipeline."""
+        """Compiles the module by invoking the sparsifier pipeline."""
         passmanager.PassManager.parse(self.pipeline).run(module.operation)
 
     def jit(self, module: ir.Module) -> execution_engine.ExecutionEngine:
