Skip to content

Commit 91464e1

Browse files
[mlir][bufferization][NFC] Rename copy_tensor op to materialize_in_destination (#65467)
The previous name was badly chosen. The op is used to ensure that a computation materializes in the future buffer of a certain tensor.
1 parent b4c66f4 commit 91464e1

File tree

10 files changed

+92
-75
lines changed

10 files changed

+92
-75
lines changed

mlir/include/mlir/Dialect/Bufferization/IR/BufferizationOps.td

Lines changed: 21 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -209,22 +209,33 @@ def Bufferization_CloneOp : Bufferization_Op<"clone", [
209209
}
210210

211211
//===----------------------------------------------------------------------===//
212-
// CopyTensorOp
212+
// MaterializeInDestinationOp
213213
//===----------------------------------------------------------------------===//
214214

215-
def Bufferization_CopyTensorOp : Bufferization_Op<"copy_tensor",
216-
[BufferizableOpInterface, SameOperandsAndResultType,
217-
DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>]> {
215+
def Bufferization_MaterializeInDestinationOp
216+
: Bufferization_Op<"materialize_in_destination",
217+
[BufferizableOpInterface, SameOperandsAndResultType,
218+
DeclareOpInterfaceMethods<ReifyRankedShapedTypeOpInterface>]> {
218219
let summary = "copy a tensor";
219220

220221
let description = [{
221-
Copy the contents of the source tensor into the destination tensor. This
222-
operation is guaranteed to bufferize to a memory copy.
222+
This op indicates that the data of the `source` tensor should materialize
223+
in the future buffer of the `dest` tensor. Both tensors must have the same
224+
shape and element type at runtime.
225+
226+
By default, this op bufferizes to a memcpy from the future buffer of the
227+
`source` tensor to the future buffer of the `dest` tensor. However,
228+
transformations such as "empty tensor elimination" may rewrite IR such that
229+
a computation is performed directly in the future buffer of the `dest`
230+
tensor and no memcpy is needed.
231+
232+
Note: "tensor.insert_slice" could be used for the same purpose, but since
233+
tensor dialect ops only indicate *what* should be computed but not *where*,
234+
it could fold away, causing the computation to materialize in a different
235+
buffer.
223236
}];
224237

225-
let arguments = (ins AnyTensor:$source,
226-
AnyTensor:$dest);
227-
238+
let arguments = (ins AnyTensor:$source, AnyTensor:$dest);
228239
let results = (outs AnyTensor:$result);
229240

230241
let extraClassDeclaration = [{
@@ -245,7 +256,7 @@ def Bufferization_CopyTensorOp : Bufferization_Op<"copy_tensor",
245256
}
246257
}];
247258

248-
let assemblyFormat = "$source `,` $dest attr-dict `:` type($source)";
259+
let assemblyFormat = "$source `in` $dest attr-dict `:` type($source)";
249260
}
250261

251262
//===----------------------------------------------------------------------===//

mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -941,7 +941,7 @@ def PadOp : Op<Transform_Dialect, "structured.pad",
941941
the original destination tensor of the targeted op. The op that copies back
942942
the result can be customized with `copy_back_op`:
943943

944-
* "bufferization.copy_tensor" (default)
944+
* "bufferization.materialize_in_destination" (default)
945945
* "linalg.copy"
946946
* "none" (no copy back)
947947

@@ -966,7 +966,7 @@ def PadOp : Op<Transform_Dialect, "structured.pad",
966966
DefaultValuedAttr<
967967
TypedArrayAttrBase<I64ArrayAttr, "array of arrays of i64">,
968968
"{}">:$transpose_paddings,
969-
DefaultValuedAttr<StrAttr, "::mlir::bufferization::CopyTensorOp::getOperationName()">:$copy_back_op);
969+
DefaultValuedAttr<StrAttr, "::mlir::bufferization::MaterializeInDestinationOp::getOperationName()">:$copy_back_op);
970970
let results = (outs TransformHandleTypeInterface:$padded,
971971
TransformHandleTypeInterface:$pad,
972972
TransformHandleTypeInterface:$copy);
@@ -986,7 +986,7 @@ def PadOp : Op<Transform_Dialect, "structured.pad",
986986
CArg<"ArrayRef<int64_t>", "{}">:$padToMultipleOf,
987987
CArg<"ArrayRef<int64_t>", "{}">:$packPaddings,
988988
CArg<"ArrayRef<Attribute>", "{}">:$transposePaddings,
989-
CArg<"StringRef", "::mlir::bufferization::CopyTensorOp::getOperationName()">:$copyBackOp)>
989+
CArg<"StringRef", "::mlir::bufferization::MaterializeInDestinationOp::getOperationName()">:$copyBackOp)>
990990
];
991991

992992
let extraClassDeclaration = [{

mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -299,12 +299,12 @@ struct LinalgPaddingOptions {
299299
}
300300
enum class CopyBackOp : int8_t {
301301
None = 0,
302-
BufferizationCopyTensor = 1,
302+
BufferizationMaterializeInDestination = 1,
303303
LinalgCopy = 2
304304
};
305305
/// The op to be used for copying the padded result to the original
306306
/// destination tensor.
307-
CopyBackOp copyBackOp = CopyBackOp::BufferizationCopyTensor;
307+
CopyBackOp copyBackOp = CopyBackOp::BufferizationMaterializeInDestination;
308308
LinalgPaddingOptions &setCopyBackOp(CopyBackOp op) {
309309
copyBackOp = op;
310310
return *this;

mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp

Lines changed: 44 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -441,48 +441,6 @@ Value AllocTensorOp::getDynamicSize(OpBuilder &b, unsigned idx) {
441441
return getOperand(getIndexOfDynamicSize(idx));
442442
}
443443

444-
//===----------------------------------------------------------------------===//
445-
// CopyTensorOp
446-
//===----------------------------------------------------------------------===//
447-
448-
bool CopyTensorOp::bufferizesToMemoryRead(OpOperand &opOperand,
449-
const AnalysisState &state) {
450-
if (&opOperand == &getOperation()->getOpOperand(0) /*source*/)
451-
return true;
452-
return false;
453-
}
454-
455-
bool CopyTensorOp::bufferizesToMemoryWrite(OpOperand &opOperand,
456-
const AnalysisState &state) {
457-
if (&opOperand == &getOperation()->getOpOperand(1) /*dest*/)
458-
return true;
459-
return false;
460-
}
461-
462-
AliasingValueList CopyTensorOp::getAliasingValues(OpOperand &opOperand,
463-
const AnalysisState &state) {
464-
if (&opOperand == &getOperation()->getOpOperand(1) /*dest*/)
465-
return {{getOperation()->getResult(0), BufferRelation::Equivalent}};
466-
return {};
467-
}
468-
469-
LogicalResult CopyTensorOp::bufferize(RewriterBase &rewriter,
470-
const BufferizationOptions &options) {
471-
FailureOr<Value> buffer = getBuffer(rewriter, getDest(), options);
472-
if (failed(buffer))
473-
return failure();
474-
rewriter.create<memref::TensorStoreOp>(getLoc(), getSource(), *buffer);
475-
replaceOpWithBufferizedValues(rewriter, getOperation(), *buffer);
476-
return success();
477-
}
478-
479-
LogicalResult CopyTensorOp::reifyResultShapes(
480-
OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
481-
reifiedReturnShapes.resize(1, SmallVector<OpFoldResult>(getType().getRank()));
482-
reifiedReturnShapes[0] = tensor::getMixedSizes(builder, getLoc(), getDest());
483-
return success();
484-
}
485-
486444
//===----------------------------------------------------------------------===//
487445
// CloneOp
488446
//===----------------------------------------------------------------------===//
@@ -585,6 +543,50 @@ LogicalResult DeallocTensorOp::bufferize(RewriterBase &rewriter,
585543
return success();
586544
}
587545

546+
//===----------------------------------------------------------------------===//
547+
// MaterializeInDestinationOp
548+
//===----------------------------------------------------------------------===//
549+
550+
bool MaterializeInDestinationOp::bufferizesToMemoryRead(
551+
OpOperand &opOperand, const AnalysisState &state) {
552+
if (&opOperand == &getOperation()->getOpOperand(0) /*source*/)
553+
return true;
554+
return false;
555+
}
556+
557+
bool MaterializeInDestinationOp::bufferizesToMemoryWrite(
558+
OpOperand &opOperand, const AnalysisState &state) {
559+
if (&opOperand == &getOperation()->getOpOperand(1) /*dest*/)
560+
return true;
561+
return false;
562+
}
563+
564+
AliasingValueList
565+
MaterializeInDestinationOp::getAliasingValues(OpOperand &opOperand,
566+
const AnalysisState &state) {
567+
if (&opOperand == &getOperation()->getOpOperand(1) /*dest*/)
568+
return {{getOperation()->getResult(0), BufferRelation::Equivalent}};
569+
return {};
570+
}
571+
572+
LogicalResult
573+
MaterializeInDestinationOp::bufferize(RewriterBase &rewriter,
574+
const BufferizationOptions &options) {
575+
FailureOr<Value> buffer = getBuffer(rewriter, getDest(), options);
576+
if (failed(buffer))
577+
return failure();
578+
rewriter.create<memref::TensorStoreOp>(getLoc(), getSource(), *buffer);
579+
replaceOpWithBufferizedValues(rewriter, getOperation(), *buffer);
580+
return success();
581+
}
582+
583+
LogicalResult MaterializeInDestinationOp::reifyResultShapes(
584+
OpBuilder &builder, ReifiedRankedShapedTypeDims &reifiedReturnShapes) {
585+
reifiedReturnShapes.resize(1, SmallVector<OpFoldResult>(getType().getRank()));
586+
reifiedReturnShapes[0] = tensor::getMixedSizes(builder, getLoc(), getDest());
587+
return success();
588+
}
589+
588590
//===----------------------------------------------------------------------===//
589591
// ToTensorOp
590592
//===----------------------------------------------------------------------===//

mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1683,9 +1683,10 @@ transform::PadOp::apply(transform::TransformRewriter &rewriter,
16831683
options.padToMultipleOf = padToMultipleOf;
16841684
options.paddingValues = paddingValues;
16851685
options.packPaddings = packPaddings;
1686-
if (getCopyBackOp() == bufferization::CopyTensorOp::getOperationName()) {
1687-
options.copyBackOp =
1688-
LinalgPaddingOptions::CopyBackOp::BufferizationCopyTensor;
1686+
if (getCopyBackOp() ==
1687+
bufferization::MaterializeInDestinationOp::getOperationName()) {
1688+
options.copyBackOp = LinalgPaddingOptions::CopyBackOp::
1689+
BufferizationMaterializeInDestination;
16891690
} else if (getCopyBackOp() == linalg::CopyOp::getOperationName()) {
16901691
options.copyBackOp = LinalgPaddingOptions::CopyBackOp::LinalgCopy;
16911692
} else if (getCopyBackOp() == kCopyOpNone) {
@@ -1761,7 +1762,8 @@ LogicalResult transform::PadOp::verify() {
17611762
<< attr;
17621763
}
17631764
}
1764-
if (getCopyBackOp() != bufferization::CopyTensorOp::getOperationName() &&
1765+
if (getCopyBackOp() !=
1766+
bufferization::MaterializeInDestinationOp::getOperationName() &&
17651767
getCopyBackOp() != linalg::CopyOp::getOperationName() &&
17661768
getCopyBackOp() != kCopyOpNone)
17671769
return emitOpError() << "invalid copy_back_op";

mlir/lib/Dialect/Linalg/Transforms/Padding.cpp

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -245,9 +245,11 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, LinalgOp opToPad,
245245
std::get<1>(it)->get())
246246
.getResult(0));
247247
} else if (options.copyBackOp ==
248-
LinalgPaddingOptions::CopyBackOp::BufferizationCopyTensor) {
249-
replacements.push_back(rewriter.create<bufferization::CopyTensorOp>(
250-
loc, std::get<0>(it), std::get<1>(it)->get()));
248+
LinalgPaddingOptions::CopyBackOp::
249+
BufferizationMaterializeInDestination) {
250+
replacements.push_back(
251+
rewriter.create<bufferization::MaterializeInDestinationOp>(
252+
loc, std::get<0>(it), std::get<1>(it)->get()));
251253
} else {
252254
llvm_unreachable("unsupported copy back op");
253255
}

mlir/test/Dialect/Bufferization/Transforms/one-shot-bufferize.mlir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -224,6 +224,6 @@ func.func @tensor_copy(%arg0: tensor<5xf32>) -> tensor<5xf32> {
224224
// CHECK: memref.dealloc %[[alloc]]
225225
// CHECK: return %[[r]]
226226
%dest = bufferization.alloc_tensor() : tensor<5xf32>
227-
%0 = bufferization.copy_tensor %arg0, %dest : tensor<5xf32>
227+
%0 = bufferization.materialize_in_destination %arg0 in %dest : tensor<5xf32>
228228
return %0 : tensor<5xf32>
229229
}

mlir/test/Dialect/Bufferization/invalid.mlir

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -99,9 +99,9 @@ func.func @invalid_writable_on_op() {
9999
// -----
100100

101101
// expected-note @below{{prior use here}}
102-
func.func @invalid_tensor_copy(%arg0: tensor<?xf32>, %arg1: tensor<5xf32>) {
102+
func.func @invalid_materialize_in_destination(%arg0: tensor<?xf32>, %arg1: tensor<5xf32>) {
103103
// expected-error @below{{expects different type than prior uses: 'tensor<?xf32>' vs 'tensor<5xf32>'}}
104-
bufferization.copy_tensor %arg0, %arg1 : tensor<?xf32>
104+
bufferization.materialize_in_destination %arg0 in %arg1 : tensor<?xf32>
105105
}
106106

107107
// -----

mlir/test/Dialect/Bufferization/ops.mlir

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -58,11 +58,11 @@ func.func @test_dealloc_tensor_op(%arg0: tensor<4xi32>) {
5858
return
5959
}
6060

61-
// CHECK-LABEL: func @test_copy_tensor_op
62-
func.func @test_copy_tensor_op(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>)
61+
// CHECK-LABEL: func @test_materialize_in_destination_op
62+
func.func @test_materialize_in_destination_op(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>)
6363
-> tensor<?xf32> {
64-
// CHECK: bufferization.copy_tensor {{.*}} : tensor<?xf32>
65-
%1 = bufferization.copy_tensor %arg0, %arg1 : tensor<?xf32>
64+
// CHECK: bufferization.materialize_in_destination {{.*}} : tensor<?xf32>
65+
%1 = bufferization.materialize_in_destination %arg0 in %arg1 : tensor<?xf32>
6666
return %1 : tensor<?xf32>
6767
}
6868

mlir/test/Dialect/Linalg/transform-op-pad.mlir

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ func.func @static_sizes_output_divisible(%arg0: tensor<24x12xf32>,
2727
// CHECK-SAME: outs(%[[T2]] : tensor<4x5xf32>)
2828

2929
// CHECK: %[[T6:.*]] = tensor.extract_slice %[[T5]]
30-
// CHECK: %[[T7:.*]] = bufferization.copy_tensor %[[T6]], %[[T2]]
30+
// CHECK: %[[T7:.*]] = bufferization.materialize_in_destination %[[T6]] in %[[T2]]
3131
%4 = linalg.matmul ins(%1, %2 : tensor<4x?xf32>, tensor<?x5xf32>) outs(%3 : tensor<4x5xf32>) -> tensor<4x5xf32>
3232
%5 = tensor.insert_slice %4 into %arg2[%iv0, %iv1] [4, 5] [1, 1] : tensor<4x5xf32> into tensor<24x25xf32>
3333
func.return %5 : tensor<24x25xf32>
@@ -40,9 +40,9 @@ transform.sequence failures(propagate) {
4040
padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
4141
padding_dimensions=[0, 1, 2],
4242
pack_paddings=[1, 1, 0]
43-
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.op<"bufferization.copy_tensor">)
43+
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.op<"bufferization.materialize_in_destination">)
4444
// expected-remark @below {{1}}
45-
test_print_number_of_associated_payload_ir_ops %copy_back : !transform.op<"bufferization.copy_tensor">
45+
test_print_number_of_associated_payload_ir_ops %copy_back : !transform.op<"bufferization.materialize_in_destination">
4646
}
4747

4848
// -----
@@ -272,7 +272,7 @@ func.func @pack_everything(%arg0: tensor<24x12xf32>,
272272
// CHECK: %[[T6:.*]] = tensor.extract_slice %[[T5]]
273273
// Copy back result to the original buffer, so that the destination of the
274274
// computation does not change.
275-
// CHECK: %[[T7:.*]] = bufferization.copy_tensor %[[T6]], %[[T2]]
275+
// CHECK: %[[T7:.*]] = bufferization.materialize_in_destination %[[T6]] in %[[T2]]
276276
%4 = linalg.matmul ins(%1, %2 : tensor<4x?xf32>, tensor<?x5xf32>) outs(%3 : tensor<4x5xf32>) -> tensor<4x5xf32>
277277

278278
// CHECK: %[[T8:.*]] = tensor.insert_slice %[[T7]] into %{{.*}}

0 commit comments

Comments
 (0)