[mlir][nfc] Update 2 tests for PadOpVectorizationWithTransferWritePattern #122721
Conversation
@llvm/pr-subscribers-mlir-linalg

Author: Andrzej Warzyński (banach-space)

Changes

* Relocates two tests for `PadOpVectorizationWithTransferWritePattern` in "vectorization-pad-patterns.mlir" to group them with other tests for the same pattern.
* Adds a note clarifying that these are negative tests and explains the reasoning behind them.
* Removes `transform.apply_patterns.linalg.decompose_pad` from the TD sequences as it's no longer needed (*).

This is essentially a small clean-up in preparation for upcoming changes.

(*) `transform.apply_patterns.linalg.decompose_pad` was split off from `transform.apply_patterns.linalg.pad_vectorization` in #117329. "vectorization-pad-patterns.mlir" is meant to test the latter, not the former.

Full diff: https://github.com/llvm/llvm-project/pull/122721.diff

1 Files Affected:
- mlir/test/Dialect/Linalg/vectorization-pad-patterns.mlir
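For context, here is a minimal sketch (not part of this patch; function and SSA names are illustrative) of what `PadOpVectorizationWithTransferWritePattern` does in the positive case. When the low pad is zero and the write's result is consumed directly by a zero-offset `tensor.extract_slice`, the pad can be bypassed and the write retargeted at the unpadded source:

```mlir
func.func @positive_case(%src: tensor<5x6xf32>, %vec: vector<7x9xf32>)
    -> tensor<5x6xf32> {
  %c0 = arith.constant 0 : index
  %cst = arith.constant 5.0 : f32
  // Write into the padded tensor, then slice the unpadded region back out.
  %pad = tensor.pad %src low[0, 0] high[5, 7] {
  ^bb0(%i: index, %j: index):
    tensor.yield %cst : f32
  } : tensor<5x6xf32> to tensor<10x13xf32>
  %w = vector.transfer_write %vec, %pad[%c0, %c0]
      : vector<7x9xf32>, tensor<10x13xf32>
  %r = tensor.extract_slice %w[0, 0] [5, 6] [1, 1]
      : tensor<10x13xf32> to tensor<5x6xf32>
  return %r : tensor<5x6xf32>
}

// After the rewrite, the pad is gone and the write targets the unpadded
// source directly; lanes falling outside tensor<5x6xf32> are dropped via
// out-of-bounds masking:
//   %r = vector.transfer_write %vec, %src[%c0, %c0]
//       : vector<7x9xf32>, tensor<5x6xf32>
```

The two tests in the diff below violate exactly these preconditions (non-zero low pad, and a non-zero `extract_slice` offset), so the `tensor.pad` must survive - hence the `CHECK: tensor.pad` lines.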
diff --git a/mlir/test/Dialect/Linalg/vectorization-pad-patterns.mlir b/mlir/test/Dialect/Linalg/vectorization-pad-patterns.mlir
index 41e480648177f5..08a3bbbb301c87 100644
--- a/mlir/test/Dialect/Linalg/vectorization-pad-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization-pad-patterns.mlir
@@ -114,6 +114,74 @@ module attributes {transform.with_named_sequence} {
}
}
+// -----
+
+func.func private @make_vector() -> vector<7x9xf32>
+
+// Negative test - low pad is non-zero
+
+// CHECK-LABEL: func @pad_and_transfer_write_static_non_zero_low_pad
+// CHECK: tensor.pad
+func.func @pad_and_transfer_write_static_non_zero_low_pad(
+ %arg0: tensor<5x6xf32>) -> tensor<5x6xf32> {
+ %c0 = arith.constant 0 : index
+ %c5 = arith.constant 5.0 : f32
+ %0 = tensor.pad %arg0 low[0, 1] high[5, 6] {
+ ^bb0(%arg2: index, %arg3: index):
+ tensor.yield %c5 : f32
+ } : tensor<5x6xf32> to tensor<10x13xf32>
+ %1 = call @make_vector() : () -> vector<7x9xf32>
+ %2 = vector.transfer_write %1, %0[%c0, %c0]
+ : vector<7x9xf32>, tensor<10x13xf32>
+ %3 = tensor.extract_slice %2[0, 0] [5, 6] [1, 1] : tensor<10x13xf32> to tensor<5x6xf32>
+ return %3 : tensor<5x6xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %func_op = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.op<"func.func">
+
+ transform.apply_patterns to %func_op {
+ transform.apply_patterns.linalg.pad_vectorization
+ } : !transform.op<"func.func">
+ transform.yield
+ }
+}
+
+// -----
+
+// Negative test - TransferWriteOp result is not _directly_ consumed by an
+// ExtractSliceOp (note the non-zero offset).
+
+func.func private @make_vector() -> vector<7x9xf32>
+
+// CHECK-LABEL: func @pad_and_transfer_write_static_non_zero_offset
+// CHECK: tensor.pad
+func.func @pad_and_transfer_write_static_non_zero_offset(
+ %arg0: tensor<5x6xf32>) -> tensor<5x6xf32> {
+ %c0 = arith.constant 0 : index
+ %c5 = arith.constant 5.0 : f32
+ %0 = tensor.pad %arg0 low[0, 0] high[5, 7] {
+ ^bb0(%arg2: index, %arg3: index):
+ tensor.yield %c5 : f32
+ } : tensor<5x6xf32> to tensor<10x13xf32>
+ %1 = call @make_vector() : () -> vector<7x9xf32>
+ %2 = vector.transfer_write %1, %0[%c0, %c0]
+ : vector<7x9xf32>, tensor<10x13xf32>
+ %3 = tensor.extract_slice %2[0, 1] [5, 6] [1, 1] : tensor<10x13xf32> to tensor<5x6xf32>
+ return %3 : tensor<5x6xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %func_op = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.op<"func.func">
+
+ transform.apply_patterns to %func_op {
+ transform.apply_patterns.linalg.pad_vectorization
+ } : !transform.op<"func.func">
+ transform.yield
+ }
+}
// -----
@@ -209,75 +277,3 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
-
-// -----
-func.func private @make_vector() -> vector<7x9xf32>
-
-// Variant of @pad_and_transfer_write_static
-
-// CHECK-LABEL: func @pad_and_transfer_write_static_non_zero_low_pad
-// CHECK-NOT: tensor.pad
-// CHECK: linalg.fill
-func.func @pad_and_transfer_write_static_non_zero_low_pad(
- %arg0: tensor<5x6xf32>) -> tensor<5x6xf32> {
- %c0 = arith.constant 0 : index
- %c5 = arith.constant 5.0 : f32
- %0 = tensor.pad %arg0 low[0, 1] high[5, 6] {
- ^bb0(%arg2: index, %arg3: index):
- tensor.yield %c5 : f32
- } : tensor<5x6xf32> to tensor<10x13xf32>
- %1 = call @make_vector() : () -> vector<7x9xf32>
- %2 = vector.transfer_write %1, %0[%c0, %c0]
- : vector<7x9xf32>, tensor<10x13xf32>
- %3 = tensor.extract_slice %2[0, 0] [5, 6] [1, 1] : tensor<10x13xf32> to tensor<5x6xf32>
- return %3 : tensor<5x6xf32>
-}
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %func_op = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.op<"func.func">
-
- transform.apply_patterns to %func_op {
- // TODO: Split into two tests, one for each pattern
- transform.apply_patterns.linalg.decompose_pad
- transform.apply_patterns.linalg.pad_vectorization
- } : !transform.op<"func.func">
- transform.yield
- }
-}
-
-// -----
-func.func private @make_vector() -> vector<7x9xf32>
-
-// Variant of @pad_and_transfer_write_static
-
-// CHECK-LABEL: func @pad_and_transfer_write_static_non_zero_offset
-// CHECK-NOT: tensor.pad
-// CHECK: linalg.fill
-func.func @pad_and_transfer_write_static_non_zero_offset(
- %arg0: tensor<5x6xf32>) -> tensor<5x6xf32> {
- %c0 = arith.constant 0 : index
- %c5 = arith.constant 5.0 : f32
- %0 = tensor.pad %arg0 low[0, 1] high[5, 6] {
- ^bb0(%arg2: index, %arg3: index):
- tensor.yield %c5 : f32
- } : tensor<5x6xf32> to tensor<10x13xf32>
- %1 = call @make_vector() : () -> vector<7x9xf32>
- %2 = vector.transfer_write %1, %0[%c0, %c0]
- : vector<7x9xf32>, tensor<10x13xf32>
- %3 = tensor.extract_slice %2[0, 1] [5, 6] [1, 1] : tensor<10x13xf32> to tensor<5x6xf32>
- return %3 : tensor<5x6xf32>
-}
-
-module attributes {transform.with_named_sequence} {
- transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
- %func_op = transform.structured.match ops{["func.func"]} in %arg1 : (!transform.any_op) -> !transform.op<"func.func">
-
- transform.apply_patterns to %func_op {
- // TODO: Split into two tests, one for each pattern
- transform.apply_patterns.linalg.decompose_pad
- transform.apply_patterns.linalg.pad_vectorization
- } : !transform.op<"func.func">
- transform.yield
- }
-}
Thanks! I really appreciate your contribution of these test improvements!
[mlir][nfc] Update 2 tests for PadOpVectorizationWithTransferWritePattern

* Relocates two tests for `PadOpVectorizationWithTransferWritePattern` in "vectorization-pad-patterns.mlir" to group them with other tests for the same pattern.
* Adds a note clarifying that these are negative tests and explains the reasoning behind them.
* Removes `transform.apply_patterns.linalg.decompose_pad` from the TD sequences as it's no longer needed (*).

This is essentially a small clean-up in preparation for upcoming changes.

(*) `transform.apply_patterns.linalg.decompose_pad` was split off from `transform.apply_patterns.linalg.pad_vectorization` in #117329. "vectorization-pad-patterns.mlir" is meant to test the latter, not the former.
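For reference, a hedged sketch of the decomposition that the removed `transform.apply_patterns.linalg.decompose_pad` step performs (function and SSA names are illustrative; shapes borrowed from the tests above). It rewrites `tensor.pad` into a `linalg.fill` of a fresh tensor followed by a `tensor.insert_slice`, which is why the old test variants checked for `linalg.fill` rather than `tensor.pad`:

```mlir
// Decomposed form of:
//   tensor.pad %src low[0, 1] high[5, 6] : tensor<5x6xf32> to tensor<10x13xf32>
func.func @decomposed_pad(%src: tensor<5x6xf32>) -> tensor<10x13xf32> {
  %cst = arith.constant 5.0 : f32
  // Materialize the padded shape, fill it with the pad value, then insert
  // the source tensor at the low-pad offsets [0, 1].
  %empty = tensor.empty() : tensor<10x13xf32>
  %fill = linalg.fill ins(%cst : f32) outs(%empty : tensor<10x13xf32>)
      -> tensor<10x13xf32>
  %res = tensor.insert_slice %src into %fill[0, 1] [5, 6] [1, 1]
      : tensor<5x6xf32> into tensor<10x13xf32>
  return %res : tensor<10x13xf32>
}
```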
Thank you - that's very kind feedback 🙏🏻 Makes this worthwhile. Also, I really appreciate your help reviewing these 😅 |
Force-pushed from e638f5b to 1280b52.