// RUN: mlir-opt -test-tiling-interface=tile-using-scf-forall -split-input-file %s | FileCheck %s

// Tile a dynamically-shaped matmul with tile sizes (10, 20). The tiled loop
// nest is a single scf.forall whose results are combined via
// tensor.parallel_insert_slice and which carries a gpu.block mapping.
func.func @simple_matmul(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>,
    %arg2 : tensor<?x?xf32>) -> tensor<?x?xf32> {
  %0 = linalg.matmul {__internal_transform__ = "simple_gemm"}
      ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>)
      outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
  return %0 : tensor<?x?xf32>
}
// CHECK-DAG: #[[MAP0:.+]] = affine_map<(d0)[s0] -> (10, -d0 + s0)>
// CHECK-DAG: #[[MAP1:.+]] = affine_map<(d0)[s0] -> (20, -d0 + s0)>
// CHECK: func.func @simple_matmul(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-SAME: %[[ARG2:[a-zA-Z0-9]+]]: tensor<?x?xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[M:.+]] = tensor.dim %[[ARG0]], %[[C0]]
// CHECK-DAG: %[[K:.+]] = tensor.dim %[[ARG0]], %[[C1]]
// CHECK-DAG: %[[N:.+]] = tensor.dim %[[ARG1]], %[[C1]]
// CHECK: %[[RESULT:.+]] = scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]]) =
// CHECK-SAME: (0, 0) to (%[[M]], %[[N]]) step (10, 20) shared_outs(%[[INIT:.+]] = %[[ARG2]])
// CHECK: %[[TS_Y:.+]] = affine.min #[[MAP0]](%[[IV0]])[%[[M]]]
// CHECK: %[[TS_X:.+]] = affine.min #[[MAP1]](%[[IV1]])[%[[N]]]
// CHECK: %[[LHS_TILE:.+]] = tensor.extract_slice %[[ARG0]]
// CHECK-SAME: [%[[IV0]], 0] [%[[TS_Y]], %[[K]]] [1, 1]
// CHECK: %[[RHS_TILE:.+]] = tensor.extract_slice %[[ARG1]]
// CHECK-SAME: [0, %[[IV1]]] [%[[K]], %[[TS_X]]] [1, 1]
// CHECK: %[[INIT_TILE:.+]] = tensor.extract_slice %[[INIT]]
// CHECK-SAME: [%[[IV0]], %[[IV1]]] [%[[TS_Y]], %[[TS_X]]] [1, 1]
// CHECK: %[[GEMM_TILE:.+]] = linalg.matmul
// CHECK-SAME: ins(%[[LHS_TILE]], %[[RHS_TILE]] :
// CHECK-SAME: outs(%[[INIT_TILE]] :
// CHECK: scf.forall.in_parallel {
// CHECK: tensor.parallel_insert_slice %[[GEMM_TILE]] into %[[INIT]]
// CHECK-SAME: [%[[IV0]], %[[IV1]]] [%[[TS_Y]], %[[TS_X]]] [1, 1]
// CHECK: mapping = [#gpu.block<y>, #gpu.block<x>]
// CHECK: return %[[RESULT]]
// -----

#map0 = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
#map1 = affine_map<(d0, d1, d2) -> (d0, d2, d1)>
#map2 = affine_map<(d0, d1, d2) -> (d2, d0, d1)>
// Tile a generic op producing two (differently transposed) results: both
// results must be tiled consistently and written back through separate
// tensor.parallel_insert_slice ops in the same scf.forall.in_parallel region.
func.func @multi_result(%arg0 : tensor<128x200x300xf32>) -> (tensor<128x300x200xf32>, tensor<300x128x200xf32>) {
  %init0 = tensor.empty() : tensor<128x300x200xf32>
  %init1 = tensor.empty() : tensor<300x128x200xf32>
  %0:2 = linalg.generic {
      indexing_maps = [#map0, #map1, #map2],
      iterator_types = ["parallel", "parallel", "parallel"]}
      {__internal_transform__ = "parallel_generic_transpose"}
      ins(%arg0 : tensor<128x200x300xf32>)
      outs(%init0, %init1 : tensor<128x300x200xf32>, tensor<300x128x200xf32>) {
    ^bb0(%b0 : f32, %b1 : f32, %b2 : f32):
      linalg.yield %b0, %b0 : f32, f32
  } -> (tensor<128x300x200xf32>, tensor<300x128x200xf32>)
  return %0#0, %0#1 : tensor<128x300x200xf32>, tensor<300x128x200xf32>
}
// CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0) -> (10, -d0 + 128)>
// CHECK-LABEL: func.func @multi_result(
// CHECK-SAME: %[[ARG0:[a-zA-Z0-9]+]]: tensor<128x200x300xf32>)
// CHECK-DAG: %[[INIT0:.+]] = tensor.empty()
// CHECK-DAG: %[[INIT1:.+]] = tensor.empty()
// CHECK: %[[OUTER:[a-zA-Z0-9]+]]:2 = scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]]) = (0, 0) to (128, 300) step (10, 20)
// CHECK-SAME: shared_outs(%[[ARG1:[a-zA-Z0-9]+]] = %[[INIT0]], %[[ARG2:[a-zA-Z0-9]+]] = %[[INIT1]])
// CHECK: %[[TS_Y:.+]] = affine.min #[[$MAP0]](%[[IV0]])
// CHECK: %[[ARG_TILE:.+]] = tensor.extract_slice %[[ARG0]]
// CHECK-SAME: [%[[IV0]], 0, %[[IV1]]] [%[[TS_Y]], 200, 20] [1, 1, 1]
// CHECK-DAG: %[[INIT0_TILE:.+]] = tensor.extract_slice %[[ARG1]]
// CHECK-SAME: [%[[IV0]], %[[IV1]], 0] [%[[TS_Y]], 20, 200] [1, 1, 1]
// CHECK-DAG: %[[INIT1_TILE:.+]] = tensor.extract_slice %[[ARG2]]
// CHECK-SAME: [%[[IV1]], %[[IV0]], 0] [20, %[[TS_Y]], 200] [1, 1, 1]
// CHECK: %[[RESULT_TILE:.+]]:2 = linalg.generic
// CHECK-SAME: ins(%[[ARG_TILE]] :
// CHECK-SAME: outs(%[[INIT0_TILE]], %[[INIT1_TILE]] :
// CHECK: scf.forall.in_parallel {
// CHECK-DAG: tensor.parallel_insert_slice %[[RESULT_TILE]]#0 into %[[ARG1]][%[[IV0]], %[[IV1]], 0] [%[[TS_Y]], 20, 200] [1, 1, 1]
// CHECK-DAG: tensor.parallel_insert_slice %[[RESULT_TILE]]#1 into %[[ARG2]][%[[IV1]], %[[IV0]], 0] [20, %[[TS_Y]], 200] [1, 1, 1]
// CHECK: }
// CHECK: return %[[OUTER]]#0, %[[OUTER]]#1
// -----

// Tile linalg.conv_2d_nhwc_hwcf over three loops with steps (10, 20, 30); the
// strides/dilation attributes must be preserved on the tiled op, and the input
// tile extents are computed from the strides/dilation (see $MAP3/$MAP4).
func.func @conv2D(%arg0 : tensor<?x?x?x?xf32>, %arg1 : tensor<?x?x?x?xf32>,
    %arg2 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
  %0 = linalg.conv_2d_nhwc_hwcf {
      strides = dense<[2, 3]> : tensor<2xi64>,
      dilation = dense<[4, 5]> : tensor<2xi64>,
      __internal_transform__ = "simple_conv"}
      ins(%arg0, %arg1 : tensor<?x?x?x?xf32>, tensor<?x?x?x?xf32>)
      outs(%arg2 : tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32>
  return %0 : tensor<?x?x?x?xf32>
}
// CHECK-DAG: #[[$MAP0:.+]] = affine_map<(d0)[s0] -> (10, -d0 + s0)>
// CHECK-DAG: #[[$MAP1:.+]] = affine_map<(d0)[s0] -> (20, -d0 + s0)>
// CHECK-DAG: #[[$MAP2:.+]] = affine_map<(d0)[s0] -> (30, -d0 + s0)>
// CHECK-DAG: #[[$MAP3:.+]] = affine_map<(d0)[s0] -> (d0 + s0 * 2 - 2)>
// CHECK-DAG: #[[$MAP4:.+]] = affine_map<(d0)[s0] -> (d0 + s0 * 3 - 3)>
// CHECK-LABEL: func.func @conv2D(
// CHECK-SAME: %[[INPUT:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
// CHECK-SAME: %[[FILTER:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
// CHECK-SAME: %[[INIT:[a-zA-Z0-9]+]]: tensor<?x?x?x?xf32>
// CHECK-DAG: %[[C0:.+]] = arith.constant 0 : index
// CHECK-DAG: %[[C1:.+]] = arith.constant 1 : index
// CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
// CHECK-DAG: %[[C3:.+]] = arith.constant 3 : index
// CHECK-DAG: %[[N:.+]] = tensor.dim %[[INPUT]], %[[C0]]
// CHECK-DAG: %[[C:.+]] = tensor.dim %[[INPUT]], %[[C3]]
// CHECK-DAG: %[[P:.+]] = tensor.dim %[[FILTER]], %[[C0]]
// CHECK-DAG: %[[Q:.+]] = tensor.dim %[[FILTER]], %[[C1]]
// CHECK-DAG: %[[F:.+]] = tensor.dim %[[FILTER]], %[[C3]]
// CHECK-DAG: %[[R:.+]] = tensor.dim %[[INIT]], %[[C1]]
// CHECK-DAG: %[[S:.+]] = tensor.dim %[[INIT]], %[[C2]]
// CHECK: %[[RESULT:.+]] = scf.forall (%[[IV0:[a-zA-Z0-9]+]], %[[IV1:[a-zA-Z0-9]+]], %[[IV2:[a-zA-Z0-9]+]]) =
// CHECK-SAME: (0, 0, 0) to (%[[P]], %[[Q]], %[[C]]) step (10, 20, 30) shared_outs(%[[INIT0:.+]] = %[[INIT]])
// CHECK-DAG: %[[TS_P:.+]] = affine.min #[[$MAP0]](%[[IV0]])[%[[P]]]
// CHECK-DAG: %[[TS_Q:.+]] = affine.min #[[$MAP1]](%[[IV1]])[%[[Q]]]
// CHECK-DAG: %[[TS_C:.+]] = affine.min #[[$MAP2]](%[[IV2]])[%[[C]]]
// CHECK-DAG: %[[TS_H:.+]] = affine.apply #[[$MAP3]](%[[TS_P]])[%[[R]]]
// CHECK-DAG: %[[TS_W:.+]] = affine.apply #[[$MAP4]](%[[TS_Q]])[%[[S]]]
// CHECK-DAG: %[[INPUT_TILE:.+]] = tensor.extract_slice %[[INPUT]]
// CHECK-SAME: [0, %[[IV0]], %[[IV1]], %[[IV2]]] [%[[N]], %[[TS_H]], %[[TS_W]], %[[TS_C]]]
// CHECK-DAG: %[[FILTER_TILE:.+]] = tensor.extract_slice %[[FILTER]]
// CHECK-SAME: [%[[IV0]], %[[IV1]], %[[IV2]], 0] [%[[TS_P]], %[[TS_Q]], %[[TS_C]], %[[F]]]
// CHECK-DAG: %[[INIT_TILE:.+]] = tensor.extract_slice %[[INIT0]]
// CHECK-SAME: [0, 0, 0, 0] [%[[N]], %[[R]], %[[S]], %[[F]]]
// CHECK: %[[CONV_TILE:.+]] = linalg.conv_2d_nhwc_hwcf
// CHECK-SAME: dilation = dense<[4, 5]> : tensor<2xi64>, strides = dense<[2, 3]> : tensor<2xi64>
// CHECK-SAME: ins(%[[INPUT_TILE]], %[[FILTER_TILE]] :
// CHECK-SAME: outs(%[[INIT_TILE]] :
// CHECK: scf.forall.in_parallel
// CHECK: tensor.parallel_insert_slice %[[CONV_TILE]] into %[[INIT0]]
// CHECK-SAME: [0, 0, 0, 0] [%[[N]], %[[R]], %[[S]], %[[F]]] [1, 1, 1, 1]
// CHECK: return %[[RESULT]]
// -----

// CHECK: #[[$MAP_ADD:.+]] = affine_map<(d0, d1) -> (d0 + d1)>

// Tile a generic op that uses linalg.index: each linalg.index result inside
// the tiled op must be offset by the corresponding scf.forall induction
// variable (amended via affine.apply with #MAP_ADD).
func.func @indexed_semantics(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>) -> tensor<?x?xf32> {
  // Check that we correctly amend "linalg.index" results.

  %0 = linalg.generic {
      indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
                       affine_map<(d0, d1) -> (d0, d1)>],
      iterator_types = ["parallel", "parallel"]}
      {__internal_transform__ = "indexed_semantics"}
      ins(%arg0: tensor<?x?xf32>)
      outs(%arg1: tensor<?x?xf32>) {
    ^bb0(%arg2: f32, %arg3: f32):
      %1 = linalg.index 0 : index
      %2 = linalg.index 1 : index
      %3 = arith.addi %1, %2 : index
      %4 = arith.index_cast %3 : index to i64
      %5 = arith.uitofp %4 : i64 to f32
      %6 = arith.addf %5, %arg2 : f32
      linalg.yield %6 : f32
  } -> (tensor<?x?xf32>)
  return %0 : tensor<?x?xf32>
}
// CHECK-LABEL: @indexed_semantics
// CHECK: scf.forall (%[[I0:.+]], %[[I1:.+]]) =
// CHECK: %[[INDEX0:.+]] = linalg.index 0
// CHECK: %[[INDEX0_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[INDEX0]], %[[I0]])
// CHECK: %[[INDEX1:.+]] = linalg.index 1
// CHECK: %[[INDEX1_AMENDED:.+]] = affine.apply #[[$MAP_ADD]](%[[INDEX1]], %[[I1]])
// CHECK: arith.addi %[[INDEX0_AMENDED]], %[[INDEX1_AMENDED]]