 // RUN: mlir-opt %s | mlir-opt | FileCheck %s
 // RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s

+// CHECK: #[[$MAP:.*]] = affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>
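+// Note: FileCheck binds whatever alias the printer assigns to this affine map
+// to $MAP; the leading `$` keeps the variable visible across CHECK-LABEL
+// blocks, so every function below can refer to #[[$MAP]].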
+
+// CHECK-LABEL: func @alloc() {
+func.func @alloc() {
+^bb0:
+  // Test simple alloc.
+  // CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32, 1>
+  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
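+  // Note: the identity layout map is the default for memref types and is
+  // elided by the printer, which is why the CHECK line above omits it.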
+
+  %c0 = "arith.constant"() {value = 0 : index} : () -> index
+  %c1 = "arith.constant"() {value = 1 : index} : () -> index
+
+  // Test alloc with dynamic dimensions.
+  // CHECK: %{{.*}} = memref.alloc(%{{.*}}, %{{.*}}) : memref<?x?xf32, 1>
+  %1 = memref.alloc(%c0, %c1) : memref<?x?xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  // Test alloc with no dynamic dimensions and one symbol.
+  // CHECK: %{{.*}} = memref.alloc()[%{{.*}}] : memref<2x4xf32, #[[$MAP]], 1>
+  %2 = memref.alloc()[%c0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
+
+  // Test alloc with dynamic dimensions and one symbol.
+  // CHECK: %{{.*}} = memref.alloc(%{{.*}})[%{{.*}}] : memref<2x?xf32, #[[$MAP]], 1>
+  %3 = memref.alloc(%c1)[%c0] : memref<2x?xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>, 1>
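+  // Note: operands in parentheses bind the dynamic dimensions of the result
+  // type, while operands in square brackets bind the layout map's symbols.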
+
+  // Alloc with no mappings.
+  // b/116054838 Parser crash while parsing ill-formed AllocOp
+  // CHECK: %{{.*}} = memref.alloc() : memref<2xi32>
+  %4 = memref.alloc() : memref<2 x i32>
+
+  // CHECK: return
+  return
+}
+
+// CHECK-LABEL: func @alloca() {
+func.func @alloca() {
+^bb0:
+  // Test simple alloca.
+  // CHECK: %{{.*}} = memref.alloca() : memref<1024x64xf32, 1>
+  %0 = memref.alloca() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  %c0 = "arith.constant"() {value = 0 : index} : () -> index
+  %c1 = "arith.constant"() {value = 1 : index} : () -> index
+
+  // Test alloca with dynamic dimensions.
+  // CHECK: %{{.*}} = memref.alloca(%{{.*}}, %{{.*}}) : memref<?x?xf32, 1>
+  %1 = memref.alloca(%c0, %c1) : memref<?x?xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  // Test alloca with no dynamic dimensions and one symbol.
+  // CHECK: %{{.*}} = memref.alloca()[%{{.*}}] : memref<2x4xf32, #[[$MAP]], 1>
+  %2 = memref.alloca()[%c0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
+
+  // Test alloca with dynamic dimensions and one symbol.
+  // CHECK: %{{.*}} = memref.alloca(%{{.*}})[%{{.*}}] : memref<2x?xf32, #[[$MAP]], 1>
+  %3 = memref.alloca(%c1)[%c0] : memref<2x?xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>, 1>
+
+  // Alloca with no mappings, but with alignment.
+  // CHECK: %{{.*}} = memref.alloca() {alignment = 64 : i64} : memref<2xi32>
+  %4 = memref.alloca() {alignment = 64} : memref<2 x i32>
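+  // Note: an integer attribute with no explicit type defaults to i64, so the
+  // parsed form above round-trips as `alignment = 64 : i64`.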
+
+  return
+}
+
+// CHECK-LABEL: func @dealloc() {
+func.func @dealloc() {
+^bb0:
+  // CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32>
+  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 0>
+
+  // CHECK: memref.dealloc %{{.*}} : memref<1024x64xf32>
+  memref.dealloc %0 : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 0>
+  return
+}
+
+// CHECK-LABEL: func @load_store
+func.func @load_store() {
+^bb0:
+  // CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32, 1>
+  %0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  %1 = arith.constant 0 : index
+  %2 = arith.constant 1 : index
+
+  // CHECK: %{{.*}} = memref.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<1024x64xf32, 1>
+  %3 = memref.load %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  // CHECK: memref.store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}] : memref<1024x64xf32, 1>
+  memref.store %3, %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
+
+  return
+}
+
+// CHECK-LABEL: func @dma_ops()
+func.func @dma_ops() {
+  %c0 = arith.constant 0 : index
+  %stride = arith.constant 32 : index
+  %elt_per_stride = arith.constant 16 : index
+
+  %A = memref.alloc() : memref<256 x f32, affine_map<(d0) -> (d0)>, 0>
+  %Ah = memref.alloc() : memref<256 x f32, affine_map<(d0) -> (d0)>, 1>
+  %tag = memref.alloc() : memref<1 x f32>
+
+  %num_elements = arith.constant 256 : index
+
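+  // Note: memref.dma_start takes (source[indices], destination[indices],
+  // number of elements, tag[indices]); the transfer proceeds asynchronously,
+  // and memref.dma_wait blocks until the DMA associated with the tag memref
+  // has completed.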
+  memref.dma_start %A[%c0], %Ah[%c0], %num_elements, %tag[%c0] : memref<256 x f32>, memref<256 x f32, 1>, memref<1 x f32>
+  memref.dma_wait %tag[%c0], %num_elements : memref<1 x f32>
+  // CHECK: dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}}[%{{.*}}] : memref<256xf32>, memref<256xf32, 1>, memref<1xf32>
+  // CHECK-NEXT: dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xf32>
+
+  // DMA with strides
+  memref.dma_start %A[%c0], %Ah[%c0], %num_elements, %tag[%c0], %stride, %elt_per_stride : memref<256 x f32>, memref<256 x f32, 1>, memref<1 x f32>
+  memref.dma_wait %tag[%c0], %num_elements : memref<1 x f32>
+  // CHECK-NEXT: dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}} : memref<256xf32>, memref<256xf32, 1>, memref<1xf32>
+  // CHECK-NEXT: dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xf32>
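+  // Note: the two trailing operands request a strided transfer: the op copies
+  // %elt_per_stride contiguous elements for every %stride elements of
+  // distance, both counted in units of elements.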
+
+  return
+}
+
 // CHECK-LABEL: func @memref_reinterpret_cast
 func.func @memref_reinterpret_cast(%in: memref<?xf32>)
     -> memref<10x?xf32, strided<[?, 1], offset: ?>> {
@@ -90,6 +207,87 @@ func.func @memref_alloca_scope() {
   return
 }

+// CHECK-LABEL: func @memref_cast(%arg0
+func.func @memref_cast(%arg0: memref<4xf32>, %arg1 : memref<?xf32>, %arg2 : memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>>) {
+  // CHECK: memref.cast %{{.*}} : memref<4xf32> to memref<?xf32>
+  %0 = memref.cast %arg0 : memref<4xf32> to memref<?xf32>
+
+  // CHECK: memref.cast %{{.*}} : memref<?xf32> to memref<4xf32>
+  %1 = memref.cast %arg1 : memref<?xf32> to memref<4xf32>
+
+  // CHECK: memref.cast %{{.*}} : memref<64x16x4xf32, strided<[64, 4, 1]>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
+  %2 = memref.cast %arg2 : memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
+
+  // CHECK: memref.cast {{%.*}} : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1]>>
+  %3 = memref.cast %2 : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>>
+
+  // CHECK: memref.cast %{{.*}} : memref<4xf32> to memref<*xf32>
+  %4 = memref.cast %1 : memref<4xf32> to memref<*xf32>
+
+  // CHECK: memref.cast %{{.*}} : memref<*xf32> to memref<4xf32>
+  %5 = memref.cast %4 : memref<*xf32> to memref<4xf32>
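+  // Note: memref.cast may refine or erase static size/stride/offset
+  // information and may convert between ranked and unranked memrefs, as
+  // exercised above; the element type and memory space must stay the same.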
+  return
+}
+
+// Check that unranked memrefs with non-default memory space roundtrip
+// properly.
+// CHECK-LABEL: @unranked_memref_roundtrip(memref<*xf32, 4>)
+func.func private @unranked_memref_roundtrip(memref<*xf32, 4>)
+
+// CHECK-LABEL: func @load_store_prefetch
+func.func @load_store_prefetch(memref<4x4xi32>, index) {
+^bb0(%0: memref<4x4xi32>, %1: index):
+  // CHECK: %0 = memref.load %arg0[%arg1, %arg1] : memref<4x4xi32>
+  %2 = "memref.load"(%0, %1, %1) : (memref<4x4xi32>, index, index) -> i32
+
+  // CHECK: %{{.*}} = memref.load %arg0[%arg1, %arg1] : memref<4x4xi32>
+  %3 = memref.load %0[%1, %1] : memref<4x4xi32>
+
+  // CHECK: memref.prefetch %arg0[%arg1, %arg1], write, locality<1>, data : memref<4x4xi32>
+  memref.prefetch %0[%1, %1], write, locality<1>, data : memref<4x4xi32>
+
+  // CHECK: memref.prefetch %arg0[%arg1, %arg1], read, locality<3>, instr : memref<4x4xi32>
+  memref.prefetch %0[%1, %1], read, locality<3>, instr : memref<4x4xi32>
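+  // Note: memref.prefetch takes a read/write specifier, a locality hint from
+  // locality<0> (no temporal locality) to locality<3> (keep in cache), and a
+  // data or instr cache specifier.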
+
+  return
+}
+
+// Test zero-dimensional memrefs, which take no indices in load/store.
+// CHECK-LABEL: func @zero_dim_no_idx
+func.func @zero_dim_no_idx(%arg0 : memref<i32>, %arg1 : memref<i32>, %arg2 : memref<i32>) {
+  %0 = memref.load %arg0[] : memref<i32>
+  memref.store %0, %arg1[] : memref<i32>
+  return
+  // CHECK: %0 = memref.load %{{.*}}[] : memref<i32>
+  // CHECK: memref.store %{{.*}}, %{{.*}}[] : memref<i32>
+}
+
+// CHECK-LABEL: func @memref_view(%arg0
+func.func @memref_view(%arg0 : index, %arg1 : index, %arg2 : index) {
+  %0 = memref.alloc() : memref<2048xi8>
+  // Test two dynamic sizes and dynamic offset.
+  // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<?x?xf32>
+  %1 = memref.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32>
+
+  // Test one dynamic size and dynamic offset.
+  // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<4x?xf32>
+  %3 = memref.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32>
+
+  // Test static sizes and static offset.
+  // CHECK: memref.view {{.*}} : memref<2048xi8> to memref<64x4xf32>
+  %c0 = arith.constant 0 : index
+  %5 = memref.view %0[%c0][] : memref<2048xi8> to memref<64x4xf32>
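+  // Note: the first bracketed operand of memref.view is a dynamic byte shift
+  // into the i8 source buffer; the second bracket supplies one operand per
+  // dynamic size in the result type, so it is empty for fully static views.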
+  return
+}
+
+// CHECK-LABEL: func @assume_alignment
+// CHECK-SAME: %[[MEMREF:.*]]: memref<4x4xf16>
+func.func @assume_alignment(%0: memref<4x4xf16>) {
+  // CHECK: memref.assume_alignment %[[MEMREF]], 16 : memref<4x4xf16>
+  memref.assume_alignment %0, 16 : memref<4x4xf16>
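+  // Note: memref.assume_alignment does not change program semantics; it only
+  // asserts that the memref above is aligned to the given power-of-two byte
+  // boundary, which later transformations may exploit.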
+  return
+}
+
 // CHECK-LABEL: func @expand_collapse_shape_static
 func.func @expand_collapse_shape_static(
     %arg0: memref<3x4x5xf32>,