Skip to content

Commit 00a1f1a

Browse files
authored
[MLIR] NFC. Move leftover memref op test cases out of test/IR (#115583)
Move memref dialect ops' test cases out of test/IR/ and into test/Dialect/MemRef/. It was also surprising to not find test cases of ops like memref.view in test/Dialect/MemRef/. NFC.
1 parent 04b295e commit 00a1f1a

File tree

3 files changed

+198
-199
lines changed

3 files changed

+198
-199
lines changed

mlir/test/Dialect/MemRef/ops.mlir

Lines changed: 198 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,123 @@
11
// RUN: mlir-opt %s | mlir-opt | FileCheck %s
22
// RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s
33

4+
// CHECK: #[[$MAP:.*]] = affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>
5+
6+
// CHECK-LABEL: func @alloc() {
7+
func.func @alloc() {
8+
^bb0:
9+
// Test simple alloc.
10+
// CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32, 1>
11+
%0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
12+
13+
%c0 = "arith.constant"() {value = 0: index} : () -> index
14+
%c1 = "arith.constant"() {value = 1: index} : () -> index
15+
16+
// Test alloc with dynamic dimensions.
17+
// CHECK: %{{.*}} = memref.alloc(%{{.*}}, %{{.*}}) : memref<?x?xf32, 1>
18+
%1 = memref.alloc(%c0, %c1) : memref<?x?xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
19+
20+
// Test alloc with no dynamic dimensions and one symbol.
21+
// CHECK: %{{.*}} = memref.alloc()[%{{.*}}] : memref<2x4xf32, #[[$MAP]], 1>
22+
%2 = memref.alloc()[%c0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
23+
24+
// Test alloc with dynamic dimensions and one symbol.
25+
// CHECK: %{{.*}} = memref.alloc(%{{.*}})[%{{.*}}] : memref<2x?xf32, #[[$MAP]], 1>
26+
%3 = memref.alloc(%c1)[%c0] : memref<2x?xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>, 1>
27+
28+
// Alloc with no mappings.
29+
// b/116054838 Parser crash while parsing ill-formed AllocOp
30+
// CHECK: %{{.*}} = memref.alloc() : memref<2xi32>
31+
%4 = memref.alloc() : memref<2 x i32>
32+
33+
// CHECK: return
34+
return
35+
}
36+
37+
// CHECK-LABEL: func @alloca() {
38+
func.func @alloca() {
39+
^bb0:
40+
// Test simple alloc.
41+
// CHECK: %{{.*}} = memref.alloca() : memref<1024x64xf32, 1>
42+
%0 = memref.alloca() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
43+
44+
%c0 = "arith.constant"() {value = 0: index} : () -> index
45+
%c1 = "arith.constant"() {value = 1: index} : () -> index
46+
47+
// Test alloca with dynamic dimensions.
48+
// CHECK: %{{.*}} = memref.alloca(%{{.*}}, %{{.*}}) : memref<?x?xf32, 1>
49+
%1 = memref.alloca(%c0, %c1) : memref<?x?xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
50+
51+
// Test alloca with no dynamic dimensions and one symbol.
52+
// CHECK: %{{.*}} = memref.alloca()[%{{.*}}] : memref<2x4xf32, #[[$MAP]], 1>
53+
%2 = memref.alloca()[%c0] : memref<2x4xf32, affine_map<(d0, d1)[s0] -> ((d0 + s0), d1)>, 1>
54+
55+
// Test alloca with dynamic dimensions and one symbol.
56+
// CHECK: %{{.*}} = memref.alloca(%{{.*}})[%{{.*}}] : memref<2x?xf32, #[[$MAP]], 1>
57+
%3 = memref.alloca(%c1)[%c0] : memref<2x?xf32, affine_map<(d0, d1)[s0] -> (d0 + s0, d1)>, 1>
58+
59+
// Alloca with no mappings, but with alignment.
60+
// CHECK: %{{.*}} = memref.alloca() {alignment = 64 : i64} : memref<2xi32>
61+
%4 = memref.alloca() {alignment = 64} : memref<2 x i32>
62+
63+
return
64+
}
65+
66+
// CHECK-LABEL: func @dealloc() {
67+
func.func @dealloc() {
68+
^bb0:
69+
// CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32>
70+
%0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 0>
71+
72+
// CHECK: memref.dealloc %{{.*}} : memref<1024x64xf32>
73+
memref.dealloc %0 : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 0>
74+
return
75+
}
76+
77+
// CHECK-LABEL: func @load_store
78+
func.func @load_store() {
79+
^bb0:
80+
// CHECK: %{{.*}} = memref.alloc() : memref<1024x64xf32, 1>
81+
%0 = memref.alloc() : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
82+
83+
%1 = arith.constant 0 : index
84+
%2 = arith.constant 1 : index
85+
86+
// CHECK: %{{.*}} = memref.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<1024x64xf32, 1>
87+
%3 = memref.load %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
88+
89+
// CHECK: memref.store %{{.*}}, %{{.*}}[%{{.*}}, %{{.*}}] : memref<1024x64xf32, 1>
90+
memref.store %3, %0[%1, %2] : memref<1024x64xf32, affine_map<(d0, d1) -> (d0, d1)>, 1>
91+
92+
return
93+
}
94+
95+
// CHECK-LABEL: func @dma_ops()
96+
func.func @dma_ops() {
97+
%c0 = arith.constant 0 : index
98+
%stride = arith.constant 32 : index
99+
%elt_per_stride = arith.constant 16 : index
100+
101+
%A = memref.alloc() : memref<256 x f32, affine_map<(d0) -> (d0)>, 0>
102+
%Ah = memref.alloc() : memref<256 x f32, affine_map<(d0) -> (d0)>, 1>
103+
%tag = memref.alloc() : memref<1 x f32>
104+
105+
%num_elements = arith.constant 256 : index
106+
107+
memref.dma_start %A[%c0], %Ah[%c0], %num_elements, %tag[%c0] : memref<256 x f32>, memref<256 x f32, 1>, memref<1 x f32>
108+
memref.dma_wait %tag[%c0], %num_elements : memref<1 x f32>
109+
// CHECK: dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}}[%{{.*}}] : memref<256xf32>, memref<256xf32, 1>, memref<1xf32>
110+
// CHECK-NEXT: dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xf32>
111+
112+
// DMA with strides
113+
memref.dma_start %A[%c0], %Ah[%c0], %num_elements, %tag[%c0], %stride, %elt_per_stride : memref<256 x f32>, memref<256 x f32, 1>, memref<1 x f32>
114+
memref.dma_wait %tag[%c0], %num_elements : memref<1 x f32>
115+
// CHECK-NEXT: dma_start %{{.*}}[%{{.*}}], %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}}[%{{.*}}], %{{.*}}, %{{.*}} : memref<256xf32>, memref<256xf32, 1>, memref<1xf32>
116+
// CHECK-NEXT: dma_wait %{{.*}}[%{{.*}}], %{{.*}} : memref<1xf32>
117+
118+
return
119+
}
120+
4121
// CHECK-LABEL: func @memref_reinterpret_cast
5122
func.func @memref_reinterpret_cast(%in: memref<?xf32>)
6123
-> memref<10x?xf32, strided<[?, 1], offset: ?>> {
@@ -90,6 +207,87 @@ func.func @memref_alloca_scope() {
90207
return
91208
}
92209

210+
// CHECK-LABEL: func @memref_cast(%arg0
211+
func.func @memref_cast(%arg0: memref<4xf32>, %arg1 : memref<?xf32>, %arg2 : memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>>) {
212+
// CHECK: memref.cast %{{.*}} : memref<4xf32> to memref<?xf32>
213+
%0 = memref.cast %arg0 : memref<4xf32> to memref<?xf32>
214+
215+
// CHECK: memref.cast %{{.*}} : memref<?xf32> to memref<4xf32>
216+
%1 = memref.cast %arg1 : memref<?xf32> to memref<4xf32>
217+
218+
// CHECK: memref.cast %{{.*}} : memref<64x16x4xf32, strided<[64, 4, 1]>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
219+
%2 = memref.cast %arg2 : memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
220+
221+
// CHECK: memref.cast {{%.*}} : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1]>>
222+
%3 = memref.cast %2 : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>>
223+
224+
// CHECK: memref.cast %{{.*}} : memref<4xf32> to memref<*xf32>
225+
%4 = memref.cast %1 : memref<4xf32> to memref<*xf32>
226+
227+
// CHECK: memref.cast %{{.*}} : memref<*xf32> to memref<4xf32>
228+
%5 = memref.cast %4 : memref<*xf32> to memref<4xf32>
229+
return
230+
}
231+
232+
// Check that unranked memrefs with non-default memory space roundtrip
233+
// properly.
234+
// CHECK-LABEL: @unranked_memref_roundtrip(memref<*xf32, 4>)
235+
func.func private @unranked_memref_roundtrip(memref<*xf32, 4>)
236+
237+
// CHECK-LABEL: func @load_store_prefetch
238+
func.func @load_store_prefetch(memref<4x4xi32>, index) {
239+
^bb0(%0: memref<4x4xi32>, %1: index):
240+
// CHECK: %0 = memref.load %arg0[%arg1, %arg1] : memref<4x4xi32>
241+
%2 = "memref.load"(%0, %1, %1) : (memref<4x4xi32>, index, index)->i32
242+
243+
// CHECK: %{{.*}} = memref.load %arg0[%arg1, %arg1] : memref<4x4xi32>
244+
%3 = memref.load %0[%1, %1] : memref<4x4xi32>
245+
246+
// CHECK: memref.prefetch %arg0[%arg1, %arg1], write, locality<1>, data : memref<4x4xi32>
247+
memref.prefetch %0[%1, %1], write, locality<1>, data : memref<4x4xi32>
248+
249+
// CHECK: memref.prefetch %arg0[%arg1, %arg1], read, locality<3>, instr : memref<4x4xi32>
250+
memref.prefetch %0[%1, %1], read, locality<3>, instr : memref<4x4xi32>
251+
252+
return
253+
}
254+
255+
// Test with zero-dimensional operands using no index in load/store.
256+
// CHECK-LABEL: func @zero_dim_no_idx
257+
func.func @zero_dim_no_idx(%arg0 : memref<i32>, %arg1 : memref<i32>, %arg2 : memref<i32>) {
258+
%0 = memref.load %arg0[] : memref<i32>
259+
memref.store %0, %arg1[] : memref<i32>
260+
return
261+
// CHECK: %0 = memref.load %{{.*}}[] : memref<i32>
262+
// CHECK: memref.store %{{.*}}, %{{.*}}[] : memref<i32>
263+
}
264+
265+
// CHECK-LABEL: func @memref_view(%arg0
266+
func.func @memref_view(%arg0 : index, %arg1 : index, %arg2 : index) {
267+
%0 = memref.alloc() : memref<2048xi8>
268+
// Test two dynamic sizes and dynamic offset.
269+
// CHECK: memref.view {{.*}} : memref<2048xi8> to memref<?x?xf32>
270+
%1 = memref.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32>
271+
272+
// Test one dynamic size and dynamic offset.
273+
// CHECK: memref.view {{.*}} : memref<2048xi8> to memref<4x?xf32>
274+
%3 = memref.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32>
275+
276+
// Test static sizes and static offset.
277+
// CHECK: memref.view {{.*}} : memref<2048xi8> to memref<64x4xf32>
278+
%c0 = arith.constant 0: index
279+
%5 = memref.view %0[%c0][] : memref<2048xi8> to memref<64x4xf32>
280+
return
281+
}
282+
283+
// CHECK-LABEL: func @assume_alignment
284+
// CHECK-SAME: %[[MEMREF:.*]]: memref<4x4xf16>
285+
func.func @assume_alignment(%0: memref<4x4xf16>) {
286+
// CHECK: memref.assume_alignment %[[MEMREF]], 16 : memref<4x4xf16>
287+
memref.assume_alignment %0, 16 : memref<4x4xf16>
288+
return
289+
}
290+
93291
// CHECK-LABEL: func @expand_collapse_shape_static
94292
func.func @expand_collapse_shape_static(
95293
%arg0: memref<3x4x5xf32>,

mlir/test/IR/core-ops.mlir

Lines changed: 0 additions & 81 deletions
Original file line numberDiff line numberDiff line change
@@ -171,34 +171,6 @@ func.func @affine_apply() {
171171
return
172172
}
173173

174-
// CHECK-LABEL: func @load_store_prefetch
175-
func.func @load_store_prefetch(memref<4x4xi32>, index) {
176-
^bb0(%0: memref<4x4xi32>, %1: index):
177-
// CHECK: %0 = memref.load %arg0[%arg1, %arg1] : memref<4x4xi32>
178-
%2 = "memref.load"(%0, %1, %1) : (memref<4x4xi32>, index, index)->i32
179-
180-
// CHECK: %{{.*}} = memref.load %arg0[%arg1, %arg1] : memref<4x4xi32>
181-
%3 = memref.load %0[%1, %1] : memref<4x4xi32>
182-
183-
// CHECK: memref.prefetch %arg0[%arg1, %arg1], write, locality<1>, data : memref<4x4xi32>
184-
memref.prefetch %0[%1, %1], write, locality<1>, data : memref<4x4xi32>
185-
186-
// CHECK: memref.prefetch %arg0[%arg1, %arg1], read, locality<3>, instr : memref<4x4xi32>
187-
memref.prefetch %0[%1, %1], read, locality<3>, instr : memref<4x4xi32>
188-
189-
return
190-
}
191-
192-
// Test with zero-dimensional operands using no index in load/store.
193-
// CHECK-LABEL: func @zero_dim_no_idx
194-
func.func @zero_dim_no_idx(%arg0 : memref<i32>, %arg1 : memref<i32>, %arg2 : memref<i32>) {
195-
%0 = memref.load %arg0[] : memref<i32>
196-
memref.store %0, %arg1[] : memref<i32>
197-
return
198-
// CHECK: %0 = memref.load %{{.*}}[] : memref<i32>
199-
// CHECK: memref.store %{{.*}}, %{{.*}}[] : memref<i32>
200-
}
201-
202174
// CHECK-LABEL: func @return_op(%arg0: i32) -> i32 {
203175
func.func @return_op(%a : i32) -> i32 {
204176
// CHECK: return %arg0 : i32
@@ -232,51 +204,6 @@ func.func @calls(%arg0: i32) {
232204
return
233205
}
234206

235-
// CHECK-LABEL: func @memref_cast(%arg0
236-
func.func @memref_cast(%arg0: memref<4xf32>, %arg1 : memref<?xf32>, %arg2 : memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>>) {
237-
// CHECK: memref.cast %{{.*}} : memref<4xf32> to memref<?xf32>
238-
%0 = memref.cast %arg0 : memref<4xf32> to memref<?xf32>
239-
240-
// CHECK: memref.cast %{{.*}} : memref<?xf32> to memref<4xf32>
241-
%1 = memref.cast %arg1 : memref<?xf32> to memref<4xf32>
242-
243-
// CHECK: memref.cast %{{.*}} : memref<64x16x4xf32, strided<[64, 4, 1]>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
244-
%2 = memref.cast %arg2 : memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>> to memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>>
245-
246-
// CHECK: memref.cast {{%.*}} : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1]>>
247-
%3 = memref.cast %2 : memref<64x16x4xf32, strided<[?, ?, ?], offset: ?>> to memref<64x16x4xf32, strided<[64, 4, 1], offset: 0>>
248-
249-
// CHECK: memref.cast %{{.*}} : memref<4xf32> to memref<*xf32>
250-
%4 = memref.cast %1 : memref<4xf32> to memref<*xf32>
251-
252-
// CHECK: memref.cast %{{.*}} : memref<*xf32> to memref<4xf32>
253-
%5 = memref.cast %4 : memref<*xf32> to memref<4xf32>
254-
return
255-
}
256-
257-
// Check that unranked memrefs with non-default memory space roundtrip
258-
// properly.
259-
// CHECK-LABEL: @unranked_memref_roundtrip(memref<*xf32, 4>)
260-
func.func private @unranked_memref_roundtrip(memref<*xf32, 4>)
261-
262-
// CHECK-LABEL: func @memref_view(%arg0
263-
func.func @memref_view(%arg0 : index, %arg1 : index, %arg2 : index) {
264-
%0 = memref.alloc() : memref<2048xi8>
265-
// Test two dynamic sizes and dynamic offset.
266-
// CHECK: memref.view {{.*}} : memref<2048xi8> to memref<?x?xf32>
267-
%1 = memref.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32>
268-
269-
// Test one dynamic size and dynamic offset.
270-
// CHECK: memref.view {{.*}} : memref<2048xi8> to memref<4x?xf32>
271-
%3 = memref.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32>
272-
273-
// Test static sizes and static offset.
274-
// CHECK: memref.view {{.*}} : memref<2048xi8> to memref<64x4xf32>
275-
%c0 = arith.constant 0: index
276-
%5 = memref.view %0[%c0][] : memref<2048xi8> to memref<64x4xf32>
277-
return
278-
}
279-
280207
// CHECK-LABEL: func @test_dimop
281208
// CHECK-SAME: %[[ARG:.*]]: tensor<4x4x?xf32>
282209
func.func @test_dimop(%arg0: tensor<4x4x?xf32>) {
@@ -288,11 +215,3 @@ func.func @test_dimop(%arg0: tensor<4x4x?xf32>) {
288215
%1 = affine.apply affine_map<(d0) -> (d0)>(%0)
289216
return
290217
}
291-
292-
// CHECK-LABEL: func @assume_alignment
293-
// CHECK-SAME: %[[MEMREF:.*]]: memref<4x4xf16>
294-
func.func @assume_alignment(%0: memref<4x4xf16>) {
295-
// CHECK: memref.assume_alignment %[[MEMREF]], 16 : memref<4x4xf16>
296-
memref.assume_alignment %0, 16 : memref<4x4xf16>
297-
return
298-
}

0 commit comments

Comments
 (0)