Skip to content

Commit a5985ca

Browse files
klensy
and
klensy
authored
[mlir][test] Fix filecheck annotation typos [2/n] (#93476)
A few more fixes; previous PR: #92897. Issues from #93154 remain unfixed. --------- Co-authored-by: klensy <[email protected]>
1 parent 9afb09e commit a5985ca

24 files changed

+89
-81
lines changed

mlir/test/Analysis/test-liveness.mlir

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ func.func @func_simpleBranch(%arg0: i32, %arg1 : i32) -> i32 {
4040
// CHECK-SAME: arg0@0 arg1@0 val_2
4141
// CHECK: return
4242
// CHECK-SAME: val_2
43-
// CHECK-NEXT EndCurrentlyLive
43+
// CHECK-NEXT:EndCurrentlyLive
4444
%result = arith.addi %arg0, %arg1 : i32
4545
return %result : i32
4646
}
@@ -197,9 +197,9 @@ func.func @func_ranges(%cond : i1, %arg1 : i32, %arg2 : i32, %arg3 : i32) -> i32
197197
// CHECK-NEXT: %2 = arith.addi
198198
// CHECK-NEXT: %3 = arith.muli
199199
// CHECK-NEXT: val_7
200-
// CHECK-NEXT %2 = arith.addi
201-
// CHECK-NEXT %3 = arith.muli
202-
// CHECK-NEXT %4 = arith.muli
200+
// CHECK-NEXT: %2 = arith.addi
201+
// CHECK-NEXT: %3 = arith.muli
202+
// CHECK-NEXT: %4 = arith.muli
203203
// CHECK: val_8
204204
// CHECK-NEXT: %3 = arith.muli
205205
// CHECK-NEXT: %4 = arith.muli

mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -638,7 +638,7 @@ gpu.module @test_module_30 {
638638
}
639639
// CHECK-LABEL: @subgroup_reduce_xor
640640
gpu.func @subgroup_reduce_xor(%arg0 : i32) {
641-
// CHECK nvvm.redux.sync xor {{.*}}
641+
// CHECK: nvvm.redux.sync xor {{.*}}
642642
%result = gpu.subgroup_reduce xor %arg0 uniform {} : (i32) -> (i32)
643643
gpu.return
644644
}

mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -378,14 +378,14 @@ func.func @memref_cast_ranked_to_unranked(%arg : memref<42x2x?xf32>) {
378378
// CHECK-DAG: %[[p:.*]] = llvm.alloca %[[c]] x !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> : (i64) -> !llvm.ptr
379379
// CHECK-DAG: llvm.store %{{.*}}, %[[p]] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>, !llvm.ptr
380380
// CHECK-DAG: %[[r:.*]] = llvm.mlir.constant(3 : index) : i64
381-
// CHECK : llvm.mlir.undef : !llvm.struct<(i64, ptr)>
381+
// CHECK: llvm.mlir.undef : !llvm.struct<(i64, ptr)>
382382
// CHECK-DAG: llvm.insertvalue %[[r]], %{{.*}}[0] : !llvm.struct<(i64, ptr)>
383383
// CHECK-DAG: llvm.insertvalue %[[p]], %{{.*}}[1] : !llvm.struct<(i64, ptr)>
384384
// CHECK32-DAG: %[[c:.*]] = llvm.mlir.constant(1 : index) : i64
385385
// CHECK32-DAG: %[[p:.*]] = llvm.alloca %[[c]] x !llvm.struct<(ptr, ptr, i32, array<3 x i32>, array<3 x i32>)> : (i64) -> !llvm.ptr
386386
// CHECK32-DAG: llvm.store %{{.*}}, %[[p]] : !llvm.struct<(ptr, ptr, i32, array<3 x i32>, array<3 x i32>)>, !llvm.ptr
387387
// CHECK32-DAG: %[[r:.*]] = llvm.mlir.constant(3 : index) : i32
388-
// CHECK32 : llvm.mlir.undef : !llvm.struct<(i32, ptr)>
388+
// CHECK32: llvm.mlir.undef : !llvm.struct<(i32, ptr)>
389389
// CHECK32-DAG: llvm.insertvalue %[[r]], %{{.*}}[0] : !llvm.struct<(i32, ptr)>
390390
// CHECK32-DAG: llvm.insertvalue %[[p]], %{{.*}}[1] : !llvm.struct<(i32, ptr)>
391391
%0 = memref.cast %arg : memref<42x2x?xf32> to memref<*xf32>

mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ func.func @m16n8k16_fp16(%arg0: vector<4x2xf16>, %arg1: vector<2x2xf16>, %arg2:
1111
// CHECK: llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<2xf16>>
1212
// CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<2xf16>>
1313
// CHECK: llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<2xf16>>
14-
// CHECK-NOT llvm.extractvalue
14+
// CHECK-NOT: llvm.extractvalue
1515
// CHECK: [[d:%.+]] = nvvm.mma.sync
1616
// CHECK-SAME: shape = #nvvm.shape<m = 16, n = 8, k = 16>
1717
%d = nvgpu.mma.sync (%arg0, %arg1, %arg2) {mmaShape = [16, 8, 16]} : (vector<4x2xf16>, vector<2x2xf16>, vector<2x2xf16>) -> vector<2x2xf16>
@@ -56,7 +56,7 @@ func.func @m16n8k8_fp16(%arg0: vector<2x2xf16>, %arg1: vector<1x2xf16>, %arg2: v
5656
// CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<1 x vector<2xf16>>
5757
// CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<2xf16>>
5858
// CHECK: llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<2xf16>>
59-
// CHECK-NOT llvm.extractvalue
59+
// CHECK-NOT: llvm.extractvalue
6060
// CHECK: [[d:%.+]] = nvvm.mma.sync
6161
// CHECK-SAME: shape = #nvvm.shape<m = 16, n = 8, k = 8>
6262
%d = nvgpu.mma.sync (%arg0, %arg1, %arg2) {mmaShape = [16, 8, 8]} : (vector<2x2xf16>, vector<1x2xf16>, vector<2x2xf16>) -> vector<2x2xf16>
@@ -360,7 +360,7 @@ func.func @mma_sp_sync_f16_16832(%arg0: vector<4x2xf16>,
360360
// CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<2xf16>>
361361
// CHECK: llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<2xf16>>
362362

363-
// CHECK-NOT llvm.extractvalue
363+
// CHECK-NOT: llvm.extractvalue
364364

365365
// CHECK: %[[sparseMetadata:.+]] = llvm.bitcast %{{.+}} : vector<2xi16> to i32
366366

@@ -396,7 +396,7 @@ func.func @mma_sp_sync_f16_16816(%arg0: vector<2x2xf16>,
396396
// CHECK: llvm.extractvalue %{{.*}}[0] : !llvm.array<2 x vector<2xf16>>
397397
// CHECK: llvm.extractvalue %{{.*}}[1] : !llvm.array<2 x vector<2xf16>>
398398

399-
// CHECK-NOT llvm.extractvalue
399+
// CHECK-NOT: llvm.extractvalue
400400

401401
// CHECK: %[[sparseMetadata:.+]] = llvm.bitcast %{{.+}} : vector<2xi16> to i32
402402

@@ -455,7 +455,7 @@ func.func @mma_sp_sync_i8_16864(%arg0: vector<4x4xi8>,
455455
// CHECK: llvm.extractvalue %{{.*}}[{{.*}}] : !llvm.array<2 x vector<2xi32>>
456456
// CHECK: llvm.extractvalue %{{.*}}[{{.*}}] : !llvm.array<2 x vector<2xi32>>
457457

458-
// CHECK-NOT llvm.extractvalue
458+
// CHECK-NOT: llvm.extractvalue
459459

460460
// CHECK: %[[sparseMetadata:.+]] = llvm.bitcast %{{.+}} : vector<2xi16> to i32
461461

mlir/test/Dialect/AMX/roundtrip.mlir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
// CHECK-LABEL: tzero
44
// CHECK: amx.tile_zero : vector<16x16xbf16>
5-
// CHECK amx.tile_store %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} : memref<?x?xbf16>, vector<16x16xbf16>
5+
// CHECK: amx.tile_store %{{.*}}[%{{.*}}, %{{.*}}], %{{.*}} : memref<?x?xbf16>, vector<16x16xbf16>
66
func.func @tzero(%arg0: memref<?x?xbf16>) {
77
%0 = arith.constant 0 : index
88
%1 = amx.tile_zero : vector<16x16xbf16>

mlir/test/Dialect/Affine/loop-fusion-3.mlir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -889,7 +889,7 @@ func.func @reduce_add_non_innermost(%arg0: memref<64x64xf32, 1>, %arg1: memref<1
889889
// CHECK: affine.for
890890
// CHECK-NEXT: affine.for
891891
// CHECK-NEXT: affine.for
892-
// CHECK affine.for
892+
// CHECK: affine.for
893893

894894

895895

mlir/test/Dialect/Affine/unroll.mlir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -616,7 +616,7 @@ func.func @loop_nest_non_trivial_multiple_upper_bound_alt(%M : index, %N : index
616616
// UNROLL-BY-4-NEXT: "foo"
617617
// UNROLL-BY-4-NEXT: "foo"
618618
// UNROLL-BY-4-NEXT: "foo"
619-
// UNROLL-BY-4-NOT for
619+
// UNROLL-BY-4-NOT: for
620620
// UNROLL-BY-4: return
621621
return
622622
}

mlir/test/Dialect/ArmSME/tile-allocation-liveness.mlir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -406,7 +406,7 @@ func.func @avoidable_spill(%a: vector<[4]xf32>, %b: vector<[4]xf32>, %c: vector<
406406
// CHECK: arm_sme.get_tile {tile_id = 2 : i32} : vector<[4]x[4]xf32>
407407
// CHECK: arm_sme.get_tile {tile_id = 3 : i32} : vector<[4]x[4]xf32>
408408
// CHECK: arm_sme.move_vector_to_tile_slice {{.*}} {tile_id = 0 : i32} : vector<[4]xf32> into vector<[4]x[4]xf32>
409-
// CHECK-NOT tile_id = 16
409+
// CHECK-NOT: tile_id = 16
410410
func.func @cond_branch_with_backedge(%slice: vector<[4]xf32>) {
411411
%tileA = arm_sme.get_tile : vector<[4]x[4]xf32>
412412
%tileB = arm_sme.get_tile : vector<[4]x[4]xf32>

mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -881,7 +881,7 @@ func.func @input_stays_same(%arg0 : memref<?x1x?xf32, strided<[?, 1, 1]>>, %arg1
881881
// CHECK: func @input_stays_same(
882882
// CHECK-SAME: %[[ARG0:.*]]: memref<?x1x?xf32, strided<[?, 1, 1]>>,
883883
// CHECK-SAME: %[[ARG1:.*]]: f32, %[[ARG2:.*]]: memref<?x1x?x1x?xf32>)
884-
// CHECK-SAME -> memref<?x1x?x1x?xf32> {
884+
// CHECK-SAME: -> memref<?x1x?x1x?xf32> {
885885
// CHECK: %[[OUT:.*]] = memref.collapse_shape %[[ARG2]] {{\[}}[0, 1], [2, 3], [4]]
886886
// CHECK-SAME: : memref<?x1x?x1x?xf32> into memref<?x?x?xf32>
887887
// CHECK: linalg.generic

mlir/test/Dialect/Linalg/fusion-elementwise-ops.mlir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -532,7 +532,7 @@ func.func @scalar_generic_fusion
532532
// CHECK-SAME: ins(%[[ARG1]] : tensor<i32>)
533533
// CHECK: tensor.extract %[[ARG0]]
534534
// CHECK: linalg.yield
535-
// CHECK return %[[T0]]
535+
// CHECK: return %[[T0]]
536536

537537
// -----
538538

mlir/test/Dialect/Linalg/transform-ops.mlir

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,17 +2,17 @@
22

33
transform.sequence failures(propagate) {
44
^bb1(%arg0: !transform.any_op):
5-
// CHECK %{{.*}}, %{{.*}}:2 = transform.structured.tile
5+
// CHECK: %{{.*}}, %{{.*}}:2 = transform.structured.tile
66
%0, %1:2 = transform.structured.tile_using_for %arg0 tile_sizes [2, 0, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
77
}
88

99
// check that the Attributes of `tile_using_for` are preserved through printing
1010
// and parsing with and without use of the optional `interchange` Attribute.
1111
transform.sequence failures(propagate) {
1212
^bb1(%arg0: !transform.any_op):
13-
// CHECK %{{.*}}, %{{.*}}:2 = transform.structured.tile %arg0 [2, 0, 3] interchange = [2, 1] {test_attr1 = 1 : i64, test_attr2}
13+
// CHECK: %{{.*}}, %{{.*}}:2 = transform.structured.tile_using_for %arg0 tile_sizes [2, 0, 3] interchange = [2, 1] {test_attr1 = 1 : i64, test_attr2}
1414
%0, %1:2 = transform.structured.tile_using_for %arg0 tile_sizes [2, 0, 3] interchange = [2, 1] {test_attr1 = 1 : i64, test_attr2}: (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
15-
// CHECK %{{.*}}, %{{.*}}:2 = transform.structured.tile %arg0 [4, 5, 3] {test_attr3 = 1 : i64, test_attr4}
15+
// CHECK: %{{.*}}, %{{.*}}:2 = transform.structured.tile_using_for %tiled_linalg_op tile_sizes [0, 5, 3] {test_attr3 = 1 : i64, test_attr4}
1616
%2, %3:2 = transform.structured.tile_using_for %0 tile_sizes [0, 5, 3] {test_attr3 = 1 : i64, test_attr4}: (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
1717
}
1818

mlir/test/Dialect/OpenMP/ops.mlir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -142,7 +142,7 @@ func.func @omp_parallel_pretty(%data_var : memref<i32>, %if_cond : i1, %num_thre
142142
omp.terminator
143143
}
144144

145-
// CHECK omp.parallel if(%{{.*}}) num_threads(%{{.*}} : i32) private(%{{.*}} : memref<i32>) proc_bind(close)
145+
// CHECK: omp.parallel if(%{{.*}}) num_threads(%{{.*}} : i32) proc_bind(close)
146146
omp.parallel num_threads(%num_threads : i32) if(%if_cond: i1) proc_bind(close) {
147147
omp.terminator
148148
}

mlir/test/Dialect/SparseTensor/sparse_reshape.mlir

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -76,9 +76,9 @@ func.func @sparse_expand(%arg0: tensor<100xf64, #SparseVector>) -> tensor<10x10x
7676
// CHECK: %[[T:.*]] = arith.muli %[[SI0]], %[[C10]] : index
7777
// CHECK: %[[DI:.*]] = arith.addi %[[T]], %[[SI1]] : index
7878
// CHECK: %[[R1:.*]] = tensor.insert %[[SV]] into %[[A1]]{{\[}}%[[DI]]]
79-
// CHECK scf.yield %[[R1]]
80-
// CHECK }
81-
// CHECK scf.yield %[[RET_1]]
79+
// CHECK: scf.yield %[[R1]]
80+
// CHECK: }
81+
// CHECK: scf.yield %[[RET_1]]
8282
// CHECK: }
8383
// CHECK: %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
8484
// CHECK-NOT: sparse_tensor.convert
@@ -170,9 +170,9 @@ func.func @dynamic_sparse_expand(%arg0: tensor<?xf64, #SparseVector>, %sz0: inde
170170
// CHECK: %[[T4:.*]] = arith.muli %[[SI1]], %[[T3]] : index
171171
// CHECK: %[[DI:.*]] = arith.addi %[[T2]], %[[T4]] : index
172172
// CHECK: %[[NT:.*]] = tensor.insert %[[SV]] into %[[R1]]{{\[}}%[[DI]]]
173-
// CHECK scf.yield %[[NT]]
174-
// CHECK }
175-
// CHECK scf.yield %[[RET_1]]
173+
// CHECK: scf.yield %[[NT]]
174+
// CHECK: }
175+
// CHECK: scf.yield %[[RET_1]]
176176
// CHECK: }
177177
// CHECK: %[[NT1:.*]] = sparse_tensor.load %[[RET]] hasInserts
178178
// CHECK-NOT: sparse_tensor.convert

mlir/test/Dialect/Tensor/canonicalize.mlir

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1120,8 +1120,8 @@ func.func @compose_expand_of_expand_of_zero_dim(%arg0 : tensor<f32>)
11201120
// CHECK-LABEL: func.func @collapse_of_cast(
11211121
// CHECK-SAME: %[[IN:.*]]: tensor<8x12x32xf32>) -> tensor<?x32xf32> {
11221122
// CHECK-NEXT: %[[COLLAPSE:.*]] = tensor.collapse_shape %[[IN]] {{\[}}[0, 1], [2]] : tensor<8x12x32xf32> into tensor<96x32xf32>
1123-
// CHECK-NEXT %[[CAST:.*]] = tensor.cast %[[COLLAPSE]] : tensor<96x32xf32> to tensor<?x32xf32>
1124-
// CHECK-NEXT return %[[CAST]] : tensor<?x32xf32>
1123+
// CHECK-NEXT: %[[CAST:.*]] = tensor.cast %[[COLLAPSE]] : tensor<96x32xf32> to tensor<?x32xf32>
1124+
// CHECK-NEXT: return %[[CAST]] : tensor<?x32xf32>
11251125
func.func @collapse_of_cast(%t: tensor<8x12x32xf32>) -> tensor<?x32xf32> {
11261126
%0 = tensor.cast %t : tensor<8x12x32xf32> to tensor<?x?x?xf32>
11271127
%1 = tensor.collapse_shape %0 [[0, 1], [2]] : tensor<?x?x?xf32> into tensor<?x?xf32>

mlir/test/Dialect/Tosa/canonicalize.mlir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -684,7 +684,7 @@ func.func @canonicalize_concat_slice_on_non_concat_axis(%arg0 : tensor<1x12x12xf
684684

685685
// -----
686686

687-
// CHECK-LABEL
687+
// CHECK-LABEL: @fold_log_exp
688688
func.func @fold_log_exp(%arg0: tensor<?x1xf32>) -> tensor<?x1xf32> {
689689
// CHECK: return %arg{{.*}} : tensor<?x1xf32>
690690
%0 = tosa.exp %arg0 : (tensor<?x1xf32>) -> tensor<?x1xf32>

mlir/test/Dialect/Tosa/tosa-infer-shapes.mlir

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -222,13 +222,13 @@ func.func @test_binary_i32(%arg0 : tensor<4xi32>, %arg1 : tensor<i32>) -> () {
222222

223223
// CHECK-LABEL: @test_binary_i1
224224
func.func @test_binary_i1(%arg0 : tensor<4xi1>, %arg1 : tensor<i1>) -> () {
225-
// CHECK tosa.logical_and %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<4xi1>
225+
// CHECK: tosa.logical_and %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<4xi1>
226226
%0 = tosa.logical_and %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<*xi1>
227227

228-
// CHECK tosa.logical_or %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<4xi1>
228+
// CHECK: tosa.logical_or %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<4xi1>
229229
%1 = tosa.logical_or %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<*xi1>
230230

231-
// CHECK tosa.logical_xor %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<*4i1>
231+
// CHECK: tosa.logical_xor %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<4xi1>
232232
%2 = tosa.logical_xor %arg0, %arg1 : (tensor<4xi1>, tensor<i1>) -> tensor<*xi1>
233233

234234
return

mlir/test/Dialect/Vector/vector-dropleadunitdim-transforms.mlir

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -241,7 +241,7 @@ func.func @cast_away_contraction_leading_one_dims_nonleadingunitdim_rank4_acctra
241241
// -----
242242

243243
// CHECK-LABEL: func.func @cast_away_contraction_does_not_transpose_leading_unit_dims
244-
// CHECK-NOT vector.transpose
244+
// CHECK-NOT: vector.transpose
245245
// CHECK: vector.contract
246246
func.func @cast_away_contraction_does_not_transpose_leading_unit_dims(%lhs: vector<1x1x8xi32>,
247247
%rhs: vector<1x8x8xi32>,

mlir/test/IR/parser.mlir

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -342,40 +342,40 @@ func.func @loop_bounds(%N : index) {
342342

343343
// CHECK-LABEL: func @ifinst(%{{.*}}: index) {
344344
func.func @ifinst(%N: index) {
345-
%c = arith.constant 200 : index // CHECK %{{.*}} = arith.constant 200
346-
affine.for %i = 1 to 10 { // CHECK affine.for %{{.*}} = 1 to 10 {
347-
affine.if #set0(%i)[%N, %c] { // CHECK affine.if #set0(%{{.*}})[%{{.*}}, %{{.*}}] {
345+
%c = arith.constant 200 : index // CHECK: %{{.*}} = arith.constant 200
346+
affine.for %i = 1 to 10 { // CHECK: affine.for %{{.*}} = 1 to 10 {
347+
affine.if #set0(%i)[%N, %c] { // CHECK: affine.if #set(%{{.*}})[%{{.*}}, %{{.*}}] {
348348
%x = arith.constant 1 : i32
349349
// CHECK: %{{.*}} = arith.constant 1 : i32
350350
%y = "add"(%x, %i) : (i32, index) -> i32 // CHECK: %{{.*}} = "add"(%{{.*}}, %{{.*}}) : (i32, index) -> i32
351351
%z = "mul"(%y, %y) : (i32, i32) -> i32 // CHECK: %{{.*}} = "mul"(%{{.*}}, %{{.*}}) : (i32, i32) -> i32
352352
} else { // CHECK } else {
353-
affine.if affine_set<(i)[N] : (i - 2 >= 0, 4 - i >= 0)>(%i)[%N] { // CHECK affine.if (#set1(%{{.*}})[%{{.*}}]) {
353+
affine.if affine_set<(i)[N] : (i - 2 >= 0, 4 - i >= 0)>(%i)[%N] { // CHECK: affine.if #set1(%{{.*}})[%{{.*}}] {
354354
// CHECK: %{{.*}} = arith.constant 1 : index
355355
%u = arith.constant 1 : index
356356
// CHECK: %{{.*}} = affine.apply #map{{.*}}(%{{.*}}, %{{.*}})[%{{.*}}]
357357
%w = affine.apply affine_map<(d0,d1)[s0] -> (d0+d1+s0)> (%i, %i) [%u]
358358
} else { // CHECK } else {
359359
%v = arith.constant 3 : i32 // %c3_i32 = arith.constant 3 : i32
360360
}
361-
} // CHECK }
362-
} // CHECK }
363-
return // CHECK return
364-
} // CHECK }
361+
} // CHECK: }
362+
} // CHECK: }
363+
return // CHECK: return
364+
} // CHECK:}
365365

366366
// CHECK-LABEL: func @simple_ifinst(%{{.*}}: index) {
367367
func.func @simple_ifinst(%N: index) {
368-
%c = arith.constant 200 : index // CHECK %{{.*}} = arith.constant 200
369-
affine.for %i = 1 to 10 { // CHECK affine.for %{{.*}} = 1 to 10 {
370-
affine.if #set0(%i)[%N, %c] { // CHECK affine.if #set0(%{{.*}})[%{{.*}}, %{{.*}}] {
368+
%c = arith.constant 200 : index // CHECK: %{{.*}} = arith.constant 200
369+
affine.for %i = 1 to 10 { // CHECK: affine.for %{{.*}} = 1 to 10 {
370+
affine.if #set0(%i)[%N, %c] { // CHECK: affine.if #set(%{{.*}})[%{{.*}}, %{{.*}}] {
371371
%x = arith.constant 1 : i32
372372
// CHECK: %{{.*}} = arith.constant 1 : i32
373373
%y = "add"(%x, %i) : (i32, index) -> i32 // CHECK: %{{.*}} = "add"(%{{.*}}, %{{.*}}) : (i32, index) -> i32
374374
%z = "mul"(%y, %y) : (i32, i32) -> i32 // CHECK: %{{.*}} = "mul"(%{{.*}}, %{{.*}}) : (i32, i32) -> i32
375-
} // CHECK }
376-
} // CHECK }
377-
return // CHECK return
378-
} // CHECK }
375+
} // CHECK: }
376+
} // CHECK: }
377+
return // CHECK: return
378+
} // CHECK:}
379379

380380
// CHECK-LABEL: func @attributes() {
381381
func.func @attributes() {

mlir/test/Target/LLVMIR/llvmir.mlir

Lines changed: 18 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2330,39 +2330,47 @@ llvm.func @streaming_compatible_func() attributes {arm_streaming_compatible} {
23302330
// -----
23312331

23322332
// CHECK-LABEL: @new_za_func
2333-
// CHECK: #[[ATTR:[0-9]*]]
2333+
// CHECK-SAME: #[[ATTR:[0-9]*]]
23342334
llvm.func @new_za_func() attributes {arm_new_za} {
23352335
llvm.return
23362336
}
2337-
// CHECK #[[ATTR]] = { "aarch64_new_za" }
2337+
// CHECK: #[[ATTR]] = { "aarch64_new_za" }
2338+
2339+
// -----
23382340

23392341
// CHECK-LABEL: @in_za_func
2340-
// CHECK: #[[ATTR:[0-9]*]]
2342+
// CHECK-SAME: #[[ATTR:[0-9]*]]
23412343
llvm.func @in_za_func() attributes {arm_in_za } {
23422344
llvm.return
23432345
}
2344-
// CHECK #[[ATTR]] = { "aarch64_in_za" }
2346+
// CHECK: #[[ATTR]] = { "aarch64_in_za" }
2347+
2348+
// -----
23452349

23462350
// CHECK-LABEL: @out_za_func
2347-
// CHECK: #[[ATTR:[0-9]*]]
2351+
// CHECK-SAME: #[[ATTR:[0-9]*]]
23482352
llvm.func @out_za_func() attributes {arm_out_za } {
23492353
llvm.return
23502354
}
2351-
// CHECK #[[ATTR]] = { "aarch64_out_za" }
2355+
// CHECK: #[[ATTR]] = { "aarch64_out_za" }
2356+
2357+
// -----
23522358

23532359
// CHECK-LABEL: @inout_za_func
2354-
// CHECK: #[[ATTR:[0-9]*]]
2360+
// CHECK-SAME: #[[ATTR:[0-9]*]]
23552361
llvm.func @inout_za_func() attributes {arm_inout_za } {
23562362
llvm.return
23572363
}
2358-
// CHECK #[[ATTR]] = { "aarch64_inout_za" }
2364+
// CHECK: #[[ATTR]] = { "aarch64_inout_za" }
2365+
2366+
// -----
23592367

23602368
// CHECK-LABEL: @preserves_za_func
2361-
// CHECK: #[[ATTR:[0-9]*]]
2369+
// CHECK-SAME: #[[ATTR:[0-9]*]]
23622370
llvm.func @preserves_za_func() attributes {arm_preserves_za} {
23632371
llvm.return
23642372
}
2365-
// CHECK #[[ATTR]] = { "aarch64_preserves_za" }
2373+
// CHECK: #[[ATTR]] = { "aarch64_preserves_za" }
23662374

23672375
// -----
23682376

0 commit comments

Comments
 (0)