}
// CHECK-LABEL: func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
- // CHECK: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : tensor<1024x1024xf64>
- // CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) : tensor<1024x1024xf64>
- // CHECK: return %[[VAL_1]] : tensor<1024x1024xf64>
+ // CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : tensor<1024x1024xf64>
+ // CHECK: return %[[C0]] : tensor<1024x1024xf64>
// CHECK: }
func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
%cst = arith.constant 0.000000e+00 : f64
- %0 = bufferization.alloc_tensor() : tensor<1024x1024xf64>
+ %0 = tensor.empty() : tensor<1024x1024xf64>
%1 = linalg.generic {indexing_maps = [affine_map<(d0, d1) -> ()>,
affine_map<(d0, d1) -> (d0, d1)>],
iterator_types = ["parallel", "parallel"]}
@@ -40,13 +39,12 @@ func.func @fold_yield_arg_zero() -> tensor<1024x1024xf64> {
}
// CHECK-LABEL: func.func @fold_yield_direct_zero() -> tensor<32xf64> {
- // CHECK: %[[VAL_0:.*]] = arith.constant dense<0.000000e+00> : tensor<32xf64>
- // CHECK: %[[VAL_1:.*]] = bufferization.alloc_tensor() copy(%[[VAL_0]]) : tensor<32xf64>
- // CHECK: return %[[VAL_1]] : tensor<32xf64>
+ // CHECK: %[[C0:.*]] = arith.constant dense<0.000000e+00> : tensor<32xf64>
+ // CHECK: return %[[C0]] : tensor<32xf64>
// CHECK: }
func.func @fold_yield_direct_zero() -> tensor<32xf64> {
%cst = arith.constant 0.000000e+00 : f64
- %0 = bufferization.alloc_tensor() : tensor<32xf64>
+ %0 = tensor.empty() : tensor<32xf64>
%1 = linalg.generic {indexing_maps = [affine_map<(d0) -> (d0)>],
iterator_types = ["parallel"]}
outs(%0 : tensor<32xf64>) {
@@ -92,9 +90,9 @@ func.func @fold_yield_direct_zero() -> tensor<32xf64> {
// CHECK: %[[VAL_32:.*]] = arith.mulf %[[VAL_30]], %[[VAL_31]] : f64
// CHECK: %[[VAL_33:.*]] = arith.addf %[[VAL_28]], %[[VAL_32]] : f64
// CHECK: memref.store %[[VAL_33]], %[[VAL_16]]{{\[}}%[[VAL_20]], %[[VAL_27]]] : memref<8x8xf64>
- // CHECK: } {"Emitted from" = "linalg.generic"}
- // CHECK: } {"Emitted from" = "linalg.generic"}
- // CHECK: } {"Emitted from" = "linalg.generic"}
+ // CHECK: }
+ // CHECK: }
+ // CHECK: }
// CHECK: %[[VAL_34:.*]] = bufferization.to_tensor %[[VAL_16]] : memref<8x8xf64>
// CHECK: return %[[VAL_34]] : tensor<8x8xf64>
// CHECK: }
@@ -123,29 +121,29 @@ func.func @sampled_dd_unfused(%args: tensor<8x8xf64, #SM>,
}
// CHECK-LABEL: func.func @sparse_sampled_dd_unfused(
- // CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>,
+ // CHECK-SAME: %[[VAL_0:.*]]: tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>>,
// CHECK-SAME: %[[VAL_1:.*]]: tensor<8x8xf64>,
- // CHECK-SAME: %[[VAL_2:.*]]: tensor<8x8xf64>) -> tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> {
+ // CHECK-SAME: %[[VAL_2:.*]]: tensor<8x8xf64>) -> tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>> {
// CHECK-DAG: %[[VAL_3:.*]] = arith.constant 8 : index
// CHECK-DAG: %[[VAL_4:.*]] = arith.constant 0 : index
// CHECK-DAG: %[[VAL_5:.*]] = arith.constant 1 : index
// CHECK-DAG: %[[VAL_6:.*]] = arith.constant false
// CHECK-DAG: %[[VAL_7:.*]] = arith.constant true
// CHECK-DAG: %[[VAL_8:.*]] = arith.constant dense<0.000000e+00> : tensor<8x8xf64>
- // CHECK-DAG: %[[VAL_9:.*]] = bufferization.alloc_tensor() copy(%[[VAL_8]]) : tensor<8x8xf64>
- // CHECK-DAG: %[[VAL_10:.*]] = bufferization.alloc_tensor() : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>
+ // CHECK-DAG: %[[VAL_9:.*]] = bufferization.alloc_tensor() copy(%[[VAL_8]]) {bufferization.escape = [false]} : tensor<8x8xf64>
+ // CHECK-DAG: %[[VAL_10:.*]] = tensor.empty() : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>>
// CHECK-DAG: %[[VAL_11:.*]] = bufferization.to_memref %[[VAL_1]] : memref<8x8xf64>
// CHECK-DAG: %[[VAL_12:.*]] = bufferization.to_memref %[[VAL_2]] : memref<8x8xf64>
- // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref<?xindex>
- // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref<?xindex>
- // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref<?xindex>
- // CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref<?xindex>
- // CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref<?xf64>
+ // CHECK-DAG: %[[VAL_13:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>> to memref<?xindex>
+ // CHECK-DAG: %[[VAL_14:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 0 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>> to memref<?xindex>
+ // CHECK-DAG: %[[VAL_15:.*]] = sparse_tensor.positions %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>> to memref<?xindex>
+ // CHECK-DAG: %[[VAL_16:.*]] = sparse_tensor.coordinates %[[VAL_0]] {level = 1 : index} : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>> to memref<?xindex>
+ // CHECK-DAG: %[[VAL_17:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>> to memref<?xf64>
// CHECK: %[[VAL_18:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_4]]] : memref<?xindex>
// CHECK: %[[VAL_19:.*]] = memref.load %[[VAL_13]]{{\[}}%[[VAL_5]]] : memref<?xindex>
- // CHECK: %[[VAL_20:.*]] = scf.for %[[VAL_21:.*]] = %[[VAL_18]] to %[[VAL_19]] step %[[VAL_5]] iter_args(%[[VAL_22:.*]] = %[[VAL_10]]) -> (tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>) {
+ // CHECK: %[[VAL_20:.*]] = scf.for %[[VAL_21:.*]] = %[[VAL_18]] to %[[VAL_19]] step %[[VAL_5]] iter_args(%[[VAL_22:.*]] = %[[VAL_10]]) -> (tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>>) {
// CHECK: %[[VAL_23:.*]] = memref.load %[[VAL_14]]{{\[}}%[[VAL_21]]] : memref<?xindex>
- // CHECK: %[[VAL_24:.*]], %[[VAL_25:.*]], %[[VAL_26:.*]], %[[VAL_27:.*]] = sparse_tensor.expand %[[VAL_10]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>> to memref<?xf64>, memref<?xi1>, memref<?xindex>
+ // CHECK: %[[VAL_24:.*]], %[[VAL_25:.*]], %[[VAL_26:.*]], %[[VAL_27:.*]] = sparse_tensor.expand %[[VAL_10]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>> to memref<?xf64>, memref<?xi1>, memref<?xindex>
// CHECK: %[[VAL_28:.*]] = scf.for %[[VAL_29:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] iter_args(%[[VAL_30:.*]] = %[[VAL_27]]) -> (index) {
// CHECK: %[[VAL_31:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_23]], %[[VAL_29]]] : memref<8x8xf64>
// CHECK: %[[VAL_32:.*]] = memref.load %[[VAL_15]]{{\[}}%[[VAL_21]]] : memref<?xindex>
@@ -170,15 +168,15 @@ func.func @sampled_dd_unfused(%args: tensor<8x8xf64, #SM>,
// CHECK: scf.yield %[[VAL_37]] : index
// CHECK: }
// CHECK: memref.store %[[VAL_44]], %[[VAL_24]]{{\[}}%[[VAL_38]]] : memref<?xf64>
- // CHECK: scf.yield %[[VAL_49:.*]] : index
+ // CHECK: scf.yield %[[VAL_47]] : index
// CHECK: }
- // CHECK: scf.yield %[[VAL_50:.*]] : index
+ // CHECK: scf.yield %[[VAL_35]] : index
// CHECK: }
- // CHECK: %[[VAL_51:.*]] = sparse_tensor.compress %[[VAL_24]], %[[VAL_25]], %[[VAL_26]], %[[VAL_52:.*]] into %[[VAL_22]]{{\[}}%[[VAL_23]]] : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>
- // CHECK: scf.yield %[[VAL_51]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>
+ // CHECK: %[[VAL_49:.*]] = sparse_tensor.compress %[[VAL_24]], %[[VAL_25]], %[[VAL_26]], %[[VAL_28]] into %[[VAL_22]]{{\[}}%[[VAL_23]]] : memref<?xf64>, memref<?xi1>, memref<?xindex>, tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>>
+ // CHECK: scf.yield %[[VAL_49]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>>
// CHECK: }
- // CHECK: %[[VAL_53:.*]] = sparse_tensor.load %[[VAL_54:.*]] hasInserts : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>
- // CHECK: return %[[VAL_53]] : tensor<8x8xf64, #sparse_tensor.encoding<{ lvlTypes = [ "compressed", "compressed" ] }>>
+ // CHECK: %[[VAL_50:.*]] = sparse_tensor.load %[[VAL_20]] hasInserts : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>>
+ // CHECK: return %[[VAL_50]] : tensor<8x8xf64, #sparse_tensor.encoding<{{{.*}} }>>
// CHECK: }
func.func @sparse_sampled_dd_unfused(%args: tensor<8x8xf64, #SM>,
%arga: tensor<8x8xf64>,
@@ -194,7 +192,7 @@ func.func @sparse_sampled_dd_unfused(%args: tensor<8x8xf64, #SM>,
linalg.yield %q : f64
} -> tensor<8x8xf64>
// Sample the result with elements-wise multiplication with sparse matrix.
- %3 = bufferization.alloc_tensor() : tensor<8x8xf64, #SM>
+ %3 = tensor.empty() : tensor<8x8xf64, #SM>
%4 = linalg.generic #trait_scale
ins(%2, %args : tensor<8x8xf64>, tensor<8x8xf64, #SM>)
outs(%3 : tensor<8x8xf64, #SM>) {