@@ -209,21 +209,33 @@ module attributes {transform.with_named_sequence} {
 
 // -----
 
-// CHECK-LABEL: @pad(
-func.func @pad(%arg0: tensor<24x12xf32>,
-               %arg1: tensor<12x25xf32>,
-               %arg2: tensor<24x25xf32>) -> tensor<24x25xf32> {
-  // This is attached to an error that is silenceable and is not reported by this transform
-  // {{when applied to this op}}
+// With all padded dims being static, there's nothing to pad. However, with the
+// `nofold` attribute set (see `pack_paddings`), the corresponding pad Ops are
+// preserved.
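+//
+// For reference, each zero-size pad kept alive by `nofold` looks roughly like
+// this in the output IR (a sketch for illustration; the CHECK lines below only
+// match the first line of the op):
+//
+//   %cst = arith.constant 0.000000e+00 : f32
+//   %padded = tensor.pad %arg0 nofold low[0, 0] high[0, 0] {
+//   ^bb0(%i: index, %j: index):
+//     tensor.yield %cst : f32
+//   } : tensor<24x12xf32> to tensor<24x12xf32>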
+
+// CHECK-LABEL: @zero_pad_static(
+func.func @zero_pad_static(%arg0: tensor<24x12xf32>,
+                           %arg1: tensor<12x25xf32>,
+                           %arg2: tensor<24x25xf32>) -> tensor<24x25xf32> {
+
+// CHECK-SAME: %[[ARG_0:.*]]: tensor<24x12xf32>,
+// CHECK-SAME: %[[ARG_1:.*]]: tensor<12x25xf32>,
+// CHECK-SAME: %[[ARG_2:.*]]: tensor<24x25xf32>) -> tensor<24x25xf32> {
+
+// CHECK: %[[PAD_ARG_0:.*]] = tensor.pad %[[ARG_0]] nofold low[0, 0] high[0, 0]
+// CHECK: %[[PAD_ARG_1:.*]] = tensor.pad %[[ARG_1]] nofold low[0, 0] high[0, 0]
+// CHECK-NOT: tensor.pad
+
+// CHECK: %[[MATMUL:.*]] = linalg.matmul
+// CHECK-SAME: ins(%[[PAD_ARG_0]], %[[PAD_ARG_1]] : tensor<24x12xf32>, tensor<12x25xf32>)
+// CHECK-SAME: outs(%[[ARG_2]]
   %0 = linalg.matmul ins(%arg0, %arg1 : tensor<24x12xf32>, tensor<12x25xf32>) outs(%arg2 : tensor<24x25xf32>) -> tensor<24x25xf32>
   func.return %0 : tensor<24x25xf32>
 }
 
 module attributes {transform.with_named_sequence} {
   transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
     %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
-    // This error is silenceable and is not reported by this transform
-    // {{transform.structured.pad failed to apply}}
     %padded, %pad, %copy_back = transform.structured.pad %0 {
       padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
       padding_dimensions=[0, 1, 2],
@@ -235,6 +247,72 @@ module attributes {transform.with_named_sequence} {
 
 // -----
 
+// With all padded dims being static, there's nothing to pad. However, with the
+// `nofold` attribute set (see `pack_paddings`), the corresponding pad Ops are
+// preserved. Same as above, but some dims are now dynamic.
+
+// CHECK-LABEL: @zero_pad_dynamic(
+func.func @zero_pad_dynamic(%arg0: tensor<?x12xf32>,
+                            %arg1: tensor<12x?xf32>,
+                            %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+
+// CHECK-SAME: %[[ARG_0:.*]]: tensor<?x12xf32>,
+// CHECK-SAME: %[[ARG_1:.*]]: tensor<12x?xf32>,
+// CHECK-SAME: %[[ARG_2:.*]]: tensor<?x?xf32>) -> tensor<?x?xf32> {
+
+// CHECK: %[[PAD_ARG_0:.*]] = tensor.pad %[[ARG_0]] nofold low[0, 0] high[0, 0]
+// CHECK: %[[PAD_ARG_1:.*]] = tensor.pad %[[ARG_1]] nofold low[0, 0] high[0, 0]
+// CHECK: %[[PAD_ARG_2:.*]] = tensor.pad %[[ARG_2]] nofold low[0, 0] high[0, 0]
+
+// CHECK: %[[MATMUL:.*]] = linalg.matmul
+// CHECK-SAME: ins(%[[PAD_ARG_0]], %[[PAD_ARG_1]] : tensor<?x12xf32>, tensor<12x?xf32>)
+// CHECK-SAME: outs(%[[PAD_ARG_2]]
+  %0 = linalg.matmul ins(%arg0, %arg1 : tensor<?x12xf32>, tensor<12x?xf32>) outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
+  func.return %0 : tensor<?x?xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %padded, %pad, %copy_back = transform.structured.pad %0 {
+      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+      // Note - only the static dim is padded (see the note after this module)
+      padding_dimensions=[2],
+      pack_paddings=[1, 1, 1]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
+}
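+
+// Note on `padding_dimensions` above: for linalg.matmul the iteration space is
+// (M, N, K) = (d0, d1, d2), so `padding_dimensions=[2]` selects K. K is static
+// (= 12) in both inputs, so the resulting pads are zero-size, exactly as in
+// the fully static test above.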
+
+// -----
+
+// Impossible to get a bound for the padding - fails: the dim selected for
+// padding is dynamic, so no static padded size can be computed.
+
+func.func @negative_no_ub_estimate(%arg0: tensor<?x12xf32>,
+                                   %arg1: tensor<12x?xf32>,
+                                   %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+
+  // expected-note @below {{target op}}
+  %0 = linalg.matmul ins(%arg0, %arg1 : tensor<?x12xf32>, tensor<12x?xf32>) outs(%arg2 : tensor<?x?xf32>) -> tensor<?x?xf32>
+  func.return %0 : tensor<?x?xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    // expected-error @below {{failed to pad op}}
+    %padded, %pad, %copy_back = transform.structured.pad %0 {
+      padding_values=[0.0 : f32, 0.0 : f32, 0.0 : f32],
+      // Note - attempting to pad a dynamic dim (dim 1, the matmul's N dimension)
+      padding_dimensions=[1],
+      pack_paddings=[1, 1, 1]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
+    transform.yield
+  }
+}
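+
+// The `expected-error` / `expected-note` pairs above are checked by MLIR's
+// diagnostic verifier. A typical RUN line for a test like this (an assumption
+// for illustration - the actual RUN line sits outside this diff) would be:
+//
+//   mlir-opt --transform-interpreter --split-input-file --verify-diagnostics %s | FileCheck %s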
+
+// -----
+
 // Check that the padding can be applied even when the output argument of the
 // linalg op is not produced by an empty op or an extract_slice op.
 