@@ -343,3 +343,65 @@ module attributes {transform.with_named_sequence} {
     transform.yield
   }
 }
+
+// -----
+
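+// Pad a strided convolution: OW (14 -> 16) and C (4 -> 16) are rounded up to
+// multiples of 16. The padded output width propagates through stride 3, so the
+// input needs (16 - 1) * 3 + (3 - 1) + 1 = 48 columns and is padded 42 -> 48.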
+// CHECK-LABEL: pad_conv_strided
+func.func @pad_conv_strided(%arg0: tensor<1x42x42x4xf32>, %arg1: tensor<16x3x3x4xf32>, %arg2: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32> {
+
+  // CHECK: tensor.pad %{{.*}} low[0, 0, 0, 0] high[0, 0, 6, 12]
+  // CHECK:   : tensor<1x42x42x4xf32> to tensor<1x42x48x16xf32>
+  // CHECK: tensor.pad %{{.*}} low[0, 0, 0, 0] high[0, 0, 0, 12]
+  // CHECK:   : tensor<16x3x3x4xf32> to tensor<16x3x3x16xf32>
+  // CHECK: tensor.pad %{{.*}} low[0, 0, 0, 0] high[0, 0, 2, 0]
+  // CHECK:   : tensor<1x14x14x16xf32> to tensor<1x14x16x16xf32>
+  // CHECK-NEXT: linalg.conv_2d_nhwc_fhwc
+  // CHECK: tensor.extract_slice %{{.*}}[0, 0, 0, 0] [1, 14, 14, 16] [1, 1, 1, 1] : tensor<1x14x16x16xf32> to tensor<1x14x14x16xf32>
+
+  %0 = linalg.conv_2d_nhwc_fhwc
+    {dilations = dense<1> : tensor<2xi64>, strides = dense<3> : tensor<2xi64>}
+    ins(%arg0, %arg1: tensor<1x42x42x4xf32>, tensor<16x3x3x4xf32>)
+    outs(%arg2: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32>
+  return %0 : tensor<1x14x14x16xf32>
+}
+
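+// The seven padding_sizes entries below follow the op's iteration space,
+// here (n, oh, ow, f, kh, kw, c); the third and seventh entries round ow
+// and c up to multiples of 16.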
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_fhwc"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [0, 0, 16, 0, 0, 0, 16] pad_to_multiple_of {
+      padding_values = [0.0 : f32, 0.0 : f32, 0.0 : f32, 0.0 : f32, 0.0 : f32, 0.0 : f32, 0.0 : f32]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
+}
+
+// -----
+
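+// Same padding sizes on a dilated convolution: with dilation 2 the padded
+// output width needs (16 - 1) * 1 + (3 - 1) * 2 + 1 = 20 input columns, so
+// the input is padded 18 -> 20.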
+// CHECK-LABEL: pad_conv_dilated
+func.func @pad_conv_dilated(%arg0: tensor<1x18x18x4xf32>, %arg1: tensor<16x3x3x4xf32>, %arg2: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32> {
+
+  // CHECK: tensor.pad %{{.*}} low[0, 0, 0, 0] high[0, 0, 2, 12]
+  // CHECK:   : tensor<1x18x18x4xf32> to tensor<1x18x20x16xf32>
+  // CHECK: tensor.pad %{{.*}} low[0, 0, 0, 0] high[0, 0, 0, 12]
+  // CHECK:   : tensor<16x3x3x4xf32> to tensor<16x3x3x16xf32>
+  // CHECK: tensor.pad %{{.*}} low[0, 0, 0, 0] high[0, 0, 2, 0]
+  // CHECK:   : tensor<1x14x14x16xf32> to tensor<1x14x16x16xf32>
+  // CHECK-NEXT: linalg.conv_2d_nhwc_fhwc
+  // CHECK: tensor.extract_slice %{{.*}}[0, 0, 0, 0] [1, 14, 14, 16] [1, 1, 1, 1] : tensor<1x14x16x16xf32> to tensor<1x14x14x16xf32>
+
+  %0 = linalg.conv_2d_nhwc_fhwc
+    {dilations = dense<2> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>}
+    ins(%arg0, %arg1: tensor<1x18x18x4xf32>, tensor<16x3x3x4xf32>)
+    outs(%arg2: tensor<1x14x14x16xf32>) -> tensor<1x14x14x16xf32>
+  return %0 : tensor<1x14x14x16xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_fhwc"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+    %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [0, 0, 16, 0, 0, 0, 16] pad_to_multiple_of {
+      padding_values = [0.0 : f32, 0.0 : f32, 0.0 : f32, 0.0 : f32, 0.0 : f32, 0.0 : f32, 0.0 : f32]
+    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+    transform.yield
+  }
+}