diff --git a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
index 48e657cca96e3..b36fdfbb28b58 100644
--- a/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
+++ b/mlir/include/mlir/Dialect/Linalg/Transforms/Transforms.h
@@ -762,6 +762,14 @@ LogicalResult copyToGPUPrivateMemory(OpBuilder &b, Value src, Value dst);
 /// memory is freed when going outside of the scope.
 LogicalResult deallocateGPUPrivateMemory(OpBuilder &, Value /*buffer*/);
 
+/// Return true if there's dedicated logic in the Linalg Vectorizer to
+/// vectorize this Op, false otherwise.
+///
+/// Note that this helper merely implements a very high-level check and that the
+/// vectorizer also requires various additional pre-conditions to be met for it
+/// to work (these are checked by the vectorizer itself).
+bool hasVectorizationImpl(Operation *);
+
 /// Emit a suitable vector form for an operation. If provided,
 /// `inputVectorSizes` are used to vectorize this operation. `inputVectorSizes`
 /// must match the rank of the iteration space of the operation and the sizes
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 46c8510f4ed51..895710d3754f8 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -3411,11 +3411,11 @@ struct VectorizationPattern : public RewritePattern {
         flatten1DDepthwiseConv(flattenConv) {}
   LogicalResult matchAndRewrite(Operation *op,
                                 PatternRewriter &rewriter) const override {
-    LinalgOp linalgOp = dyn_cast<LinalgOp>(op);
-    if (!linalgOp)
-      return rewriter.notifyMatchFailure(op, "expected Linalg Op");
-    return vectorize(rewriter, linalgOp, /*inputVectorSizes=*/{},
-                     /*scalableVecDims=*/{}, vectorizeNDExtract,
+    if (!linalg::hasVectorizationImpl(op))
+      return rewriter.notifyMatchFailure(op,
+                                         "Unsupported Op, cannot vectorize");
+    return vectorize(rewriter, op, /*inputVectorSizes=*/{},
+                     /*inputScalableVecDims=*/{}, vectorizeNDExtract,
                      flatten1DDepthwiseConv);
   }
 
@@ -3496,8 +3496,7 @@ DiagnosedSilenceableFailure transform::VectorizeOp::apply(
 
   // TODO: Check that the correct number of vectorSizes was provided.
   for (Operation *target : targets) {
-    if (!isa<linalg::LinalgOp, tensor::PadOp, tensor::PackOp, tensor::UnPackOp>(
-            target)) {
+    if (!linalg::hasVectorizationImpl(target)) {
       return mlir::emitSilenceableFailure(target->getLoc())
              << "Unsupported Op, cannot vectorize";
     }
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index ca85f4b9b9c15..d9adf3f0f86c8 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -2092,6 +2092,10 @@ LogicalResult mlir::linalg::vectorizeOpPrecondition(
     Operation *op, ArrayRef<int64_t> inputVectorSizes,
     ArrayRef<bool> inputScalableVecDims, bool vectorizeNDExtract,
     bool flatten1DDepthwiseConv) {
+
+  if (!hasVectorizationImpl(op))
+    return failure();
+
   if (failed(vectorizeScalableVectorPrecondition(op, inputVectorSizes,
                                                  inputScalableVecDims)))
     return failure();
@@ -2129,6 +2133,11 @@ static void convertAffineApply(RewriterBase &rewriter, LinalgOp linalgOp) {
   }
 }
 
+bool mlir::linalg::hasVectorizationImpl(Operation *op) {
+  return isa<linalg::LinalgOp, tensor::PadOp, tensor::PackOp, tensor::UnPackOp>(
+      op);
+}
+
 /// Emit a suitable vector form for an operation. If provided,
 /// `inputVectorSizes` are used to vectorize this operation.
 /// `inputVectorSizes` must match the rank of the iteration space of the
diff --git a/mlir/test/Dialect/Linalg/vectorization-with-patterns.mlir b/mlir/test/Dialect/Linalg/vectorization-with-patterns.mlir
index 80564ad35cfdb..e7beb72547112 100644
--- a/mlir/test/Dialect/Linalg/vectorization-with-patterns.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization-with-patterns.mlir
@@ -2010,3 +2010,68 @@ module attributes {transform.with_named_sequence} {
 // CHECK: %[[VAL_8:.*]] = vector.transpose %[[VAL_7]], [1, 2, 3, 0] : vector<1x1x12x197xf32> to vector<1x12x197x1xf32>
 // CHECK: %[[VAL_9:.*]] = vector.transfer_write %[[VAL_8]], %[[VAL_3]]{{\[}}%[[VAL_2]], %[[VAL_2]], %[[VAL_2]], %[[VAL_2]]] {in_bounds = [true, true, true, true]} : vector<1x12x197x1xf32>, tensor<1x12x197x1xf32>
 // CHECK: return %[[VAL_9]] : tensor<1x12x197x1xf32>
+
+// -----
+
+// Input identical to the test in vectorization.mlir. Output is different -
+// vector sizes are inferred (rather than user-specified) and hence _no_
+// masking was used.
+
+func.func @test_vectorize_pack(%arg0: tensor<32x8x16xf32>, %arg1: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
+  %pack = tensor.pack %arg0 outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x8x16xf32> -> tensor<4x1x32x16x2xf32>
+  return %pack : tensor<4x1x32x16x2xf32>
+}
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
+
+// CHECK-LABEL: func.func @test_vectorize_pack(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x8x16xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]]], %[[VAL_2]] {in_bounds = [true, true, true]} : tensor<32x8x16xf32>, vector<32x8x16xf32>
+// CHECK: %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<32x8x16xf32> to vector<32x4x2x1x16xf32>
+// CHECK: %[[VAL_6:.*]] = vector.transpose %[[VAL_5]], [1, 3, 0, 4, 2] : vector<32x4x2x1x16xf32> to vector<4x1x32x16x2xf32>
+// CHECK: %[[VAL_7:.*]] = tensor.empty() : tensor<4x1x32x16x2xf32>
+// CHECK: %[[VAL_8:.*]] = vector.transfer_write %[[VAL_6]], %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]]] {in_bounds = [true, true, true, true, true]} : vector<4x1x32x16x2xf32>, tensor<4x1x32x16x2xf32>
+// CHECK: return %[[VAL_8]] : tensor<4x1x32x16x2xf32>
+
+// -----
+
+// Input identical to the test in vectorization.mlir. Output is different -
+// vector sizes are inferred (rather than user-specified) and hence _no_
+// masking was used.
+
+func.func @test_vectorize_padded_pack(%arg0: tensor<32x7x15xf32>, %arg1: tensor<32x4x1x16x2xf32>) -> tensor<32x4x1x16x2xf32> {
+  %pad = arith.constant 0.000000e+00 : f32
+  %pack = tensor.pack %arg0 padding_value(%pad : f32) inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x7x15xf32> -> tensor<32x4x1x16x2xf32>
+  return %pack : tensor<32x4x1x16x2xf32>
+}
+
+// CHECK-LABEL: func.func @test_vectorize_padded_pack(
+// CHECK-SAME: %[[VAL_0:.*]]: tensor<32x7x15xf32>,
+// CHECK-SAME: %[[VAL_1:.*]]: tensor<32x4x1x16x2xf32>) -> tensor<32x4x1x16x2xf32> {
+// CHECK: %[[VAL_2:.*]] = arith.constant 0.000000e+00 : f32
+// CHECK: %[[VAL_3:.*]] = arith.constant 0 : index
+// CHECK: %[[VAL_4:.*]] = vector.transfer_read %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]]], %[[VAL_2]] {in_bounds = [true, false, false]} : tensor<32x7x15xf32>, vector<32x8x16xf32>
+// CHECK: %[[VAL_5:.*]] = vector.shape_cast %[[VAL_4]] : vector<32x8x16xf32> to vector<32x4x2x1x16xf32>
+// CHECK: %[[VAL_6:.*]] = vector.transpose %[[VAL_5]], [0, 1, 3, 4, 2] : vector<32x4x2x1x16xf32> to vector<32x4x1x16x2xf32>
+// CHECK: %[[VAL_7:.*]] = tensor.empty() : tensor<32x4x1x16x2xf32>
+// CHECK: %[[VAL_8:.*]] = vector.transfer_write %[[VAL_6]], %[[VAL_7]]{{\[}}%[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]], %[[VAL_3]]] {in_bounds = [true, true, true, true, true]} : vector<32x4x1x16x2xf32>, tensor<32x4x1x16x2xf32>
+// CHECK: return %[[VAL_8]] : tensor<32x4x1x16x2xf32>
+
+module attributes {transform.with_named_sequence} {
+  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+    %0 = transform.structured.match ops{["tensor.pack"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+    %1 = transform.get_parent_op %0 {isolated_from_above} : (!transform.any_op) -> !transform.any_op
+    %2 = transform.structured.vectorize_children_and_apply_patterns %1 : (!transform.any_op) -> !transform.any_op
+    transform.yield
+  }
+}
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index 0e2b2458d29cd..b4689289d892c 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -666,6 +666,10 @@ module attributes {transform.with_named_sequence} {
 
 // -----
 
+// Input identical to the test in vectorization-with-patterns.mlir. Output is
+// different - vector sizes are user-specified (rather than inferred) and hence
+// masking was used.
+
 func.func @test_vectorize_pack(%arg0: tensor<32x8x16xf32>, %arg1: tensor<4x1x32x16x2xf32>) -> tensor<4x1x32x16x2xf32> {
   %pack = tensor.pack %arg0 outer_dims_perm = [1, 2, 0] inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x8x16xf32> -> tensor<4x1x32x16x2xf32>
   return %pack : tensor<4x1x32x16x2xf32>
@@ -692,6 +696,10 @@ module attributes {transform.with_named_sequence} {
 
 // -----
 
+// Input identical to the test in vectorization-with-patterns.mlir. Output is
+// different - vector sizes are user-specified (rather than inferred) and hence
+// masking was used.
+
 func.func @test_vectorize_padded_pack(%arg0: tensor<32x7x15xf32>, %arg1: tensor<32x4x1x16x2xf32>) -> tensor<32x4x1x16x2xf32> {
   %pad = arith.constant 0.000000e+00 : f32
   %pack = tensor.pack %arg0 padding_value(%pad : f32) inner_dims_pos = [2, 1] inner_tiles = [16, 2] into %arg1 : tensor<32x7x15xf32> -> tensor<32x4x1x16x2xf32>
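
Note on intended usage: the calling convention this patch establishes is the one the updated VectorizationPattern follows, i.e. gate the generic vectorize() entry point on the cheap hasVectorizationImpl() check and let vectorizeOpPrecondition() enforce the detailed pre-conditions. A minimal sketch of a downstream pattern doing the same is given below; the pattern name is hypothetical and not part of the patch, only linalg::hasVectorizationImpl and linalg::vectorize come from the patched API.

// Hypothetical downstream pattern (illustration only, not part of the patch).
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

struct VectorizeAnySupportedOp : public RewritePattern {
  VectorizeAnySupportedOp(MLIRContext *ctx)
      : RewritePattern(MatchAnyOpTypeTag(), /*benefit=*/1, ctx) {}

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    // Cheap structural gate: LinalgOp, tensor.pad, tensor.pack or
    // tensor.unpack. The detailed pre-conditions are re-checked inside
    // vectorize() via vectorizeOpPrecondition().
    if (!linalg::hasVectorizationImpl(op))
      return rewriter.notifyMatchFailure(op, "Unsupported Op, cannot vectorize");
    // Empty vector sizes: the vectorizer infers them from the static shapes,
    // so no masking is generated (cf. the vectorization-with-patterns tests).
    return linalg::vectorize(rewriter, op, /*inputVectorSizes=*/{},
                             /*inputScalableVecDims=*/{},
                             /*vectorizeNDExtract=*/false,
                             /*flatten1DDepthwiseConv=*/false);
  }
};

In-tree users do not need to write such a pattern by hand: transform.structured.vectorize_children_and_apply_patterns applies VectorizationPattern itself, as exercised by the tests above.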