[mlir][transform] Drop redundant padding_dimensions spec from pad_tiling_interface #145257
Conversation
@llvm/pr-subscribers-mlir-linalg @llvm/pr-subscribers-mlir

Author: Nicolas Vasilache (nicolasvasilache)

Changes

This revision aligns the padding specification in pad_tiling_interface with that of the tiling specification.

Full diff: https://github.com/llvm/llvm-project/pull/145257.diff

5 Files Affected:
- mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
- mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
- mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
- mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface-multiple-of.mlir
- mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir
diff --git a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
index cf3f2b70580da..c5650470fdc8d 100644
--- a/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -1195,17 +1195,29 @@ def PadTilingInterfaceOp : Op<Transform_Dialect, "structured.pad_tiling_interfac
TransformOpInterface,
ReportTrackingListenerFailuresOpTrait]> {
let description = [{
- Pads the operations pointed to by the target handle using the options
- provided as operation attributes. The operation returns a handle to the
- padded operation and to the padding operation ("tensor.pad").
+ Pads the **iteration domain** of the operations pointed to by the target
+ handle using the options provided as operation attributes. Padding the
iteration domain induces a padding of the operands that is consistent
with the op semantics; unlike for simple elementwise ops, this padding may
not be trivially deducible or specifiable on the operands alone
(e.g. convolutions).
+
+ The specification of `padding_sizes` follows that of `tile_sizes` during
tiling: the value "0" on a particular iterator encodes "no padding". As in
tiling, the specification is automatically completed with 0s up to the
operation rank.
+
+ This transformation returns a handle to the padded operation and to the
+ padding operation ("tensor.pad").
TODO: in the future this should be moved out of a specific Linalg
implementation file and into a more general "Structured" file.
#### Return modes
- This operation ignores non-Linalg ops and drops them in the return.
- In the future, this operation will support all TilingInterfaceOps.
+ This operation ignores non-IndexingMapOpInterface ops and drops them in the
+ return. In the future, this operation will support all TilingInterfaceOps
+ for which the contract between iteration domain and operands can be
+ reified.
This operation may produce a definite failure if the padding fails for any
reason.
@@ -1219,7 +1231,6 @@ def PadTilingInterfaceOp : Op<Transform_Dialect, "structured.pad_tiling_interfac
let arguments =
(ins TransformHandleTypeInterface:$target,
DefaultValuedAttr<ArrayAttr, "{}">:$padding_values,
- DefaultValuedAttr<I64ArrayAttr, "{}">:$padding_dimensions,
Variadic<TransformAnyParamTypeOrAnyHandle>:$padding_sizes,
DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:
$static_padding_sizes,
@@ -1245,11 +1256,9 @@ def PadTilingInterfaceOp : Op<Transform_Dialect, "structured.pad_tiling_interfac
// add/mul ring at the moment.
// TODO: support other operations (e.g. min, max etc).
OpBuilder<(ins "Value":$target,
- "ArrayRef<int64_t>":$paddingDimensions,
CArg<"ArrayRef<int64_t>", "{}">:$staticPaddingSizes,
CArg<"bool", "false">:$padToMultipleOf)>,
OpBuilder<(ins "Value":$target,
- "ArrayRef<int64_t>":$paddingDimensions,
"ArrayRef<OpFoldResult>":$mixedPadPaddingSizes,
CArg<"bool", "false">:$usePrescribedTensorShapes)>
];
diff --git a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
index 5d55adbf46f36..d9a0ba02f4fe4 100644
--- a/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
+++ b/mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -2163,7 +2163,6 @@ LogicalResult transform::PadOp::verify() {
void transform::PadTilingInterfaceOp::build(OpBuilder &b,
OperationState &result,
Value target,
- ArrayRef<int64_t> paddingDimensions,
ArrayRef<int64_t> paddingSizes,
bool padToMultipleOf) {
auto resultType = transform::AnyOpType::get(b.getContext());
@@ -2172,7 +2171,6 @@ void transform::PadTilingInterfaceOp::build(OpBuilder &b,
/*types=*/TypeRange{resultType, resultType},
/*target=*/target,
/*paddingValues=*/ArrayAttr(), // let inference handle this
- /*paddingDimensions=*/b.getI64ArrayAttr(paddingDimensions),
/*paddingSizes=*/ValueRange{},
/*paddingSizes=*/
(paddingSizes.empty() ? DenseI64ArrayAttr()
@@ -2183,7 +2181,6 @@ void transform::PadTilingInterfaceOp::build(OpBuilder &b,
void transform::PadTilingInterfaceOp::build(
OpBuilder &b, OperationState &result, Value target,
- ArrayRef<int64_t> paddingDimensions,
ArrayRef<OpFoldResult> mixedPaddingSizes, bool padToMultipleOf) {
auto resultType = transform::AnyOpType::get(b.getContext());
SmallVector<int64_t> staticPaddingSizes;
@@ -2195,7 +2192,6 @@ void transform::PadTilingInterfaceOp::build(
/*types=*/TypeRange{resultType, resultType},
/*target=*/target,
/*paddingValues=*/ArrayAttr(), // let inference handle this
- /*paddingDimensions=*/b.getI64ArrayAttr(paddingDimensions),
/*paddingSizes=*/dynamicPaddingSizes,
/*paddingSizes=*/staticPaddingSizes,
/*usePrescribedTensorShapes=*/padToMultipleOf);
@@ -2277,8 +2273,6 @@ transform::PadTilingInterfaceOp::apply(transform::TransformRewriter &rewriter,
TilingInterface paddedOp;
PadTilingInterfaceOptions options;
options.setPaddingValues(paddingValues)
- .setPaddingDimensions(
- extractFromIntegerArrayAttr<int64_t>(getPaddingDimensions()))
.setPaddingSizes(getMixedPaddingSizes())
.setPadToMultipleOf(getPadToMultipleOf());
@@ -2303,20 +2297,7 @@ transform::PadTilingInterfaceOp::apply(transform::TransformRewriter &rewriter,
return DiagnosedSilenceableFailure::success();
}
-LogicalResult transform::PadTilingInterfaceOp::verify() {
- SmallVector<int64_t> paddingDimensions =
- extractFromIntegerArrayAttr<int64_t>(getPaddingDimensions());
- if (any_of(paddingDimensions,
- [](int64_t paddingDimension) { return paddingDimension < 0; })) {
- return emitOpError() << "expects padding_dimensions to contain positive "
- "integers, found "
- << getPaddingDimensions();
- }
- if (getMixedPaddingSizes().size() != paddingDimensions.size()) {
- return emitOpError() << "expects as many multiples as padding_dimensions";
- }
- return success();
-}
+LogicalResult transform::PadTilingInterfaceOp::verify() { return success(); }
//===---------------------------------------------------------------------===//
// HoistPadOp
diff --git a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
index 42dac0776bace..eda3373b4d639 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
@@ -32,29 +32,27 @@ using namespace mlir::tensor;
#define DBGSNL() (llvm::dbgs() << "\n")
/// Form a "full-rank" padding specification so that the application is easy.
-static llvm::SmallDenseMap<int64_t, OpFoldResult>
-getDimsToSize(Builder &b, ArrayRef<OpFoldResult> indexingSizes,
- const PadTilingInterfaceOptions &options) {
- llvm::SmallDenseMap<int64_t, OpFoldResult> dimsToSize;
- for (const auto &[paddingDim, paddingSize] :
- llvm::zip_equal(options.paddingDimensions, options.paddingSizes)) {
- dimsToSize[paddingDim] = paddingSize;
- }
+static SmallVector<OpFoldResult>
+getFullRankPaddingSizes(Builder &b, ArrayRef<OpFoldResult> indexingSizes,
+ const PadTilingInterfaceOptions &options) {
+ SmallVector<OpFoldResult> paddingSizes;
// Complete the padding specification to specify all dimensions.
- for (int64_t idx = 0, e = indexingSizes.size(); idx != e; ++idx) {
- if (dimsToSize.find(idx) != dimsToSize.end())
- continue;
- // If a dimension is not specified, either complete with:
+ for (size_t idx = 0, e = indexingSizes.size(); idx != e; ++idx) {
+ // Complete to zero if needed.
+ paddingSizes.push_back(options.paddingSizes.size() > idx
+ ? options.paddingSizes[idx]
+ : b.getIndexAttr(0));
+ // If a dimension is zero (either specified or completed), replace by:
// - 1 if we are padding to the next multiple of.
// - indexingSizes[idx] otherwise
- dimsToSize[idx] =
- options.padToMultipleOf ? b.getIndexAttr(1) : indexingSizes[idx];
- }
- for (int64_t idx = 0, e = indexingSizes.size(); idx != e; ++idx) {
- LLVM_DEBUG(DBGS() << "----idx: " << idx << " : " << dimsToSize[idx]
+ if (isZeroInteger(paddingSizes[idx])) {
+ paddingSizes[idx] =
+ options.padToMultipleOf ? b.getIndexAttr(1) : indexingSizes[idx];
+ }
+ LLVM_DEBUG(DBGS() << "----idx: " << idx << " : " << paddingSizes[idx]
<< "\n");
}
- return dimsToSize;
+ return paddingSizes;
}
/// Compute the padded shape of the given value `v` of `RankedTensorType` given
@@ -80,8 +78,8 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
"rank");
// "Full-rank" padding specification.
- llvm::SmallDenseMap<int64_t, OpFoldResult> dimsToSize =
- getDimsToSize(rewriter, indexingSizes, options);
+ SmallVector<OpFoldResult> paddingSizes =
+ getFullRankPaddingSizes(rewriter, indexingSizes, options);
// For each dimension in the operand's shape, iterate over indexingSizes and
// add the various term contributions.
@@ -97,7 +95,9 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
// Find all padding dimensions that contribute to this operand dimension
// and compute the padded term contribution to the final padded shape.
SmallVector<OpFoldResult> terms;
- for (const auto &[paddingDim, paddingSize] : dimsToSize) {
+ for (size_t paddingDim = 0, e = paddingSizes.size(); paddingDim != e;
+ ++paddingDim) {
+ OpFoldResult paddingSize = paddingSizes[paddingDim];
LLVM_DEBUG(DBGS() << "------try apply padding of dim: " << paddingDim
<< " to: " << paddingSize << "\n");
if (!enResults.value().isFunctionOfDim(paddingDim))
@@ -224,9 +224,6 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, TilingInterface opToPad,
SmallVector<tensor::PadOp> &padOps,
PadSizeComputationFunction computePaddingSizeFun) {
LLVM_DEBUG(DBGS() << "Start rewriteAsPaddedOp : " << opToPad << "\n");
- assert(constOptions.paddingSizes.size() ==
- constOptions.paddingDimensions.size() &&
- "invalid number of elements in padToMultipleOf");
Location loc = opToPad.getLoc();
PadTilingInterfaceOptions options(constOptions);
diff --git a/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface-multiple-of.mlir b/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface-multiple-of.mlir
index 4fcbcbb2a18e3..2bba309953570 100644
--- a/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface-multiple-of.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface-multiple-of.mlir
@@ -36,8 +36,7 @@ module attributes {transform.with_named_sequence} {
// Tile to 5 then pad to 8 (supposedly to better hit vector ops).
%matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul tile_sizes [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
%matmul_padded, %_ = transform.structured.pad_tiling_interface %matmul_l1 to padding_sizes [8] pad_to_multiple_of {
- padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
- padding_dimensions=[0]
+ padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
@@ -73,9 +72,8 @@ module {
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
%0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [3, 5] pad_to_multiple_of {
- padding_dimensions = [0, 2],
- padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]
+ %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [3, 0, 5] pad_to_multiple_of {
+ padding_values = [0.0 : f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
}
@@ -128,9 +126,8 @@ module {
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
%0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [3, 5] pad_to_multiple_of {
- padding_dimensions = [0, 2],
- padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]
+ %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [3, 0, 5] pad_to_multiple_of {
+ padding_values = [0.0 : f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
}
@@ -174,9 +171,8 @@ module attributes {transform.with_named_sequence} {
: (!transform.any_op) -> !transform.any_op
// Pad then tile should produce static shapes.
- %matmul_padded, %_ = transform.structured.pad_tiling_interface %matmul to padding_sizes [8, 16] pad_to_multiple_of {
- padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
- padding_dimensions=[0, 2]
+ %matmul_padded, %_ = transform.structured.pad_tiling_interface %matmul to padding_sizes [8, 0, 16] pad_to_multiple_of {
+ padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
%m, %l0, %l1 = transform.structured.tile_using_for %matmul_padded tile_sizes [8, 0, 16]
@@ -223,9 +219,8 @@ module attributes {transform.with_named_sequence} {
%m, %l0, %l1 = transform.structured.tile_using_for %matmul tile_sizes [8, 0, 16]
: (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
- %matmul_padded, %_ = transform.structured.pad_tiling_interface %m to padding_sizes [8, 16] pad_to_multiple_of {
- padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
- padding_dimensions=[0, 2]
+ %matmul_padded, %_ = transform.structured.pad_tiling_interface %m to padding_sizes [8, 0, 16] pad_to_multiple_of {
+ padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
@@ -258,9 +253,8 @@ module attributes {transform.with_named_sequence} {
%m, %l0, %l1 = transform.structured.tile_using_for %matmul tile_sizes [8, 0, 16]
: (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
- %matmul_padded, %_ = transform.structured.pad_tiling_interface %m to padding_sizes [8, 16] pad_to_multiple_of {
- padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
- padding_dimensions=[0, 2]
+ %matmul_padded, %_ = transform.structured.pad_tiling_interface %m to padding_sizes [8, 0, 16] pad_to_multiple_of {
+ padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
diff --git a/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir b/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir
index f0a410fa4015f..26c03ed309c05 100644
--- a/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir
@@ -18,8 +18,7 @@ module attributes {transform.with_named_sequence} {
: (!transform.any_op) -> (!transform.any_op, !transform.any_op)
%fill_padded, %_ = transform.structured.pad_tiling_interface %fill_l1 to padding_sizes [8] {
- padding_values=[0.0 : f32, 0.0 : f32],
- padding_dimensions=[0]
+ padding_values=[0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
@@ -55,8 +54,7 @@ module attributes {transform.with_named_sequence} {
// Tile to 5 then pad to 8 (supposedly to better hit vector ops).
%matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul tile_sizes [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
%matmul_padded, %_ = transform.structured.pad_tiling_interface %matmul_l1 to padding_sizes [8] {
- padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
- padding_dimensions=[0]
+ padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
@@ -91,8 +89,7 @@ module {
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
%0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [8, 14] {
- padding_dimensions = [0, 2],
+ %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [8, 0, 14] {
padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
@@ -147,8 +144,7 @@ module {
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
%0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
- %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [8, 14] {
- padding_dimensions = [0, 2],
+ %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [8, 0, 14] {
padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
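The new semantics are easiest to see in the updated tests above. This excerpt is taken from the first multiple-of test: the matmul is tiled by 5 along dim 0 and then padded back up to a multiple of 8, and `padding_sizes [8]` is automatically completed with trailing 0s to `[8, 0, 0]` on the rank-3 op, so dims 1 and 2 are left unpadded:

```mlir
// Tile m by 5, then pad the tiled op so dim 0 becomes a multiple of 8.
// padding_sizes [8] completes to [8, 0, 0]: dims 1 and 2 are not padded.
%matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul tile_sizes [5]
  : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
%matmul_padded, %_ = transform.structured.pad_tiling_interface %matmul_l1
    to padding_sizes [8] pad_to_multiple_of {
  padding_values = [0.0 : f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
```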
Rebase needed, but otherwise LGTM.
This revision aligns the padding specification in pad_tiling_interface with that of the tiling specification.
Dimensions that should be skipped are specified by "padding by 0".
Trailing dimensions that are ignored are automatically completed to "pad to 0".
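For completeness, a minimal standalone sketch of the explicit zero-skip form, adapted from the updated tests (the named-sequence scaffolding is the usual transform-interpreter boilerplate):

```mlir
// Matmul iterators are (m, n, k): pad m to a multiple of 8, skip n via the
// explicit 0 entry, and pad k to a multiple of 16. The 0 entry carries the
// intent formerly expressed by padding_dimensions = [0, 2].
module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0
      : (!transform.any_op) -> !transform.any_op
    %matmul_padded, %pad = transform.structured.pad_tiling_interface %matmul
        to padding_sizes [8, 0, 16] pad_to_multiple_of {
      padding_values = [0.0 : f32, 0.0 : f32, 0.0 : f32]
    } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
    transform.yield
  }
}
```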