mlir/include/mlir/Dialect/Linalg/TransformOps/LinalgTransformOps.td
@@ -1195,17 +1195,29 @@ def PadTilingInterfaceOp : Op<Transform_Dialect, "structured.pad_tiling_interface",
TransformOpInterface,
ReportTrackingListenerFailuresOpTrait]> {
let description = [{
-  Pads the operations pointed to by the target handle using the options
-  provided as operation attributes. The operation returns a handle to the
-  padded operation and to the padding operation ("tensor.pad").
+  Pads the **iteration domain** of the operations pointed to by the target
+  handle using the options provided as operation attributes. Padding the
+  iteration domain induces a padding of the operands that is consistent
+  across the op semantics and, unlike for simple elementwise ops, may not be
+  trivially deducible or specifiable on operands only (e.g. convolutions).
+
+  The specification of `padding_sizes` follows that of `tile_sizes` during
+  tiling: the value "0" on a particular iterator encodes "no padding". Like in
+  the case of tiling, an automatic completion by 0 to the operation rank
+  occurs.
+
+  This transformation returns a handle to the padded operation and to the
+  padding operation ("tensor.pad").

TODO: in the future this should be moved out of a specific Linalg
implementation file and into a more general "Structured" file.

#### Return modes

-  This operation ignores non-Linalg ops and drops them in the return.
-  In the future, this operation will support all TilingInterfaceOps.
+  This operation ignores non-IndexingMapOpInterface ops and drops them in the
+  return. In the future, this operation will support all TilingInterfaceOps
+  for which the contract between iteration domain and operands can be
+  reified.

This operation may produce a definite failure if the padding fails for any
reason.
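As a quick illustration of the updated interface (a sketch: %matmul is a hypothetical handle and the sizes are made up), padding_sizes now addresses iterators positionally. On a matmul with iterators (m, n, k), [8, 0, 16] pads m and k while the "0" leaves n alone, and a shorter list such as [8] is auto-completed to [8, 0, 0]:

  %padded, %pad = transform.structured.pad_tiling_interface %matmul
      to padding_sizes [8, 0, 16] pad_to_multiple_of {
    padding_values = [0.0 : f32, 0.0 : f32, 0.0 : f32]
  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)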
@@ -1219,7 +1231,6 @@ def PadTilingInterfaceOp : Op<Transform_Dialect, "structured.pad_tiling_interface",
let arguments =
(ins TransformHandleTypeInterface:$target,
DefaultValuedAttr<ArrayAttr, "{}">:$padding_values,
-         DefaultValuedAttr<I64ArrayAttr, "{}">:$padding_dimensions,
Variadic<TransformAnyParamTypeOrAnyHandle>:$padding_sizes,
DefaultValuedOptionalAttr<DenseI64ArrayAttr, "{}">:
$static_padding_sizes,
@@ -1245,11 +1256,9 @@ def PadTilingInterfaceOp : Op<Transform_Dialect, "structured.pad_tiling_interface",
// add/mul ring at the moment.
// TODO: support other operations (e.g. min, max etc).
    OpBuilder<(ins "Value":$target,
-                  "ArrayRef<int64_t>":$paddingDimensions,
                   CArg<"ArrayRef<int64_t>", "{}">:$staticPaddingSizes,
                   CArg<"bool", "false">:$padToMultipleOf)>,
    OpBuilder<(ins "Value":$target,
-                  "ArrayRef<int64_t>":$paddingDimensions,
                   "ArrayRef<OpFoldResult>":$mixedPadPaddingSizes,
                   CArg<"bool", "false">:$usePrescribedTensorShapes)>
];
21 changes: 1 addition & 20 deletions mlir/lib/Dialect/Linalg/TransformOps/LinalgTransformOps.cpp
@@ -2163,7 +2163,6 @@ LogicalResult transform::PadOp::verify() {
void transform::PadTilingInterfaceOp::build(OpBuilder &b,
OperationState &result,
Value target,
-                                            ArrayRef<int64_t> paddingDimensions,
ArrayRef<int64_t> paddingSizes,
bool padToMultipleOf) {
auto resultType = transform::AnyOpType::get(b.getContext());
@@ -2172,7 +2171,6 @@ void transform::PadTilingInterfaceOp::build(OpBuilder &b,
/*types=*/TypeRange{resultType, resultType},
/*target=*/target,
/*paddingValues=*/ArrayAttr(), // let inference handle this
-      /*paddingDimensions=*/b.getI64ArrayAttr(paddingDimensions),
/*paddingSizes=*/ValueRange{},
/*paddingSizes=*/
(paddingSizes.empty() ? DenseI64ArrayAttr()
@@ -2183,7 +2181,6 @@ void transform::PadTilingInterfaceOp::build(OpBuilder &b,

void transform::PadTilingInterfaceOp::build(
OpBuilder &b, OperationState &result, Value target,
-    ArrayRef<int64_t> paddingDimensions,
ArrayRef<OpFoldResult> mixedPaddingSizes, bool padToMultipleOf) {
auto resultType = transform::AnyOpType::get(b.getContext());
SmallVector<int64_t> staticPaddingSizes;
@@ -2195,7 +2192,6 @@ void transform::PadTilingInterfaceOp::build(
/*types=*/TypeRange{resultType, resultType},
/*target=*/target,
/*paddingValues=*/ArrayAttr(), // let inference handle this
-      /*paddingDimensions=*/b.getI64ArrayAttr(paddingDimensions),
/*paddingSizes=*/dynamicPaddingSizes,
/*paddingSizes=*/staticPaddingSizes,
/*usePrescribedTensorShapes=*/padToMultipleOf);
@@ -2277,8 +2273,6 @@ transform::PadTilingInterfaceOp::apply(transform::TransformRewriter &rewriter,
TilingInterface paddedOp;
PadTilingInterfaceOptions options;
options.setPaddingValues(paddingValues)
-      .setPaddingDimensions(
-          extractFromIntegerArrayAttr<int64_t>(getPaddingDimensions()))
.setPaddingSizes(getMixedPaddingSizes())
.setPadToMultipleOf(getPadToMultipleOf());

@@ -2303,20 +2297,7 @@ transform::PadTilingInterfaceOp::apply(transform::TransformRewriter &rewriter,
return DiagnosedSilenceableFailure::success();
}

- LogicalResult transform::PadTilingInterfaceOp::verify() {
-   SmallVector<int64_t> paddingDimensions =
-       extractFromIntegerArrayAttr<int64_t>(getPaddingDimensions());
-   if (any_of(paddingDimensions,
-              [](int64_t paddingDimension) { return paddingDimension < 0; })) {
-     return emitOpError() << "expects padding_dimensions to contain positive "
-                             "integers, found "
-                          << getPaddingDimensions();
-   }
-   if (getMixedPaddingSizes().size() != paddingDimensions.size()) {
-     return emitOpError() << "expects as many multiples as padding_dimensions";
-   }
-   return success();
- }
+ LogicalResult transform::PadTilingInterfaceOp::verify() { return success(); }

//===---------------------------------------------------------------------===//
// HoistPadOp
45 changes: 21 additions & 24 deletions mlir/lib/Dialect/Linalg/Transforms/PadTilingInterface.cpp
@@ -32,29 +32,27 @@ using namespace mlir::tensor;
#define DBGSNL() (llvm::dbgs() << "\n")

/// Form a "full-rank" padding specification so that the application is easy.
- static llvm::SmallDenseMap<int64_t, OpFoldResult>
- getDimsToSize(Builder &b, ArrayRef<OpFoldResult> indexingSizes,
-               const PadTilingInterfaceOptions &options) {
-   llvm::SmallDenseMap<int64_t, OpFoldResult> dimsToSize;
-   for (const auto &[paddingDim, paddingSize] :
-        llvm::zip_equal(options.paddingDimensions, options.paddingSizes)) {
-     dimsToSize[paddingDim] = paddingSize;
-   }
+ static SmallVector<OpFoldResult>
+ getFullRankPaddingSizes(Builder &b, ArrayRef<OpFoldResult> indexingSizes,
+                         const PadTilingInterfaceOptions &options) {
+   SmallVector<OpFoldResult> paddingSizes;
// Complete the padding specification to specify all dimensions.
-   for (int64_t idx = 0, e = indexingSizes.size(); idx != e; ++idx) {
-     if (dimsToSize.find(idx) != dimsToSize.end())
-       continue;
-     // If a dimension is not specified, either complete with:
+   for (size_t idx = 0, e = indexingSizes.size(); idx != e; ++idx) {
+     // Complete to zero if needed.
+     paddingSizes.push_back(options.paddingSizes.size() > idx
+                                ? options.paddingSizes[idx]
+                                : b.getIndexAttr(0));
+     // If a dimension is zero (either specified or completed), replace by:
// - 1 if we are padding to the next multiple of.
// - indexingSizes[idx] otherwise
-     dimsToSize[idx] =
-         options.padToMultipleOf ? b.getIndexAttr(1) : indexingSizes[idx];
-   }
-   for (int64_t idx = 0, e = indexingSizes.size(); idx != e; ++idx) {
-     LLVM_DEBUG(DBGS() << "----idx: " << idx << " : " << dimsToSize[idx]
+     if (isZeroInteger(paddingSizes[idx])) {
+       paddingSizes[idx] =
+           options.padToMultipleOf ? b.getIndexAttr(1) : indexingSizes[idx];
+     }
+     LLVM_DEBUG(DBGS() << "----idx: " << idx << " : " << paddingSizes[idx]
<< "\n");
}
-   return dimsToSize;
+   return paddingSizes;
}

/// Compute the padded shape of the given value `v` of `RankedTensorType` given
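To make the completion rule above concrete (a sketch: %generic is a hypothetical handle and the iteration sizes (7, 11, 12) are made up): padding_sizes [3] is first completed to [3, 0, 0]; with pad_to_multiple_of the zeros become 1, so dimension 0 is padded from 7 up to 9 (the next multiple of 3) while dimensions 1 and 2 are padded to a multiple of 1, i.e. left unchanged. Without pad_to_multiple_of the zeros become the iteration sizes themselves, which likewise pads nothing:

  // Only the first iterator carries a nontrivial padding size; the rest is
  // auto-completed to 0 and turned into a no-op as described above.
  %padded, %pad = transform.structured.pad_tiling_interface %generic
      to padding_sizes [3] pad_to_multiple_of {
    padding_values = [0.0 : f32, 0.0 : f32, 0.0 : f32]
  } : (!transform.any_op) -> (!transform.any_op, !transform.any_op)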
@@ -80,8 +78,8 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
"rank");

// "Full-rank" padding specification.
-   llvm::SmallDenseMap<int64_t, OpFoldResult> dimsToSize =
-       getDimsToSize(rewriter, indexingSizes, options);
+   SmallVector<OpFoldResult> paddingSizes =
+       getFullRankPaddingSizes(rewriter, indexingSizes, options);

// For each dimension in the operand's shape, iterate over indexingSizes and
// add the various term contributions.
@@ -97,7 +95,9 @@ SmallVector<OpFoldResult> linalg::computePaddedShape(
// Find all padding dimensions that contribute to this operand dimension
// and compute the padded term contribution to the final padded shape.
SmallVector<OpFoldResult> terms;
-     for (const auto &[paddingDim, paddingSize] : dimsToSize) {
+     for (size_t paddingDim = 0, e = paddingSizes.size(); paddingDim != e;
+          ++paddingDim) {
+       OpFoldResult paddingSize = paddingSizes[paddingDim];
LLVM_DEBUG(DBGS() << "------try apply padding of dim: " << paddingDim
<< " to: " << paddingSize << "\n");
if (!enResults.value().isFunctionOfDim(paddingDim))
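For intuition (a sketch with made-up static shapes): in a plain matmul, the LHS indexing map (m, n, k) -> (m, k) makes the LHS shape a function of padding dimensions 0 and 2 only, so padding_sizes [8, 0, 16] with pad_to_multiple_of contributes to each operand as follows:

  // Expected per-operand effect of padding_sizes [8, 0, 16] pad_to_multiple_of:
  //   %lhs : tensor<7x12xf32>  -> tensor<8x16xf32>   (maps to m, k)
  //   %rhs : tensor<12x11xf32> -> tensor<16x11xf32>  (maps to k, n; n unpadded)
  //   %out : tensor<7x11xf32>  -> tensor<8x11xf32>   (maps to m, n)
  %0 = linalg.matmul ins(%lhs, %rhs : tensor<7x12xf32>, tensor<12x11xf32>)
                     outs(%out : tensor<7x11xf32>) -> tensor<7x11xf32>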
@@ -224,9 +224,6 @@ linalg::rewriteAsPaddedOp(RewriterBase &rewriter, TilingInterface opToPad,
SmallVector<tensor::PadOp> &padOps,
PadSizeComputationFunction computePaddingSizeFun) {
LLVM_DEBUG(DBGS() << "Start rewriteAsPaddedOp : " << opToPad << "\n");
-   assert(constOptions.paddingSizes.size() ==
-              constOptions.paddingDimensions.size() &&
-          "invalid number of elements in padToMultipleOf");

Location loc = opToPad.getLoc();
PadTilingInterfaceOptions options(constOptions);
mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface-multiple-of.mlir
@@ -36,8 +36,7 @@ module attributes {transform.with_named_sequence} {
// Tile to 5 then pad to 8 (supposedly to better hit vector ops).
%matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul tile_sizes [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
%matmul_padded, %_ = transform.structured.pad_tiling_interface %matmul_l1 to padding_sizes [8] pad_to_multiple_of {
-     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-     padding_dimensions=[0]
+     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

transform.yield
@@ -71,11 +70,10 @@ module {
return %0 : tensor<7x11x12xf32>
}
module attributes {transform.with_named_sequence} {
-   transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
-     %0 = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.any_op
-     %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [3, 5] pad_to_multiple_of {
-       padding_dimensions = [0, 2],
-       padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]
+   transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+     %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+     %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [3, 0, 5] pad_to_multiple_of {
+       padding_values = [0.0 : f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
}
@@ -126,11 +124,10 @@ module {
return %0 : tensor<?x11x?xf32>
}
module attributes {transform.with_named_sequence} {
-   transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
-     %0 = transform.structured.match ops{["linalg.generic"]} in %module_op : (!transform.any_op) -> !transform.any_op
-     %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [3, 5] pad_to_multiple_of {
-       padding_dimensions = [0, 2],
-       padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]
+   transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+     %0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
+     %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [3, 0, 5] pad_to_multiple_of {
+       padding_values = [0.0 : f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
}
@@ -172,9 +169,8 @@ module attributes {transform.with_named_sequence} {
: (!transform.any_op) -> !transform.any_op

// Pad then tile should produce static shapes.
-   %matmul_padded, %_ = transform.structured.pad_tiling_interface %matmul to padding_sizes [8, 16] pad_to_multiple_of {
-     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-     padding_dimensions=[0, 2]
+   %matmul_padded, %_ = transform.structured.pad_tiling_interface %matmul to padding_sizes [8, 0, 16] pad_to_multiple_of {
+     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

%m, %l0, %l1 = transform.structured.tile_using_for %matmul_padded tile_sizes [8, 0, 16]
@@ -234,9 +230,8 @@ module attributes {transform.with_named_sequence} {
%m, %l0, %l1 = transform.structured.tile_using_for %matmul tile_sizes [8, 0, 16]
: (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)

-   %matmul_padded, %_ = transform.structured.pad_tiling_interface %m to padding_sizes [8, 16] pad_to_multiple_of {
-     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-     padding_dimensions=[0, 2]
+   %matmul_padded, %_ = transform.structured.pad_tiling_interface %m to padding_sizes [8, 0, 16] pad_to_multiple_of {
+     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

transform.yield
@@ -269,9 +264,8 @@ module attributes {transform.with_named_sequence} {
%m, %l0, %l1 = transform.structured.tile_using_for %matmul tile_sizes [8, 0, 16]
: (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)

-   %matmul_padded, %_ = transform.structured.pad_tiling_interface %m to padding_sizes [8, 16] pad_to_multiple_of {
-     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-     padding_dimensions=[0, 2]
+   %matmul_padded, %_ = transform.structured.pad_tiling_interface %m to padding_sizes [8, 0, 16] pad_to_multiple_of {
+     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

transform.yield
12 changes: 4 additions & 8 deletions mlir/test/Dialect/Linalg/transform-op-pad-tiling-interface.mlir
@@ -18,8 +18,7 @@ module attributes {transform.with_named_sequence} {
: (!transform.any_op) -> (!transform.any_op, !transform.any_op)

%fill_padded, %_ = transform.structured.pad_tiling_interface %fill_l1 to padding_sizes [8] {
-     padding_values=[0.0 : f32, 0.0 : f32],
-     padding_dimensions=[0]
+     padding_values=[0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

transform.yield
@@ -55,8 +54,7 @@ module attributes {transform.with_named_sequence} {
// Tile to 5 then pad to 8 (supposedly to better hit vector ops).
%matmul_l1, %loops_l1 = transform.structured.tile_using_for %matmul tile_sizes [5] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
%matmul_padded, %_ = transform.structured.pad_tiling_interface %matmul_l1 to padding_sizes [8] {
-     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32],
-     padding_dimensions=[0]
+     padding_values=[0.0: f32, 0.0 : f32, 0.0 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)

transform.yield
@@ -91,8 +89,7 @@ module {
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
%0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-     %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [8, 14] {
-       padding_dimensions = [0, 2],
+     %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [8, 0, 14] {
padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield
@@ -147,8 +144,7 @@ module {
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
%0 = transform.structured.match ops{["linalg.generic"]} in %arg0 : (!transform.any_op) -> !transform.any_op
-     %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [8, 14] {
-       padding_dimensions = [0, 2],
+     %padded, %pad = transform.structured.pad_tiling_interface %0 to padding_sizes [8, 0, 14] {
padding_values = [0.000000e+00 : f32, 0.000000e+00 : f32, 0.000000e+00 : f32]
} : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
transform.yield