-
Notifications
You must be signed in to change notification settings - Fork 15.3k
[mlir][linalg] Take artificial padding into account for pack/unpack folding. #150272
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
Changes from 1 commit
2548809
2fc0316
601f21a
d832b43
a23b8dc
3aac143
bab30f2
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -4490,6 +4490,29 @@ Speculation::Speculatability ElementwiseOp::getSpeculatability() { | |
| //===----------------------------------------------------------------------===// | ||
| // PackOp/UnPackOp Common | ||
| //===----------------------------------------------------------------------===// | ||
|
|
||
| template <typename OpTy> | ||
| SmallVector<int64_t> | ||
| getPackedOuterShapeWithoutTransposition(OpTy packOrUnPack) { | ||
| RankedTensorType packedType = (std::is_same<OpTy, PackOp>::value) | ||
| ? packOrUnPack.getDestType() | ||
| : packOrUnPack.getSourceType(); | ||
| RankedTensorType unpackedType = (std::is_same<OpTy, PackOp>::value) | ||
| ? packOrUnPack.getSourceType() | ||
| : packOrUnPack.getDestType(); | ||
| SmallVector<int64_t> result( | ||
| packedType.getShape().take_front(unpackedType.getRank())); | ||
| if (!packOrUnPack.getOuterDimsPerm().empty()) { | ||
| applyPermutationToVector( | ||
| result, invertPermutationVector(packOrUnPack.getOuterDimsPerm())); | ||
| } | ||
| return result; | ||
| } | ||
| template SmallVector<int64_t> | ||
| getPackedOuterShapeWithoutTransposition<PackOp>(PackOp); | ||
| template SmallVector<int64_t> | ||
| getPackedOuterShapeWithoutTransposition<UnPackOp>(UnPackOp); | ||
|
|
||
| // Given the (potentially) updated packed type, `newPackedTy`, generates an | ||
| // updated mixed-tile-sizes attribute. A tile size is updated only | ||
| // when: | ||
|
|
@@ -5447,11 +5470,7 @@ LogicalResult UnPackOp::canonicalize(UnPackOp unPackOp, | |
| if (unPackOp->hasOneUse()) { | ||
| auto extractSliceUser = | ||
| dyn_cast<tensor::ExtractSliceOp>(*unPackOp->getUsers().begin()); | ||
| if (extractSliceUser && | ||
| areAllConstantIntValue(extractSliceUser.getMixedOffsets(), 0) && | ||
| areAllConstantIntValue(extractSliceUser.getMixedStrides(), 1) && | ||
| extractSliceUser.getSourceType().getRank() == | ||
| extractSliceUser.getResultType().getRank()) { | ||
| if (extractSliceUser && unPackOp.canFoldSliceOp(extractSliceUser)) { | ||
| OpBuilder::InsertionGuard g(rewriter); | ||
| rewriter.setInsertionPoint(unPackOp); | ||
| auto newDest = rewriter.create<tensor::ExtractSliceOp>( | ||
|
|
@@ -5494,6 +5513,32 @@ LogicalResult UnPackOp::canonicalize(UnPackOp unPackOp, | |
| return failure(); | ||
| } | ||
|
|
||
| bool UnPackOp::canFoldSliceOp(tensor::ExtractSliceOp sliceOp) { | ||
| // Rank-reduced folding is not supported. | ||
| if (sliceOp.getResultType().getRank() != this->getDestType().getRank()) | ||
| return false; | ||
| if (!areAllConstantIntValue(sliceOp.getMixedOffsets(), 0) || | ||
| !areAllConstantIntValue(sliceOp.getMixedStrides(), 1)) | ||
| return false; | ||
| RankedTensorType unpackedType = sliceOp.getResultType(); | ||
hanhanW marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
| SmallVector<int64_t> outerShapeWithoutTranspose = | ||
| getPackedOuterShapeWithoutTransposition(*this); | ||
| for (auto [pos, tileSize] : | ||
| llvm::zip_equal(this->getInnerDimsPos(), this->getStaticInnerTiles())) { | ||
| if (unpackedType.isDynamicDim(pos)) | ||
| return false; | ||
| if (ShapedType::isDynamic(outerShapeWithoutTranspose[pos])) | ||
| return false; | ||
| if (ShapedType::isDynamic(tileSize)) | ||
| return false; | ||
| int64_t paddingSize = outerShapeWithoutTranspose[pos] * tileSize - | ||
| unpackedType.getDimSize(pos); | ||
| if (paddingSize >= tileSize) | ||
| return false; | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It would be good to add a test where a non-trailing dim is "sliced", e.g. func.func @fold_extract_slice_into_unpack(
%src : tensor<28x2x1x16x16xf32>, %dest : tensor<28x28x15xf32>, %size : index
) -> tensor<28x16x15xf32> {
%unpack = linalg.unpack %src
outer_dims_perm = [0, 1, 2]
inner_dims_pos = [1, 2]
inner_tiles = [16, 16]
into %dest : tensor<28x2x1x16x16xf32> -> tensor<28x28x15xf32>
%extracted_slice = tensor.extract_slice %unpack
[0, 0, 0] [28, 16, 15] [1, 1, 1] : tensor<28x28x15xf32> to tensor<28x16x15xf32>
return %extracted_slice : tensor<28x16x15xf32>
}
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Also, if I'm not missing something, I don't see tests for when we hit this case where we would need artificial padding. It would be nice to add these negative cases :)
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Thanks, I missed that! I think @banach-space's example is a negative test. Anyway, I added two more tests that should address your comments. |
||
| } | ||
| return true; | ||
| } | ||
|
|
||
| bool UnPackOp::isLikeUnPad() { | ||
| RankedTensorType packedTensorType = getSourceType(); | ||
| return isLikePadUnPad(*this, packedTensorType); | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.