|
//===- BubbleUpExtractSlice.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Swap a `tensor.extract_slice` with the producer of the source in some cases
// where that is valid. When used as cleanup patterns of tile and fuse, enables
// fusing the producer with the consumer even if the producer does not implement
// the tiling interface.
//
//===----------------------------------------------------------------------===//
| 15 | + |
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/Tensor/Transforms/Transforms.h"
#include "mlir/Dialect/Tensor/Utils/Utils.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Interfaces/ValueBoundsOpInterface.h"

using namespace mlir;
using namespace mlir::tensor;
| 27 | + |
| 28 | +/// Converts `tensor.extract_slice(tensor.expand_shape)` to |
| 29 | +/// `tensor.expand_shape(tensor.extract_slice)`. |
| 30 | +/// For this transformation to be possible, the slice must be fully contiguous |
| 31 | +/// within each reassociation group of the expand_shape. If the transformation |
| 32 | +/// is not possible, or if the slice is rank reducting, the function returns |
| 33 | +/// failure. |
| 34 | +/// |
| 35 | +/// Example: |
| 36 | +/// ``` |
| 37 | +/// %reshape = tensor.expand_shape %in [[0, 1], [2, 3], [4, 5, 6]] |
| 38 | +/// tensor<8x16x32xf32> to tensor<2x4x2x8x4x2x4xf32> |
| 39 | +/// %slice = tensor.extract_slice %reshape ... |
| 40 | +/// tensor<2x4x2x8x4x2x4xf32> to tensor<2x4x1x5x1x1x4xf32> |
| 41 | +/// |
| 42 | +/// // The transformation is possible because each reassociation group has a |
| 43 | +/// // contiguous slice. (i.e., [2x4->2x4], [2x8->1x5], [4x2x4->1x1x4]) |
| 44 | +/// // After the transformation: |
| 45 | +/// |
| 46 | +/// %slice = tensor.extract_slice %in ... |
| 47 | +/// tensor<8x16x32xf32> to tensor<8x5x4xf32> |
| 48 | +/// %reshape = tensor.expand_shape %slice [[0, 1], [2, 3], [4, 5, 6]] |
| 49 | +/// tensor<8x5x4xf32> to tensor<2x4x1x5x1x1x4xf32> |
| 50 | +/// ``` |
| 51 | +static LogicalResult |
| 52 | +swapExpandShapeWithSlice(RewriterBase &rewriter, |
| 53 | + tensor::ExpandShapeOp expandShapeOp, |
| 54 | + tensor::ExtractSliceOp sliceOp) { |
| 55 | + SmallVector<OpFoldResult> offsets = sliceOp.getMixedOffsets(); |
| 56 | + SmallVector<OpFoldResult> sizes = sliceOp.getMixedSizes(); |
| 57 | + |
| 58 | + if (static_cast<size_t>(sliceOp.getResultType().getRank()) != sizes.size()) { |
| 59 | + return rewriter.notifyMatchFailure(sliceOp, |
| 60 | + "unimplemented: rank reducing slice"); |
| 61 | + } |
| 62 | + |
| 63 | + // Helper variables and function for accumulating the new offset and length |
| 64 | + // values. |
| 65 | + Location loc = expandShapeOp->getLoc(); |
| 66 | + AffineExpr d0, d1, d2; |
| 67 | + bindDims(rewriter.getContext(), d0, d1, d2); |
| 68 | + // Multiply two integers. |
| 69 | + auto mul = [&](OpFoldResult v1, OpFoldResult v2) { |
| 70 | + auto mulMap = AffineMap::get(2, 0, {d0 * d1}); |
| 71 | + return affine::makeComposedFoldedAffineApply(rewriter, loc, mulMap, |
| 72 | + {v1, v2}); |
| 73 | + }; |
| 74 | + |
| 75 | + SmallVector<OpFoldResult> outputShape = |
| 76 | + getMixedValues(expandShapeOp.getStaticOutputShape(), |
| 77 | + expandShapeOp.getOutputShape(), rewriter); |
| 78 | + |
| 79 | + auto isZeroOffsetAndFullSize = [](OpFoldResult offset, OpFoldResult sliceSize, |
| 80 | + OpFoldResult size) { |
| 81 | + if (!isConstantIntValue(offset, 0)) |
| 82 | + return false; |
| 83 | + FailureOr<bool> maybeEqual = |
| 84 | + ValueBoundsConstraintSet::areEqual(sliceSize, size); |
| 85 | + return llvm::succeeded(maybeEqual) && maybeEqual.value(); |
| 86 | + }; |
| 87 | + |
| 88 | + // First verify that this is a full slice of the expanded tensor. |
| 89 | + for (const ReassociationIndices &indices : |
| 90 | + expandShapeOp.getReassociationIndices()) { |
| 91 | + int64_t i = 0; |
| 92 | + int64_t e = indices.size(); |
| 93 | + // Find the first expanded dim after the first dim with non-unit extracted |
| 94 | + // size. |
| 95 | + for (; i < e; ++i) { |
| 96 | + if (!isConstantIntValue(sizes[indices[i]], 1)) { |
| 97 | + // +1 to skip the first non-unit size dim. |
| 98 | + i++; |
| 99 | + break; |
| 100 | + } |
| 101 | + } |
| 102 | + |
| 103 | + // Verify that all subsequent dimensions extract the full size of the |
| 104 | + // source tensor. |
| 105 | + for (; i < e; ++i) { |
| 106 | + int64_t expandedDim = indices[i]; |
| 107 | + if (!isZeroOffsetAndFullSize(offsets[expandedDim], sizes[expandedDim], |
| 108 | + outputShape[expandedDim])) { |
| 109 | + return rewriter.notifyMatchFailure( |
| 110 | + sliceOp, "Not a contiguous slice of the expanded tensor."); |
| 111 | + } |
| 112 | + } |
| 113 | + } |
| 114 | + |
| 115 | + // Compute new offsets, lengths, and strides. |
| 116 | + SmallVector<OpFoldResult> newOffsets, newLengths, newStrides; |
| 117 | + for (const ReassociationIndices &indices : |
| 118 | + expandShapeOp.getReassociationIndices()) { |
| 119 | + OpFoldResult newSize = rewriter.getIndexAttr(1); |
| 120 | + SmallVector<OpFoldResult> basis, delinOffsets; |
| 121 | + |
| 122 | + int64_t i = 0; |
| 123 | + int64_t e = indices.size(); |
| 124 | + // Offset = cumulative product of leading unit extracted dims. |
| 125 | + for (; i < e; ++i) { |
| 126 | + int64_t expandedDim = indices[i]; |
| 127 | + if (!isConstantIntValue(sizes[expandedDim], 1)) |
| 128 | + break; |
| 129 | + |
| 130 | + basis.push_back(outputShape[expandedDim]); |
| 131 | + delinOffsets.push_back(offsets[expandedDim]); |
| 132 | + } |
| 133 | + |
| 134 | + if (i != e) { |
| 135 | + int64_t expandedDim = indices[i]; |
| 136 | + basis.push_back(outputShape[expandedDim]); |
| 137 | + delinOffsets.push_back(offsets[expandedDim]); |
| 138 | + newSize = sizes[expandedDim]; |
| 139 | + i++; |
| 140 | + } |
| 141 | + |
| 142 | + for (; i < e; ++i) { |
| 143 | + OpFoldResult fullSize = outputShape[indices[i]]; |
| 144 | + basis.push_back(fullSize); |
| 145 | + delinOffsets.push_back(rewriter.getIndexAttr(0)); |
| 146 | + newSize = mul(newSize, fullSize); |
| 147 | + } |
| 148 | + SmallVector<Value> offsetVals = |
| 149 | + llvm::map_to_vector(delinOffsets, [&](OpFoldResult ofr) { |
| 150 | + return getValueOrCreateConstantIndexOp(rewriter, loc, ofr); |
| 151 | + }); |
| 152 | + OpFoldResult newOffset = rewriter |
| 153 | + .create<affine::AffineLinearizeIndexOp>( |
| 154 | + loc, offsetVals, basis, /*disjoint=*/true) |
| 155 | + .getResult(); |
| 156 | + newOffsets.push_back(newOffset); |
| 157 | + newLengths.push_back(newSize); |
| 158 | + |
| 159 | + // Only unit stride supported. |
| 160 | + newStrides.push_back(rewriter.getIndexAttr(1)); |
| 161 | + } |
| 162 | + |
| 163 | + // The shape of the result can be obtained from the sizes passed in. |
| 164 | + SmallVector<Value> dynDims; |
| 165 | + SmallVector<int64_t> shape; |
| 166 | + dispatchIndexOpFoldResults(sizes, dynDims, shape); |
| 167 | + RankedTensorType resultType = RankedTensorType::get( |
| 168 | + shape, expandShapeOp.getResultType().getElementType()); |
| 169 | + |
| 170 | + // Create a new ExtractSliceOp and ExpandShapeOp. |
| 171 | + Value newSliceOp = rewriter.create<tensor::ExtractSliceOp>( |
| 172 | + loc, expandShapeOp.getSrc(), newOffsets, newLengths, newStrides); |
| 173 | + auto newExpandShapeOp = rewriter.create<tensor::ExpandShapeOp>( |
| 174 | + loc, resultType, newSliceOp, expandShapeOp.getReassociationIndices(), |
| 175 | + sizes); |
| 176 | + rewriter.replaceOp(sliceOp, newExpandShapeOp); |
| 177 | + return success(); |
| 178 | +} |
| 179 | + |
| 180 | +namespace { |
| 181 | + |
| 182 | +struct SwapExpandShapeWithSlicePattern |
| 183 | + : public OpRewritePattern<tensor::ExtractSliceOp> { |
| 184 | + using OpRewritePattern<tensor::ExtractSliceOp>::OpRewritePattern; |
| 185 | + |
| 186 | + LogicalResult matchAndRewrite(tensor::ExtractSliceOp sliceOp, |
| 187 | + PatternRewriter &rewriter) const override { |
| 188 | + auto expandOp = sliceOp.getSource().getDefiningOp<tensor::ExpandShapeOp>(); |
| 189 | + if (!expandOp) { |
| 190 | + return failure(); |
| 191 | + } |
| 192 | + |
| 193 | + if (!sliceOp.hasUnitStride()) { |
| 194 | + return rewriter.notifyMatchFailure(sliceOp, |
| 195 | + "unsupported: non-unit stride"); |
| 196 | + } |
| 197 | + |
| 198 | + return swapExpandShapeWithSlice(rewriter, expandOp, sliceOp); |
| 199 | + } |
| 200 | +}; |
| 201 | + |
| 202 | +} // namespace |
| 203 | + |
| 204 | +void mlir::tensor::populateBubbleUpExtractSliceOpPatterns( |
| 205 | + RewritePatternSet &patterns) { |
| 206 | + patterns.add<SwapExpandShapeWithSlicePattern>(patterns.getContext()); |
| 207 | +} |