 #include "mlir/Dialect/Utils/StaticValueUtils.h"
 #include "mlir/Dialect/Vector/IR/VectorOps.h"
 #include "mlir/IR/Attributes.h"
 #include "mlir/IR/Builders.h"
 #include "mlir/IR/BuiltinTypes.h"
+#include "mlir/IR/DialectResourceBlobManager.h"
 #include "mlir/IR/OpDefinition.h"
@@ -95,6 +96,99 @@ static bool checkLayout(Value val) {
          isa<StridedLayoutAttr>(type.getLayout());
 }
 
+/// Produce an OpFoldResult representing the product of the values or constants
+/// referenced by `indices`. `staticShape` provides the statically known sizes
+/// for the source memref, while `values` contains the mixed (value/attribute)
+/// representation produced by `memref.extract_strided_metadata`.
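+///
+/// Illustrative sketch (not from the source): with indices = {0, 1} and
+/// staticShape = {4, ShapedType::kDynamic}, this builds s0 * s1 over the
+/// inputs [4, values[1]], which makeComposedFoldedAffineApply folds to the
+/// constant 4 times the dynamic size.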
+static OpFoldResult getProductOfValues(ArrayRef<int64_t> indices,
+                                       OpBuilder &builder, Location loc,
+                                       ArrayRef<int64_t> staticShape,
+                                       ArrayRef<OpFoldResult> values) {
+  AffineExpr product = builder.getAffineConstantExpr(1);
+  SmallVector<OpFoldResult> inputs;
+  unsigned numSymbols = 0;
+  for (int64_t idx : indices) {
+    product = product * builder.getAffineSymbolExpr(numSymbols++);
+    if (ShapedType::isDynamic(staticShape[idx]))
+      inputs.push_back(values[idx]);
+    else
+      inputs.push_back(builder.getIndexAttr(staticShape[idx]));
+  }
+  return affine::makeComposedFoldedAffineApply(builder, loc, product, inputs);
+}
+
+/// Return the collapsed size for the reassociation group `groupId` of
+/// `collapseShapeOp`, as a single-element vector of OpFoldResult.
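+///
+/// Illustrative sketch (not from the source): for a group collapsing
+/// dimensions of extents {4, %d}, the result dimension is dynamic and the
+/// collapsed size is the folded product 4 * %d; a fully static group folds
+/// directly to an IndexAttr.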
+static SmallVector<OpFoldResult>
+getCollapsedSize(memref::CollapseShapeOp collapseShapeOp, OpBuilder &builder,
+                 ArrayRef<OpFoldResult> origSizes, unsigned groupId) {
+  SmallVector<OpFoldResult> collapsedSize;
+
+  MemRefType resultType = collapseShapeOp.getResultType();
+  int64_t dimSize = resultType.getDimSize(groupId);
+  if (!ShapedType::isDynamic(dimSize)) {
+    collapsedSize.push_back(builder.getIndexAttr(dimSize));
+    return collapsedSize;
+  }
+
+  auto sourceType = collapseShapeOp.getSrcType();
+  ArrayRef<int64_t> staticShape = sourceType.getShape();
+  // Keep the reassociation indices alive: getReassociationIndices() returns a
+  // fresh vector, so taking an ArrayRef into the temporary would dangle.
+  auto reassocIndices = collapseShapeOp.getReassociationIndices();
+  ArrayRef<int64_t> reassocGroup = reassocIndices[groupId];
+
+  collapsedSize.push_back(getProductOfValues(
+      reassocGroup, builder, collapseShapeOp.getLoc(), staticShape, origSizes));
+  return collapsedSize;
+}
+
+/// Return the collapsed stride for the reassociation group `groupId` of
+/// `collapseShapeOp`, as a single-element vector of OpFoldResult.
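+///
+/// Illustrative sketch (not from the source): for a group with source extents
+/// {4, 1, %d} and strides {%s, 8, 1}, size-one dimensions are skipped and the
+/// last non-unit dimension wins, so the collapsed stride is 1.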
+static SmallVector<OpFoldResult>
+getCollapsedStride(memref::CollapseShapeOp collapseShapeOp, OpBuilder &builder,
+                   ArrayRef<OpFoldResult> origSizes,
+                   ArrayRef<OpFoldResult> origStrides, unsigned groupId) {
+  // Keep the reassociation indices alive: getReassociationIndices() returns a
+  // fresh vector, so taking an ArrayRef into the temporary would dangle.
+  auto reassocIndices = collapseShapeOp.getReassociationIndices();
+  ArrayRef<int64_t> reassocGroup = reassocIndices[groupId];
+  assert(!reassocGroup.empty() &&
+         "reassociation group must contain at least one dimension");
+
+  auto sourceType = collapseShapeOp.getSrcType();
+  auto [strides, offset] = sourceType.getStridesAndOffset();
+  (void)offset;
+  ArrayRef<int64_t> srcShape = sourceType.getShape();
+
+  OpFoldResult lastValidStride = nullptr;
+  for (int64_t dim : reassocGroup) {
+    if (srcShape[dim] == 1)
+      continue;
+    int64_t currentStride = strides[dim];
+    if (ShapedType::isDynamic(currentStride))
+      lastValidStride = origStrides[dim];
+    else
+      lastValidStride = builder.getIndexAttr(currentStride);
+  }
+
+  // All dimensions in the group are statically one; fall back to the stride
+  // of the collapsed result dimension.
+  if (!lastValidStride) {
+    MemRefType collapsedType = collapseShapeOp.getResultType();
+    auto [collapsedStrides, collapsedOffset] =
+        collapsedType.getStridesAndOffset();
+    (void)collapsedOffset;
+    int64_t finalStride = collapsedStrides[groupId];
+    if (ShapedType::isDynamic(finalStride)) {
+      for (int64_t dim : reassocGroup) {
+        assert(srcShape[dim] == 1 && "expected size-one dimensions");
+        if (ShapedType::isDynamic(strides[dim]))
+          return {origStrides[dim]};
+      }
+      llvm_unreachable("expected to find a dynamic stride");
+    }
+    return {builder.getIndexAttr(finalStride)};
+  }
+
+  return {lastValidStride};
+}
+
 namespace {
 static Value getTargetMemref(Operation *op) {
   return llvm::TypeSwitch<Operation *, Value>(op)
@@ -256,6 +350,82 @@ struct MemRefRewritePattern : public OpRewritePattern<T> {
   }
 };
 
+/// Flattens multi-dimensional memref.global ops into one dimension.
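+///
+/// Illustrative sketch (not from the source):
+///   memref.global "private" @g : memref<2x3xf32> = dense<1.0>
+/// becomes
+///   memref.global "private" @g : memref<6xf32> = dense<1.0>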
+struct FlattenGlobal final : public OpRewritePattern<memref::GlobalOp> {
+  using OpRewritePattern::OpRewritePattern;
+
+  /// Reshape `value`, the initial value of a global, to `newType`. Returns a
+  /// null attribute for value kinds we do not know how to reshape.
+  static Attribute flattenAttribute(Attribute value, ShapedType newType) {
+    if (!value)
+      return value;
+    if (auto splatAttr = llvm::dyn_cast<SplatElementsAttr>(value))
+      return splatAttr.reshape(newType);
+    if (auto denseAttr = llvm::dyn_cast<DenseElementsAttr>(value))
+      return denseAttr.reshape(newType);
+    if (auto denseResourceAttr =
+            llvm::dyn_cast<DenseResourceElementsAttr>(value))
+      return DenseResourceElementsAttr::get(newType,
+                                            denseResourceAttr.getRawHandle());
+    return {};
+  }
+
+  LogicalResult matchAndRewrite(memref::GlobalOp globalOp,
+                                PatternRewriter &rewriter) const override {
+    auto oldType = llvm::dyn_cast<MemRefType>(globalOp.getType());
+    if (!oldType || !oldType.getLayout().isIdentity() || oldType.getRank() <= 1)
+      return failure();
+
+    auto tensorType = RankedTensorType::get({oldType.getNumElements()},
+                                            oldType.getElementType());
+    auto memRefType =
+        MemRefType::get({oldType.getNumElements()}, oldType.getElementType(),
+                        AffineMap(), oldType.getMemorySpace());
+    Attribute newInitialValue =
+        flattenAttribute(globalOp.getInitialValueAttr(), tensorType);
+    // Bail out instead of silently dropping an initial value of a kind we
+    // cannot reshape.
+    if (globalOp.getInitialValueAttr() && !newInitialValue)
+      return failure();
+    rewriter.replaceOpWithNewOp<memref::GlobalOp>(
+        globalOp, globalOp.getSymName(), globalOp.getSymVisibilityAttr(),
+        memRefType, newInitialValue, globalOp.getConstant(),
+        /*alignment=*/IntegerAttr());
+    return success();
+  }
+};
+
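+/// Rewrites memref.collapse_shape into a memref.reinterpret_cast whose sizes
+/// and strides are recovered from the source's strided metadata.
+///
+/// Illustrative sketch (not from the source):
+///   %c = memref.collapse_shape %m [[0, 1]]
+///       : memref<4x5xf32> into memref<20xf32>
+/// becomes, once the static metadata folds:
+///   %c = memref.reinterpret_cast %m to offset: [0], sizes: [20], strides: [1]
+///       : memref<4x5xf32> to memref<20xf32>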
+struct FlattenCollapseShape final
+    : public OpRewritePattern<memref::CollapseShapeOp> {
+  using OpRewritePattern::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(memref::CollapseShapeOp op,
+                                PatternRewriter &rewriter) const override {
+    Location loc = op.getLoc();
+    memref::ExtractStridedMetadataOp metadata =
+        memref::ExtractStridedMetadataOp::create(rewriter, loc, op.getSrc());
+
+    SmallVector<OpFoldResult> origSizes = metadata.getConstifiedMixedSizes();
+    SmallVector<OpFoldResult> origStrides =
+        metadata.getConstifiedMixedStrides();
+    OpFoldResult offset = metadata.getConstifiedMixedOffset();
+
+    SmallVector<OpFoldResult> collapsedSizes;
+    SmallVector<OpFoldResult> collapsedStrides;
+    unsigned numGroups = op.getReassociationIndices().size();
+    collapsedSizes.reserve(numGroups);
+    collapsedStrides.reserve(numGroups);
+    for (unsigned i = 0; i < numGroups; ++i) {
+      SmallVector<OpFoldResult> groupSizes =
+          getCollapsedSize(op, rewriter, origSizes, i);
+      SmallVector<OpFoldResult> groupStrides =
+          getCollapsedStride(op, rewriter, origSizes, origStrides, i);
+      collapsedSizes.append(groupSizes.begin(), groupSizes.end());
+      collapsedStrides.append(groupStrides.begin(), groupStrides.end());
+    }
+
+    rewriter.replaceOpWithNewOp<memref::ReinterpretCastOp>(
+        op, op.getType(), op.getSrc(), offset, collapsedSizes,
+        collapsedStrides);
+    return success();
+  }
+};
+
 struct FlattenMemrefsPass
     : public mlir::memref::impl::FlattenMemrefsPassBase<FlattenMemrefsPass> {
   using Base::Base;
@@ -288,12 +458,52 @@ void memref::populateFlattenVectorOpsOnMemrefPatterns(
       patterns.getContext());
 }
 
+/// Special pattern for memref.get_global: it only fires once the referenced
+/// global has already been flattened, so it cannot loop indefinitely against
+/// FlattenGlobal.
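+///
+/// Illustrative sketch (not from the source), where @g is already flattened
+/// to memref<6xf32>:
+///   %0 = memref.get_global @g : memref<2x3xf32>
+/// becomes
+///   %1 = memref.get_global @g : memref<6xf32>
+///   %0 = memref.reinterpret_cast %1 to offset: [0], sizes: [2, 3],
+///       strides: [3, 1] : memref<6xf32> to memref<2x3xf32>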
+struct FlattenGetGlobal : public OpRewritePattern<memref::GetGlobalOp> {
+  using OpRewritePattern::OpRewritePattern;
+
+  LogicalResult matchAndRewrite(memref::GetGlobalOp op,
+                                PatternRewriter &rewriter) const override {
+    // Check whether this get_global references a multi-dimensional global.
+    auto module = op->getParentOfType<ModuleOp>();
+    auto globalOp = module.lookupSymbol<memref::GlobalOp>(op.getName());
+    if (!globalOp)
+      return failure();
+
+    auto globalType = globalOp.getType();
+    auto resultType = op.getType();
+
+    // Only apply if the global has been flattened but the get_global hasn't.
+    if (globalType.getRank() == 1 && resultType.getRank() > 1) {
+      auto newGetGlobal = memref::GetGlobalOp::create(rewriter, op.getLoc(),
+                                                      globalType, op.getName());
+
+      // Cast the flattened result back to the original shape. Globals are
+      // statically shaped, so the sizes and strides below constify to
+      // attributes and the metadata op is left trivially dead for the driver
+      // to erase.
+      memref::ExtractStridedMetadataOp stridedMetadata =
+          memref::ExtractStridedMetadataOp::create(rewriter, op.getLoc(),
+                                                   op.getResult());
+      auto castResult = memref::ReinterpretCastOp::create(
+          rewriter, op.getLoc(), resultType, newGetGlobal,
+          /*offset=*/rewriter.getIndexAttr(0),
+          stridedMetadata.getConstifiedMixedSizes(),
+          stridedMetadata.getConstifiedMixedStrides());
+      rewriter.replaceOp(op, castResult);
+      return success();
+    }
+
+    return failure();
+  }
+};
+
 void memref::populateFlattenMemrefOpsPatterns(RewritePatternSet &patterns) {
   patterns.insert<MemRefRewritePattern<memref::LoadOp>,
                   MemRefRewritePattern<memref::StoreOp>,
                   MemRefRewritePattern<memref::AllocOp>,
                   MemRefRewritePattern<memref::AllocaOp>,
-                  MemRefRewritePattern<memref::DeallocOp>>(
+                  MemRefRewritePattern<memref::DeallocOp>,
+                  FlattenCollapseShape, FlattenGetGlobal, FlattenGlobal>(
       patterns.getContext());
 }
 