// Copyright 2025 The IREE Authors
//
// Licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

#include <cstdint>
#include <numeric>
#include <optional>
#include "iree/compiler/Codegen/Common/GPU/Passes.h"
#include "iree/compiler/Codegen/Dialect/Codegen/IR/IREECodegenAttrs.h"
#include "iree/compiler/Codegen/Dialect/GPU/IR/IREEGPUAttrs.h"
#include "iree/compiler/Codegen/Dialect/GPU/IR/IREEGPUOps.h"
#include "iree/compiler/Codegen/Utils/GPUUtils.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Debug.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
| 20 | +#include "mlir/Dialect/GPU/IR/GPUDialect.h" |
| 21 | +#include "mlir/Dialect/MemRef/IR/MemRef.h" |
| 22 | +#include "mlir/Dialect/MemRef/Utils/MemRefUtils.h" |
| 23 | +#include "mlir/Dialect/SCF/IR/SCF.h" |
| 24 | +#include "mlir/Dialect/SCF/Utils/Utils.h" |
| 25 | +#include "mlir/Dialect/Utils/StaticValueUtils.h" |
| 26 | +#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h" |
| 27 | +#include "mlir/IR/BuiltinAttributes.h" |
| 28 | +#include "mlir/IR/OpDefinition.h" |
| 29 | +#include "mlir/Support/LLVM.h" |
| 30 | +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" |
| 31 | + |
| 32 | +#define DEBUG_TYPE "iree-codegen-gpu-lower-to-global-loads" |
| 33 | +#define LDBG(X) LLVM_DEBUG(llvm::dbgs() << X << "\n") |
| 34 | + |
| 35 | +namespace mlir::iree_compiler { |
| 36 | + |
| 37 | +#define GEN_PASS_DEF_GPULOWERTOGLOBALLOADSPASS |
| 38 | +#include "iree/compiler/Codegen/Common/GPU/Passes.h.inc" |
| 39 | + |
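// Each lane transfers 32 bits per copy. This presumably corresponds to a
// dword-sized global-load-to-LDS transfer on the targets this pass is meant
// for; the element bit width must divide this evenly (checked below).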
static constexpr int kNumBitsPerCopy = 32;

static LogicalResult
distributeLinalgCopyToThreads(RewriterBase &rewriter, linalg::CopyOp copy,
                              ArrayRef<int64_t> workgroupSize,
                              int64_t subgroupSize) {
  LDBG("==== distributing op: ");
  LDBG(*copy);
  Location loc = copy.getLoc();

  // The linalg.copy we are dealing with represents a region we need to copy
  // into workgroup memory. If there are N threads in the workgroup, then there
  // are `num_subgroups = N / gpu.subgroup_size` subgroups in the workgroup.
  //
  // We slice the target memref into `num_subgroups` consecutive slices; the
  // threads of each subgroup cooperatively copy their slice into the
  // corresponding slice of workgroup memory.
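  //
  // Illustrative example (hypothetical sizes, not taken from any particular
  // configuration): with a 256-thread workgroup and a subgroup size of 64,
  // there are 4 subgroups. Copying a 64x128 f16 memref (8192 elements) gives
  // each subgroup a 2048-element slice; with 32-bit transfers (2 f16 elements
  // per transfer), every lane performs 2048 / (64 * 2) = 16 copies.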

  // Get the copy size:
  auto copyMemRefType = cast<MemRefType>(copy.getOperand(1).getType());
  if (!memref::isStaticShapeAndContiguousRowMajor(copyMemRefType)) {
    return rewriter.notifyMatchFailure(
        copy, "copy target is not a statically shaped, contiguous, "
              "row-major memref.");
  }
  int64_t rank = copyMemRefType.getRank();
  SmallVector<OpFoldResult> tileSize(rank - 1, rewriter.getIndexAttr(1));

  int64_t elementBitWidth = copyMemRefType.getElementTypeBitWidth();
  if (kNumBitsPerCopy % elementBitWidth != 0) {
    return rewriter.notifyMatchFailure(copy, "Copy size is not a multiple of "
                                             "element bit width.");
  }
  int64_t elementsPerCopy = kNumBitsPerCopy / elementBitWidth;

  // Divide the copy among subgroups; each subgroup loads its slice linearly.
  assert(workgroupSize[0] % subgroupSize == 0);

  int64_t numSubgroups = workgroupSize[0] / subgroupSize;
  int64_t totalCopySize = copyMemRefType.getNumElements();
  int64_t totalCopySizePerSubgroup = totalCopySize / numSubgroups;
  int64_t numCopiesPerThread =
      (totalCopySizePerSubgroup / elementsPerCopy) / subgroupSize;
  int64_t residualElements =
      totalCopySizePerSubgroup % (subgroupSize * elementsPerCopy);
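  // Equivalently, when the copy divides evenly across subgroups and
  // residualElements == 0:
  //   totalCopySize ==
  //       numSubgroups * numCopiesPerThread * subgroupSize * elementsPerCopy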

  LDBG("-- elementsPerCopy: " << elementsPerCopy);
  LDBG("-- workgroupSize: " << workgroupSize[0]);
  LDBG("-- numSubgroups: " << numSubgroups);
  LDBG("-- totalCopySize: " << totalCopySize);
  LDBG("-- totalCopySizePerSubgroup: " << totalCopySizePerSubgroup);
  LDBG("-- numCopiesPerThread: " << numCopiesPerThread);
  LDBG("-- residualElements: " << residualElements);

  if (residualElements != 0) {
    return rewriter.notifyMatchFailure(
        copy, "cannot handle residual elements that do not fill a full "
              "per-subgroup copy.");
  }

  Value subgroupId = rewriter.create<gpu::SubgroupIdOp>(loc, nullptr);
  Value laneId = rewriter.create<gpu::LaneIdOp>(loc, nullptr);

  auto sourceType = cast<MemRefType>(copy.getOperand(0).getType());
  auto localType = cast<MemRefType>(copy.getOutputs().front().getType());

  auto getGlobalGatherIndex = [&](Value sgIdVal, Value lIdVal,
                                  Value indVar) -> Value {
    auto zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
    return rewriter.create<affine::AffineLinearizeIndexOp>(
        loc, ValueRange{sgIdVal, indVar, lIdVal, zero},
        ArrayRef<int64_t>{numSubgroups, numCopiesPerThread, subgroupSize,
                          elementsPerCopy},
        /*disjoint=*/true);
  };
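  // The linearized index above is equivalent to:
  //   ((sgId * numCopiesPerThread + indVar) * subgroupSize + laneId) *
  //       elementsPerCopy
  // so consecutive lanes read adjacent elementsPerCopy-element chunks, and
  // consecutive loop iterations advance by a full subgroup-wide stride.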

  auto getSubgroupStoreBaseIndex = [&](Value sgIdVal, Value indVar) -> Value {
    auto zero = rewriter.create<arith::ConstantIndexOp>(loc, 0);
    return getGlobalGatherIndex(sgIdVal, zero, indVar);
  };
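  // The store base index is the gather index evaluated at lane 0, i.e. the
  // first element of the chunk the whole subgroup transfers this iteration;
  // the DMA op is presumably responsible for placing each lane's data
  // relative to this base.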

  // Build a for loop skeleton:
  scf::ForOp forOp = rewriter.create<scf::ForOp>(
      loc, /*lb=*/rewriter.create<arith::ConstantIndexOp>(loc, 0),
      /*ub=*/rewriter.create<arith::ConstantIndexOp>(loc, numCopiesPerThread),
      /*step=*/rewriter.create<arith::ConstantIndexOp>(loc, 1));

  auto delinearizeIndex = [&](Value index, ArrayRef<int64_t> shape) {
    return rewriter.create<affine::AffineDelinearizeIndexOp>(loc, index, shape)
        .getMultiIndex();
  };

  // For loop body:
  {
    OpBuilder::InsertionGuard guard(rewriter);
    rewriter.setInsertionPointToStart(forOp.getBody());
    auto inductionVar = forOp.getInductionVar();
    Value linearizedGatherIndices =
        getGlobalGatherIndex(subgroupId, laneId, inductionVar);
    ValueRange delinearizedGlobalIndices =
        delinearizeIndex(linearizedGatherIndices, sourceType.getShape());
    Value linearizedBaseIndices =
        getSubgroupStoreBaseIndex(subgroupId, inductionVar);
    ValueRange delinearizedLocalIndices =
        delinearizeIndex(linearizedBaseIndices, localType.getShape());
    rewriter.create<IREE::GPU::GlobalLoadDMAOp>(
        loc, copy.getOperand(0), delinearizedGlobalIndices,
        copy.getOutputs()[0], delinearizedLocalIndices);
  }
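  // The overall rewrite is roughly (a sketch, not the exact printed IR):
  //   %sg   = gpu.subgroup_id
  //   %lane = gpu.lane_id
  //   scf.for %i = 0 to numCopiesPerThread step 1 {
  //     <IREE::GPU::GlobalLoadDMAOp>(%source[gather indices],
  //                                  %dest[subgroup base indices])
  //   }
  //   gpu.barrier   // replaces the original linalg.copy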

  // Sync at the end of the loop across threads.
  rewriter.replaceOpWithNewOp<gpu::BarrierOp>(copy);
  return success();
}

static LogicalResult isEligibleForGlobalDMA(linalg::CopyOp copy) {
  // The source must be in the global address space and the target in the
  // workgroup (shared) address space.
  auto sourceType = cast<MemRefType>(copy.getOperand(0).getType());
  auto targetType = cast<MemRefType>(copy.getOutputs().front().getType());

  if (!getLoweringConfig<IREE::GPU::UseGlobalLoadDMAAttr>(copy)) {
    LDBG("-- Op: " << *copy);
    LDBG("-- does not have `use_global_load_dma` attribute, skipping.");
    return failure();
  }

  if (!hasGlobalMemoryAddressSpace(sourceType) ||
      !hasSharedMemoryAddressSpace(targetType)) {
    LDBG("-- Op: " << *copy);
    LDBG("-- incompatible source or target memory address space.");
    return failure();
  }

  // TODO: check that the copy's target memref is not a subview: a subview
  // cannot guarantee contiguity of the destination memory region.
  return success();
}

struct LowerToDMAPattern : public OpRewritePattern<linalg::CopyOp> {
  LowerToDMAPattern(MLIRContext *context, ArrayRef<int64_t> workgroupSize,
                    int64_t subgroupSize)
      : OpRewritePattern<linalg::CopyOp>(context), workgroupSize(workgroupSize),
        subgroupSize(subgroupSize) {}

  LogicalResult matchAndRewrite(linalg::CopyOp copy,
                                PatternRewriter &rewriter) const override {
    if (failed(isEligibleForGlobalDMA(copy))) {
      return failure();
    }
    return distributeLinalgCopyToThreads(rewriter, copy, workgroupSize,
                                         subgroupSize);
  }

private:
  ArrayRef<int64_t> workgroupSize;
  int64_t subgroupSize;
};

namespace {
struct GPULowerToGlobalLoadsPass final
    : impl::GPULowerToGlobalLoadsPassBase<GPULowerToGlobalLoadsPass> {

  void runOnOperation() override {
    MLIRContext *context = &getContext();
    auto funcOp = getOperation();

    std::optional<SmallVector<int64_t>> workgroupSize =
        mlir::iree_compiler::getWorkgroupSize(funcOp);
    if (!workgroupSize) {
      funcOp.emitOpError(
          "unimplemented: Distribution with dynamic workgroup size.");
      return signalPassFailure();
    }
    auto subgroupSize = mlir::iree_compiler::getSubgroupSize(funcOp);
    if (!subgroupSize) {
      funcOp.emitOpError(
          "unimplemented: Distribution with dynamic subgroup size.");
      return signalPassFailure();
    }

    RewritePatternSet patterns(context);
    patterns.add<LowerToDMAPattern>(context, *workgroupSize, *subgroupSize);
    (void)applyPatternsGreedily(funcOp, std::move(patterns));
  }
};
} // namespace
} // namespace mlir::iree_compiler