#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "triton/Dialect/TritonGPU/Transforms/Utility.h"
#include "triton/Dialect/TritonNvidiaGPU/IR/Dialect.h"
#include "triton/Dialect/TritonNvidiaGPU/Transforms/Passes.h"
#include "llvm/ADT/AddressRanges.h"

namespace {

using namespace mlir;

namespace ttng = triton::nvidia_gpu;
namespace ttg = triton::gpu;
namespace tt = triton;

#define GEN_PASS_CLASSES
#include "triton/Dialect/TritonNvidiaGPU/Transforms/Passes.h.inc"

// If we don't know the effects of the op, we add all possible effects.
void addAllValuelessEffects(
    SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
  effects.emplace_back(MemoryEffects::Effect::get<MemoryEffects::Read>());
  effects.emplace_back(MemoryEffects::Effect::get<MemoryEffects::Write>());
  effects.emplace_back(MemoryEffects::Effect::get<MemoryEffects::Allocate>());
  effects.emplace_back(MemoryEffects::Effect::get<MemoryEffects::Free>());
}

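// Collect the memory effects of `op`, recursing into ops that only have
// recursive effects. Returns true if the effects were fully identified and
// false if arbitrary effects had to be conservatively assumed.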
bool collectEffects(Operation *op,
                    SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
  // Collect the effect instances of the operation. Note that getEffects erases
  // all effect instances that have a type other than the template parameter,
  // so we collect them first into a local buffer and then copy.
  if (auto iface = dyn_cast<MemoryEffectOpInterface>(op)) {
    SmallVector<MemoryEffects::EffectInstance> localEffects;
    iface.getEffects(localEffects);
    llvm::append_range(effects, localEffects);
    return true;
  }
  if (op->hasTrait<OpTrait::HasRecursiveMemoryEffects>()) {
    for (auto &region : op->getRegions()) {
      for (auto &block : region) {
        for (auto &innerOp : block)
          if (!collectEffects(&innerOp, effects))
            return false;
      }
    }
    return true;
  }

  // The op doesn't implement the memory effect interface, so be conservative
  // and assume it can have any possible effect.
  addAllValuelessEffects(effects);
  return false;
}

struct AccessRange {
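  // Per-dimension ranges of the underlying allocation that may be accessed;
  // std::nullopt means the accessed extent along that dimension is unknown.
  // `rankOffset` counts leading dimensions that a lower-rank view does not
  // cover (their access size is taken to be 1).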
  SmallVector<std::optional<llvm::AddressRange>> ranges;
  unsigned rankOffset = 0;
};

// Simple local alias analysis that looks for a single underlying allocation
// and an access subrange.
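// Returns the allocation and the ranges accessed through it, or a null Value
// if the underlying allocation cannot be identified.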
std::pair<Value, AccessRange> findBufferAccess(Value a) {
  // Handle block arguments.
  if (auto arg = dyn_cast<BlockArgument>(a)) {
    Operation *parentOp = arg.getOwner()->getParentOp();

    // Look through `ttg.warp_specialize` explicit captures.
    if (auto wsOp = dyn_cast<ttg::WarpSpecializePartitionsOp>(parentOp)) {
      return findBufferAccess(
          wsOp.getParentOp().getExplicitCaptures()[arg.getArgNumber()]);
    }

    // Unknown block argument.
    return {};
  }

  Operation *defOp = a.getDefiningOp();
  // Accessing the alloc accesses the whole buffer.
  if (auto alloc = dyn_cast<ttng::TMEMAllocOp>(defOp)) {
    AccessRange access;
    for (uint64_t dim : alloc.getType().getShape())
      access.ranges.push_back({{0, dim}});
    return {a, std::move(access)};
  }

  // Trans and Reshape views don't change the access size.
  if (isa<ttg::MemDescTransOp, ttg::MemDescReshapeOp>(defOp)) {
    return findBufferAccess(defOp->getOperand(0));
  }

  // Subviews can reduce the access sizes.
  if (auto subview = dyn_cast<ttg::MemDescSubviewOp>(defOp)) {
    auto [alloc, parentAccess] = findBufferAccess(subview.getSrc());
    if (!alloc)
      return {};
    // Handle subview of a subview. The first `rankOffset` access sizes are
    // the same as in the parent access.
    AccessRange childAccess;
    for (auto i : llvm::seq(parentAccess.rankOffset))
      childAccess.ranges.push_back(parentAccess.ranges[i]);

    // The subview may have a smaller rank, in which case its access size is
    // just 1 for the higher dims.
    childAccess.rankOffset =
        subview.getSrc().getType().getRank() - subview.getType().getRank();
    for (auto [i, offset] : llvm::enumerate(subview.getOffsets())) {
      auto parentRange = parentAccess.ranges[i + parentAccess.rankOffset];
      if (!parentRange) {
        childAccess.ranges.push_back({});
        continue;
      }

      // If the offset is not known, then the entire dim may be accessed.
      APInt value;
      if (!matchPattern(offset, m_ConstantInt(&value))) {
        childAccess.ranges.push_back({});
        continue;
      }

      uint64_t accessStart = parentRange->start() + value.getSExtValue();
      uint64_t accessSize = 1;
      if (i >= childAccess.rankOffset)
        accessSize = subview.getType().getShape()[i - childAccess.rankOffset];
      childAccess.ranges.push_back({{accessStart, accessStart + accessSize}});
    }
    return {alloc, std::move(childAccess)};
  }

  // Subslice is a subview only on the N dimension (dim 1).
  if (auto subslice = dyn_cast<ttng::TMEMSubSliceOp>(defOp)) {
    auto [alloc, parentAccess] = findBufferAccess(subslice.getSrc());
    if (!alloc)
      return {};
    if (!parentAccess.ranges[1])
      return {alloc, parentAccess};
    uint64_t nStart = parentAccess.ranges[1]->start() + subslice.getN();
    uint64_t nSize = subslice.getType().getShape()[1];
    AccessRange childAccess = parentAccess;
    childAccess.ranges[1] = {{nStart, nStart + nSize}};
    return {alloc, std::move(childAccess)};
  }

  // Unknown defining op.
  return {};
}

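// Conservatively determine whether two TMEM values may access overlapping
// memory. Returns false only when both values are proven disjoint: they map to
// different allocations, or to non-overlapping ranges of the same allocation.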
bool tmemMayAlias(Value a, Value b) {
  auto [aAlloc, aRanges] = findBufferAccess(a);
  auto [bAlloc, bRanges] = findBufferAccess(b);
  // If the underlying buffer was not identified, assume the values may alias.
  if (!aAlloc || !bAlloc)
    return true;
  // If the buffers are different, they don't alias.
  if (aAlloc != bAlloc)
    return false;
  // If the access ranges along any dimension are known not to overlap, then
  // the accesses don't alias.
  for (auto [aRange, bRange] : llvm::zip(aRanges.ranges, bRanges.ranges)) {
    // If either access range at this dim is unknown, we can't prove the
    // accesses are disjoint.
    if (!aRange || !bRange)
      continue;
    // The access ranges are known and don't overlap.
    if (!aRange->intersects(*bRange))
      return false;
  }
  return true;
}

// Sink tmem_loads as close to their use as possible to reduce register
// pressure.
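// The op chain is sunk past later operations that do not use its results and
// cannot write or free memory that may alias `buffer`; barrier arrivals and
// block terminators stop the scan.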
bool sinkOps(Value buffer, ArrayRef<Operation *> useChain) {
  Operation *insertBefore = nullptr;
  Operation *next = useChain.back()->getNextNode();
  while (next && !next->hasTrait<OpTrait::IsTerminator>()) {
    insertBefore = next;
    bool dep = false;
    for (auto operand : getNestedOperands(next)) {
      if (llvm::any_of(useChain, [&](Operation *op) {
            return llvm::is_contained(op->getResults(), operand);
          })) {
        dep = true;
        break;
      }
    }
    // Don't sink past barrier signals, since they may guard the live range of
    // the buffer.
    if (isa<ttng::ArriveBarrierOp>(next))
      break;
    if (!isMemoryEffectFree(next)) {
      SmallVector<MemoryEffects::EffectInstance> effects;
      collectEffects(next, effects);
      for (auto effect : effects) {
        // Look for potentially aliasing write or free effects.
        if (!isa<MemoryEffects::Write, MemoryEffects::Free>(effect.getEffect()))
          continue;
        if (isa<SideEffects::DefaultResource>(effect.getResource())) {
          dep = true;
          break;
        }
        if (isa<ttng::TensorMemory>(effect.getResource()) &&
            (!effect.getValue() || tmemMayAlias(effect.getValue(), buffer))) {
          dep = true;
          break;
        }
      }
    }
    if (dep)
      break;
    next = next->getNextNode();
  }
  if (insertBefore && insertBefore != useChain.back()->getNextNode()) {
    for (Operation *op : useChain)
      op->moveBefore(insertBefore);
    return true;
  }
  return false;
}

// Try to sink an op (a tmem load or alloc) together with a chain of its pure
// users.
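// The chain is extended while the last op has exactly one user, that user is
// pure, and it immediately follows the op, so the whole straight-line chain
// can be moved as a unit.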
bool trySinkOp(Operation *op, Value buffer) {
  SmallVector<Operation *> useChain{op};
  while (useChain.back()->hasOneUse() &&
         isPure(*useChain.back()->user_begin()) &&
         useChain.back()->getNextNode() == *useChain.back()->user_begin()) {
    useChain.push_back(*useChain.back()->user_begin());
  }
  return sinkOps(buffer, useChain);
}

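// Pass that repeatedly sinks tmem_load and tmem_alloc ops, together with
// chains of their pure users, as far down their block as dependencies allow,
// in order to reduce register pressure.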
struct TritonNvidiaGPUInterleaveTMemPass
    : public TritonNvidiaGPUInterleaveTMemPassBase<
          TritonNvidiaGPUInterleaveTMemPass> {
  using TritonNvidiaGPUInterleaveTMemPassBase::
      TritonNvidiaGPUInterleaveTMemPassBase;

  void runOnOperation() override {
    ModuleOp m = getOperation();
    SmallVector<std::pair<Operation *, Value>> opsToSink;
    m.walk([&](Operation *op) {
      if (auto load = dyn_cast<ttng::TMEMLoadOp>(op))
        opsToSink.emplace_back(load, load.getSrc());
      else if (auto alloc = dyn_cast<ttng::TMEMAllocOp>(op))
        opsToSink.emplace_back(alloc, alloc.getResult());
    });
    for (auto [op, buffer] : opsToSink) {
      while (trySinkOp(op, buffer)) {
        // Keep trying to sink loads and their users.
      }
    }
  }
};

} // namespace

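// Minimal usage sketch (assumes a caller-provided mlir::PassManager and
// ModuleOp; how the pass is actually scheduled depends on the surrounding
// pipeline):
//   mlir::PassManager pm(module.getContext());
//   pm.addPass(mlir::createTritonNvidiaGPUInterleaveTMemPass());
//   if (failed(pm.run(module)))
//     llvm::errs() << "InterleaveTMem pass failed\n";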
std::unique_ptr<Pass> mlir::createTritonNvidiaGPUInterleaveTMemPass() {
  return std::make_unique<TritonNvidiaGPUInterleaveTMemPass>();
}