//===- XeGPUTransformOps.cpp - Implementation of XeGPU transformation ops -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/XeGPU/TransformOps/XeGPUTransformOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
#include "mlir/Dialect/XeGPU/Utils/XeGPUUtils.h"

#include <optional>

using namespace mlir;
using namespace mlir::transform;

/// Convert the values in `ofrs` to integers and append them to `result`. Each
/// entry must be an integer attribute, a transform dialect param holding a
/// single integer attribute, or a transform dialect handle mapped to exactly
/// one payload op with a single constant index result.
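/// A sketch of the accepted forms (illustrative, not exhaustive): a static
/// value arrives here as an IntegerAttr; a value such as
/// `%p = transform.param.constant 8 : i64` arrives as a param; and a handle to
/// `%c8 = arith.constant 8 : index` arrives as a payload-op handle whose
/// constant result is matched below.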
static DiagnosedSilenceableFailure convertMixedValuesToInt(
    transform::TransformState &state, TransformOpInterface transformOp,
    SmallVectorImpl<int32_t> &result, ArrayRef<OpFoldResult> ofrs) {
  for (OpFoldResult ofr : ofrs) {
    // Attribute case.
    if (auto attr = dyn_cast<Attribute>(ofr)) {
      if (auto intAttr = dyn_cast<IntegerAttr>(attr)) {
        result.push_back(intAttr.getInt());
        continue;
      }
      return transformOp.emitDefiniteFailure() << "expected IntegerAttr";
    }

    // Transform param case.
    Value transformValue = cast<Value>(ofr);
    if (isa<TransformParamTypeInterface>(transformValue.getType())) {
      ArrayRef<Attribute> params = state.getParams(transformValue);
      if (params.size() != 1)
        return transformOp.emitDefiniteFailure()
               << "requires exactly one parameter associated";
      result.push_back(
          cast<IntegerAttr>(params.front()).getValue().getSExtValue());
      continue;
    }

    // Payload value case.
    auto payloadOps = state.getPayloadOps(transformValue);
    if (!llvm::hasSingleElement(payloadOps)) {
      DiagnosedSilenceableFailure diag =
          transformOp.emitSilenceableError()
          << "handle must be mapped to exactly one payload op";
      diag.attachNote(transformValue.getLoc())
          << "mapped to " << llvm::range_size(payloadOps) << " payload ops";
      return diag;
    }

    Operation *op = *payloadOps.begin();
    if (op->getNumResults() != 1 || !op->getResult(0).getType().isIndex()) {
      DiagnosedSilenceableFailure diag =
          transformOp.emitSilenceableError()
          << "payload op must have exactly 1 index result";
      diag.attachNote(op->getLoc())
          << "has " << op->getNumResults() << " results";
      return diag;
    }

    IntegerAttr intAttr;
    if (!matchPattern(op->getResult(0), m_Constant(&intAttr)))
      return transformOp.emitSilenceableError()
             << "requires param or handle to be the result of a constant-like "
                "op";

    result.push_back(intAttr.getInt());
  }
  return DiagnosedSilenceableFailure::success();
}

/// Create an xegpu::LayoutAttr from the given sg_layout, sg_data, and
/// optional inst_data values.
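/// For example (a sketch; the exact printed form may differ), sgLayout =
/// {8, 4}, sgData = {32, 32}, instData = {8, 16} yields an attribute along
/// the lines of:
///   #xegpu.layout<sg_layout = [8, 4], sg_data = [32, 32], inst_data = [8, 16]>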
static xegpu::LayoutAttr
createLayoutAttr(MLIRContext *ctx, ArrayRef<int32_t> sgLayout,
                 ArrayRef<int32_t> sgData,
                 std::optional<ArrayRef<int32_t>> instData) {
  return xegpu::LayoutAttr::get(
      ctx, DenseI32ArrayAttr::get(ctx, sgLayout),
      DenseI32ArrayAttr::get(ctx, sgData),
      instData ? DenseI32ArrayAttr::get(ctx, instData.value()) : nullptr,
      /*lane_layout=*/nullptr,
      /*lane_data=*/nullptr,
      /*order=*/nullptr);
}

/// Replace the given xegpu.create_nd_desc op with an identical one whose
/// result tensor descriptor type carries `layout`.
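/// Illustrative before/after shapes (a sketch; assuming the op prints as
/// xegpu.create_nd_tdesc, operands elided):
///   %0 = xegpu.create_nd_tdesc %src : memref<256x128xf16>
///          -> !xegpu.tensor_desc<256x128xf16>
/// becomes
///   %0 = xegpu.create_nd_tdesc %src : memref<256x128xf16>
///          -> !xegpu.tensor_desc<256x128xf16, #xegpu.layout<...>>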
static xegpu::CreateNdDescOp
setDescLayout(transform::TransformRewriter &rewriter,
              xegpu::CreateNdDescOp descOp, xegpu::LayoutAttr layout) {
  assert(descOp.getMixedOffsets().empty() &&
         "create desc op with offsets is not supported");
  auto oldTensorDesc = descOp.getType();
  auto descType = xegpu::TensorDescType::get(
      oldTensorDesc.getShape(), oldTensorDesc.getElementType(),
      /*array_length=*/oldTensorDesc.getArrayLength(),
      /*boundary_check=*/oldTensorDesc.getBoundaryCheck(),
      /*memory_space=*/oldTensorDesc.getMemorySpace(),
      /*layout=*/layout);

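  // Rebuild the descriptor op with the same source, sizes, and strides; only
  // the result type changes so that it carries the new layout.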
  rewriter.setInsertionPointAfter(descOp);
  auto newDescOp = rewriter.replaceOpWithNewOp<xegpu::CreateNdDescOp>(
      descOp, descType, descOp.getSource(), descOp.getMixedSizes(),
      descOp.getMixedStrides());
  return newDescOp;
}

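// A minimal usage sketch in the transform dialect (assuming the op mnemonic is
// transform.xegpu.set_desc_layout; the exact assembly syntax may differ):
//   %desc = transform.structured.match ops{["xegpu.create_nd_tdesc"]} in %root
//       : (!transform.any_op) -> !transform.any_op
//   %new = transform.xegpu.set_desc_layout %desc
//       sg_layout = [8, 4] sg_data = [32, 32] inst_data = [8, 16]
//       : (!transform.any_op) -> !transform.any_op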
void transform::SetDescLayoutOp::build(OpBuilder &builder,
                                       OperationState &result, Value target,
                                       ArrayRef<OpFoldResult> mixedSgLayout,
                                       ArrayRef<OpFoldResult> mixedSgData,
                                       ArrayRef<OpFoldResult> mixedInstData) {
  SmallVector<int64_t> staticSgLayout, staticSgData, staticInstData;
  SmallVector<Value> dynamicSgLayout, dynamicSgData, dynamicInstData;
  dispatchIndexOpFoldResults(mixedSgLayout, dynamicSgLayout, staticSgLayout);
  dispatchIndexOpFoldResults(mixedSgData, dynamicSgData, staticSgData);
  dispatchIndexOpFoldResults(mixedInstData, dynamicInstData, staticInstData);
  build(builder, result, target.getType(),
        /*target=*/target,
        /*sg_layout=*/dynamicSgLayout,
        /*sg_data=*/dynamicSgData,
        /*inst_data=*/dynamicInstData,
        /*static_sg_layout=*/staticSgLayout,
        /*static_sg_data=*/staticSgData,
        /*static_inst_data=*/staticInstData);
}

DiagnosedSilenceableFailure
transform::SetDescLayoutOp::apply(transform::TransformRewriter &rewriter,
                                  transform::TransformResults &results,
                                  transform::TransformState &state) {
  auto targetOps = state.getPayloadOps(getTarget());
  if (!llvm::hasSingleElement(targetOps)) {
    return emitDefiniteFailure()
           << "requires exactly one target op handle (got "
           << llvm::range_size(targetOps) << ")";
  }
  Operation *target = *targetOps.begin();

  SmallVector<int32_t> sgLayout;
  DiagnosedSilenceableFailure status =
      convertMixedValuesToInt(state, (*this), sgLayout, getMixedSgLayout());
  if (!status.succeeded())
    return status;

  SmallVector<int32_t> sgData;
  status = convertMixedValuesToInt(state, (*this), sgData, getMixedSgData());
  if (!status.succeeded())
    return status;

  SmallVector<int32_t> instData;
  status =
      convertMixedValuesToInt(state, (*this), instData, getMixedInstData());
  if (!status.succeeded())
    return status;
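  // An empty inst_data list means instruction-level data tiling was not
  // requested; in that case the layout is created without an inst_data entry.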
  auto maybeInstData = instData.empty()
                           ? std::nullopt
                           : std::optional<ArrayRef<int32_t>>(instData);

  // For now, only the create_nd_desc op is supported.
  auto descOp = dyn_cast<xegpu::CreateNdDescOp>(target);
  if (!descOp) {
    auto diag = emitSilenceableFailure(getLoc())
                << "expected a xegpu.create_nd_desc op, but got: "
                << target->getName();
    diag.attachNote(target->getLoc()) << "target op";
    return diag;
  }

  // Set the layout attr in the desc op's return type. This replaces the old
  // desc op.
  auto layoutAttr =
      createLayoutAttr(rewriter.getContext(), sgLayout, sgData, maybeInstData);
  auto newDescOp = setDescLayout(rewriter, descOp, layoutAttr);

  // Map result handles.
  results.set(cast<OpResult>(getTransformed()), {newDescOp.getOperation()});

  return DiagnosedSilenceableFailure::success();
}

void transform::SetDescLayoutOp::getEffects(
    ::llvm::SmallVectorImpl<MemoryEffects::EffectInstance> &effects) {
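  // The target handle is consumed because the payload op it points to is
  // replaced; the layout operands are only read, and a new handle to the
  // rewritten descriptor op is produced.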
  consumesHandle(getTargetMutable(), effects);
  onlyReadsHandle(getSgLayoutMutable(), effects);
  onlyReadsHandle(getSgDataMutable(), effects);
  onlyReadsHandle(getInstDataMutable(), effects);
  producesHandle(getOperation()->getOpResults(), effects);
  modifiesPayload(effects);
}

namespace {
class XeGPUTransformDialectExtension
    : public transform::TransformDialectExtension<
          XeGPUTransformDialectExtension> {
public:
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(XeGPUTransformDialectExtension)

  using Base::Base;

  void init();
};

void XeGPUTransformDialectExtension::init() {
  declareGeneratedDialect<scf::SCFDialect>();
  declareGeneratedDialect<arith::ArithDialect>();
  declareGeneratedDialect<xegpu::XeGPUDialect>();

  registerTransformOps<
#define GET_OP_LIST
#include "mlir/Dialect/XeGPU/TransformOps/XeGPUTransformOps.cpp.inc"
      >();
}
} // namespace

#define GET_OP_CLASSES
#include "mlir/Dialect/XeGPU/TransformOps/XeGPUTransformOps.cpp.inc"

void mlir::xegpu::registerTransformDialectExtension(
    DialectRegistry &registry) {
  registry.addExtensions<XeGPUTransformDialectExtension>();
}