[MLIR][XeGPU] Scattered ops sg-to-wi distribution #154949
@@ -807,6 +807,210 @@ struct GpuBarrierDistribution final : public gpu::WarpDistributionPattern
  }
};
/// Distribute a scattered store op. The offsets argument is required.
/// Both offset and mask vectors must be 1D and have #subgroup_size elements.
/// The layouts are fixed and implicit: one offset/mask element per lane.
/// The pass changes the offset and mask vector shapes to single-element
/// vectors; it is assumed that their producers will also be distributed.
/// The payload vector also has a fixed distribution:
/// no chunk size -> vector of one element.
/// chunk size -> vector of the innermost dimension of the SG payload.
/// Example 1 (no chunk size):
///   %mask = producer_op : vector<16xi1>
///   %offset = producer_op : vector<16xindex>
///   xegpu.store %payload, %src[%offset], %mask : vector<16xf16>,
///     memref<256xf16>, vector<16xindex>, vector<16xi1>
/// To
///   %mask = producer_op : vector<1xi1>
///   %offset = producer_op : vector<1xindex>
///   xegpu.store %payload, %src[%offset], %mask : vector<1xf16>,
///     memref<256xf16>, vector<1xindex>, vector<1xi1>
/// Example 2 (chunk size, same mask and offsets):
///   xegpu.store %payload, %src[%offset], %mask <{chunk_size=8}> :
///     vector<16x8xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
/// To
///   xegpu.store %payload, %src[%offset], %mask <{chunk_size=8}> :
///     vector<8xf16>, memref<256xf16>, vector<1xindex>, vector<1xi1>
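For orientation, a minimal sketch (illustrative producer ops and a subgroup size of 16; not taken from this patch) of the warp-level IR shape this pattern matches, with the store as the last op before the warp op's terminator:

```mlir
// Sketch only: %src and %payload are assumed to be captured from the
// enclosing function; "producer_op" stands for any distributable producer.
gpu.warp_execute_on_lane_0(%laneid)[16] {
  %mask = "producer_op"() : () -> vector<16xi1>
  %offset = "producer_op"() : () -> vector<16xindex>
  xegpu.store %payload, %src[%offset], %mask
      : vector<16xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
}
```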
struct StoreDistribution final : public gpu::WarpDistributionPattern {
  using gpu::WarpDistributionPattern::WarpDistributionPattern;
  LogicalResult matchAndRewrite(gpu::WarpExecuteOnLane0Op warpOp,
                                PatternRewriter &rewriter) const override {
    Operation *lastNode = warpOp.getTerminator()->getPrevNode();
    auto storeScatterOp = dyn_cast_or_null<xegpu::StoreScatterOp>(lastNode);
    if (!storeScatterOp)
      return failure();
    auto offsets = storeScatterOp.getOffsets();
    if (!offsets || !isa<VectorType>(offsets.getType()))
      return rewriter.notifyMatchFailure(
          storeScatterOp, "Store op must have a vector of offsets argument");
    VectorType offsetsTy = cast<VectorType>(offsets.getType());
    VectorType maskTy = cast<VectorType>(storeScatterOp.getMask().getType());
    if (offsetsTy.getRank() != 1 || maskTy.getRank() != 1)
      return rewriter.notifyMatchFailure(storeScatterOp,
                                         "Expected 1D offsets and mask vector");
    VectorType storeVecTy = cast<VectorType>(storeScatterOp.getValueType());
    assert(storeVecTy.getRank() <= 2 &&
           "Expected at most 2D result at SG level");
    VectorType distStoreVecTy;
Review discussion on this hunk (resolved):
- "strongly suggest using …"
- "The mask/offset/payload vectors have predetermined rules for their shape and lane assignment. Their distribution is fixed at all times, so user-side layouts are redundant."
- "I understand that the layout is not useful here. But it is better to keep this logic in a single place. This also ensures that the layout assigned to offsets (by propagation logic) is indeed correct."
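As context for this discussion, a propagated layout on the offsets/mask operands would show up as discardable attributes on the op. The sketch below is illustrative only: the layout_operand_<n> naming follows the xegpu::getLayoutName convention, and the concrete values are assumptions, not output of the propagation pass.

```mlir
// Hypothetical annotation: one offset/mask element per lane of a 16-lane subgroup.
xegpu.store %payload, %src[%offset], %mask
    {layout_operand_2 = #xegpu.layout<lane_layout = [16], lane_data = [1]>,
     layout_operand_3 = #xegpu.layout<lane_layout = [16], lane_data = [1]>}
    : vector<16xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
```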
    if (storeVecTy.getRank() == 2)
      distStoreVecTy = VectorType::Builder(storeVecTy).dropDim(0);
    else // rank 1
      distStoreVecTy = VectorType::Builder(storeVecTy).setDim(0, 1);
    // Assume offset and mask producers will be distributed as well.
    VectorType distOffsetsTy =
        VectorType::get({1}, getElementTypeOrSelf(offsetsTy));
    VectorType distMaskTy = VectorType::get(
        {1}, getElementTypeOrSelf(storeScatterOp.getMask().getType()));
    std::string layoutPayloadName =
        xegpu::getLayoutName(storeScatterOp->getOpOperand(0));
    std::string layoutOffsetsName =
        xegpu::getLayoutName(storeScatterOp->getOpOperand(2));
    std::string layoutMaskName =
        xegpu::getLayoutName(storeScatterOp->getOpOperand(3));
    xegpu::LayoutAttr layoutPayload =
        storeScatterOp->getAttrOfType<xegpu::LayoutAttr>(layoutPayloadName);
    xegpu::LayoutAttr layoutOffsets =
        storeScatterOp->getAttrOfType<xegpu::LayoutAttr>(layoutOffsetsName);
    xegpu::LayoutAttr layoutMask =
        storeScatterOp->getAttrOfType<xegpu::LayoutAttr>(layoutMaskName);
    FailureOr<VectorType> distStoreVecByWarpOpOrFailure =
        getDistVecTypeBasedOnLaneLayout(layoutPayload, storeVecTy);
    FailureOr<VectorType> distOffsetsByWarpOpOrFailure =
        getDistVecTypeBasedOnLaneLayout(layoutOffsets, offsetsTy);
    FailureOr<VectorType> distMaskByWarpOpOrFailure =
        getDistVecTypeBasedOnLaneLayout(layoutMask, maskTy);
    if (failed(distStoreVecByWarpOpOrFailure) ||
        failed(distOffsetsByWarpOpOrFailure) ||
        failed(distMaskByWarpOpOrFailure)) {
      storeScatterOp.emitWarning(
          "Some vector operands have no layouts, using defaults instead.");
    }
    distStoreVecTy = distStoreVecByWarpOpOrFailure.value_or(distStoreVecTy);
    distOffsetsTy = distOffsetsByWarpOpOrFailure.value_or(distOffsetsTy);
    distMaskTy = distMaskByWarpOpOrFailure.value_or(distMaskTy);
    SmallVector<size_t> newRetIndices;
    SmallVector<Value> operands = storeScatterOp->getOperands();
    SmallVector<Type> operandTypesToYield = {
        distStoreVecTy, operands[1].getType(), distOffsetsTy, distMaskTy};

    gpu::WarpExecuteOnLane0Op newWarpOp = moveRegionToNewWarpOpAndAppendReturns(
        rewriter, warpOp, operands, operandTypesToYield, newRetIndices);
    SmallVector<Value> newStoreScatterOpOperands = llvm::map_to_vector(
        newRetIndices, [&](size_t idx) { return newWarpOp.getResult(idx); });

    rewriter.setInsertionPointAfter(newWarpOp);
    xegpu::StoreScatterOp newOp = xegpu::StoreScatterOp::create(
        rewriter, newWarpOp.getLoc(), TypeRange{}, newStoreScatterOpOperands,
        storeScatterOp->getAttrs());
    xegpu::removeLayoutAttrs(newOp);
    rewriter.eraseOp(storeScatterOp);
    return success();
  }
};
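The net effect of this rewrite, as a hedged sketch (names and the subgroup size of 16 are illustrative, not output of an actual run): the store's operands are routed out through a new warp op with the distributed types computed above, and the store is recreated after it with per-lane types.

```mlir
// Sketch of the rewritten IR; %src is assumed to be a uniform value from above.
%r:4 = gpu.warp_execute_on_lane_0(%laneid)[16]
    -> (vector<1xf16>, memref<256xf16>, vector<1xindex>, vector<1xi1>) {
  %payload = "producer_op"() : () -> vector<16xf16>
  %mask = "producer_op"() : () -> vector<16xi1>
  %offset = "producer_op"() : () -> vector<16xindex>
  gpu.yield %payload, %src, %offset, %mask
      : vector<16xf16>, memref<256xf16>, vector<16xindex>, vector<16xi1>
}
xegpu.store %r#0, %r#1[%r#2], %r#3
    : vector<1xf16>, memref<256xf16>, vector<1xindex>, vector<1xi1>
```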
/// Distribute a scattered load op. The logic and requirements are the same as
/// for the scattered store distribution. The warpOp's payload vector is
/// expected to be distributed by the load's result consumer.
/// Example 1 (no chunk size):
///   %mask = producer_op : vector<16xi1>
///   %offset = producer_op : vector<16xindex>
///   %0 = xegpu.load %src[%offset], %mask : memref<256xf16>,
///     vector<16xindex>, vector<16xi1> -> vector<16xf16>
/// To
///   %mask = producer_op : vector<1xi1>
///   %offset = producer_op : vector<1xindex>
///   %0 = xegpu.load %src[%offset], %mask : memref<256xf16>,
///     vector<1xindex>, vector<1xi1> -> vector<1xf16>
/// Example 2 (chunk size, same mask and offsets):
///   %0 = xegpu.load %src[%offset], %mask <{chunk_size=8}> :
///     memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16x8xf16>
/// To
///   %0 = xegpu.load %src[%offset], %mask <{chunk_size=8}> :
///     memref<256xf16>, vector<1xindex>, vector<1xi1> -> vector<8xf16>
struct LoadDistribution final : public gpu::WarpDistributionPattern {
  using gpu::WarpDistributionPattern::WarpDistributionPattern;
  LogicalResult matchAndRewrite(gpu::WarpExecuteOnLane0Op warpOp,
                                PatternRewriter &rewriter) const override {
    OpOperand *producedByLastLoad = getWarpResult(warpOp, [&](Operation *op) {
      // Check that the yield operand was produced by the *last* scattered
      // load op, to avoid sinking it across barriers (maintain memory order).
      return isa<xegpu::LoadGatherOp>(op) &&
             warpOp.getTerminator()->getPrevNode() == op;
    });
    if (!producedByLastLoad)
      return rewriter.notifyMatchFailure(
          warpOp, "The last op is not xegpu::LoadGatherOp");
    auto loadGatherOp =
        producedByLastLoad->get().getDefiningOp<xegpu::LoadGatherOp>();
    auto offsets = loadGatherOp.getOffsets();
    if (!offsets || !isa<VectorType>(offsets.getType()) ||
        !isa<VectorType>(loadGatherOp.getMask().getType()))
      return rewriter.notifyMatchFailure(
          loadGatherOp,
          "Load op must have vector arguments for offsets and mask");
    VectorType offsetsTy = cast<VectorType>(offsets.getType());
    VectorType maskTy = cast<VectorType>(loadGatherOp.getMask().getType());
    if (offsetsTy.getRank() != 1 || maskTy.getRank() != 1)
      return rewriter.notifyMatchFailure(loadGatherOp,
                                         "Expected 1D offsets and mask vector");
    // Assume offset and mask producers will be distributed as well.
    VectorType distOffsetsTy =
        VectorType::get({1}, getElementTypeOrSelf(offsetsTy));
    VectorType distMaskTy = VectorType::get({1}, getElementTypeOrSelf(maskTy));
    std::string layoutOffsetsName =
        xegpu::getLayoutName(loadGatherOp->getOpOperand(1));
    std::string layoutMaskName =
        xegpu::getLayoutName(loadGatherOp->getOpOperand(2));

    xegpu::LayoutAttr layoutOffsets =
        loadGatherOp->getAttrOfType<xegpu::LayoutAttr>(layoutOffsetsName);
    xegpu::LayoutAttr layoutMask =
        loadGatherOp->getAttrOfType<xegpu::LayoutAttr>(layoutMaskName);
    FailureOr<VectorType> distOffsetsByWarpOpOrFailure =
        getDistVecTypeBasedOnLaneLayout(layoutOffsets, offsetsTy);
    FailureOr<VectorType> distMaskByWarpOpOrFailure =
        getDistVecTypeBasedOnLaneLayout(layoutMask, maskTy);
    if (failed(distOffsetsByWarpOpOrFailure) ||
        failed(distMaskByWarpOpOrFailure)) {
      loadGatherOp.emitWarning(
          "Some vector operands have no layouts, using defaults instead.");
    }
    distOffsetsTy = distOffsetsByWarpOpOrFailure.value_or(distOffsetsTy);
    distMaskTy = distMaskByWarpOpOrFailure.value_or(distMaskTy);
    SmallVector<size_t> newRetIndices;
    SmallVector<Value> operands = loadGatherOp->getOperands();
    SmallVector<Type> operandTypesToYield = {operands[0].getType(),
                                             distOffsetsTy, distMaskTy};

    gpu::WarpExecuteOnLane0Op newWarpOp = moveRegionToNewWarpOpAndAppendReturns(
        rewriter, warpOp, operands, operandTypesToYield, newRetIndices);

    SmallVector<Value> newLoadGatherOperands = llvm::map_to_vector(
        newRetIndices, [&](size_t idx) { return newWarpOp.getResult(idx); });

    const unsigned operandIdx = producedByLastLoad->getOperandNumber();
    VectorType loadVecTy =
        cast<VectorType>(warpOp.getResult(operandIdx).getType());
    assert(loadVecTy.getRank() == 1 && "Expected a distributed vector");

    rewriter.setInsertionPointAfter(newWarpOp);
    xegpu::LoadGatherOp newOp = rewriter.create<xegpu::LoadGatherOp>(
        newWarpOp.getLoc(), loadVecTy, newLoadGatherOperands,
        loadGatherOp->getAttrs());
    xegpu::removeLayoutAttrs(newOp);
    Value distributedVal = newWarpOp.getResult(operandIdx);
    rewriter.replaceAllUsesWith(distributedVal, newOp->getResult(0));
    return success();
  }
};
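To make the consumer-driven result distribution concrete, a minimal sketch (illustrative names, subgroup size 16) of the matched form: the load's subgroup-wide result is yielded to the warp op, whose corresponding result already carries the per-lane type that the recreated load produces after distribution.

```mlir
// Sketch: the yielded vector<16xf16> maps to a per-lane vector<1xf16> result.
%r = gpu.warp_execute_on_lane_0(%laneid)[16] -> (vector<1xf16>) {
  %mask = "producer_op"() : () -> vector<16xi1>
  %offset = "producer_op"() : () -> vector<16xindex>
  %0 = xegpu.load %src[%offset], %mask
      : memref<256xf16>, vector<16xindex>, vector<16xi1> -> vector<16xf16>
  gpu.yield %0 : vector<16xf16>
}
```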
} // namespace

namespace {
@@ -819,10 +1023,11 @@ struct XeGPUSubgroupDistributePass final
 void xegpu::populateXeGPUSubgroupDistributePatterns(
     RewritePatternSet &patterns) {
-  patterns.add<CreateNdDescDistribution, StoreNdDistribution,
-               LoadNdDistribution, DpasDistribution, PrefetchNdDistribution,
-               UpdateNdOffsetDistribution, GpuBarrierDistribution>(
-      patterns.getContext());
+  patterns
+      .add<CreateNdDescDistribution, StoreNdDistribution, LoadNdDistribution,
+           DpasDistribution, PrefetchNdDistribution, UpdateNdOffsetDistribution,
+           GpuBarrierDistribution, LoadDistribution, StoreDistribution>(
+          patterns.getContext());
 }
 void XeGPUSubgroupDistributePass::runOnOperation() {

@@ -837,6 +1042,9 @@ void XeGPUSubgroupDistributePass::runOnOperation() {
      if (!isa<VectorType>(operand.get().getType()))
        continue;

      // Vector operands of these ops have a fixed and implicit layout.
      if (isa<xegpu::LoadGatherOp, xegpu::StoreScatterOp>(op))
        continue;
      auto layout =
          xegpu::getDistributeLayoutAttrOfType<xegpu::LayoutAttr>(operand);
      if (!layout) {