13 changes: 9 additions & 4 deletions mlir/include/mlir/Dialect/AMDGPU/Transforms/Passes.td
@@ -54,15 +54,20 @@ def AmdgpuResolveStridedMetadataPass : Pass<"amdgpu-resolve-strided-metadata"> {
def AmdgpuTransferReadToLoadPass : Pass<"amdgpu-transfer-read-to-load"> {
let summary = "Lower the operations from the vector transfer_read to vector load";
let description = [{
This pass creates a transfer read op lowering. A vector trasfer read op
will be lowered to a combination of vector.load, arith.select and
vector.broadcast.
This pass creates a transfer read op lowering optimization. The lowering
emits a runtime conditional check: if the access is within bounds, the
vector transfer_read op is lowered to a combination of vector.load,
arith.select, and vector.broadcast. If not, it falls back to the default
lowering of the transfer_read op.

This pattern makes it possible to lower a masked transfer_read to a buffer
load with a bounds check, allowing a more optimized global load access
pattern compared with the existing lowering through llvm.intr.masked.load
on vectors.
}];
let dependentDialects = [];
let dependentDialects = [
"scf::SCFDialect",
"memref::MemRefDialect"
];
}
#endif // MLIR_DIALECT_AMDGPU_TRANSFORMS_PASSES_TD_
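
As a concrete illustration of the description above, here is a minimal sketch (an assumed example, not taken from this patch) of the kind of IR the pass matches and the rough structure it emits; the function name, shapes, and SSA names are illustrative:

// Input: a masked transfer_read from a fat_raw_buffer memref.
func.func @example(%mem : memref<8x8xf16, #amdgpu.address_space<fat_raw_buffer>>,
                   %i : index, %j : index, %mask : vector<4xi1>) -> vector<4xf16> {
  %pad = arith.constant 0.0 : f16
  %v = vector.transfer_read %mem[%i, %j], %pad, %mask {in_bounds = [true]}
      : memref<8x8xf16, #amdgpu.address_space<fat_raw_buffer>>, vector<4xf16>
  return %v : vector<4xf16>
}

// Rough shape of the emitted IR: a runtime guard keeps the default
// transfer_read lowering for the problematic boundary case and otherwise
// selects between the loaded vector and the padding splat.
//   %cond = <out-of-bounds && not-word-aligned check>
//   %v = scf.if %cond -> (vector<4xf16>) {
//     %slow = vector.transfer_read ...   // cloned op, tagged so this pattern skips it
//     scf.yield %slow : vector<4xf16>
//   } else {
//     %fill = vector.splat %pad : vector<4xf16>
//     %load = vector.load %mem[%i, %j] : memref<8x8xf16, #amdgpu.address_space<fat_raw_buffer>>, vector<4xf16>
//     %sel = arith.select %mask, %load, %fill : vector<4xi1>, vector<4xf16>
//     scf.yield %sel : vector<4xf16>
//   }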
1 change: 1 addition & 0 deletions mlir/lib/Dialect/AMDGPU/Transforms/CMakeLists.txt
@@ -14,6 +14,7 @@ add_mlir_dialect_library(MLIRAMDGPUTransforms
MLIRAMDGPUUtils
MLIRArithDialect
MLIRMemRefDialect
MLIRSCFDialect
MLIRVectorDialect
MLIRControlFlowDialect
MLIRFuncDialect
159 changes: 145 additions & 14 deletions mlir/lib/Dialect/AMDGPU/Transforms/TransferReadToLoad.cpp
@@ -9,13 +9,22 @@
#include "mlir/Dialect/AMDGPU/Transforms/Passes.h"

#include "mlir/Dialect/AMDGPU/IR/AMDGPUDialect.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Arith/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/MemRef/Utils/MemRefUtils.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Transforms/LoweringPatterns.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/WalkPatternRewriteDriver.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/Support/MathExtras.h"

namespace mlir::amdgpu {
#define GEN_PASS_DEF_AMDGPUTRANSFERREADTOLOADPASS
@@ -67,6 +76,9 @@ static LogicalResult transferPreconditions(
if (!memRefType.isLastDimUnitStride())
return rewriter.notifyMatchFailure(xferOp, "!= 1 stride needs VectorToSCF");

if (memRefType.getElementTypeBitWidth() < 8)
return rewriter.notifyMatchFailure(xferOp, "unsupported sub-byte type");

// If there is broadcasting involved then we first load the unbroadcasted
// vector, and then broadcast it with `vector.broadcast`.
ArrayRef<int64_t> vectorShape = xferOp.getVectorType().getShape();
Expand Down Expand Up @@ -101,13 +113,35 @@ static LogicalResult transferPreconditions(
return success();
}

static Value createVectorLoadForMaskedLoad(OpBuilder &builder, Location loc,
vector::TransferReadOp readOp,
bool requiresBroadcasting,
VectorType unbroadcastedVectorType) {
Value fill = builder.create<vector::SplatOp>(loc, unbroadcastedVectorType,
readOp.getPadding());
Value load = builder.create<vector::LoadOp>(
loc, unbroadcastedVectorType, readOp.getSource(), readOp.getIndices());
Value res = builder.create<arith::SelectOp>(loc, unbroadcastedVectorType,
readOp.getMask(), load, fill);
// Insert a broadcasting op if required.
if (requiresBroadcasting) {
res = builder.create<vector::BroadcastOp>(loc, readOp.getVectorType(), res);
}
return res;
}

static constexpr char kTransferReadNeedsMask[] =
"amdgpu.buffer_transfer_read_needs_mask";

namespace {

struct TransferReadLowering final : OpRewritePattern<vector::TransferReadOp> {
using OpRewritePattern::OpRewritePattern;

LogicalResult matchAndRewrite(vector::TransferReadOp readOp,
PatternRewriter &rewriter) const override {
if (readOp->hasAttr(kTransferReadNeedsMask))
return failure();

bool requiresBroadcasting = false;
VectorType unbroadcastedVectorType;
@@ -117,20 +151,115 @@ struct TransferReadLowering final : OpRewritePattern<vector::TransferReadOp> {
}

Location loc = readOp.getLoc();
Value fill = rewriter.create<vector::SplatOp>(loc, unbroadcastedVectorType,
readOp.getPadding());
Value load = rewriter.create<vector::LoadOp>(
loc, unbroadcastedVectorType, readOp.getSource(), readOp.getIndices());
Value res = rewriter.create<arith::SelectOp>(loc, unbroadcastedVectorType,
readOp.getMask(), load, fill);

// Insert a broadcasting op if required.
if (requiresBroadcasting) {
res = rewriter.create<vector::BroadcastOp>(loc, readOp.getVectorType(),
res);
Value src = readOp.getSource();

VectorType vectorType = readOp.getVectorType();
int64_t vectorSize = vectorType.getNumElements();
int64_t elementBitWidth = vectorType.getElementTypeBitWidth();
SmallVector<OpFoldResult> indices = readOp.getIndices();

auto stridedMetadata =
rewriter.create<memref::ExtractStridedMetadataOp>(loc, src);
SmallVector<OpFoldResult> strides =
stridedMetadata.getConstifiedMixedStrides();
SmallVector<OpFoldResult> sizes = stridedMetadata.getConstifiedMixedSizes();
OpFoldResult offset = stridedMetadata.getConstifiedMixedOffset();
OpFoldResult linearizedIndices;
std::tie(std::ignore, linearizedIndices) =
memref::getLinearizedMemRefOffsetAndSize(rewriter, loc, elementBitWidth,
elementBitWidth, offset, sizes,
strides, indices);

// TODO(jerryyin): Fix the getLinearizedMemRefOffsetAndSize() function.
// Note: the code below does not give the correct result for the linearized
// size:
//   Value totalSize = getValueOrCreateConstantIndexOp(
//       rewriter, loc, linearizedInfo.linearizedSize);
// It computes the product of all dimension sizes instead of taking the
// maximum of each dimension's size * stride.
SmallVector<AffineExpr> productExpressions;
SmallVector<Value> productResults;
unsigned sourceRank = cast<ShapedType>(src.getType()).getRank();

SmallVector<AffineExpr> symbols(2 * sourceRank);
SmallVector<Value> offsetValues;
bindSymbolsList(rewriter.getContext(), MutableArrayRef{symbols});

size_t symbolIndex = 0;
for (size_t i = 0; i < sourceRank; ++i) {
AffineExpr strideExpr, sizeExpr;
OpFoldResult stride = strides[i];
OpFoldResult size = sizes[i];
if (auto constantStride = getConstantIntValue(stride)) {
strideExpr = rewriter.getAffineConstantExpr(*constantStride);
} else {
strideExpr = symbols[symbolIndex++];
offsetValues.push_back(
getValueOrCreateConstantIndexOp(rewriter, loc, stride));
}

if (auto constantSize = getConstantIntValue(size)) {
sizeExpr = rewriter.getAffineConstantExpr(*constantSize);
} else {
sizeExpr = symbols[symbolIndex++];
offsetValues.push_back(
getValueOrCreateConstantIndexOp(rewriter, loc, size));
}

productExpressions.push_back(strideExpr * sizeExpr);
}

rewriter.replaceOp(readOp, res);
AffineMap maxMap = AffineMap::get(
/*dimCount=*/0, /*symbolCount=*/symbolIndex, productExpressions,
rewriter.getContext());
Value totalSize =
rewriter.create<affine::AffineMaxOp>(loc, maxMap, offsetValues);

// delta = bufferSize - linearizedOffset
Value vectorSizeOffset =
rewriter.create<arith::ConstantIndexOp>(loc, vectorSize);
Value linearIndex =
getValueOrCreateConstantIndexOp(rewriter, loc, linearizedIndices);
Value delta = rewriter.create<arith::SubIOp>(loc, totalSize, linearIndex);

// 1) check if delta < vectorSize
Value isOutofBounds = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::ult, delta, vectorSizeOffset);

// 2) check if (delta_bytes % (32 / elementBitWidth) != 0)
Value deltaBytes = rewriter.create<arith::MulIOp>(
loc, delta,
rewriter.create<arith::ConstantIndexOp>(loc, elementBitWidth / 8));
Value elementsPerWord = rewriter.create<arith::ConstantIndexOp>(
loc, llvm::divideCeil(32, elementBitWidth));
Value isNotWordAligned = rewriter.create<arith::CmpIOp>(
loc, arith::CmpIPredicate::ne,
rewriter.create<arith::RemUIOp>(loc, deltaBytes, elementsPerWord),
rewriter.create<arith::ConstantIndexOp>(loc, 0));

// We only take the fallback of the default transfer_read lowering when the
// access is both out-of-bounds and not word aligned. The fallback ensures
// correct results when loading at the boundary of the buffer, since a buffer
// load returns inconsistent zeros for the whole word when the boundary is
// crossed.
Value ifCondition =
rewriter.create<arith::AndIOp>(loc, isOutofBounds, isNotWordAligned);

auto thenBuilder = [&](OpBuilder &builder, Location loc) {
Operation *read = builder.clone(*readOp.getOperation());
read->setAttr(kTransferReadNeedsMask, builder.getUnitAttr());
Value readResult = read->getResult(0);
builder.create<scf::YieldOp>(loc, readResult);
};

auto elseBuilder = [&](OpBuilder &builder, Location loc) {
Value res = createVectorLoadForMaskedLoad(
builder, loc, readOp, requiresBroadcasting, unbroadcastedVectorType);
rewriter.create<scf::YieldOp>(loc, res);
};

auto ifOp =
rewriter.create<scf::IfOp>(loc, ifCondition, thenBuilder, elseBuilder);

rewriter.replaceOp(readOp, ifOp);

return success();
}
@@ -149,6 +278,8 @@ struct AmdgpuTransferReadToLoadPass final
void runOnOperation() override {
RewritePatternSet patterns(&getContext());
populateAmdgpuTransferReadToLoadPatterns(patterns);
walkAndApplyPatterns(getOperation(), std::move(patterns));
if (failed(applyPatternsGreedily(getOperation(), std::move(patterns)))) {
return signalPassFailure();
}
}
};
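
To see how the emitted guard behaves, consider an assumed example (numbers chosen for exposition, not taken from the tests): for a memref<8x8xf16> buffer, the affine.max above yields totalSize = max(8*8, 8*1) = 64 elements. A vector<4xf16> read at indices (7, 6) linearizes to 7*8 + 6 = 62, so delta = 64 - 62 = 2, which is less than the vector size of 4, i.e. the read runs past the end of the buffer. However, deltaBytes = 2 * 2 = 4 and 32 / 16 = 2, so 4 % 2 == 0: the remaining tail is word aligned, the conjunction is false, and the else branch (vector.load + arith.select) is still taken. Only when the access is both out-of-bounds and not word aligned does the pattern clone the original transfer_read as the slow path, tagging it with amdgpu.buffer_transfer_read_needs_mask so that this pattern does not match it again.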
94 changes: 77 additions & 17 deletions mlir/test/Dialect/AMDGPU/transfer-read-to-load.mlir
@@ -9,11 +9,71 @@ func.func @transfer_to_maskedload_fatrawbuffer(%mem : memref<8x8xf32, #amdgpu.ad
%res = vector.transfer_read %mem[%idx, %idx], %cf0, %mask {in_bounds = [true]} : memref<8x8xf32, #amdgpu.address_space<fat_raw_buffer>>, vector<4xf32>
return %res : vector<4xf32>
}
// CHECK: %[[CST:.*]] = arith.constant 0.0
// CHECK: %[[SPLAT:.*]] = vector.splat %[[CST]]

// CHECK: %[[FALSE:.*]] = arith.constant false
// CHECK: %[[IF:.*]] = scf.if %[[FALSE]] -> (vector<4xf32>) {
// CHECK: vector.transfer_read %[[ARG0]][%[[ARG1]], %[[ARG1]]]

// CHECK: } else {
// CHECK: %[[LOAD:.*]] = vector.load %arg0[%arg1, %arg1]
// CHECK: %[[SELECT:.*]] = arith.select %arg2, %[[LOAD]], %[[SPLAT]]
// CHECK: return %[[SELECT]] : vector<4xf32>
// CHECK: %[[SELECT:.*]] = arith.select %[[ARG2]], %[[LOAD]]

// CHECK: return %[[IF]] : vector<4xf32>

// -----

// CHECK: #map = affine_map<()[s0, s1] -> (s0 * 8 + s1)>
// CHECK-LABEL: func @transfer_to_maskedload_fatrawbuffer_f16(
// CHECK-SAME: %[[ARG0:.+]]: memref<8x8xf16, #amdgpu.address_space<fat_raw_buffer>>,
// CHECK-SAME: %[[ARG1:.+]]: index, %[[ARG2:.+]]: index,
// CHECK-SAME: %[[ARG3:.+]]: vector<4xi1>)
func.func @transfer_to_maskedload_fatrawbuffer_f16(%mem : memref<8x8xf16, #amdgpu.address_space<fat_raw_buffer>>, %idx0 : index, %idx1 : index, %mask : vector<4xi1>) -> vector<4xf16> {
%cf0 = arith.constant 0.0 : f16
%res = vector.transfer_read %mem[%idx0, %idx1], %cf0, %mask {in_bounds = [true]} : memref<8x8xf16, #amdgpu.address_space<fat_raw_buffer>>, vector<4xf16>
return %res : vector<4xf16>
}
// CHECK-DAG: %[[C0:.*]] = arith.constant 0
// CHECK-DAG: %[[SIZE:.*]] = arith.constant 64
// CHECK-DAG: %[[BYTES:.*]] = arith.constant 2
// CHECK-DAG: %[[VECTORSIZE:.*]] = arith.constant 4

// CHECK: %[[LINEAR:.*]] = affine.apply #map()[%[[ARG1]], %[[ARG2]]]
// CHECK: %[[DELTA:.*]] = arith.subi %[[SIZE]], %[[LINEAR]]
// CHECK: %[[COND1:.*]] = arith.cmpi ult, %[[DELTA]], %[[VECTORSIZE]]

// CHECK: %[[DELTABYTES:.*]] = arith.muli %[[DELTA]], %[[BYTES]]
// CHECK: %[[REM:.*]] = arith.remui %[[DELTABYTES]], %[[BYTES]]
// CHECK: %[[COND2:.*]] = arith.cmpi ne, %[[REM]], %[[C0]]

// CHECK: %[[COND:.*]] = arith.andi %[[COND1]], %[[COND2]]
// CHECK: %[[IF:.*]] = scf.if %[[COND]] -> (vector<4xf16>) {
// CHECK: vector.transfer_read %[[ARG0]][%[[ARG1]], %[[ARG2]]]
// CHECK: } else {
// CHECK: %[[LOAD:.*]] = vector.load %[[ARG0]][%[[ARG1]], %[[ARG2]]]
// CHECK: return %[[IF]] : vector<4xf16>

// -----

// CHECK: #map = affine_map<()[s0, s1, s2] -> (s0 * s1 + s2)>
// CHECK: #map1 = affine_map<()[s0, s1, s2] -> (s0 * s1, s2)>
// CHECK-LABEL: func @transfer_to_maskedload_fatrawbuffer_dynamic_i8(
// CHECK-SAME: %[[ARG0:.*]]: memref<?x?xi8, #amdgpu.address_space<fat_raw_buffer>>
// CHECK-SAME: %[[ARG1:.*]]: index, %[[ARG2:.*]]: index
// CHECK-SAME: %[[ARG3:.*]]: vector<4xi1>
func.func @transfer_to_maskedload_fatrawbuffer_dynamic_i8(%mem : memref<?x?xi8, #amdgpu.address_space<fat_raw_buffer>>, %idx0 : index, %idx1 : index, %mask : vector<4xi1>) -> vector<4xi8> {
%cf0 = arith.constant 0 : i8
%res = vector.transfer_read %mem[%idx0, %idx1], %cf0, %mask {in_bounds = [true]} : memref<?x?xi8, #amdgpu.address_space<fat_raw_buffer>>, vector<4xi8>
return %res : vector<4xi8>
}

// CHECK: %[[CST:.*]] = arith.constant dense<0> : vector<4xi8>
// CHECK: %[[C0:.*]] = arith.constant 0 : index
// CHECK: %[[C4:.*]] = arith.constant 4 : index
// CHECK: %[[BASE:.*]], %[[OFFSET:.*]], %[[SIZES:.*]]:2, %[[STRIDES:.*]]:2 = memref.extract_strided_metadata %[[ARG0]]
// CHECK: %[[LINEAR:.*]] = affine.apply #map()[%[[ARG1]], %[[STRIDES]]#0, %[[ARG2]]]
// CHECK: %[[SIZE:.*]] = affine.max #map1()[%[[STRIDES]]#0, %[[SIZES]]#0, %[[SIZES]]#1]
// CHECK: %[[IF:.*]] = scf.if
// CHECK: return

// -----

@@ -26,8 +86,8 @@ func.func @transfer_to_maskedload_regular(%mem : memref<8x8xf32>, %idx : index,
%res = vector.transfer_read %mem[%idx, %idx], %cf0, %mask {in_bounds = [true]} : memref<8x8xf32>, vector<4xf32>
return %res : vector<4xf32>
}
// CHECK: %[[CST:.*]] = arith.constant 0.0
// CHECK: %[[RES:.*]] = vector.transfer_read %arg0[%arg1, %arg1], %[[CST]], %arg2 {in_bounds = [true]} : memref<8x8xf32>, vector<4xf32>
// CHECK: %[[CST:.*]] = arith.constant 0.000000e+00
// CHECK: %[[RES:.*]] = vector.transfer_read %[[ARG0]][%[[ARG1]], %[[ARG1]]], %[[CST]], %[[ARG2]]
// CHECK: return %[[RES]] : vector<4xf32>

// -----
@@ -41,8 +101,8 @@ func.func @transfer_to_maskedload_addrspace(%mem : memref<8x8xf32, #gpu.address_
%res = vector.transfer_read %mem[%idx, %idx], %cf0, %mask {in_bounds = [true]} : memref<8x8xf32, #gpu.address_space<workgroup>>, vector<4xf32>
return %res : vector<4xf32>
}
// CHECK: %[[CST:.*]] = arith.constant 0.0
// CHECK: %[[RES:.*]] = vector.transfer_read %arg0[%arg1, %arg1], %[[CST]], %arg2 {in_bounds = [true]} : memref<8x8xf32, #gpu.address_space<workgroup>>, vector<4xf32>
// CHECK: %[[CST:.*]] = arith.constant 0.000000e+00
// CHECK: %[[RES:.*]] = vector.transfer_read %[[ARG0]][%[[ARG1]], %[[ARG1]]], %[[CST]], %[[ARG2]]
// CHECK: return %[[RES]] : vector<4xf32>

// -----
@@ -59,12 +119,12 @@ func.func @transfer_broadcasting(%mem : memref<8x8xf32, #amdgpu.address_space<fa
: memref<8x8xf32, #amdgpu.address_space<fat_raw_buffer>>, vector<4xf32>
return %res : vector<4xf32>
}
// CHECK: %[[CST:.*]] = arith.constant 0.0
// CHECK: %[[SPLAT:.*]] = vector.splat %[[CST]]
// CHECK: %[[CST:.*]] = arith.constant dense<0.000000e+00> : vector<1xf32>
// CHECK: %[[FALSE:.*]] = arith.constant false
// CHECK: %[[IF:.*]] = scf.if %[[FALSE]] -> (vector<4xf32>) {
// CHECK: %[[LOAD:.*]] = vector.load %arg0[%arg1, %arg1]
// CHECK: %[[SELECT:.*]] = arith.select %arg2, %[[LOAD]], %[[SPLAT]]
// CHECK: %[[SELECT:.*]] = arith.select %arg2, %[[LOAD]], %[[CST]]
// CHECK: %[[BROADCAST:.*]] = vector.broadcast %[[SELECT]] : vector<1xf32> to vector<4xf32>
// CHECK: return %[[BROADCAST]] : vector<4xf32>

// -----

@@ -79,8 +139,8 @@ func.func @transfer_scalar(%mem : memref<8x8xf32, #amdgpu.address_space<fat_raw_
: memref<8x8xf32, #amdgpu.address_space<fat_raw_buffer>>, vector<1xf32>
return %res : vector<1xf32>
}
// CHECK: %[[CST:.*]] = arith.constant 0.0
// CHECK: %[[SPLAT:.*]] = vector.splat %[[CST]]
// CHECK: %[[LOAD:.*]] = vector.load %arg0[%arg1, %arg1]
// CHECK: %[[SELECT:.*]] = arith.select %arg2, %[[LOAD]], %[[SPLAT]]
// CHECK: return %[[SELECT]] : vector<1xf32>
// CHECK: %[[CST:.*]] = arith.constant dense<0.000000e+00> : vector<1xf32>
// CHECK: %[[FALSE:.*]] = arith.constant false
// CHECK: %[[IF:.*]] = scf.if %[[FALSE]] -> (vector<1xf32>) {
// CHECK: %[[LOAD:.*]] = vector.load %[[ARG0]][%[[ARG1]], %[[ARG1]]]
// CHECK: %[[SELECT:.*]] = arith.select %arg2, %[[LOAD]], %[[CST]]
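
The RUN line of this test file sits above the first hunk and is therefore not visible in this diff; assuming the usual MLIR pass-test setup, the cases above would be exercised with something along the following lines (the exact flags are an assumption, not part of the patch):

// RUN: mlir-opt %s --amdgpu-transfer-read-to-load --split-input-file | FileCheck %s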
1 change: 1 addition & 0 deletions utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -1568,6 +1568,7 @@ cc_library(
":IR",
":MemRefDialect",
":Pass",
":SCFDialect",
":SideEffectInterfaces",
":Support",
":TransformUtils",