Skip to content

Commit 60d5258

Browse files
committed
[mlir][AMDGPU] Add canonicalizer for folding casts into gather_to_lds
1 parent c9cea24 commit 60d5258

File tree

3 files changed

+72
-0
lines changed

3 files changed

+72
-0
lines changed

mlir/include/mlir/Dialect/AMDGPU/IR/AMDGPU.td

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -921,6 +921,7 @@ def AMDGPU_GatherToLDSOp :
921921
$src `[` $srcIndices `]` `,` $dst `[` $dstIndices `]` attr-dict `:` $transferType `,` type($src) `,` type($dst)
922922
}];
923923
let hasVerifier = 1;
924+
let hasCanonicalizer = 1;
924925
}
925926

926927
def AMDGPU_TransposeLoadOp :

mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -510,6 +510,10 @@ LogicalResult DPPOp::verify() {
510510
return success();
511511
}
512512

513+
//===----------------------------------------------------------------------===//
514+
// GatherToLDSOp
515+
//===----------------------------------------------------------------------===//
516+
513517
LogicalResult GatherToLDSOp::verify() {
514518
MemRefType srcType = cast<MemRefType>(getSrc().getType());
515519
MemRefType dstType = cast<MemRefType>(getDst().getType());
@@ -546,6 +550,59 @@ LogicalResult GatherToLDSOp::verify() {
546550
return success();
547551
}
548552

553+
namespace {
554+
/// If the source/target of a CopyOp is a CastOp that does not modify the shape
555+
/// and element type, the cast can be skipped. Such CastOps only cast the layout
556+
/// of the type.
557+
struct FoldGatherToLDSOfCast : public OpRewritePattern<GatherToLDSOp> {
558+
using OpRewritePattern<GatherToLDSOp>::OpRewritePattern;
559+
560+
LogicalResult matchAndRewrite(GatherToLDSOp gatherOp,
561+
PatternRewriter &rewriter) const override {
562+
bool modified = false;
563+
564+
// Check source.
565+
if (auto castOp = gatherOp.getSrc().getDefiningOp<memref::CastOp>()) {
566+
auto fromType = llvm::dyn_cast<MemRefType>(castOp.getSource().getType());
567+
auto toType = llvm::dyn_cast<MemRefType>(castOp.getSource().getType());
568+
569+
if (fromType && toType &&
570+
fromType.getElementType() == toType.getElementType()) {
571+
rewriter.modifyOpInPlace(gatherOp, [&] {
572+
gatherOp.getSrcMutable().assign(castOp.getSource());
573+
});
574+
modified = true;
575+
}
576+
}
577+
578+
// Check target.
579+
if (auto castOp = gatherOp.getDst().getDefiningOp<memref::CastOp>()) {
580+
auto fromType = llvm::dyn_cast<MemRefType>(castOp.getSource().getType());
581+
auto toType = llvm::dyn_cast<MemRefType>(castOp.getSource().getType());
582+
583+
if (fromType && toType &&
584+
fromType.getElementType() == toType.getElementType()) {
585+
rewriter.modifyOpInPlace(gatherOp, [&] {
586+
gatherOp.getDstMutable().assign(castOp.getSource());
587+
});
588+
modified = true;
589+
}
590+
}
591+
592+
return success(modified);
593+
}
594+
};
595+
} // namespace
596+
597+
/// Registers canonicalization patterns for amdgpu.gather_to_lds; currently
/// only the pattern that folds layout-only memref.cast ops into the operands.
void GatherToLDSOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                MLIRContext *context) {
  results.add<FoldGatherToLDSOfCast>(context);
}
601+
602+
//===----------------------------------------------------------------------===//
603+
// TransposeLoadOp
604+
//===----------------------------------------------------------------------===//
605+
549606
LogicalResult TransposeLoadOp::verify() {
550607
MemRefType srcType = cast<MemRefType>(getSrc().getType());
551608

mlir/test/Dialect/AMDGPU/canonicalize.mlir

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -130,3 +130,17 @@ func.func @dead_atomic_add(%arg0: memref<4xf32>, %arg1: f32) {
130130
amdgpu.raw_buffer_atomic_fadd {boundsCheck = true} %arg1 -> %arg0[%c4_i32] : f32 -> memref<4xf32>, i32
131131
func.return
132132
}
133+
134+
// -----

// Verifies the GatherToLDSOp canonicalizer: a memref.cast that only erases
// static shape information (128x72 -> ?x?) on the global-memory source is
// folded away, so gather_to_lds reads %global directly with its static type.
// CHECK-LABEL: func @fold_gather_to_lds_of_cast
func.func @fold_gather_to_lds_of_cast(%global: memref<128x72xf32, 1>, %lds: memref<64x64xf32, 3>) {
// CHECK-SAME: %[[GLOBAL:[A-Za-z0-9]+]]: memref<128x72xf32, 1>
  %c0 = arith.constant 0 : index
  %0 = memref.cast %global : memref<128x72xf32, 1> to memref<?x?xf32, 1>
// CHECK: amdgpu.gather_to_lds %[[GLOBAL]]
// CHECK-SAME: : f32, memref<128x72xf32, 1>
  amdgpu.gather_to_lds %0[%c0, %c0], %lds[%c0, %c0]
    : f32, memref<?x?xf32, 1>, memref<64x64xf32, 3>
  func.return
}

0 commit comments

Comments
 (0)