[CIR][ThroughMLIR] Lower simple SwitchOp. #1871
@@ -0,0 +1,112 @@
#include "mlir/IR/BuiltinOps.h" | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This needs a file header comment block. |
||
#include "mlir/IR/IRMapping.h" | ||
#include "mlir/Pass/Pass.h" | ||
#include "mlir/Transforms/DialectConversion.h" | ||
#include "clang/CIR/Dialect/Builder/CIRBaseBuilder.h" | ||
#include "clang/CIR/Dialect/IR/CIRDialect.h" | ||
|
||
using namespace llvm; | ||
using namespace cir; | ||
|
||
namespace cir { | ||
|
||
struct MLIRLoweringPrepare | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This should probably be in the |
||
: public mlir::PassWrapper<MLIRLoweringPrepare, | ||
mlir::OperationPass<mlir::ModuleOp>> { | ||
// `scf.index_switch` requires that switch branches do not fall through. | ||
// We need to copy the next branch's body when the current `cir.case` does not | ||
// terminate with a break. | ||
void removeFallthrough(llvm::SmallVector<CaseOp> &cases); | ||
|
||
void runOnOp(mlir::Operation *op); | ||
void runOnOperation() final; | ||
|
||
StringRef getDescription() const override { | ||
return "Rewrite CIR module to be more 'scf' dialect-friendly"; | ||
} | ||
|
||
StringRef getArgument() const override { return "mlir-lowering-prepare"; } | ||
}; | ||
|
||
// `scf.index_switch` requires that switch branches do not fall through.
// We need to copy the next branch's body when the current `cir.case` does not
// terminate with a break.
void MLIRLoweringPrepare::removeFallthrough(llvm::SmallVector<CaseOp> &cases) {
  CIRBaseBuilderTy builder(getContext());
  // Note we enumerate in reverse order to facilitate the cloning: by the time
  // a case is visited, the case it falls through into has already been
  // processed, so its body already ends with a break.
  for (auto it = cases.rbegin(); it != cases.rend(); it++) {
    auto caseOp = *it;
    auto &region = caseOp.getRegion();
    auto &lastBlock = region.back();
    mlir::Operation &last = lastBlock.back();
    if (isa<BreakOp>(last))
      continue;

    // The last op must be a `cir.yield`. As the case falls through, we copy
    // the next case's body into this one.
    if (!isa<YieldOp>(last)) {
      caseOp->dump();
      continue;
    }
    assert(isa<YieldOp>(last));

    // If this is the last case, there is nothing to fall through into, so we
    // can simply change the yield into a break.
    if (it == cases.rbegin()) {
      builder.setInsertionPointAfter(&last);
      builder.create<BreakOp>(last.getLoc());
      last.erase();
      continue;
    }

    // The previous element of the reverse iteration is the next case in
    // source order, i.e. the case being fallen into.
    auto prevIt = it;
    --prevIt;
    CaseOp &prev = *prevIt;
    auto &prevRegion = prev.getRegion();
    builder.cloneRegionBefore(prevRegion, region, region.end());

    // Inline the cloned block at the end of the original one. This is
    // required because `scf.index_switch` expects each of its regions to
    // contain a single block.
    mlir::Block *cloned = lastBlock.getNextNode();
    for (auto it = cloned->begin(); it != cloned->end();) {
      auto next = it;
      next++;
      it->moveBefore(&last);
      it = next;
    }
    cloned->erase();
    last.erase();
  }
}
void MLIRLoweringPrepare::runOnOp(mlir::Operation *op) {
  if (auto switchOp = dyn_cast<SwitchOp>(op)) {
    llvm::SmallVector<CaseOp> cases;
    if (!switchOp.isSimpleForm(cases))
      llvm_unreachable("NYI");

    removeFallthrough(cases);
    return;
  }
  llvm_unreachable("unexpected op type");
}

void MLIRLoweringPrepare::runOnOperation() {
  auto module = getOperation();

  llvm::SmallVector<mlir::Operation *> opsToTransform;
  module->walk([&](mlir::Operation *op) {
    if (isa<SwitchOp>(op))
      opsToTransform.push_back(op);
  });

  for (auto *op : opsToTransform)
    runOnOp(op);
}

std::unique_ptr<mlir::Pass> createMLIRLoweringPreparePass() {
  return std::make_unique<MLIRLoweringPrepare>();
}

} // namespace cir
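For illustration only (not part of the patch): at the C level, the effect of removeFallthrough is to turn the first switch below into the second, by copying the body of the case being fallen into and by turning the trailing fallthrough of the last case into a plain break. The before/after functions are just a sketch; the pass operates on cir.case regions, not on C source.

void before(void) {
  int i = 0;
  switch (i) {
  case 2:
    i++; // falls through into case 3
  case 3:
    i++;
    break;
  case 8:
    i++; // last case: nothing to fall into, becomes a plain break
  }
}

// After removeFallthrough, the cases are equivalent to:
void after(void) {
  int i = 0;
  switch (i) {
  case 2:
    i++;
    i++; // body of case 3 copied here
    break;
  case 3:
    i++;
    break;
  case 8:
    i++;
    break;
  }
}

The FileCheck test below verifies exactly this shape after lowering to scf.index_switch.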
@@ -0,0 +1,50 @@
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir -fno-clangir-direct-lowering -emit-mlir=core %s -o %t.mlir
// RUN: FileCheck --input-file=%t.mlir %s

void fallthrough() {
  int i = 0;
  switch (i) {
  case 2:
    i++;
  case 3:
    i++;
    break;
  case 8:
    i++;
  }

  // This should copy the `i++; break` in case 3 to case 2.

  // CHECK: memref.alloca_scope {
  // CHECK: %[[I:.+]] = memref.load %alloca[]
  // CHECK: %[[CASTED:.+]] = arith.index_cast %[[I]]
  // CHECK: scf.index_switch %[[CASTED]]
  // CHECK: case 2 {
  // CHECK: %[[I:.+]] = memref.load %alloca[]
  // CHECK: %[[ONE:.+]] = arith.constant 1
  // CHECK: %[[ADD:.+]] = arith.addi %[[I]], %[[ONE]]
  // CHECK: memref.store %[[ADD]], %alloca[]
  // CHECK: %[[I:.+]] = memref.load %alloca[]
  // CHECK: %[[ONE:.+]] = arith.constant 1
  // CHECK: %[[ADD:.+]] = arith.addi %[[I]], %[[ONE]]
  // CHECK: memref.store %[[ADD]], %alloca[]
  // CHECK: scf.yield
  // CHECK: }
  // CHECK: case 3 {
  // CHECK: %[[I:.+]] = memref.load %alloca[]
  // CHECK: %[[ONE:.+]] = arith.constant 1
  // CHECK: %[[ADD:.+]] = arith.addi %[[I]], %[[ONE]]
  // CHECK: memref.store %[[ADD]], %alloca[]
  // CHECK: scf.yield
  // CHECK: }
  // CHECK: case 8 {
  // CHECK: %[[I:.+]] = memref.load %alloca[]
  // CHECK: %[[ONE:.+]] = arith.constant 1
  // CHECK: %[[ADD:.+]] = arith.addi %[[I]], %[[ONE]]
  // CHECK: memref.store %[[ADD]], %alloca[]
  // CHECK: scf.yield
  // CHECK: }
  // CHECK: default {
  // CHECK: }
  // CHECK: }
}
Review comment: I'm a bit skeptical of having an MLIRLoweringPrepare pass that is separate from the existing LoweringPrepare pass. For one thing, "lowering through MLIR" is a very vague concept. The fact that you're mentioning scf as the target dialect here is good. I'd like to consider what's in LoweringPrepare to see if things can be reorganized in a more general way, so anyone lowering to arbitrary dialects will be able to reason about what passes they need to run to prepare.

Reply: I agree. I'd expect lowering to SCF to be selected at LoweringPreparePass::runOnOperation() based on using -fno-direct-lowering. You could still keep the implementation in a distinct file, but following Andy's suggestion on location.
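For illustration only (not existing ClangIR code): a minimal sketch of the direction suggested above, assuming a single prepare pass that gates the SCF-specific rewrites behind a pass option. The class name UnifiedLoweringPrepare and the prepare-for-scf option are hypothetical; in the real pipeline the choice would follow from -fno-clangir-direct-lowering, as noted in the reply.

#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/Pass.h"
#include "llvm/Support/CommandLine.h"

namespace cir {

// Hypothetical unified pass: shared preparation always runs, SCF-specific
// rewrites only when requested by the pipeline.
struct UnifiedLoweringPrepare
    : public mlir::PassWrapper<UnifiedLoweringPrepare,
                               mlir::OperationPass<mlir::ModuleOp>> {
  // Hypothetical option; the driver would enable it when lowering through
  // core MLIR dialects (-fno-clangir-direct-lowering) instead of directly
  // to LLVM IR.
  Option<bool> prepareForSCF{
      *this, "prepare-for-scf",
      llvm::cl::desc("Also run rewrites needed to map cir.switch onto "
                     "scf.index_switch"),
      llvm::cl::init(false)};

  void runOnOperation() final {
    // Target-independent preparation shared by every lowering path would
    // run here unconditionally.

    if (prepareForSCF) {
      // SCF-only rewrites, e.g. removing fallthrough between cir.case
      // bodies as removeFallthrough does in this patch.
    }
  }

  llvm::StringRef getArgument() const override {
    return "unified-lowering-prepare"; // illustrative name only
  }
  llvm::StringRef getDescription() const override {
    return "Prepare CIR for lowering, optionally targeting the scf dialect";
  }
};

} // namespace cir

Under this shape, whether the SCF-specific rewrites live in the existing LoweringPrepare file or in a separate one becomes purely an implementation detail, as the reply above suggests.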