diff --git a/llvm/lib/Target/EVM/CMakeLists.txt b/llvm/lib/Target/EVM/CMakeLists.txt
index 950857965cc9..f3e2ebfa3448 100644
--- a/llvm/lib/Target/EVM/CMakeLists.txt
+++ b/llvm/lib/Target/EVM/CMakeLists.txt
@@ -24,6 +24,7 @@ add_llvm_target(EVMCodeGen
   EVMAsmPrinter.cpp
   EVMBackwardPropagationStackification.cpp
   EVMCalculateModuleSize.cpp
+  EVMConstantSpiller.cpp
   EVMConstantUnfolding.cpp
   EVMCodegenPrepare.cpp
   EVMFinalizeStackFrames.cpp
diff --git a/llvm/lib/Target/EVM/EVMCalculateModuleSize.cpp b/llvm/lib/Target/EVM/EVMCalculateModuleSize.cpp
index b0ee0db5603e..0e135e2af2c3 100644
--- a/llvm/lib/Target/EVM/EVMCalculateModuleSize.cpp
+++ b/llvm/lib/Target/EVM/EVMCalculateModuleSize.cpp
@@ -89,6 +89,10 @@ static unsigned getInstSize(const MachineInstr &MI,
     // already exceeds the cap, so the push width is moot.
     Size += TII->get(EVM::PUSH2_S).getSize() + TII->get(EVM::JUMP_S).getSize();
     break;
+  case EVM::PUSH_FRAME:
+    // Typical frame index offsets can be encoded in a single byte, but to
+    // be conservative, let's assume 2 bytes per offset.
+    LLVM_FALLTHROUGH;
   case EVM::PUSH_LABEL:
     // We emit PUSH4_S here. The linker usually relaxes it to PUSH2_S,
     // since a 16-bit immediate covers the 24,576-byte EVM runtime code cap
diff --git a/llvm/lib/Target/EVM/EVMConstantSpiller.cpp b/llvm/lib/Target/EVM/EVMConstantSpiller.cpp
new file mode 100644
index 000000000000..6339d9df424b
--- /dev/null
+++ b/llvm/lib/Target/EVM/EVMConstantSpiller.cpp
@@ -0,0 +1,92 @@
+//===----- EVMConstantSpiller.cpp - Spill constants to memory --*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file identifies CONSTANT_RELOAD instructions representing spilled
+// constants throughout the module. It spills constants at the start of the
+// entry function and replaces CONSTANT_RELOAD with the corresponding reload
+// instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "EVMConstantSpiller.h"
+#include "EVMInstrInfo.h"
+#include "EVMSubtarget.h"
+#include "MCTargetDesc/EVMMCTargetDesc.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/Support/Debug.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "evm-spill-constants"
+
+constexpr uint64_t SpillSlotSize = 32;
+
+uint64_t EVMConstantSpiller::getSpillSize() const {
+  return ConstantToUseCount.size() * SpillSlotSize;
+}
+
+void EVMConstantSpiller::emitSpills(uint64_t SpillOffset,
+                                    MachineFunction &EntryMF) {
+  const EVMInstrInfo *TII = EntryMF.getSubtarget<EVMSubtarget>().getInstrInfo();
+
+  DenseMap<APInt, uint64_t> ConstantToSpillOffset;
+  for (const auto &KV : ConstantToUseCount) {
+    ConstantToSpillOffset[KV.first] = SpillOffset;
+    SpillOffset += SpillSlotSize;
+  }
+
+  // Emit constant stores in the prologue of the entry function.
+  // TODO: #925, implement a more elaborate analysis to determine the most
+  // suitable location for performing constant spilling.
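+  // The result is one 32-byte slot per distinct constant, laid out
+  // consecutively starting at SpillOffset. Each constant is stored once in
+  // the entry prologue (PUSH <constant>; PUSH <offset>; MSTORE), and every
+  // CONSTANT_RELOAD use site is later rewritten into PUSH <offset>; MLOAD.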
+  MachineBasicBlock &SpillMBB = EntryMF.front();
+  for (const auto &[Imm, Offset] : ConstantToSpillOffset) {
+    LLVM_DEBUG({
+      dbgs() << "Spilling constant: " << Imm
+             << ", number of uses: " << ConstantToUseCount.at(Imm)
+             << ", at offset: " << Offset << '\n';
+    });
+
+    // Push the constant
+    TII->insertPush(Imm, SpillMBB, SpillMBB.begin(), DebugLoc());
+    // Push the offset
+    TII->insertPush(APInt(256, Offset), SpillMBB, std::next(SpillMBB.begin()),
+                    DebugLoc());
+    BuildMI(SpillMBB, std::next(SpillMBB.begin(), 2), DebugLoc(),
+            TII->get(EVM::MSTORE_S));
+  }
+
+  // Reload spilled constants.
+  for (MachineInstr *MI : Reloads) {
+    const APInt Imm = MI->getOperand(0).getCImm()->getValue().zext(256);
+    uint64_t Offset = ConstantToSpillOffset.at(Imm);
+
+    MachineBasicBlock *MBB = MI->getParent();
+    TII->insertPush(APInt(256, Offset), *MBB, MI, MI->getDebugLoc());
+    auto Load = BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(EVM::MLOAD_S));
+    Load->setAsmPrinterFlag(MachineInstr::ReloadReuse);
+    MI->eraseFromParent();
+  }
+}
+
+EVMConstantSpiller::EVMConstantSpiller(SmallVector<MachineFunction *> &MFs) {
+  for (MachineFunction *MF : MFs) {
+    for (MachineBasicBlock &MBB : *MF) {
+      for (MachineInstr &MI : MBB) {
+        if (MI.getOpcode() != EVM::CONSTANT_RELOAD)
+          continue;
+
+        const APInt Imm = MI.getOperand(0).getCImm()->getValue().zext(256);
+        ConstantToUseCount[Imm]++;
+        Reloads.push_back(&MI);
+      }
+    }
+  }
+}
diff --git a/llvm/lib/Target/EVM/EVMConstantSpiller.h b/llvm/lib/Target/EVM/EVMConstantSpiller.h
new file mode 100644
index 000000000000..c2f5d9490655
--- /dev/null
+++ b/llvm/lib/Target/EVM/EVMConstantSpiller.h
@@ -0,0 +1,52 @@
+//===----- EVMConstantSpiller.h - Spill constants to memory ----*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file identifies CONSTANT_RELOAD instructions representing spilled
+// constants throughout the module. It spills constants at the start of the
+// entry function and replaces CONSTANT_RELOAD with the corresponding reload
+// instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_EVM_EVMCONSTANTSPILLER_H
+#define LLVM_LIB_TARGET_EVM_EVMCONSTANTSPILLER_H
+
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class MachineInstr;
+class MachineFunction;
+
+class EVMConstantSpiller {
+public:
+  explicit EVMConstantSpiller(SmallVector<MachineFunction *> &MFs);
+
+  /// Inserts constant spills into the first basic block of the entry
+  /// function and replaces CONSTANT_RELOAD with the corresponding reload
+  /// instructions at their use sites.
+  void emitSpills(uint64_t SpillOffset, MachineFunction &EntryMF);
+
+  /// Return the total size needed for the spill area.
+  uint64_t getSpillSize() const;
+
+private:
+  /// Maps each APInt constant to the number of times it appears across all
+  /// functions in the module.
+  SmallDenseMap<APInt, unsigned> ConstantToUseCount;
+
+  /// CONSTANT_RELOAD instructions that need to be converted into actual
+  /// reloads.
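+  /// Each of these is rewritten in place by emitSpills() into a PUSH of the
+  /// slot offset followed by an MLOAD.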
+  SmallVector<MachineInstr *> Reloads;
+};
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_EVM_EVMCONSTANTSPILLER_H
diff --git a/llvm/lib/Target/EVM/EVMConstantUnfolding.cpp b/llvm/lib/Target/EVM/EVMConstantUnfolding.cpp
index 786b1a426bfe..299bfb956853 100644
--- a/llvm/lib/Target/EVM/EVMConstantUnfolding.cpp
+++ b/llvm/lib/Target/EVM/EVMConstantUnfolding.cpp
@@ -25,16 +25,17 @@
 #include "EVM.h"
 #include "EVMCalculateModuleSize.h"
+#include "EVMConstantSpiller.h"
 #include "EVMInstrInfo.h"
 #include "EVMSubtarget.h"
 #include "MCTargetDesc/EVMMCTargetDesc.h"
 #include "TargetInfo/EVMTargetInfo.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/IR/Module.h"
-#include "llvm/IR/PassInstrumentation.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Support/Debug.h"
 #include
@@ -75,9 +76,67 @@
 static cl::opt<unsigned>
     MetadataSize("evm-metadata-size", cl::Hidden, cl::init(0),
                  cl::desc("EVM metadata size"));
 
+static cl::opt<unsigned> ConstantReloadThreshold(
+    "evm-constant-reload-threshold", cl::Hidden, cl::init(30),
+    cl::desc("Minimum number of uses of a constant across the module required "
+             "before reloading it from memory is considered profitable"));
+
+static cl::opt<bool> EnableConstSpillWithoutUnsafeAsm(
+    "enable-constant-spilling-without-unsafe-asm",
+    cl::desc("Also enable constant spilling if there is no unsafe asm in the "
+             "module"),
+    cl::init(false), cl::Hidden);
+
 namespace {
 using InstrsPerLoopDepthTy = SmallVector<SmallVector<MachineInstr *>>;
 
+// This helper calculates the frequency of constants across all machine
+// functions in the module and determines whether a given constant is
+// eligible for spilling.
+class SpillHelper {
+public:
+  explicit SpillHelper(SmallVector<MachineFunction *> &MFs,
+                       bool HasUnsafeAsm) {
+    // Perform constant spilling only if the module already requires spilling
+    // after stackification (or, when -enable-constant-spilling-without-unsafe-asm
+    // is set, if the module contains no unsafe assembly). This helps reduce
+    // compilation time, since otherwise we would need to compile roughly
+    // twice as many contracts.
+    if (none_of(MFs,
+                [](const MachineFunction *MF) {
+                  return MF->getFrameInfo().hasStackObjects();
+                }) &&
+        (!EnableConstSpillWithoutUnsafeAsm || HasUnsafeAsm))
+      return;
+
+    for (MachineFunction *MF : MFs) {
+      for (MachineBasicBlock &MBB : *MF) {
+        for (MachineInstr &MI : MBB) {
+          if (!EVMInstrInfo::isPush(&MI) || (MI.getOpcode() == EVM::PUSH0_S))
+            continue;
+
+          const APInt Imm = MI.getOperand(0).getCImm()->getValue().zext(256);
+          // Perform spilling only for large constants where it can have
+          // a noticeable impact.
+          if (Imm.getActiveBits() > 16 * 8)
+            ImmToUseCount[Imm]++;
+        }
+      }
+    }
+  }
+
+  bool isSpillEligible(const APInt &Imm) const {
+    auto It = ImmToUseCount.find(Imm);
+    if (It == ImmToUseCount.end())
+      return false;
+
+    return It->second >= ConstantReloadThreshold;
+  }
+
+private:
+  SmallDenseMap<APInt, unsigned> ImmToUseCount;
+};
+
 // Estimates the execution cost of EVM-style stack operations.
 // Tracks instruction count, gas cost, and unfolded bytecode size.
 // It abstracts gas accounting for pushes and simple arithmetic/logical
@@ -93,6 +152,22 @@ class StackCostModel {
     Gas += EVMInstrInfo::getGasCost(Desc);
   }
 
+  void accountSpill(const TargetInstrInfo *TII) {
+    // In most cases, offsets within the spill area can be represented
+    // using a single byte, but let's pessimistically assume they require
+    // 2 bytes.
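+    // A reload is therefore modeled as the two-instruction sequence: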
+ // + // PUSH2 offset + // MLOAD + + InstrCount = 2; + const MCInstrDesc &PushDesc = TII->get(EVM::PUSH2_S); + const MCInstrDesc &LoadDesc = TII->get(EVM::MLOAD_S); + ByteSize = PushDesc.getSize() + LoadDesc.getSize(); + Gas = + EVMInstrInfo::getGasCost(PushDesc) + EVMInstrInfo::getGasCost(LoadDesc); + } + unsigned getInstrCount() const { return InstrCount; } unsigned getByteSize() const { return ByteSize; } unsigned getGas() const { return Gas; } @@ -110,7 +185,7 @@ class StackCostModel { // to track the accumulated cost. class TransformationCandidate { public: - TransformationCandidate(LLVMContext &Context, const TargetInstrInfo *TII) + TransformationCandidate(LLVMContext &Context, const EVMInstrInfo *TII) : Context(Context), TII(TII) {} void addShl() { @@ -131,55 +206,69 @@ class TransformationCandidate { BuildItems.push_back([this](MachineInstr &MI) { insertInstr(MI, Opc); }); } + void addReload() { + CostModel.accountSpill(TII); + IsReload = true; + } + void addImm(const APInt &Val) { unsigned Opc = EVM::getStackOpcode(EVM::getPUSHOpcode(Val)); CostModel.accountInstr(Opc, TII); - BuildItems.push_back([this, Opc, Val = Val](MachineInstr &MI) { - insertImmInstr(MI, Opc, Val); + BuildItems.push_back([this, Val = Val](MachineInstr &MI) { + TII->insertPush(Val, *MI.getParent(), MI, MI.getDebugLoc()); }); } // Applies queued build instruction steps to replace a given instruction. void apply(MachineInstr &MI) const { + if (IsReload) { + auto NewMI = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), + TII->get(EVM::CONSTANT_RELOAD)); + + const APInt Imm = MI.getOperand(0).getCImm()->getValue(); + NewMI.addCImm(ConstantInt::get(Context, Imm)); + return; + } + for (const auto &func : BuildItems) func(MI); } const StackCostModel &getCost() const { return CostModel; } + bool isReload() const { return IsReload; } + private: using BuildFunction = std::function; LLVMContext &Context; - const TargetInstrInfo *TII{}; + const EVMInstrInfo *TII{}; StackCostModel CostModel; SmallVector BuildItems; + bool IsReload = false; + void insertInstr(MachineInstr &MI, unsigned Opc) { BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opc)); } - - void insertImmInstr(MachineInstr &MI, unsigned Opc, const APInt &Val) { - auto NewMI = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opc)); - if (Opc != EVM::PUSH0_S) - NewMI.addCImm(ConstantInt::get(Context, Val)); - } }; // Discovers, applies, and caches optimal constant unfolding // transformations. class ConstantUnfolder { public: - explicit ConstantUnfolder(LLVMContext *Context) : Context(Context) {} + explicit ConstantUnfolder(LLVMContext *Context, SpillHelper &SpillHelper) + : Context(Context), SpillHelper(SpillHelper) {} unsigned getCodeSizeReduction() const { return OverallCodeReductionSize; } bool tryToUnfoldConstant(MachineInstr &MI, bool OptForSize, - const TargetInstrInfo *TII); + const EVMInstrInfo *TII); private: LLVMContext *Context{}; + SpillHelper &SpillHelper; // The 'second' field can be set to 0 or 1, indicating whether to // optimize for performance or size. 
@@ -191,7 +280,7 @@ class ConstantUnfolder {
   const TransformationCandidate *
   findOptimalTransformation(const APInt &Imm, bool OptForSize,
-                            const TargetInstrInfo *TII);
+                            const EVMInstrInfo *TII);
 
   void reduceCodeSizeOn(unsigned Size) {
     OverallCodeReductionSize += Size;
@@ -241,11 +330,11 @@ static bool isBetterCandidate(const TransformationCandidate &A,
   unsigned ScoreB = 0;
   if (OptForSize) {
-    ScoreA = (CostA.getByteSize() * Weight) + CostA.getGas();
-    ScoreB = (CostB.getByteSize() * Weight) + CostB.getGas();
+    ScoreA = (CostA.getByteSize() * Weight) + CostA.getGas() + A.isReload();
+    ScoreB = (CostB.getByteSize() * Weight) + CostB.getGas() + B.isReload();
   } else {
-    ScoreA = CostA.getByteSize() + (Weight * CostA.getGas());
-    ScoreB = CostB.getByteSize() + (Weight * CostB.getGas());
+    ScoreA = CostA.getByteSize() + (Weight * CostA.getGas()) + A.isReload();
+    ScoreB = CostB.getByteSize() + (Weight * CostB.getGas()) + B.isReload();
   }
   if (ScoreA != ScoreB)
     return ScoreA < ScoreB;
@@ -255,7 +344,7 @@ const TransformationCandidate *
 ConstantUnfolder::findOptimalTransformation(const APInt &Imm, bool OptForSize,
-                                            const TargetInstrInfo *TII) {
+                                            const EVMInstrInfo *TII) {
   if (auto It = TransformationCache.find({Imm, OptForSize});
       It != TransformationCache.end()) {
     LLVM_DEBUG(
@@ -432,6 +521,20 @@ ConstantUnfolder::findOptimalTransformation(const APInt &Imm, bool OptForSize,
     Transformations.emplace_back(std::move(Tr));
   }
 
+  // 7. Check whether spilling the constant is appropriate.
+  //
+  // Cost:
+  //   PUSH1 frame_off   // 2
+  //   MLOAD             // 1
+  //
+  // Typically one byte is enough to encode the offset.
+  //
+  if (SpillHelper.isSpillEligible(Imm)) {
+    auto Tr = std::make_unique<TransformationCandidate>(*Context, TII);
+    Tr->addReload();
+    Transformations.emplace_back(std::move(Tr));
+  }
+
   // Find optimal transformation.
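+  // The cheapest candidate wins according to isBetterCandidate(), which
+  // weighs bytecode size against gas depending on OptForSize and adds a
+  // small penalty for reload candidates.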
   auto *OptIt =
       std::min_element(Transformations.begin(), Transformations.end(),
                        [OptForSize](const auto &A, const auto &B) {
@@ -445,7 +548,10 @@ ConstantUnfolder::findOptimalTransformation(const APInt &Imm, bool OptForSize,
     LLVM_DEBUG({
       dbgs() << "    [size: " << Cost.getByteSize()
              << ", instr count: " << Cost.getInstrCount()
-             << ", gas: " << Cost.getGas() << "]\n";
+             << ", gas: " << Cost.getGas();
+      if (Tr->isReload())
+        dbgs() << ", IsReload";
+      dbgs() << "]\n";
     });
   }
 #endif // NDEBUG
@@ -476,7 +582,7 @@ static bool isProfitableToTranform(const APInt &Imm, const StackCostModel &Cost,
 }
 
 bool ConstantUnfolder::tryToUnfoldConstant(MachineInstr &MI, bool OptForSize,
-                                           const TargetInstrInfo *TII) {
+                                           const EVMInstrInfo *TII) {
   const APInt Imm = MI.getOperand(0).getCImm()->getValue().zext(256);
   unsigned OrigSize = (alignTo(Imm.getActiveBits(), 8) / 8) + 1;
   assert(Imm.getActiveBits() > 4 * 8);
@@ -503,7 +609,10 @@ bool ConstantUnfolder::tryToUnfoldConstant(MachineInstr &MI, bool OptForSize,
       dbgs() << "  Optimal transformation:\n"
              << "    [size: " << OptCost.getByteSize()
              << ", instr count: " << OptCost.getInstrCount()
-             << ", gas: " << OptCost.getGas() << "]\n";
+             << ", gas: " << OptCost.getGas();
+    if (OptTransformation->isReload())
+      dbgs() << ", IsReload";
+    dbgs() << "]\n";
   });
 
   if (OptCost.getInstrCount() == 1) {
@@ -609,7 +718,7 @@ class LoopDepthInstrCache {
 static bool processInstructions(ConstantUnfolder &Unfolder,
                                 const SmallVector<MachineInstr *> &Instrs,
                                 DenseSet<MachineInstr *> &Visited,
-                                bool OptForSize, const TargetInstrInfo *TII) {
+                                bool OptForSize, const EVMInstrInfo *TII) {
   bool Changed = false;
   for (MachineInstr *MI : Instrs) {
     if (Visited.count(MI))
@@ -628,7 +737,15 @@ static bool processInstructions(ConstantUnfolder &Unfolder,
 
 static bool runImpl(Module &M, MachineModuleInfo &MMI) {
   bool Changed = false;
-  ConstantUnfolder Unfolder(&M.getContext());
+
+  SmallVector<MachineFunction *> MFs;
+  for_each(M.getFunctionList(), [&MFs, &MMI](Function &F) {
+    if (MachineFunction *MF = MMI.getMachineFunction(F))
+      MFs.push_back(MF);
+  });
+
+  SpillHelper SpillHelper(MFs, M.getNamedMetadata("llvm.evm.hasunsafeasm"));
+  ConstantUnfolder Unfolder(&M.getContext(), SpillHelper);
 
   // Metadata size is included into the bytecode size.
   const unsigned ModuleCodeSize =
@@ -638,11 +755,7 @@ static bool runImpl(Module &M, MachineModuleInfo &MMI) {
   DenseMap> InstrCacheMap;
 
-  for (Function &F : M) {
-    MachineFunction *MF = MMI.getMachineFunction(F);
-    if (!MF)
-      continue;
-
+  for (MachineFunction *MF : MFs) {
     // Compute MachineLoopInfo on the fly, as it's not available on the
     // Module pass level.
     auto OwnedMDT = std::make_unique<MachineDominatorTree>();
diff --git a/llvm/lib/Target/EVM/EVMFinalizeStackFrames.cpp b/llvm/lib/Target/EVM/EVMFinalizeStackFrames.cpp
index 0ece97c31870..1864a65e9fee 100644
--- a/llvm/lib/Target/EVM/EVMFinalizeStackFrames.cpp
+++ b/llvm/lib/Target/EVM/EVMFinalizeStackFrames.cpp
@@ -12,6 +12,9 @@
 //===----------------------------------------------------------------------===//
 
 #include "EVM.h"
+#include "EVMConstantSpiller.h"
+#include "EVMInstrInfo.h"
+#include "EVMSubtarget.h"
 #include "MCTargetDesc/EVMMCTargetDesc.h"
 #include "TargetInfo/EVMTargetInfo.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
@@ -105,7 +108,7 @@ void EVMFinalizeStackFrames::replaceFrameIndices(
   assert(MFI.hasStackObjects() &&
          "Cannot replace frame indices without stack objects");
-  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
+  const EVMInstrInfo *TII = MF.getSubtarget<EVMSubtarget>().getInstrInfo();
   for (MachineBasicBlock &MBB : MF) {
     for (MachineInstr &MI : make_early_inc_range(MBB)) {
       if (MI.getOpcode() != EVM::PUSH_FRAME)
@@ -118,12 +121,7 @@ void EVMFinalizeStackFrames::replaceFrameIndices(
       // Replace the frame index with the corresponding stack offset.
       APInt Offset(256,
                    StackRegionStart + MFI.getObjectOffset(FIOp.getIndex()));
-      unsigned PushOpc = EVM::getPUSHOpcode(Offset);
-      auto NewMI = BuildMI(MBB, MI, MI.getDebugLoc(),
-                           TII->get(EVM::getStackOpcode(PushOpc)));
-      if (PushOpc != EVM::PUSH0)
-        NewMI.addCImm(ConstantInt::get(MF.getFunction().getContext(), Offset));
-
+      TII->insertPush(Offset, MBB, MI, MI.getDebugLoc());
       MI.eraseFromParent();
     }
   }
@@ -147,12 +145,31 @@ bool EVMFinalizeStackFrames::runOnModule(Module &M) {
   MachineModuleInfo &MMI = getAnalysis<MachineModuleInfoWrapperPass>().getMMI();
   SmallVector, 8> ToReplaceFI;
-  // Calculate the stack size for each function.
-  for (Function &F : M) {
-    MachineFunction *MF = MMI.getMachineFunction(F);
-    if (!MF)
-      continue;
+  SmallVector<MachineFunction *> MFs;
+  for (Function &F : M.getFunctionList()) {
+    if (MachineFunction *MF = MMI.getMachineFunction(F))
+      MFs.push_back(MF);
+  }
+  if (MFs.empty())
+    return false;
+
+  // For spilled constants, we allocate stack slots and perform the spilling
+  // in the first machine function of the module layout, even though reloads
+  // may occur in any function. Because of that, we cannot use PUSH_FRAME;
+  // instead, we compute the actual offset in the heap corresponding to the
+  // start of the constant spill area.
+  EVMConstantSpiller ConstantSpiller(MFs);
+  if (ConstantSpiller.getSpillSize()) {
+    MachineFunction *MF = MFs.front();
+    uint64_t StackSize = calculateFrameObjectOffsets(*MF);
+    MF->getFrameInfo().CreateSpillStackObject(ConstantSpiller.getSpillSize(),
+                                              Align(32));
+    ConstantSpiller.emitSpills(StackRegionOffset + StackSize, *MF);
+  }
+
+  // Calculate the stack size for each function.
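+  // Note: the first function's offsets are recomputed here, so its frame
+  // size now also includes the spill area reserved above.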
+ for (MachineFunction *MF : MFs) { uint64_t StackSize = calculateFrameObjectOffsets(*MF); if (StackSize == 0) continue; diff --git a/llvm/lib/Target/EVM/EVMInstrInfo.cpp b/llvm/lib/Target/EVM/EVMInstrInfo.cpp index 2144c2102871..7f1155abb2bd 100644 --- a/llvm/lib/Target/EVM/EVMInstrInfo.cpp +++ b/llvm/lib/Target/EVM/EVMInstrInfo.cpp @@ -13,6 +13,9 @@ #include "EVMInstrInfo.h" #include "EVMMachineFunctionInfo.h" #include "MCTargetDesc/EVMMCTargetDesc.h" +#include "TargetInfo/EVMTargetInfo.h" +#include "llvm/IR/LLVMContext.h" + using namespace llvm; #define DEBUG_TYPE "evm-instr-info" @@ -292,3 +295,13 @@ bool EVMInstrInfo::reverseBranchCondition( Cond.front() = MachineOperand::CreateImm(!Cond.front().getImm()); return false; } + +MachineInstr *EVMInstrInfo::insertPush(const APInt &Imm, MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + const DebugLoc &DL) const { + unsigned Opc = EVM::getPUSHOpcode(Imm); + auto MI = BuildMI(MBB, I, DL, get(EVM::getStackOpcode(Opc))); + if (Opc != EVM::PUSH0) + MI.addCImm(ConstantInt::get(MI->getMF()->getFunction().getContext(), Imm)); + return MI; +} diff --git a/llvm/lib/Target/EVM/EVMInstrInfo.h b/llvm/lib/Target/EVM/EVMInstrInfo.h index 5b0699529bfa..06515a232c04 100644 --- a/llvm/lib/Target/EVM/EVMInstrInfo.h +++ b/llvm/lib/Target/EVM/EVMInstrInfo.h @@ -86,6 +86,13 @@ class EVMInstrInfo final : public EVMGenInstrInfo { bool reverseBranchCondition(SmallVectorImpl &Cond) const override; + /// EVMInstrInfo specific methods. + + /// Inserts a PUSH instruction appropriate for the bit width of the Imm. + MachineInstr *insertPush(const APInt &Imm, MachineBasicBlock &MBB, + MachineBasicBlock::iterator I, + const DebugLoc &DL) const; + /// TSFlags extraction static uint64_t getTSFlag(const MCInstrDesc &Desc, unsigned Pos, uint64_t Mask) { diff --git a/llvm/lib/Target/EVM/EVMInstrInfo.td b/llvm/lib/Target/EVM/EVMInstrInfo.td index a9a2b8701500..824a2a8ad1aa 100644 --- a/llvm/lib/Target/EVM/EVMInstrInfo.td +++ b/llvm/lib/Target/EVM/EVMInstrInfo.td @@ -857,6 +857,11 @@ def PUSH_FRAME : NI<(outs), (ins i256imm:$imm), [], true, "", 0, 0> { let isNotDuplicable = 1; } +def CONSTANT_RELOAD : NI<(outs), (ins i256imm:$imm), [], true, "", 0, 0> { + let isCodeGenOnly = 1; + let isNotDuplicable = 1; +} + // Define register PUSH* instructions let IsPush = 1 in diff --git a/llvm/lib/Target/EVM/EVMStackifyCodeEmitter.cpp b/llvm/lib/Target/EVM/EVMStackifyCodeEmitter.cpp index 5fdb3e3b11c7..ca8987ecf213 100644 --- a/llvm/lib/Target/EVM/EVMStackifyCodeEmitter.cpp +++ b/llvm/lib/Target/EVM/EVMStackifyCodeEmitter.cpp @@ -103,11 +103,7 @@ void EVMStackifyCodeEmitter::CodeEmitter::emitPOP() { void EVMStackifyCodeEmitter::CodeEmitter::emitConstant(const APInt &Val) { StackHeight += 1; - unsigned Opc = EVM::getPUSHOpcode(Val); - auto NewMI = BuildMI(*CurMBB, CurMBB->end(), DebugLoc(), - TII->get(EVM::getStackOpcode(Opc))); - if (Opc != EVM::PUSH0) - NewMI.addCImm(ConstantInt::get(MF.getFunction().getContext(), Val)); + auto *NewMI = TII->insertPush(Val, *CurMBB, CurMBB->end(), DebugLoc()); verify(NewMI); } diff --git a/llvm/lib/Target/EVM/EVMTargetMachine.cpp b/llvm/lib/Target/EVM/EVMTargetMachine.cpp index 47ebfa4a7d2d..dba135db007b 100644 --- a/llvm/lib/Target/EVM/EVMTargetMachine.cpp +++ b/llvm/lib/Target/EVM/EVMTargetMachine.cpp @@ -299,8 +299,6 @@ void EVMPassConfig::addPreEmitPass() { addPass(createEVMOptimizeLiveIntervals()); addPass(createEVMSingleUseExpression()); addPass(createEVMBPStackification()); - addPass(&StackSlotColoringID); - 
addPass(createEVMFinalizeStackFrames()); // Optimize branch instructions after stackification. This is done again // here, since EVMSplitCriticalEdges may introduce new BBs that could @@ -314,9 +312,12 @@ void EVMPassConfig::addPreEmitPass() { void EVMPassConfig::addPreEmitPass2() { addPass(createEVMLowerJumpUnless()); - addPass(createEVMConstantUnfolding()); - if (getOptLevel() != CodeGenOptLevel::None) + addPass(&StackSlotColoringID); + if (getOptLevel() != CodeGenOptLevel::None) { + addPass(createEVMConstantUnfolding()); addPass(createEVMPeepholePass()); + } + addPass(createEVMFinalizeStackFrames()); } TargetPassConfig *EVMTargetMachine::createPassConfig(PassManagerBase &PM) { diff --git a/llvm/test/CodeGen/EVM/O0-pipeline.ll b/llvm/test/CodeGen/EVM/O0-pipeline.ll index 02d77d8eeabf..7a5c956f5b83 100644 --- a/llvm/test/CodeGen/EVM/O0-pipeline.ll +++ b/llvm/test/CodeGen/EVM/O0-pipeline.ll @@ -64,15 +64,18 @@ target triple = "evm" ; CHECK-NEXT: Live Stack Slot Analysis ; CHECK-NEXT: Machine Block Frequency Analysis ; CHECK-NEXT: EVM backward propagation stackification -; CHECK-NEXT: Stack Slot Coloring -; CHECK-NEXT: EVM finalize stack frames -; CHECK-NEXT: FunctionPass Manager ; CHECK-NEXT: Machine Sanitizer Binary Metadata ; CHECK-NEXT: Lazy Machine Block Frequency Analysis ; CHECK-NEXT: Machine Optimization Remark Emitter ; CHECK-NEXT: Stack Frame Layout Analysis ; CHECK-NEXT: EVM Lower jump_unless -; CHECK-NEXT: EVM constant unfolding +; CHECK-NEXT: Slot index numbering +; CHECK-NEXT: Live Stack Slot Analysis +; CHECK-NEXT: MachineDominator Tree Construction +; CHECK-NEXT: Machine Natural Loop Construction +; CHECK-NEXT: Machine Block Frequency Analysis +; CHECK-NEXT: Stack Slot Coloring +; CHECK-NEXT: EVM finalize stack frames ; CHECK-NEXT: FunctionPass Manager ; CHECK-NEXT: Lazy Machine Block Frequency Analysis ; CHECK-NEXT: Machine Optimization Remark Emitter diff --git a/llvm/test/CodeGen/EVM/O3-pipeline.ll b/llvm/test/CodeGen/EVM/O3-pipeline.ll index 78f378a181d5..2735bb37a48f 100644 --- a/llvm/test/CodeGen/EVM/O3-pipeline.ll +++ b/llvm/test/CodeGen/EVM/O3-pipeline.ll @@ -153,12 +153,6 @@ target triple = "evm" ; CHECK-NEXT: Live Stack Slot Analysis ; CHECK-NEXT: Machine Block Frequency Analysis ; CHECK-NEXT: EVM backward propagation stackification -; CHECK-NEXT: Stack Slot Coloring -; CHECK-NEXT: EVM finalize stack frames -; CHECK-NEXT: FunctionPass Manager -; CHECK-NEXT: MachineDominator Tree Construction -; CHECK-NEXT: Machine Natural Loop Construction -; CHECK-NEXT: Machine Block Frequency Analysis ; CHECK-NEXT: Control Flow Optimizer ; CHECK-NEXT: Lazy Machine Block Frequency Analysis ; CHECK-NEXT: Tail Duplication @@ -167,9 +161,17 @@ target triple = "evm" ; CHECK-NEXT: Machine Optimization Remark Emitter ; CHECK-NEXT: Stack Frame Layout Analysis ; CHECK-NEXT: EVM Lower jump_unless +; CHECK-NEXT: Slot index numbering +; CHECK-NEXT: Live Stack Slot Analysis +; CHECK-NEXT: MachineDominator Tree Construction +; CHECK-NEXT: Machine Natural Loop Construction +; CHECK-NEXT: Machine Block Frequency Analysis +; CHECK-NEXT: Stack Slot Coloring ; CHECK-NEXT: EVM constant unfolding ; CHECK-NEXT: FunctionPass Manager ; CHECK-NEXT: EVM Peephole +; CHECK-NEXT: EVM finalize stack frames +; CHECK-NEXT: FunctionPass Manager ; CHECK-NEXT: Lazy Machine Block Frequency Analysis ; CHECK-NEXT: Machine Optimization Remark Emitter ; CHECK-NEXT: EVM Assembly diff --git a/llvm/test/CodeGen/EVM/branch-folding-clear-condition.mir b/llvm/test/CodeGen/EVM/branch-folding-clear-condition.mir index 
b2759e026498..4d6a3c30b29e 100644 --- a/llvm/test/CodeGen/EVM/branch-folding-clear-condition.mir +++ b/llvm/test/CodeGen/EVM/branch-folding-clear-condition.mir @@ -1,5 +1,5 @@ # RUN: llc -x mir -run-pass=branch-folder < %s | FileCheck %s -# RUN: llc -x mir -start-after=evm-finalize-stack-frames < %s | FileCheck --check-prefix=ASM %s +# RUN: llc -x mir -start-after=evm-backward-propagation-stackification < %s | FileCheck --check-prefix=ASM %s # Test that after branch folding, the condition at the top of # the stack is correctly cleared when a conditional branch is removed. diff --git a/llvm/test/CodeGen/EVM/constant-spilling.ll b/llvm/test/CodeGen/EVM/constant-spilling.ll new file mode 100644 index 000000000000..a000f4742fc6 --- /dev/null +++ b/llvm/test/CodeGen/EVM/constant-spilling.ll @@ -0,0 +1,77 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 +; RUN: llc -O3 --evm-stack-region-size=32 --evm-stack-region-offset=128 --evm-constant-reload-threshold=2 --enable-constant-spilling-without-unsafe-asm < %s | FileCheck %s +target datalayout = "E-p:256:256-i256:256:256-S256-a:256:256" +target triple = "evm-unknown-unknown" + +declare void @llvm.evm.return(ptr addrspace(1), i256) noreturn + +define void @entry() #1 { +; CHECK-LABEL: entry: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: PUSH32 0x4E487B7100000000000000000000000000000000000000000000000000000000 +; CHECK-NEXT: PUSH1 0x80 +; CHECK-NEXT: MSTORE +; CHECK-NEXT: PUSH1 0x80 +; CHECK-NEXT: MLOAD ; Reload Reuse +; CHECK-NEXT: PUSH0 +; CHECK-NEXT: RETURN +entry: + tail call void @llvm.evm.return(ptr addrspace(1) null, i256 35408467139433450592217433187231851964531694900788300625387963629091585785856) + unreachable +} + +define void @test_spill() #2 { +; CHECK-LABEL: test_spill: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: JUMPDEST +; CHECK-NEXT: PUSH1 0x80 +; CHECK-NEXT: MLOAD ; Reload Reuse +; CHECK-NEXT: PUSH0 +; CHECK-NEXT: RETURN +entry: + tail call void @llvm.evm.return(ptr addrspace(1) null, i256 35408467139433450592217433187231851964531694900788300625387963629091585785856) + unreachable +} + +; Check that -1 constant is not spilled. +define void @test_not_spiller() #2 { +; CHECK-LABEL: test_not_spiller: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: JUMPDEST +; CHECK-NEXT: PUSH0 +; CHECK-NEXT: NOT +; CHECK-NEXT: PUSH0 +; CHECK-NEXT: RETURN +entry: + tail call void @llvm.evm.return(ptr addrspace(1) null, i256 -1) + unreachable +} + +; Check that small constants are not spilled. +define void @test_not_spilled2() #2 { +; CHECK-LABEL: test_not_spilled2: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: JUMPDEST +; CHECK-NEXT: PUSH5 0x100000000 +; CHECK-NEXT: PUSH0 +; CHECK-NEXT: RETURN +entry: + tail call void @llvm.evm.return(ptr addrspace(1) null, i256 4294967296) + unreachable +} + +; Check that small constants are not spilled. +define void @test_not_spilled3() #2 { +; CHECK-LABEL: test_not_spilled3: +; CHECK: ; %bb.0: ; %entry +; CHECK-NEXT: JUMPDEST +; CHECK-NEXT: PUSH5 0x100000000 +; CHECK-NEXT: PUSH0 +; CHECK-NEXT: RETURN +entry: + tail call void @llvm.evm.return(ptr addrspace(1) null, i256 4294967296) + unreachable +} + +attributes #1 = { nofree noreturn nounwind "evm-entry-function" } +attributes #2 = { nofree nounwind noreturn }