diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 7ad5a60688..63353dc9c3 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -9,8 +9,6 @@ jobs: coverage: name: Hardhat / Unit Tests runs-on: ubuntu-latest - env: - NODE_OPTIONS: --max_old_space_size=6400 permissions: contents: write @@ -24,7 +22,8 @@ jobs: uses: ./.github/workflows/setup - name: Collect coverage - run: yarn test:coverage + run: NODE_OPTIONS="--max-old-space-size=10240" yarn test:coverage + timeout-minutes: 30 - name: Produce the coverage report uses: lidofinance/coverage-action@a94351baa279790f736655b1891178b1515594ea diff --git a/.github/workflows/tests-integration-hoodi.yml b/.github/workflows/tests-integration-hoodi.yml index 2615d471f6..2b2ad9bf7b 100644 --- a/.github/workflows/tests-integration-hoodi.yml +++ b/.github/workflows/tests-integration-hoodi.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 120 env: - NODE_OPTIONS: --max_old_space_size=7200 + NODE_OPTIONS: --max_old_space_size=10240 SKIP_GAS_REPORT: true SKIP_CONTRACT_SIZE: true SKIP_INTERFACES_CHECK: true diff --git a/.github/workflows/tests-integration-mainnet.yml b/.github/workflows/tests-integration-mainnet.yml index 836ce0b1a6..b6432fb802 100644 --- a/.github/workflows/tests-integration-mainnet.yml +++ b/.github/workflows/tests-integration-mainnet.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 120 env: - NODE_OPTIONS: --max_old_space_size=7200 + NODE_OPTIONS: --max_old_space_size=10240 SKIP_GAS_REPORT: true SKIP_CONTRACT_SIZE: true SKIP_INTERFACES_CHECK: true diff --git a/.github/workflows/tests-unit.yml b/.github/workflows/tests-unit.yml index 471024d4e5..10bd66dc8e 100644 --- a/.github/workflows/tests-unit.yml +++ b/.github/workflows/tests-unit.yml @@ -14,7 +14,7 @@ jobs: uses: ./.github/workflows/setup - name: Run unit tests - run: yarn test + run: NODE_OPTIONS="--max-old-space-size=10240" yarn test test_foundry_fuzzing: name: 
Foundry / Fuzzing & Invariants diff --git a/contracts/0.4.24/Lido.sol b/contracts/0.4.24/Lido.sol index 8fff6b8891..6613194e9e 100644 --- a/contracts/0.4.24/Lido.sol +++ b/contracts/0.4.24/Lido.sol @@ -11,37 +11,28 @@ import {ILidoLocator} from "../common/interfaces/ILidoLocator.sol"; import {StETHPermit} from "./StETHPermit.sol"; import {Versioned} from "./utils/Versioned.sol"; - -import {Math256} from "../common/lib/Math256.sol"; import {StakeLimitUtils, StakeLimitUnstructuredStorage, StakeLimitState} from "./lib/StakeLimitUtils.sol"; import {UnstructuredStorageExt} from "./utils/UnstructuredStorageExt.sol"; - -interface IBurnerMigration { - function migrate(address _oldBurner) external; -} +import {Math256} from "../common/lib/Math256.sol"; interface IStakingRouter { - function deposit(uint256 _depositsCount, uint256 _stakingModuleId, bytes _depositCalldata) external payable; - - function getStakingModuleMaxDepositsCount( - uint256 _stakingModuleId, - uint256 _maxDepositsValue - ) external view returns (uint256); - function getTotalFeeE4Precision() external view returns (uint16 totalFee); function TOTAL_BASIS_POINTS() external view returns (uint256); function getWithdrawalCredentials() external view returns (bytes32); - function getStakingFeeAggregateDistributionE4Precision() external view returns (uint16 modulesFee, uint16 treasuryFee); + function getStakingFeeAggregateDistributionE4Precision() + external + view + returns (uint16 modulesFee, uint16 treasuryFee); + + function receiveDepositableEther() external payable; } interface IWithdrawalQueue { function unfinalizedStETH() external view returns (uint256); - function isBunkerModeActive() external view returns (bool); - function finalize(uint256 _lastIdToFinalize, uint256 _maxShareRate) external payable; } @@ -53,6 +44,26 @@ interface IWithdrawalVault { function withdrawWithdrawals(uint256 _amount) external; } +interface IAccountingOracle { + /// @dev returns a tuple instead of a structure to avoid allocating 
memory + function getProcessingState() + external + view + returns ( + uint256 currentFrameRefSlot, + uint256 processingDeadlineTime, + bytes32 mainDataHash, + bool mainDataSubmitted, + bytes32 extraDataHash, + uint256 extraDataFormat, + bool extraDataSubmitted, + uint256 extraDataItemsCount, + uint256 extraDataItemsSubmitted + ); + function getLastProcessingRefSlot() external view returns (uint256); + function getCurrentFrame() external view returns (uint256 refSlot, uint256 refSlotTimestamp); +} + /** * @title Liquid staking pool implementation * @@ -87,8 +98,8 @@ contract Lido is Versioned, StETHPermit, AragonApp { bytes32 public constant RESUME_ROLE = 0x2fc10cc8ae19568712f7a176fb4978616a610650813c9d05326c34abb62749c7; // keccak256("RESUME_ROLE"); bytes32 public constant STAKING_PAUSE_ROLE = 0x84ea57490227bc2be925c684e2a367071d69890b629590198f4125a018eb1de8; // keccak256("STAKING_PAUSE_ROLE") bytes32 public constant STAKING_CONTROL_ROLE = 0xa42eee1333c0758ba72be38e728b6dadb32ea767de5b4ddbaea1dae85b1b051f; // keccak256("STAKING_CONTROL_ROLE") - bytes32 public constant UNSAFE_CHANGE_DEPOSITED_VALIDATORS_ROLE = - 0xe6dc5d79630c61871e99d341ad72c5a052bed2fc8c79e5a4480a7cd31117576c; // keccak256("UNSAFE_CHANGE_DEPOSITED_VALIDATORS_ROLE") + bytes32 public constant BUFFER_RESERVE_MANAGER_ROLE = + 0x33969636f1fbf3d7d062d4de4a08e7bd3c46606ec28b3a4398d2665be559b921; // keccak256("BUFFER_RESERVE_MANAGER_ROLE") uint256 private constant DEPOSIT_SIZE = 32 ether; @@ -99,8 +110,8 @@ contract Lido is Versioned, StETHPermit, AragonApp { /// |----- 128 bit -----|------ 128 bit -------| /// | external shares | total shares | /// keccak256("lido.StETH.totalAndExternalShares") - bytes32 internal constant TOTAL_AND_EXTERNAL_SHARES_POSITION = - TOTAL_SHARES_POSITION_LOW128; + bytes32 internal constant TOTAL_AND_EXTERNAL_SHARES_POSITION = TOTAL_SHARES_POSITION_LOW128; + /// @dev storage slot position for the Lido protocol contracts locator /// Since version 3, high 96 bits are used for 
the max external ratio BP /// |----- 96 bit -----|------ 160 bit -------| @@ -108,30 +119,63 @@ contract Lido is Versioned, StETHPermit, AragonApp { /// keccak256("lido.Lido.lidoLocatorAndMaxExternalRatio") bytes32 internal constant LOCATOR_AND_MAX_EXTERNAL_RATIO_POSITION = 0xd92bc31601d11a10411d08f59b7146d8a5915af253cde25f8e66b67beb4be223; + /// @dev amount of ether (on the current Ethereum side) buffered on this smart contract balance - /// Since version 3, high 128 bits are used for the deposited validators count - /// |------ 128 bit -------|------ 128 bit -------| - /// | deposited validators | buffered ether | - /// keccak256("lido.Lido.bufferedEtherAndDepositedValidators"); - bytes32 internal constant BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION = - 0xa84c096ee27e195f25d7b6c7c2a03229e49f1a2a5087e57ce7d7127707942fe3; - /// @dev total amount of ether on Consensus Layer (sum of all the balances of Lido validators) - // "beacon" in the `keccak256()` parameter is staying here for compatibility reason - /// Since version 3, high 128 bits are used for the CL validators count - /// |----- 128 bit -----|------ 128 bit -------| - /// | CL validators | CL balance | - /// keccak256("lido.Lido.clBalanceAndClValidators"); - bytes32 internal constant CL_BALANCE_AND_CL_VALIDATORS_POSITION = - 0xc36804a03ec742b57b141e4e5d8d3bd1ddb08451fd0f9983af8aaab357a78e2f; + /// and amount of ether deposited since last report + /// depositedPostReport lifecycle: + /// 1) increased by `withdrawDepositableEther()` as CL deposits are performed; + /// 2) resets on report processing via `processClStateUpdate()` + /// |------ 128 bit --------|----- 128 bit ------| + /// | deposited post report | buffered ether | + /// keccak256("lido.Lido.bufferedEtherAndDepositedPostReport"); + bytes32 internal constant BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION = + 0x81a11fa1111afa59b50051f60ccf604a39d96acb484dc467ad8eadb4a63f0a5f; + + /// @dev an internal counter accumulates the ETH deposited 
after the reporting period/frame changes + /// and unique identifier for the last deposit's frame (in this case, it's current refSlot) + /// keccak256("lido.Lido.depositedNextReportAndLastDepositNonce") + bytes32 internal constant DEPOSITED_NEXT_REPORT_AND_LAST_DEPOSIT_NONCE_POSITION = + 0x8d3ed945c7718edcdb639b1235f2bbe3fa81f4a6cec7a436d8ea13fbc502d957; + + /// @dev CL validators balance and deposited balance since last report + /// |----- 128 bit ------------|------ 128 bit -------| + /// | CL validators balance | CL pending balance | + /// keccak256("lido.Lido.clValidatorsBalanceAndClPendingBalance"); + bytes32 internal constant CL_VALIDATORS_BALANCE_AND_CL_PENDING_BALANCE_POSITION = + 0x096e465397f38e659238ccd5d5a2c434ced54a63fd8d694045bfb058ab9d8112; + + /// @dev number of initial seed deposits (incrementing counter), ex. deposited validators + /// keccak256("lido.Lido.seedDepositsCount"); + bytes32 internal constant SEED_DEPOSITS_COUNT_POSITION = + 0x3f0eaa2c0f16ff9775c078f3df30470d8c042317b24ad1defa240b1c3e10b238; + /// @dev storage slot position of the staking rate limit structure /// keccak256("lido.Lido.stakeLimit"); bytes32 internal constant STAKING_STATE_POSITION = 0xa3678de4a579be090bed1177e0a24f77cc29d181ac22fd7688aca344d8938015; + /// @dev storage slot position for the total amount of execution layer rewards received by Lido contract. /// keccak256("lido.Lido.totalELRewardsCollected"); bytes32 internal constant TOTAL_EL_REWARDS_COLLECTED_POSITION = 0xafe016039542d12eec0183bb0b1ffc2ca45b027126a494672fba4154ee77facb; + /// @dev Storage slot for deposits reserve. + /// Holds buffered ether that remains depositable even when withdrawals demand exists. 
+ /// Lifecycle: + /// 1) can be decreased by `setDepositsReserveTarget()` when target is lowered; + /// 2) consumed by `withdrawDepositableEther()` as CL deposits are performed; + /// 3) synced to target on report processing via `_updateBufferedEtherAllocation()` + /// keccak256("lido.Lido.depositsReserve") + bytes32 internal constant DEPOSITS_RESERVE_POSITION = + 0xda4fbe3b9cbd98dfae5dff538bbff4ba61f38979d4d7419bcd006f3e6250ec13; + + /// @dev Storage slot for deposits reserve target. + /// Stores governance-configured value that deposits reserve is restored to on each oracle report. + /// Set via `setDepositsReserveTarget()`, gated by `BUFFER_RESERVE_MANAGER_ROLE` + /// keccak256("lido.Lido.depositsReserveTarget") + bytes32 internal constant DEPOSITS_RESERVE_TARGET_POSITION = + 0x3d3e9bd6e90e5d1f1c6839835bcbe5746a47c9a013d1eae6e80c248264c06a81; + // Staking was paused (don't accept user's ether submits) event StakingPaused(); // Staking was resumed (accept user's ether submits) @@ -141,15 +185,18 @@ contract Lido is Versioned, StETHPermit, AragonApp { // Staking limit was removed event StakingLimitRemoved(); - // Emitted when validators number delivered by the oracle - event CLValidatorsUpdated(uint256 indexed reportTimestamp, uint256 preCLValidators, uint256 postCLValidators); + // Emitted when CL balances are updated by the oracle + event CLBalancesUpdated(uint256 indexed reportTimestamp, uint256 clValidatorsBalance, uint256 clPendingBalance); + // Emitted when CL pending balance is updated during deposits to CL + event DepositedPostReportUpdated(uint256 depositedPostReport); // Emitted when depositedValidators value is changed event DepositedValidatorsChanged(uint256 depositedValidators); // Emitted when oracle accounting report processed - // @dev `preCLBalance` is the balance of the validators on previous report - // plus the amount of ether that was deposited to the deposit contract since then + // @dev `preCLBalance` is actually the principal CL balance: 
the sum of the previous report's + // CL validators balance, CL pending balance, and deposited balance since the last report. + // The parameter name is kept for ABI backward compatibility. event ETHDistributed( uint256 indexed reportTimestamp, uint256 preCLBalance, // actually its preCLBalance + deposits due to compatibility reasons @@ -208,6 +255,15 @@ contract Lido is Versioned, StETHPermit, AragonApp { // Bad debt internalized event ExternalBadDebtInternalized(uint256 amountOfShares); + // Emitted when current deposits reserve is updated. + // Can be emitted from `withdrawDepositableEther()`, `collectRewardsAndProcessWithdrawals()`, + // and `setDepositsReserveTarget()` when target is lowered below current reserve. + event DepositsReserveSet(uint256 depositsReserve); + + // Emitted when deposits reserve target is set via `setDepositsReserveTarget()`. + // Emitted even if the new value equals the previous one + event DepositsReserveTargetSet(uint256 depositsReserveTarget); + /** * @notice Initializer function for scratch deploy of Lido contract * @@ -224,7 +280,7 @@ contract Lido is Versioned, StETHPermit, AragonApp { emit LidoLocatorSet(_lidoLocator); _initializeEIP712StETH(_eip712StETH); - _setContractVersion(3); + _setContractVersion(4); ILidoLocator locator = ILidoLocator(_lidoLocator); @@ -233,86 +289,53 @@ contract Lido is Versioned, StETHPermit, AragonApp { } /** - * @notice A function to finalize upgrade to v3 (from v2). Can be called only once - * - * For more details see https://github.com/lidofinance/lido-improvement-proposals/blob/develop/LIPS/lip-10.md - * @param _oldBurner The address of the old Burner contract to migrate from - * @param _contractsWithBurnerAllowances Contracts that have allowances for the old burner to be migrated - * @param _initialMaxExternalRatioBP Initial maximum external ratio in basis points + * @notice A function to finalize upgrade to v4 (from v3). 
Can be called only once */ - function finalizeUpgrade_v3( - address _oldBurner, - address[] _contractsWithBurnerAllowances, - uint256 _initialMaxExternalRatioBP - ) external { + function finalizeUpgrade_v4() external { require(hasInitialized(), "NOT_INITIALIZED"); - _checkContractVersion(2); - _setContractVersion(3); - - _migrateStorage_v2_to_v3(); - - _migrateBurner_v2_to_v3(_oldBurner, _contractsWithBurnerAllowances); - - _setMaxExternalRatioBP(_initialMaxExternalRatioBP); - } - - function _migrateStorage_v2_to_v3() internal { - // migrate storage to packed representation - bytes32 LIDO_LOCATOR_POSITION = keccak256("lido.Lido.lidoLocator"); - address locator = LIDO_LOCATOR_POSITION.getStorageAddress(); - assert(locator != address(0)); // sanity check - - _setLidoLocator(LIDO_LOCATOR_POSITION.getStorageAddress()); - LIDO_LOCATOR_POSITION.setStorageUint256(0); - - bytes32 BUFFERED_ETHER_POSITION = keccak256("lido.Lido.bufferedEther"); - _setBufferedEther(BUFFERED_ETHER_POSITION.getStorageUint256()); - BUFFERED_ETHER_POSITION.setStorageUint256(0); - - bytes32 DEPOSITED_VALIDATORS_POSITION = keccak256("lido.Lido.depositedValidators"); - _setDepositedValidators(DEPOSITED_VALIDATORS_POSITION.getStorageUint256()); - DEPOSITED_VALIDATORS_POSITION.setStorageUint256(0); - bytes32 CL_VALIDATORS_POSITION = keccak256("lido.Lido.beaconValidators"); - bytes32 CL_BALANCE_POSITION = keccak256("lido.Lido.beaconBalance"); - _setClBalanceAndClValidators( - CL_BALANCE_POSITION.getStorageUint256(), - CL_VALIDATORS_POSITION.getStorageUint256() - ); - CL_BALANCE_POSITION.setStorageUint256(0); - CL_VALIDATORS_POSITION.setStorageUint256(0); - - bytes32 TOTAL_SHARES_POSITION = keccak256("lido.StETH.totalShares"); - uint256 totalShares = TOTAL_SHARES_POSITION.getStorageUint256(); - assert(totalShares > 0); // sanity check - TOTAL_AND_EXTERNAL_SHARES_POSITION.setLowUint128(totalShares); - TOTAL_SHARES_POSITION.setStorageUint256(0); - } - - function _migrateBurner_v2_to_v3( - address 
_oldBurner, - address[] _contractsWithBurnerAllowances - ) internal { - require(_oldBurner != address(0), "OLD_BURNER_ADDRESS_ZERO"); - address burner = _burner(); - require(_oldBurner != burner, "OLD_BURNER_SAME_AS_NEW"); - - // migrate burner stETH balance - uint256 oldBurnerShares = _sharesOf(_oldBurner); - if (oldBurnerShares > 0) { - _transferShares(_oldBurner, burner, oldBurnerShares); - _emitTransferEvents(_oldBurner, burner, getPooledEthByShares(oldBurnerShares), oldBurnerShares); - } - - // initialize new burner with state from the old burner - IBurnerMigration(burner).migrate(_oldBurner); - - // migrating allowances - for (uint256 i = 0; i < _contractsWithBurnerAllowances.length; i++) { - uint256 oldAllowance = allowance(_contractsWithBurnerAllowances[i], _oldBurner); - _approve(_contractsWithBurnerAllowances[i], _oldBurner, 0); - _approve(_contractsWithBurnerAllowances[i], burner, oldAllowance); - } + /// @dev prevent migration if the last oracle report wasn't submitted, otherwise deposits + /// made after refSlot and before migration (i.e. 
report's tx) will be lost + IAccountingOracle oracle = _accountingOracle(); + (,,, bool mainDataSubmitted,,,,,) = oracle.getProcessingState(); + /// @dev pass in case of initial deploy + require(mainDataSubmitted || oracle.getLastProcessingRefSlot() == 0, "NO_REPORT"); + + _checkContractVersion(3); + _setContractVersion(4); + _migrateStorage_v3_to_v4(); + } + + function _migrateStorage_v3_to_v4() internal { + /// @dev storage slots used in v3 + // keccak256("lido.Lido.clBalanceAndClValidators") + bytes32 CL_BALANCE_AND_CL_VALIDATORS_POSITION = + 0xc36804a03ec742b57b141e4e5d8d3bd1ddb08451fd0f9983af8aaab357a78e2f; + // keccak256("lido.Lido.bufferedEtherAndDepositedValidators"); + bytes32 BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION = + 0xa84c096ee27e195f25d7b6c7c2a03229e49f1a2a5087e57ce7d7127707942fe3; + + (uint256 clValidatorsBalance, uint256 clValidators) = + CL_BALANCE_AND_CL_VALIDATORS_POSITION.getLowAndHighUint128(); + (uint256 bufferedEther, uint256 depositedValidators) = + BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.getLowAndHighUint128(); + + /// @dev convert ex-transientBalance to amount submitted to the Deposit contract + /// after the last accounting oracle report + uint256 depositedPostReport = (depositedValidators - clValidators) * DEPOSIT_SIZE; + _setBufferedEtherAndDepositedPostReport(bufferedEther, depositedPostReport); + /// @dev Since migration is only possible after a report and before the next frame begins, + /// the transient balance will apply to the current frame + (uint256 curNonce,) = _getCurrentFrame(); // get current refslot + _setDepositedNextReportAndLastDepositNonce(depositedPostReport, curNonce); + + /// @dev no pending balance at the moment of upgrade + _setClValidatorsBalanceAndClPendingBalance(clValidatorsBalance, 0); + _setSeedDepositsCount(depositedValidators); + + // wipe out the slots + CL_BALANCE_AND_CL_VALIDATORS_POSITION.setStorageUint256(0); + BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.setStorageUint256(0); } 
/** @@ -370,10 +393,8 @@ contract Lido is Versioned, StETHPermit, AragonApp { require(_maxStakeLimit <= uint96(-1) / 2, "TOO_LARGE_MAX_STAKE_LIMIT"); STAKING_STATE_POSITION.setStorageStakeLimitStruct( - STAKING_STATE_POSITION.getStorageStakeLimitStruct().setStakingLimit( - _maxStakeLimit, - _stakeLimitIncreasePerBlock - ) + STAKING_STATE_POSITION.getStorageStakeLimitStruct() + .setStakingLimit(_maxStakeLimit, _stakeLimitIncreasePerBlock) ); emit StakingLimitSet(_maxStakeLimit, _stakeLimitIncreasePerBlock); @@ -532,31 +553,120 @@ contract Lido is Versioned, StETHPermit, AragonApp { } /** - * @notice Unsafely change the deposited validators counter + * @return the amount of ether temporarily buffered on this contract balance + * @dev Buffered balance is kept on the contract from the moment the funds are received from user + * until the moment they are actually sent to the official Deposit contract or used to fulfill withdrawal requests + */ + function getBufferedEther() external view returns (uint256) { + return _getBufferedEther(); + } + + /** + * @notice Buffered ether split into reserve buckets. + * @param total Total buffered ether, equal to `getBufferedEther()`. + * @param unreserved Buffer remainder after both reserves are filled. Available for additional CL deposits + * beyond the deposits reserve + * @param depositsReserve Buffer portion available for CL deposits, protected from withdrawals demand. + * Resets on each oracle report, decreases via `withdrawDepositableEther()` + * @param withdrawalsReserve Buffer portion allocated to unfinalized withdrawals. Not depositable to CL. + * Zero when all withdrawal requests are finalized + */ + struct BufferedEtherAllocation { + uint256 total; + uint256 unreserved; + uint256 depositsReserve; + uint256 withdrawalsReserve; + } + + /** + * @notice Calculates buffered ether allocation across reserves + * @dev Buffer is split by priority: * - * The method unsafely changes deposited validator counter. 
- * Can be required when onboarding external validators to Lido - * (i.e., had deposited before and rotated their type-0x00 withdrawal credentials to Lido) + * 1. depositsReserve - per-frame CL deposit allowance, filled first + * 2. withdrawalsReserve - covers unfinalized withdrawal requests + * 3. unreserved - excess, available for additional CL deposits * - * @param _newDepositedValidators new value + * ┌─────────── Total Buffered Ether ───────────┐ + * ├────────────────────┬───────────────────────┼─────┬──────────────┐ + * │●●●●●●●●●●●●●●●●●●●●│●●●●●●●●●●●●●●●●●●●●●●●●○○○○○│○○○○○○○○○○○○○○│ + * ├────────────────────┼───────────────────────┼─────┼──────────────┤ + * └─ Deposits Reserve ─┼─ Withdrawals Reserve ─┘ ├─ Unreserved ─┘ + * └───── Unfinalized stETH ─────┘ * - * TODO: remove this with maxEB-friendly accounting + * ● - covered by Buffered Ether + * ○ - not covered by Buffered Ether + * + * depositsReserve = min(total, stored deposits reserve) + * withdrawalsReserve = min(total - depositsReserve, unfinalizedStETH) + * unreserved = total - depositsReserve - withdrawalsReserve */ - function unsafeChangeDepositedValidators(uint256 _newDepositedValidators) external { - _auth(UNSAFE_CHANGE_DEPOSITED_VALIDATORS_ROLE); + function _getBufferedEtherAllocation() internal view returns (BufferedEtherAllocation allocation) { + uint256 remaining = _getBufferedEther(); + allocation.total = remaining; + + allocation.depositsReserve = Math256.min(remaining, DEPOSITS_RESERVE_POSITION.getStorageUint256()); + remaining -= allocation.depositsReserve; - _setDepositedValidators(_newDepositedValidators); + allocation.withdrawalsReserve = Math256.min(remaining, _withdrawalQueue().unfinalizedStETH()); + remaining -= allocation.withdrawalsReserve; - emit DepositedValidatorsChanged(_newDepositedValidators); + allocation.unreserved = remaining; } /** - * @return the amount of ether temporarily buffered on this contract balance - * @dev Buffered balance is kept on the contract from the 
moment the funds are received from user - * until the moment they are actually sent to the official Deposit contract or used to fulfill withdrawal requests + * @notice Returns the currently effective deposits reserve — buffer portion available for CL deposits, protected + * from withdrawals demand + * @dev Capped by current buffered ether. See `_getBufferedEtherAllocation()` */ - function getBufferedEther() external view returns (uint256) { - return _getBufferedEther(); + function getDepositsReserve() external view returns (uint256 depositsReserve) { + return _getBufferedEtherAllocation().depositsReserve; + } + + /** + * @dev Stores new deposits reserve value and emits DepositsReserveSet event + */ + function _setDepositsReserve(uint256 _newDepositsReserve) internal { + DEPOSITS_RESERVE_POSITION.setStorageUint256(_newDepositsReserve); + emit DepositsReserveSet(_newDepositsReserve); + } + + /** + * @notice Returns the currently effective withdrawals reserve + * @dev This reserve is computed after deposits reserve is applied + * @return Amount reserved to satisfy unfinalized withdrawals + */ + function getWithdrawalsReserve() external view returns (uint256) { + return _getBufferedEtherAllocation().withdrawalsReserve; + } + + /** + * @notice Returns configured target for deposits reserve + * @return depositsReserveTarget Configured reserve target in wei + */ + function getDepositsReserveTarget() public view returns (uint256) { + return DEPOSITS_RESERVE_TARGET_POSITION.getStorageUint256(); + } + + /** + * @notice Sets deposits reserve target + * @dev Always updates target and emits DepositsReserveTargetSet + * If target is lowered below current reserve, reserve is reduced immediately + * If target is increased, reserve is not increased here and is synced on report processing via + * `_updateBufferedEtherAllocation()` + * @param _newDepositsReserveTarget New target value in wei + */ + function setDepositsReserveTarget(uint256 _newDepositsReserveTarget) external { + 
_auth(BUFFER_RESERVE_MANAGER_ROLE); + + DEPOSITS_RESERVE_TARGET_POSITION.setStorageUint256(_newDepositsReserveTarget); + emit DepositsReserveTargetSet(_newDepositsReserveTarget); + + uint256 currentDepositsReserve = DEPOSITS_RESERVE_POSITION.getStorageUint256(); + // Do not increase reserve mid-frame: this could reduce available ETH for withdrawals finalization + // relative to the report reference slot assumptions. Increases are applied on oracle report processing. + if (_newDepositsReserveTarget < currentDepositsReserve) { + _setDepositsReserve(_newDepositsReserveTarget); + } } /** @@ -597,6 +707,7 @@ contract Lido is Versioned, StETHPermit, AragonApp { } /** + * @dev DEPRECATED: Use getBalanceStats() for new integrations * @notice Get the key values related to the Consensus Layer side of the contract. * @return depositedValidators - number of deposited validators from Lido contract side * @return beaconValidators - number of Lido validators visible on Consensus Layer, reported by oracle @@ -607,8 +718,82 @@ contract Lido is Versioned, StETHPermit, AragonApp { view returns (uint256 depositedValidators, uint256 beaconValidators, uint256 beaconBalance) { - depositedValidators = _getDepositedValidators(); - (beaconBalance, beaconValidators) = _getClBalanceAndClValidators(); + depositedValidators = _getSeedDepositsCount(); + (uint256 clValidatorsBalance, uint256 clPendingBalance) = _getClValidatorsBalanceAndClPendingBalance(); + /// @dev Since there is now no gap between the deposit on EL and its observation on the CL layer, + /// for compatibility, beaconValidators = depositedValidators. 
+ /// @dev beaconBalance returned as sum of active and pending balances because this amounts + /// are visible on the CL side at moment of report + return (depositedValidators, depositedValidators, clValidatorsBalance.add(clPendingBalance)); + } + + /// @notice Returns current balance statistics + /// @return clValidatorsBalanceAtLastReport Sum of validator's active balances in wei + /// @return clPendingBalanceAtLastReport Sum of validator's pending deposits in wei + /// @return depositedSinceLastReport Deposits made since last oracle report + function getBalanceStats() + external + view + returns ( + uint256 clValidatorsBalanceAtLastReport, + uint256 clPendingBalanceAtLastReport, + uint256 depositedSinceLastReport, + uint256 depositedForCurrentReport + ) + { + (clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport) = _getClValidatorsBalanceAndClPendingBalance(); + + depositedSinceLastReport = _getDepositedPostReport(); + (depositedForCurrentReport,) = _getDepositedNextReportAdjusted(); + /// @dev depositedNextReport is always less than depositedPostReport, so we can safely subtract + depositedForCurrentReport = depositedSinceLastReport - depositedForCurrentReport; + } + + /** + * To accurately track the ETH that was deposited between the refSlot and the report transaction, we use the following + * approach: + * + * Data structure can be represented as: + * - lastNonce - last deposit refSlot + * - depositedPostReport - total sum of all deposits across all periods since the last successful report + * - depositedNextReport - sum of deposits within the current reporting period, to be included in the next report + * + * Flow diagram: + * NOW + * ┌── depositedPostReport ────────────────┐ ↓ + * │○○○○○○○○○○○○○○│○●●○○R○○○●○○●○│○○●●●○○●○●○○○○│○○●●○○●○○●○○○○│ + * ┆ lastReport-↑ currentRefSlot-↑└────⁠┬────┘ + * ┆ ┆ currentReportFrame-↓ ┆ └depositedNextReport + * ⁠║ frame X ⁠║ frame X+1 ⁠║ frame X+2 ⁠║ frame X+3 ⁠║ + * + * R - report transaction slot + * ● - slot with 
deposits + * ○ - empty slot + * ⁠║ - frame refSlot + * + * Logic: + * - On any read/write operation, we first retrieve currentNonce (currentRefSlot) + * - Whenever the nonce changes (i.e. the reporting period changes), we reset depositedNextReport to zero + * - To obtain the exact deposit amount for the reporting periods, we compute: depositedPostReport - depositedNextReport + * - On each deposit, both counters are incremented: depositedPostReport += amount and depositedNextReport += amount + * - At reporting time, deposits already accounted for in the report are excluded from depositedPostReport, leaving + * only the current period: depositedPostReport = depositedNextReport + */ + /// @dev read and adjust the `depositedNextReport` value according to the current frame + function _getDepositedNextReportAdjusted() internal view returns (uint256 depositedNextReport, uint256 curNonce) { + uint256 lastNonce; + (depositedNextReport, lastNonce) = _getDepositedNextReportAndLastDepositNonce(); + (curNonce,) = _getCurrentFrame(); // get current refSlot + if (curNonce != lastNonce) { + // treating all unsettled amounts as belonging to previous periods (aka nonces), + // i.e., as already settled (accounted in upcoming report) + depositedNextReport = 0; + } + } + + /// @dev get currentFrameRefSlot from oracle processing state + function _getCurrentFrame() internal view returns (uint256 refSlot, uint256 refSlotTimestamp) { + (refSlot, refSlotTimestamp) = _accountingOracle().getCurrentFrame(); } /** @@ -621,50 +806,70 @@ contract Lido is Versioned, StETHPermit, AragonApp { /** * @return the amount of ether in the buffer that can be deposited to the Consensus Layer - * @dev Takes into account unfinalized stETH required by WithdrawalQueue + * @dev Equals buffered ether minus withdrawals reserve from `_getBufferedEtherAllocation()` */ - function getDepositableEther() public view returns (uint256) { - uint256 bufferedEther = _getBufferedEther(); - uint256 withdrawalReserve = 
_withdrawalQueue().unfinalizedStETH(); - return bufferedEther > withdrawalReserve ? bufferedEther - withdrawalReserve : 0; + function getDepositableEther() external view returns (uint256) { + return _getDepositableEther(_getBufferedEtherAllocation()); } /** - * @notice Invoke a deposit call to the Staking Router contract and update buffered counters - * @param _maxDepositsCount max deposits count - * @param _stakingModuleId id of the staking module to be deposited - * @param _depositCalldata module calldata + * @notice Calculates depositable amount from precomputed buffer allocation + * @return Depositable amount, equal to `allocation.depositsReserve + allocation.unreserved` */ - function deposit(uint256 _maxDepositsCount, uint256 _stakingModuleId, bytes _depositCalldata) external { - ILidoLocator locator = _getLidoLocator(); - - require(msg.sender == locator.depositSecurityModule(), "APP_AUTH_DSM_FAILED"); - require(canDeposit(), "CAN_NOT_DEPOSIT"); + function _getDepositableEther(BufferedEtherAllocation allocation) internal pure returns (uint256) { + return allocation.depositsReserve + allocation.unreserved; + } - IStakingRouter stakingRouter = _stakingRouter(locator); - uint256 depositsCount = Math256.min( - _maxDepositsCount, - stakingRouter.getStakingModuleMaxDepositsCount(_stakingModuleId, getDepositableEther()) - ); + /** + * @dev Spends depositable buffer and updates stored deposits reserve accordingly. 
+ * Decreases stored deposits reserve by spent amount, bounded below by zero + */ + function _spendDepositableEther(uint256 _depositAmount) internal { + BufferedEtherAllocation memory allocation = _getBufferedEtherAllocation(); + uint256 depositableEther = _getDepositableEther(allocation); + require(_depositAmount <= depositableEther, "NOT_ENOUGH_ETHER"); + + /// @dev the requested amount will be sent to DepositContract, so we increment + /// depositedPostReport counter to keep _getInternalEther value correct + uint256 depositedPostReport = _getDepositedPostReport().add(_depositAmount); + _setBufferedEtherAndDepositedPostReport(allocation.total.sub(_depositAmount), depositedPostReport); + emit Unbuffered(_depositAmount); + + (uint256 depositedNextReport, uint256 curNonce) = _getDepositedNextReportAdjusted(); + depositedNextReport = depositedNextReport.add(_depositAmount); + _setDepositedNextReportAndLastDepositNonce(depositedNextReport, curNonce); + + uint256 storedDepositsReserve = DEPOSITS_RESERVE_POSITION.getStorageUint256(); + if (storedDepositsReserve > 0) { + _setDepositsReserve(storedDepositsReserve > _depositAmount ? storedDepositsReserve - _depositAmount : 0); + } + } - uint256 depositsValue; - if (depositsCount > 0) { - depositsValue = depositsCount.mul(DEPOSIT_SIZE); - /// @dev firstly update the local state of the contract to prevent a reentrancy attack, - /// even if the StakingRouter is a trusted contract. + /** + * @notice Withdraw `_amount` of buffer to Staking Router + * @dev Can be called only by the Staking Router contract + * @notice _seedDepositsCount - DEPRECATED, it is used only for backward compatibility + * + * @param _amount amount of ETH to withdraw + * @param _seedDepositsCount amount of seed deposits. 
In case of top up this value will be equal to 0 + */ + function withdrawDepositableEther(uint256 _amount, uint256 _seedDepositsCount) external { + require(canDeposit(), "CAN_NOT_DEPOSIT"); + IStakingRouter stakingRouter = _stakingRouter(); + _auth(address(stakingRouter)); + require(_amount != 0, "ZERO_AMOUNT"); - (uint256 bufferedEther, uint256 depositedValidators) = _getBufferedEtherAndDepositedValidators(); - depositedValidators = depositedValidators.add(depositsCount); + _spendDepositableEther(_amount); - _setBufferedEtherAndDepositedValidators(bufferedEther.sub(depositsValue), depositedValidators); - emit Unbuffered(depositsValue); - emit DepositedValidatorsChanged(depositedValidators); + if (_seedDepositsCount > 0) { + uint256 newSeedDepositsCount = _getSeedDepositsCount().add(_seedDepositsCount); + _setSeedDepositsCount(newSeedDepositsCount); + /// @dev event name is kept for backward compatibility + emit DepositedValidatorsChanged(newSeedDepositsCount); } - /// @dev transfer ether to StakingRouter and make a deposit at the same time. All the ether - /// sent to StakingRouter is counted as deposited. 
If StakingRouter can't deposit all - /// passed ether it MUST revert the whole transaction (never happens in normal circumstances) - stakingRouter.deposit.value(depositsValue)(depositsCount, _stakingModuleId, _depositCalldata); + /// @dev forward the requested amount of ether to the StakingRouter + stakingRouter.receiveDepositableEther.value(_amount)(); } /** @@ -740,16 +945,7 @@ contract Lido is Versioned, StETHPermit, AragonApp { _burnShares(msg.sender, _amountOfShares); uint256 stethAmount = getPooledEthByShares(_amountOfShares); - StakeLimitState.Data memory stakeLimitData = STAKING_STATE_POSITION.getStorageStakeLimitStruct(); - - /// NB: burning external shares must be allowed even when staking is paused to allow external ether withdrawals - if (stakeLimitData.isStakingLimitSet() && !stakeLimitData.isStakingPaused()) { - uint256 newStakeLimit = stakeLimitData.calculateCurrentStakeLimit() + stethAmount; - - STAKING_STATE_POSITION.setStorageStakeLimitStruct( - stakeLimitData.updatePrevStakeLimit(newStakeLimit) - ); - } + _increaseStakingLimit(stethAmount); // Historically, Lido contract does not emit Transfer to zero address events // for burning but emits SharesBurnt instead, so it's kept here for compatibility @@ -795,26 +991,29 @@ contract Lido is Versioned, StETHPermit, AragonApp { /** * @notice Process CL related state changes as a part of the report processing * @dev All data validation was done by Accounting and OracleReportSanityChecker + * @dev Replaces validator counting in v3 with direct balance tracking for EIP-7251 support * @param _reportTimestamp timestamp of the report - * @param _preClValidators number of validators in the previous CL state (for event compatibility) - * @param _reportClValidators number of validators in the current CL state - * @param _reportClBalance total balance of the current CL state + * @param _clValidatorsBalance Validators balance on the consensus layer + * @param _clPendingBalance Pending deposits balance on the 
consensus layer */ - function processClStateUpdate( - uint256 _reportTimestamp, - uint256 _preClValidators, - uint256 _reportClValidators, - uint256 _reportClBalance - ) external { + function processClStateUpdate(uint256 _reportTimestamp, uint256 _clValidatorsBalance, uint256 _clPendingBalance) + external + { _whenNotStopped(); _auth(_accounting()); - // Save the current CL balance and validators to - // calculate rewards on the next rebase - _setClBalanceAndClValidators(_reportClBalance, _reportClValidators); + (uint256 depositedNextReport, uint256 curNonce) = _getDepositedNextReportAdjusted(); + /// @dev just save adjusted depositedNextReport + _setDepositedNextReportAndLastDepositNonce(depositedNextReport, curNonce); + /// @dev Since `depositedPostReport` accumulates all deposits, including those that occurred + /// after `refSlot` but before the report, we must retain only the amount not + /// reflected in the report + _setDepositedPostReport(depositedNextReport); - emit CLValidatorsUpdated(_reportTimestamp, _preClValidators, _reportClValidators); - // cl balance change are logged in ETHDistributed event later + /// @dev new values of clValidatorsBalance and clPendingBalance should reflect all + /// deposits during the report frame + _setClValidatorsBalanceAndClPendingBalance(_clValidatorsBalance, _clPendingBalance); + emit CLBalancesUpdated(_reportTimestamp, _clValidatorsBalance, _clPendingBalance); } /** @@ -883,10 +1082,9 @@ contract Lido is Versioned, StETHPermit, AragonApp { // finalize withdrawals (send ether, assign shares for burning) if (_etherToLockOnWithdrawalQueue > 0) { - _withdrawalQueue(locator).finalize.value(_etherToLockOnWithdrawalQueue)( - _lastWithdrawalRequestToFinalize, - _withdrawalsShareRate - ); + _withdrawalQueue(locator) + .finalize + .value(_etherToLockOnWithdrawalQueue)(_lastWithdrawalRequestToFinalize, _withdrawalsShareRate); } uint256 postBufferedEther = _getBufferedEther() @@ -895,6 +1093,7 @@ contract Lido is Versioned, 
StETHPermit, AragonApp { .sub(_etherToLockOnWithdrawalQueue); // Sent to WithdrawalQueue _setBufferedEther(postBufferedEther); + _updateBufferedEtherAllocation(); emit ETHDistributed( _reportTimestamp, @@ -906,18 +1105,30 @@ contract Lido is Versioned, StETHPermit, AragonApp { ); } + /** + * @dev Syncs stored deposits reserve to configured target after oracle report processing + */ + function _updateBufferedEtherAllocation() internal { + uint256 depositsReserveTarget = getDepositsReserveTarget(); + uint256 depositsReserve = DEPOSITS_RESERVE_POSITION.getStorageUint256(); + + if (depositsReserve != depositsReserveTarget) { + _setDepositsReserve(depositsReserveTarget); + } + } + /** * @notice Emits the `TokenRebase` and `InternalShareRateUpdated` events - * @param _reportTimestamp timestamp of the refSlot block fro the report applied + * @param _reportTimestamp timestamp of the refSlot block for the report applied * @param _timeElapsed seconds since the previous applied report * @param _preTotalShares the total number of shares before the oracle report tx * @param _preTotalEther the total amount of ether before the oracle report tx * @param _postTotalShares the total number of shares after the oracle report tx * @param _postTotalEther the total amount of ether after the oracle report tx - * @param _postInternalShares the total number of internal shares before the oracle report tx + * @param _postInternalShares the total number of internal shares after the oracle report tx * @param _postInternalEther the total amount of internal ether after the oracle tx * @param _sharesMintedAsFees the number of shares minted to pay fees to Lido and StakingModules - * @dev these events are used to calculate protocol gross (without protocol fess deducted) and net APR (StETH APR) + * @dev these events are used to calculate protocol gross (without protocol fees deducted) and net APR (StETH APR) * * preShareRate = preTotalEther * 1e27 / preTotalShares * postShareRate = postTotalEther * 
1e27 / postTotalShares @@ -955,7 +1166,11 @@ contract Lido is Versioned, StETHPermit, AragonApp { /** * @notice Overrides default AragonApp behavior to disallow recovery. */ - function transferToVault(address /* _token */) external { + function transferToVault( + address /* _token */ + ) + external + { revert("NOT_SUPPORTED"); } @@ -964,7 +1179,7 @@ contract Lido is Versioned, StETHPermit, AragonApp { //////////////////////////////////////////////////////////////////////////// /** - * @notice DEPRECATED: Returns current withdrawal credentials of deposited validators + * @notice DEPRECATED: Returns current 0x01 withdrawal credentials of deposited validators * @dev DEPRECATED: use StakingRouter.getWithdrawalCredentials() instead */ function getWithdrawalCredentials() external view returns (bytes32) { @@ -1010,15 +1225,14 @@ contract Lido is Versioned, StETHPermit, AragonApp { IStakingRouter stakingRouter = _stakingRouter(); uint256 totalBasisPoints = stakingRouter.TOTAL_BASIS_POINTS(); uint256 totalFee = stakingRouter.getTotalFeeE4Precision(); - (uint256 treasuryFeeBasisPointsAbs, uint256 operatorsFeeBasisPointsAbs) = stakingRouter - .getStakingFeeAggregateDistributionE4Precision(); + (uint256 treasuryFeeBasisPointsAbs, uint256 operatorsFeeBasisPointsAbs) = + stakingRouter.getStakingFeeAggregateDistributionE4Precision(); insuranceFeeBasisPoints = 0; // explicitly set to zero treasuryFeeBasisPoints = uint16((treasuryFeeBasisPointsAbs * totalBasisPoints) / totalFee); operatorsFeeBasisPoints = uint16((operatorsFeeBasisPointsAbs * totalBasisPoints) / totalFee); } - /// @dev Process user deposit, mint liquid tokens and increase the pool buffer /// @param _referral address of referral. 
/// @return amount of StETH shares minted @@ -1039,20 +1253,14 @@ contract Lido is Versioned, StETHPermit, AragonApp { } /// @dev Get the total amount of ether controlled by the protocol internally - /// (buffered + CL balance of StakingRouter controlled validators + transient) + /// (buffered ether + CL validators balance + CL pending balance + deposited since last report) function _getInternalEther() internal view returns (uint256) { - (uint256 bufferedEther, uint256 depositedValidators) = _getBufferedEtherAndDepositedValidators(); - (uint256 clBalance, uint256 clValidators) = _getClBalanceAndClValidators(); - - // clValidators can never exceed depositedValidators. - assert(depositedValidators >= clValidators); - // the total base balance (multiple of 32) of validators in transient state, - // i.e. submitted to the official Deposit contract but not yet visible in the CL state. - uint256 transientEther = (depositedValidators - clValidators) * DEPOSIT_SIZE; + (uint256 bufferedEther, uint256 depositedPostReport) = _getBufferedEtherAndDepositedPostReport(); + (uint256 clValidatorsBalance, uint256 clPendingBalance) = _getClValidatorsBalanceAndClPendingBalance(); - return bufferedEther - .add(clBalance) - .add(transientEther); + // With balance-based accounting, we don't need to calculate transientEther + // as pending deposits are already included in clPendingBalance + return bufferedEther.add(clValidatorsBalance).add(clPendingBalance).add(depositedPostReport); } /// @dev Calculate the amount of ether controlled by external entities @@ -1105,9 +1313,7 @@ contract Lido is Versioned, StETHPermit, AragonApp { if (totalShares * maxRatioBP <= externalShares * TOTAL_BASIS_POINTS) return 0; - return - (totalShares * maxRatioBP - externalShares * TOTAL_BASIS_POINTS) / - (TOTAL_BASIS_POINTS - maxRatioBP); + return (totalShares * maxRatioBP - externalShares * TOTAL_BASIS_POINTS) / (TOTAL_BASIS_POINTS - maxRatioBP); } function _pauseStaking() internal { @@ -1154,6 +1360,16 @@ 
contract Lido is Versioned, StETHPermit, AragonApp { } } + function _increaseStakingLimit(uint256 _amount) internal { + StakeLimitState.Data memory stakeLimitData = STAKING_STATE_POSITION.getStorageStakeLimitStruct(); + /// NB: burning external shares must be allowed even when staking is paused to allow external ether withdrawals + if (stakeLimitData.isStakingLimitSet() && !stakeLimitData.isStakingPaused()) { + uint256 newStakeLimit = stakeLimitData.calculateCurrentStakeLimit() + _amount; + + STAKING_STATE_POSITION.setStorageStakeLimitStruct(stakeLimitData.updatePrevStakeLimit(newStakeLimit)); + } + } + /// @dev Bytecode size-efficient analog of the `auth(_role)` modifier /// @param _role Permission name function _auth(bytes32 _role) internal view { @@ -1165,12 +1381,8 @@ contract Lido is Versioned, StETHPermit, AragonApp { require(msg.sender == _address, "APP_AUTH_FAILED"); } - function _stakingRouter(ILidoLocator _locator) internal view returns (IStakingRouter) { - return IStakingRouter(_locator.stakingRouter()); - } - function _stakingRouter() internal view returns (IStakingRouter) { - return _stakingRouter(_getLidoLocator()); + return IStakingRouter(_getLidoLocator().stakingRouter()); } function _withdrawalQueue(ILidoLocator _locator) internal view returns (IWithdrawalQueue) { @@ -1201,6 +1413,10 @@ contract Lido is Versioned, StETHPermit, AragonApp { return _accounting(_getLidoLocator()); } + function _accountingOracle() internal view returns (IAccountingOracle) { + return IAccountingOracle(_getLidoLocator().accountingOracle()); + } + function _elRewardsVault(ILidoLocator _locator) internal view returns (ILidoExecutionLayerRewardsVault) { return ILidoExecutionLayerRewardsVault(_locator.elRewardsVault()); } @@ -1252,44 +1468,74 @@ contract Lido is Versioned, StETHPermit, AragonApp { return TOTAL_AND_EXTERNAL_SHARES_POSITION.getLowAndHighUint128(); } + // helpers: buffered ether and deposited ether since last report + function _getBufferedEther() internal view 
returns (uint256) { - return BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.getLowUint128(); + return BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION.getLowUint128(); + } + + function _getDepositedPostReport() internal view returns (uint256) { + return BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION.getHighUint128(); + } + + function _getBufferedEtherAndDepositedPostReport() internal view returns (uint256, uint256) { + return BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION.getLowAndHighUint128(); } function _setBufferedEther(uint256 _newBufferedEther) internal { - BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.setLowUint128(_newBufferedEther); + BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION.setLowUint128(_newBufferedEther); } - function _getDepositedValidators() internal view returns (uint256) { - return BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.getHighUint128(); + function _setDepositedPostReport(uint256 _newDepositedPostReport) internal { + BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION.setHighUint128(_newDepositedPostReport); } - function _setDepositedValidators(uint256 _newDepositedValidators) internal { - BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.setHighUint128(_newDepositedValidators); + function _setBufferedEtherAndDepositedPostReport(uint256 _newBufferedEther, uint256 _newDepositedPostReport) + internal + { + BUFFERED_ETHER_AND_DEPOSITED_POST_REPORT_POSITION.setLowAndHighUint128( + _newBufferedEther, _newDepositedPostReport + ); } - function _getBufferedEtherAndDepositedValidators() internal view returns (uint256, uint256) { - return BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.getLowAndHighUint128(); + function _getDepositedNextReportAndLastDepositNonce() internal view returns (uint256, uint256) { + return DEPOSITED_NEXT_REPORT_AND_LAST_DEPOSIT_NONCE_POSITION.getLowAndHighUint128(); } - function _setBufferedEtherAndDepositedValidators( - uint256 _newBufferedEther, - uint256 _newDepositedValidators - ) internal { - 
BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.setLowAndHighUint128( - _newBufferedEther, - _newDepositedValidators + function _setDepositedNextReportAndLastDepositNonce(uint256 _depositedNextReport, uint256 _lastDepositNonce) + internal + { + DEPOSITED_NEXT_REPORT_AND_LAST_DEPOSIT_NONCE_POSITION.setLowAndHighUint128( + _depositedNextReport, _lastDepositNonce ); } - function _getClBalanceAndClValidators() internal view returns (uint256, uint256) { - return CL_BALANCE_AND_CL_VALIDATORS_POSITION.getLowAndHighUint128(); + // helpers: [DEPRECATED] deposited validators count + + function _getSeedDepositsCount() internal view returns (uint256) { + return SEED_DEPOSITS_COUNT_POSITION.getLowUint128(); + } + + function _setSeedDepositsCount(uint256 _newSeedDepositsCount) internal { + SEED_DEPOSITS_COUNT_POSITION.setLowUint128(_newSeedDepositsCount); + } + + // helpers: CL validators and pending balances + + function _getClValidatorsBalanceAndClPendingBalance() internal view returns (uint256, uint256) { + return CL_VALIDATORS_BALANCE_AND_CL_PENDING_BALANCE_POSITION.getLowAndHighUint128(); } - function _setClBalanceAndClValidators(uint256 _newClBalance, uint256 _newClValidators) internal { - CL_BALANCE_AND_CL_VALIDATORS_POSITION.setLowAndHighUint128(_newClBalance, _newClValidators); + function _setClValidatorsBalanceAndClPendingBalance(uint256 _newClValidatorsBalance, uint256 _newClPendingBalance) + internal + { + CL_VALIDATORS_BALANCE_AND_CL_PENDING_BALANCE_POSITION.setLowAndHighUint128( + _newClValidatorsBalance, _newClPendingBalance + ); } + // --- + function _setLidoLocator(address _newLidoLocator) internal { LOCATOR_AND_MAX_EXTERNAL_RATIO_POSITION.setLowUint160(uint160(_newLidoLocator)); } diff --git a/contracts/0.4.24/template/LidoTemplate.sol b/contracts/0.4.24/template/LidoTemplate.sol index 92c01a16d4..e145189f53 100644 --- a/contracts/0.4.24/template/LidoTemplate.sol +++ b/contracts/0.4.24/template/LidoTemplate.sol @@ -614,8 +614,7 @@ contract LidoTemplate is 
IsContract { perms[1] = _state.lido.RESUME_ROLE(); perms[2] = _state.lido.STAKING_PAUSE_ROLE(); perms[3] = _state.lido.STAKING_CONTROL_ROLE(); - perms[4] = _state.lido.UNSAFE_CHANGE_DEPOSITED_VALIDATORS_ROLE(); - for (i = 0; i < 5; ++i) { + for (i = 0; i < 4; ++i) { _createPermissionForAgent(acl, _state.lido, perms[i], agent); } } diff --git a/contracts/0.8.25/CLValidatorVerifier.sol b/contracts/0.8.25/CLValidatorVerifier.sol new file mode 100644 index 0000000000..9fad56ddb1 --- /dev/null +++ b/contracts/0.8.25/CLValidatorVerifier.sol @@ -0,0 +1,108 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +import {GIndex, pack, concat} from "contracts/common/lib/GIndex.sol"; +import {SSZ} from "contracts/common/lib/SSZ.sol"; +import {BLS12_381} from "contracts/common/lib/BLS.sol"; +import {BeaconRootData, ValidatorWitness} from "contracts/common/interfaces/ValidatorWitness.sol"; + +/** + * @title CLValidatorVerifier + * @author Lido + * @notice + * + * Smart contract verifying CL data of validators + */ +abstract contract CLValidatorVerifier { + // BeaconBlockHeader: state_root field gindex + uint8 private constant STATE_ROOT_DEPTH = 3; + uint256 private constant STATE_ROOT_POSITION = 3; + GIndex public immutable GI_STATE_ROOT = pack((1 << STATE_ROOT_DEPTH) + STATE_ROOT_POSITION, STATE_ROOT_DEPTH); + + // Position (from the end) of parent(slot, proposerIndex) node inside concatenated proof + uint256 private constant SLOT_PROPOSER_PARENT_PROOF_OFFSET = 2; + // EIP-4788 system contract + address public constant BEACON_ROOTS = 0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02; + + // validators[0] gindex before/after fork layout change + GIndex public immutable GI_FIRST_VALIDATOR_PREV; + GIndex public immutable GI_FIRST_VALIDATOR_CURR; + uint64 public immutable PIVOT_SLOT; + + error InvalidSlot(); + error RootNotFound(); + + constructor(GIndex _gIFirstValidatorPrev, GIndex _gIFirstValidatorCurr, uint64 _pivotSlot) { + 
GI_FIRST_VALIDATOR_PREV = _gIFirstValidatorPrev; + GI_FIRST_VALIDATOR_CURR = _gIFirstValidatorCurr; + PIVOT_SLOT = _pivotSlot; + } + + /// @notice Proves validator[i] under the same EIP-4788 anchor, checks WC, checks active status + function _verifyValidator( + BeaconRootData calldata _beaconRootData, + ValidatorWitness calldata _vw, + uint256 _validatorIndex, + bytes32 _expectedWithdrawalCredentials + ) internal view virtual { + _verifySlot(_vw.proofValidator, _beaconRootData.slot, _beaconRootData.proposerIndex); + + bytes32 parentBlockRoot = _getParentBlockRoot(_beaconRootData.childBlockTimestamp); + + GIndex gIndexValidator = concat(GI_STATE_ROOT, _getValidatorGI(_validatorIndex, _beaconRootData.slot)); + bytes32 validatorLeaf = _validatorHashTreeRoot(_vw, _expectedWithdrawalCredentials); + SSZ.verifyProof({proof: _vw.proofValidator, root: parentBlockRoot, leaf: validatorLeaf, gI: gIndexValidator}); + } + + /// @dev SSZ hash_tree_root(Validator) computed from witness fields. + function _validatorHashTreeRoot(ValidatorWitness calldata _w, bytes32 _expectedWithdrawalCredentials) + internal + view + returns (bytes32) + { + bytes32[8] memory leaves; + leaves[0] = BLS12_381.pubkeyRoot(_w.pubkey); + leaves[1] = _expectedWithdrawalCredentials; + leaves[2] = SSZ.toLittleEndian(_w.effectiveBalance); + leaves[3] = SSZ.toLittleEndian(_w.slashed ? 
uint64(1) : 0); + leaves[4] = SSZ.toLittleEndian(_w.activationEligibilityEpoch); + leaves[5] = SSZ.toLittleEndian(_w.activationEpoch); + leaves[6] = SSZ.toLittleEndian(_w.exitEpoch); + leaves[7] = SSZ.toLittleEndian(_w.withdrawableEpoch); + + bytes32[4] memory l1; + l1[0] = BLS12_381.sha256Pair(leaves[0], leaves[1]); + l1[1] = BLS12_381.sha256Pair(leaves[2], leaves[3]); + l1[2] = BLS12_381.sha256Pair(leaves[4], leaves[5]); + l1[3] = BLS12_381.sha256Pair(leaves[6], leaves[7]); + + bytes32[2] memory l2; + l2[0] = BLS12_381.sha256Pair(l1[0], l1[1]); + l2[1] = BLS12_381.sha256Pair(l1[2], l1[3]); + + return BLS12_381.sha256Pair(l2[0], l2[1]); + } + + /// @dev Checks that (slot, proposerIndex) parent node is present in the same concatenated proof. + function _verifySlot(bytes32[] calldata _proof, uint64 _slot, uint64 _proposerIndex) internal view { + bytes32 parentSlotProposer = BLS12_381.sha256Pair(SSZ.toLittleEndian(_slot), SSZ.toLittleEndian(_proposerIndex)); + if (_proof[_proof.length - SLOT_PROPOSER_PARENT_PROOF_OFFSET] != parentSlotProposer) { + revert InvalidSlot(); + } + } + + /// @dev GIndex for Validator[i] given slot (fork-aware). + function _getValidatorGI(uint256 _offset, uint64 _provenSlot) internal view returns (GIndex) { + GIndex gI = _provenSlot < PIVOT_SLOT ? GI_FIRST_VALIDATOR_PREV : GI_FIRST_VALIDATOR_CURR; + return gI.shr(_offset); + } + + /// @dev Reads parent_beacon_block_root from EIP-4788 by timestamp. 
+ function _getParentBlockRoot(uint64 _childBlockTimestamp) internal view returns (bytes32) { + (bool success, bytes memory data) = BEACON_ROOTS.staticcall(abi.encode(_childBlockTimestamp)); + if (!success || data.length == 0) revert RootNotFound(); + return abi.decode(data, (bytes32)); + } +} diff --git a/contracts/0.8.25/TopUpGateway.sol b/contracts/0.8.25/TopUpGateway.sol new file mode 100644 index 0000000000..91eaa6725c --- /dev/null +++ b/contracts/0.8.25/TopUpGateway.sol @@ -0,0 +1,403 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +pragma solidity 0.8.25; + +import {TopUpData, BeaconRootData, ValidatorWitness} from "contracts/common/interfaces/TopUpWitness.sol"; +import {CLValidatorVerifier} from "./CLValidatorVerifier.sol"; +import {AccessControlEnumerableUpgradeable} from + "contracts/openzeppelin/5.2/upgradeable/access/extensions/AccessControlEnumerableUpgradeable.sol"; +import {GIndex} from "contracts/common/lib/GIndex.sol"; +import {WithdrawalCredentials} from "contracts/common/lib/WithdrawalCredentials.sol"; + +interface ILidoLocator { + function stakingRouter() external view returns (address); + function lido() external view returns (address); +} + +interface IStakingRouter { + function getStakingModuleWithdrawalCredentials(uint256 _stakingModuleId) external view returns (bytes32); + function canDeposit(uint256 _stakingModuleId) external view returns (bool); + function topUp( + uint256 _stakingModuleId, + uint256[] calldata _keyIndices, + uint256[] calldata _operatorIds, + bytes[] calldata _pubkeys, + uint256[] calldata _topUpLimits + ) external; +} + +interface ILido { + function canDeposit() external view returns (bool); +} + +/** + * @title TopUpGateway + * @author Lido + * @notice TopUpGateway is a contract that serves as the entry point for validator top-ups + */ +contract TopUpGateway is CLValidatorVerifier, AccessControlEnumerableUpgradeable { + using WithdrawalCredentials for 
bytes32; + + ILidoLocator internal immutable LOCATOR; + + struct Storage { + uint64 maxValidatorsPerTopUp; // 64 + uint32 lastTopUpTimestamp; // 32 + uint32 lastTopUpBlock; // 32 + uint16 minBlockDistance; // 16 + uint16 maxRootAge; // 16 + uint64 targetBalanceGwei; // 64 + uint64 minTopUpGwei; // 64 + } + + /// @dev Storage slot: keccak256(abi.encode(uint256(keccak256("lido.TopUpGateway.storage")) - 1)) & ~bytes32(uint256(0xff)) + bytes32 internal constant GATEWAY_STORAGE_POSITION = + 0x22e512057841e2bc1e6d80030c8bb8b4935377af2e64ba9bf8e6a3e88fb32200; + + uint256 internal constant PUBKEY_LENGTH = 48; + uint256 internal constant FAR_FUTURE_EPOCH = type(uint64).max; + uint256 public immutable SLOTS_PER_EPOCH; + + bytes32 public constant TOP_UP_ROLE = keccak256("TOP_UP_ROLE"); + bytes32 public constant MANAGE_LIMITS_ROLE = keccak256("MANAGE_LIMITS_ROLE"); + + constructor( + address _lidoLocator, + GIndex _gIFirstValidatorPrev, + GIndex _gIFirstValidatorCurr, + uint64 _pivotSlot, + uint256 _slotsPerEpoch + ) CLValidatorVerifier(_gIFirstValidatorPrev, _gIFirstValidatorCurr, _pivotSlot) { + LOCATOR = ILidoLocator(_lidoLocator); + SLOTS_PER_EPOCH = _slotsPerEpoch; + _disableInitializers(); + } + + /// @notice Initializes the TopUpGateway proxy with admin, rate limits, and top-up balance parameters. + /// @param _admin Address to receive DEFAULT_ADMIN_ROLE + /// @param _maxValidatorsPerTopUp Maximum number of validators per single topUp call + /// @param _minBlockDistance Minimum blocks between topUp calls + /// @param _maxRootAgeSec Maximum age (seconds) of beacon root relative to block.timestamp + /// @param _targetBalanceGwei Target validator balance ceiling after top-up (in Gwei). + /// Top-up amount = targetBalance - currentTotal. + /// @param _minTopUpGwei Minimum top-up that can be performed (in Gwei). If calculated top-up < minTopUp, returns 0. + /// Must be <= _targetBalanceGwei. 
+ /// + /// @dev Ethereum reference values (0x02 validators, MAX_EFFECTIVE_BALANCE = 2048 ETH): + /// _targetBalanceGwei = 2046.75 ETH (2048e9 - 1.25e9 Gwei) — leaves 1.25 ETH safety margin + /// _minTopUpGwei = 1 ETH (1e9 Gwei) — skip top-ups below 1 ETH + function initialize( + address _admin, + uint256 _maxValidatorsPerTopUp, + uint256 _minBlockDistance, + uint256 _maxRootAgeSec, + uint256 _targetBalanceGwei, + uint256 _minTopUpGwei + ) external initializer { + __AccessControlEnumerable_init(); + _grantRole(DEFAULT_ADMIN_ROLE, _admin); + _setMaxValidatorsPerTopUp(_maxValidatorsPerTopUp); + _setMinBlockDistance(_minBlockDistance); + _setMaxRootAge(_maxRootAgeSec); + _setTopUpBalanceLimits(_targetBalanceGwei, _minTopUpGwei); + } + + /** + * @notice Method verifying Merkle proofs on validators, making check of age of slot's proof + * and proceeding to top up validators via StakingRouter.topUp(stakingModuleId, keyIndices, operatorIds, pubkeysPacked, topUpLimitsGwei) + * @param _topUps TopUpData structure, containing validators' container fields, actual balances and pending deposits + * and Merkle proofs on inclusion of each container in Beacon State tree + * @dev Amount of validators limited by maxValidatorsPerTopUp; Between topUp calls should pass minBlockDistance. + * Only callable by accounts with TOP_UP_ROLE. 
+ */ + function topUp(TopUpData calldata _topUps) external onlyRole(TOP_UP_ROLE) { + Storage storage $ = _gatewayStorage(); + + uint256 validatorsCount = _topUps.validatorIndices.length; + if (validatorsCount == 0) revert WrongArrayLength(); + + if ( + _topUps.keyIndices.length != validatorsCount || _topUps.operatorIds.length != validatorsCount + || _topUps.validatorWitness.length != validatorsCount + || _topUps.pendingBalanceGwei.length != validatorsCount + ) { + revert WrongArrayLength(); + } + + // length should be less than or eq maxValidatorsPerTopUp + if (validatorsCount > $.maxValidatorsPerTopUp) { + revert MaxValidatorsPerTopUpExceeded(); + } + + // Check for duplicate validatorIndices (O(n^2) acceptable since bounded by maxValidatorsPerTopUp) + for (uint256 i; i < validatorsCount; ++i) { + for (uint256 j = i + 1; j < validatorsCount; ++j) { + if (_topUps.validatorIndices[i] == _topUps.validatorIndices[j]) { + revert DuplicateValidatorIndex(); + } + } + } + + // Distance is for flexibility in future to control top-up frequency + _requireBlockDistancePassed(); + + // Check proof age + // 0. _topUps.beaconRootData.childBlockTimestamp is newer than timestamp of last top up + // 1. _topUps.beaconRootData.childBlockTimestamp is not older than maxRootAge + _verifyRootAge(_topUps.beaconRootData); + + IStakingRouter stakingRouter = IStakingRouter(LOCATOR.stakingRouter()); + + // Find and validate withdrawalCredentials 0x02 + bytes32 withdrawalCredentials = stakingRouter.getStakingModuleWithdrawalCredentials(_topUps.moduleId); + _requireWithdrawalCredentials02(withdrawalCredentials); + + bytes[] memory pubkeys = new bytes[](validatorsCount); + + uint256[] memory topUpLimits = new uint256[](validatorsCount); + + // 1. Evaluate top-up limit based on current balance, pending deposits, and configured limits + // 2. 
Verify proof data through CLValidatorProofVerifier + unchecked { + for (uint256 i; i < validatorsCount; ++i) { + // For each validator + ValidatorWitness calldata vw = _topUps.validatorWitness[i]; + + if (vw.pubkey.length != PUBKEY_LENGTH) { + revert WrongPubkeyLength(); + } + + _verifyValidatorWasActivated(_topUps.beaconRootData.slot, vw); + + _verifyValidator(_topUps.beaconRootData, vw, _topUps.validatorIndices[i], withdrawalCredentials); + + pubkeys[i] = vw.pubkey; + + // calculate top up limit accounting for current balance and pending deposits + topUpLimits[i] = _evaluateTopUpLimit(vw, _topUps.pendingBalanceGwei[i]) * 1 gwei; + } + } + + // Proceed to StakingRouter + IStakingRouter(stakingRouter).topUp( + _topUps.moduleId, _topUps.keyIndices, _topUps.operatorIds, pubkeys, topUpLimits + ); + + _setLastTopUpData(); + } + + /** + * @notice Checks if top-up is possible for a given staking module + * @param _stakingModuleId Id of the staking module + * @return True if top-up is possible, false otherwise + * @dev Checks: module exists, module is active, block distance passed, Lido can deposit, and withdrawal credentials are 0x02 + */ + function canTopUp(uint256 _stakingModuleId) external view returns (bool) { + IStakingRouter stakingRouter = IStakingRouter(LOCATOR.stakingRouter()); + + if (!stakingRouter.canDeposit(_stakingModuleId)) return false; + if (!ILido(LOCATOR.lido()).canDeposit()) return false; + if (!_isBlockDistancePassed()) return false; + + bytes32 wc = stakingRouter.getStakingModuleWithdrawalCredentials(_stakingModuleId); + return wc.isType2(); + } + + /** + * @notice Returns the timestamp when last top up happened + */ + function getLastTopUpTimestamp() external view returns (uint256) { + return _gatewayStorage().lastTopUpTimestamp; + } + + /** + * @notice Returns the allowed amount of validators per top up + */ + function getMaxValidatorsPerTopUp() external view returns (uint256) { + return _gatewayStorage().maxValidatorsPerTopUp; + } + + /** + * 
@notice Returns the min block distance that should pass from last top up + */ + function getMinBlockDistance() external view returns (uint256) { + return _gatewayStorage().minBlockDistance; + } + + /** + * @notice Returns the maximum age (seconds) of beacon root relative to block.timestamp + */ + function getMaxRootAge() external view returns (uint256) { + return _gatewayStorage().maxRootAge; + } + + /** + * @notice Returns target validator balance ceiling after top-up (in Gwei) + */ + function getTargetBalanceGwei() external view returns (uint256) { + return _gatewayStorage().targetBalanceGwei; + } + + /** + * @notice Returns minimum top-up that can be performed (in Gwei). + */ + function getMinTopUpGwei() external view returns (uint256) { + return _gatewayStorage().minTopUpGwei; + } + + /** + * @notice Set max validators per top up value + * @param _newValue Max validators per top up value + */ + function setMaxValidatorsPerTopUp(uint256 _newValue) external onlyRole(MANAGE_LIMITS_ROLE) { + _setMaxValidatorsPerTopUp(_newValue); + } + + /** + * @notice Set min block distance + * @param _newValue Min block distance + */ + function setMinBlockDistance(uint256 _newValue) external onlyRole(MANAGE_LIMITS_ROLE) { + _setMinBlockDistance(_newValue); + } + + /** + * @notice Set targetBalanceGwei and minTopUpGwei values + * @param _targetBalanceGwei target validator balance ceiling after top-up (in Gwei) + * @param _minTopUpGwei minimum top-up that can be performed (in Gwei). 
+ */ + function setTopUpBalanceLimits(uint256 _targetBalanceGwei, uint256 _minTopUpGwei) + external + onlyRole(MANAGE_LIMITS_ROLE) + { + _setTopUpBalanceLimits(_targetBalanceGwei, _minTopUpGwei); + } + + /// @notice Sets the maximum allowed age of beacon root relative to current block timestamp + /// @param _newValue Maximum age in seconds + function setMaxRootAge(uint256 _newValue) external onlyRole(MANAGE_LIMITS_ROLE) { + _setMaxRootAge(_newValue); + } + + function _isBlockDistancePassed() internal view returns (bool) { + Storage storage $ = _gatewayStorage(); + return $.lastTopUpBlock == 0 || block.number - $.lastTopUpBlock >= $.minBlockDistance; + } + + function _requireBlockDistancePassed() internal view { + if (!_isBlockDistancePassed()) { + revert MinBlockDistanceNotMet(); + } + } + + function _requireWithdrawalCredentials02(bytes32 _wc) internal pure { + if (!_wc.isType2()) { + revert WrongWithdrawalCredentials(); + } + } + + function _setLastTopUpData() internal { + Storage storage $ = _gatewayStorage(); + $.lastTopUpTimestamp = uint32(block.timestamp); + $.lastTopUpBlock = uint32(block.number); + emit LastTopUpChanged(block.timestamp); + } + + function _setMaxRootAge(uint256 _newValue) internal { + if (_newValue == 0) revert ZeroValue(); + if (_newValue > type(uint16).max) revert TooLargeValue(); + _gatewayStorage().maxRootAge = uint16(_newValue); + + emit MaxRootAgeChanged(_newValue); + } + + function _setMaxValidatorsPerTopUp(uint256 _newValue) internal { + if (_newValue == 0) revert ZeroValue(); + if (_newValue > type(uint64).max) revert TooLargeValue(); + _gatewayStorage().maxValidatorsPerTopUp = uint64(_newValue); + emit MaxValidatorsPerTopUpChanged(_newValue); + } + + function _setMinBlockDistance(uint256 _newValue) internal { + if (_newValue == 0) revert ZeroValue(); + if (_newValue > type(uint16).max) revert TooLargeValue(); + _gatewayStorage().minBlockDistance = uint16(_newValue); + emit MinBlockDistanceChanged(_newValue); + } + + function 
_setTopUpBalanceLimits(uint256 _targetBalanceGwei, uint256 _minTopUpGwei) internal { + if (_targetBalanceGwei == 0 || _minTopUpGwei == 0) revert ZeroValue(); + if (_targetBalanceGwei > type(uint64).max || _minTopUpGwei > type(uint64).max) revert TooLargeValue(); + if (_minTopUpGwei > _targetBalanceGwei) revert MinTopUpExceedsTarget(); + + Storage storage $ = _gatewayStorage(); + $.targetBalanceGwei = uint64(_targetBalanceGwei); + $.minTopUpGwei = uint64(_minTopUpGwei); + emit TopUpBalanceLimitsChanged(_targetBalanceGwei, _minTopUpGwei); + } + + function _verifyRootAge(BeaconRootData calldata _beaconRootData) internal view { + if (block.timestamp > _beaconRootData.childBlockTimestamp + _gatewayStorage().maxRootAge) { + revert RootIsTooOld(); + } + + if (_beaconRootData.childBlockTimestamp <= _gatewayStorage().lastTopUpTimestamp) revert RootPrecedesLastTopUp(); + } + + function _verifyValidatorWasActivated(uint64 _slot, ValidatorWitness calldata _w) internal view { + // header slot epoch + uint64 epoch = uint64(_slot / SLOTS_PER_EPOCH); + // Validator should be activated earlier than current epoch + if (_w.activationEpoch >= epoch) revert ValidatorIsNotActivated(); + } + + function _evaluateTopUpLimit(ValidatorWitness calldata _validator, uint256 _pendingBalanceGwei) + internal + view + returns (uint256) + { + if ( + _validator.exitEpoch != FAR_FUTURE_EPOCH || _validator.slashed + || _validator.withdrawableEpoch != FAR_FUTURE_EPOCH + ) { + return 0; + } + + Storage storage $ = _gatewayStorage(); + uint256 currentTotal = _validator.effectiveBalance + _pendingBalanceGwei; + if (currentTotal >= $.targetBalanceGwei) return 0; + + uint256 topUpLimit = $.targetBalanceGwei - currentTotal; + if (topUpLimit < $.minTopUpGwei) return 0; + + return topUpLimit; + } + + function _gatewayStorage() internal pure returns (Storage storage $) { + bytes32 position = GATEWAY_STORAGE_POSITION; + assembly ("memory-safe") { + $.slot := position + } + } + + event 
MaxValidatorsPerTopUpChanged(uint256 newValue); + event MinBlockDistanceChanged(uint256 newValue); + event LastTopUpChanged(uint256 newValue); + event MaxRootAgeChanged(uint256 newValue); + event TopUpBalanceLimitsChanged(uint256 targetBalanceGwei, uint256 minTopUpGwei); + + error ZeroValue(); + error TooLargeValue(); + error RootIsTooOld(); + error RootPrecedesLastTopUp(); + error WrongArrayLength(); + error MaxValidatorsPerTopUpExceeded(); + error WrongWithdrawalCredentials(); + error WrongPubkeyLength(); + error MinBlockDistanceNotMet(); + error DuplicateValidatorIndex(); + error ValidatorIsNotActivated(); + error MinTopUpExceedsTarget(); +} diff --git a/contracts/0.8.25/consolidation/ConsolidationBus.sol b/contracts/0.8.25/consolidation/ConsolidationBus.sol new file mode 100644 index 0000000000..0a1c026058 --- /dev/null +++ b/contracts/0.8.25/consolidation/ConsolidationBus.sol @@ -0,0 +1,434 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +pragma solidity 0.8.25; + +import { + AccessControlEnumerableUpgradeable +} from "contracts/openzeppelin/5.2/upgradeable/access/extensions/AccessControlEnumerableUpgradeable.sol"; + +import {IPredepositGuarantee} from "contracts/0.8.25/vaults/interfaces/IPredepositGuarantee.sol"; + +interface IConsolidationGateway { + struct ConsolidationWitnessGroup { + bytes[] sourcePubkeys; + IPredepositGuarantee.ValidatorWitness targetWitness; + } + + function addConsolidationRequests( + ConsolidationWitnessGroup[] calldata groups, + address refundRecipient + ) external payable; +} + +/** + * @title ConsolidationBus + * @notice Message Bus for consolidation requests that decouples request submission from fee payment. + * + * The workflow: + * 1. Admins register/unregister publishers via grant/revoke PUBLISH_ROLE + * 2. Registered publishers add consolidation requests (PUBLISH_ROLE) + * 3. 
Executor bot executes batches, paying the required ETH fee + * The bus forwards the batch to ConsolidationGateway + * 4. Optional REMOVE_ROLE can remove batches from the pending queue + */ +contract ConsolidationBus is AccessControlEnumerableUpgradeable { + uint256 internal constant PUBKEY_LENGTH = 48; + + /** + * @notice Thrown when an invalid zero value is passed + * @param name Name of the argument that was zero + */ + error ZeroArgument(string name); + + /** + * @notice Thrown when attempting to set the admin address to zero + */ + error AdminCannotBeZero(); + + /** + * @notice Thrown when batch is empty + */ + error EmptyBatch(); + + /** + * @notice Thrown when attempting to remove an empty list of batch hashes + */ + error EmptyBatchHashes(); + + /** + * @notice Thrown when a source group has zero elements + * @param groupIndex Index of the empty group + */ + error EmptyGroup(uint256 groupIndex); + + /** + * @notice Thrown when batch size exceeds the limit + * @param size Actual batch size + * @param limit Maximum allowed batch size + */ + error BatchTooLarge(uint256 size, uint256 limit); + + /** + * @notice Thrown when the number of groups in a batch exceeds the limit + * @param groupsCount Actual number of groups + * @param limit Maximum allowed number of groups + */ + error TooManyGroups(uint256 groupsCount, uint256 limit); + + /** + * @notice Thrown when maxGroupsInBatch exceeds batchSize + * @param maxGroupsInBatch The max groups in batch value + * @param batchSizeLimit The batch size limit value + */ + error MaxGroupsExceedsBatchSize(uint256 maxGroupsInBatch, uint256 batchSizeLimit); + + /** + * @notice Thrown when attempting to add a batch that is already pending execution + * @param batchHash Hash of the batch that already exists in the pending queue + */ + error BatchAlreadyPending(bytes32 batchHash); + + /** + * @notice Thrown when batch is not found in storage + * @param batchHash Hash of the missing batch + */ + error BatchNotFound(bytes32 
batchHash); + + /** + * @notice Thrown when source and target pubkeys are the same + * @param index Index of the invalid pair in the batch + */ + error SourceEqualsTarget(uint256 index); + + /** + * @notice Thrown when target pubkey length is invalid + * @param groupIndex Index of the group with invalid target pubkey + * @param length Actual pubkey length in bytes + */ + error InvalidTargetPubkeyLength(uint256 groupIndex, uint256 length); + + /** + * @notice Thrown when source pubkey length is invalid + * @param groupIndex Index of the group with invalid source pubkey + * @param sourceIndex Index of the source pubkey inside the group + * @param length Actual pubkey length in bytes + */ + error InvalidSourcePubkeyLength(uint256 groupIndex, uint256 sourceIndex, uint256 length); + + /** + * @notice Thrown when attempting to execute a batch before the execution delay has passed + * @param currentTime Current block timestamp + * @param executeAfter Earliest timestamp at which the batch can be executed + */ + error ExecutionDelayNotPassed(uint256 currentTime, uint256 executeAfter); + + /** + * @notice Emitted when the batch size limit is updated + * @param newLimit New batch size limit + */ + event BatchLimitUpdated(uint256 newLimit); + + /** + * @notice Emitted when the max groups in batch limit is updated + * @param newLimit New max groups in batch limit + */ + event MaxGroupsInBatchUpdated(uint256 newLimit); + + /** + * @notice Emitted when consolidation requests are added + * @param publisher Address of the publisher who added the requests + * @param batchData Encoded batch data (abi.encode(groups)) + */ + event RequestsAdded(address indexed publisher, bytes batchData); + + /** + * @notice Emitted when consolidation requests are executed + * @param batchHash Hash of the executed batch + * @param feePaid Amount of ETH paid for the execution + */ + event RequestsExecuted(bytes32 indexed batchHash, uint256 feePaid); + + /** + * @notice Emitted when batches are removed + 
* @param batchHashes Array of removed batch hashes + */ + event BatchesRemoved(bytes32[] batchHashes); + + /** + * @notice Emitted when the execution delay is updated + * @param newDelay New execution delay in seconds + */ + event ExecutionDelayUpdated(uint256 newDelay); + + bytes32 public constant MANAGE_ROLE = keccak256("MANAGE_ROLE"); + bytes32 public constant PUBLISH_ROLE = keccak256("PUBLISH_ROLE"); + bytes32 public constant REMOVE_ROLE = keccak256("REMOVE_ROLE"); + + struct ConsolidationGroup { + bytes[] sourcePubkeys; + bytes targetPubkey; + } + + struct BatchInfo { + address publisher; + uint64 addedAt; + } + + IConsolidationGateway internal immutable CONSOLIDATION_GATEWAY; + + uint256 internal _batchSize; + uint256 internal _maxGroupsInBatch; + uint256 internal _executionDelay; + mapping(bytes32 batchHash => BatchInfo info) internal _pendingBatches; + + constructor(address consolidationGateway) { + if (consolidationGateway == address(0)) revert ZeroArgument("consolidationGateway"); + + CONSOLIDATION_GATEWAY = IConsolidationGateway(consolidationGateway); + + _disableInitializers(); + } + + /// @notice Initializes the contract. + /// @param admin Lido DAO Aragon agent contract address. + /// @dev Proxy initialization method. 
+ function initialize( + address admin, + uint256 initialBatchSize, + uint256 initialMaxGroupsInBatch, + uint256 initialExecutionDelay + ) external initializer { + if (admin == address(0)) revert AdminCannotBeZero(); + + _grantRole(DEFAULT_ADMIN_ROLE, admin); + _grantRole(MANAGE_ROLE, admin); + _grantRole(REMOVE_ROLE, admin); + + _setBatchSize(initialBatchSize); + _setMaxGroupsInBatch(initialMaxGroupsInBatch); + _setExecutionDelay(initialExecutionDelay); + } + + /** + * @notice Sets the maximum batch size limit + * @param limit New batch size limit + * @dev Reverts if caller does not have MANAGE_ROLE + */ + function setBatchSize(uint256 limit) external onlyRole(MANAGE_ROLE) { + _setBatchSize(limit); + } + + /** + * @notice Sets the maximum number of groups allowed in a batch + * @param limit New max groups in batch limit + * @dev Reverts if caller does not have MANAGE_ROLE + */ + function setMaxGroupsInBatch(uint256 limit) external onlyRole(MANAGE_ROLE) { + _setMaxGroupsInBatch(limit); + } + + /** + * @notice Sets the execution delay in seconds between adding and executing a batch + * @param delay New execution delay in seconds (0 means no delay) + * @dev Reverts if caller does not have MANAGE_ROLE + * @dev The execution delay is not snapshotted per batch + * Changes to this parameter apply retroactively to all pending batches + * MANAGE_ROLE holders are trusted + */ + function setExecutionDelay(uint256 delay) external onlyRole(MANAGE_ROLE) { + _setExecutionDelay(delay); + } + + /** + * @notice Removes batches from the queue + * @param batchHashes Array of batch hashes to remove + * @dev Reverts if caller does not have REMOVE_ROLE + * @dev Reverts if batchHashes is empty + * @dev Reverts if any batch is not found or already executed + */ + function removeBatches(bytes32[] calldata batchHashes) external onlyRole(REMOVE_ROLE) { + if (batchHashes.length == 0) revert EmptyBatchHashes(); + + for (uint256 i = 0; i < batchHashes.length; ++i) { + bytes32 batchHash = 
batchHashes[i]; + + if (_pendingBatches[batchHash].publisher == address(0)) revert BatchNotFound(batchHash); + + delete _pendingBatches[batchHash]; + } + emit BatchesRemoved(batchHashes); + } + + // ============== + // View methods + // ============== + + /** + * @notice Returns the current batch size limit + * @return Current maximum batch size + */ + function batchSize() external view returns (uint256) { + return _batchSize; + } + + /** + * @notice Returns the maximum number of groups allowed in a batch + * @return Current max groups in batch limit + */ + function maxGroupsInBatch() external view returns (uint256) { + return _maxGroupsInBatch; + } + + /** + * @notice Returns the current execution delay in seconds + * @return Current execution delay + */ + function executionDelay() external view returns (uint256) { + return _executionDelay; + } + + /** + * @notice Returns the address of the ConsolidationGateway + * @return Address of the ConsolidationGateway contract + */ + function getConsolidationGateway() external view returns (address) { + return address(CONSOLIDATION_GATEWAY); + } + + /** + * @notice Returns the batch info for a pending batch + * @param batchHash Hash of the batch to check + * @return Batch info struct with publisher address and addedAt timestamp (zero values if batch is not in queue) + */ + function getBatchInfo(bytes32 batchHash) external view returns (BatchInfo memory) { + return _pendingBatches[batchHash]; + } + + // =============== + // Publisher API + // =============== + + /** + * @notice Adds grouped consolidation requests to the queue + * @param groups Array of consolidation groups, where each group contains source pubkeys and a target pubkey + * @dev The same batch can be submitted again after it has been executed. 
+ * @dev Reverts if: + * - Caller does not have PUBLISH_ROLE + * - Batch is empty + * - Any group is empty + * - Total batch size exceeds limit + * - Any source or target pubkey length is not 48 bytes + * - Any source pubkey equals its corresponding target pubkey + * - Batch already exists + */ + function addConsolidationRequests(ConsolidationGroup[] calldata groups) external onlyRole(PUBLISH_ROLE) { + uint256 groupsCount = groups.length; + if (groupsCount == 0) revert EmptyBatch(); + + uint256 maxGroups = _maxGroupsInBatch; + if (groupsCount > maxGroups) revert TooManyGroups(groupsCount, maxGroups); + + uint256 totalCount = 0; + for (uint256 i = 0; i < groupsCount; ++i) { + uint256 groupSize = groups[i].sourcePubkeys.length; + if (groupSize == 0) revert EmptyGroup(i); + totalCount += groupSize; + } + + uint256 limit = _batchSize; + if (totalCount > limit) revert BatchTooLarge(totalCount, limit); + + for (uint256 i = 0; i < groupsCount; ++i) { + bytes calldata targetPubkey = groups[i].targetPubkey; + if (targetPubkey.length != PUBKEY_LENGTH) { + revert InvalidTargetPubkeyLength(i, targetPubkey.length); + } + + bytes32 targetHash = keccak256(targetPubkey); + bytes[] calldata group = groups[i].sourcePubkeys; + for (uint256 j = 0; j < group.length; ++j) { + bytes calldata sourcePubkey = group[j]; + if (sourcePubkey.length != PUBKEY_LENGTH) { + revert InvalidSourcePubkeyLength(i, j, sourcePubkey.length); + } + + if (keccak256(sourcePubkey) == targetHash) { + revert SourceEqualsTarget(i); + } + } + } + + bytes memory encodedBatch = abi.encode(groups); + + bytes32 batchHash = keccak256(encodedBatch); + + if (_pendingBatches[batchHash].publisher != address(0)) revert BatchAlreadyPending(batchHash); + + _pendingBatches[batchHash] = BatchInfo(msg.sender, uint64(block.timestamp)); + + emit RequestsAdded(msg.sender, encodedBatch); + } + + // ============== + // Executor API + // ============== + + /** + * @notice Executes a batch of grouped consolidation requests + * @param 
groups Array of consolidation witness groups, each containing source pubkeys and a target validator witness + * @dev Forwards the batch to ConsolidationGateway with msg.value as fee + * @dev Reverts if: + * - Batch was not added or was already executed/removed + */ + function executeConsolidation(IConsolidationGateway.ConsolidationWitnessGroup[] calldata groups) external payable { + // Reconstruct ConsolidationGroup[] to compute the batch hash that matches the publisher's submission + ConsolidationGroup[] memory publisherGroups = new ConsolidationGroup[](groups.length); + for (uint256 i = 0; i < groups.length; ++i) { + publisherGroups[i] = ConsolidationGroup({ + sourcePubkeys: groups[i].sourcePubkeys, + targetPubkey: groups[i].targetWitness.pubkey + }); + } + + bytes32 batchHash = keccak256(abi.encode(publisherGroups)); + + BatchInfo memory batch = _pendingBatches[batchHash]; + if (batch.publisher == address(0)) revert BatchNotFound(batchHash); + + uint256 executeAfter = uint256(batch.addedAt) + _executionDelay; + if (block.timestamp < executeAfter) revert ExecutionDelayNotPassed(block.timestamp, executeAfter); + + delete _pendingBatches[batchHash]; + + CONSOLIDATION_GATEWAY.addConsolidationRequests{value: msg.value}(groups, msg.sender); + + emit RequestsExecuted(batchHash, msg.value); + } + + // ================== + // Internal methods + // ================== + + function _setBatchSize(uint256 limit) internal { + if (limit == 0) revert ZeroArgument("batchSizeLimit"); + uint256 maxGroups = _maxGroupsInBatch; + if (maxGroups > limit) revert MaxGroupsExceedsBatchSize(maxGroups, limit); + _batchSize = limit; + emit BatchLimitUpdated(limit); + } + + function _setMaxGroupsInBatch(uint256 limit) internal { + if (limit == 0) revert ZeroArgument("maxGroupsInBatchLimit"); + uint256 currentBatchSize = _batchSize; + if (limit > currentBatchSize) revert MaxGroupsExceedsBatchSize(limit, currentBatchSize); + _maxGroupsInBatch = limit; + emit MaxGroupsInBatchUpdated(limit); + } + 
+ function _setExecutionDelay(uint256 delay) internal { + _executionDelay = delay; + emit ExecutionDelayUpdated(delay); + } +} diff --git a/contracts/0.8.25/consolidation/ConsolidationGateway.sol b/contracts/0.8.25/consolidation/ConsolidationGateway.sol new file mode 100644 index 0000000000..0a5829cced --- /dev/null +++ b/contracts/0.8.25/consolidation/ConsolidationGateway.sol @@ -0,0 +1,379 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +pragma solidity 0.8.25; + +import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; +import {LimitData, RateLimitStorage, RateLimit} from "contracts/common/lib/RateLimit.sol"; +import {PausableUntil} from "contracts/common/utils/PausableUntil.sol"; +import {AccessControlEnumerable} from "@openzeppelin/contracts-v5.2/access/extensions/AccessControlEnumerable.sol"; +import {GIndex} from "contracts/common/lib/GIndex.sol"; +import {CLProofVerifier} from "contracts/0.8.25/vaults/predeposit_guarantee/CLProofVerifier.sol"; +import {IPredepositGuarantee} from "contracts/0.8.25/vaults/interfaces/IPredepositGuarantee.sol"; + +interface IDepositSecurityModule { + function isDepositsPaused() external view returns (bool); +} + +interface ILido { + function canDeposit() external view returns (bool); +} + +interface IWithdrawalVault { + function addConsolidationRequests(bytes[] calldata sourcePubkeys, bytes[] calldata targetPubkeys) external payable; + + function getConsolidationRequestFee() external view returns (uint256); +} + +/** + * @title ConsolidationGateway + * @notice The ConsolidationGateway contract is the single entry point for all consolidation requests in the protocol. + * This contract is responsible for rate-limiting consolidation requests and checking the ADD_CONSOLIDATION_REQUEST_ROLE role before a request reaches the Withdrawal Vault. 
+ */ +contract ConsolidationGateway is AccessControlEnumerable, PausableUntil, CLProofVerifier { + using RateLimitStorage for bytes32; + using RateLimit for LimitData; + + /** + * @notice Thrown when an invalid zero value is passed + * @param name Name of the argument that was zero + */ + error ZeroArgument(string name); + + /** + * @notice Thrown when attempting to set the admin address to zero + */ + error AdminCannotBeZero(); + + /** + * @notice Thrown when the consolidation fee is insufficient + * @param feeRequired Amount of fee required to cover the consolidation request + * @param passedValue Amount of fee sent to cover the consolidation request + */ + error InsufficientFee(uint256 feeRequired, uint256 passedValue); + + /** + * @notice Thrown when the consolidation fee refund fails + */ + error FeeRefundFailed(); + + /** + * @notice Thrown when the remaining consolidation request limit is not enough to cover the sender's requests + * @param requestsCount Amount of requests that were sent for processing + * @param remainingLimit Amount of requests that can still be processed during the current frame + */ + error ConsolidationRequestsLimitExceeded(uint256 requestsCount, uint256 remainingLimit); + + /** + * @notice Thrown when a source group has zero elements + * @param groupIndex Index of the empty group + */ + error EmptyGroup(uint256 groupIndex); + + /** + * @notice Thrown when DSM deposits are paused + */ + error DSMDepositsPaused(); + + /** + * @notice Thrown when Lido deposits are paused (Lido stopped or bunker mode) + */ + error LidoDepositsPaused(); + + /** + * @notice Emitted when limits configs are set. + * @param maxConsolidationRequestsLimit The maximum number of consolidation requests. + * @param consolidationsPerFrame The number of consolidations that can be restored per frame. + * @param frameDurationInSec The duration of each frame, in seconds, after which `consolidationsPerFrame` consolidations can be restored. 
+ */ + event ConsolidationRequestsLimitSet( + uint256 maxConsolidationRequestsLimit, + uint256 consolidationsPerFrame, + uint256 frameDurationInSec + ); + + /// @notice role that allows to pause the contract + bytes32 public constant PAUSE_ROLE = keccak256("PAUSE_ROLE"); + + /// @notice role that allows to resume the contract + bytes32 public constant RESUME_ROLE = keccak256("RESUME_ROLE"); + + bytes32 public constant ADD_CONSOLIDATION_REQUEST_ROLE = keccak256("ADD_CONSOLIDATION_REQUEST_ROLE"); + bytes32 public constant EXIT_LIMIT_MANAGER_ROLE = keccak256("EXIT_LIMIT_MANAGER_ROLE"); + + bytes32 public constant CONSOLIDATION_LIMIT_POSITION = + keccak256("lido.ConsolidationGateway.maxConsolidationRequestLimit"); + + uint256 internal constant COMPOUNDING_PREFIX = uint256(0x02) << 248; + + struct ConsolidationWitnessGroup { + bytes[] sourcePubkeys; + IPredepositGuarantee.ValidatorWitness targetWitness; + } + + ILidoLocator internal immutable LOCATOR; + + /// @dev Ensures the contract's ETH balance is unchanged. 
+ modifier preservesEthBalance() { + uint256 balanceBeforeCall = address(this).balance - msg.value; + _; + assert(address(this).balance == balanceBeforeCall); + } + + constructor( + address admin, + address lidoLocator, + uint256 maxConsolidationRequestsLimit, + uint256 consolidationsPerFrame, + uint256 frameDurationInSec, + GIndex _gIFirstValidatorPrev, + GIndex _gIFirstValidatorCurr, + uint64 _pivotSlot + ) CLProofVerifier(_gIFirstValidatorPrev, _gIFirstValidatorCurr, _pivotSlot) { + if (admin == address(0)) revert AdminCannotBeZero(); + if (lidoLocator == address(0)) revert ZeroArgument("lidoLocator"); + LOCATOR = ILidoLocator(lidoLocator); + + _grantRole(DEFAULT_ADMIN_ROLE, admin); + _setConsolidationRequestLimit(maxConsolidationRequestsLimit, consolidationsPerFrame, frameDurationInSec); + } + + /** + * @notice Resume the contract + * @dev Reverts if contracts is not paused + * @dev Reverts if sender has no `RESUME_ROLE` + */ + function resume() external onlyRole(RESUME_ROLE) { + _resume(); + } + + /** + * @notice Pause the contract for a specified period + * @param _duration pause duration in seconds (use `PAUSE_INFINITELY` for unlimited) + * @dev Reverts if contract is already paused + * @dev Reverts if sender has no `PAUSE_ROLE` + * @dev Reverts if zero duration is passed + */ + function pauseFor(uint256 _duration) external onlyRole(PAUSE_ROLE) { + _pauseFor(_duration); + } + + /** + * @notice Pause the contract until a specified timestamp + * @param _pauseUntilInclusive the last second to pause until inclusive + * @dev Reverts if the timestamp is in the past + * @dev Reverts if sender has no `PAUSE_ROLE` + * @dev Reverts if contract is already paused + */ + function pauseUntil(uint256 _pauseUntilInclusive) external onlyRole(PAUSE_ROLE) { + _pauseUntil(_pauseUntilInclusive); + } + + /** + * @dev Submits grouped Consolidation Requests to the Withdrawal Vault. + * Each group represents multiple source validators consolidating into a single target. 
+ * @param groups An array of consolidation groups, where each group contains source public keys + * and a target validator witness with a CL proof of withdrawal credentials. + * @param refundRecipient The address that will receive any excess ETH sent for fees. + * + * @notice Reverts if: + * - The caller does not have the `ADD_CONSOLIDATION_REQUEST_ROLE` + * - The total fee value sent is insufficient to cover all provided consolidation requests. + * - There is not enough limit quota left in the current frame to process all requests. + */ + function addConsolidationRequests( + ConsolidationWitnessGroup[] calldata groups, + address refundRecipient + ) external payable onlyRole(ADD_CONSOLIDATION_REQUEST_ROLE) preservesEthBalance whenResumed { + if (msg.value == 0) revert ZeroArgument("msg.value"); + uint256 groupsCount = groups.length; + if (groupsCount == 0) revert ZeroArgument("groups"); + + // Count total individual requests across all groups + uint256 requestsCount = 0; + for (uint256 i = 0; i < groupsCount; ++i) { + uint256 groupSize = groups[i].sourcePubkeys.length; + if (groupSize == 0) revert EmptyGroup(i); + requestsCount += groupSize; + } + + _checkConsolidationPreconditions(); + + (IWithdrawalVault withdrawalVault, bytes32 withdrawalCredentials) = _getWithdrawalVaultData(); + + for (uint256 i = 0; i < groupsCount; ++i) { + _validatePubKeyWCProof(groups[i].targetWitness, withdrawalCredentials); + } + + _consumeConsolidationRequestLimit(requestsCount); + + uint256 fee = withdrawalVault.getConsolidationRequestFee(); + uint256 totalFee = requestsCount * fee; + uint256 refund = _checkFee(totalFee); + + // Expand grouped requests into flat pairs for WithdrawalVault + (bytes[] memory sourcePubkeys, bytes[] memory targetPubkeys) = _prepareConsolidationPairs( + groups, + requestsCount + ); + withdrawalVault.addConsolidationRequests{value: totalFee}(sourcePubkeys, targetPubkeys); + + _refundFee(refund, refundRecipient); + } + + /** + * @notice Sets the maximum 
request limit and the frame during which a portion of the limit can be restored. + * @param maxConsolidationRequestsLimit The maximum number of consolidation requests. + * @param consolidationsPerFrame The number of consolidations that can be restored per frame. + * @param frameDurationInSec The duration of each frame, in seconds, after which `consolidationsPerFrame` consolidations can be restored. + */ + function setConsolidationRequestLimit( + uint256 maxConsolidationRequestsLimit, + uint256 consolidationsPerFrame, + uint256 frameDurationInSec + ) external onlyRole(EXIT_LIMIT_MANAGER_ROLE) { + _setConsolidationRequestLimit(maxConsolidationRequestsLimit, consolidationsPerFrame, frameDurationInSec); + } + + /** + * @notice Returns information about current limits data + * @return maxConsolidationRequestsLimit Maximum consolidation requests limit + * @return consolidationsPerFrame The number of consolidations that can be restored per frame. + * @return frameDurationInSec The duration of each frame, in seconds, after which `consolidationsPerFrame` consolidations can be restored. + * @return prevConsolidationRequestsLimit Limit left after previous requests + * @return currentConsolidationRequestsLimit Current consolidation requests limit + */ + function getConsolidationRequestLimitFullInfo() + external + view + returns ( + uint256 maxConsolidationRequestsLimit, + uint256 consolidationsPerFrame, + uint256 frameDurationInSec, + uint256 prevConsolidationRequestsLimit, + uint256 currentConsolidationRequestsLimit + ) + { + LimitData memory limitData = CONSOLIDATION_LIMIT_POSITION.getStorageLimit(); + maxConsolidationRequestsLimit = limitData.maxLimit; + consolidationsPerFrame = limitData.itemsPerFrame; + frameDurationInSec = limitData.frameDurationInSec; + prevConsolidationRequestsLimit = limitData.prevLimit; + + currentConsolidationRequestsLimit = limitData.isLimitSet() + ? 
limitData.calculateCurrentLimit(_getTimestamp()) + : type(uint256).max; + } + + /// Internal functions + + function _checkConsolidationPreconditions() internal view { + // If DSM paused deposits, some validators may not belong to Lido + // and can therefore have non-Lido withdrawal credentials. + // To avoid accepting consolidations into such validators, new consolidation requests are blocked. + // This acts as an additional safety check on top of validator proof verification. + if (IDepositSecurityModule(LOCATOR.depositSecurityModule()).isDepositsPaused()) { + revert DSMDepositsPaused(); + } + + // If Lido stopped or bunker mode is active, new consolidation requests must also be blocked. + if (!ILido(LOCATOR.lido()).canDeposit()) { + revert LidoDepositsPaused(); + } + } + + function _checkFee(uint256 fee) internal view returns (uint256 refund) { + if (msg.value < fee) { + revert InsufficientFee(fee, msg.value); + } + unchecked { + refund = msg.value - fee; + } + } + + function _refundFee(uint256 refund, address recipient) internal { + if (refund > 0) { + // If the refund recipient is not set, use the sender as the refund recipient + if (recipient == address(0)) { + recipient = msg.sender; + } + + (bool success, ) = recipient.call{value: refund}(""); + if (!success) { + revert FeeRefundFailed(); + } + } + } + + function _getTimestamp() internal view virtual returns (uint256) { + return block.timestamp; // solhint-disable-line not-rely-on-time + } + + function _setConsolidationRequestLimit( + uint256 maxConsolidationRequestsLimit, + uint256 consolidationsPerFrame, + uint256 frameDurationInSec + ) internal { + uint256 timestamp = _getTimestamp(); + + CONSOLIDATION_LIMIT_POSITION.setStorageLimit( + CONSOLIDATION_LIMIT_POSITION.getStorageLimit().setLimits( + maxConsolidationRequestsLimit, + consolidationsPerFrame, + frameDurationInSec, + timestamp + ) + ); + + emit ConsolidationRequestsLimitSet(maxConsolidationRequestsLimit, consolidationsPerFrame, frameDurationInSec); 
+ } + + function _consumeConsolidationRequestLimit(uint256 requestsCount) internal { + LimitData memory limitData = CONSOLIDATION_LIMIT_POSITION.getStorageLimit(); + if (!limitData.isLimitSet()) { + return; + } + + uint256 limit = limitData.calculateCurrentLimit(_getTimestamp()); + + if (limit < requestsCount) { + revert ConsolidationRequestsLimitExceeded(requestsCount, limit); + } + + CONSOLIDATION_LIMIT_POSITION.setStorageLimit(limitData.updatePrevLimit(limit - requestsCount, _getTimestamp())); + } + + /// Flattens grouped source pubkeys and repeats each group's target pubkey. + function _prepareConsolidationPairs( + ConsolidationWitnessGroup[] calldata groups, + uint256 totalCount + ) internal pure returns (bytes[] memory sourcePubkeys, bytes[] memory targetPubkeys) { + sourcePubkeys = new bytes[](totalCount); + targetPubkeys = new bytes[](totalCount); + + uint256 idx = 0; + for (uint256 i = 0; i < groups.length; ++i) { + bytes[] calldata group = groups[i].sourcePubkeys; + bytes calldata target = groups[i].targetWitness.pubkey; + for (uint256 j = 0; j < group.length; ++j) { + sourcePubkeys[idx] = group[j]; + targetPubkeys[idx] = target; + ++idx; + } + } + } + + /// Returns the withdrawal vault and its 0x02 withdrawal credentials. 
+ function _getWithdrawalVaultData() + internal + view + returns (IWithdrawalVault withdrawalVault, bytes32 withdrawalCredentials) + { + address vaultAddress = LOCATOR.withdrawalVault(); + withdrawalVault = IWithdrawalVault(vaultAddress); + + // withdrawalCredentials = 0x02 || 11 zero bytes || 20-byte vault address + withdrawalCredentials = bytes32(COMPOUNDING_PREFIX | uint160(vaultAddress)); + } +} diff --git a/contracts/0.8.25/consolidation/ConsolidationMigrator.sol b/contracts/0.8.25/consolidation/ConsolidationMigrator.sol new file mode 100644 index 0000000000..674b0c5ada --- /dev/null +++ b/contracts/0.8.25/consolidation/ConsolidationMigrator.sol @@ -0,0 +1,419 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +pragma solidity 0.8.25; + +import {EnumerableSet} from "@openzeppelin/contracts-v5.2/utils/structs/EnumerableSet.sol"; +import { + AccessControlEnumerableUpgradeable +} from "contracts/openzeppelin/5.2/upgradeable/access/extensions/AccessControlEnumerableUpgradeable.sol"; + +/** + * @dev Minimal interface for StakingRouter to get module addresses + */ +interface IStakingRouter { + struct StakingModule { + uint24 id; + address stakingModuleAddress; + uint16 stakingModuleFee; + uint16 treasuryFee; + uint16 stakeShareLimit; + uint8 status; + string name; + uint64 lastDepositAt; + uint256 lastDepositBlock; + uint256 exitedValidatorsCount; + uint16 priorityExitShareThreshold; + uint64 maxDepositsPerBlock; + uint64 minDepositBlockDistance; + uint8 withdrawalCredentialsType; + } + + function getStakingModule(uint256 _stakingModuleId) external view returns (StakingModule memory); +} + +/** + * @dev Unified interface for staking modules (NOR, SDVT, CMv1, CMv2) + * It also works for legacy staking modules (NOR, SDVT) where `getSigningKeys` returns different + * tuple `(bytes memory pubkeys, bytes memory signatures, bool[] memory used)`. 
+ * The trick: `abi.decode(returndata, (bytes))` will decode only the first tuple element.
+ * This is safe as long as the first returned value really is `bytes pubkeys` in that position.
+ */
+interface IUnifiedStakingModule {
+    function getSigningKeys(
+        uint256 nodeOperatorId,
+        uint256 startIndex,
+        uint256 keysCount
+    ) external view returns (bytes memory);
+
+    function getNodeOperatorSummary(
+        uint256 _nodeOperatorId
+    )
+        external
+        view
+        returns (
+            uint256 targetLimitMode,
+            uint256 targetValidatorsCount,
+            uint256 stuckValidatorsCount,
+            uint256 refundedValidatorsCount,
+            uint256 stuckPenaltyEndTimestamp,
+            uint256 totalExitedValidators,
+            uint256 totalDepositedValidators,
+            uint256 depositableValidatorsCount
+        );
+}
+
+/**
+ * @dev Interface for ConsolidationBus to submit consolidation requests
+ */
+interface IConsolidationBus {
+    struct ConsolidationGroup {
+        bytes[] sourcePubkeys;
+        bytes targetPubkey;
+    }
+
+    function addConsolidationRequests(ConsolidationGroup[] calldata groups) external;
+}
+
+/**
+ * @title ConsolidationMigrator
+ * @notice Validates and submits consolidation requests from source module to target module.
+ *
+ * The workflow:
+ * 1. Allows the consolidation manager to submit consolidation requests for operator pairs
+ * 2. The consolidation manager submits consolidation batches
+ * 3. 
Contract validates keys and forwards to ConsolidationBus + */ +contract ConsolidationMigrator is AccessControlEnumerableUpgradeable { + using EnumerableSet for EnumerableSet.UintSet; + + // ========== + // Errors + // ========== + + error ZeroArgument(string name); + error AdminCannotBeZero(); + error PairNotInAllowlist(uint256 sourceOperatorId, uint256 targetOperatorId); + error KeyNotDeposited(uint256 moduleId, uint256 operatorId, uint256 keyIndex); + error NotAuthorized(address caller, uint256 sourceOperatorId, uint256 targetOperatorId); + + // ========== + // Events + // ========== + + event ConsolidationPairAllowed( + uint256 indexed sourceOperatorId, + uint256 indexed targetOperatorId, + address indexed submitter + ); + event ConsolidationPairDisallowed( + uint256 indexed sourceOperatorId, + uint256 indexed targetOperatorId, + address indexed submitter + ); + event ConsolidationSubmitted( + uint256 indexed sourceOperatorId, + uint256 indexed targetOperatorId, + ConsolidationIndexGroup[] groups + ); + + // ========== + // Structs + // ========== + + struct ConsolidationIndexGroup { + uint256[] sourceKeyIndices; + uint256 targetKeyIndex; + } + + // ========== + // Roles + // ========== + + bytes32 public constant ALLOW_PAIR_ROLE = keccak256("ALLOW_PAIR_ROLE"); + bytes32 public constant DISALLOW_PAIR_ROLE = keccak256("DISALLOW_PAIR_ROLE"); + + // ========== + // Immutables + // ========== + + uint256 public constant PUBKEY_LENGTH = 48; + + IStakingRouter internal immutable STAKING_ROUTER; + IConsolidationBus internal immutable CONSOLIDATION_BUS; + uint256 internal immutable SOURCE_MODULE_ID; + uint256 internal immutable TARGET_MODULE_ID; + + // ========== + // Storage + // ========== + + /// @dev mapping(sourceOperatorId => set of allowed targetOperatorIds) + mapping(uint256 => EnumerableSet.UintSet) internal _allowedPairs; + + /// @dev mapping(sourceOperatorId => mapping(targetOperatorId => submitter address)) + mapping(uint256 => mapping(uint256 => address)) 
internal _submitters; + + // ========== + // Constructor + // ========== + + constructor(address stakingRouter, address consolidationBus, uint256 _sourceModuleId, uint256 _targetModuleId) { + if (stakingRouter == address(0)) revert ZeroArgument("stakingRouter"); + if (consolidationBus == address(0)) revert ZeroArgument("consolidationBus"); + if (_sourceModuleId == 0) revert ZeroArgument("sourceModuleId"); + if (_targetModuleId == 0) revert ZeroArgument("targetModuleId"); + + STAKING_ROUTER = IStakingRouter(stakingRouter); + CONSOLIDATION_BUS = IConsolidationBus(consolidationBus); + SOURCE_MODULE_ID = _sourceModuleId; + TARGET_MODULE_ID = _targetModuleId; + + _disableInitializers(); + } + + /// @notice Initializes the contract. + /// @param admin Lido DAO Aragon agent contract address. + /// @dev Proxy initialization method. + function initialize(address admin) external initializer { + if (admin == address(0)) revert AdminCannotBeZero(); + + _grantRole(DEFAULT_ADMIN_ROLE, admin); + } + + // ====================== + // Allowlist Management + // ====================== + + /** + * @notice Allows a consolidation pair (source operator -> target operator) with a designated submitter + * @param sourceOperatorId ID of the source operator in source module + * @param targetOperatorId ID of the target operator in target module + * @param submitter Address authorized to submit consolidation batches for this pair + * @dev Can be called multiple times to update the submitter for an existing pair + * @dev Reverts if caller does not have ALLOW_PAIR_ROLE or if submitter is zero address + */ + function allowPair( + uint256 sourceOperatorId, + uint256 targetOperatorId, + address submitter + ) external onlyRole(ALLOW_PAIR_ROLE) { + if (submitter == address(0)) revert ZeroArgument("submitter"); + + _allowedPairs[sourceOperatorId].add(targetOperatorId); + _submitters[sourceOperatorId][targetOperatorId] = submitter; + + emit ConsolidationPairAllowed(sourceOperatorId, targetOperatorId, 
submitter); + } + + /** + * @notice Disallows a consolidation pair and removes the submitter + * @param sourceOperatorId ID of the source operator + * @param targetOperatorId ID of the target operator + * @dev Reverts if caller does not have DISALLOW_PAIR_ROLE + */ + function disallowPair(uint256 sourceOperatorId, uint256 targetOperatorId) external onlyRole(DISALLOW_PAIR_ROLE) { + bool removed = _allowedPairs[sourceOperatorId].remove(targetOperatorId); + if (!removed) revert PairNotInAllowlist(sourceOperatorId, targetOperatorId); + + address submitter = _submitters[sourceOperatorId][targetOperatorId]; + delete _submitters[sourceOperatorId][targetOperatorId]; + + emit ConsolidationPairDisallowed(sourceOperatorId, targetOperatorId, submitter); + } + + /** + * @notice Allows a submitter to disallow their own pair (permissionless) + * @param sourceOperatorId ID of the source operator + * @param targetOperatorId ID of the target operator + * @dev Caller must be the designated submitter for the pair + * @dev Reverts if caller is not the submitter + */ + function selfDisallowPair(uint256 sourceOperatorId, uint256 targetOperatorId) external { + address submitter = _submitters[sourceOperatorId][targetOperatorId]; + if (msg.sender != submitter) { + revert NotAuthorized(msg.sender, sourceOperatorId, targetOperatorId); + } + + _allowedPairs[sourceOperatorId].remove(targetOperatorId); + delete _submitters[sourceOperatorId][targetOperatorId]; + + emit ConsolidationPairDisallowed(sourceOperatorId, targetOperatorId, msg.sender); + } + + // ============== + // View Methods + // ============== + + /** + * @notice Checks if a consolidation pair is allowed + * @param sourceOperatorId ID of the source operator + * @param targetOperatorId ID of the target operator + * @return True if the pair is allowed + */ + function isPairAllowed(uint256 sourceOperatorId, uint256 targetOperatorId) external view returns (bool) { + return _allowedPairs[sourceOperatorId].contains(targetOperatorId); + } 
+ + /** + * @notice Returns all allowed target operators for a given source operator + * @param sourceOperatorId ID of the source operator + * @return Array of allowed target operator IDs + */ + function getAllowedTargets(uint256 sourceOperatorId) external view returns (uint256[] memory) { + return _allowedPairs[sourceOperatorId].values(); + } + + /** + * @notice Returns the submitter address for a consolidation pair + * @param sourceOperatorId ID of the source operator + * @param targetOperatorId ID of the target operator + * @return Address authorized to submit consolidation batches, or address(0) if pair not allowed + */ + function getSubmitter(uint256 sourceOperatorId, uint256 targetOperatorId) external view returns (address) { + return _submitters[sourceOperatorId][targetOperatorId]; + } + + /** + * @notice Returns the StakingRouter address + * @return Address of the StakingRouter + */ + function getStakingRouter() external view returns (address) { + return address(STAKING_ROUTER); + } + + /** + * @notice Returns the ConsolidationBus address + * @return Address of the ConsolidationBus + */ + function getConsolidationBus() external view returns (address) { + return address(CONSOLIDATION_BUS); + } + + /** + * @notice Returns the source module ID this migrator is bound to + * @return Source module ID + */ + function sourceModuleId() external view returns (uint256) { + return SOURCE_MODULE_ID; + } + + /** + * @notice Returns the target module ID this migrator is bound to + * @return Target module ID + */ + function targetModuleId() external view returns (uint256) { + return TARGET_MODULE_ID; + } + + // ============ + // Submit + // ============ + + /** + * @notice Submits a consolidation batch after validation + * @param sourceOperatorId ID of the source operator + * @param targetOperatorId ID of the target operator + * @param groups Array of consolidation index groups, each containing source key indices and a target key index + * @dev Caller must be the 
designated submitter for this pair (set via allowPair) + * @dev Forwards the validated batch to ConsolidationBus + */ + function submitConsolidationBatch( + uint256 sourceOperatorId, + uint256 targetOperatorId, + ConsolidationIndexGroup[] calldata groups + ) external { + // Check authorization: caller must be the designated submitter for this pair + address submitter = _submitters[sourceOperatorId][targetOperatorId]; + if (msg.sender != submitter) { + revert NotAuthorized(msg.sender, sourceOperatorId, targetOperatorId); + } + + // Validate the batch and get pubkeys + IConsolidationBus.ConsolidationGroup[] memory pubkeyGroups = _getValidatedConsolidationPubkeys( + sourceOperatorId, + targetOperatorId, + groups + ); + + CONSOLIDATION_BUS.addConsolidationRequests(pubkeyGroups); + + emit ConsolidationSubmitted(sourceOperatorId, targetOperatorId, groups); + } + + // ================== + // Internal Methods + // ================== + + /** + * @dev Validates consolidation key sets and returns corresponding pubkeys. + * Ensures all referenced keys are deposited. 
+ */ + function _getValidatedConsolidationPubkeys( + uint256 sourceOperatorId, + uint256 targetOperatorId, + ConsolidationIndexGroup[] calldata groups + ) internal view returns (IConsolidationBus.ConsolidationGroup[] memory pubkeyGroups) { + uint256 groupsCount = groups.length; + + pubkeyGroups = new IConsolidationBus.ConsolidationGroup[](groupsCount); + for (uint256 i = 0; i < groupsCount; ++i) { + pubkeyGroups[i].sourcePubkeys = _validateAndExtractSourceKeys(sourceOperatorId, groups[i].sourceKeyIndices); + pubkeyGroups[i].targetPubkey = _validateAndExtractTargetKey(targetOperatorId, groups[i].targetKeyIndex); + } + } + + function _validateAndExtractSourceKeys( + uint256 operatorId, + uint256[] calldata keyIndices + ) internal view returns (bytes[] memory pubkeys) { + IUnifiedStakingModule module = _getModule(SOURCE_MODULE_ID); + + uint256 totalDeposited = _getDepositedValidatorsCount(module, operatorId); + + uint256 count = keyIndices.length; + pubkeys = new bytes[](count); + + for (uint256 i = 0; i < count; ++i) { + uint256 keyIndex = keyIndices[i]; + + if (keyIndex >= totalDeposited) { + revert KeyNotDeposited(SOURCE_MODULE_ID, operatorId, keyIndex); + } + + bytes memory key = module.getSigningKeys(operatorId, keyIndex, 1); + assert(key.length == PUBKEY_LENGTH); // Should always be 48 bytes for a single key + pubkeys[i] = key; + } + } + + function _validateAndExtractTargetKey( + uint256 operatorId, + uint256 keyIndex + ) internal view returns (bytes memory pubkey) { + IUnifiedStakingModule module = _getModule(TARGET_MODULE_ID); + + uint256 totalDeposited = _getDepositedValidatorsCount(module, operatorId); + + if (keyIndex >= totalDeposited) { + revert KeyNotDeposited(TARGET_MODULE_ID, operatorId, keyIndex); + } + + bytes memory key = module.getSigningKeys(operatorId, keyIndex, 1); + assert(key.length == PUBKEY_LENGTH); // Should always be 48 bytes for a single key + pubkey = key; + } + + function _getModule(uint256 moduleId) internal view returns 
(IUnifiedStakingModule) { + IStakingRouter.StakingModule memory sm = STAKING_ROUTER.getStakingModule(moduleId); + return IUnifiedStakingModule(sm.stakingModuleAddress); + } + + function _getDepositedValidatorsCount( + IUnifiedStakingModule module, + uint256 operatorId + ) internal view returns (uint256 totalDeposited) { + (, , , , , , totalDeposited, ) = module.getNodeOperatorSummary(operatorId); + } +} diff --git a/contracts/0.8.25/lib/BeaconChainDepositor.sol b/contracts/0.8.25/lib/BeaconChainDepositor.sol new file mode 100644 index 0000000000..81bebb835b --- /dev/null +++ b/contracts/0.8.25/lib/BeaconChainDepositor.sol @@ -0,0 +1,150 @@ +// SPDX-FileCopyrightText: 2023 Lido +// SPDX-License-Identifier: GPL-3.0 + +// See contracts/COMPILERS.md +pragma solidity 0.8.25; + +import {MemUtils} from "contracts/common/lib/MemUtils.sol"; + +interface IDepositContract { + function get_deposit_root() external view returns (bytes32 rootHash); + + function deposit( + bytes calldata pubkey, // 48 bytes + bytes calldata withdrawal_credentials, // 32 bytes + bytes calldata signature, // 96 bytes + bytes32 deposit_data_root + ) external payable; +} + +library BeaconChainDepositor { + uint256 internal constant PUBLIC_KEY_LENGTH = 48; + uint256 internal constant SIGNATURE_LENGTH = 96; + + uint256 internal constant DEPOSIT_SIZE = 32 ether; + uint64 internal constant DEPOSIT_SIZE_IN_GWEI = 32 ether / 1 gwei; + + /// @dev Minimum deposit amount required by the Ethereum Deposit Contract + uint256 internal constant MIN_DEPOSIT = 1 ether; + + /// @dev Invokes a deposit call to the official Beacon Deposit contract + /// @param _depositContract - IDepositContract deposit contract + /// @param _keysCount amount of keys to deposit + /// @param _withdrawalCredentials Commitment to a public key for withdrawals + /// @param _publicKeysBatch A BLS12-381 public keys batch + /// @param _signaturesBatch A BLS12-381 signatures batch + function makeBeaconChainDeposits32ETH( + IDepositContract 
_depositContract, + uint256 _keysCount, + bytes memory _withdrawalCredentials, + bytes memory _publicKeysBatch, + bytes memory _signaturesBatch + ) public { + if (_publicKeysBatch.length != PUBLIC_KEY_LENGTH * _keysCount) { + revert InvalidPublicKeysBatchLength(_publicKeysBatch.length, PUBLIC_KEY_LENGTH * _keysCount); + } + if (_signaturesBatch.length != SIGNATURE_LENGTH * _keysCount) { + revert InvalidSignaturesBatchLength(_signaturesBatch.length, SIGNATURE_LENGTH * _keysCount); + } + + bytes memory publicKey = MemUtils.unsafeAllocateBytes(PUBLIC_KEY_LENGTH); + bytes memory signature = MemUtils.unsafeAllocateBytes(SIGNATURE_LENGTH); + + for (uint256 i; i < _keysCount; ++i) { + MemUtils.copyBytes(_publicKeysBatch, publicKey, i * PUBLIC_KEY_LENGTH, 0, PUBLIC_KEY_LENGTH); + MemUtils.copyBytes(_signaturesBatch, signature, i * SIGNATURE_LENGTH, 0, SIGNATURE_LENGTH); + + _depositContract.deposit{value: DEPOSIT_SIZE}( + publicKey, + _withdrawalCredentials, + signature, + _computeDepositDataRootWithAmount(_withdrawalCredentials, publicKey, signature, DEPOSIT_SIZE_IN_GWEI) + ); + } + } + + function makeBeaconChainTopUp( + IDepositContract _depositContract, + bytes memory _withdrawalCredentials, + bytes[] memory _publicKeys, + uint256[] memory _amount + ) public { + uint256 len = _publicKeys.length; + if (len == 0) return; + if (len != _amount.length) revert ArrayLengthMismatch(); + + bytes memory dummySignature = new bytes(SIGNATURE_LENGTH); + + for (uint256 i; i < len; ++i) { + bytes memory pk = _publicKeys[i]; + + if (pk.length != PUBLIC_KEY_LENGTH) { + revert InvalidPublicKeysBatchLength(pk.length, PUBLIC_KEY_LENGTH); + } + + uint256 amount = _amount[i]; + + // obtainDepositData can return 0 amount for some keys + if (amount == 0) continue; + + // Amounts below minimum deposit (1 ETH) would fail at deposit contract + if (amount < MIN_DEPOSIT) { + revert DepositAmountTooLow(); + } + + uint256 amountGwei = amount / 1 gwei; + if (amountGwei > type(uint64).max) { + revert 
AmountTooLarge(); + } + uint64 amountGwei64 = uint64(amountGwei); + + // full DepositData root with custom amount + bytes32 depositDataRoot = + _computeDepositDataRootWithAmount(_withdrawalCredentials, pk, dummySignature, amountGwei64); + + _depositContract.deposit{value: amount}(pk, _withdrawalCredentials, dummySignature, depositDataRoot); + } + } + + function _computeDepositDataRootWithAmount( + bytes memory _withdrawalCredentials, + bytes memory _publicKey, + bytes memory _signature, + uint64 _amountGwei + ) private pure returns (bytes32) { + bytes32 publicKeyRoot = sha256(abi.encodePacked(_publicKey, bytes16(0))); + bytes32 signatureRoot = _computeSignatureRoot(_signature); + bytes8 amountLE = _toLittleEndian64(_amountGwei); + + return sha256( + abi.encodePacked( + sha256(abi.encodePacked(publicKeyRoot, _withdrawalCredentials)), + sha256(abi.encodePacked(amountLE, bytes24(0), signatureRoot)) + ) + ); + } + + function _computeSignatureRoot(bytes memory _signature) private pure returns (bytes32) { + bytes memory sigPart1 = MemUtils.unsafeAllocateBytes(64); + bytes memory sigPart2 = MemUtils.unsafeAllocateBytes(SIGNATURE_LENGTH - 64); + + MemUtils.copyBytes(_signature, sigPart1, 0, 0, 64); + MemUtils.copyBytes(_signature, sigPart2, 64, 0, SIGNATURE_LENGTH - 64); + + return + sha256(abi.encodePacked(sha256(abi.encodePacked(sigPart1)), sha256(abi.encodePacked(sigPart2, bytes32(0))))); + } + + function _toLittleEndian64(uint64 value) private pure returns (bytes8 ret) { + ret = bytes8(0); + for (uint256 i = 0; i < 8; ++i) { + ret |= bytes8(bytes1(uint8(value >> (8 * i)))) >> (8 * i); + } + } + + error InvalidPublicKeysBatchLength(uint256 actual, uint256 expected); + error InvalidSignaturesBatchLength(uint256 actual, uint256 expected); + error ArrayLengthMismatch(); + error AmountTooLarge(); + error DepositAmountTooLow(); +} diff --git a/contracts/0.8.25/sr/ISRBase.sol b/contracts/0.8.25/sr/ISRBase.sol new file mode 100644 index 0000000000..5bb8e07b55 --- /dev/null +++ 
b/contracts/0.8.25/sr/ISRBase.sol @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity 0.8.25; + +import {StakingModuleStatus} from "./SRTypes.sol"; + +/** + * @title StakingRouter base interface, defines events and errors + * @author KRogLA + */ + +interface ISRBase { + /** + * Events + */ + event StakingModuleAdded(uint256 indexed stakingModuleId, address stakingModule, string name, address createdBy); + event StakingModuleShareLimitSet( + uint256 indexed stakingModuleId, uint256 stakeShareLimit, uint256 priorityExitShareThreshold, address setBy + ); + event StakingModuleFeesSet( + uint256 indexed stakingModuleId, uint256 stakingModuleFee, uint256 treasuryFee, address setBy + ); + event StakingModuleMaxDepositsPerBlockSet( + uint256 indexed stakingModuleId, uint256 maxDepositsPerBlock, address setBy + ); + event StakingModuleMinDepositBlockDistanceSet( + uint256 indexed stakingModuleId, uint256 minDepositBlockDistance, address setBy + ); + event StakingModuleStatusSet(uint256 indexed stakingModuleId, StakingModuleStatus status, address setBy); + + event WithdrawalCredentialsSet(bytes32 withdrawalCredentials, address setBy); + + event StakingRouterETHDeposited(uint256 indexed stakingModuleId, uint256 amount); + event DepositableEthReceived(uint256 amount); + + event ExitedAndStuckValidatorsCountsUpdateFailed(uint256 indexed stakingModuleId, bytes lowLevelRevertData); + event RewardsMintedReportFailed(uint256 indexed stakingModuleId, bytes lowLevelRevertData); + event StakingModuleExitedValidatorsIncompleteReporting( + uint256 indexed stakingModuleId, uint256 unreportedExitedValidatorsCount + ); + event WithdrawalsCredentialsChangeFailed(uint256 indexed stakingModuleId, bytes lowLevelRevertData); + event StakingModuleExitNotificationFailed( + uint256 indexed stakingModuleId, uint256 indexed nodeOperatorId, bytes _publicKey + ); + + /** + * Errors + */ + + // Validation + error InvalidAmountGwei(); + error NotAuthorized(); + error ZeroAddress(); + 
error ZeroArgument(); + error ArraysLengthMismatch(); + error OracleExtraDataNotSubmitted(); + + // Oracle report + error InvalidReportData(uint256 code); + error ReportedExitedValidatorsExceedDeposited( + uint256 reportedExitedValidatorsCount, uint256 depositedValidatorsCount + ); + error UnexpectedCurrentValidatorsCount( + uint256 currentModuleExitedValidatorsCount, uint256 currentNodeOpExitedValidatorsCount + ); + error UnexpectedFinalExitedValidatorsCount( + uint256 newModuleTotalExitedValidatorsCount, uint256 newModuleTotalExitedValidatorsCountInStakingRouter + ); + error UnrecoverableModuleError(); + error ExitedValidatorsCountCannotDecrease(); + + // Deposits + error DirectETHTransfer(); + error ModuleReturnExceedTarget(); + error StakingModuleStatusTheSame(); + error EmptyKeysList(); + error WrongPubkeyLength(); + error AmountNotAlignedToGwei(); + error AllocationExceedsLimit(); + error ZeroDeposits(); + + // Staking module + error StakingModuleAddressExists(); + error StakingModulesLimitExceeded(); + error StakingModuleWrongName(); + error StakingModuleUnregistered(); + error StakingModuleNotActive(); + error WrongWithdrawalCredentialsType(); + error InvalidPriorityExitShareThreshold(); + error InvalidMinDepositBlockDistance(); + error InvalidMaxDepositPerBlockValue(); + error InvalidStakeShareLimit(); + error InvalidFeeSum(); + error InconsistentFeeSum(); + error UnexpectedModuleId(uint256 expectedId, uint256 actualId); +} diff --git a/contracts/0.8.25/sr/SRLib.sol b/contracts/0.8.25/sr/SRLib.sol new file mode 100644 index 0000000000..b022e53990 --- /dev/null +++ b/contracts/0.8.25/sr/SRLib.sol @@ -0,0 +1,932 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity 0.8.25; + +import {Math, SafeCast} from "@openzeppelin/contracts-v5.2/utils/math/Math.sol"; +import {StorageSlot} from "@openzeppelin/contracts-v5.2/utils/StorageSlot.sol"; +import {MinFirstAllocationStrategy} from "contracts/common/lib/MinFirstAllocationStrategy.sol"; +import 
{WithdrawalCredentials} from "contracts/common/lib/WithdrawalCredentials.sol";
+import {IStakingModule} from "contracts/common/interfaces/IStakingModule.sol";
+import {SRStorage} from "./SRStorage.sol";
+import {SRUtils} from "./SRUtils.sol";
+import {
+    ModuleState,
+    StakingModuleConfig,
+    StakingModuleStatus,
+    StakingModule,
+    ModuleStateConfig,
+    ModuleStateDeposits,
+    ModuleStateAccounting,
+    ValidatorExitData,
+    ValidatorsCountsCorrection,
+    RouterStateAccounting
+} from "./SRTypes.sol";
+import {ISRBase} from "./ISRBase.sol";
+
+/**
+ * @title StakingRouter helper external library
+ * @author KRogLA
+ */
+
+library SRLib {
+    using StorageSlot for bytes32;
+    using WithdrawalCredentials for bytes32;
+    using SRStorage for ModuleState;
+    using SRStorage for uint256; // for module IDs
+
+    /// @dev Protocol-level constants, built once per tx from immutables
+    /// @dev Because SRLib is an external library, we can't access immutable variables here, so we pass them as parameters
+    struct Config {
+        uint256 maxEBType1;
+        uint256 maxEBType2;
+    }
+
+    struct ModuleParamsCache {
+        uint256 depositableCount;
+        uint256 activeCount;
+        uint16 shareLimit;
+        StakingModuleStatus status;
+        uint8 wcType;
+    }
+
+    /// @notice One-time migration from old storage layout to new RouterState struct.
+    /// @dev Storage slot positions are computed inline for migration-only use.
+    /// After migration, this function can be removed. 
+ function _migrateStorage(uint256 maxEBType1) public { + // skip migration if data already exists + if (SRStorage.getModulesCount() > 0) { + return; + } + + // Old storage slot positions (computed inline for migration-only use) + bytes32 LIDO_POS = keccak256("lido.StakingRouter.lido"); + bytes32 WITHDRAWAL_CREDENTIALS_POS = keccak256("lido.StakingRouter.withdrawalCredentials"); + bytes32 STAKING_MODULES_COUNT_POS = keccak256("lido.StakingRouter.stakingModulesCount"); + bytes32 LAST_STAKING_MODULE_ID_POS = keccak256("lido.StakingRouter.lastStakingModuleId"); + bytes32 CONTRACT_VERSION_POS = keccak256("lido.Versioned.contractVersion"); + bytes32 STAKING_MODULES_MAPPING_POS = keccak256("lido.StakingRouter.stakingModules"); + bytes32 STAKING_MODULE_INDICES_POS = keccak256("lido.StakingRouter.stakingModuleIndicesOneBased"); + + // cleanup old storage slots + delete LIDO_POS.getBytes32Slot().value; + delete CONTRACT_VERSION_POS.getBytes32Slot().value; + + // migrate last staking module ID + SRStorage.getRouterState().lastModuleId = uint24(LAST_STAKING_MODULE_ID_POS.getUint256Slot().value); + delete LAST_STAKING_MODULE_ID_POS.getBytes32Slot().value; + + // migrate WC + SRStorage.getRouterState().withdrawalCredentials = WITHDRAWAL_CREDENTIALS_POS.getBytes32Slot().value; + delete WITHDRAWAL_CREDENTIALS_POS.getBytes32Slot().value; + + uint256 modulesCount = STAKING_MODULES_COUNT_POS.getUint256Slot().value; + delete STAKING_MODULES_COUNT_POS.getBytes32Slot().value; + + // get old storage ref. for staking modules mapping + mapping(uint256 => StakingModule) storage oldStakingModules = + _getStorageStakingModulesMapping(STAKING_MODULES_MAPPING_POS); + // get old storage ref. 
for staking modules indices mapping
+        mapping(uint256 => uint256) storage oldStakingModuleIndices =
+            _getStorageStakingIndicesMapping(STAKING_MODULE_INDICES_POS);
+
+        uint64 totalValidatorsBalanceGwei;
+        StakingModule memory smOld;
+
+        for (uint256 i; i < modulesCount; ++i) {
+            smOld = oldStakingModules[i];
+
+            uint256 _moduleId = smOld.id;
+            // push module ID to EnumerableSet
+            SRStorage.addModuleId(_moduleId);
+
+            ModuleState storage moduleState = _moduleId.getModuleState();
+
+            // 1 SSTORE
+            moduleState.name = smOld.name;
+
+            // 1 SSTORE
+            moduleState.config = ModuleStateConfig({
+                moduleAddress: smOld.stakingModuleAddress,
+                moduleFee: smOld.stakingModuleFee,
+                treasuryFee: smOld.treasuryFee,
+                stakeShareLimit: smOld.stakeShareLimit,
+                priorityExitShareThreshold: smOld.priorityExitShareThreshold,
+                status: StakingModuleStatus(smOld.status),
+                withdrawalCredentialsType: WithdrawalCredentials.WC_TYPE_01
+            });
+
+            // 1 SSTORE
+            moduleState.deposits = ModuleStateDeposits({
+                lastDepositAt: smOld.lastDepositAt,
+                lastDepositBlock: SafeCast.toUint64(smOld.lastDepositBlock),
+                maxDepositsPerBlock: smOld.maxDepositsPerBlock,
+                minDepositBlockDistance: smOld.minDepositBlockDistance
+            });
+
+            /// @dev calculate module effective balance at the migration moment
+            (uint256 exitedValidatorsCount, uint256 depositedValidatorsCount,) =
+                _getStakingModuleSummary(IStakingModule(smOld.stakingModuleAddress));
+            // The module might not have received all exited validators data yet => we need to replace
+            // the exitedValidatorsCount with the one that the staking router is aware of. 
+ uint256 activeCount = + depositedValidatorsCount - Math.max(smOld.exitedValidatorsCount, exitedValidatorsCount); + uint64 validatorsBalanceGwei = SRUtils._toGwei(activeCount * maxEBType1); + + // 1 SSTORE + moduleState.accounting = ModuleStateAccounting({ + validatorsBalanceGwei: validatorsBalanceGwei, + exitedValidatorsCount: SafeCast.toUint64(smOld.exitedValidatorsCount) + }); + + totalValidatorsBalanceGwei += validatorsBalanceGwei; + + // cleanup old storage for staking module data + delete oldStakingModules[i]; + delete oldStakingModuleIndices[_moduleId]; + } + + // cleanup old mapping storage slots + delete STAKING_MODULES_MAPPING_POS.getBytes32Slot().value; + delete STAKING_MODULE_INDICES_POS.getBytes32Slot().value; + + /// @dev use the same value for both CL balance and active balance at migration moment, + /// next Oracle report will update the both values + SRStorage.getRouterState().accounting = + RouterStateAccounting({validatorsBalanceGwei: totalValidatorsBalanceGwei}); + } + + /// @dev Helper for migration - returns old staking modules mapping storage reference + function _getStorageStakingModulesMapping(bytes32 _position) + internal + pure + returns (mapping(uint256 => StakingModule) storage $) + { + assembly ("memory-safe") { + $.slot := _position + } + } + + /// @dev Helper for migration - returns old staking module indices mapping storage reference + function _getStorageStakingIndicesMapping(bytes32 _position) + internal + pure + returns (mapping(uint256 => uint256) storage $) + { + assembly ("memory-safe") { + $.slot := _position + } + } + + /// @notice Registers a new staking module. + /// @param _moduleAddress Address of staking module. + /// @param _moduleName Name of staking module. + /// @param _moduleConfig Staking module config + /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. 
+ function _addModule(address _moduleAddress, string calldata _moduleName, StakingModuleConfig calldata _moduleConfig) + public + returns (uint256 newModuleId) + { + SRUtils._requireNotZero(_moduleAddress); + + if (bytes(_moduleName).length == 0 || bytes(_moduleName).length > SRUtils.MAX_STAKING_MODULE_NAME_LENGTH) { + revert ISRBase.StakingModuleWrongName(); + } + if (SRStorage.getModulesCount() >= SRUtils.MAX_STAKING_MODULES_COUNT) { + revert ISRBase.StakingModulesLimitExceeded(); + } + + SRUtils._requireWCTypeValid(_moduleConfig.withdrawalCredentialsType); + + // Check for duplicate module address + /// @dev due to small number of modules, we can afford to do this check on add + uint256 modulesCount = SRStorage.getModulesCount(); + for (uint256 i; i < modulesCount; ++i) { + uint256 moduleId = SRStorage.getModuleIdAt(i); + if (_moduleAddress == moduleId.getModuleState().config.moduleAddress) { + revert ISRBase.StakingModuleAddressExists(); + } + } + + newModuleId = SRStorage.getRouterState().lastModuleId + 1; + // push new module ID to EnumerableSet + SRStorage.addModuleId(newModuleId); + + ModuleState storage moduleState = newModuleId.getModuleState(); + moduleState.config.moduleAddress = _moduleAddress; + moduleState.config.status = StakingModuleStatus.Active; + moduleState.config.withdrawalCredentialsType = uint8(_moduleConfig.withdrawalCredentialsType); + moduleState.name = _moduleName; + + emit ISRBase.StakingModuleAdded(newModuleId, _moduleAddress, _moduleName, msg.sender); + + _updateModuleParams( + newModuleId, + _moduleConfig.stakeShareLimit, + _moduleConfig.priorityExitShareThreshold, + _moduleConfig.stakingModuleFee, + _moduleConfig.treasuryFee, + _moduleConfig.maxDepositsPerBlock, + _moduleConfig.minDepositBlockDistance + ); + + // save last module ID + SRStorage.getRouterState().lastModuleId = uint24(newModuleId); + return newModuleId; + } + + /// @notice Validates share-related parameters. 
+ /// @param _stakeShareLimit Stake share limit to validate (in basis points). + /// @param _priorityExitShareThreshold Priority exit share threshold to validate (in basis points). + function _validateShareParams(uint256 _stakeShareLimit, uint256 _priorityExitShareThreshold) private pure { + if (_stakeShareLimit > SRUtils.TOTAL_BASIS_POINTS) { + revert ISRBase.InvalidStakeShareLimit(); + } + if (_priorityExitShareThreshold > SRUtils.TOTAL_BASIS_POINTS) { + revert ISRBase.InvalidPriorityExitShareThreshold(); + } + if (_stakeShareLimit > _priorityExitShareThreshold) revert ISRBase.InvalidPriorityExitShareThreshold(); + } + + function _updateModuleParams( + uint256 _moduleId, + uint256 _stakeShareLimit, + uint256 _priorityExitShareThreshold, + uint256 _moduleFee, + uint256 _treasuryFee, + uint256 _maxDepositsPerBlock, + uint256 _minDepositBlockDistance + ) public { + _validateShareParams(_stakeShareLimit, _priorityExitShareThreshold); + if (_moduleFee + _treasuryFee > SRUtils.TOTAL_BASIS_POINTS) revert ISRBase.InvalidFeeSum(); + _requireConsistentFeeSum(_moduleId, _moduleFee, _treasuryFee); + if (_minDepositBlockDistance == 0 || _minDepositBlockDistance > type(uint64).max) { + revert ISRBase.InvalidMinDepositBlockDistance(); + } + if (_maxDepositsPerBlock > type(uint64).max) revert ISRBase.InvalidMaxDepositPerBlockValue(); + + // 1 SLOAD + ModuleStateConfig memory stateConfig = _moduleId.getModuleState().config; + // forge-lint: disable-start(unsafe-typecast) + stateConfig.moduleFee = uint16(_moduleFee); + stateConfig.treasuryFee = uint16(_treasuryFee); + stateConfig.stakeShareLimit = uint16(_stakeShareLimit); + stateConfig.priorityExitShareThreshold = uint16(_priorityExitShareThreshold); + // 1 SSTORE + _moduleId.getModuleState().config = stateConfig; + + // 1 SLOAD + ModuleStateDeposits memory stateDeposits = _moduleId.getModuleState().deposits; + stateDeposits.maxDepositsPerBlock = SafeCast.toUint64(_maxDepositsPerBlock); + stateDeposits.minDepositBlockDistance = 
SafeCast.toUint64(_minDepositBlockDistance); + // forge-lint: disable-end(unsafe-typecast) + // 1 SSTORE + _moduleId.getModuleState().deposits = stateDeposits; + + address setBy = msg.sender; + emit ISRBase.StakingModuleShareLimitSet(_moduleId, _stakeShareLimit, _priorityExitShareThreshold, setBy); + emit ISRBase.StakingModuleFeesSet(_moduleId, _moduleFee, _treasuryFee, setBy); + emit ISRBase.StakingModuleMaxDepositsPerBlockSet(_moduleId, _maxDepositsPerBlock, setBy); + emit ISRBase.StakingModuleMinDepositBlockDistanceSet(_moduleId, _minDepositBlockDistance, setBy); + } + + function _requireConsistentFeeSum(uint256 _moduleId, uint256 _moduleFee, uint256 _treasuryFee) internal view { + uint256 feeSum = _moduleFee + _treasuryFee; + uint256 modulesCount = SRStorage.getModulesCount(); + + for (uint256 i; i < modulesCount; ++i) { + uint256 moduleId = SRStorage.getModuleIdAt(i); + if (moduleId == _moduleId) continue; + + ModuleStateConfig memory stateConfig = moduleId.getModuleState().config; + if (uint256(stateConfig.moduleFee) + uint256(stateConfig.treasuryFee) != feeSum) { + revert ISRBase.InconsistentFeeSum(); + } + } + } + + function _updateAllModuleFees(uint256[] calldata _moduleFees, uint256[] calldata _treasuryFees) public { + uint256 modulesCount = SRStorage.getModulesCount(); + if (_moduleFees.length != modulesCount || _treasuryFees.length != modulesCount) { + revert ISRBase.ArraysLengthMismatch(); + } + if (modulesCount == 0) { + return; + } + + uint256 expectedFeeSum = _moduleFees[0] + _treasuryFees[0]; + if (expectedFeeSum > SRUtils.TOTAL_BASIS_POINTS) revert ISRBase.InvalidFeeSum(); + + for (uint256 i = 1; i < modulesCount; ++i) { + uint256 feeSum = _moduleFees[i] + _treasuryFees[i]; + if (feeSum > SRUtils.TOTAL_BASIS_POINTS) revert ISRBase.InvalidFeeSum(); + if (feeSum != expectedFeeSum) revert ISRBase.InconsistentFeeSum(); + } + + address setBy = msg.sender; + for (uint256 i; i < modulesCount; ++i) { + uint256 moduleId = SRStorage.getModuleIdAt(i); + 
ModuleStateConfig memory stateConfig = moduleId.getModuleState().config; + // forge-lint: disable-start(unsafe-typecast) + stateConfig.moduleFee = uint16(_moduleFees[i]); + stateConfig.treasuryFee = uint16(_treasuryFees[i]); + // forge-lint: disable-end(unsafe-typecast) + moduleId.getModuleState().config = stateConfig; + emit ISRBase.StakingModuleFeesSet(moduleId, _moduleFees[i], _treasuryFees[i], setBy); + } + } + + /// @notice Updates only the share-related params of a staking module. + /// @param _moduleId Id of the staking module. + /// @param _stakeShareLimit New stake share limit (in basis points). + /// @param _priorityExitShareThreshold New priority exit share threshold (in basis points). + function _updateModuleShares(uint256 _moduleId, uint256 _stakeShareLimit, uint256 _priorityExitShareThreshold) + public + { + _validateShareParams(_stakeShareLimit, _priorityExitShareThreshold); + + // 1 SLOAD + ModuleStateConfig memory stateConfig = _moduleId.getModuleState().config; + + // forge-lint: disable-start(unsafe-typecast) + stateConfig.stakeShareLimit = uint16(_stakeShareLimit); + stateConfig.priorityExitShareThreshold = uint16(_priorityExitShareThreshold); + // forge-lint: disable-end(unsafe-typecast) + + // 1 SSTORE + _moduleId.getModuleState().config = stateConfig; + + emit ISRBase.StakingModuleShareLimitSet(_moduleId, _stakeShareLimit, _priorityExitShareThreshold, msg.sender); + } + + /// @dev module state helpers + + function _setModuleStatus(uint256 _moduleId, StakingModuleStatus _status) public returns (bool isChanged) { + ModuleStateConfig storage stateConfig = _moduleId.getModuleState().config; + isChanged = stateConfig.status != _status; + if (isChanged) { + stateConfig.status = _status; + emit ISRBase.StakingModuleStatusSet(_moduleId, _status, msg.sender); + } + } + + /// @dev Optimizes contract deployment size by wrapping the 'stakingModule.getStakingModuleSummary' function. 
+ function _getStakingModuleSummary(IStakingModule module) + internal + view + returns (uint256 exitedValidators, uint256 depositedValidators, uint256 depositableValidators) + { + return module.getStakingModuleSummary(); + } + + /// @notice Deposit allocation for modules + /// @dev Allocates deposits to staking modules based on their stake share limits and available capacity. + /// The allocation algorithm prioritizes modules with lower validator (WC 0x01 equivalent) counts (MinFirst strategy). + /// @dev Method uses conversion from/to Ether amounts due to MinFirstAllocationStrategy working with unit values. + /// @param _cfg - protocol-level constants + /// @param _allocateAmount - Eth amount that should be allocated into modules + /// @param _isTopUp - flag indicating whether the allocation is for top-up deposits + /// @return totalAllocated - amount actually allocated + /// @return allocated - Array of newly allocated amounts for each module + /// @return newAllocations - Array of new allocation amounts for each module + function _getDepositAllocations(Config calldata _cfg, uint256 _allocateAmount, bool _isTopUp) + public + view + returns (uint256 totalAllocated, uint256[] memory allocated, uint256[] memory newAllocations) + { + uint256 modulesCount = SRStorage.getModulesCount(); + if (modulesCount == 0) { + return (0, new uint256[](0), new uint256[](0)); + } + + // put calldata var to stack + uint256 initialDeposit = _cfg.maxEBType1; + // convert to validators equivalent + uint256 depositsToAllocate = _allocateAmount / initialDeposit; + // get current allocations and capacities in validators equivalent + uint256[] memory capacities; + // @dev using output parameter as temporary storage for current allocations + (allocated, capacities) = _getModulesAllocationAndCapacity(_cfg, depositsToAllocate, _isTopUp); + + // If no deposits to allocate, return current state + if (depositsToAllocate > 0) { + // Use MinFirstAllocationStrategy to allocate deposits + /// @dev 
because the library is external, the `allocated` array is not mutated
Top-ups are only possible for modules with keys type 0x02. + * 2. The total top-up amount is limited by the unused capacity of already active keys. + * 3. The method call with the flag `isTopUp = true` is used only when calculating + * top-up allocations. In other words, the values returned for modules 0x01 + * are ignored by the caller. + * + * Since allocation uses the MinFirstAllocationStrategy, we must not exclude + * modules 0x01 from the selection during top-up calculations (for example, + * by setting their capacity to zero). If we did, the algorithm would attempt + * to distribute the entire available amount only across modules 0x02. + * + * This would incorrectly increase the priority of deposits into modules 0x02 + * relative to modules 0x01. + * + * Therefore the following approach is used: + * + * - For modules 0x01 we keep the same capacity as for regular seed deposits. + * Formally, these modules cannot receive top-ups, but they must remain + * visible to the allocation strategy to preserve priority ordering. + * + * - For modules 0x02 the capacity is set only to the remaining unused capacity + * of already active keys. + * + * At first glance this may appear to prioritize deposits into modules 0x01. + * However, taking fact #3 into account, the returned allocations for modules + * 0x01 are never used. They are only an artifact of the MinFirstAllocationStrategy. + * + * This design preserves the correct global priority between modules while + * still allowing the system to fully utilize the available top-up capacity + * of modules with keys type 0x02. 
+ */ + function _getModulesAllocationAndCapacity(Config calldata _cfg, uint256 depositsToAllocate, bool _isTopUp) + internal + view + returns (uint256[] memory _allocations, uint256[] memory _capacities) + { + uint256 modulesCount = SRStorage.getModulesCount(); + _allocations = new uint256[](modulesCount); + + ModuleParamsCache[] memory cache = new ModuleParamsCache[](modulesCount); + ModuleState storage moduleState; + ModuleStateConfig memory stateConfig; + + uint256 totalValidators; + uint256 maxEBType1 = _cfg.maxEBType1; + for (uint256 i = 0; i < modulesCount; ++i) { + uint256 moduleId = SRStorage.getModuleIdAt(i); + moduleState = moduleId.getModuleState(); + stateConfig = moduleState.config; + // caching config + cache[i].shareLimit = stateConfig.stakeShareLimit; + cache[i].status = stateConfig.status; + cache[i].wcType = stateConfig.withdrawalCredentialsType; + (uint256 exitedValidatorsCount, uint256 depositedValidatorsCount, uint256 depositableValidatorsCount) = + _getStakingModuleSummary(moduleId.getIStakingModule()); + cache[i].depositableCount = depositableValidatorsCount; + + // get active validators count + uint256 validatorsCount = depositedValidatorsCount + - Math.max(exitedValidatorsCount, moduleState.accounting.exitedValidatorsCount); + + // save to cache + cache[i].activeCount = validatorsCount; + + if (WithdrawalCredentials.isType2(stateConfig.withdrawalCredentialsType)) { + // Calculate equivalent of WC01 validators count rounded up: ceil(balance / maxEBType1) + validatorsCount = Math.ceilDiv(moduleId.getIStakingModuleV2().getTotalModuleStake(), maxEBType1); + } + _allocations[i] = validatorsCount; + totalValidators += validatorsCount; + } + // new total validators count after allocation + totalValidators += depositsToAllocate; + _capacities = new uint256[](modulesCount); + + // put calldata msxEBType2 to stack + uint256 maxEBType2 = _cfg.maxEBType2; + + for (uint256 i = 0; i < modulesCount; ++i) { + // module initial capacity = current allocation 
+ uint256 validatorsCapacity = _allocations[i]; + if (cache[i].status == StakingModuleStatus.Active) { + if (_isTopUp && WithdrawalCredentials.isType2(cache[i].wcType)) { + // max eth capacity of active validators = n * maxEB, + // so capacity in validators equivalent = n * maxEBType2 / msxEBType1 + validatorsCapacity = cache[i].activeCount * maxEBType2 / maxEBType1; + } else { + validatorsCapacity = _allocations[i] + cache[i].depositableCount; + } + // Calculate target validators for each module based on stake share limits + // Target validators = (stakeShareLimit * totalValidators) / TOTAL_BASIS_POINTS + uint256 targetValidators = (cache[i].shareLimit * totalValidators) / SRUtils.TOTAL_BASIS_POINTS; + // Module capacity is limited by available validators and target share + validatorsCapacity = Math.min(targetValidators, validatorsCapacity); + } + + _capacities[i] = validatorsCapacity; + } + } + + /// @notice Handles tracking and penalization logic for a node operator who failed to exit their validator within the defined exit window. + /// @dev This function is called to report the current exit-related status of a validator belonging to a specific node operator. + /// It accepts a validator's public key, associated with the duration (in seconds) it was eligible to exit but has not exited. + /// This data could be used to trigger penalties for the node operator if the validator has been non-exiting for too long. + /// @param _stakingModuleId The ID of the staking module. + /// @param _nodeOperatorId The ID of the node operator whose validator status is being delivered. + /// @param _proofSlotTimestamp The timestamp (slot time) when the validator was last known to be in an active ongoing state. + /// @param _publicKey The public key of the validator being reported. + /// @param _eligibleToExitInSec The duration (in seconds) indicating how long the validator has been eligible to exit after request but has not exited. 
+ function _reportValidatorExitDelay( + uint256 _stakingModuleId, + uint256 _nodeOperatorId, + uint256 _proofSlotTimestamp, + bytes calldata _publicKey, + uint256 _eligibleToExitInSec + ) public { + SRUtils._requireModuleIdExists(_stakingModuleId); + _stakingModuleId.getIStakingModule() + .reportValidatorExitDelay(_nodeOperatorId, _proofSlotTimestamp, _publicKey, _eligibleToExitInSec); + } + + /// @notice Handles the triggerable exit event for a set of validators. + /// @dev This function is called when validators are exited using triggerable exit requests on the Execution Layer. + /// @param validatorExitData An array of `ValidatorExitData` structs, each representing a validator + /// for which a triggerable exit was requested. Each entry includes: + /// - `stakingModuleId`: ID of the staking module. + /// - `nodeOperatorId`: ID of the node operator. + /// - `pubkey`: Validator public key, 48 bytes length. + /// @param _withdrawalRequestPaidFee Fee amount paid to send a withdrawal request on the Execution Layer (EL). + /// @param _exitType The type of exit being performed. + /// This parameter may be interpreted differently across various staking modules depending on their specific implementation. + function _onValidatorExitTriggered( + ValidatorExitData[] calldata validatorExitData, + uint256 _withdrawalRequestPaidFee, + uint256 _exitType + ) public { + ValidatorExitData calldata data; + for (uint256 i = 0; i < validatorExitData.length; ++i) { + data = validatorExitData[i]; + SRUtils._requireModuleIdExists(data.stakingModuleId); + try data.stakingModuleId.getIStakingModule() + .onValidatorExitTriggered(data.nodeOperatorId, data.pubkey, _withdrawalRequestPaidFee, _exitType) {} + catch (bytes memory lowLevelRevertData) { + /// @dev This check is required to prevent incorrect gas estimation of the method. 
+ /// Without it, Ethereum nodes that use binary search for gas estimation may + /// return an invalid value when the onValidatorExitTriggered() + /// reverts because of the "out of gas" error. Here we assume that the + /// onValidatorExitTriggered() method doesn't have reverts with + /// empty error data except "out of gas". + if (lowLevelRevertData.length == 0) revert ISRBase.UnrecoverableModuleError(); + emit ISRBase.StakingModuleExitNotificationFailed(data.stakingModuleId, data.nodeOperatorId, data.pubkey); + } + } + } + + /// @notice Reports the minted rewards to the staking modules with the specified ids. + /// @param _stakingModuleIds Ids of the staking modules. + /// @param _totalShares Total shares minted for the staking modules. + /// @dev The function is restricted to the `REPORT_REWARDS_MINTED_ROLE` role. + function _reportRewardsMinted(uint256[] calldata _stakingModuleIds, uint256[] calldata _totalShares) public { + uint256 n = _stakingModuleIds.length; + if (_totalShares.length != n) revert ISRBase.ArraysLengthMismatch(); + + for (uint256 i = 0; i < n; ++i) { + if (_totalShares[i] == 0) continue; + SRUtils._requireModuleIdExists(_stakingModuleIds[i]); + + try _stakingModuleIds[i].getIStakingModule().onRewardsMinted(_totalShares[i]) {} + catch (bytes memory lowLevelRevertData) { + /// @dev This check is required to prevent incorrect gas estimation of the method. + /// Without it, Ethereum nodes that use binary search for gas estimation may + /// return an invalid value when the onRewardsMinted() reverts because of the + /// "out of gas" error. Here we assume that the onRewardsMinted() method doesn't + /// have reverts with empty error data except "out of gas". + if (lowLevelRevertData.length == 0) revert ISRBase.UnrecoverableModuleError(); + emit ISRBase.RewardsMintedReportFailed(_stakingModuleIds[i], lowLevelRevertData); + } + } + } + + /// @notice Finalizes the reporting of the exited validators counts for the current + /// reporting frame. 
+ /// + /// @dev Called by the oracle when the second phase of data reporting finishes, i.e. when the + /// oracle submitted the complete data on the exited validator counts per node operator + /// for the current reporting frame. See the docs for `updateExitedValidatorsCountByStakingModule` + /// for the description of the overall update process. + /// + /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role. + function _onValidatorsCountsByNodeOperatorReportingFinished() public { + uint256 modulesCount = SRStorage.getModulesCount(); + + for (uint256 i; i < modulesCount; ++i) { + uint256 moduleId = SRStorage.getModuleIdAt(i); + ModuleState storage state = moduleId.getModuleState(); + IStakingModule stakingModule = state.getIStakingModule(); + + (uint256 exitedValidatorsCount,,) = _getStakingModuleSummary(stakingModule); + if (exitedValidatorsCount != state.accounting.exitedValidatorsCount) continue; + + // oracle finished updating exited validators for all node ops + try stakingModule.onExitedAndStuckValidatorsCountsUpdated() {} + catch (bytes memory lowLevelRevertData) { + /// @dev This check is required to prevent incorrect gas estimation of the method. + /// Without it, Ethereum nodes that use binary search for gas estimation may + /// return an invalid value when the onExitedAndStuckValidatorsCountsUpdated() + /// reverts because of the "out of gas" error. Here we assume that the + /// onExitedAndStuckValidatorsCountsUpdated() method doesn't have reverts with + /// empty error data except "out of gas". + if (lowLevelRevertData.length == 0) revert ISRBase.UnrecoverableModuleError(); + emit ISRBase.ExitedAndStuckValidatorsCountsUpdateFailed(moduleId, lowLevelRevertData); + } + } + } + + /// @notice Decreases vetted signing keys counts per node operator for the staking module with + /// the specified id. + /// @param _stakingModuleId The id of the staking module to be updated. 
+ /// @param _nodeOperatorIds Ids of the node operators to be updated. + /// @param _vettedSigningKeysCounts New counts of vetted signing keys for the specified node operators. + /// @dev The function is restricted to the `STAKING_MODULE_UNVETTING_ROLE` role. + function _decreaseStakingModuleVettedKeysCountByNodeOperator( + uint256 _stakingModuleId, + bytes calldata _nodeOperatorIds, + bytes calldata _vettedSigningKeysCounts + ) public { + SRUtils._requireModuleIdExists(_stakingModuleId); + _checkOperatorsReportData(_nodeOperatorIds, _vettedSigningKeysCounts); + _stakingModuleId.getIStakingModule().decreaseVettedSigningKeysCount(_nodeOperatorIds, _vettedSigningKeysCounts); + } + + /// @notice Updates exited validators counts per node operator for the staking module with + /// the specified id. See the docs for `updateExitedValidatorsCountByStakingModule` for the + /// description of the overall update process. + /// + /// @param _stakingModuleId The id of the staking modules to be updated. + /// @param _nodeOperatorIds Ids of the node operators to be updated. + /// @param _exitedValidatorsCounts New counts of exited validators for the specified node operators. + function _reportStakingModuleOperatorExitedValidators( + uint256 _stakingModuleId, + bytes calldata _nodeOperatorIds, + bytes calldata _exitedValidatorsCounts + ) public { + SRUtils._requireModuleIdExists(_stakingModuleId); + _checkOperatorsReportData(_nodeOperatorIds, _exitedValidatorsCounts); + _stakingModuleId.getIStakingModule().updateExitedValidatorsCount(_nodeOperatorIds, _exitedValidatorsCounts); + } + + /// @notice Updates total numbers of exited validators for staking modules with the specified module ids. + /// @param _stakingModuleIds Ids of the staking modules to be updated. + /// @param _exitedValidatorsCounts New counts of exited validators for the specified staking modules. + /// @return The total increase in the aggregate number of exited validators across all updated modules. 
+ /// + /// @dev The total numbers are stored in the staking router and can differ from the totals obtained by calling + /// `IStakingModule.getStakingModuleSummary()`. The overall process of updating validator counts is the following: + /// + /// 1. In the first data submission phase, the oracle calls `updateExitedValidatorsCountByStakingModule` on the + /// staking router, passing the totals by module. The staking router stores these totals and uses them to + /// distribute new stake and staking fees between the modules. There can only be single call of this function + /// per oracle reporting frame. + /// + /// 2. In the second part of the second data submission phase, the oracle calls + /// `StakingRouter.reportStakingModuleExitedValidatorsCountByNodeOperator` on the staking router which passes + /// the counts by node operator to the staking module by calling `IStakingModule.updateExitedValidatorsCount`. + /// This can be done multiple times for the same module, passing data for different subsets of node + /// operators. + /// + /// 3. At the end of the second data submission phase, it's expected for the aggregate exited validators count + /// across all module's node operators (stored in the module) to match the total count for this module + /// (stored in the staking router). However, it might happen that the second phase of data submission doesn't + /// finish until the new oracle reporting frame is started, in which case staking router will emit ISRBase.a warning + /// event `StakingModuleExitedValidatorsIncompleteReporting` when the first data submission phase is performed + /// for a new reporting frame. This condition will result in the staking module having an incomplete data about + /// the exited validator counts during the whole reporting frame. Handling this condition is + /// the responsibility of each staking module. + /// + /// 4. When the second reporting phase is finished, i.e. 
when the oracle submitted the complete data on the exited + /// validator counts per node operator for the current reporting frame, the oracle calls + /// `StakingRouter.onValidatorsCountsByNodeOperatorReportingFinished` which, in turn, calls + /// `IStakingModule.onExitedAndStuckValidatorsCountsUpdated` on all modules. + /// + /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role. + function _updateExitedValidatorsCountByStakingModule( + uint256[] calldata _stakingModuleIds, + uint256[] calldata _exitedValidatorsCounts + ) public returns (uint256) { + uint256 n = _stakingModuleIds.length; + if (_exitedValidatorsCounts.length != n) revert ISRBase.ArraysLengthMismatch(); + + uint256 newlyExitedValidatorsCount; + + for (uint256 i = 0; i < n; ++i) { + uint256 moduleId = _stakingModuleIds[i]; + SRUtils._requireModuleIdExists(moduleId); + ModuleState storage state = moduleId.getModuleState(); + ModuleStateAccounting storage moduleAcc = state.accounting; + uint64 prevReportedExitedValidatorsCount = moduleAcc.exitedValidatorsCount; + + uint64 newReportedExitedValidatorsCount = SafeCast.toUint64(_exitedValidatorsCounts[i]); + + if (newReportedExitedValidatorsCount < prevReportedExitedValidatorsCount) { + revert ISRBase.ExitedValidatorsCountCannotDecrease(); + } + + (uint256 totalExitedValidators, uint256 totalDepositedValidators,) = + _getStakingModuleSummary(state.getIStakingModule()); + + if (newReportedExitedValidatorsCount > totalDepositedValidators) { + revert ISRBase.ReportedExitedValidatorsExceedDeposited( + newReportedExitedValidatorsCount, totalDepositedValidators + ); + } + + newlyExitedValidatorsCount += newReportedExitedValidatorsCount - prevReportedExitedValidatorsCount; + + if (totalExitedValidators < prevReportedExitedValidatorsCount) { + // not all of the exited validators were async reported to the module + unchecked { + emit ISRBase.StakingModuleExitedValidatorsIncompleteReporting( + moduleId, prevReportedExitedValidatorsCount - 
totalExitedValidators + ); + } + } + + // save new value + moduleAcc.exitedValidatorsCount = newReportedExitedValidatorsCount; + } + + return newlyExitedValidatorsCount; + } + + /// @notice Sets exited validators count for the given module and given node operator in that module + /// without performing critical safety checks, e.g. that exited validators count cannot decrease. + /// + /// Should only be used by the DAO in extreme cases and with sufficient precautions to correct invalid + /// data reported by the oracle committee due to a bug in the oracle daemon. + /// + /// @param _stakingModuleId Id of the staking module. + /// @param _nodeOperatorId Id of the node operator. + /// @param _triggerUpdateFinish Whether to call `onExitedAndStuckValidatorsCountsUpdated` on the module + /// after applying the corrections. + /// @param _correction See the docs for the `ValidatorsCountsCorrection` struct. + /// + /// @dev Reverts if the current numbers of exited validators of the module and node operator + /// don't match the supplied expected current values. + /// + /// @dev The function is restricted to the `UNSAFE_SET_EXITED_VALIDATORS_ROLE` role. 
+ function _unsafeSetExitedValidatorsCount( + uint256 _stakingModuleId, + uint256 _nodeOperatorId, + bool _triggerUpdateFinish, + ValidatorsCountsCorrection calldata _correction + ) public { + SRUtils._requireModuleIdExists(_stakingModuleId); + ModuleState storage state = _stakingModuleId.getModuleState(); + ModuleStateAccounting storage moduleAcc = state.accounting; + uint64 prevReportedExitedValidatorsCount = moduleAcc.exitedValidatorsCount; + IStakingModule stakingModule = state.getIStakingModule(); + + (,,,,, uint256 totalExitedValidators,,) = stakingModule.getNodeOperatorSummary(_nodeOperatorId); + + if ( + _correction.currentModuleExitedValidatorsCount != prevReportedExitedValidatorsCount + || _correction.currentNodeOperatorExitedValidatorsCount != totalExitedValidators + ) { + revert ISRBase.UnexpectedCurrentValidatorsCount(prevReportedExitedValidatorsCount, totalExitedValidators); + } + + moduleAcc.exitedValidatorsCount = SafeCast.toUint64(_correction.newModuleExitedValidatorsCount); + + stakingModule.unsafeUpdateValidatorsCount(_nodeOperatorId, _correction.newNodeOperatorExitedValidatorsCount); + + (uint256 moduleTotalExitedValidators, uint256 moduleTotalDepositedValidators,) = + _getStakingModuleSummary(stakingModule); + + if (_correction.newModuleExitedValidatorsCount > moduleTotalDepositedValidators) { + revert ISRBase.ReportedExitedValidatorsExceedDeposited( + _correction.newModuleExitedValidatorsCount, moduleTotalDepositedValidators + ); + } + + if (_triggerUpdateFinish) { + if (moduleTotalExitedValidators != _correction.newModuleExitedValidatorsCount) { + revert ISRBase.UnexpectedFinalExitedValidatorsCount( + moduleTotalExitedValidators, _correction.newModuleExitedValidatorsCount + ); + } + + stakingModule.onExitedAndStuckValidatorsCountsUpdated(); + } + } + + /// @dev report MUST include all modules in the same order as they are registered in the SR + function _validateReportValidatorBalancesByStakingModule( + uint256[] calldata _stakingModuleIds, + 
uint256[] calldata _validatorBalancesGwei + ) public view { + uint256 n = SRStorage.getModulesCount(); + + if (_stakingModuleIds.length != n || _validatorBalancesGwei.length != n) { + revert ISRBase.ArraysLengthMismatch(); + } + + for (uint256 i = 0; i < n; ++i) { + uint256 moduleId = SRStorage.getModuleIdAt(i); + if (moduleId != _stakingModuleIds[i]) revert ISRBase.UnexpectedModuleId(moduleId, _stakingModuleIds[i]); + + SRUtils._ensureAmountGwei(_validatorBalancesGwei[i]); + } + } + + /// @dev report MUST include all modules in the same order as they are registered in the SR + function _reportValidatorBalancesByStakingModule( + uint256[] calldata _stakingModuleIds, + uint256[] calldata _validatorBalancesGwei + ) public { + _validateReportValidatorBalancesByStakingModule(_stakingModuleIds, _validatorBalancesGwei); + + uint256 n = _stakingModuleIds.length; + uint64 totalValidatorsBalanceGwei; + for (uint256 i = 0; i < n; ++i) { + uint256 moduleId = _stakingModuleIds[i]; + ModuleStateAccounting storage moduleAcc = moduleId.getModuleState().accounting; + uint64 validatorsBalanceGwei = uint64(_validatorBalancesGwei[i]); + + moduleAcc.validatorsBalanceGwei = validatorsBalanceGwei; + + totalValidatorsBalanceGwei += validatorsBalanceGwei; + } + RouterStateAccounting storage routerAcc = SRStorage.getRouterState().accounting; + routerAcc.validatorsBalanceGwei = totalValidatorsBalanceGwei; + } + + /// @dev Save the last deposit state for the staking module + /// @param _moduleId id of the staking module to be deposited + function _updateModuleLastDepositState(uint256 _moduleId) public { + ModuleStateDeposits storage stateDeposits = _moduleId.getModuleState().deposits; + + stateDeposits.lastDepositAt = uint64(block.timestamp); + stateDeposits.lastDepositBlock = uint64(block.number); + } + + function _notifyStakingModulesOfWithdrawalCredentialsChange() public { + uint256 modulesCount = SRStorage.getModulesCount(); + + for (uint256 i; i < modulesCount; ++i) { + uint256 moduleId 
= SRStorage.getModuleIdAt(i); + + try moduleId.getIStakingModule().onWithdrawalCredentialsChanged() {} + catch (bytes memory lowLevelRevertData) { + if (lowLevelRevertData.length == 0) revert ISRBase.UnrecoverableModuleError(); + if (moduleId.getModuleState().config.status == StakingModuleStatus.Active) { + _setModuleStatus(moduleId, StakingModuleStatus.DepositsPaused); + } + emit ISRBase.WithdrawalsCredentialsChangeFailed(moduleId, lowLevelRevertData); + } + } + } + + function _checkOperatorsReportData(bytes calldata _ids, bytes calldata _values) internal pure { + if (_ids.length % 8 != 0 || _values.length % 16 != 0) { + revert ISRBase.InvalidReportData(3); + } + uint256 count = _ids.length / 8; + if (_values.length / 16 != count) { + revert ISRBase.InvalidReportData(2); + } + if (count == 0) { + revert ISRBase.InvalidReportData(1); + } + } +} diff --git a/contracts/0.8.25/sr/SRStorage.sol b/contracts/0.8.25/sr/SRStorage.sol new file mode 100644 index 0000000000..29ce99a053 --- /dev/null +++ b/contracts/0.8.25/sr/SRStorage.sol @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity 0.8.25; + +import {EnumerableSet} from "@openzeppelin/contracts-v5.2/utils/structs/EnumerableSet.sol"; +import {IStakingModule} from "contracts/common/interfaces/IStakingModule.sol"; +import {IStakingModuleV2} from "contracts/common/interfaces/IStakingModuleV2.sol"; +import {ModuleState, RouterState} from "./SRTypes.sol"; + +library SRStorage { + using EnumerableSet for EnumerableSet.UintSet; + using SRStorage for ModuleState; + + /// @dev RouterState storage position + bytes32 internal constant ROUTER_STORAGE_POSITION = keccak256( + abi.encode(uint256(keccak256(abi.encodePacked("lido.StakingRouter.routerStorage"))) - 1) + ) & ~bytes32(uint256(0xff)); + + /// @dev get RouterState storage reference + function getRouterState() internal pure returns (RouterState storage $) { + bytes32 _position = ROUTER_STORAGE_POSITION; + assembly ("memory-safe") { + $.slot := _position + } 
+ } + + /** + * Module state helpers + */ + + function getModuleState(uint256 _moduleId) internal view returns (ModuleState storage) { + return getRouterState().moduleStates[_moduleId]; + } + + function getIStakingModule(ModuleState storage $) internal view returns (IStakingModule) { + return IStakingModule($.config.moduleAddress); + } + + function getIStakingModuleV2(ModuleState storage $) internal view returns (IStakingModuleV2) { + return IStakingModuleV2($.config.moduleAddress); + } + + function getIStakingModule(uint256 _moduleId) internal view returns (IStakingModule) { + return getModuleState(_moduleId).getIStakingModule(); + } + + function getIStakingModuleV2(uint256 _moduleId) internal view returns (IStakingModuleV2) { + return getModuleState(_moduleId).getIStakingModuleV2(); + } + + /** + * ModuleIds set helpers + */ + + function getModulesCount() internal view returns (uint256) { + return getRouterState().moduleIds.length(); + } + + function getModuleIds() internal view returns (uint256[] memory) { + return getRouterState().moduleIds.values(); + } + + function getModuleIdAt(uint256 _idx) internal view returns (uint256) { + return getRouterState().moduleIds.at(_idx); + } + + function isModuleExists(uint256 _moduleId) internal view returns (bool) { + return getRouterState().moduleIds.contains(_moduleId); + } + + /// @notice get module inner position in the list of modules (1-based) + /// @dev direct access to EnumerableSet internal storage + function getModuleIdInnerPosition(uint256 _moduleId) internal view returns (uint256) { + return getRouterState().moduleIds._inner._positions[bytes32(_moduleId)]; + } + + function addModuleId(uint256 _moduleId) internal { + getRouterState().moduleIds.add(_moduleId); + } +} diff --git a/contracts/0.8.25/sr/SRTypes.sol b/contracts/0.8.25/sr/SRTypes.sol new file mode 100644 index 0000000000..50f13ac8d1 --- /dev/null +++ b/contracts/0.8.25/sr/SRTypes.sol @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma 
solidity 0.8.25; + +import {EnumerableSet} from "@openzeppelin/contracts-v5.2/utils/structs/EnumerableSet.sol"; + +/** + * @title StakingRouter shared types + * @author KRogLA + */ + +interface ILido { + function getDepositableEther() external view returns (uint256); + function withdrawDepositableEther(uint256 _amount, uint256 _depositsCount) external; +} + +interface IAccountingOracle { + ///@dev returns a tuple instead of a structure to avoid allocating memory + function getProcessingState() + external + view + returns ( + uint256 currentFrameRefSlot, + uint256 processingDeadlineTime, + bytes32 mainDataHash, + bool mainDataSubmitted, + bytes32 extraDataHash, + uint256 extraDataFormat, + bool extraDataSubmitted, + uint256 extraDataItemsCount, + uint256 extraDataItemsSubmitted + ); + function getLastProcessingRefSlot() external view returns (uint256); +} + +/// @dev Since `enum` is `uint8` by nature, so the `status` is stored as `uint8` to avoid +/// possible problems when upgrading. But for human readability, we use `enum` as +/// function parameter type. More about conversion in the docs: +/// https://docs.soliditylang.org/en/v0.8.17/types.html#enums +enum StakingModuleStatus { + Active, // deposits and rewards allowed + DepositsPaused, // deposits NOT allowed, rewards allowed + Stopped // deposits and rewards NOT allowed +} + +/// @notice Configuration parameters for a staking module. +/// @dev Used when adding or updating a staking module to set operational limits, fee parameters, +/// and withdrawal credential type. +struct StakingModuleConfig { + /// @notice Maximum stake share that can be allocated to a module, in BP. + /// @dev Must be less than or equal to TOTAL_BASIS_POINTS (10_000 BP = 100%). + uint256 stakeShareLimit; + /// @notice Module's share threshold, upon crossing which, exits of validators from the module will be prioritized, in BP. 
+ /// @dev Must be less than or equal to TOTAL_BASIS_POINTS (10_000 BP = 100%) and + /// greater than or equal to `stakeShareLimit`. + uint256 priorityExitShareThreshold; + /// @notice Part of the fee taken from staking rewards that goes to the staking module, in BP. + /// @dev Together with `treasuryFee`, must not exceed TOTAL_BASIS_POINTS. + uint256 stakingModuleFee; + /// @notice Part of the fee taken from staking rewards that goes to the treasury, in BP. + /// @dev Together with `stakingModuleFee`, must not exceed TOTAL_BASIS_POINTS. + uint256 treasuryFee; + /// @notice The maximum number of validators that can be deposited in a single block. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// Value must not exceed type(uint64).max. + uint256 maxDepositsPerBlock; + /// @notice The minimum distance between deposits in blocks. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// Value must be > 0 and ≤ type(uint64).max. + uint256 minDepositBlockDistance; + /// @notice Withdrawal credentials type (0x01/0x02) + uint256 withdrawalCredentialsType; +} + +/// @dev old data struct, kept for backward compatibility +struct StakingModule { + /// @notice Unique id of the staking module. + uint24 id; + /// @notice Address of the staking module. + address stakingModuleAddress; + /// @notice Part of the fee taken from staking rewards that goes to the staking module. + uint16 stakingModuleFee; + /// @notice Part of the fee taken from staking rewards that goes to the treasury. + uint16 treasuryFee; + /// @notice Maximum stake share that can be allocated to a module, in BP. + /// @dev Formerly known as `targetShare`. + uint16 stakeShareLimit; + /// @notice Staking module status if staking module can not accept the deposits or can + /// participate in further reward distribution. + uint8 status; + /// @notice Name of the staking module. 
+ string name; + /// @notice block.timestamp of the last deposit of the staking module. + /// @dev NB: lastDepositAt gets updated even if the deposit value was 0 and no actual deposit happened. + uint64 lastDepositAt; + /// @notice block.number of the last deposit of the staking module. + /// @dev NB: lastDepositBlock gets updated even if the deposit value was 0 and no actual deposit happened. + uint256 lastDepositBlock; + /// @notice Number of exited validators. + uint256 exitedValidatorsCount; + /// @notice Module's share threshold, upon crossing which, exits of validators from the module will be prioritized, in BP. + uint16 priorityExitShareThreshold; + /// @notice The maximum number of validators that can be deposited in a single block. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// See docs for the `OracleReportSanityChecker.setAppearedValidatorsPerDayLimit` function. + uint64 maxDepositsPerBlock; + /// @notice The minimum distance between deposits in blocks. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// See docs for the `OracleReportSanityChecker.setAppearedValidatorsPerDayLimit` function). + uint64 minDepositBlockDistance; + /// @notice Withdrawal credentials type (0x01/0x02) + uint8 withdrawalCredentialsType; + /// @notice total actual balance of validators for module in Gwei. + uint64 validatorsBalanceGwei; + } + +/// @dev 1 storage slot +struct ModuleStateConfig { + /// @notice Address of the staking module. + address moduleAddress; + /// @notice Part of the fee taken from staking rewards that goes to the staking module. + uint16 moduleFee; + /// @notice Part of the fee taken from staking rewards that goes to the treasury. + uint16 treasuryFee; + /// @notice Maximum stake share that can be allocated to a module, in BP. 
+ uint16 stakeShareLimit; + /// @notice Module's share threshold, upon crossing which, exits of validators from the module will be prioritized, in BP. + uint16 priorityExitShareThreshold; + /// @notice Staking module status if staking module can not accept the deposits or can + /// participate in further reward distribution. + StakingModuleStatus status; + /// @notice Withdrawal credentials type (0x01/0x02) + uint8 withdrawalCredentialsType; + // uint8 _reserved; + // uint8 _reserved; +} + +/// @dev 1 storage slot +struct ModuleStateDeposits { + /// @notice block.timestamp of the last deposit of the staking module. + /// @dev NB: lastDepositAt gets updated even if the deposit value was 0 and no actual deposit happened. + uint64 lastDepositAt; + /// @notice block.number of the last deposit of the staking module. + /// @dev NB: lastDepositBlock gets updated even if the deposit value was 0 and no actual deposit happened. + uint64 lastDepositBlock; + /// @notice The maximum number of validators that can be deposited in a single block. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// See docs for the `OracleReportSanityChecker.setAppearedValidatorsPerDayLimit` function. + uint64 maxDepositsPerBlock; + /// @notice The minimum distance between deposits in blocks. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// See docs for the `OracleReportSanityChecker.setAppearedValidatorsPerDayLimit` function). + uint64 minDepositBlockDistance; +} + +/// @dev 1 storage slot +struct ModuleStateAccounting { + /// @notice total actual balance of validators for module in Gwei. + uint64 validatorsBalanceGwei; + /// @notice Cumulative number of exited validators for module + uint64 exitedValidatorsCount; + // uint64 _reserved; + // uint64 _reserved; +} + +struct RouterStateAccounting { + /// @notice total actual balance of validators in Gwei. 
+ uint64 validatorsBalanceGwei; + // uint64 _reserved; + // uint64 _reserved; + // uint64 _reserved; +} + +struct ModuleState { + /// @notice module config data + ModuleStateConfig config; // slot 0 + /// @notice deposits state data + ModuleStateDeposits deposits; // slot 1 + /// @notice accounting state data + ModuleStateAccounting accounting; // slot 2 + /// @notice Name of the staking module. + string name; // slot 3 +} + +struct RouterState { + // moduleId => ModuleState + mapping(uint256 => ModuleState) moduleStates; // slot 0 + EnumerableSet.UintSet moduleIds; // slot 1 + RouterStateAccounting accounting; // slot 2 + bytes32 withdrawalCredentials; // slot 3 + uint24 lastModuleId; +} + +/// @notice A summary of the staking module's validators. +struct StakingModuleSummary { + /// @notice The total number of validators in the EXITED state on the Consensus Layer. + /// @dev This value can't decrease in normal conditions. + uint256 totalExitedValidators; + /// @notice The total number of validators deposited via the official Deposit Contract. + /// @dev This value is a cumulative counter: even when the validator goes into EXITED state this + /// counter is not decreasing. + uint256 totalDepositedValidators; + /// @notice The number of validators in the set available for deposit + uint256 depositableValidatorsCount; +} + +/// @notice A summary of node operator and its validators. +/// @dev old data struct, kept for backward compatibility +struct NodeOperatorSummary { + /// @notice Shows whether the current target limit applied to the node operator. + uint256 targetLimitMode; + /// @notice Relative target active validators limit for operator. + uint256 targetValidatorsCount; + /// @notice The number of validators with an expired request to exit time. + /// @dev [deprecated] Stuck key processing has been removed, this field is no longer used. 
+ uint256 stuckValidatorsCount; + /// @notice The number of validators that can't be withdrawn, but deposit costs were + /// compensated to the Lido by the node operator. + /// @dev [deprecated] Refunded validators processing has been removed, this field is no longer used. + uint256 refundedValidatorsCount; + /// @notice A time when the penalty for stuck validators stops applying to node operator rewards. + /// @dev [deprecated] Stuck key processing has been removed, this field is no longer used. + uint256 stuckPenaltyEndTimestamp; + /// @notice The total number of validators in the EXITED state on the Consensus Layer. + /// @dev This value can't decrease in normal conditions. + uint256 totalExitedValidators; + /// @notice The total number of validators deposited via the official Deposit Contract. + /// @dev This value is a cumulative counter: even when the validator goes into EXITED state this + /// counter is not decreasing. + uint256 totalDepositedValidators; + /// @notice The number of validators in the set available for deposit. + uint256 depositableValidatorsCount; +} + +/// @notice A collection of the staking module data stored across the StakingRouter and the +/// staking module contract. +/// +/// @dev This data, first of all, is designed for off-chain usage and might be redundant for +/// on-chain calls. Give preference for dedicated methods for gas-efficient on-chain calls. +struct StakingModuleDigest { + /// @notice The number of node operators registered in the staking module. + uint256 nodeOperatorsCount; + /// @notice The number of node operators registered in the staking module in active state. + uint256 activeNodeOperatorsCount; + /// @notice The current state of the staking module taken from the StakingRouter. + StakingModule state; + /// @notice A summary of the staking module's validators. + StakingModuleSummary summary; +} + +/// @notice A collection of the node operator data stored in the staking module. 
+/// @dev This data, first of all, is designed for off-chain usage and might be redundant for +/// on-chain calls. Give preference for dedicated methods for gas-efficient on-chain calls. +struct NodeOperatorDigest { + /// @notice Id of the node operator. + uint256 id; + /// @notice Shows whether the node operator is active or not. + bool isActive; + /// @notice A summary of node operator and its validators. + NodeOperatorSummary summary; +} + +struct ValidatorsCountsCorrection { + /// @notice The expected current number of exited validators of the module that is + /// being corrected. + uint256 currentModuleExitedValidatorsCount; + /// @notice The expected current number of exited validators of the node operator + /// that is being corrected. + uint256 currentNodeOperatorExitedValidatorsCount; + /// @notice The corrected number of exited validators of the module. + uint256 newModuleExitedValidatorsCount; + /// @notice The corrected number of exited validators of the node operator. + uint256 newNodeOperatorExitedValidatorsCount; +} + +struct ValidatorExitData { + uint256 stakingModuleId; + uint256 nodeOperatorId; + bytes pubkey; +} diff --git a/contracts/0.8.25/sr/SRUtils.sol b/contracts/0.8.25/sr/SRUtils.sol new file mode 100644 index 0000000000..6b96f26663 --- /dev/null +++ b/contracts/0.8.25/sr/SRUtils.sol @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity 0.8.25; + +import {SRStorage} from "./SRStorage.sol"; +import {ModuleState} from "./SRTypes.sol"; +import {ISRBase} from "./ISRBase.sol"; +import {WithdrawalCredentials} from "contracts/common/lib/WithdrawalCredentials.sol"; + +/** + * @title StakingRouter utility functions + * @author KRogLA + */ +library SRUtils { + using SRStorage for ModuleState; + using SRStorage for uint256; // for module IDs + + uint256 public constant TOTAL_BASIS_POINTS = 10000; + uint256 public constant MAX_STAKING_MODULES_COUNT = 32; + /// @dev Restrict the name size with 31 bytes to storage in a single slot. 
+ uint256 public constant MAX_STAKING_MODULE_NAME_LENGTH = 31; + + /// @dev Large enough to fit all existing Ether per entity, yet overflow-safe when aggregating a reasonable number of entities + uint256 internal constant MAX_VALUE_GWEI = 1_000_000_000 ether / 1 gwei; // i.e. 1B ETH + + /** + * Validation + */ + + function _requireNotZero(uint256 _value) internal pure { + if (_value == 0) revert ISRBase.ZeroArgument(); + } + + function _requireNotZero(address _address) internal pure { + if (_address == address(0)) revert ISRBase.ZeroAddress(); + } + + function _requireWCTypeValid(uint256 _wcType) internal pure { + if (!WithdrawalCredentials.isTypeValid(_wcType)) revert ISRBase.WrongWithdrawalCredentialsType(); + } + + function _requireWCType2(uint256 _wcType) internal pure { + if (!WithdrawalCredentials.isType2(_wcType)) revert ISRBase.WrongWithdrawalCredentialsType(); + } + + function _requireModuleIdExists(uint256 _moduleId) internal view { + if (!SRStorage.isModuleExists(_moduleId)) revert ISRBase.StakingModuleUnregistered(); + } + + /** + * Module helpers + */ + + /// @dev will cause an overflow error if moduleId does not exist + /// @param moduleId module id + /// @return module index in the list of modules (0-based) + function _getModuleIndexById(uint256 moduleId) internal view returns (uint256) { + /// @dev convert from 1-based position + return SRStorage.getModuleIdInnerPosition(moduleId) - 1; + } + + /// @dev get validators (active) balance of the module in ETH (wei) + function _getModuleValidatorsBalance(uint256 moduleId) internal view returns (uint256) { + return _fromGwei(moduleId.getModuleState().accounting.validatorsBalanceGwei); + } + + /// @dev get total validators (active) balance of all modules in ETH + function _getTotalModulesValidatorsBalance() internal view returns (uint256) { + return _fromGwei(SRStorage.getRouterState().accounting.validatorsBalanceGwei); + } + + /** + * Amount helpers + */ + + /// @dev checks if the amount not exceeds a 
reasonable limit and converts it to uint64 + /// @param amountGwei checked amount in gwei + /// @return validated amount in gwei as uint64 + function _ensureAmountGwei(uint256 amountGwei) internal pure returns (uint64) { + if (amountGwei > MAX_VALUE_GWEI) { + revert ISRBase.InvalidAmountGwei(); + } + return uint64(amountGwei); + } + + /// @dev converts amount from wei to gwei + function _toGwei(uint256 amount) internal pure returns (uint64) { + return _ensureAmountGwei(amount / 1 gwei); + } + + /// @dev converts amount from gwei to wei + /// @dev skip _ensureAmountGwei for the input amount due to using the method only as a reverse + /// conversion to values saved via _toGwei + function _fromGwei(uint256 amount) internal pure returns (uint256) { + return amount * 1 gwei; + } +} diff --git a/contracts/0.8.25/sr/StakingRouter.sol b/contracts/0.8.25/sr/StakingRouter.sol new file mode 100644 index 0000000000..7690f03516 --- /dev/null +++ b/contracts/0.8.25/sr/StakingRouter.sol @@ -0,0 +1,1158 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +pragma solidity 0.8.25; + +import {Math} from "@openzeppelin/contracts-v5.2/utils/math/Math.sol"; +import { + AccessControlEnumerableUpgradeable, + EnumerableSet +} from "contracts/openzeppelin/5.2/upgradeable/access/extensions/AccessControlEnumerableUpgradeable.sol"; +import {BeaconChainDepositor, IDepositContract} from "contracts/0.8.25/lib/BeaconChainDepositor.sol"; +import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; +import {WithdrawalCredentials} from "contracts/common/lib/WithdrawalCredentials.sol"; +import {IStakingModule} from "contracts/common/interfaces/IStakingModule.sol"; +import {IStakingModuleV2} from "contracts/common/interfaces/IStakingModuleV2.sol"; +import {SRLib} from "./SRLib.sol"; +import {SRStorage} from "./SRStorage.sol"; +import {SRUtils} from "./SRUtils.sol"; +import {ISRBase} from "./ISRBase.sol"; + +import { + 
ModuleState, + StakingModuleStatus, + StakingModuleConfig, + ValidatorsCountsCorrection, + ValidatorExitData, + StakingModule, + StakingModuleSummary, + NodeOperatorSummary, + StakingModuleDigest, + NodeOperatorDigest, + ModuleStateConfig, + ModuleStateDeposits, + ModuleStateAccounting, + ILido +} from "./SRTypes.sol"; + +contract StakingRouter is ISRBase, AccessControlEnumerableUpgradeable { + using WithdrawalCredentials for bytes32; + using SRStorage for ModuleState; + using SRStorage for uint256; // for module IDs + using EnumerableSet for EnumerableSet.AddressSet; + + /// @dev ACL roles + bytes32 public constant MANAGE_WITHDRAWAL_CREDENTIALS_ROLE = keccak256("MANAGE_WITHDRAWAL_CREDENTIALS_ROLE"); + bytes32 public constant STAKING_MODULE_MANAGE_ROLE = keccak256("STAKING_MODULE_MANAGE_ROLE"); + bytes32 public constant STAKING_MODULE_SHARE_MANAGE_ROLE = keccak256("STAKING_MODULE_SHARE_MANAGE_ROLE"); + bytes32 public constant STAKING_MODULE_UNVETTING_ROLE = keccak256("STAKING_MODULE_UNVETTING_ROLE"); + bytes32 public constant REPORT_EXITED_VALIDATORS_ROLE = keccak256("REPORT_EXITED_VALIDATORS_ROLE"); + bytes32 public constant REPORT_VALIDATOR_EXITING_STATUS_ROLE = keccak256("REPORT_VALIDATOR_EXITING_STATUS_ROLE"); + bytes32 public constant REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE = keccak256("REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE"); + bytes32 public constant UNSAFE_SET_EXITED_VALIDATORS_ROLE = keccak256("UNSAFE_SET_EXITED_VALIDATORS_ROLE"); + bytes32 public constant REPORT_REWARDS_MINTED_ROLE = keccak256("REPORT_REWARDS_MINTED_ROLE"); + + uint256 public constant FEE_PRECISION_POINTS = 10 ** 20; // 100 * 10 ** 18 + uint64 internal constant PUBKEY_LENGTH = 48; + + IDepositContract public immutable DEPOSIT_CONTRACT; + ILido public immutable LIDO; + ILidoLocator public immutable LIDO_LOCATOR; + + /// @notice Max Effective Balance for Withdrawal Credentials types + /// @dev for Ethereum chain: 32 ether and 2048 ether + uint256 public immutable 
MAX_EFFECTIVE_BALANCE_WC_TYPE_01; + uint256 public immutable MAX_EFFECTIVE_BALANCE_WC_TYPE_02; + + /// @dev backward-compatible getter for a constant moved to a shared library + function INITIAL_DEPOSIT_SIZE() external view returns (uint256) { + return MAX_EFFECTIVE_BALANCE_WC_TYPE_01; + } + + /// @dev backward-compatible getter for a constant moved to a shared library + function TOTAL_BASIS_POINTS() external pure returns (uint256) { + return SRUtils.TOTAL_BASIS_POINTS; + } + + /// @dev backward-compatible getter for a constant moved to a shared library + function MAX_STAKING_MODULES_COUNT() external pure returns (uint256) { + return SRUtils.MAX_STAKING_MODULES_COUNT; + } + + /// @dev backward-compatible getter for a constant moved to a shared library + function MAX_STAKING_MODULE_NAME_LENGTH() external pure returns (uint256) { + return SRUtils.MAX_STAKING_MODULE_NAME_LENGTH; + } + + constructor( + address _depositContract, + address _lido, + address _lidoLocator, + uint256 _maxEBType1, + uint256 _maxEBType2 + ) { + SRUtils._requireNotZero(_depositContract); + SRUtils._requireNotZero(_lido); + SRUtils._requireNotZero(_lidoLocator); + + DEPOSIT_CONTRACT = IDepositContract(_depositContract); + LIDO = ILido(_lido); + LIDO_LOCATOR = ILidoLocator(_lidoLocator); + + SRUtils._requireNotZero(_maxEBType1); + SRUtils._requireNotZero(_maxEBType2); + MAX_EFFECTIVE_BALANCE_WC_TYPE_01 = _maxEBType1; + MAX_EFFECTIVE_BALANCE_WC_TYPE_02 = _maxEBType2; + + _disableInitializers(); + } + + /// @notice Initializes the contract. + /// @param _admin Lido DAO Aragon agent contract address. + /// @param _withdrawalCredentials 0x01 credentials to withdraw ETH on Consensus Layer side. + /// @dev Proxy initialization method. 
+ function initialize(address _admin, bytes32 _withdrawalCredentials) external reinitializer(4) { + if (_admin == address(0)) revert ZeroAddress(); + + _grantRole(DEFAULT_ADMIN_ROLE, _admin); + _setWithdrawalCredentials(_withdrawalCredentials); + } + + /// @notice A function to migrate upgrade to v4 (from v3) and use OpenZeppelin versioning. + function finalizeUpgrade_v4() external reinitializer(4) { + // migrate current modules to new storage + SRLib._migrateStorage(MAX_EFFECTIVE_BALANCE_WC_TYPE_01); + + /// @dev migrate OZ roles + /// Due to OZ 5.2 AccessControl uses ERC-7201 namespaced storage at different slots we should + /// migrate roles from old storage to new one. + /// We use only _roleMembers mapping and safely ignore the second mapping _roles, because + /// both mappings are updated atomically, so we only need one. + + // pre upgrade roles + bytes32[9] memory roles = [ + DEFAULT_ADMIN_ROLE, + MANAGE_WITHDRAWAL_CREDENTIALS_ROLE, + STAKING_MODULE_MANAGE_ROLE, + STAKING_MODULE_UNVETTING_ROLE, + REPORT_EXITED_VALIDATORS_ROLE, + REPORT_VALIDATOR_EXITING_STATUS_ROLE, + REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE, + UNSAFE_SET_EXITED_VALIDATORS_ROLE, + REPORT_REWARDS_MINTED_ROLE + ]; + + EnumerableSet.AddressSet storage members; + for (uint256 i = 0; i < roles.length; ++i) { + bytes32 role = roles[i]; + members = _getStorageRoleMembersOld()[role]; + for (uint256 j; j < members.length(); ++j) { + _grantRole(role, members.at(j)); + } + } + } + + /// @dev Helper for migration - returns OZ AccessControlEnumerable _roleMembers mapping storage reference + function _getStorageRoleMembersOld() private pure returns (mapping(bytes32 => EnumerableSet.AddressSet) storage $) { + /// @dev Old _roleMembers storage slot. + bytes32 position = keccak256("openzeppelin.AccessControlEnumerable._roleMembers"); + assembly ("memory-safe") { + $.slot := position + } + } + + /// @dev Prohibit direct transfer to contract. 
+ receive() external payable { + revert DirectETHTransfer(); + } + + /// @notice Registers a new staking module. + /// @param _name Name of staking module. + /// @param _stakingModuleAddress Address of staking module. + /// @param _stakingModuleConfig Staking module config + /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. + function addStakingModule( + string calldata _name, + address _stakingModuleAddress, + StakingModuleConfig calldata _stakingModuleConfig + ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { + uint256 newModuleId = SRLib._addModule(_stakingModuleAddress, _name, _stakingModuleConfig); + + /// @dev Simulate last deposit state to prevent real deposits into the new ModuleState via + /// DepositSecurityModule just after the addition. + _updateModuleLastDepositState(newModuleId, 0); + } + + /// @notice Updates staking module params. + /// @param _stakingModuleId Staking module id. + // @param _stakingModuleConfig Staking module config + /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. + function updateStakingModule( + uint256 _stakingModuleId, + uint256 _stakeShareLimit, + uint256 _priorityExitShareThreshold, + uint256 _stakingModuleFee, + uint256 _treasuryFee, + uint256 _maxDepositsPerBlock, + uint256 _minDepositBlockDistance + ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { + SRUtils._requireModuleIdExists(_stakingModuleId); + SRLib._updateModuleParams( + _stakingModuleId, + _stakeShareLimit, + _priorityExitShareThreshold, + _stakingModuleFee, + _treasuryFee, + _maxDepositsPerBlock, + _minDepositBlockDistance + ); + } + + /// @notice Updates fees for all staking modules in a single atomic operation. + /// @param _stakingModuleFees New staking module fee values in the current module iteration order (returned by `getStakingModuleIds()`). + /// @param _treasuryFees New treasury fee values in the current module iteration order. 
+ /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. + function updateAllStakingModulesFees(uint256[] calldata _stakingModuleFees, uint256[] calldata _treasuryFees) + external + onlyRole(STAKING_MODULE_MANAGE_ROLE) + { + SRLib._updateAllModuleFees(_stakingModuleFees, _treasuryFees); + } + + /// @notice Updates staking module share params. + /// @param _stakingModuleId Staking module id. + /// @param _stakeShareLimit New stake share limit value. + /// @param _priorityExitShareThreshold New priority exit share threshold value. + /// @dev The function is restricted to the `STAKING_MODULE_SHARE_MANAGE_ROLE` role. + function updateModuleShares(uint256 _stakingModuleId, uint16 _stakeShareLimit, uint16 _priorityExitShareThreshold) + external + onlyRole(STAKING_MODULE_SHARE_MANAGE_ROLE) + { + SRUtils._requireModuleIdExists(_stakingModuleId); + SRLib._updateModuleShares(_stakingModuleId, _stakeShareLimit, _priorityExitShareThreshold); + } + + /// @notice Updates the limit of the validators that can be used for deposit. + /// @param _stakingModuleId Id of the staking module. + /// @param _nodeOperatorId Id of the node operator. + /// @param _targetLimitMode Target limit mode. + /// @param _targetLimit Target limit of the node operator. + /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. + function updateTargetValidatorsLimits( + uint256 _stakingModuleId, + uint256 _nodeOperatorId, + uint256 _targetLimitMode, + uint256 _targetLimit + ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { + SRUtils._requireModuleIdExists(_stakingModuleId); + _stakingModuleId.getIStakingModule() + .updateTargetValidatorsLimits(_nodeOperatorId, _targetLimitMode, _targetLimit); + } + + /// @dev See {SRLib._reportRewardsMinted}. + /// + /// @dev The function is restricted to the `REPORT_REWARDS_MINTED_ROLE` role. 
    function reportRewardsMinted(uint256[] calldata _stakingModuleIds, uint256[] calldata _totalShares)
        external
        onlyRole(REPORT_REWARDS_MINTED_ROLE)
    {
        SRLib._reportRewardsMinted(_stakingModuleIds, _totalShares);
    }

    /// @dev See {SRLib._updateExitedValidatorsCountByStakingModule}.
    ///
    /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role.
    function updateExitedValidatorsCountByStakingModule(
        uint256[] calldata _stakingModuleIds,
        uint256[] calldata _exitedValidatorsCounts
    ) external onlyRole(REPORT_EXITED_VALIDATORS_ROLE) returns (uint256) {
        return SRLib._updateExitedValidatorsCountByStakingModule(_stakingModuleIds, _exitedValidatorsCounts);
    }

    /// @notice Reports validator balances (in gwei, per parameter name) per staking module.
    /// @dev The function is restricted to the same role as `updateExitedValidatorsCountByStakingModule`,
    /// i.e. `REPORT_EXITED_VALIDATORS_ROLE` role.
    function reportValidatorBalancesByStakingModule(
        uint256[] calldata _stakingModuleIds,
        uint256[] calldata _validatorBalancesGwei
    ) external onlyRole(REPORT_EXITED_VALIDATORS_ROLE) {
        SRLib._reportValidatorBalancesByStakingModule(_stakingModuleIds, _validatorBalancesGwei);
    }

    /// @notice Validates a validator balances report against the current StakingRouter module set and limits.
    /// @dev View-only pre-check; reverts on an invalid report instead of returning a value.
    function validateReportValidatorBalancesByStakingModule(
        uint256[] calldata _stakingModuleIds,
        uint256[] calldata _validatorBalancesGwei
    ) external view {
        SRLib._validateReportValidatorBalancesByStakingModule(_stakingModuleIds, _validatorBalancesGwei);
    }

    /// @dev See {SRLib._reportStakingModuleOperatorExitedValidators}.
    ///
    /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role.
    function reportStakingModuleExitedValidatorsCountByNodeOperator(
        uint256 _stakingModuleId,
        bytes calldata _nodeOperatorIds,
        bytes calldata _exitedValidatorsCounts
    ) external onlyRole(REPORT_EXITED_VALIDATORS_ROLE) {
        SRLib._reportStakingModuleOperatorExitedValidators(_stakingModuleId, _nodeOperatorIds, _exitedValidatorsCounts);
    }

    /// @dev DEPRECATED
    /// @dev See {SRLib._unsafeSetExitedValidatorsCount}.
    function unsafeSetExitedValidatorsCount(
        uint256 _stakingModuleId,
        uint256 _nodeOperatorId,
        bool _triggerUpdateFinish,
        ValidatorsCountsCorrection calldata _correction
    ) external onlyRole(UNSAFE_SET_EXITED_VALIDATORS_ROLE) {
        SRLib._unsafeSetExitedValidatorsCount(_stakingModuleId, _nodeOperatorId, _triggerUpdateFinish, _correction);
    }

    /// @dev See {SRLib._onValidatorsCountsByNodeOperatorReportingFinished}.
    ///
    /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role.
    function onValidatorsCountsByNodeOperatorReportingFinished() external onlyRole(REPORT_EXITED_VALIDATORS_ROLE) {
        SRLib._onValidatorsCountsByNodeOperatorReportingFinished();
    }

    /// @dev See {SRLib._decreaseStakingModuleVettedKeysCountByNodeOperator}.
    ///
    /// @dev The function is restricted to the `STAKING_MODULE_UNVETTING_ROLE` role.
    function decreaseStakingModuleVettedKeysCountByNodeOperator(
        uint256 _stakingModuleId,
        bytes calldata _nodeOperatorIds,
        bytes calldata _vettedSigningKeysCounts
    ) external onlyRole(STAKING_MODULE_UNVETTING_ROLE) {
        SRLib._decreaseStakingModuleVettedKeysCountByNodeOperator(
            _stakingModuleId, _nodeOperatorIds, _vettedSigningKeysCounts
        );
    }

    /// @dev See {SRLib._reportValidatorExitDelay}.
    /// @dev The function is restricted to the `REPORT_VALIDATOR_EXITING_STATUS_ROLE` role.
    function reportValidatorExitDelay(
        uint256 _stakingModuleId,
        uint256 _nodeOperatorId,
        uint256 _proofSlotTimestamp,
        bytes calldata _publicKey,
        uint256 _eligibleToExitInSec
    ) external onlyRole(REPORT_VALIDATOR_EXITING_STATUS_ROLE) {
        SRLib._reportValidatorExitDelay(
            _stakingModuleId, _nodeOperatorId, _proofSlotTimestamp, _publicKey, _eligibleToExitInSec
        );
    }

    /// @dev See {SRLib._onValidatorExitTriggered}.
    /// @dev The function is restricted to the `REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE` role.
    function onValidatorExitTriggered(
        ValidatorExitData[] calldata validatorExitData,
        uint256 _withdrawalRequestPaidFee,
        uint256 _exitType
    ) external onlyRole(REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE) {
        SRLib._onValidatorExitTriggered(validatorExitData, _withdrawalRequestPaidFee, _exitType);
    }

    /// @notice Returns all registered staking modules.
    /// @return moduleStates Array of staking modules.
    function getStakingModules() external view returns (StakingModule[] memory) {
        uint256 modulesCount = SRStorage.getModulesCount();
        StakingModule[] memory moduleStates = new StakingModule[](modulesCount);

        for (uint256 i; i < modulesCount; ++i) {
            moduleStates[i] = _getModuleStateCompat(SRStorage.getModuleIdAt(i));
        }
        return moduleStates;
    }

    /// @notice Returns state for staking modules.
    /// @param _stakingModuleId Id of the staking module.
    /// @return stateConfig staking modules config state
    function getStakingModuleStateConfig(uint256 _stakingModuleId)
        external
        view
        returns (ModuleStateConfig memory stateConfig)
    {
        (, stateConfig) = _getModuleState(_stakingModuleId);
    }

    /// @notice Returns the deposit-related state of a staking module.
    /// @param _stakingModuleId Id of the staking module.
    /// @return stateDeposits Deposit state (last deposit block/timestamp and per-block limits).
    function getStakingModuleStateDeposits(uint256 _stakingModuleId)
        external
        view
        returns (ModuleStateDeposits memory stateDeposits)
    {
        (ModuleState storage state,) = _getModuleState(_stakingModuleId);
        stateDeposits = state.deposits;
    }

    /// @notice Returns the accounting state of a staking module.
    /// @param _stakingModuleId Id of the staking module.
    /// @return validatorsBalanceGwei Reported validators balance (in gwei, per field name).
    /// @return exitedValidatorsCount Reported exited validators count.
    function getStakingModuleStateAccounting(uint256 _stakingModuleId)
        external
        view
        returns (uint64 validatorsBalanceGwei, uint64 exitedValidatorsCount)
    {
        (ModuleState storage state,) = _getModuleState(_stakingModuleId);
        ModuleStateAccounting memory moduleAcc = state.accounting;
        return (moduleAcc.validatorsBalanceGwei, moduleAcc.exitedValidatorsCount);
    }

    /// @notice Returns the ids of all registered staking modules.
    /// @return stakingModuleIds Array of staking module ids.
    function getStakingModuleIds() external view returns (uint256[] memory) {
        return SRStorage.getModuleIds();
    }

    /// @notice Returns the staking module by its id.
    /// @param _stakingModuleId Id of the staking module.
    /// @return moduleState Staking module data.
    function getStakingModule(uint256 _stakingModuleId) external view returns (StakingModule memory) {
        SRUtils._requireModuleIdExists(_stakingModuleId);
        return _getModuleStateCompat(_stakingModuleId);
    }

    /// @notice Returns total number of staking modules.
    /// @return Total number of staking modules.
    function getStakingModulesCount() external view returns (uint256) {
        return SRStorage.getModulesCount();
    }

    /// @notice Returns true if staking module with the given id was registered via `addStakingModule`, false otherwise.
    /// @param _stakingModuleId Id of the staking module.
    /// @return True if staking module with the given id was registered, false otherwise.
    function hasStakingModule(uint256 _stakingModuleId) public view returns (bool) {
        return SRStorage.isModuleExists(_stakingModuleId);
    }

    /// @notice Returns status of staking module.
    /// @param _stakingModuleId Id of the staking module.
    /// @return Status of the staking module.
    function getStakingModuleStatus(uint256 _stakingModuleId) public view returns (StakingModuleStatus) {
        SRUtils._requireModuleIdExists(_stakingModuleId);
        return _stakingModuleId.getModuleState().config.status;
    }

    /// @notice Returns the initialized version of this contract (see Versioned-style storage).
    function getContractVersion() external view returns (uint256) {
        return _getInitializedVersion();
    }

    /// @notice Returns all-validators summary in the staking module.
    /// @param _stakingModuleId Id of the staking module to return summary for.
    /// @return summary Staking module summary.
    function getStakingModuleSummary(uint256 _stakingModuleId)
        external
        view
        returns (StakingModuleSummary memory summary)
    {
        SRUtils._requireModuleIdExists(_stakingModuleId);
        return _getStakingModuleSummaryStruct(_stakingModuleId);
    }

    /// @notice Returns node operator summary from the staking module.
    /// @param _stakingModuleId Id of the staking module where node operator is onboarded.
    /// @param _nodeOperatorId Id of the node operator to return summary for.
    /// @return summary Node operator summary.
    function getNodeOperatorSummary(uint256 _stakingModuleId, uint256 _nodeOperatorId)
        external
        view
        returns (NodeOperatorSummary memory summary)
    {
        SRUtils._requireModuleIdExists(_stakingModuleId);
        return _getNodeOperatorSummary(_stakingModuleId.getIStakingModule(), _nodeOperatorId);
    }

    /// @notice Returns staking module digest for each staking module registered in the staking router.
    /// @return Array of staking module digests.
    /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs
    /// for data aggregation.
    function getAllStakingModuleDigests() external view returns (StakingModuleDigest[] memory) {
        return getStakingModuleDigests(SRStorage.getModuleIds());
    }

    /// @notice Returns staking module digest for passed staking module ids.
    /// @param _stakingModuleIds Ids of the staking modules to return data for.
    /// @return digests Array of staking module digests.
    /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs
    /// for data aggregation.
    function getStakingModuleDigests(uint256[] memory _stakingModuleIds)
        public
        view
        returns (StakingModuleDigest[] memory digests)
    {
        digests = new StakingModuleDigest[](_stakingModuleIds.length);

        for (uint256 i = 0; i < _stakingModuleIds.length; ++i) {
            uint256 stakingModuleId = _stakingModuleIds[i];
            // Reverts the whole call if any requested id is unknown.
            SRUtils._requireModuleIdExists(stakingModuleId);
            IStakingModule stakingModule = stakingModuleId.getIStakingModule();

            digests[i].nodeOperatorsCount = _getStakingModuleNodeOperatorsCount(stakingModule);
            digests[i].activeNodeOperatorsCount = _getStakingModuleActiveNodeOperatorsCount(stakingModule);
            digests[i].state = _getModuleStateCompat(stakingModuleId);
            digests[i].summary = _getStakingModuleSummaryStruct(stakingModuleId);
        }
    }

    /// @notice Returns node operator digest for each node operator registered in the given staking module.
    /// @param _stakingModuleId Id of the staking module to return data for.
    /// @return Array of node operator digests.
    /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs
    /// for data aggregation.
    function getAllNodeOperatorDigests(uint256 _stakingModuleId) external view returns (NodeOperatorDigest[] memory) {
        return getNodeOperatorDigests(
            _stakingModuleId, 0, _getStakingModuleNodeOperatorsCount(_stakingModuleId.getIStakingModule())
        );
    }

    /// @notice Returns node operator digest for passed node operator ids in the given staking module.
+ /// @param _stakingModuleId Id of the staking module where node operators registered. + /// @param _offset Node operators offset starting with 0. + /// @param _limit The max number of node operators to return. + /// @return Array of node operator digests. + /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs + /// for data aggregation. + function getNodeOperatorDigests(uint256 _stakingModuleId, uint256 _offset, uint256 _limit) + public + view + returns (NodeOperatorDigest[] memory) + { + return getNodeOperatorDigests( + _stakingModuleId, _getStakingModuleNodeOperatorIds(_stakingModuleId.getIStakingModule(), _offset, _limit) + ); + } + + /// @notice Returns node operator digest for a slice of node operators registered in the given + /// staking module. + /// @param _stakingModuleId Id of the staking module where node operators registered. + /// @param _nodeOperatorIds Ids of the node operators to return data for. + /// @return digests Array of node operator digests. + /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs + /// for data aggregation. + function getNodeOperatorDigests(uint256 _stakingModuleId, uint256[] memory _nodeOperatorIds) + public + view + returns (NodeOperatorDigest[] memory digests) + { + SRUtils._requireModuleIdExists(_stakingModuleId); + digests = new NodeOperatorDigest[](_nodeOperatorIds.length); + for (uint256 i = 0; i < _nodeOperatorIds.length; ++i) { + uint256 nodeOperatorId = _nodeOperatorIds[i]; + IStakingModule stakingModule = _stakingModuleId.getIStakingModule(); + + digests[i].id = nodeOperatorId; + digests[i].isActive = _getStakingModuleNodeOperatorIsActive(stakingModule, nodeOperatorId); + digests[i].summary = _getNodeOperatorSummary(stakingModule, nodeOperatorId); + } + } + + /// @notice Sets the staking module status flag for participation in further deposits and/or reward distribution. 
    /// @param _stakingModuleId Id of the staking module to be updated.
    /// @param _status New status of the staking module.
    /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role.
    /// @dev Reverts with `StakingModuleStatusTheSame` if the status would not change.
    function setStakingModuleStatus(uint256 _stakingModuleId, StakingModuleStatus _status)
        external
        onlyRole(STAKING_MODULE_MANAGE_ROLE)
    {
        SRUtils._requireModuleIdExists(_stakingModuleId);
        if (!SRLib._setModuleStatus(_stakingModuleId, _status)) revert StakingModuleStatusTheSame();
    }

    /// @notice Returns whether the staking module is stopped.
    /// @param _stakingModuleId Id of the staking module.
    /// @return True if the staking module is stopped, false otherwise.
    function getStakingModuleIsStopped(uint256 _stakingModuleId) external view returns (bool) {
        return getStakingModuleStatus(_stakingModuleId) == StakingModuleStatus.Stopped;
    }

    /// @notice Returns whether the deposits are paused for the staking module.
    /// @param _stakingModuleId Id of the staking module.
    /// @return True if the deposits are paused, false otherwise.
    function getStakingModuleIsDepositsPaused(uint256 _stakingModuleId) external view returns (bool) {
        return getStakingModuleStatus(_stakingModuleId) == StakingModuleStatus.DepositsPaused;
    }

    /// @notice Returns whether the staking module is active.
    /// @param _stakingModuleId Id of the staking module.
    /// @return True if the staking module is active, false otherwise.
    function getStakingModuleIsActive(uint256 _stakingModuleId) external view returns (bool) {
        return getStakingModuleStatus(_stakingModuleId) == StakingModuleStatus.Active;
    }

    /// @notice Returns staking module nonce.
    /// @param _stakingModuleId Id of the staking module.
    /// @return Staking module nonce.
    function getStakingModuleNonce(uint256 _stakingModuleId) external view returns (uint256) {
        SRUtils._requireModuleIdExists(_stakingModuleId);
        return _stakingModuleId.getIStakingModule().getNonce();
    }

    /// @notice Returns the last deposit block for the staking module.
    /// @param _stakingModuleId Id of the staking module.
    /// @return Last deposit block for the staking module.
    function getStakingModuleLastDepositBlock(uint256 _stakingModuleId) external view returns (uint256) {
        (ModuleState storage state,) = _getModuleState(_stakingModuleId);
        return state.deposits.lastDepositBlock;
    }

    /// @notice Returns the min deposit block distance for the staking module.
    /// @param _stakingModuleId Id of the staking module.
    /// @return Min deposit block distance for the staking module.
    function getStakingModuleMinDepositBlockDistance(uint256 _stakingModuleId) external view returns (uint256) {
        (ModuleState storage state,) = _getModuleState(_stakingModuleId);
        return state.deposits.minDepositBlockDistance;
    }

    /// @notice Returns the max deposits count per block for the staking module.
    /// @param _stakingModuleId Id of the staking module.
    /// @return Max deposits count per block for the staking module.
    function getStakingModuleMaxDepositsPerBlock(uint256 _stakingModuleId) external view returns (uint256) {
        (ModuleState storage state,) = _getModuleState(_stakingModuleId);
        return state.deposits.maxDepositsPerBlock;
    }

    /// @notice Returns active validators count for the staking module.
    /// @param _stakingModuleId Id of the staking module.
    /// @return activeValidatorsCount Active validators count for the staking module.
    function getStakingModuleActiveValidatorsCount(uint256 _stakingModuleId)
        external
        view
        returns (uint256 activeValidatorsCount)
    {
        (ModuleState storage state,) = _getModuleState(_stakingModuleId);
        (uint256 totalExitedValidators, uint256 totalDepositedValidators,) = _getStakingModuleSummary(_stakingModuleId);

        // Use the larger of the locally accounted and module-reported exited counts so the
        // active count is never overestimated while the two sources are out of sync.
        activeValidatorsCount =
            totalDepositedValidators - Math.max(state.accounting.exitedValidatorsCount, totalExitedValidators);
    }

    /// @notice Returns withdrawal credentials type
    /// @param _stakingModuleId Id of the staking module to be deposited.
    /// @return withdrawal credentials: 0x01... - for Legacy modules, 0x02... - for New modules
    function getStakingModuleWithdrawalCredentials(uint256 _stakingModuleId) external view returns (bytes32) {
        (, ModuleStateConfig storage stateConfig) = _getModuleState(_stakingModuleId);
        return _getWithdrawalCredentialsWithType(stateConfig.withdrawalCredentialsType);
    }

    /// @notice Returns the max count of deposits which the staking module can provide data for based
    /// on the passed `_maxDepositsValue` amount.
    /// @param _stakingModuleId Id of the staking module to be deposited.
    /// @param _maxDepositsValue Max amount of ether that might be used for deposits count calculation.
    /// @return Max number of deposits might be done using the given staking module.
    function getStakingModuleMaxDepositsCount(uint256 _stakingModuleId, uint256 _maxDepositsValue)
        public
        view
        returns (uint256)
    {
        SRUtils._requireModuleIdExists(_stakingModuleId);
        // If module is not active, then its capacity is 0, so stakingModuleDepositableEthAmount will be 0.
        // Module capacity is calculated based on the depositableValidatorsCount (from getStakingModuleSummary), so
        // stakingModuleDepositableEthAmount is already capped by the module capacity and represents the max ETH amount possible to deposit.
        return
            _getModuleDepositAllocation(_stakingModuleId, _maxDepositsValue, false) / MAX_EFFECTIVE_BALANCE_WC_TYPE_01;
    }

    /// @notice Returns true if the module is registered and currently has the Active status.
    function canDeposit(uint256 _stakingModuleId) external view returns (bool) {
        return hasStakingModule(_stakingModuleId)
            && _stakingModuleId.getModuleState().config.status == StakingModuleStatus.Active;
    }

    /**
     * @notice A payable function for depositable eth acquisition. Can be called only by `Lido`
     */
    function receiveDepositableEther() external payable {
        _checkAppAuth(address(LIDO));

        emit DepositableEthReceived(msg.value);
    }

    /// @notice Method performs top-up calls to the official Deposit contract. Determines how much Lido buffered ether can be deposited
    /// to the staking module, obtains keys from the staking module with exact allocation for each key, pulls ether from Lido,
    /// and performs the top-up call.
    /// @param _stakingModuleId Id of the staking module to be deposited.
    /// @param _keyIndices List of keys' indices
    /// @param _operatorIds List of operator indices
    /// @param _pubkeys List of validator public keys to top up
    /// @param _topUpLimits Maximum amount (in wei) that can be deposited per key based on CL data and TopUpGateway logic
    function topUp(
        uint256 _stakingModuleId,
        uint256[] calldata _keyIndices,
        uint256[] calldata _operatorIds,
        bytes[] calldata _pubkeys,
        uint256[] calldata _topUpLimits
    ) external {
        // Only the TopUpGateway contract may call this method.
        _checkAppAuth(_getTopUpGateway());
        _validateTopUpInputs(_keyIndices, _operatorIds, _topUpLimits, _pubkeys);

        (, ModuleStateConfig storage stateConfig) = _getModuleState(_stakingModuleId);

        if (stateConfig.status != StakingModuleStatus.Active) revert StakingModuleNotActive();

        /// @dev This method is only supported for new modules (0x02 withdrawal credentials)
        SRUtils._requireWCType2(stateConfig.withdrawalCredentialsType);

        // Get allocation based on target share
        uint256 depositableEther = LIDO.getDepositableEther();
        uint256 smDepositableEthAmount =
            _getModuleDepositAllocation(_stakingModuleId, depositableEther, true);

        // Call allocateDeposits on the staking module to determine the deposit amount for each key.
        // The module verifies keys belong to it and reverts if invalid.
        // Even if smDepositableEthAmount is 0, we still call the module
        // to allow CSM queue cursor advancement.
        uint256[] memory allocations;
        uint256 smDepositableEthAmountRounded = smDepositableEthAmount - (smDepositableEthAmount % 1 gwei);
        allocations = IStakingModuleV2(stateConfig.moduleAddress)
            .allocateDeposits(smDepositableEthAmountRounded, _pubkeys, _keyIndices, _operatorIds, _topUpLimits);

        // Calculate total amount from allocations returned by module (in wei)
        uint256 amount;
        unchecked {
            for (uint256 i; i < allocations.length; ++i) {
                // Beacon chain deposits are denominated in gwei; reject sub-gwei remainders.
                if (allocations[i] % 1 gwei != 0) {
                    revert AmountNotAlignedToGwei();
                }

                // Each per-key allocation is bounded by the gateway-supplied limit, which also
                // bounds the unchecked sum below in practice.
                if (allocations[i] > _topUpLimits[i]) {
                    revert AllocationExceedsLimit();
                }

                amount += allocations[i];
            }
        }

        // Verify sum of allocations does not exceed module's max deposit amount
        if (amount > smDepositableEthAmountRounded) {
            revert ModuleReturnExceedTarget();
        }

        if (amount > 0) {
            uint256 etherBalanceBeforeDeposits = address(this).balance;
            // Pull ETH from Lido
            LIDO.withdrawDepositableEther(amount, 0);

            bytes32 withdrawalCredentials = _getWithdrawalCredentialsWithType(stateConfig.withdrawalCredentialsType);
            bytes memory wcBytes = abi.encodePacked(withdrawalCredentials);

            // Make beacon chain top-up deposits
            BeaconChainDepositor.makeBeaconChainTopUp(DEPOSIT_CONTRACT, wcBytes, _pubkeys, allocations);

            uint256 etherBalanceAfterDeposits = address(this).balance;

            /// @dev All pulled ETH must be deposited
            assert(etherBalanceBeforeDeposits == etherBalanceAfterDeposits);
        }
    }

    /// @dev Validates that all top-up input arrays are non-empty, equal-length, and that every
    /// pubkey has the expected byte length. Reverts otherwise.
    function _validateTopUpInputs(
        uint256[] calldata _keyIndices,
        uint256[] calldata _operatorIds,
        uint256[] calldata _topUpLimits,
        bytes[] calldata _pubkeys
    ) internal pure {
        uint256 n = _keyIndices.length;

        if (n == 0) {
            revert EmptyKeysList();
        }

        if (_operatorIds.length != n || _topUpLimits.length != n || _pubkeys.length != n) {
            revert ArraysLengthMismatch();
        }

        for (uint256 i; i < n; ++i) {
            if (_pubkeys[i].length != PUBKEY_LENGTH) {
                revert WrongPubkeyLength();
            }
        }
    }

    /// @notice Returns the aggregate fee distribution proportion.
    /// @return modulesFee Modules aggregate fee in base precision.
    /// @return treasuryFee Treasury fee in base precision.
    /// @return basePrecision Base precision: a value corresponding to the full fee.
    function getStakingFeeAggregateDistribution()
        public
        view
        returns (uint96 modulesFee, uint96 treasuryFee, uint256 basePrecision)
    {
        uint96[] memory moduleFees;
        uint96 totalFee;
        (,, moduleFees, totalFee, basePrecision) = getStakingRewardsDistribution();
        for (uint256 i; i < moduleFees.length; ++i) {
            modulesFee += moduleFees[i];
        }
        // Treasury takes the remainder of the total fee after all module fees.
        treasuryFee = totalFee - modulesFee;
    }

    /// @notice Return shares table.
    /// @return recipients Rewards recipient addresses corresponding to each module.
    /// @return stakingModuleIds Module IDs.
    /// @return stakingModuleFees Fee of each recipient.
    /// @return totalFee Total fee to mint for each staking module and treasury.
    /// @return precisionPoints Base precision number, which constitutes 100% fee.
    function getStakingRewardsDistribution()
        public
        view
        returns (
            address[] memory recipients,
            uint256[] memory stakingModuleIds,
            uint96[] memory stakingModuleFees,
            uint96 totalFee,
            uint256 precisionPoints
        )
    {
        uint256 totalValidatorsBalance = SRUtils._getTotalModulesValidatorsBalance();
        uint256 stakingModulesCount = totalValidatorsBalance == 0 ?
            0 : SRStorage.getModulesCount();

        stakingModuleIds = new uint256[](stakingModulesCount);
        recipients = new address[](stakingModulesCount);
        stakingModuleFees = new uint96[](stakingModulesCount);
        precisionPoints = FEE_PRECISION_POINTS;

        /// @dev Return empty response if there are no staking modules or active validators yet.
        if (stakingModulesCount == 0) {
            return (recipients, stakingModuleIds, stakingModuleFees, totalFee, precisionPoints);
        }

        uint256 rewardedStakingModulesCount = 0;

        for (uint256 i; i < stakingModulesCount; ++i) {
            uint256 moduleId = SRStorage.getModuleIdAt(i);
            uint256 allocation = SRUtils._getModuleValidatorsBalance(moduleId);

            /// @dev Skip staking modules which have no active balance.
            if (allocation == 0) continue;

            stakingModuleIds[rewardedStakingModulesCount] = moduleId;

            ModuleStateConfig memory stateConfig = moduleId.getModuleState().config;
            recipients[rewardedStakingModulesCount] = stateConfig.moduleAddress;

            (uint96 moduleFee, uint96 treasuryFee) = _computeModuleFee(allocation, totalValidatorsBalance, stateConfig);

            /// @dev If the staking module has the Stopped status for some reason, then
            /// the staking module's rewards go to the treasury, so that the DAO has ability
            /// to manage them (e.g. to compensate the staking module in case of an error, etc.)
            if (stateConfig.status != StakingModuleStatus.Stopped) {
                stakingModuleFees[rewardedStakingModulesCount] = moduleFee;
            }
            totalFee += treasuryFee + moduleFee;

            unchecked {
                ++rewardedStakingModulesCount;
            }
        }

        // Total fee never exceeds 100%.
        assert(totalFee <= precisionPoints);

        /// @dev Shrink arrays.
        if (rewardedStakingModulesCount < stakingModulesCount) {
            // Rewrite the length slots of the already-allocated memory arrays in place;
            // no memory beyond the original allocations is touched.
            assembly ("memory-safe") {
                mstore(stakingModuleIds, rewardedStakingModulesCount)
                mstore(recipients, rewardedStakingModulesCount)
                mstore(stakingModuleFees, rewardedStakingModulesCount)
            }
        }

        return (recipients, stakingModuleIds, stakingModuleFees, totalFee, precisionPoints);
    }

    /// @notice Returns the tracked validators balance of a single module; reverts for unknown ids.
    function getModuleValidatorsBalance(uint256 moduleId) external view returns (uint256) {
        SRUtils._requireModuleIdExists(moduleId);
        return SRUtils._getModuleValidatorsBalance(moduleId);
    }

    /// @notice Returns the tracked validators balance summed over all modules.
    function getTotalModulesValidatorsBalance() external view returns (uint256) {
        return SRUtils._getTotalModulesValidatorsBalance();
    }

    /// @dev Computes the module and treasury fee for one module, proportional to its share of the
    /// total validators balance, expressed in FEE_PRECISION_POINTS.
    function _computeModuleFee(
        uint256 validatorsBalance,
        uint256 totalValidatorsBalance,
        ModuleStateConfig memory stateConfig
    ) internal pure returns (uint96 moduleFee, uint96 treasuryFee) {
        uint256 share =
            validatorsBalance * FEE_PRECISION_POINTS / totalValidatorsBalance;
        moduleFee = uint96(share * stateConfig.moduleFee / SRUtils.TOTAL_BASIS_POINTS);
        treasuryFee = uint96(share * stateConfig.treasuryFee / SRUtils.TOTAL_BASIS_POINTS);
    }

    /// @notice Returns the same as getStakingRewardsDistribution() but in reduced, 1e4 precision (DEPRECATED).
    /// @dev Helper only for Lido contract. Use getStakingRewardsDistribution() instead.
    /// @return totalFee Total fee to mint for each staking module and treasury in reduced, 1e4 precision.
    function getTotalFeeE4Precision() external view returns (uint16 totalFee) {
        /// @dev The logic is placed here rather than in the Lido contract to save Lido bytecode.
        (,,, uint96 totalFeeInHighPrecision, uint256 precision) = getStakingRewardsDistribution();
        // Here we rely on (totalFeeInHighPrecision <= precision).
        totalFee = _toE4Precision(totalFeeInHighPrecision, precision);
    }

    /// @notice Returns the same as getStakingFeeAggregateDistribution() but in reduced, 1e4 precision (DEPRECATED).
    /// @dev Helper only for Lido contract. Use getStakingFeeAggregateDistribution() instead.
    /// @return modulesFee Modules aggregate fee in reduced, 1e4 precision.
    /// @return treasuryFee Treasury fee in reduced, 1e4 precision.
    function getStakingFeeAggregateDistributionE4Precision()
        external
        view
        returns (uint16 modulesFee, uint16 treasuryFee)
    {
        /// @dev The logic is placed here rather than in the Lido contract to save Lido bytecode.
        (uint256 modulesFeeHighPrecision, uint256 treasuryFeeHighPrecision, uint256 precision) =
            getStakingFeeAggregateDistribution();
        // Here we rely on ({modules,treasury}FeeHighPrecision <= precision).
        modulesFee = _toE4Precision(modulesFeeHighPrecision, precision);
        treasuryFee = _toE4Precision(treasuryFeeHighPrecision, precision);
    }

    /// @notice Returns new deposits allocation after the distribution of the `_depositAmount` deposits.
    /// @param _depositAmount The maximum ETH amount of deposits to be allocated.
    /// @param _isTopUp Whether the allocation is requested for top-up (true) or initial deposits (false).
    /// @return totalAllocated - amount actually allocated
    /// @return allocated - Array of newly allocated amounts for each module
    /// @return newAllocations - Array of new allocation amounts for each module
    function getDepositAllocations(uint256 _depositAmount, bool _isTopUp)
        public
        view
        returns (uint256 totalAllocated, uint256[] memory allocated, uint256[] memory newAllocations)
    {
        (totalAllocated, allocated, newAllocations) =
            SRLib._getDepositAllocations(_getConfig(), _depositAmount, _isTopUp);
    }

    /// @notice Invokes a deposit call to the official Deposit contract.
    /// @param _stakingModuleId Id of the staking module to be deposited.
    /// @param _depositCalldata Staking module calldata.
    /// @dev Only the DepositSecurityModule is allowed to call this method.
    function deposit(uint256 _stakingModuleId, bytes calldata _depositCalldata) external {
        // Only the DepositSecurityModule may trigger deposits.
        _checkAppAuth(_getDepositSecurityModule());
        (ModuleState storage state, ModuleStateConfig storage stateConfig) = _getModuleState(_stakingModuleId);

        if (stateConfig.status != StakingModuleStatus.Active) revert StakingModuleNotActive();

        bytes32 withdrawalCredentials = _getWithdrawalCredentialsWithType(stateConfig.withdrawalCredentialsType);
        address stakingModuleAddress = stateConfig.moduleAddress;

        // Get depositable ether from Lido (similar to topUp)
        uint256 depositableEther = LIDO.getDepositableEther();
        uint256 stakingModuleDepositableEthAmount =
            _getModuleDepositAllocation(_stakingModuleId, depositableEther, false);
        // Calculate max deposits count (capped by max and module capacity)
        (,, uint256 depositableValidatorsCount) = _getStakingModuleSummary(_stakingModuleId);
        uint256 maxDepositsCount = Math.min(
            Math.min(state.deposits.maxDepositsPerBlock, depositableValidatorsCount),
            stakingModuleDepositableEthAmount / MAX_EFFECTIVE_BALANCE_WC_TYPE_01 // max possible initial deposits count
        );

        if (maxDepositsCount == 0) revert ZeroDeposits();

        // Get deposit data from module first - it may return fewer keys than requested
        (bytes memory publicKeysBatch, bytes memory signaturesBatch) =
            IStakingModule(stakingModuleAddress).obtainDepositData(maxDepositsCount, _depositCalldata);

        // Calculate actual deposits count from returned keys
        if (publicKeysBatch.length % PUBKEY_LENGTH != 0) revert WrongPubkeyLength();
        uint256 actualDepositsCount = publicKeysBatch.length / PUBKEY_LENGTH;

        if (actualDepositsCount > maxDepositsCount) revert ModuleReturnExceedTarget();

        // Calculate actual deposit value based on keys returned
        uint256 depositsValue = actualDepositsCount * MAX_EFFECTIVE_BALANCE_WC_TYPE_01;

        /// @dev Update the local state of the contract to prevent a reentrancy attack
        /// even though the staking modules are trusted contracts.
        _updateModuleLastDepositState(_stakingModuleId, depositsValue);

        if (actualDepositsCount == 0) return;

        uint256 etherBalanceBeforeDeposits = address(this).balance;

        // Pull ETH from Lido based on actual keys returned
        LIDO.withdrawDepositableEther(depositsValue, actualDepositsCount);

        BeaconChainDepositor.makeBeaconChainDeposits32ETH(
            DEPOSIT_CONTRACT,
            actualDepositsCount,
            abi.encodePacked(withdrawalCredentials),
            publicKeysBatch,
            signaturesBatch
        );

        uint256 etherBalanceAfterDeposits = address(this).balance;

        /// @dev All pulled ETH must be deposited and self balance stay the same.
        assert(etherBalanceBeforeDeposits == etherBalanceAfterDeposits);
    }

    /// @notice Set 0x01 credentials to withdraw ETH on Consensus Layer side.
    /// @param _withdrawalCredentials 0x01 withdrawal credentials field as defined in the Consensus Layer specs.
    /// @dev Note that setWithdrawalCredentials discards all unused deposits data as the signatures are invalidated.
    /// @dev The function is restricted to the `MANAGE_WITHDRAWAL_CREDENTIALS_ROLE` role.
    function setWithdrawalCredentials(bytes32 _withdrawalCredentials)
        external
        onlyRole(MANAGE_WITHDRAWAL_CREDENTIALS_ROLE)
    {
        _setWithdrawalCredentials(_withdrawalCredentials);
    }

    /// @notice Returns current credentials to withdraw ETH on Consensus Layer side.
    /// @return Withdrawal credentials.
+ function getWithdrawalCredentials() public view returns (bytes32) { + return SRStorage.getRouterState().withdrawalCredentials; + } + + function _setWithdrawalCredentials(bytes32 wc) internal { + SRUtils._requireNotZero(WithdrawalCredentials.getAddr(wc)); + SRUtils._requireWCTypeValid(WithdrawalCredentials.getType(wc)); + SRStorage.getRouterState().withdrawalCredentials = wc; + emit WithdrawalCredentialsSet(wc, _msgSender()); + + // Notify all staking modules about the withdrawal credentials change + SRLib._notifyStakingModulesOfWithdrawalCredentialsChange(); + } + + function _getWithdrawalCredentialsWithType(uint8 withdrawalCredentialsType) internal view returns (bytes32) { + bytes32 wc = getWithdrawalCredentials(); + return wc.setType(withdrawalCredentialsType); + } + + /// @dev Save the last deposit state for the staking module and emit the event + /// @param stakingModuleId id of the staking module to be deposited + /// @param depositsValue value to deposit + function _updateModuleLastDepositState(uint256 stakingModuleId, uint256 depositsValue) internal { + SRLib._updateModuleLastDepositState(stakingModuleId); + emit StakingRouterETHDeposited(stakingModuleId, depositsValue); + } + + /// @notice Allocation for single module based on target share + /// @param moduleId Id of staking module + /// @param amountToAllocate Eth amount that can be deposited in module + /// @param isTopUp Whether the allocation is for top-up deposits + /// @return allocation Eth amount that can be deposited in module with id `moduleId` (can be less than `amountToAllocate`) + function _getModuleDepositAllocation(uint256 moduleId, uint256 amountToAllocate, bool isTopUp) + internal + view + returns (uint256 allocation) + { + return SRLib._getModuleDepositAllocation(_getConfig(), moduleId, amountToAllocate, isTopUp); + } + + /// module wrapper + function _getStakingModuleNodeOperatorsCount(IStakingModule _stakingModule) internal view returns (uint256) { + return 
_stakingModule.getNodeOperatorsCount(); + } + + function _getStakingModuleActiveNodeOperatorsCount(IStakingModule _stakingModule) internal view returns (uint256) { + return _stakingModule.getActiveNodeOperatorsCount(); + } + + function _getStakingModuleNodeOperatorIds(IStakingModule _stakingModule, uint256 _offset, uint256 _limit) + internal + view + returns (uint256[] memory) + { + return _stakingModule.getNodeOperatorIds(_offset, _limit); + } + + function _getStakingModuleNodeOperatorIsActive(IStakingModule _stakingModule, uint256 _nodeOperatorId) + internal + view + returns (bool) + { + return _stakingModule.getNodeOperatorIsActive(_nodeOperatorId); + } + + /// --- + + function _getModuleState(uint256 _moduleId) + internal + view + returns (ModuleState storage state, ModuleStateConfig storage stateConfig) + { + SRUtils._requireModuleIdExists(_moduleId); + state = _moduleId.getModuleState(); + stateConfig = state.config; + } + + function _getModuleStateCompat(uint256 _moduleId) internal view returns (StakingModule memory moduleState) { + moduleState.id = uint24(_moduleId); + + ModuleState storage state = _moduleId.getModuleState(); + moduleState.name = state.name; + + /// @dev use multiply SLOAD as this data readonly by offchain tools, so minimize bytecode size + + ModuleStateConfig storage stateConfig = state.config; + moduleState.stakingModuleAddress = stateConfig.moduleAddress; + moduleState.stakingModuleFee = stateConfig.moduleFee; + moduleState.treasuryFee = stateConfig.treasuryFee; + moduleState.stakeShareLimit = stateConfig.stakeShareLimit; + moduleState.status = uint8(stateConfig.status); + moduleState.priorityExitShareThreshold = stateConfig.priorityExitShareThreshold; + moduleState.withdrawalCredentialsType = stateConfig.withdrawalCredentialsType; + + ModuleStateDeposits storage stateDeposits = state.deposits; + moduleState.lastDepositAt = stateDeposits.lastDepositAt; + moduleState.lastDepositBlock = stateDeposits.lastDepositBlock; + 
moduleState.maxDepositsPerBlock = stateDeposits.maxDepositsPerBlock; + moduleState.minDepositBlockDistance = stateDeposits.minDepositBlockDistance; + + ModuleStateAccounting storage moduleAcc = state.accounting; + moduleState.validatorsBalanceGwei = moduleAcc.validatorsBalanceGwei; + moduleState.exitedValidatorsCount = moduleAcc.exitedValidatorsCount; + } + + /// @dev Optimizes contract deployment size by wrapping the 'stakingModule.getStakingModuleSummary' function. + function _getStakingModuleSummary(uint256 _moduleId) + internal + view + returns (uint256 totalExitedValidators, uint256 totalDepositedValidators, uint256 depositableValidatorsCount) + { + return _moduleId.getIStakingModule().getStakingModuleSummary(); + } + + function _getStakingModuleSummaryStruct(uint256 _stakingModuleId) + internal + view + returns (StakingModuleSummary memory summary) + { + (summary.totalExitedValidators, summary.totalDepositedValidators, summary.depositableValidatorsCount) = + _getStakingModuleSummary(_stakingModuleId); + } + + function _getNodeOperatorSummary(IStakingModule _stakingModule, uint256 _nodeOperatorId) + internal + view + returns (NodeOperatorSummary memory summary) + { + ( + summary.targetLimitMode, + summary.targetValidatorsCount,,,, + summary.totalExitedValidators, + summary.totalDepositedValidators, + summary.depositableValidatorsCount + ) = _stakingModule.getNodeOperatorSummary(_nodeOperatorId); + } + + function _getAccountingOracle() internal view returns (address) { + return LIDO_LOCATOR.accountingOracle(); + } + + function _getTopUpGateway() internal view returns (address) { + return LIDO_LOCATOR.topUpGateway(); + } + + function _getDepositSecurityModule() internal view returns (address) { + return LIDO_LOCATOR.depositSecurityModule(); + } + + function _checkAppAuth(address app) internal view { + if (_msgSender() != app) revert NotAuthorized(); + } + + /// @notice memory config cache + /// @dev Build once per tx, reuse across all lib calls + function 
_getConfig() private view returns (SRLib.Config memory) { + return + SRLib.Config({maxEBType1: MAX_EFFECTIVE_BALANCE_WC_TYPE_01, maxEBType2: MAX_EFFECTIVE_BALANCE_WC_TYPE_02}); + } + + function _toE4Precision(uint256 _value, uint256 _precision) internal pure returns (uint16) { + return uint16((_value * SRUtils.TOTAL_BASIS_POINTS) / _precision); + } +} diff --git a/contracts/0.8.9/Accounting.sol b/contracts/0.8.9/Accounting.sol index 975b428e0f..7dbb9dca5f 100644 --- a/contracts/0.8.9/Accounting.sol +++ b/contracts/0.8.9/Accounting.sol @@ -14,8 +14,21 @@ import {IVaultHub} from "contracts/common/interfaces/IVaultHub.sol"; import {IPostTokenRebaseReceiver} from "./interfaces/IPostTokenRebaseReceiver.sol"; import {WithdrawalQueue} from "./WithdrawalQueue.sol"; -import {StakingRouter} from "./StakingRouter.sol"; +interface IStakingRouter { + function getStakingRewardsDistribution() + external + view + returns ( + address[] memory recipients, + uint256[] memory stakingModuleIds, + uint96[] memory stakingModuleFees, + uint96 totalFee, + uint256 precisionPoints + ); + + function reportRewardsMinted(uint256[] calldata _stakingModuleIds, uint256[] calldata _totalShares) external; +} /// @title Lido Accounting contract /// @author folkyatina @@ -29,14 +42,15 @@ contract Accounting { IBurner burner; WithdrawalQueue withdrawalQueue; IPostTokenRebaseReceiver postTokenRebaseReceiver; - StakingRouter stakingRouter; + IStakingRouter stakingRouter; IVaultHub vaultHub; } /// @notice snapshot of the protocol state that may be changed during the report struct PreReportState { - uint256 clValidators; - uint256 clBalance; + uint256 clValidatorsBalance; + uint256 clPendingBalance; + uint256 depositedBalance; uint256 totalPooledEther; uint256 totalShares; uint256 depositedValidators; @@ -89,9 +103,6 @@ contract Accounting { uint256 treasurySharesToMint; } - /// @notice deposit size in wei (for pre-maxEB accounting) - uint256 private constant DEPOSIT_SIZE = 32 ether; - ILidoLocator public 
immutable LIDO_LOCATOR; ILido public immutable LIDO; @@ -135,7 +146,7 @@ contract Accounting { /// @dev reads the current state of the protocol to the memory function _snapshotPreReportState(Contracts memory _contracts, bool isSimulation) internal view returns (PreReportState memory pre) { - (pre.depositedValidators, pre.clValidators, pre.clBalance) = LIDO.getBeaconStat(); + (pre.clValidatorsBalance, pre.clPendingBalance,, pre.depositedBalance) = LIDO.getBalanceStats(); pre.totalPooledEther = LIDO.getTotalPooledEther(); pre.totalShares = LIDO.getTotalShares(); pre.externalShares = LIDO.getExternalShares(); @@ -165,10 +176,8 @@ contract Accounting { _report ); - // Principal CL balance is the sum of the current CL balance and - // validator deposits during this report - // TODO: to support maxEB we need to get rid of validator counting - update.principalClBalance = _pre.clBalance + (_report.clValidators - _pre.clValidators) * DEPOSIT_SIZE; + // Principal CL balance is sum of previous balances and new deposits + update.principalClBalance = _pre.clValidatorsBalance + _pre.clPendingBalance + _pre.depositedBalance; // Limit the rebase to avoid oracle frontrunning // by leaving some ether to sit in EL rewards vault or withdrawals vault @@ -182,7 +191,7 @@ contract Accounting { _pre.totalPooledEther - _pre.externalEther, // we need to change the base as shareRate is now calculated on _pre.totalShares - _pre.externalShares, // internal ether and shares, but inside it's still total update.principalClBalance, - _report.clBalance, + _report.clValidatorsBalance + _report.clPendingBalance, _report.withdrawalVaultBalance, _report.elRewardsVaultBalance, _report.sharesRequestedToBurn, @@ -190,13 +199,13 @@ contract Accounting { update.sharesToFinalizeWQ ); - uint256 postInternalSharesBeforeFees = - _pre.totalShares - _pre.externalShares // internal shares before - - update.totalSharesToBurn; // shares to be burned for withdrawals and cover + uint256 postInternalSharesBeforeFees = 
_pre.totalShares - + _pre.externalShares - // internal shares before + update.totalSharesToBurn; // shares to be burned for withdrawals and cover update.postInternalEther = _pre.totalPooledEther - _pre.externalEther // internal ether before - + _report.clBalance + update.withdrawalsVaultTransfer - update.principalClBalance + + _report.clValidatorsBalance + _report.clPendingBalance + update.withdrawalsVaultTransfer - update.principalClBalance + update.elRewardsVaultTransfer - update.etherToFinalizeWQ; @@ -208,7 +217,10 @@ contract Accounting { postInternalSharesBeforeFees ); - update.postInternalShares = postInternalSharesBeforeFees + update.sharesToMintAsFees + _pre.badDebtToInternalize; + update.postInternalShares = + postInternalSharesBeforeFees + + update.sharesToMintAsFees + + _pre.badDebtToInternalize; uint256 postExternalShares = _pre.externalShares - _pre.badDebtToInternalize; // can't underflow by design update.postTotalShares = update.postInternalShares + postExternalShares; @@ -232,7 +244,7 @@ contract Accounting { /// @return sharesToMintAsFees total number of shares to be minted as Lido Core fee /// @return feeDistribution the number of shares that is minted to each module or treasury function _calculateProtocolFees( - StakingRouter _stakingRouter, + IStakingRouter _stakingRouter, ReportValues calldata _report, CalculatedValues memory _update, uint256 _internalSharesBeforeFees @@ -283,7 +295,7 @@ contract Accounting { // but with fees taken as ether deduction instead of minting shares // to learn the amount of shares we need to mint to compensate for this fee - uint256 unifiedClBalance = _report.clBalance + _update.withdrawalsVaultTransfer; + uint256 unifiedClBalance = _report.clValidatorsBalance + _report.clPendingBalance + _update.withdrawalsVaultTransfer; // Don't mint/distribute any protocol fee on the non-profitable Lido oracle report // (when consensus layer balance delta is zero or negative). 
// See LIP-12 for details: @@ -313,7 +325,7 @@ contract Accounting { uint256 totalModuleFeeShares = 0; - for (uint256 i; i < stakingModuleFees.length; ++i) { + for (uint256 i; i < length; ++i) { uint256 moduleFee = stakingModuleFees[i]; if (moduleFee > 0) { uint256 moduleFeeShares = (_totalSharesToMintAsFees * moduleFee) / _totalFee; @@ -343,7 +355,11 @@ contract Accounting { ]; } - LIDO.processClStateUpdate(_report.timestamp, _pre.clValidators, _report.clValidators, _report.clBalance); + LIDO.processClStateUpdate( + _report.timestamp, + _report.clValidatorsBalance, + _report.clPendingBalance + ); if (_pre.badDebtToInternalize > 0) { _contracts.vaultHub.decreaseInternalizedBadDebt(_pre.badDebtToInternalize); @@ -356,7 +372,7 @@ contract Accounting { LIDO.collectRewardsAndProcessWithdrawals( _report.timestamp, - _report.clBalance, + _report.clValidatorsBalance + _report.clPendingBalance, _update.principalClBalance, _update.withdrawalsVaultTransfer, _update.elRewardsVaultTransfer, @@ -401,9 +417,7 @@ contract Accounting { CalculatedValues memory _update ) internal { if (_report.timestamp >= block.timestamp) revert IncorrectReportTimestamp(_report.timestamp, block.timestamp); - if (_report.clValidators < _pre.clValidators || _report.clValidators > _pre.depositedValidators) { - revert IncorrectReportValidators(_report.clValidators, _pre.clValidators, _pre.depositedValidators); - } + // Validator count validation removed for MaxEB support - now using balance-based accounting // Oracle should consider this limitation: // During the AO report the ether to finalize the WQ cannot be greater or equal to `simulatedPostInternalEther` @@ -411,13 +425,15 @@ contract Accounting { _contracts.oracleReportSanityChecker.checkAccountingOracleReport( _report.timeElapsed, - _update.principalClBalance, - _report.clBalance, + _pre.clValidatorsBalance, + _pre.clPendingBalance, + _report.clValidatorsBalance, + _report.clPendingBalance, _report.withdrawalVaultBalance, 
_report.elRewardsVaultBalance, _report.sharesRequestedToBurn, - _pre.clValidators, - _report.clValidators + _pre.depositedBalance, + _update.withdrawalsVaultTransfer ); if (_report.withdrawalFinalizationBatches.length > 0) { @@ -493,13 +509,12 @@ contract Accounting { IBurner(burner), WithdrawalQueue(withdrawalQueue), IPostTokenRebaseReceiver(postTokenRebaseReceiver), - StakingRouter(payable(stakingRouter)), + IStakingRouter(stakingRouter), IVaultHub(vaultHub) ); } error NotAuthorized(string operation, address addr); error IncorrectReportTimestamp(uint256 reportTimestamp, uint256 upperBoundTimestamp); - error IncorrectReportValidators(uint256 reportValidators, uint256 minValidators, uint256 maxValidators); error InternalSharesCantBeZero(); } diff --git a/contracts/0.8.9/BeaconChainDepositor.sol b/contracts/0.8.9/BeaconChainDepositor.sol deleted file mode 100644 index 4bcd2f5f37..0000000000 --- a/contracts/0.8.9/BeaconChainDepositor.sol +++ /dev/null @@ -1,99 +0,0 @@ -// SPDX-FileCopyrightText: 2023 Lido -// SPDX-License-Identifier: GPL-3.0 - -// See contracts/COMPILERS.md -pragma solidity 0.8.9; - -import {MemUtils} from "../common/lib/MemUtils.sol"; - -interface IDepositContract { - function get_deposit_root() external view returns (bytes32 rootHash); - - function deposit( - bytes calldata pubkey, // 48 bytes - bytes calldata withdrawal_credentials, // 32 bytes - bytes calldata signature, // 96 bytes - bytes32 deposit_data_root - ) external payable; -} - -contract BeaconChainDepositor { - uint256 internal constant PUBLIC_KEY_LENGTH = 48; - uint256 internal constant SIGNATURE_LENGTH = 96; - uint256 internal constant DEPOSIT_SIZE = 32 ether; - - /// @dev deposit amount 32eth in gweis converted to little endian uint64 - /// DEPOSIT_SIZE_IN_GWEI_LE64 = toLittleEndian64(32 ether / 1 gwei) - uint64 internal constant DEPOSIT_SIZE_IN_GWEI_LE64 = 0x0040597307000000; - - IDepositContract public immutable DEPOSIT_CONTRACT; - - constructor(address _depositContract) { - if 
(_depositContract == address(0)) revert DepositContractZeroAddress(); - DEPOSIT_CONTRACT = IDepositContract(_depositContract); - } - - /// @dev Invokes a deposit call to the official Beacon Deposit contract - /// @param _keysCount amount of keys to deposit - /// @param _withdrawalCredentials Commitment to a public key for withdrawals - /// @param _publicKeysBatch A BLS12-381 public keys batch - /// @param _signaturesBatch A BLS12-381 signatures batch - function _makeBeaconChainDeposits32ETH( - uint256 _keysCount, - bytes memory _withdrawalCredentials, - bytes memory _publicKeysBatch, - bytes memory _signaturesBatch - ) internal { - if (_publicKeysBatch.length != PUBLIC_KEY_LENGTH * _keysCount) { - revert InvalidPublicKeysBatchLength(_publicKeysBatch.length, PUBLIC_KEY_LENGTH * _keysCount); - } - if (_signaturesBatch.length != SIGNATURE_LENGTH * _keysCount) { - revert InvalidSignaturesBatchLength(_signaturesBatch.length, SIGNATURE_LENGTH * _keysCount); - } - - bytes memory publicKey = MemUtils.unsafeAllocateBytes(PUBLIC_KEY_LENGTH); - bytes memory signature = MemUtils.unsafeAllocateBytes(SIGNATURE_LENGTH); - - for (uint256 i; i < _keysCount;) { - MemUtils.copyBytes(_publicKeysBatch, publicKey, i * PUBLIC_KEY_LENGTH, 0, PUBLIC_KEY_LENGTH); - MemUtils.copyBytes(_signaturesBatch, signature, i * SIGNATURE_LENGTH, 0, SIGNATURE_LENGTH); - - DEPOSIT_CONTRACT.deposit{value: DEPOSIT_SIZE}( - publicKey, _withdrawalCredentials, signature, _computeDepositDataRoot(_withdrawalCredentials, publicKey, signature) - ); - - unchecked { - ++i; - } - } - } - - /// @dev computes the deposit_root_hash required by official Beacon Deposit contract - /// @param _publicKey A BLS12-381 public key. 
- /// @param _signature A BLS12-381 signature - function _computeDepositDataRoot(bytes memory _withdrawalCredentials, bytes memory _publicKey, bytes memory _signature) - private - pure - returns (bytes32) - { - // Compute deposit data root (`DepositData` hash tree root) according to deposit_contract.sol - bytes memory sigPart1 = MemUtils.unsafeAllocateBytes(64); - bytes memory sigPart2 = MemUtils.unsafeAllocateBytes(SIGNATURE_LENGTH - 64); - MemUtils.copyBytes(_signature, sigPart1, 0, 0, 64); - MemUtils.copyBytes(_signature, sigPart2, 64, 0, SIGNATURE_LENGTH - 64); - - bytes32 publicKeyRoot = sha256(abi.encodePacked(_publicKey, bytes16(0))); - bytes32 signatureRoot = sha256(abi.encodePacked(sha256(abi.encodePacked(sigPart1)), sha256(abi.encodePacked(sigPart2, bytes32(0))))); - - return sha256( - abi.encodePacked( - sha256(abi.encodePacked(publicKeyRoot, _withdrawalCredentials)), - sha256(abi.encodePacked(DEPOSIT_SIZE_IN_GWEI_LE64, bytes24(0), signatureRoot)) - ) - ); - } - - error DepositContractZeroAddress(); - error InvalidPublicKeysBatchLength(uint256 actual, uint256 expected); - error InvalidSignaturesBatchLength(uint256 actual, uint256 expected); -} diff --git a/contracts/0.8.9/DepositSecurityModule.sol b/contracts/0.8.9/DepositSecurityModule.sol index b39ef28bb0..3edd27be95 100644 --- a/contracts/0.8.9/DepositSecurityModule.sol +++ b/contracts/0.8.9/DepositSecurityModule.sol @@ -7,7 +7,6 @@ pragma solidity 0.8.9; import {ECDSA} from "../common/lib/ECDSA.sol"; interface ILido { - function deposit(uint256 _maxDepositsCount, uint256 _stakingModuleId, bytes calldata _depositCalldata) external; function canDeposit() external view returns (bool); } @@ -17,16 +16,15 @@ interface IDepositContract { interface IStakingRouter { function getStakingModuleMinDepositBlockDistance(uint256 _stakingModuleId) external view returns (uint256); - function getStakingModuleMaxDepositsPerBlock(uint256 _stakingModuleId) external view returns (uint256); - function 
getStakingModuleIsActive(uint256 _stakingModuleId) external view returns (bool); function getStakingModuleNonce(uint256 _stakingModuleId) external view returns (uint256); function getStakingModuleLastDepositBlock(uint256 _stakingModuleId) external view returns (uint256); - function hasStakingModule(uint256 _stakingModuleId) external view returns (bool); + function canDeposit(uint256 _stakingModuleId) external view returns (bool); function decreaseStakingModuleVettedKeysCountByNodeOperator( uint256 _stakingModuleId, bytes calldata _nodeOperatorIds, bytes calldata _vettedSigningKeysCounts ) external; + function deposit(uint256 _stakingModuleId, bytes calldata _depositCalldata) external; } /** @@ -308,7 +306,7 @@ contract DepositSecurityModule { * Reverts if any of the addresses is already a guardian or is zero. */ function addGuardians(address[] memory addresses, uint256 newQuorum) external onlyOwner { - for (uint256 i = 0; i < addresses.length; ) { + for (uint256 i = 0; i < addresses.length;) { _addGuardian(addresses[i]); unchecked { @@ -415,22 +413,16 @@ contract DepositSecurityModule { * - the staking module is active; * - the guardian quorum is not set to zero; * - the deposit distance is greater than the minimum required; - * - LIDO.canDeposit() returns true. + * - LIDO.canDeposit() returns true; + * - STAKING_ROUTER.canDeposit returns true. 
*/ function canDeposit(uint256 stakingModuleId) external view returns (bool) { - if (!STAKING_ROUTER.hasStakingModule(stakingModuleId)) return false; + if (!STAKING_ROUTER.canDeposit(stakingModuleId)) return false; - bool isModuleActive = STAKING_ROUTER.getStakingModuleIsActive(stakingModuleId); bool isDepositDistancePassed = _isMinDepositDistancePassed(stakingModuleId); bool isLidoCanDeposit = LIDO.canDeposit(); - return ( - !isDepositsPaused - && isModuleActive - && quorum > 0 - && isDepositDistancePassed - && isLidoCanDeposit - ); + return (!isDepositsPaused && quorum > 0 && isDepositDistancePassed && isLidoCanDeposit); } /** @@ -462,12 +454,13 @@ contract DepositSecurityModule { /// guardian to react and pause deposits to all modules. uint256 lastDepositToModuleBlock = STAKING_ROUTER.getStakingModuleLastDepositBlock(stakingModuleId); uint256 minDepositBlockDistance = STAKING_ROUTER.getStakingModuleMinDepositBlockDistance(stakingModuleId); - uint256 maxLastDepositBlock = lastDepositToModuleBlock >= lastDepositBlock ? lastDepositToModuleBlock : lastDepositBlock; + uint256 maxLastDepositBlock = + lastDepositToModuleBlock >= lastDepositBlock ? lastDepositToModuleBlock : lastDepositBlock; return block.number - maxLastDepositBlock >= minDepositBlockDistance; } /** - * @notice Calls LIDO.deposit(maxDepositsPerBlock, stakingModuleId, depositCalldata). + * @notice Calls STAKING_ROUTER.deposit(stakingModuleId, depositCalldata). * @param blockNumber The block number at which the deposit intent was created. * @param blockHash The block hash at which the deposit intent was created. * @param depositRoot The deposit root hash. 
@@ -509,15 +502,15 @@ contract DepositSecurityModule { if (nonce != onchainNonce) revert ModuleNonceChanged(); if (quorum == 0 || sortedGuardianSignatures.length < quorum) revert DepositNoQuorum(); - if (!STAKING_ROUTER.getStakingModuleIsActive(stakingModuleId)) revert DepositInactiveModule(); + if (!STAKING_ROUTER.canDeposit(stakingModuleId)) revert DepositInactiveModule(); if (!_isMinDepositDistancePassed(stakingModuleId)) revert DepositTooFrequent(); if (blockHash == bytes32(0) || blockhash(blockNumber) != blockHash) revert DepositUnexpectedBlockHash(); if (isDepositsPaused) revert DepositsArePaused(); _verifyAttestSignatures(depositRoot, blockNumber, blockHash, stakingModuleId, nonce, sortedGuardianSignatures); - uint256 maxDepositsPerBlock = STAKING_ROUTER.getStakingModuleMaxDepositsPerBlock(stakingModuleId); - LIDO.deposit(maxDepositsPerBlock, stakingModuleId, depositCalldata); + // Call StakingRouter instead of Lido - SR will pull ETH from Lido + STAKING_ROUTER.deposit(stakingModuleId, depositCalldata); _setLastDepositBlock(block.number); } @@ -537,7 +530,7 @@ contract DepositSecurityModule { address prevSignerAddr; address signerAddr; - for (uint256 i = 0; i < sigs.length; ) { + for (uint256 i = 0; i < sigs.length;) { signerAddr = ECDSA.recover(msgHash, sigs[i].r, sigs[i].vs); if (!_isGuardian(signerAddr)) revert InvalidSignature(); if (signerAddr <= prevSignerAddr) revert SignaturesNotSorted(); @@ -620,9 +613,7 @@ contract DepositSecurityModule { if (blockHash == bytes32(0) || blockhash(blockNumber) != blockHash) revert UnvetUnexpectedBlockHash(); STAKING_ROUTER.decreaseStakingModuleVettedKeysCountByNodeOperator( - stakingModuleId, - nodeOperatorIds, - vettedSigningKeysCounts + stakingModuleId, nodeOperatorIds, vettedSigningKeysCounts ); } } diff --git a/contracts/0.8.9/LidoLocator.sol b/contracts/0.8.9/LidoLocator.sol index f60d04029f..d0fe0dd515 100644 --- a/contracts/0.8.9/LidoLocator.sol +++ b/contracts/0.8.9/LidoLocator.sol @@ -29,6 +29,7 @@ contract 
LidoLocator is ILidoLocator { address oracleDaemonConfig; address validatorExitDelayVerifier; address triggerableWithdrawalsGateway; + address consolidationGateway; address accounting; address predepositGuarantee; address wstETH; @@ -36,6 +37,7 @@ contract LidoLocator is ILidoLocator { address vaultFactory; address lazyOracle; address operatorGrid; + address topUpGateway; } error ZeroAddress(); @@ -56,6 +58,7 @@ contract LidoLocator is ILidoLocator { address public immutable oracleDaemonConfig; address public immutable validatorExitDelayVerifier; address public immutable triggerableWithdrawalsGateway; + address public immutable consolidationGateway; address public immutable accounting; address public immutable predepositGuarantee; address public immutable wstETH; @@ -63,6 +66,7 @@ contract LidoLocator is ILidoLocator { address public immutable vaultFactory; address public immutable lazyOracle; address public immutable operatorGrid; + address public immutable topUpGateway; //solhint-enable immutable-vars-naming /** @@ -86,6 +90,7 @@ contract LidoLocator is ILidoLocator { oracleDaemonConfig = _assertNonZero(_config.oracleDaemonConfig); validatorExitDelayVerifier = _assertNonZero(_config.validatorExitDelayVerifier); triggerableWithdrawalsGateway = _assertNonZero(_config.triggerableWithdrawalsGateway); + consolidationGateway = _assertNonZero(_config.consolidationGateway); accounting = _assertNonZero(_config.accounting); predepositGuarantee = _assertNonZero(_config.predepositGuarantee); wstETH = _assertNonZero(_config.wstETH); @@ -93,6 +98,7 @@ contract LidoLocator is ILidoLocator { vaultFactory = _assertNonZero(_config.vaultFactory); lazyOracle = _assertNonZero(_config.lazyOracle); operatorGrid = _assertNonZero(_config.operatorGrid); + topUpGateway = _assertNonZero(_config.topUpGateway); } function coreComponents() external view returns ( diff --git a/contracts/0.8.9/StakingRouter.sol b/contracts/0.8.9/StakingRouter.sol deleted file mode 100644 index 
8643d1d425..0000000000 --- a/contracts/0.8.9/StakingRouter.sol +++ /dev/null @@ -1,1505 +0,0 @@ -// SPDX-FileCopyrightText: 2025 Lido -// SPDX-License-Identifier: GPL-3.0 - -/* See contracts/COMPILERS.md */ -pragma solidity 0.8.9; - -import {MinFirstAllocationStrategy} from "contracts/common/lib/MinFirstAllocationStrategy.sol"; -import {Math256} from "contracts/common/lib/Math256.sol"; -import {IStakingModule} from "contracts/common/interfaces/IStakingModule.sol"; - -import {AccessControlEnumerable} from "./utils/access/AccessControlEnumerable.sol"; -import {UnstructuredStorage} from "./lib/UnstructuredStorage.sol"; -import {Versioned} from "./utils/Versioned.sol"; -import {BeaconChainDepositor} from "./BeaconChainDepositor.sol"; - -contract StakingRouter is AccessControlEnumerable, BeaconChainDepositor, Versioned { - using UnstructuredStorage for bytes32; - - /// @dev Events - event StakingModuleAdded(uint256 indexed stakingModuleId, address stakingModule, string name, address createdBy); - event StakingModuleShareLimitSet(uint256 indexed stakingModuleId, uint256 stakeShareLimit, uint256 priorityExitShareThreshold, address setBy); - event StakingModuleFeesSet(uint256 indexed stakingModuleId, uint256 stakingModuleFee, uint256 treasuryFee, address setBy); - event StakingModuleStatusSet(uint256 indexed stakingModuleId, StakingModuleStatus status, address setBy); - event StakingModuleExitedValidatorsIncompleteReporting(uint256 indexed stakingModuleId, uint256 unreportedExitedValidatorsCount); - event StakingModuleMaxDepositsPerBlockSet( - uint256 indexed stakingModuleId, uint256 maxDepositsPerBlock, address setBy - ); - event StakingModuleMinDepositBlockDistanceSet( - uint256 indexed stakingModuleId, uint256 minDepositBlockDistance, address setBy - ); - event WithdrawalCredentialsSet(bytes32 withdrawalCredentials, address setBy); - event WithdrawalsCredentialsChangeFailed(uint256 indexed stakingModuleId, bytes lowLevelRevertData); - event 
ExitedAndStuckValidatorsCountsUpdateFailed(uint256 indexed stakingModuleId, bytes lowLevelRevertData); - event RewardsMintedReportFailed(uint256 indexed stakingModuleId, bytes lowLevelRevertData); - - /// Emitted when the StakingRouter received ETH - event StakingRouterETHDeposited(uint256 indexed stakingModuleId, uint256 amount); - - event StakingModuleExitNotificationFailed( - uint256 indexed stakingModuleId, - uint256 indexed nodeOperatorId, - bytes _publicKey - ); - - /// @dev Errors - error ZeroAddressLido(); - error ZeroAddressAdmin(); - error ZeroAddressStakingModule(); - error InvalidStakeShareLimit(); - error InvalidFeeSum(); - error StakingModuleNotActive(); - error EmptyWithdrawalsCredentials(); - error DirectETHTransfer(); - error InvalidReportData(uint256 code); - error ExitedValidatorsCountCannotDecrease(); - error ReportedExitedValidatorsExceedDeposited( - uint256 reportedExitedValidatorsCount, - uint256 depositedValidatorsCount - ); - error StakingModulesLimitExceeded(); - error StakingModuleUnregistered(); - error AppAuthLidoFailed(); - error StakingModuleStatusTheSame(); - error StakingModuleWrongName(); - error UnexpectedCurrentValidatorsCount( - uint256 currentModuleExitedValidatorsCount, - uint256 currentNodeOpExitedValidatorsCount - ); - error UnexpectedFinalExitedValidatorsCount ( - uint256 newModuleTotalExitedValidatorsCount, - uint256 newModuleTotalExitedValidatorsCountInStakingRouter - ); - error InvalidDepositsValue(uint256 etherValue, uint256 depositsCount); - error StakingModuleAddressExists(); - error ArraysLengthMismatch(uint256 firstArrayLength, uint256 secondArrayLength); - error UnrecoverableModuleError(); - error InvalidPriorityExitShareThreshold(); - error InvalidMinDepositBlockDistance(); - error InvalidMaxDepositPerBlockValue(); - - enum StakingModuleStatus { - Active, // deposits and rewards allowed - DepositsPaused, // deposits NOT allowed, rewards allowed - Stopped // deposits and rewards NOT allowed - } - - struct 
StakingModule { - /// @notice Unique id of the staking module. - uint24 id; - /// @notice Address of the staking module. - address stakingModuleAddress; - /// @notice Part of the fee taken from staking rewards that goes to the staking module. - uint16 stakingModuleFee; - /// @notice Part of the fee taken from staking rewards that goes to the treasury. - uint16 treasuryFee; - /// @notice Maximum stake share that can be allocated to a module, in BP. - /// @dev Formerly known as `targetShare`. - uint16 stakeShareLimit; - /// @notice Staking module status if staking module can not accept the deposits or can - /// participate in further reward distribution. - uint8 status; - /// @notice Name of the staking module. - string name; - /// @notice block.timestamp of the last deposit of the staking module. - /// @dev NB: lastDepositAt gets updated even if the deposit value was 0 and no actual deposit happened. - uint64 lastDepositAt; - /// @notice block.number of the last deposit of the staking module. - /// @dev NB: lastDepositBlock gets updated even if the deposit value was 0 and no actual deposit happened. - uint256 lastDepositBlock; - /// @notice Number of exited validators. - uint256 exitedValidatorsCount; - /// @notice Module's share threshold, upon crossing which, exits of validators from the module will be prioritized, in BP. - uint16 priorityExitShareThreshold; - /// @notice The maximum number of validators that can be deposited in a single block. - /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. - /// See docs for the `OracleReportSanityChecker.setAppearedValidatorsPerDayLimit` function. - uint64 maxDepositsPerBlock; - /// @notice The minimum distance between deposits in blocks. - /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. - /// See docs for the `OracleReportSanityChecker.setAppearedValidatorsPerDayLimit` function). 
- uint64 minDepositBlockDistance; - } - - struct StakingModuleCache { - address stakingModuleAddress; - uint24 stakingModuleId; - uint16 stakingModuleFee; - uint16 treasuryFee; - uint16 stakeShareLimit; - StakingModuleStatus status; - uint256 activeValidatorsCount; - uint256 availableValidatorsCount; - } - - struct ValidatorExitData { - uint256 stakingModuleId; - uint256 nodeOperatorId; - bytes pubkey; - } - - bytes32 public constant MANAGE_WITHDRAWAL_CREDENTIALS_ROLE = keccak256("MANAGE_WITHDRAWAL_CREDENTIALS_ROLE"); - bytes32 public constant STAKING_MODULE_MANAGE_ROLE = keccak256("STAKING_MODULE_MANAGE_ROLE"); - bytes32 public constant STAKING_MODULE_UNVETTING_ROLE = keccak256("STAKING_MODULE_UNVETTING_ROLE"); - bytes32 public constant REPORT_EXITED_VALIDATORS_ROLE = keccak256("REPORT_EXITED_VALIDATORS_ROLE"); - bytes32 public constant REPORT_VALIDATOR_EXITING_STATUS_ROLE = keccak256("REPORT_VALIDATOR_EXITING_STATUS_ROLE"); - bytes32 public constant REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE = keccak256("REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE"); - bytes32 public constant UNSAFE_SET_EXITED_VALIDATORS_ROLE = keccak256("UNSAFE_SET_EXITED_VALIDATORS_ROLE"); - bytes32 public constant REPORT_REWARDS_MINTED_ROLE = keccak256("REPORT_REWARDS_MINTED_ROLE"); - - bytes32 internal constant LIDO_POSITION = keccak256("lido.StakingRouter.lido"); - - /// @dev Credentials to withdraw ETH on Consensus Layer side. - bytes32 internal constant WITHDRAWAL_CREDENTIALS_POSITION = keccak256("lido.StakingRouter.withdrawalCredentials"); - - /// @dev Total count of staking modules. - bytes32 internal constant STAKING_MODULES_COUNT_POSITION = keccak256("lido.StakingRouter.stakingModulesCount"); - /// @dev Id of the last added staking module. This counter grow on staking modules adding. - bytes32 internal constant LAST_STAKING_MODULE_ID_POSITION = keccak256("lido.StakingRouter.lastStakingModuleId"); - /// @dev Mapping is used instead of array to allow to extend the StakingModule. 
- bytes32 internal constant STAKING_MODULES_MAPPING_POSITION = keccak256("lido.StakingRouter.stakingModules"); - /// @dev Position of the staking modules in the `_stakingModules` map, plus 1 because - /// index 0 means a value is not in the set. - bytes32 internal constant STAKING_MODULE_INDICES_MAPPING_POSITION = keccak256("lido.StakingRouter.stakingModuleIndicesOneBased"); - - uint256 public constant FEE_PRECISION_POINTS = 10 ** 20; // 100 * 10 ** 18 - uint256 public constant TOTAL_BASIS_POINTS = 10000; - uint256 public constant MAX_STAKING_MODULES_COUNT = 32; - /// @dev Restrict the name size with 31 bytes to storage in a single slot. - uint256 public constant MAX_STAKING_MODULE_NAME_LENGTH = 31; - - constructor(address _depositContract) BeaconChainDepositor(_depositContract) {} - - /// @notice Initializes the contract. - /// @param _admin Lido DAO Aragon agent contract address. - /// @param _lido Lido address. - /// @param _withdrawalCredentials Credentials to withdraw ETH on Consensus Layer side. - /// @dev Proxy initialization method. - function initialize(address _admin, address _lido, bytes32 _withdrawalCredentials) external { - if (_admin == address(0)) revert ZeroAddressAdmin(); - if (_lido == address(0)) revert ZeroAddressLido(); - - _initializeContractVersionTo(3); - - _setupRole(DEFAULT_ADMIN_ROLE, _admin); - - LIDO_POSITION.setStorageAddress(_lido); - WITHDRAWAL_CREDENTIALS_POSITION.setStorageBytes32(_withdrawalCredentials); - emit WithdrawalCredentialsSet(_withdrawalCredentials, msg.sender); - } - - /// @dev Prohibit direct transfer to contract. - receive() external payable { - revert DirectETHTransfer(); - } - - /// @notice A function to finalize upgrade to v2 (from v1). Removed and no longer used. 
- /// @dev https://github.com/lidofinance/lido-improvement-proposals/blob/develop/LIPS/lip-10.md - /// See historical usage in commit: https://github.com/lidofinance/core/blob/c19480aa3366b26aa6eac17f85a6efae8b9f4f72/contracts/0.8.9/StakingRouter.sol#L190 - // function finalizeUpgrade_v2( - // uint256[] memory _priorityExitShareThresholds, - // uint256[] memory _maxDepositsPerBlock, - // uint256[] memory _minDepositBlockDistances - // ) external - - /// @notice Finalizes upgrade to v3 (from v2). Can be called only once. - function finalizeUpgrade_v3() external { - _checkContractVersion(2); - _updateContractVersion(3); - } - - /// @notice Returns Lido contract address. - /// @return Lido contract address. - function getLido() public view returns (address) { - return LIDO_POSITION.getStorageAddress(); - } - - /// @notice Registers a new staking module. - /// @param _name Name of staking module. - /// @param _stakingModuleAddress Address of staking module. - /// @param _stakeShareLimit Maximum share that can be allocated to a module. - /// @param _priorityExitShareThreshold Module's priority exit share threshold. - /// @param _stakingModuleFee Fee of the staking module taken from the staking rewards. - /// @param _treasuryFee Treasury fee. - /// @param _maxDepositsPerBlock The maximum number of validators that can be deposited in a single block. - /// @param _minDepositBlockDistance The minimum distance between deposits in blocks. - /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. 
- function addStakingModule( - string calldata _name, - address _stakingModuleAddress, - uint256 _stakeShareLimit, - uint256 _priorityExitShareThreshold, - uint256 _stakingModuleFee, - uint256 _treasuryFee, - uint256 _maxDepositsPerBlock, - uint256 _minDepositBlockDistance - ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { - if (_stakingModuleAddress == address(0)) revert ZeroAddressStakingModule(); - if (bytes(_name).length == 0 || bytes(_name).length > MAX_STAKING_MODULE_NAME_LENGTH) revert StakingModuleWrongName(); - - uint256 newStakingModuleIndex = getStakingModulesCount(); - - if (newStakingModuleIndex >= MAX_STAKING_MODULES_COUNT) - revert StakingModulesLimitExceeded(); - - for (uint256 i; i < newStakingModuleIndex; ) { - if (_stakingModuleAddress == _getStakingModuleByIndex(i).stakingModuleAddress) - revert StakingModuleAddressExists(); - - unchecked { - ++i; - } - } - - StakingModule storage newStakingModule = _getStakingModuleByIndex(newStakingModuleIndex); - uint24 newStakingModuleId = uint24(LAST_STAKING_MODULE_ID_POSITION.getStorageUint256()) + 1; - - newStakingModule.id = newStakingModuleId; - newStakingModule.name = _name; - newStakingModule.stakingModuleAddress = _stakingModuleAddress; - /// @dev Since `enum` is `uint8` by nature, so the `status` is stored as `uint8` to avoid - /// possible problems when upgrading. But for human readability, we use `enum` as - /// function parameter type. More about conversion in the docs: - /// https://docs.soliditylang.org/en/v0.8.17/types.html#enums - newStakingModule.status = uint8(StakingModuleStatus.Active); - - /// @dev Simulate zero value deposit to prevent real deposits into the new StakingModule via - /// DepositSecurityModule just after the addition. 
- _updateModuleLastDepositState(newStakingModule, newStakingModuleId, 0); - - _setStakingModuleIndexById(newStakingModuleId, newStakingModuleIndex); - LAST_STAKING_MODULE_ID_POSITION.setStorageUint256(newStakingModuleId); - STAKING_MODULES_COUNT_POSITION.setStorageUint256(newStakingModuleIndex + 1); - - emit StakingModuleAdded(newStakingModuleId, _stakingModuleAddress, _name, msg.sender); - _updateStakingModule( - newStakingModule, - newStakingModuleId, - _stakeShareLimit, - _priorityExitShareThreshold, - _stakingModuleFee, - _treasuryFee, - _maxDepositsPerBlock, - _minDepositBlockDistance - ); - } - - /// @notice Updates staking module params. - /// @param _stakingModuleId Staking module id. - /// @param _stakeShareLimit Target total stake share. - /// @param _priorityExitShareThreshold Module's priority exit share threshold. - /// @param _stakingModuleFee Fee of the staking module taken from the staking rewards. - /// @param _treasuryFee Treasury fee. - /// @param _maxDepositsPerBlock The maximum number of validators that can be deposited in a single block. - /// @param _minDepositBlockDistance The minimum distance between deposits in blocks. - /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. 
- function updateStakingModule( - uint256 _stakingModuleId, - uint256 _stakeShareLimit, - uint256 _priorityExitShareThreshold, - uint256 _stakingModuleFee, - uint256 _treasuryFee, - uint256 _maxDepositsPerBlock, - uint256 _minDepositBlockDistance - ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { - StakingModule storage stakingModule = _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - _updateStakingModule( - stakingModule, - _stakingModuleId, - _stakeShareLimit, - _priorityExitShareThreshold, - _stakingModuleFee, - _treasuryFee, - _maxDepositsPerBlock, - _minDepositBlockDistance - ); - } - - function _updateStakingModule( - StakingModule storage stakingModule, - uint256 _stakingModuleId, - uint256 _stakeShareLimit, - uint256 _priorityExitShareThreshold, - uint256 _stakingModuleFee, - uint256 _treasuryFee, - uint256 _maxDepositsPerBlock, - uint256 _minDepositBlockDistance - ) internal { - if (_stakeShareLimit > TOTAL_BASIS_POINTS) revert InvalidStakeShareLimit(); - if (_priorityExitShareThreshold > TOTAL_BASIS_POINTS) revert InvalidPriorityExitShareThreshold(); - if (_stakeShareLimit > _priorityExitShareThreshold) revert InvalidPriorityExitShareThreshold(); - if (_stakingModuleFee + _treasuryFee > TOTAL_BASIS_POINTS) revert InvalidFeeSum(); - if (_minDepositBlockDistance == 0 || _minDepositBlockDistance > type(uint64).max) revert InvalidMinDepositBlockDistance(); - if (_maxDepositsPerBlock > type(uint64).max) revert InvalidMaxDepositPerBlockValue(); - - stakingModule.stakeShareLimit = uint16(_stakeShareLimit); - stakingModule.priorityExitShareThreshold = uint16(_priorityExitShareThreshold); - stakingModule.treasuryFee = uint16(_treasuryFee); - stakingModule.stakingModuleFee = uint16(_stakingModuleFee); - stakingModule.maxDepositsPerBlock = uint64(_maxDepositsPerBlock); - stakingModule.minDepositBlockDistance = uint64(_minDepositBlockDistance); - - emit StakingModuleShareLimitSet(_stakingModuleId, _stakeShareLimit, 
_priorityExitShareThreshold, msg.sender); - emit StakingModuleFeesSet(_stakingModuleId, _stakingModuleFee, _treasuryFee, msg.sender); - emit StakingModuleMaxDepositsPerBlockSet(_stakingModuleId, _maxDepositsPerBlock, msg.sender); - emit StakingModuleMinDepositBlockDistanceSet(_stakingModuleId, _minDepositBlockDistance, msg.sender); - } - - /// @notice Updates the limit of the validators that can be used for deposit. - /// @param _stakingModuleId Id of the staking module. - /// @param _nodeOperatorId Id of the node operator. - /// @param _targetLimitMode Target limit mode. - /// @param _targetLimit Target limit of the node operator. - /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. - function updateTargetValidatorsLimits( - uint256 _stakingModuleId, - uint256 _nodeOperatorId, - uint256 _targetLimitMode, - uint256 _targetLimit - ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { - _getIStakingModuleById(_stakingModuleId).updateTargetValidatorsLimits( - _nodeOperatorId, _targetLimitMode, _targetLimit - ); - } - - /// @notice Reports the minted rewards to the staking modules with the specified ids. - /// @param _stakingModuleIds Ids of the staking modules. - /// @param _totalShares Total shares minted for the staking modules. - /// @dev The function is restricted to the `REPORT_REWARDS_MINTED_ROLE` role. - function reportRewardsMinted(uint256[] calldata _stakingModuleIds, uint256[] calldata _totalShares) - external - onlyRole(REPORT_REWARDS_MINTED_ROLE) - { - _validateEqualArrayLengths(_stakingModuleIds.length, _totalShares.length); - - for (uint256 i = 0; i < _stakingModuleIds.length; ) { - if (_totalShares[i] > 0) { - try _getIStakingModuleById(_stakingModuleIds[i]).onRewardsMinted(_totalShares[i]) {} - catch (bytes memory lowLevelRevertData) { - /// @dev This check is required to prevent incorrect gas estimation of the method. 
- /// Without it, Ethereum nodes that use binary search for gas estimation may - /// return an invalid value when the onRewardsMinted() reverts because of the - /// "out of gas" error. Here we assume that the onRewardsMinted() method doesn't - /// have reverts with empty error data except "out of gas". - if (lowLevelRevertData.length == 0) revert UnrecoverableModuleError(); - emit RewardsMintedReportFailed( - _stakingModuleIds[i], - lowLevelRevertData - ); - } - } - - unchecked { - ++i; - } - } - } - - /// @notice Updates total numbers of exited validators for staking modules with the specified module ids. - /// @param _stakingModuleIds Ids of the staking modules to be updated. - /// @param _exitedValidatorsCounts New counts of exited validators for the specified staking modules. - /// @return The total increase in the aggregate number of exited validators across all updated modules. - /// - /// @dev The total numbers are stored in the staking router and can differ from the totals obtained by calling - /// `IStakingModule.getStakingModuleSummary()`. The overall process of updating validator counts is the following: - /// - /// 1. In the first data submission phase, the oracle calls `updateExitedValidatorsCountByStakingModule` on the - /// staking router, passing the totals by module. The staking router stores these totals and uses them to - /// distribute new stake and staking fees between the modules. There can only be single call of this function - /// per oracle reporting frame. - /// - /// 2. In the second part of the second data submission phase, the oracle calls - /// `StakingRouter.reportStakingModuleExitedValidatorsCountByNodeOperator` on the staking router which passes - /// the counts by node operator to the staking module by calling `IStakingModule.updateExitedValidatorsCount`. - /// This can be done multiple times for the same module, passing data for different subsets of node - /// operators. - /// - /// 3. 
At the end of the second data submission phase, it's expected for the aggregate exited validators count - /// across all module's node operators (stored in the module) to match the total count for this module - /// (stored in the staking router). However, it might happen that the second phase of data submission doesn't - /// finish until the new oracle reporting frame is started, in which case staking router will emit a warning - /// event `StakingModuleExitedValidatorsIncompleteReporting` when the first data submission phase is performed - /// for a new reporting frame. This condition will result in the staking module having an incomplete data about - /// the exited validator counts during the whole reporting frame. Handling this condition is - /// the responsibility of each staking module. - /// - /// 4. When the second reporting phase is finished, i.e. when the oracle submitted the complete data on the exited - /// validator counts per node operator for the current reporting frame, the oracle calls - /// `StakingRouter.onValidatorsCountsByNodeOperatorReportingFinished` which, in turn, calls - /// `IStakingModule.onExitedAndStuckValidatorsCountsUpdated` on all modules. - /// - /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role. 
- function updateExitedValidatorsCountByStakingModule( - uint256[] calldata _stakingModuleIds, - uint256[] calldata _exitedValidatorsCounts - ) - external - onlyRole(REPORT_EXITED_VALIDATORS_ROLE) - returns (uint256) - { - _validateEqualArrayLengths(_stakingModuleIds.length, _exitedValidatorsCounts.length); - - uint256 newlyExitedValidatorsCount; - - for (uint256 i = 0; i < _stakingModuleIds.length; ) { - uint256 stakingModuleId = _stakingModuleIds[i]; - StakingModule storage stakingModule = _getStakingModuleByIndex(_getStakingModuleIndexById(stakingModuleId)); - - uint256 prevReportedExitedValidatorsCount = stakingModule.exitedValidatorsCount; - if (_exitedValidatorsCounts[i] < prevReportedExitedValidatorsCount) { - revert ExitedValidatorsCountCannotDecrease(); - } - - ( - uint256 totalExitedValidators, - uint256 totalDepositedValidators, - /* uint256 depositableValidatorsCount */ - ) = _getStakingModuleSummary(IStakingModule(stakingModule.stakingModuleAddress)); - - if (_exitedValidatorsCounts[i] > totalDepositedValidators) { - revert ReportedExitedValidatorsExceedDeposited( - _exitedValidatorsCounts[i], - totalDepositedValidators - ); - } - - newlyExitedValidatorsCount += _exitedValidatorsCounts[i] - prevReportedExitedValidatorsCount; - - if (totalExitedValidators < prevReportedExitedValidatorsCount) { - // not all of the exited validators were async reported to the module - emit StakingModuleExitedValidatorsIncompleteReporting( - stakingModuleId, - prevReportedExitedValidatorsCount - totalExitedValidators - ); - } - - stakingModule.exitedValidatorsCount = _exitedValidatorsCounts[i]; - - unchecked { - ++i; - } - } - - return newlyExitedValidatorsCount; - } - - /// @notice Updates exited validators counts per node operator for the staking module with - /// the specified id. See the docs for `updateExitedValidatorsCountByStakingModule` for the - /// description of the overall update process. 
- /// - /// @param _stakingModuleId The id of the staking modules to be updated. - /// @param _nodeOperatorIds Ids of the node operators to be updated. - /// @param _exitedValidatorsCounts New counts of exited validators for the specified node operators. - /// - /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role. - function reportStakingModuleExitedValidatorsCountByNodeOperator( - uint256 _stakingModuleId, - bytes calldata _nodeOperatorIds, - bytes calldata _exitedValidatorsCounts - ) - external - onlyRole(REPORT_EXITED_VALIDATORS_ROLE) - { - _checkValidatorsByNodeOperatorReportData(_nodeOperatorIds, _exitedValidatorsCounts); - _getIStakingModuleById(_stakingModuleId).updateExitedValidatorsCount(_nodeOperatorIds, _exitedValidatorsCounts); - } - - struct ValidatorsCountsCorrection { - /// @notice The expected current number of exited validators of the module that is - /// being corrected. - uint256 currentModuleExitedValidatorsCount; - /// @notice The expected current number of exited validators of the node operator - /// that is being corrected. - uint256 currentNodeOperatorExitedValidatorsCount; - /// @notice The corrected number of exited validators of the module. - uint256 newModuleExitedValidatorsCount; - /// @notice The corrected number of exited validators of the node operator. - uint256 newNodeOperatorExitedValidatorsCount; - } - - /// @notice Sets exited validators count for the given module and given node operator in that module - /// without performing critical safety checks, e.g. that exited validators count cannot decrease. - /// - /// Should only be used by the DAO in extreme cases and with sufficient precautions to correct invalid - /// data reported by the oracle committee due to a bug in the oracle daemon. - /// - /// @param _stakingModuleId Id of the staking module. - /// @param _nodeOperatorId Id of the node operator. 
- /// @param _triggerUpdateFinish Whether to call `onExitedAndStuckValidatorsCountsUpdated` on the module - /// after applying the corrections. - /// @param _correction See the docs for the `ValidatorsCountsCorrection` struct. - /// - /// @dev Reverts if the current numbers of exited validators of the module and node operator - /// don't match the supplied expected current values. - /// - /// @dev The function is restricted to the `UNSAFE_SET_EXITED_VALIDATORS_ROLE` role. - function unsafeSetExitedValidatorsCount( - uint256 _stakingModuleId, - uint256 _nodeOperatorId, - bool _triggerUpdateFinish, - ValidatorsCountsCorrection memory _correction - ) - external - onlyRole(UNSAFE_SET_EXITED_VALIDATORS_ROLE) - { - StakingModule storage stakingModuleState = _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - IStakingModule stakingModule = IStakingModule(stakingModuleState.stakingModuleAddress); - - ( - /* uint256 targetLimitMode */, - /* uint256 targetValidatorsCount */, - /* uint256 stuckValidatorsCount, */, - /* uint256 refundedValidatorsCount */, - /* uint256 stuckPenaltyEndTimestamp */, - uint256 totalExitedValidators, - /* uint256 totalDepositedValidators */, - /* uint256 depositableValidatorsCount */ - ) = stakingModule.getNodeOperatorSummary(_nodeOperatorId); - - if (_correction.currentModuleExitedValidatorsCount != stakingModuleState.exitedValidatorsCount || - _correction.currentNodeOperatorExitedValidatorsCount != totalExitedValidators - ) { - revert UnexpectedCurrentValidatorsCount( - stakingModuleState.exitedValidatorsCount, - totalExitedValidators - ); - } - - stakingModuleState.exitedValidatorsCount = _correction.newModuleExitedValidatorsCount; - - stakingModule.unsafeUpdateValidatorsCount( - _nodeOperatorId, - _correction.newNodeOperatorExitedValidatorsCount - ); - - ( - uint256 moduleTotalExitedValidators, - uint256 moduleTotalDepositedValidators, - ) = _getStakingModuleSummary(stakingModule); - - if 
(_correction.newModuleExitedValidatorsCount > moduleTotalDepositedValidators) { - revert ReportedExitedValidatorsExceedDeposited( - _correction.newModuleExitedValidatorsCount, - moduleTotalDepositedValidators - ); - } - - if (_triggerUpdateFinish) { - if (moduleTotalExitedValidators != _correction.newModuleExitedValidatorsCount) { - revert UnexpectedFinalExitedValidatorsCount( - moduleTotalExitedValidators, - _correction.newModuleExitedValidatorsCount - ); - } - - stakingModule.onExitedAndStuckValidatorsCountsUpdated(); - } - } - - /// @notice Finalizes the reporting of the exited validators counts for the current - /// reporting frame. - /// - /// @dev Called by the oracle when the second phase of data reporting finishes, i.e. when the - /// oracle submitted the complete data on the exited validator counts per node operator - /// for the current reporting frame. See the docs for `updateExitedValidatorsCountByStakingModule` - /// for the description of the overall update process. - /// - /// @dev The function is restricted to the `REPORT_EXITED_VALIDATORS_ROLE` role. - function onValidatorsCountsByNodeOperatorReportingFinished() - external - onlyRole(REPORT_EXITED_VALIDATORS_ROLE) - { - uint256 stakingModulesCount = getStakingModulesCount(); - StakingModule storage stakingModule; - IStakingModule moduleContract; - - for (uint256 i; i < stakingModulesCount; ) { - stakingModule = _getStakingModuleByIndex(i); - moduleContract = IStakingModule(stakingModule.stakingModuleAddress); - - (uint256 exitedValidatorsCount, , ) = _getStakingModuleSummary(moduleContract); - if (exitedValidatorsCount == stakingModule.exitedValidatorsCount) { - // oracle finished updating exited validators for all node ops - try moduleContract.onExitedAndStuckValidatorsCountsUpdated() {} - catch (bytes memory lowLevelRevertData) { - /// @dev This check is required to prevent incorrect gas estimation of the method. 
- /// Without it, Ethereum nodes that use binary search for gas estimation may - /// return an invalid value when the onExitedAndStuckValidatorsCountsUpdated() - /// reverts because of the "out of gas" error. Here we assume that the - /// onExitedAndStuckValidatorsCountsUpdated() method doesn't have reverts with - /// empty error data except "out of gas". - if (lowLevelRevertData.length == 0) revert UnrecoverableModuleError(); - emit ExitedAndStuckValidatorsCountsUpdateFailed( - stakingModule.id, - lowLevelRevertData - ); - } - } - - unchecked { - ++i; - } - } - } - - /// @notice Decreases vetted signing keys counts per node operator for the staking module with - /// the specified id. - /// @param _stakingModuleId The id of the staking module to be updated. - /// @param _nodeOperatorIds Ids of the node operators to be updated. - /// @param _vettedSigningKeysCounts New counts of vetted signing keys for the specified node operators. - /// @dev The function is restricted to the `STAKING_MODULE_UNVETTING_ROLE` role. - function decreaseStakingModuleVettedKeysCountByNodeOperator( - uint256 _stakingModuleId, - bytes calldata _nodeOperatorIds, - bytes calldata _vettedSigningKeysCounts - ) external onlyRole(STAKING_MODULE_UNVETTING_ROLE) { - _checkValidatorsByNodeOperatorReportData(_nodeOperatorIds, _vettedSigningKeysCounts); - _getIStakingModuleById(_stakingModuleId).decreaseVettedSigningKeysCount(_nodeOperatorIds, _vettedSigningKeysCounts); - } - - /// @notice Returns all registered staking modules. - /// @return res Array of staking modules. - function getStakingModules() external view returns (StakingModule[] memory res) { - uint256 stakingModulesCount = getStakingModulesCount(); - res = new StakingModule[](stakingModulesCount); - for (uint256 i; i < stakingModulesCount; ) { - res[i] = _getStakingModuleByIndex(i); - - unchecked { - ++i; - } - } - } - - /// @notice Returns the ids of all registered staking modules. 
- /// @return stakingModuleIds Array of staking module ids. - function getStakingModuleIds() public view returns (uint256[] memory stakingModuleIds) { - uint256 stakingModulesCount = getStakingModulesCount(); - stakingModuleIds = new uint256[](stakingModulesCount); - for (uint256 i; i < stakingModulesCount; ) { - stakingModuleIds[i] = _getStakingModuleByIndex(i).id; - - unchecked { - ++i; - } - } - } - - /// @notice Returns the staking module by its id. - /// @param _stakingModuleId Id of the staking module. - /// @return Staking module data. - function getStakingModule(uint256 _stakingModuleId) - public - view - returns (StakingModule memory) - { - return _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - } - - /// @notice Returns total number of staking modules. - /// @return Total number of staking modules. - function getStakingModulesCount() public view returns (uint256) { - return STAKING_MODULES_COUNT_POSITION.getStorageUint256(); - } - - /// @notice Returns true if staking module with the given id was registered via `addStakingModule`, false otherwise. - /// @param _stakingModuleId Id of the staking module. - /// @return True if staking module with the given id was registered, false otherwise. - function hasStakingModule(uint256 _stakingModuleId) external view returns (bool) { - return _getStorageStakingIndicesMapping()[_stakingModuleId] != 0; - } - - /// @notice Returns status of staking module. - /// @param _stakingModuleId Id of the staking module. - /// @return Status of the staking module. - function getStakingModuleStatus(uint256 _stakingModuleId) - public - view - returns (StakingModuleStatus) - { - return StakingModuleStatus(_getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)).status); - } - - /// @notice A summary of the staking module's validators. - struct StakingModuleSummary { - /// @notice The total number of validators in the EXITED state on the Consensus Layer. 
- /// @dev This value can't decrease in normal conditions. - uint256 totalExitedValidators; - - /// @notice The total number of validators deposited via the official Deposit Contract. - /// @dev This value is a cumulative counter: even when the validator goes into EXITED state this - /// counter is not decreasing. - uint256 totalDepositedValidators; - - /// @notice The number of validators in the set available for deposit - uint256 depositableValidatorsCount; - } - - /// @notice A summary of node operator and its validators. - struct NodeOperatorSummary { - /// @notice Shows whether the current target limit applied to the node operator. - uint256 targetLimitMode; - - /// @notice Relative target active validators limit for operator. - uint256 targetValidatorsCount; - - /// @notice The number of validators with an expired request to exit time. - /// @dev [deprecated] Stuck key processing has been removed, this field is no longer used. - uint256 stuckValidatorsCount; - - /// @notice The number of validators that can't be withdrawn, but deposit costs were - /// compensated to the Lido by the node operator. - /// @dev [deprecated] Refunded validators processing has been removed, this field is no longer used. - uint256 refundedValidatorsCount; - - /// @notice A time when the penalty for stuck validators stops applying to node operator rewards. - /// @dev [deprecated] Stuck key processing has been removed, this field is no longer used. - uint256 stuckPenaltyEndTimestamp; - - /// @notice The total number of validators in the EXITED state on the Consensus Layer. - /// @dev This value can't decrease in normal conditions. - uint256 totalExitedValidators; - - /// @notice The total number of validators deposited via the official Deposit Contract. - /// @dev This value is a cumulative counter: even when the validator goes into EXITED state this - /// counter is not decreasing. 
- uint256 totalDepositedValidators; - - /// @notice The number of validators in the set available for deposit. - uint256 depositableValidatorsCount; - } - - /// @notice Returns all-validators summary in the staking module. - /// @param _stakingModuleId Id of the staking module to return summary for. - /// @return summary Staking module summary. - function getStakingModuleSummary(uint256 _stakingModuleId) - public - view - returns (StakingModuleSummary memory summary) - { - IStakingModule stakingModule = IStakingModule(getStakingModule(_stakingModuleId).stakingModuleAddress); - ( - summary.totalExitedValidators, - summary.totalDepositedValidators, - summary.depositableValidatorsCount - ) = _getStakingModuleSummary(stakingModule); - } - - - /// @notice Returns node operator summary from the staking module. - /// @param _stakingModuleId Id of the staking module where node operator is onboarded. - /// @param _nodeOperatorId Id of the node operator to return summary for. - /// @return summary Node operator summary. 
- function getNodeOperatorSummary(uint256 _stakingModuleId, uint256 _nodeOperatorId) - public - view - returns (NodeOperatorSummary memory summary) - { - IStakingModule stakingModule = IStakingModule(getStakingModule(_stakingModuleId).stakingModuleAddress); - /// @dev using intermediate variables below due to "Stack too deep" error in case of - /// assigning directly into the NodeOperatorSummary struct - ( - uint256 targetLimitMode, - uint256 targetValidatorsCount, - /* uint256 stuckValidatorsCount */, - /* uint256 refundedValidatorsCount */, - /* uint256 stuckPenaltyEndTimestamp */, - uint256 totalExitedValidators, - uint256 totalDepositedValidators, - uint256 depositableValidatorsCount - ) = stakingModule.getNodeOperatorSummary(_nodeOperatorId); - summary.targetLimitMode = targetLimitMode; - summary.targetValidatorsCount = targetValidatorsCount; - summary.totalExitedValidators = totalExitedValidators; - summary.totalDepositedValidators = totalDepositedValidators; - summary.depositableValidatorsCount = depositableValidatorsCount; - } - - /// @notice A collection of the staking module data stored across the StakingRouter and the - /// staking module contract. - /// - /// @dev This data, first of all, is designed for off-chain usage and might be redundant for - /// on-chain calls. Give preference for dedicated methods for gas-efficient on-chain calls. - struct StakingModuleDigest { - /// @notice The number of node operators registered in the staking module. - uint256 nodeOperatorsCount; - /// @notice The number of node operators registered in the staking module in active state. - uint256 activeNodeOperatorsCount; - /// @notice The current state of the staking module taken from the StakingRouter. - StakingModule state; - /// @notice A summary of the staking module's validators. - StakingModuleSummary summary; - } - - /// @notice A collection of the node operator data stored in the staking module. 
- /// @dev This data, first of all, is designed for off-chain usage and might be redundant for - /// on-chain calls. Give preference for dedicated methods for gas-efficient on-chain calls. - struct NodeOperatorDigest { - /// @notice Id of the node operator. - uint256 id; - /// @notice Shows whether the node operator is active or not. - bool isActive; - /// @notice A summary of node operator and its validators. - NodeOperatorSummary summary; - } - - /// @notice Returns staking module digest for each staking module registered in the staking router. - /// @return Array of staking module digests. - /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs - /// for data aggregation. - function getAllStakingModuleDigests() external view returns (StakingModuleDigest[] memory) { - return getStakingModuleDigests(getStakingModuleIds()); - } - - /// @notice Returns staking module digest for passed staking module ids. - /// @param _stakingModuleIds Ids of the staking modules to return data for. - /// @return digests Array of staking module digests. - /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs - /// for data aggregation. 
- function getStakingModuleDigests(uint256[] memory _stakingModuleIds) - public - view - returns (StakingModuleDigest[] memory digests) - { - digests = new StakingModuleDigest[](_stakingModuleIds.length); - for (uint256 i = 0; i < _stakingModuleIds.length; ) { - StakingModule memory stakingModuleState = getStakingModule(_stakingModuleIds[i]); - IStakingModule stakingModule = IStakingModule(stakingModuleState.stakingModuleAddress); - digests[i] = StakingModuleDigest({ - nodeOperatorsCount: stakingModule.getNodeOperatorsCount(), - activeNodeOperatorsCount: stakingModule.getActiveNodeOperatorsCount(), - state: stakingModuleState, - summary: getStakingModuleSummary(_stakingModuleIds[i]) - }); - - unchecked { - ++i; - } - } - } - - /// @notice Returns node operator digest for each node operator registered in the given staking module. - /// @param _stakingModuleId Id of the staking module to return data for. - /// @return Array of node operator digests. - /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs - /// for data aggregation. - function getAllNodeOperatorDigests(uint256 _stakingModuleId) external view returns (NodeOperatorDigest[] memory) { - return getNodeOperatorDigests( - _stakingModuleId, 0, _getIStakingModuleById(_stakingModuleId).getNodeOperatorsCount() - ); - } - - /// @notice Returns node operator digest for passed node operator ids in the given staking module. - /// @param _stakingModuleId Id of the staking module where node operators registered. - /// @param _offset Node operators offset starting with 0. - /// @param _limit The max number of node operators to return. - /// @return Array of node operator digests. - /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs - /// for data aggregation. 
- function getNodeOperatorDigests( - uint256 _stakingModuleId, - uint256 _offset, - uint256 _limit - ) public view returns (NodeOperatorDigest[] memory) { - return getNodeOperatorDigests( - _stakingModuleId, _getIStakingModuleById(_stakingModuleId).getNodeOperatorIds(_offset, _limit) - ); - } - - /// @notice Returns node operator digest for a slice of node operators registered in the given - /// staking module. - /// @param _stakingModuleId Id of the staking module where node operators registered. - /// @param _nodeOperatorIds Ids of the node operators to return data for. - /// @return digests Array of node operator digests. - /// @dev WARNING: This method is not supposed to be used for onchain calls due to high gas costs - /// for data aggregation. - function getNodeOperatorDigests(uint256 _stakingModuleId, uint256[] memory _nodeOperatorIds) - public - view - returns (NodeOperatorDigest[] memory digests) - { - IStakingModule stakingModule = _getIStakingModuleById(_stakingModuleId); - digests = new NodeOperatorDigest[](_nodeOperatorIds.length); - for (uint256 i = 0; i < _nodeOperatorIds.length; ) { - digests[i] = NodeOperatorDigest({ - id: _nodeOperatorIds[i], - isActive: stakingModule.getNodeOperatorIsActive(_nodeOperatorIds[i]), - summary: getNodeOperatorSummary(_stakingModuleId, _nodeOperatorIds[i]) - }); - - unchecked { - ++i; - } - } - } - - /// @notice Sets the staking module status flag for participation in further deposits and/or reward distribution. - /// @param _stakingModuleId Id of the staking module to be updated. - /// @param _status New status of the staking module. - /// @dev The function is restricted to the `STAKING_MODULE_MANAGE_ROLE` role. 
- function setStakingModuleStatus( - uint256 _stakingModuleId, - StakingModuleStatus _status - ) external onlyRole(STAKING_MODULE_MANAGE_ROLE) { - StakingModule storage stakingModule = _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - if (StakingModuleStatus(stakingModule.status) == _status) revert StakingModuleStatusTheSame(); - _setStakingModuleStatus(stakingModule, _status); - } - - /// @notice Returns whether the staking module is stopped. - /// @param _stakingModuleId Id of the staking module. - /// @return True if the staking module is stopped, false otherwise. - function getStakingModuleIsStopped(uint256 _stakingModuleId) external view returns (bool) - { - return getStakingModuleStatus(_stakingModuleId) == StakingModuleStatus.Stopped; - } - - /// @notice Returns whether the deposits are paused for the staking module. - /// @param _stakingModuleId Id of the staking module. - /// @return True if the deposits are paused, false otherwise. - function getStakingModuleIsDepositsPaused(uint256 _stakingModuleId) - external - view - returns (bool) - { - return getStakingModuleStatus(_stakingModuleId) == StakingModuleStatus.DepositsPaused; - } - - /// @notice Returns whether the staking module is active. - /// @param _stakingModuleId Id of the staking module. - /// @return True if the staking module is active, false otherwise. - function getStakingModuleIsActive(uint256 _stakingModuleId) external view returns (bool) { - return getStakingModuleStatus(_stakingModuleId) == StakingModuleStatus.Active; - } - - /// @notice Returns staking module nonce. - /// @param _stakingModuleId Id of the staking module. - /// @return Staking module nonce. - function getStakingModuleNonce(uint256 _stakingModuleId) external view returns (uint256) { - return _getIStakingModuleById(_stakingModuleId).getNonce(); - } - - /// @notice Returns the last deposit block for the staking module. - /// @param _stakingModuleId Id of the staking module. 
- /// @return Last deposit block for the staking module. - function getStakingModuleLastDepositBlock(uint256 _stakingModuleId) - external - view - returns (uint256) - { - return _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)).lastDepositBlock; - } - - /// @notice Returns the min deposit block distance for the staking module. - /// @param _stakingModuleId Id of the staking module. - /// @return Min deposit block distance for the staking module. - function getStakingModuleMinDepositBlockDistance(uint256 _stakingModuleId) external view returns (uint256) { - return _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)).minDepositBlockDistance; - } - - /// @notice Returns the max deposits count per block for the staking module. - /// @param _stakingModuleId Id of the staking module. - /// @return Max deposits count per block for the staking module. - function getStakingModuleMaxDepositsPerBlock(uint256 _stakingModuleId) external view returns (uint256) { - return _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)).maxDepositsPerBlock; - } - - /// @notice Returns active validators count for the staking module. - /// @param _stakingModuleId Id of the staking module. - /// @return activeValidatorsCount Active validators count for the staking module. 
- function getStakingModuleActiveValidatorsCount(uint256 _stakingModuleId) - external - view - returns (uint256 activeValidatorsCount) - { - StakingModule storage stakingModule = _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - ( - uint256 totalExitedValidators, - uint256 totalDepositedValidators, - /* uint256 depositableValidatorsCount */ - ) = _getStakingModuleSummary(IStakingModule(stakingModule.stakingModuleAddress)); - - activeValidatorsCount = totalDepositedValidators - Math256.max( - stakingModule.exitedValidatorsCount, totalExitedValidators - ); - } - - /// @notice Returns the max count of deposits which the staking module can provide data for based - /// on the passed `_maxDepositsValue` amount. - /// @param _stakingModuleId Id of the staking module to be deposited. - /// @param _maxDepositsValue Max amount of ether that might be used for deposits count calculation. - /// @return Max number of deposits might be done using the given staking module. - function getStakingModuleMaxDepositsCount(uint256 _stakingModuleId, uint256 _maxDepositsValue) - public - view - returns (uint256) - { - ( - /* uint256 allocated */, - uint256[] memory newDepositsAllocation, - StakingModuleCache[] memory stakingModulesCache - ) = _getDepositsAllocation(_maxDepositsValue / DEPOSIT_SIZE); - uint256 stakingModuleIndex = _getStakingModuleIndexById(_stakingModuleId); - return - newDepositsAllocation[stakingModuleIndex] - stakingModulesCache[stakingModuleIndex].activeValidatorsCount; - } - - /// @notice Returns the aggregate fee distribution proportion. - /// @return modulesFee Modules aggregate fee in base precision. - /// @return treasuryFee Treasury fee in base precision. - /// @return basePrecision Base precision: a value corresponding to the full fee. 
- function getStakingFeeAggregateDistribution() public view returns ( - uint96 modulesFee, - uint96 treasuryFee, - uint256 basePrecision - ) { - uint96[] memory moduleFees; - uint96 totalFee; - (, , moduleFees, totalFee, basePrecision) = getStakingRewardsDistribution(); - for (uint256 i; i < moduleFees.length; ) { - modulesFee += moduleFees[i]; - - unchecked { - ++i; - } - } - treasuryFee = totalFee - modulesFee; - } - - /// @notice Return shares table. - /// @return recipients Rewards recipient addresses corresponding to each module. - /// @return stakingModuleIds Module IDs. - /// @return stakingModuleFees Fee of each recipient. - /// @return totalFee Total fee to mint for each staking module and treasury. - /// @return precisionPoints Base precision number, which constitutes 100% fee. - function getStakingRewardsDistribution() - public - view - returns ( - address[] memory recipients, - uint256[] memory stakingModuleIds, - uint96[] memory stakingModuleFees, - uint96 totalFee, - uint256 precisionPoints - ) - { - (uint256 totalActiveValidators, StakingModuleCache[] memory stakingModulesCache) = _loadStakingModulesCache(); - uint256 stakingModulesCount = stakingModulesCache.length; - - /// @dev Return empty response if there are no staking modules or active validators yet. - if (stakingModulesCount == 0 || totalActiveValidators == 0) { - return (new address[](0), new uint256[](0), new uint96[](0), 0, FEE_PRECISION_POINTS); - } - - precisionPoints = FEE_PRECISION_POINTS; - stakingModuleIds = new uint256[](stakingModulesCount); - recipients = new address[](stakingModulesCount); - stakingModuleFees = new uint96[](stakingModulesCount); - - uint256 rewardedStakingModulesCount = 0; - uint256 stakingModuleValidatorsShare; - uint96 stakingModuleFee; - - for (uint256 i; i < stakingModulesCount; ) { - /// @dev Skip staking modules which have no active validators. 
- if (stakingModulesCache[i].activeValidatorsCount > 0) { - stakingModuleIds[rewardedStakingModulesCount] = stakingModulesCache[i].stakingModuleId; - stakingModuleValidatorsShare = ((stakingModulesCache[i].activeValidatorsCount * precisionPoints) / totalActiveValidators); - - recipients[rewardedStakingModulesCount] = address(stakingModulesCache[i].stakingModuleAddress); - stakingModuleFee = uint96((stakingModuleValidatorsShare * stakingModulesCache[i].stakingModuleFee) / TOTAL_BASIS_POINTS); - /// @dev If the staking module has the `Stopped` status for some reason, then - /// the staking module's rewards go to the treasury, so that the DAO has ability - /// to manage them (e.g. to compensate the staking module in case of an error, etc.) - if (stakingModulesCache[i].status != StakingModuleStatus.Stopped) { - stakingModuleFees[rewardedStakingModulesCount] = stakingModuleFee; - } - // Else keep stakingModuleFees[rewardedStakingModulesCount] = 0, but increase totalFee. - - totalFee += (uint96((stakingModuleValidatorsShare * stakingModulesCache[i].treasuryFee) / TOTAL_BASIS_POINTS) + stakingModuleFee); - - unchecked { - rewardedStakingModulesCount++; - } - } - - unchecked { - ++i; - } - } - - // Total fee never exceeds 100%. - assert(totalFee <= precisionPoints); - - /// @dev Shrink arrays. - if (rewardedStakingModulesCount < stakingModulesCount) { - assembly { - mstore(stakingModuleIds, rewardedStakingModulesCount) - mstore(recipients, rewardedStakingModulesCount) - mstore(stakingModuleFees, rewardedStakingModulesCount) - } - } - } - - /// @notice Returns the same as getStakingRewardsDistribution() but in reduced, 1e4 precision (DEPRECATED). - /// @dev Helper only for Lido contract. Use getStakingRewardsDistribution() instead. - /// @return totalFee Total fee to mint for each staking module and treasury in reduced, 1e4 precision. 
- function getTotalFeeE4Precision() external view returns (uint16 totalFee) { - /// @dev The logic is placed here but in Lido contract to save Lido bytecode. - (, , , uint96 totalFeeInHighPrecision, uint256 precision) = getStakingRewardsDistribution(); - // Here we rely on (totalFeeInHighPrecision <= precision). - totalFee = _toE4Precision(totalFeeInHighPrecision, precision); - } - - /// @notice Returns the same as getStakingFeeAggregateDistribution() but in reduced, 1e4 precision (DEPRECATED). - /// @dev Helper only for Lido contract. Use getStakingFeeAggregateDistribution() instead. - /// @return modulesFee Modules aggregate fee in reduced, 1e4 precision. - /// @return treasuryFee Treasury fee in reduced, 1e4 precision. - function getStakingFeeAggregateDistributionE4Precision() - external view - returns (uint16 modulesFee, uint16 treasuryFee) - { - /// @dev The logic is placed here but in Lido contract to save Lido bytecode. - ( - uint256 modulesFeeHighPrecision, - uint256 treasuryFeeHighPrecision, - uint256 precision - ) = getStakingFeeAggregateDistribution(); - // Here we rely on ({modules,treasury}FeeHighPrecision <= precision). - modulesFee = _toE4Precision(modulesFeeHighPrecision, precision); - treasuryFee = _toE4Precision(treasuryFeeHighPrecision, precision); - } - - /// @notice Returns new deposits allocation after the distribution of the `_depositsCount` deposits. - /// @param _depositsCount The maximum number of deposits to be allocated. - /// @return allocated Number of deposits allocated to the staking modules. - /// @return allocations Array of new deposits allocation to the staking modules. - function getDepositsAllocation(uint256 _depositsCount) external view returns (uint256 allocated, uint256[] memory allocations) { - (allocated, allocations, ) = _getDepositsAllocation(_depositsCount); - } - - /// @notice Invokes a deposit call to the official Deposit contract. - /// @param _depositsCount Number of deposits to make. 
- /// @param _stakingModuleId Id of the staking module to be deposited. - /// @param _depositCalldata Staking module calldata. - /// @dev Only the Lido contract is allowed to call this method. - function deposit( - uint256 _depositsCount, - uint256 _stakingModuleId, - bytes calldata _depositCalldata - ) external payable { - if (msg.sender != LIDO_POSITION.getStorageAddress()) revert AppAuthLidoFailed(); - - bytes32 withdrawalCredentials = getWithdrawalCredentials(); - if (withdrawalCredentials == 0) revert EmptyWithdrawalsCredentials(); - - StakingModule storage stakingModule = _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - if (StakingModuleStatus(stakingModule.status) != StakingModuleStatus.Active) - revert StakingModuleNotActive(); - - /// @dev Firstly update the local state of the contract to prevent a reentrancy attack - /// even though the staking modules are trusted contracts. - uint256 depositsValue = msg.value; - if (depositsValue != _depositsCount * DEPOSIT_SIZE) revert InvalidDepositsValue(depositsValue, _depositsCount); - - _updateModuleLastDepositState(stakingModule, _stakingModuleId, depositsValue); - - if (_depositsCount > 0) { - (bytes memory publicKeysBatch, bytes memory signaturesBatch) = - IStakingModule(stakingModule.stakingModuleAddress) - .obtainDepositData(_depositsCount, _depositCalldata); - - uint256 etherBalanceBeforeDeposits = address(this).balance; - _makeBeaconChainDeposits32ETH( - _depositsCount, - abi.encodePacked(withdrawalCredentials), - publicKeysBatch, - signaturesBatch - ); - uint256 etherBalanceAfterDeposits = address(this).balance; - - /// @dev All sent ETH must be deposited and self balance stay the same. - assert(etherBalanceBeforeDeposits - etherBalanceAfterDeposits == depositsValue); - } - } - - /// @notice Set credentials to withdraw ETH on Consensus Layer side. - /// @param _withdrawalCredentials withdrawal credentials field as defined in the Consensus Layer specs. 
- /// @dev Note that setWithdrawalCredentials discards all unused deposits data as the signatures are invalidated. - /// @dev The function is restricted to the `MANAGE_WITHDRAWAL_CREDENTIALS_ROLE` role. - function setWithdrawalCredentials(bytes32 _withdrawalCredentials) external onlyRole(MANAGE_WITHDRAWAL_CREDENTIALS_ROLE) { - WITHDRAWAL_CREDENTIALS_POSITION.setStorageBytes32(_withdrawalCredentials); - - uint256 stakingModulesCount = getStakingModulesCount(); - for (uint256 i; i < stakingModulesCount; ) { - StakingModule storage stakingModule = _getStakingModuleByIndex(i); - - unchecked { - ++i; - } - - try IStakingModule(stakingModule.stakingModuleAddress) - .onWithdrawalCredentialsChanged() {} - catch (bytes memory lowLevelRevertData) { - /// @dev This check is required to prevent incorrect gas estimation of the method. - /// Without it, Ethereum nodes that use binary search for gas estimation may - /// return an invalid value when the onWithdrawalCredentialsChanged() - /// reverts because of the "out of gas" error. Here we assume that the - /// onWithdrawalCredentialsChanged() method doesn't have reverts with - /// empty error data except "out of gas". - if (lowLevelRevertData.length == 0) revert UnrecoverableModuleError(); - _setStakingModuleStatus(stakingModule, StakingModuleStatus.DepositsPaused); - emit WithdrawalsCredentialsChangeFailed(stakingModule.id, lowLevelRevertData); - } - } - - emit WithdrawalCredentialsSet(_withdrawalCredentials, msg.sender); - } - - /// @notice Returns current credentials to withdraw ETH on Consensus Layer side. - /// @return Withdrawal credentials. 
- function getWithdrawalCredentials() public view returns (bytes32) { - return WITHDRAWAL_CREDENTIALS_POSITION.getStorageBytes32(); - } - - function _checkValidatorsByNodeOperatorReportData( - bytes calldata _nodeOperatorIds, - bytes calldata _validatorsCounts - ) internal pure { - if (_nodeOperatorIds.length % 8 != 0 || _validatorsCounts.length % 16 != 0) { - revert InvalidReportData(3); - } - uint256 nodeOperatorsCount = _nodeOperatorIds.length / 8; - if (_validatorsCounts.length / 16 != nodeOperatorsCount) { - revert InvalidReportData(2); - } - if (nodeOperatorsCount == 0) { - revert InvalidReportData(1); - } - } - - /// @dev Save the last deposit state for the staking module and emit the event - /// @param stakingModule staking module storage ref - /// @param stakingModuleId id of the staking module to be deposited - /// @param depositsValue value to deposit - function _updateModuleLastDepositState( - StakingModule storage stakingModule, - uint256 stakingModuleId, - uint256 depositsValue - ) internal { - stakingModule.lastDepositAt = uint64(block.timestamp); - stakingModule.lastDepositBlock = block.number; - emit StakingRouterETHDeposited(stakingModuleId, depositsValue); - } - - - /// @dev Loads modules into a memory cache. - /// @return totalActiveValidators Total active validators across all modules. - /// @return stakingModulesCache Array of StakingModuleCache structs. 
- function _loadStakingModulesCache() internal view returns ( - uint256 totalActiveValidators, - StakingModuleCache[] memory stakingModulesCache - ) { - uint256 stakingModulesCount = getStakingModulesCount(); - stakingModulesCache = new StakingModuleCache[](stakingModulesCount); - for (uint256 i; i < stakingModulesCount; ) { - stakingModulesCache[i] = _loadStakingModulesCacheItem(i); - totalActiveValidators += stakingModulesCache[i].activeValidatorsCount; - - unchecked { - ++i; - } - } - } - - function _loadStakingModulesCacheItem(uint256 _stakingModuleIndex) - internal - view - returns (StakingModuleCache memory cacheItem) - { - StakingModule storage stakingModuleData = _getStakingModuleByIndex(_stakingModuleIndex); - - cacheItem.stakingModuleAddress = stakingModuleData.stakingModuleAddress; - cacheItem.stakingModuleId = stakingModuleData.id; - cacheItem.stakingModuleFee = stakingModuleData.stakingModuleFee; - cacheItem.treasuryFee = stakingModuleData.treasuryFee; - cacheItem.stakeShareLimit = stakingModuleData.stakeShareLimit; - cacheItem.status = StakingModuleStatus(stakingModuleData.status); - - ( - uint256 totalExitedValidators, - uint256 totalDepositedValidators, - uint256 depositableValidatorsCount - ) = _getStakingModuleSummary(IStakingModule(cacheItem.stakingModuleAddress)); - - cacheItem.availableValidatorsCount = cacheItem.status == StakingModuleStatus.Active - ? depositableValidatorsCount - : 0; - - // The module might not receive all exited validators data yet => we need to replacing - // the exitedValidatorsCount with the one that the staking router is aware of. 
- cacheItem.activeValidatorsCount = - totalDepositedValidators - - Math256.max(totalExitedValidators, stakingModuleData.exitedValidatorsCount); - } - - function _setStakingModuleStatus(StakingModule storage _stakingModule, StakingModuleStatus _status) internal { - StakingModuleStatus prevStatus = StakingModuleStatus(_stakingModule.status); - if (prevStatus != _status) { - _stakingModule.status = uint8(_status); - emit StakingModuleStatusSet(_stakingModule.id, _status, msg.sender); - } - } - - function _getDepositsAllocation( - uint256 _depositsToAllocate - ) internal view returns (uint256 allocated, uint256[] memory allocations, StakingModuleCache[] memory stakingModulesCache) { - // Calculate total used validators for operators. - uint256 totalActiveValidators; - - (totalActiveValidators, stakingModulesCache) = _loadStakingModulesCache(); - - uint256 stakingModulesCount = stakingModulesCache.length; - allocations = new uint256[](stakingModulesCount); - if (stakingModulesCount > 0) { - /// @dev New estimated active validators count. 
- totalActiveValidators += _depositsToAllocate; - uint256[] memory capacities = new uint256[](stakingModulesCount); - uint256 targetValidators; - - for (uint256 i; i < stakingModulesCount; ) { - allocations[i] = stakingModulesCache[i].activeValidatorsCount; - targetValidators = (stakingModulesCache[i].stakeShareLimit * totalActiveValidators) / TOTAL_BASIS_POINTS; - capacities[i] = Math256.min(targetValidators, stakingModulesCache[i].activeValidatorsCount + stakingModulesCache[i].availableValidatorsCount); - - unchecked { - ++i; - } - } - - (allocated, allocations) = MinFirstAllocationStrategy.allocate(allocations, capacities, _depositsToAllocate); - } - } - - function _getStakingModuleIndexById(uint256 _stakingModuleId) internal view returns (uint256) { - mapping(uint256 => uint256) storage _stakingModuleIndicesOneBased = _getStorageStakingIndicesMapping(); - uint256 indexOneBased = _stakingModuleIndicesOneBased[_stakingModuleId]; - if (indexOneBased == 0) revert StakingModuleUnregistered(); - return indexOneBased - 1; - } - - function _setStakingModuleIndexById(uint256 _stakingModuleId, uint256 _stakingModuleIndex) internal { - mapping(uint256 => uint256) storage _stakingModuleIndicesOneBased = _getStorageStakingIndicesMapping(); - _stakingModuleIndicesOneBased[_stakingModuleId] = _stakingModuleIndex + 1; - } - - function _getIStakingModuleById(uint256 _stakingModuleId) internal view returns (IStakingModule) { - return IStakingModule(_getStakingModuleAddressById(_stakingModuleId)); - } - - function _getStakingModuleByIndex(uint256 _stakingModuleIndex) internal view returns (StakingModule storage) { - mapping(uint256 => StakingModule) storage _stakingModules = _getStorageStakingModulesMapping(); - return _stakingModules[_stakingModuleIndex]; - } - - function _getStakingModuleAddressById(uint256 _stakingModuleId) internal view returns (address) { - return _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)).stakingModuleAddress; - } - - function 
_getStorageStakingModulesMapping() internal pure returns (mapping(uint256 => StakingModule) storage result) { - bytes32 position = STAKING_MODULES_MAPPING_POSITION; - assembly { - result.slot := position - } - } - - function _getStorageStakingIndicesMapping() internal pure returns (mapping(uint256 => uint256) storage result) { - bytes32 position = STAKING_MODULE_INDICES_MAPPING_POSITION; - assembly { - result.slot := position - } - } - - function _toE4Precision(uint256 _value, uint256 _precision) internal pure returns (uint16) { - return uint16((_value * TOTAL_BASIS_POINTS) / _precision); - } - - function _validateEqualArrayLengths(uint256 firstArrayLength, uint256 secondArrayLength) internal pure { - if (firstArrayLength != secondArrayLength) { - revert ArraysLengthMismatch(firstArrayLength, secondArrayLength); - } - } - - /// @dev Optimizes contract deployment size by wrapping the 'stakingModule.getStakingModuleSummary' function. - function _getStakingModuleSummary(IStakingModule stakingModule) internal view returns (uint256, uint256, uint256) { - return stakingModule.getStakingModuleSummary(); - } - - /// @notice Handles tracking and penalization logic for a node operator who failed to exit their validator within the defined exit window. - /// @dev This function is called to report the current exit-related status of a validator belonging to a specific node operator. - /// It accepts a validator's public key, associated with the duration (in seconds) it was eligible to exit but has not exited. - /// This data could be used to trigger penalties for the node operator if the validator has been non-exiting for too long. - /// @param _stakingModuleId The ID of the staking module. - /// @param _nodeOperatorId The ID of the node operator whose validator status is being delivered. - /// @param _proofSlotTimestamp The timestamp (slot time) when the validator was last known to be in an active ongoing state. 
- /// @param _publicKey The public key of the validator being reported. - /// @param _eligibleToExitInSec The duration (in seconds) indicating how long the validator has been eligible to exit after request but has not exited. - function reportValidatorExitDelay( - uint256 _stakingModuleId, - uint256 _nodeOperatorId, - uint256 _proofSlotTimestamp, - bytes calldata _publicKey, - uint256 _eligibleToExitInSec - ) - external - onlyRole(REPORT_VALIDATOR_EXITING_STATUS_ROLE) - { - _getIStakingModuleById(_stakingModuleId).reportValidatorExitDelay( - _nodeOperatorId, - _proofSlotTimestamp, - _publicKey, - _eligibleToExitInSec - ); - } - - /// @notice Handles the triggerable exit event for a set of validators. - /// @dev This function is called when validators are exited using triggerable exit requests on the Execution Layer. - /// @param validatorExitData An array of `ValidatorExitData` structs, each representing a validator - /// for which a triggerable exit was requested. Each entry includes: - /// - `stakingModuleId`: ID of the staking module. - /// - `nodeOperatorId`: ID of the node operator. - /// - `pubkey`: Validator public key, 48 bytes length. - /// @param _withdrawalRequestPaidFee Fee amount paid to send a withdrawal request on the Execution Layer (EL). - /// @param _exitType The type of exit being performed. - /// This parameter may be interpreted differently across various staking modules depending on their specific implementation. 
- function onValidatorExitTriggered( - ValidatorExitData[] calldata validatorExitData, - uint256 _withdrawalRequestPaidFee, - uint256 _exitType - ) - external - onlyRole(REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE) - { - ValidatorExitData calldata data; - for (uint256 i = 0; i < validatorExitData.length; ++i) { - data = validatorExitData[i]; - - try _getIStakingModuleById(data.stakingModuleId).onValidatorExitTriggered( - data.nodeOperatorId, - data.pubkey, - _withdrawalRequestPaidFee, - _exitType - ) - {} catch (bytes memory lowLevelRevertData) { - /// @dev This check is required to prevent incorrect gas estimation of the method. - /// Without it, Ethereum nodes that use binary search for gas estimation may - /// return an invalid value when the onValidatorExitTriggered() - /// reverts because of the "out of gas" error. Here we assume that the - /// onValidatorExitTriggered() method doesn't have reverts with - /// empty error data except "out of gas". - if (lowLevelRevertData.length == 0) revert UnrecoverableModuleError(); - emit StakingModuleExitNotificationFailed(data.stakingModuleId, data.nodeOperatorId, data.pubkey); - } - } - } -} diff --git a/contracts/0.8.9/WithdrawalVault.sol b/contracts/0.8.9/WithdrawalVault.sol index 9964bea5e4..80517409b1 100644 --- a/contracts/0.8.9/WithdrawalVault.sol +++ b/contracts/0.8.9/WithdrawalVault.sol @@ -8,7 +8,7 @@ import {IERC20} from "@openzeppelin/contracts-v4.4/token/ERC20/IERC20.sol"; import {IERC721} from "@openzeppelin/contracts-v4.4/token/ERC721/IERC721.sol"; import {SafeERC20} from "@openzeppelin/contracts-v4.4/token/ERC20/utils/SafeERC20.sol"; import {Versioned} from "./utils/Versioned.sol"; -import {WithdrawalVaultEIP7002} from "./WithdrawalVaultEIP7002.sol"; +import {WithdrawalVaultEIP7685} from "./WithdrawalVaultEIP7685.sol"; interface ILido { /** @@ -22,12 +22,13 @@ interface ILido { /** * @title A vault for temporary storage of withdrawals */ -contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { +contract 
WithdrawalVault is Versioned, WithdrawalVaultEIP7685 { using SafeERC20 for IERC20; ILido public immutable LIDO; address public immutable TREASURY; address public immutable TRIGGERABLE_WITHDRAWALS_GATEWAY; + address public immutable CONSOLIDATION_GATEWAY; // Events /** @@ -43,9 +44,9 @@ contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { event ERC721Recovered(address indexed requestedBy, address indexed token, uint256 tokenId); // Errors - error ZeroAddress(); error NotLido(); error NotTriggerableWithdrawalsGateway(); + error NotConsolidationGateway(); error NotEnoughEther(uint256 requested, uint256 balance); error ZeroAmount(); @@ -53,14 +54,23 @@ contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { * @param _lido the Lido token (stETH) address * @param _treasury the Lido treasury address (see ERC20/ERC721-recovery interfaces) */ - constructor(address _lido, address _treasury, address _triggerableWithdrawalsGateway) { + constructor( + address _lido, + address _treasury, + address _triggerableWithdrawalsGateway, + address _consolidationGateway, + address _withdrawalRequest, + address _consolidationRequest + ) WithdrawalVaultEIP7685(_withdrawalRequest, _consolidationRequest) { _onlyNonZeroAddress(_lido); _onlyNonZeroAddress(_treasury); _onlyNonZeroAddress(_triggerableWithdrawalsGateway); + _onlyNonZeroAddress(_consolidationGateway); LIDO = ILido(_lido); TREASURY = _treasury; TRIGGERABLE_WITHDRAWALS_GATEWAY = _triggerableWithdrawalsGateway; + CONSOLIDATION_GATEWAY = _consolidationGateway; } /// @dev Ensures the contract’s ETH balance is unchanged. @@ -75,14 +85,14 @@ contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { function initialize() external { // Initializations for v0 --> v2 _checkContractVersion(0); - _initializeContractVersionTo(2); + _initializeContractVersionTo(3); } - /// @notice Finalizes upgrade to v2 (from v1). Can be called only once. 
- function finalizeUpgrade_v2() external { - // Finalization for v1 --> v2 - _checkContractVersion(1); - _updateContractVersion(2); + /// @notice Finalizes upgrade to v3 (from v2). Can be called only once. + function finalizeUpgrade_v3() external { + // Finalization for v2 --> v3 + _checkContractVersion(2); + _updateContractVersion(3); } /** @@ -136,10 +146,6 @@ contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { _token.transferFrom(address(this), TREASURY, _tokenId); } - function _onlyNonZeroAddress(address _address) internal pure { - if (_address == address(0)) revert ZeroAddress(); - } - /** * @dev Submits EIP-7002 full or partial withdrawal requests for the specified public keys. * Each full withdrawal request instructs a validator to fully withdraw its stake and exit its duties as a validator. @@ -171,6 +177,32 @@ contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { _addWithdrawalRequests(pubkeys, amounts); } + /** + * @dev Submits EIP-7251 consolidation requests, one per (source, target) pair. + * Each request instructs a validator to consolidate its stake to the target validator. + * + * @param sourcePubkeys An array of 48-byte public keys corresponding to validators requesting the consolidation. + * + * @param targetPubkeys An array of 48-byte public keys corresponding to validators receiving the consolidation. + * + * @notice Reverts if: + * - The caller is not ConsolidationsGateway. + * - The provided public key array is empty. + * - The provided public key array malformed. + * - The provided source public key and target public key arrays are not of equal length. + * - The provided total withdrawal fee value is invalid. 
+ */ + function addConsolidationRequests( + bytes[] calldata sourcePubkeys, + bytes[] calldata targetPubkeys + ) external payable preservesEthBalance { + if (msg.sender != CONSOLIDATION_GATEWAY) { + revert NotConsolidationGateway(); + } + + _addConsolidationRequests(sourcePubkeys, targetPubkeys); + } + /** * @dev Retrieves the current EIP-7002 withdrawal fee. * @return The minimum fee required per withdrawal request. @@ -178,4 +210,12 @@ contract WithdrawalVault is Versioned, WithdrawalVaultEIP7002 { function getWithdrawalRequestFee() public view returns (uint256) { return _getWithdrawalRequestFee(); } + + /** + * @dev Retrieves the current EIP-7251 consolidation fee. + * @return The minimum fee required per consolidation request. + */ + function getConsolidationRequestFee() external view returns (uint256) { + return _getConsolidationRequestFee(); + } } diff --git a/contracts/0.8.9/WithdrawalVaultEIP7002.sol b/contracts/0.8.9/WithdrawalVaultEIP7002.sol deleted file mode 100644 index d4449939eb..0000000000 --- a/contracts/0.8.9/WithdrawalVaultEIP7002.sol +++ /dev/null @@ -1,66 +0,0 @@ -// SPDX-FileCopyrightText: 2025 Lido -// SPDX-License-Identifier: GPL-3.0 - -/* See contracts/COMPILERS.md */ -pragma solidity 0.8.9; - -/** - * @title A base contract for a withdrawal vault, enables to submit EIP-7002 withdrawal requests. 
- */ -abstract contract WithdrawalVaultEIP7002 { - address public constant WITHDRAWAL_REQUEST = 0x00000961Ef480Eb55e80D19ad83579A64c007002; - - event WithdrawalRequestAdded(bytes request); - - error ZeroArgument(string name); - error ArraysLengthMismatch(uint256 firstArrayLength, uint256 secondArrayLength); - error FeeReadFailed(); - error FeeInvalidData(); - error IncorrectFee(uint256 requiredFee, uint256 providedFee); - error RequestAdditionFailed(bytes callData); - - function _addWithdrawalRequests(bytes[] calldata pubkeys, uint64[] calldata amounts) internal { - uint256 requestsCount = pubkeys.length; - if (requestsCount == 0) revert ZeroArgument("pubkeys"); - if (requestsCount != amounts.length) revert ArraysLengthMismatch(requestsCount, amounts.length); - - uint256 fee = _getWithdrawalRequestFee(); - _checkFee(requestsCount * fee); - - for (uint256 i = 0; i < requestsCount; ++i) { - _callAddWithdrawalRequest(pubkeys[i], amounts[i], fee); - } - } - - function _getWithdrawalRequestFee() internal view returns (uint256) { - (bool success, bytes memory feeData) = WITHDRAWAL_REQUEST.staticcall(""); - - if (!success) { - revert FeeReadFailed(); - } - - if (feeData.length != 32) { - revert FeeInvalidData(); - } - - return abi.decode(feeData, (uint256)); - } - - function _callAddWithdrawalRequest(bytes calldata pubkey, uint64 amount, uint256 fee) internal { - assert(pubkey.length == 48); - - bytes memory request = abi.encodePacked(pubkey, amount); - (bool success,) = WITHDRAWAL_REQUEST.call{value: fee}(request); - if (!success) { - revert RequestAdditionFailed(request); - } - - emit WithdrawalRequestAdded(request); - } - - function _checkFee(uint256 fee) internal view { - if (msg.value != fee) { - revert IncorrectFee(fee, msg.value); - } - } -} diff --git a/contracts/0.8.9/WithdrawalVaultEIP7685.sol b/contracts/0.8.9/WithdrawalVaultEIP7685.sol new file mode 100644 index 0000000000..5155b4302a --- /dev/null +++ b/contracts/0.8.9/WithdrawalVaultEIP7685.sol @@ -0,0 
+1,132 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* See contracts/COMPILERS.md */ +pragma solidity 0.8.9; + + +/** + * @title Withdrawal Vault EIP-7685 Support + * @notice Abstract contract providing base functionality for + * general-purpose Execution Layer requests. + * @dev Implements support for the following request types: + * - EIP-7002: Withdrawal requests + * - EIP-7251: Consolidation requests + */ +abstract contract WithdrawalVaultEIP7685 { + address public immutable WITHDRAWAL_REQUEST; + address public immutable CONSOLIDATION_REQUEST; + + uint256 internal constant PUBLIC_KEY_LENGTH = 48; + + event WithdrawalRequestAdded(bytes request); + event ConsolidationRequestAdded(bytes request); + + error ZeroAddress(); + error ZeroArgument(string name); + error ArraysLengthMismatch(uint256 firstArrayLength, uint256 secondArrayLength); + error FeeReadFailed(); + error FeeInvalidData(); + error IncorrectFee(uint256 requiredFee, uint256 providedFee); + error RequestAdditionFailed(bytes callData); + error InvalidPublicKeyLength(bytes pubkey); + + constructor(address _withdrawalRequest, address _consolidationRequest) { + _onlyNonZeroAddress(_withdrawalRequest); + _onlyNonZeroAddress(_consolidationRequest); + + WITHDRAWAL_REQUEST = _withdrawalRequest; + CONSOLIDATION_REQUEST = _consolidationRequest; + } + + function _addWithdrawalRequests(bytes[] calldata pubkeys, uint64[] calldata amounts) internal { + uint256 requestsCount = pubkeys.length; + if (requestsCount == 0) revert ZeroArgument("pubkeys"); + if (requestsCount != amounts.length) revert ArraysLengthMismatch(requestsCount, amounts.length); + + uint256 fee = _getWithdrawalRequestFee(); + _requireExactFee(requestsCount * fee); + + for (uint256 i = 0; i < requestsCount; ++i) { + _validatePublicKey(pubkeys[i]); + _callAddWithdrawalRequest(pubkeys[i], amounts[i], fee); + } + } + + function _addConsolidationRequests( + bytes[] calldata sourcePubkeys, + bytes[] calldata 
targetPubkeys + ) internal { + uint256 requestsCount = sourcePubkeys.length; + if (requestsCount == 0) revert ZeroArgument("sourcePubkeys"); + if (requestsCount != targetPubkeys.length) + revert ArraysLengthMismatch(requestsCount, targetPubkeys.length); + + uint256 fee = _getConsolidationRequestFee(); + _requireExactFee(requestsCount * fee); + + for (uint256 i = 0; i < requestsCount; ++i) { + _validatePublicKey(sourcePubkeys[i]); + _validatePublicKey(targetPubkeys[i]); + _callAddConsolidationRequest(sourcePubkeys[i], targetPubkeys[i], fee); + } + } + + function _getWithdrawalRequestFee() internal view returns (uint256) { + return _getFeeFromContract(WITHDRAWAL_REQUEST); + } + + function _getConsolidationRequestFee() internal view returns (uint256) { + return _getFeeFromContract(CONSOLIDATION_REQUEST); + } + + function _getFeeFromContract(address contractAddress) internal view returns (uint256) { + (bool success, bytes memory feeData) = contractAddress.staticcall(""); + + if (!success) { + revert FeeReadFailed(); + } + + if (feeData.length != 32) { + revert FeeInvalidData(); + } + + return abi.decode(feeData, (uint256)); + } + + function _validatePublicKey(bytes calldata pubkey) internal pure { + if (pubkey.length != PUBLIC_KEY_LENGTH) { + revert InvalidPublicKeyLength(pubkey); + } + } + + function _callAddWithdrawalRequest(bytes calldata pubkey, uint64 amount, uint256 fee) internal { + bytes memory request = abi.encodePacked(pubkey, amount); + (bool success,) = WITHDRAWAL_REQUEST.call{value: fee}(request); + if (!success) { + revert RequestAdditionFailed(request); + } + + emit WithdrawalRequestAdded(request); + } + + function _callAddConsolidationRequest(bytes calldata sourcePubkey, bytes calldata targetPubkey, uint256 fee) internal { + bytes memory request = abi.encodePacked(sourcePubkey, targetPubkey); + (bool success,) = CONSOLIDATION_REQUEST.call{value: fee}(request); + if (!success) { + revert RequestAdditionFailed(request); + } + + emit 
ConsolidationRequestAdded(request); + } + + function _requireExactFee(uint256 requiredFee) internal view { + if (requiredFee != msg.value) { + revert IncorrectFee(requiredFee, msg.value); + } + } + + function _onlyNonZeroAddress(address _address) internal pure { + if (_address == address(0)) revert ZeroAddress(); + } +} diff --git a/contracts/0.8.9/oracle/AccountingOracle.sol b/contracts/0.8.9/oracle/AccountingOracle.sol index 779361f193..1735d42ed0 100644 --- a/contracts/0.8.9/oracle/AccountingOracle.sol +++ b/contracts/0.8.9/oracle/AccountingOracle.sol @@ -6,21 +6,32 @@ pragma solidity 0.8.9; import {SafeCast} from "@openzeppelin/contracts-v4.4/utils/math/SafeCast.sol"; +import {ILido} from "contracts/common/interfaces/ILido.sol"; import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; import {ReportValues} from "contracts/common/interfaces/ReportValues.sol"; import {ILazyOracle} from "contracts/common/interfaces/ILazyOracle.sol"; - import {UnstructuredStorage} from "../lib/UnstructuredStorage.sol"; - import {BaseOracle} from "./BaseOracle.sol"; - interface IReportReceiver { function handleOracleReport(ReportValues memory values) external; } interface IOracleReportSanityChecker { - function checkExitedValidatorsRatePerDay(uint256 _exitedValidatorsCount) external view; + function checkExitedEthAmountPerDay( + uint256 _newlyExitedValidatorsCount, + uint256 _timeElapsed + ) external view; + function checkModuleAndCLBalancesChangeRates( + uint256[] calldata _stakingModuleIdsWithUpdatedBalance, + uint256[] calldata _validatorBalancesWeiByStakingModule, + uint256 _preCLValidatorsBalanceWei, + uint256 _preCLPendingBalanceWei, + uint256 _postCLValidatorsBalanceWei, + uint256 _postCLPendingBalanceWei, + uint256 _depositsWei, + uint256 _timeElapsed + ) external view; function checkExtraDataItemsCountPerTransaction(uint256 _extraDataListItemsCount) external view; function checkNodeOperatorsPerExtraDataItemCount(uint256 _itemIndex, uint256 
_nodeOperatorsCount) external view; @@ -32,6 +43,16 @@ interface IStakingRouter { uint256[] calldata _exitedValidatorsCounts ) external returns (uint256); + function validateReportValidatorBalancesByStakingModule( + uint256[] calldata _stakingModuleIds, + uint256[] calldata _validatorBalancesGwei + ) external view; + + function reportValidatorBalancesByStakingModule( + uint256[] calldata _stakingModuleIds, + uint256[] calldata _validatorBalancesGwei + ) external; + function reportStakingModuleExitedValidatorsCountByNodeOperator( uint256 _stakingModuleId, bytes calldata _nodeOperatorIds, @@ -51,10 +72,9 @@ contract AccountingOracle is BaseOracle { error LidoLocatorCannotBeZero(); error AdminCannotBeZero(); - error LidoCannotBeZero(); - error IncorrectOracleMigration(uint256 code); error SenderNotAllowed(); error InvalidExitedValidatorsData(); + error InvalidClBalancesData(); error UnsupportedExtraDataFormat(uint256 format); error UnsupportedExtraDataType(uint256 itemIndex, uint256 dataType); error DeprecatedExtraDataType(uint256 itemIndex, uint256 dataType); @@ -68,7 +88,6 @@ contract AccountingOracle is BaseOracle { error UnexpectedExtraDataIndex(uint256 expectedIndex, uint256 receivedIndex); error InvalidExtraDataItem(uint256 itemIndex); error InvalidExtraDataSortOrder(uint256 itemIndex); - event ExtraDataSubmitted(uint256 indexed refSlot, uint256 itemsProcessed, uint256 itemsCount); event WarnExtraDataIncompleteProcessing(uint256 indexed refSlot, uint256 processedItemsCount, uint256 itemsCount); @@ -119,10 +138,11 @@ contract AccountingOracle is BaseOracle { _updateContractVersion(2); _updateContractVersion(3); _updateContractVersion(4); + _updateContractVersion(5); } - function finalizeUpgrade_v4(uint256 consensusVersion) external { - _updateContractVersion(4); + function finalizeUpgrade_v5(uint256 consensusVersion) external { + _updateContractVersion(5); _setConsensusVersion(consensusVersion); } @@ -147,12 +167,12 @@ contract AccountingOracle is BaseOracle { 
/// CL values /// - /// @dev The number of validators on consensus layer that were ever deposited - /// via Lido as observed at the reference slot. - uint256 numValidators; - /// @dev Cumulative balance of all Lido validators on the consensus layer + /// @dev Sum of consensus-layer validator balances (`validator.balance`), + /// excluding pending deposits, as observed at the reference slot. + uint256 clValidatorsBalanceGwei; + /// @dev Pending deposits balance on the consensus layer /// as observed at the reference slot. - uint256 clBalanceGwei; + uint256 clPendingBalanceGwei; /// @dev Ids of staking modules that have more exited validators than the number /// stored in the respective staking module contract as observed at the reference /// slot. @@ -161,7 +181,13 @@ contract AccountingOracle is BaseOracle { /// the stakingModuleIdsWithNewlyExitedValidators array as observed at the /// reference slot. uint256[] numExitedValidatorsByStakingModule; - /// + /// @dev Ids of staking modules that have effective balances changed compared to the number + /// stored in the respective staking module contract as observed at the reference slot. + uint256[] stakingModuleIdsWithUpdatedBalance; + /// @dev Sum of consensus-layer validator balances (`validator.balance`) + /// for each staking module in `stakingModuleIdsWithUpdatedBalance`, + /// excluding pending deposits, as observed at the reference slot. 
+ uint256[] validatorBalancesGweiByStakingModule; /// EL values /// @@ -411,6 +437,11 @@ contract AccountingOracle is BaseOracle { result.extraDataItemsSubmitted = extraState.itemsProcessed; } + function getCurrentFrame() external view returns (uint256 refSlot, uint256 refSlotTimestamp) { + refSlot = _getCurrentRefSlot(); + refSlotTimestamp = _getSlotTimestamp(refSlot); + } + /// /// Implementation & helpers /// @@ -465,29 +496,43 @@ contract AccountingOracle is BaseOracle { } uint256 slotsElapsed = data.refSlot - prevRefSlot; + uint256 timeElapsed = slotsElapsed * SECONDS_PER_SLOT; IStakingRouter stakingRouter = IStakingRouter(LOCATOR.stakingRouter()); IWithdrawalQueue withdrawalQueue = IWithdrawalQueue(LOCATOR.withdrawalQueue()); + IOracleReportSanityChecker sanityChecker = IOracleReportSanityChecker(LOCATOR.oracleReportSanityChecker()); + _checkStakingRouterModuleBalances(sanityChecker, data, timeElapsed); _processStakingRouterExitedValidatorsByModule( stakingRouter, + sanityChecker, data.stakingModuleIdsWithNewlyExitedValidators, data.numExitedValidatorsByStakingModule, - slotsElapsed + timeElapsed + ); + + /// @notice update CL balances in StakingRouter + /// @dev we need to update balances before rewards and fee calculation + /// Note, deposit trackers not changed at this moment, they are bumped + /// in StakingRouter.onAccountingReport during `handleAccountingReport` + _processStakingRouterValidatorBalancesByModule( + stakingRouter, + data.stakingModuleIdsWithUpdatedBalance, + data.validatorBalancesGweiByStakingModule ); withdrawalQueue.onOracleReport( data.isBunkerMode, - GENESIS_TIME + prevRefSlot * SECONDS_PER_SLOT, - GENESIS_TIME + data.refSlot * SECONDS_PER_SLOT + _getSlotTimestamp(prevRefSlot), + _getSlotTimestamp(data.refSlot) ); IReportReceiver(LOCATOR.accounting()).handleOracleReport( ReportValues( - GENESIS_TIME + data.refSlot * SECONDS_PER_SLOT, - slotsElapsed * SECONDS_PER_SLOT, - data.numValidators, - data.clBalanceGwei * 1e9, + 
_getSlotTimestamp(data.refSlot), + timeElapsed, + data.clValidatorsBalanceGwei * 1e9, + data.clPendingBalanceGwei * 1e9, data.withdrawalVaultBalance, data.elRewardsVaultBalance, data.sharesRequestedToBurn, @@ -497,7 +542,7 @@ contract AccountingOracle is BaseOracle { ); ILazyOracle(LOCATOR.lazyOracle()).updateReportData( - GENESIS_TIME + data.refSlot * SECONDS_PER_SLOT, + _getSlotTimestamp(data.refSlot), data.refSlot, data.vaultsDataTreeRoot, data.vaultsDataTreeCid @@ -514,11 +559,16 @@ contract AccountingOracle is BaseOracle { }); } + function _getSlotTimestamp(uint256 slot) internal view returns (uint256) { + return GENESIS_TIME + (slot * SECONDS_PER_SLOT); + } + function _processStakingRouterExitedValidatorsByModule( IStakingRouter stakingRouter, + IOracleReportSanityChecker sanityChecker, uint256[] calldata stakingModuleIds, uint256[] calldata numExitedValidatorsByStakingModule, - uint256 slotsElapsed + uint256 timeElapsed ) internal { if (stakingModuleIds.length != numExitedValidatorsByStakingModule.length) { revert InvalidExitedValidatorsData(); @@ -528,7 +578,7 @@ contract AccountingOracle is BaseOracle { return; } - for (uint256 i = 1; i < stakingModuleIds.length; ) { + for (uint256 i = 1; i < stakingModuleIds.length;) { if (stakingModuleIds[i] <= stakingModuleIds[i - 1]) { revert InvalidExitedValidatorsData(); } @@ -537,7 +587,7 @@ contract AccountingOracle is BaseOracle { } } - for (uint256 i = 0; i < stakingModuleIds.length; ) { + for (uint256 i = 0; i < stakingModuleIds.length;) { if (numExitedValidatorsByStakingModule[i] == 0) { revert InvalidExitedValidatorsData(); } @@ -551,14 +601,24 @@ contract AccountingOracle is BaseOracle { numExitedValidatorsByStakingModule ); - uint256 exitedValidatorsRatePerDay = (newlyExitedValidatorsCount * (1 days)) / - (SECONDS_PER_SLOT * slotsElapsed); - - IOracleReportSanityChecker(LOCATOR.oracleReportSanityChecker()).checkExitedValidatorsRatePerDay( - exitedValidatorsRatePerDay + 
sanityChecker.checkExitedEthAmountPerDay( + newlyExitedValidatorsCount, + timeElapsed ); } + function _processStakingRouterValidatorBalancesByModule( + IStakingRouter stakingRouter, + uint256[] calldata stakingModuleIds, + uint256[] calldata validatorBalancesGwei + ) internal { + if (stakingModuleIds.length == 0) { + return; + } + + stakingRouter.reportValidatorBalancesByStakingModule(stakingModuleIds, validatorBalancesGwei); + } + function _submitReportExtraDataEmpty() internal { ExtraDataProcessingState memory procState = _storageExtraDataProcessingState().value; _checkCanSubmitExtraData(procState, EXTRA_DATA_FORMAT_EMPTY); @@ -643,12 +703,56 @@ procState.dataHash = dataHash; procState.itemsProcessed = uint64(itemsProcessed); procState.lastSortingKey = iter.lastSortingKey; - _storageExtraDataProcessingState().value = procState; + _storageExtraDataProcessingState().value = procState; } emit ExtraDataSubmitted(procState.refSlot, procState.itemsProcessed, procState.itemsCount); } + function _checkStakingRouterModuleBalances( + IOracleReportSanityChecker sanityChecker, + ReportData calldata data, + uint256 timeElapsed + ) internal view { + // This check must run before `reportValidatorBalancesByStakingModule(...)` mutates the router state, + // because it compares the report against the previous per-module validator balances in StakingRouter + // and the pre-report protocol pending/deposits snapshot in Lido. 
+ IStakingRouter stakingRouter = IStakingRouter(LOCATOR.stakingRouter()); + stakingRouter.validateReportValidatorBalancesByStakingModule( + data.stakingModuleIdsWithUpdatedBalance, + data.validatorBalancesGweiByStakingModule + ); + + uint256[] memory validatorBalancesWeiByStakingModule = + _normalizeStakingRouterValidatorBalancesToWei(data.validatorBalancesGweiByStakingModule); + + (uint256 preCLValidatorsBalanceWei, uint256 preCLPendingBalanceWei,, uint256 depositsWei) = + ILido(LOCATOR.lido()).getBalanceStats(); + sanityChecker.checkModuleAndCLBalancesChangeRates( + data.stakingModuleIdsWithUpdatedBalance, + validatorBalancesWeiByStakingModule, + preCLValidatorsBalanceWei, + preCLPendingBalanceWei, + data.clValidatorsBalanceGwei * 1 gwei, + data.clPendingBalanceGwei * 1 gwei, + depositsWei, + timeElapsed + ); + } + + function _normalizeStakingRouterValidatorBalancesToWei( + uint256[] calldata validatorBalancesGwei + ) internal pure returns (uint256[] memory validatorBalancesWeiByStakingModule) { + uint256 modulesCount = validatorBalancesGwei.length; + validatorBalancesWeiByStakingModule = new uint256[](modulesCount); + for (uint256 i = 0; i < modulesCount; ) { + validatorBalancesWeiByStakingModule[i] = validatorBalancesGwei[i] * 1 gwei; + unchecked { + ++i; + } + } + } + function _processExtraDataItems(bytes calldata data, ExtraDataIterState memory iter) internal { uint256 dataOffset = iter.dataOffset; uint256 maxNodeOperatorsPerItem = 0; @@ -687,12 +791,14 @@ contract AccountingOracle is BaseOracle { revert DeprecatedExtraDataType(index, itemType); } - if (itemType != EXTRA_DATA_TYPE_EXITED_VALIDATORS) { + uint256 nodeOpsProcessed; + + if (itemType == EXTRA_DATA_TYPE_EXITED_VALIDATORS) { + nodeOpsProcessed = _processExtraDataItem(data, iter); + } else { revert UnsupportedExtraDataType(index, itemType); } - uint256 nodeOpsProcessed = _processExtraDataItem(data, iter); - if (nodeOpsProcessed > maxNodeOperatorsPerItem) { maxNodeOperatorsPerItem = nodeOpsProcessed; 
maxNodeOperatorItemIndex = index; @@ -723,7 +829,7 @@ contract AccountingOracle is BaseOracle { uint256 nodeOpsCount; uint256 nodeOpId; bytes calldata nodeOpIds; - bytes calldata valuesCounts; + bytes calldata payload; if (dataOffset + 35 > data.length) { // has to fit at least moduleId (3 bytes), nodeOpsCount (8 bytes), @@ -735,7 +841,7 @@ contract AccountingOracle is BaseOracle { assembly { // layout at the dataOffset: // | 3 bytes | 8 bytes | nodeOpsCount * 8 bytes | nodeOpsCount * 16 bytes | - // | moduleId | nodeOpsCount | nodeOperatorIds | validatorsCounts | + // | moduleId | nodeOpsCount | nodeOperatorIds | payload | let header := calldataload(add(data.offset, dataOffset)) moduleId := shr(232, header) nodeOpsCount := and(shr(168, header), 0xffffffffffffffff) @@ -743,9 +849,9 @@ contract AccountingOracle is BaseOracle { nodeOpIds.length := mul(nodeOpsCount, 8) // read the 1st node operator id for checking the sorting order later nodeOpId := shr(192, calldataload(nodeOpIds.offset)) - valuesCounts.offset := add(nodeOpIds.offset, nodeOpIds.length) - valuesCounts.length := mul(nodeOpsCount, 16) - dataOffset := sub(add(valuesCounts.offset, valuesCounts.length), data.offset) + payload.offset := add(nodeOpIds.offset, nodeOpIds.length) + payload.length := mul(nodeOpsCount, 16) + dataOffset := sub(add(payload.offset, payload.length), data.offset) } if (moduleId == 0) { @@ -784,8 +890,11 @@ contract AccountingOracle is BaseOracle { revert InvalidExtraDataItem(iter.index); } - IStakingRouter(iter.stakingRouter) - .reportStakingModuleExitedValidatorsCountByNodeOperator(moduleId, nodeOpIds, valuesCounts); + // Route to appropriate StakingRouter function based on item type + if (iter.itemType == EXTRA_DATA_TYPE_EXITED_VALIDATORS) { + IStakingRouter(iter.stakingRouter) + .reportStakingModuleExitedValidatorsCountByNodeOperator(moduleId, nodeOpIds, payload); + } iter.dataOffset = dataOffset; return nodeOpsCount; diff --git a/contracts/0.8.9/oracle/ValidatorsExitBus.sol 
b/contracts/0.8.9/oracle/ValidatorsExitBus.sol index 07eddf9d20..0074c6310c 100644 --- a/contracts/0.8.9/oracle/ValidatorsExitBus.sol +++ b/contracts/0.8.9/oracle/ValidatorsExitBus.sol @@ -2,11 +2,14 @@ // SPDX-License-Identifier: GPL-3.0 pragma solidity 0.8.9; +import {SafeCast} from "@openzeppelin/contracts-v4.4/utils/math/SafeCast.sol"; + import {AccessControlEnumerable} from "../utils/access/AccessControlEnumerable.sol"; import {UnstructuredStorage} from "../lib/UnstructuredStorage.sol"; import {Versioned} from "../utils/Versioned.sol"; -import {ExitRequestLimitData, ExitLimitUtilsStorage, ExitLimitUtils} from "../lib/ExitLimitUtils.sol"; +import {LimitData, RateLimitStorage, RateLimit} from "../../common/lib/RateLimit.sol"; import {PausableUntil} from "../utils/PausableUntil.sol"; +import {WithdrawalCredentials} from "../../common/lib/WithdrawalCredentials.sol"; interface ITriggerableWithdrawalsGateway { struct ValidatorData { @@ -22,10 +25,47 @@ interface ITriggerableWithdrawalsGateway { ) external payable; } +/// @notice New interface for staking modules (CSM, CuratedV2) +/// @dev Returns only pubkeys +interface IUnifiedStakingModule { + /// @dev It also works for legacy staking modules (NOR, SDVT) where `getSigningKeys` returns different + /// tuple `(bytes memory pubkeys, bytes memory signatures, bool[] memory used)`. + /// The trick: `abi.decode(returndata, (bytes))` will decode only the first tuple element. + /// This is safe as long as the first returned value really is `bytes pubkeys` in that position. 
+ function getSigningKeys( + uint256 nodeOperatorId, + uint256 startIndex, + uint256 keysCount + ) external view returns (bytes memory); +} + +interface IStakingRouter { + struct ModuleStateConfig { + address moduleAddress; + uint16 moduleFee; + uint16 treasuryFee; + uint16 stakeShareLimit; + uint16 priorityExitShareThreshold; + uint8 status; + uint8 withdrawalCredentialsType; + } + + function getStakingModuleStateConfig(uint256 _stakingModuleId) + external + view + returns (ModuleStateConfig memory stateConfig); +} + interface ILidoLocator { function validatorExitDelayVerifier() external view returns (address); function triggerableWithdrawalsGateway() external view returns (address); - function oracleReportSanityChecker() external view returns(address); + function oracleReportSanityChecker() external view returns (address); + function stakingRouter() external view returns (address); +} + +interface IOracleReportSanityCheckerForExitBus { + function getMaxEffectiveBalanceWeightWCType01() external view returns (uint256); + function getMaxEffectiveBalanceWeightWCType02() external view returns (uint256); } /** @@ -35,8 +75,7 @@ interface ILidoLocator { */ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, Versioned { using UnstructuredStorage for bytes32; - using ExitLimitUtilsStorage for bytes32; - using ExitLimitUtils for ExitRequestLimitData; + using SafeCast for uint256; /** * @notice Thrown when an invalid zero value is passed @@ -65,6 +104,17 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V */ error InvalidRequestsDataSortOrder(); + /** + * @notice Thrown when provided public key does not match the registered signing key + * @param index Index of the validator in the exit request list + */ + error InvalidPublicKey(uint256 index); + + /** + * @notice Thrown when retrieved pubkey length is invalid + */ + error InvalidRetrievedKeyLength(); + /** * Thrown when there are attempt to send exit events for 
request that was not submitted earlier by trusted entities */ @@ -93,11 +143,11 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V error InvalidExitDataIndexSortOrder(); /** - * @notice Thrown when remaining exit requests limit is not enough to cover sender requests - * @param requestsCount Amount of requests that were sent for processing - * @param remainingLimit Amount of requests that still can be processed at current day + * @notice Thrown when remaining exit balance limit is not enough to cover the exit requests + * @param balanceEth Total balance being requested for exit in ETH + * @param remainingLimitEth Remaining balance limit in ETH that can still be processed */ - error ExitRequestsLimitExceeded(uint256 requestsCount, uint256 remainingLimit); + error ExitRequestsLimitExceeded(uint256 balanceEth, uint256 remainingLimitEth); /** * @notice Thrown when submitting was not started for request @@ -110,6 +160,9 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V */ error TooManyExitRequestsInReport(uint256 requestsCount, uint256 maxRequestsPerReport); + error InvalidMaxEBWeight(); + error UnexpectedWCType(); + /** * @notice Emitted when an entity with the SUBMIT_REPORT_HASH_ROLE role submits a hash of the exit requests data. * @param exitRequestsHash keccak256 hash of the encoded validators list @@ -134,11 +187,11 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V /** * @notice Emitted when limits configs are set. - * @param maxExitRequestsLimit The maximum number of exit requests. - * @param exitsPerFrame The number of exits that can be restored per frame. - * @param frameDurationInSec The duration of each frame, in seconds, after which `exitsPerFrame` exits can be restored. + * @param maxExitBalanceEth The maximum exit balance limit in ETH. + * @param balancePerFrameEth The exit balance in ETH that can be restored per frame. 
+ * @param frameDurationInSec The duration of each frame, in seconds, after which `balancePerFrameEth` can be restored. */ - event ExitRequestsLimitSet(uint256 maxExitRequestsLimit, uint256 exitsPerFrame, uint256 frameDurationInSec); + event ExitBalanceLimitSet(uint256 maxExitBalanceEth, uint256 balancePerFrameEth, uint256 frameDurationInSec); /** * @notice Emitted when exit requests were delivered @@ -148,7 +201,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V /** * @notice Emitted when max validators per report value is set. - * @param maxValidatorsPerReport The number of valdiators allowed per report. + * @param maxValidatorsPerReport The number of validators allowed per report. */ event SetMaxValidatorsPerReport(uint256 maxValidatorsPerReport); @@ -161,6 +214,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V uint256 nodeOpId; uint256 moduleId; uint256 valIndex; + uint256 keyIndex; // - will be max uint256 for format 1, actual value for format 2 bytes pubkey; } @@ -181,6 +235,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V /// Length in bytes of packed request uint256 internal constant PACKED_REQUEST_LENGTH = 64; + uint256 internal constant PACKED_REQUEST_LENGTH_V2 = 72; uint256 internal constant PUBLIC_KEY_LENGTH = 48; @@ -202,13 +257,25 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V /// uint256 public constant DATA_FORMAT_LIST = 1; + /// @notice The extended list format that includes a key index for each validator. + /// + /// Each validator exit request is 72 bytes: + /// + /// MSB <-------------------------------------------------------------------- LSB + /// | 3 bytes | 5 bytes | 8 bytes | 8 bytes | 48 bytes | + /// | moduleId | nodeOpId | validatorIndex | keyIndex | validatorPubkey | + /// + /// Encoding and sorting rules are identical to `DATA_FORMAT_LIST`. 
+ /// `keyIndex` is used to validate the pubkey against on-chain registered keys. + uint256 public constant DATA_FORMAT_LIST_WITH_KEY_INDEX = 2; + ILidoLocator internal immutable LOCATOR; /// @dev Storage slot: uint256 totalRequestsProcessed bytes32 internal constant TOTAL_REQUESTS_PROCESSED_POSITION = keccak256("lido.ValidatorsExitBusOracle.totalRequestsProcessed"); - // Storage slot for exit request limit configuration and current quota tracking - bytes32 internal constant EXIT_REQUEST_LIMIT_POSITION = keccak256("lido.ValidatorsExitBus.maxExitRequestLimit"); + // Storage slot for exit balance limit configuration and current quota tracking (in ETH, not Gwei) + bytes32 internal constant EXIT_BALANCE_LIMIT_POSITION = keccak256("lido.ValidatorsExitBus.exitBalanceLimitEth"); // Storage slot for the maximum number of validator exit requests allowed per processing report bytes32 internal constant MAX_VALIDATORS_PER_REPORT_POSITION = keccak256("lido.ValidatorsExitBus.maxValidatorsPerReport"); @@ -218,7 +285,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V uint256 public constant EXIT_TYPE = 2; - /// @dev Ensures the contract’s ETH balance is unchanged. + /// @dev Ensures the contract's ETH balance is unchanged. modifier preservesEthBalance() { uint256 balanceBeforeCall = address(this).balance - msg.value; _; @@ -229,6 +296,16 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V LOCATOR = ILidoLocator(lidoLocator); } + function MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_01() public view returns (uint16) { + (uint16 maxEBWeightType1, ) = _getMaxEffectiveBalanceWeights(); + return maxEBWeightType1; + } + + function MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_02() public view returns (uint16) { + (, uint16 maxEBWeightType2) = _getMaxEffectiveBalanceWeights(); + return maxEBWeightType2; + } + /** * @notice Submit a hash of the exit requests data. 
* @@ -272,16 +349,17 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V _checkExitRequestData(request.data, request.dataFormat); _checkContractVersion(requestStatus.contractVersion); - uint256 requestsCount = request.data.length / PACKED_REQUEST_LENGTH; + uint256 requestsCount = request.data.length / _getPackedRequestLength(request.dataFormat); uint256 maxRequestsPerReport = _getMaxValidatorsPerReport(); if (requestsCount > maxRequestsPerReport) { revert TooManyExitRequestsInReport(requestsCount, maxRequestsPerReport); } - _consumeLimit(requestsCount); + uint256 totalBalanceEth = _calculateTotalExitBalanceEth(request.data, request.dataFormat); + _consumeLimit(totalBalanceEth); - _processExitRequestsList(request.data); + _processExitRequestsList(request.data, request.dataFormat); TOTAL_REQUESTS_PROCESSED_POSITION.setStorageUint256( TOTAL_REQUESTS_PROCESSED_POSITION.getStorageUint256() + requestsCount @@ -332,7 +410,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V memory triggerableExitData = new ITriggerableWithdrawalsGateway.ValidatorData[](exitDataIndexes.length); uint256 lastExitDataIndex = type(uint256).max; - uint256 requestsCount = exitsData.data.length / PACKED_REQUEST_LENGTH; + uint256 requestsCount = exitsData.data.length / _getPackedRequestLength(exitsData.dataFormat); for (uint256 i = 0; i < exitDataIndexes.length; i++) { if (exitDataIndexes[i] >= requestsCount) { @@ -345,7 +423,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V lastExitDataIndex = exitDataIndexes[i]; - ValidatorData memory validatorData = _getValidatorData(exitsData.data, exitDataIndexes[i]); + ValidatorData memory validatorData = _getValidatorData(exitsData.data, exitsData.dataFormat, exitDataIndexes[i]); if (validatorData.moduleId == 0) revert InvalidModuleId(); @@ -363,45 +441,45 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V /** * @notice Sets 
the limits config - * @param maxExitRequestsLimit The maximum number of exit requests. - * @param exitsPerFrame The number of exits that can be restored per frame. - * @param frameDurationInSec The duration of each frame, in seconds, after which `exitsPerFrame` exits can be restored. + * @param maxExitBalanceEth The maximum exit balance limit in ETH. + * @param balancePerFrameEth The exit balance in ETH that can be restored per frame. + * @param frameDurationInSec The duration of each frame, in seconds, after which `balancePerFrameEth` can be restored. */ function setExitRequestLimit( - uint256 maxExitRequestsLimit, - uint256 exitsPerFrame, + uint256 maxExitBalanceEth, + uint256 balancePerFrameEth, uint256 frameDurationInSec ) external onlyRole(EXIT_REQUEST_LIMIT_MANAGER_ROLE) { - _setExitRequestLimit(maxExitRequestsLimit, exitsPerFrame, frameDurationInSec); + _setExitRequestLimit(maxExitBalanceEth, balancePerFrameEth, frameDurationInSec); } /** * @notice Returns information about current limits data - * @return maxExitRequestsLimit Maximum exit requests limit - * @return exitsPerFrame The number of exits that can be restored per frame. - * @return frameDurationInSec The duration of each frame, in seconds, after which `exitsPerFrame` exits can be restored. 
- * @return prevExitRequestsLimit Limit left after previous requests - * @return currentExitRequestsLimit Current exit requests limit + * @return maxExitBalanceEth Maximum exit balance limit in ETH + * @return balancePerFrameEth The exit balance in ETH that can be restored per frame + * @return frameDurationInSec The duration of each frame, in seconds, after which `balancePerFrameEth` can be restored + * @return prevExitBalanceEth Balance limit in ETH left after previous requests + * @return currentExitBalanceEth Current exit balance limit in ETH */ function getExitRequestLimitFullInfo() external view returns ( - uint256 maxExitRequestsLimit, - uint256 exitsPerFrame, + uint256 maxExitBalanceEth, + uint256 balancePerFrameEth, uint256 frameDurationInSec, - uint256 prevExitRequestsLimit, - uint256 currentExitRequestsLimit + uint256 prevExitBalanceEth, + uint256 currentExitBalanceEth ) { - ExitRequestLimitData memory exitRequestLimitData = EXIT_REQUEST_LIMIT_POSITION.getStorageExitRequestLimit(); - maxExitRequestsLimit = exitRequestLimitData.maxExitRequestsLimit; - exitsPerFrame = exitRequestLimitData.exitsPerFrame; - frameDurationInSec = exitRequestLimitData.frameDurationInSec; - prevExitRequestsLimit = exitRequestLimitData.prevExitRequestsLimit; - - currentExitRequestsLimit = exitRequestLimitData.isExitLimitSet() - ? exitRequestLimitData.calculateCurrentExitLimit(_getTimestamp()) + LimitData memory limitData = RateLimitStorage.getStorageLimit(EXIT_BALANCE_LIMIT_POSITION); + maxExitBalanceEth = limitData.maxLimit; + balancePerFrameEth = limitData.itemsPerFrame; + frameDurationInSec = limitData.frameDurationInSec; + prevExitBalanceEth = limitData.prevLimit; + + currentExitBalanceEth = RateLimit.isLimitSet(limitData) + ? 
RateLimit.calculateCurrentLimit(limitData, _getTimestamp()) : type(uint256).max; } @@ -457,11 +535,12 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V ) external pure returns (bytes memory pubkey, uint256 nodeOpId, uint256 moduleId, uint256 valIndex) { _checkExitRequestData(exitRequests, dataFormat); - if (index >= exitRequests.length / PACKED_REQUEST_LENGTH) { - revert ExitDataIndexOutOfRange(index, exitRequests.length / PACKED_REQUEST_LENGTH); + uint256 requestsCount = exitRequests.length / _getPackedRequestLength(dataFormat); + if (index >= requestsCount) { + revert ExitDataIndexOutOfRange(index, requestsCount); } - ValidatorData memory validatorData = _getValidatorData(exitRequests, index); + ValidatorData memory validatorData = _getValidatorData(exitRequests, dataFormat, index); valIndex = validatorData.valIndex; nodeOpId = validatorData.nodeOpId; @@ -507,14 +586,23 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V return TOTAL_REQUESTS_PROCESSED_POSITION.getStorageUint256(); } + /// @dev Returns the packed request length for a given data format + function _getPackedRequestLength(uint256 dataFormat) internal pure returns (uint256) { + if (dataFormat == DATA_FORMAT_LIST) { + return PACKED_REQUEST_LENGTH; // 64 + } else if (dataFormat == DATA_FORMAT_LIST_WITH_KEY_INDEX) { + return PACKED_REQUEST_LENGTH_V2; // 72 + } else { + revert UnsupportedRequestsDataFormat(dataFormat); + } + } + /// Internal functions function _checkExitRequestData(bytes calldata requests, uint256 dataFormat) internal pure { - if (dataFormat != DATA_FORMAT_LIST) { - revert UnsupportedRequestsDataFormat(dataFormat); - } + uint256 packedLength = _getPackedRequestLength(dataFormat); // validates format - if (requests.length == 0 || requests.length % PACKED_REQUEST_LENGTH != 0) { + if (requests.length == 0 || requests.length % packedLength != 0) { revert InvalidRequestsDataLength(); } } @@ -554,38 +642,40 @@ abstract contract 
ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V } function _setExitRequestLimit( - uint256 maxExitRequestsLimit, - uint256 exitsPerFrame, + uint256 maxExitBalanceEth, + uint256 balancePerFrameEth, uint256 frameDurationInSec ) internal { uint256 timestamp = _getTimestamp(); - EXIT_REQUEST_LIMIT_POSITION.setStorageExitRequestLimit( - EXIT_REQUEST_LIMIT_POSITION.getStorageExitRequestLimit().setExitLimits( - maxExitRequestsLimit, - exitsPerFrame, - frameDurationInSec, - timestamp - ) + LimitData memory limitData = RateLimitStorage.getStorageLimit(EXIT_BALANCE_LIMIT_POSITION); + limitData = RateLimit.setLimits( + limitData, + maxExitBalanceEth, + balancePerFrameEth, + frameDurationInSec, + timestamp ); + RateLimitStorage.setStorageLimit(EXIT_BALANCE_LIMIT_POSITION, limitData); - emit ExitRequestsLimitSet(maxExitRequestsLimit, exitsPerFrame, frameDurationInSec); + emit ExitBalanceLimitSet(maxExitBalanceEth, balancePerFrameEth, frameDurationInSec); } - function _consumeLimit(uint256 requestsCount) internal { - ExitRequestLimitData memory exitRequestLimitData = EXIT_REQUEST_LIMIT_POSITION.getStorageExitRequestLimit(); - if (!exitRequestLimitData.isExitLimitSet()) { + function _consumeLimit(uint256 balanceEth) internal { + LimitData memory limitData = RateLimitStorage.getStorageLimit(EXIT_BALANCE_LIMIT_POSITION); + if (!RateLimit.isLimitSet(limitData)) { return; } - uint256 limit = exitRequestLimitData.calculateCurrentExitLimit(_getTimestamp()); + uint256 limitEth = RateLimit.calculateCurrentLimit(limitData, _getTimestamp()); - if (requestsCount > limit) { - revert ExitRequestsLimitExceeded(requestsCount, limit); + if (balanceEth > limitEth) { + revert ExitRequestsLimitExceeded(balanceEth, limitEth); } - EXIT_REQUEST_LIMIT_POSITION.setStorageExitRequestLimit( - exitRequestLimitData.updatePrevExitLimit(limit - requestsCount, _getTimestamp()) + RateLimitStorage.setStorageLimit( + EXIT_BALANCE_LIMIT_POSITION, + RateLimit.updatePrevLimit(limitData, limitEth - 
balanceEth, _getTimestamp()) ); } @@ -634,13 +724,35 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V /// Methods for reading data from tightly packed validator exit requests /// Format DATA_FORMAT_LIST = 1; - /** - * @notice Method for reading node operator id, module id and validator index from validator exit request data - * @param exitRequestData Validator exit requests data. DATA_FORMAT = 1 - * @param index index of request in array above - * @return validatorData Validator data including node operator id, module id, validator index - */ + /** + * @notice Method for reading node operator id, module id, validator index, and optionally key index + * from validator exit request data + * @param exitRequestData Validator exit requests data + * @param dataFormat Format of the data (1 or 2) + * @param index index of request in array above + * @return validatorData Validator data including node operator id, module id, validator index, and key index + */ function _getValidatorData( + bytes calldata exitRequestData, + uint256 dataFormat, + uint256 index + ) internal pure returns (ValidatorData memory validatorData) { + if (dataFormat == DATA_FORMAT_LIST) { + return _getValidatorDataV1(exitRequestData, index); + } else if (dataFormat == DATA_FORMAT_LIST_WITH_KEY_INDEX) { + return _getValidatorDataV2(exitRequestData, index); + } else { + revert UnsupportedRequestsDataFormat(dataFormat); + } + } + + /** + * @notice Extracts validator data from format 1 (64 bytes per request, no keyIndex) + * @param exitRequestData Validator exit requests data + * @param index index of request in array + * @return validatorData Validator data with keyIndex = type(uint256).max + */ + function _getValidatorDataV1( bytes calldata exitRequestData, uint256 index ) internal pure returns (ValidatorData memory validatorData) { @@ -663,6 +775,7 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V validatorData.valIndex = 
uint64(dataWithoutPubkey); validatorData.nodeOpId = uint40(dataWithoutPubkey >> 64); validatorData.moduleId = uint24(dataWithoutPubkey >> (64 + 40)); + validatorData.keyIndex = type(uint256).max; // Format 1 always uses keyIndex set to max uint256 to indicate unused bytes memory pubkey = new bytes(PUBLIC_KEY_LENGTH); assembly { @@ -676,10 +789,207 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V } /** - * This method read report data (DATA_FORMAT=1) within a range - * Check dataWithoutPubkey <= lastDataWithoutPubkey needs to prevent duplicates + * @notice Extracts validator data from format 2 (72 bytes per request, includes keyIndex) + * @param exitRequestData Validator exit requests data + * @param index index of request in array + * @return validatorData Validator data with extracted keyIndex + */ + function _getValidatorDataV2( + bytes calldata exitRequestData, + uint256 index + ) internal pure returns (ValidatorData memory validatorData) { + uint256 itemOffset; + uint256 dataWithoutPubkey; + + assembly { + // Compute the start of this packed request (item) + itemOffset := add(exitRequestData.offset, mul(PACKED_REQUEST_LENGTH_V2, index)) + + // Load the first 24 bytes which contain moduleId (24 bits), + // nodeOpId (40 bits), valIndex (64 bits), and keyIndex (64 bits). 
+ dataWithoutPubkey := shr(64, calldataload(itemOffset)) + } + + // dataWithoutPubkey layout (256-bit word; top 64 bits are zero after the 64-bit shift): + // MSB <--------------------------------- 256 bits ----------------------------------> LSB + // | 64 bits: zeros | 24 bits: moduleId | 40 bits: nodeOpId | 64 bits: valIndex | 64 bits: keyIndex | + + validatorData.keyIndex = uint64(dataWithoutPubkey); + validatorData.valIndex = uint64(dataWithoutPubkey >> 64); + validatorData.nodeOpId = uint40(dataWithoutPubkey >> (64 + 64)); + validatorData.moduleId = uint24(dataWithoutPubkey >> (64 + 64 + 40)); + + bytes memory pubkey = new bytes(PUBLIC_KEY_LENGTH); + assembly { + itemOffset := add(exitRequestData.offset, mul(PACKED_REQUEST_LENGTH_V2, index)) + let pubkeyCalldataOffset := add(itemOffset, 24) + let pubkeyMemPtr := add(pubkey, 32) + calldatacopy(pubkeyMemPtr, pubkeyCalldataOffset, PUBLIC_KEY_LENGTH) + } + + validatorData.pubkey = pubkey; + } + + /** + * @notice Calculates the total balance in ETH for all validators in the exit requests + * @dev This function determines the max effective balance based on the module's withdrawal credentials type: + * - Legacy modules (0x01 withdrawal credentials): 32 ETH per validator + * - Compounding modules (0x02 withdrawal credentials): 2048 ETH per validator (post-MaxEB/EIP-7251) + * + * The withdrawal credentials type is queried from the Staking Router for each module, + * eliminating the need for hardcoded module IDs. + * + * For gas efficiency, module types are cached during iteration to avoid repeated external calls + * for the same module.
+ * + * @param data Packed exit requests data + * @param dataFormat Format of the data (1 or 2) + * @return totalBalanceEth Total balance of all validators being exited in ETH + */ + function _calculateTotalExitBalanceEth(bytes calldata data, uint256 dataFormat) + internal + view + returns (uint256 totalBalanceEth) + { + (uint16 maxEBWeightType1, uint16 maxEBWeightType2) = _getMaxEffectiveBalanceWeights(); + uint256 packedLength; + uint256 dataShift; + uint256 moduleShift; + + if (dataFormat == DATA_FORMAT_LIST) { + packedLength = PACKED_REQUEST_LENGTH; + dataShift = 128; + moduleShift = 104; + } else if (dataFormat == DATA_FORMAT_LIST_WITH_KEY_INDEX) { + packedLength = PACKED_REQUEST_LENGTH_V2; + dataShift = 64; + moduleShift = 168; + } else { + revert UnsupportedRequestsDataFormat(dataFormat); + } + + uint256 baseOffset; + assembly { + baseOffset := data.offset + } + + uint256 requestsCount = data.length / packedLength; + uint256 cachedModuleId = 0; + uint256 cachedModuleMaxEBWeightEth = 0; + + for (uint256 i = 0; i < requestsCount; ++i) { + uint256 moduleId; + uint256 itemOffset; + + assembly { + itemOffset := add(baseOffset, mul(packedLength, i)) + let dataWithoutPubkey := shr(dataShift, calldataload(itemOffset)) + moduleId := shr(moduleShift, dataWithoutPubkey) // Extract top 24 bits + } + + if (moduleId != cachedModuleId) { + cachedModuleId = moduleId; + cachedModuleMaxEBWeightEth = _getModuleMaxEBWeight(moduleId, maxEBWeightType1, maxEBWeightType2); + } + totalBalanceEth += cachedModuleMaxEBWeightEth; + } + } + + function _getMaxEffectiveBalanceWeights() internal view returns (uint16 maxEBWeightType1, uint16 maxEBWeightType2) { + IOracleReportSanityCheckerForExitBus sanityChecker = + IOracleReportSanityCheckerForExitBus(LOCATOR.oracleReportSanityChecker()); + + uint256 maxEBWeightType1Raw = sanityChecker.getMaxEffectiveBalanceWeightWCType01(); + uint256 maxEBWeightType2Raw = sanityChecker.getMaxEffectiveBalanceWeightWCType02(); + if ( + maxEBWeightType1Raw 
== 0 || maxEBWeightType2Raw == 0 || maxEBWeightType1Raw > type(uint16).max + || maxEBWeightType2Raw > type(uint16).max + ) { + revert InvalidMaxEBWeight(); + } + + maxEBWeightType1 = maxEBWeightType1Raw.toUint16(); + maxEBWeightType2 = maxEBWeightType2Raw.toUint16(); + } + + function _getModuleMaxEBWeight( + uint256 moduleId, + uint16 maxEBWeightType1, + uint16 maxEBWeightType2 + ) internal view returns (uint16) { + uint256 wcType = + IStakingRouter(LOCATOR.stakingRouter()).getStakingModuleStateConfig(moduleId).withdrawalCredentialsType; + if (WithdrawalCredentials.isType1(wcType)) { + return maxEBWeightType1; + } else if (WithdrawalCredentials.isType2(wcType)) { + return maxEBWeightType2; + } + revert UnexpectedWCType(); + } + + /** + * @notice Verify that a pubkey belongs to the specified module and node operator + * @param moduleId Staking module ID + * @param nodeOpId Node operator ID + * @param keyIndex Index of the key in the module + * @param pubkey Public key to verify (48 bytes) + * @param requestIndex Index of the request in the batch (for error reporting) + * @param cachedModuleId Previously cached module ID (type(uint256).max if none) + * @param cachedModuleAddress Previously cached module address + * @return newModuleAddress Updated module address (same if module unchanged) */ - function _processExitRequestsList(bytes calldata data) internal { + function _verifyKey( + uint256 moduleId, + uint256 nodeOpId, + uint256 keyIndex, + bytes calldata pubkey, + uint256 requestIndex, + uint256 cachedModuleId, + address cachedModuleAddress + ) internal view returns (address newModuleAddress) { + if (moduleId == cachedModuleId) { + newModuleAddress = cachedModuleAddress; + } else { + newModuleAddress = IStakingRouter(LOCATOR.stakingRouter()).getStakingModuleStateConfig(moduleId).moduleAddress; + } + + bytes memory retrievedKeys = IUnifiedStakingModule(newModuleAddress) + .getSigningKeys( + nodeOpId, + keyIndex, // startIndex + 1 // keysCount: get only 1 key + ); + 
+ if (retrievedKeys.length != 48) { + revert InvalidRetrievedKeyLength(); + } + + if (keccak256(retrievedKeys) != keccak256(pubkey)) { + revert InvalidPublicKey(requestIndex); + } + } + + /** + * @notice Dispatcher that processes exit requests based on data format + * @param data Packed exit requests data + * @param dataFormat Format of the data (1 or 2) + */ + function _processExitRequestsList(bytes calldata data, uint256 dataFormat) internal { + if (dataFormat == DATA_FORMAT_LIST) { + _processExitRequestsListV1(data); + } else if (dataFormat == DATA_FORMAT_LIST_WITH_KEY_INDEX) { + _processExitRequestsListV2(data); + } else { + revert UnsupportedRequestsDataFormat(dataFormat); + } + } + + /** + * @notice Process exit requests for format 1 (64 bytes per request, no keyIndex) + * @dev Check dataWithoutPubkey <= lastDataWithoutPubkey prevents duplicates and ensures sorting + * @param data Packed exit requests data (DATA_FORMAT=1) + */ + function _processExitRequestsListV1(bytes calldata data) internal { uint256 offset; uint256 offsetPastEnd; uint256 lastDataWithoutPubkey = 0; @@ -731,6 +1041,91 @@ abstract contract ValidatorsExitBus is AccessControlEnumerable, PausableUntil, V } } + /** + * @notice Process exit requests for format 2 (72 bytes per request, includes keyIndex) + * @dev Uniqueness and sort check uses (moduleId, nodeOpId, valIndex) only — keyIndex is excluded + * so that the same validator cannot appear twice with different keyIndex (double-counted). 
+ * @param data Packed exit requests data (DATA_FORMAT=2) + */ + function _processExitRequestsListV2(bytes calldata data) internal { + uint256 offset; + uint256 offsetPastEnd; + uint256 lastValidatorData = 0; // (moduleId, nodeOpId, valIndex) — 128 bits, no keyIndex + uint256 timestamp = _getTimestamp(); + uint256 index = 0; + + assembly { + offset := data.offset + offsetPastEnd := add(offset, data.length) + } + + bytes calldata pubkey; + uint256 dataWithoutPubkey; + uint256 validatorData; // 128 bits: moduleId | nodeOpId | valIndex (for sort/duplicate check) + uint256 moduleId; + + // Cache module data to avoid repeated external calls for the same module + uint256 cachedModuleId = 0; + address cachedModuleAddress; + + assembly { + pubkey.length := 48 + } + + while (offset < offsetPastEnd) { + assembly { + // 24 most significant bytes are taken by module id, node op id, val index, and key index + dataWithoutPubkey := shr(64, calldataload(offset)) + // the next 48 bytes are taken by the pubkey + pubkey.offset := add(offset, 24) + // totalling to 72 bytes + offset := add(offset, 72) + } + + // For sort/duplicate check use only (moduleId, nodeOpId, valIndex) — drop keyIndex (low 64 bits) + validatorData = dataWithoutPubkey >> 64; + + moduleId = uint24(dataWithoutPubkey >> (64 + 64 + 40)); + + if (moduleId == 0) { + revert InvalidModuleId(); + } + + // Uniqueness and sort by validator identity only: (moduleId, nodeOpId, valIndex) + // dataWithoutPubkey (192b): ... | valIndex | keyIndex | -> validatorData (128b): ... 
| valIndex + if (validatorData <= lastValidatorData) { + revert InvalidRequestsDataSortOrder(); + } + + // Verify that the pubkey belongs to the module and node operator + // Cache is updated if module changed + cachedModuleAddress = _verifyKey( + moduleId, + uint40(dataWithoutPubkey >> (64 + 64)), // nodeOpId + uint64(dataWithoutPubkey), // keyIndex + pubkey, + index, + cachedModuleId, + cachedModuleAddress + ); + + cachedModuleId = moduleId; + lastValidatorData = validatorData; + + emit ValidatorExitRequest( + moduleId, + uint40(dataWithoutPubkey >> (64 + 64)), // nodeOpId + uint64(dataWithoutPubkey >> 64), // valIndex + pubkey, + timestamp + ); + + unchecked { + ++index; + } + } + } + /// Storage helpers function _storageRequestStatus() internal pure returns (mapping(bytes32 => RequestStatus) storage r) { diff --git a/contracts/0.8.9/oracle/ValidatorsExitBusOracle.sol b/contracts/0.8.9/oracle/ValidatorsExitBusOracle.sol index b3062b241b..31505ff001 100644 --- a/contracts/0.8.9/oracle/ValidatorsExitBusOracle.sol +++ b/contracts/0.8.9/oracle/ValidatorsExitBusOracle.sol @@ -10,7 +10,7 @@ import {BaseOracle} from "./BaseOracle.sol"; import {ValidatorsExitBus} from "./ValidatorsExitBus.sol"; interface IOracleReportSanityChecker { - function checkExitBusOracleReport(uint256 _exitRequestsCount) external view; + function checkExitBusOracleReport(uint256 _maxBalanceExitRequestedPerReportInEth) external view; } contract ValidatorsExitBusOracle is BaseOracle, ValidatorsExitBus { @@ -49,16 +49,19 @@ contract ValidatorsExitBusOracle is BaseOracle, ValidatorsExitBus { uint256 secondsPerSlot, uint256 genesisTime, address lidoLocator - ) BaseOracle(secondsPerSlot, genesisTime) ValidatorsExitBus(lidoLocator) {} + ) + BaseOracle(secondsPerSlot, genesisTime) + ValidatorsExitBus(lidoLocator) + {} function initialize( address admin, address consensusContract, uint256 consensusVersion, uint256 lastProcessingRefSlot, - uint256 maxValidatorsPerRequest, - uint256 maxExitRequestsLimit, - 
uint256 exitsPerFrame, + uint256 maxValidatorsPerReport, + uint256 maxExitBalanceEth, + uint256 balancePerFrameEth, uint256 frameDurationInSec ) external { if (admin == address(0)) revert AdminCannotBeZero(); @@ -66,33 +69,30 @@ contract ValidatorsExitBusOracle is BaseOracle, ValidatorsExitBus { _pauseFor(PAUSE_INFINITELY); _initialize(consensusContract, consensusVersion, lastProcessingRefSlot); + _updateContractVersion(2); + _updateContractVersion(3); - _initialize_v2(maxValidatorsPerRequest, maxExitRequestsLimit, exitsPerFrame, frameDurationInSec); + _setMaxValidatorsPerReport(maxValidatorsPerReport); + _setExitRequestLimit(maxExitBalanceEth, balancePerFrameEth, frameDurationInSec); } /** - * @notice A function to finalize upgrade to v2 (from v1). Can be called only once + * @notice A function to finalize upgrade to v3 (from v1). Can be called only once * * For more details see https://github.com/lidofinance/lido-improvement-proposals/blob/develop/LIPS/lip-10.md */ - function finalizeUpgrade_v2( + function finalizeUpgrade_v3( uint256 maxValidatorsPerReport, - uint256 maxExitRequestsLimit, - uint256 exitsPerFrame, - uint256 frameDurationInSec + uint256 maxExitBalanceEth, + uint256 balancePerFrameEth, + uint256 frameDurationInSec, + uint256 consensusVersion ) external { - _initialize_v2(maxValidatorsPerReport, maxExitRequestsLimit, exitsPerFrame, frameDurationInSec); - } + _updateContractVersion(3); + _setConsensusVersion(consensusVersion); - function _initialize_v2( - uint256 maxValidatorsPerReport, - uint256 maxExitRequestsLimit, - uint256 exitsPerFrame, - uint256 frameDurationInSec - ) internal { - _updateContractVersion(2); _setMaxValidatorsPerReport(maxValidatorsPerReport); - _setExitRequestLimit(maxExitRequestsLimit, exitsPerFrame, frameDurationInSec); + _setExitRequestLimit(maxExitBalanceEth, balancePerFrameEth, frameDurationInSec); } /// @@ -119,8 +119,8 @@ contract ValidatorsExitBusOracle is BaseOracle, ValidatorsExitBus { /// @dev Total number of 
validator exit requests in this report. Must not be greater /// than limit checked in OracleReportSanityChecker.checkExitBusOracleReport. uint256 requestsCount; - /// @dev Format of the validator exit requests data. Currently, only the - /// DATA_FORMAT_LIST=1 is supported. + /// @dev Format of the validator exit requests data. Currently, only the extended + /// DATA_FORMAT_LIST_WITH_KEY_INDEX=2 is supported. uint256 dataFormat; /// @dev Validator exit requests data. Can differ based on the data format, /// see the constant defining a specific data format below for more info. @@ -226,27 +226,33 @@ contract ValidatorsExitBusOracle is BaseOracle, ValidatorsExitBus { } function _handleConsensusReportData(ReportData calldata data) internal { - if (data.dataFormat != DATA_FORMAT_LIST) { + if (data.dataFormat != DATA_FORMAT_LIST_WITH_KEY_INDEX) { revert UnsupportedRequestsDataFormat(data.dataFormat); } - if (data.data.length % PACKED_REQUEST_LENGTH != 0) { + uint256 packedLength = _getPackedRequestLength(data.dataFormat); + if (data.data.length % packedLength != 0) { revert InvalidRequestsDataLength(); } - if (data.data.length / PACKED_REQUEST_LENGTH != data.requestsCount) { + if (data.data.length / packedLength != data.requestsCount) { revert UnexpectedRequestsDataLength(); } - IOracleReportSanityChecker(LOCATOR.oracleReportSanityChecker()).checkExitBusOracleReport(data.requestsCount); + // Calculate total balance of validators being exited in ETH (uint256) + // Per-validator weight depends on each module's withdrawal credentials type (0x01 vs 0x02), not on a hardcoded module id + uint256 totalExitBalanceEth = _calculateTotalExitBalanceEth(data.data, data.dataFormat); + IOracleReportSanityChecker(LOCATOR.oracleReportSanityChecker()).checkExitBusOracleReport( + totalExitBalanceEth + ); - _processExitRequestsList(data.data); + _processExitRequestsList(data.data, data.dataFormat); _storageDataProcessingState().value = DataProcessingState({ refSlot: data.refSlot.toUint64(), requestsCount: data.requestsCount.toUint64(),
requestsProcessed: data.requestsCount.toUint64(), - dataFormat: uint16(DATA_FORMAT_LIST) + dataFormat: uint16(data.dataFormat) }); if (data.requestsCount == 0) { diff --git a/contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol b/contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol index bd1c1ab03c..1480a33d5a 100644 --- a/contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol +++ b/contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol @@ -12,8 +12,8 @@ import {AccessControlEnumerable} from "../utils/access/AccessControlEnumerable.s import {PositiveTokenRebaseLimiter, TokenRebaseLimiterData} from "../lib/PositiveTokenRebaseLimiter.sol"; import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; import {IBurner} from "contracts/common/interfaces/IBurner.sol"; - -import {StakingRouter} from "../StakingRouter.sol"; +import {ILido} from "contracts/common/interfaces/ILido.sol"; +import {IVersioned} from "contracts/common/interfaces/IVersioned.sol"; import {ISecondOpinionOracle} from "../interfaces/ISecondOpinionOracle.sol"; interface IWithdrawalQueue { @@ -32,32 +32,36 @@ interface IWithdrawalQueue { bool isClaimed; } - function getWithdrawalStatus(uint256[] calldata _requestIds) - external - view - returns (WithdrawalRequestStatus[] memory statuses); + function getWithdrawalStatus( + uint256[] calldata _requestIds + ) external view returns (WithdrawalRequestStatus[] memory statuses); } interface IBaseOracle { - function SECONDS_PER_SLOT() external view returns (uint256); - function GENESIS_TIME() external view returns (uint256); function getLastProcessingRefSlot() external view returns (uint256); } +interface IStakingRouter { + function getStakingModuleStateAccounting(uint256 _stakingModuleId) + external + view + returns ( + uint64 validatorsBalanceGwei, + uint64 exitedValidatorsCount + ); +} + /// @notice The set of restrictions used in the sanity checks of the oracle report /// @dev struct is loaded from the storage and stored 
in memory during the tx running struct LimitsList { - /// @notice The max possible number of validators that might be reported as `exited` - /// per single day, depends on the Consensus Layer churn limit - /// @dev Must fit into uint16 (<= 65_535) - uint256 exitedValidatorsPerDayLimit; - - /// @notice The max possible number of validators that might be reported as `appeared` - /// per single day, limited by the max daily deposits via DepositSecurityModule in practice - /// isn't limited by a consensus layer (because `appeared` includes `pending`, i.e., not `activated` yet) - /// @dev Must fit into uint16 (<= 65_535) - uint256 appearedValidatorsPerDayLimit; - + /// @notice The max possible exited ETH amount that might be reported + /// per single day. + /// @dev Must fit into uint32 (<= 4_294_967_295) + uint256 exitedEthAmountPerDayLimit; + /// @notice The max possible appeared ETH amount that might be reported + /// per single day. + /// @dev Must fit into uint32 (<= 4_294_967_295) + uint256 appearedEthAmountPerDayLimit; /// @notice The max annual increase of the total validators' balances on the Consensus Layer /// since the previous oracle report /// (the increase that is limited does not include fresh deposits to the Beacon Chain as well as withdrawn ether) @@ -70,85 +74,129 @@ struct LimitsList { /// @dev Represented in the Basis Points (100% == 10_000) uint256 simulatedShareRateDeviationBPLimit; - /// @notice The max number of exit requests allowed in report to ValidatorsExitBusOracle - uint256 maxValidatorExitRequestsPerReport; + /// @notice The max requested to exit balance in ETH + /// @dev Sum of all max effective balances of all requested validators should be equal or lower in one report + uint256 maxBalanceExitRequestedPerReportInEth; + /// @notice WC 0x01 max effective balance equivalent weight in ETH + /// @dev Must fit into uint16 and be non-zero + uint256 maxEffectiveBalanceWeightWCType01; + /// @notice WC 0x02 max effective balance equivalent weight 
in ETH + /// @dev Must fit into uint16 and be non-zero + uint256 maxEffectiveBalanceWeightWCType02; /// @notice The max number of data list items reported to accounting oracle in extra data per single transaction /// @dev Must fit into uint16 (<= 65_535) uint256 maxItemsPerExtraDataTransaction; - /// @notice The max number of node operators reported per extra data list item /// @dev Must fit into uint16 (<= 65_535) uint256 maxNodeOperatorsPerExtraDataItem; - /// @notice The min time required to be passed from the creation of the request to be /// finalized till the time of the oracle report uint256 requestTimestampMargin; - /// @notice The positive token rebase allowed per single LidoOracle report /// @dev uses 1e9 precision, e.g.: 1e6 - 0.1%; 1e9 - 100%, see `setMaxPositiveTokenRebase()` uint256 maxPositiveTokenRebase; - - /// @notice Initial slashing amount per one validator to calculate initial slashing of the validators' balances on the Consensus Layer - /// @dev Represented in the PWei (1^15 Wei). Must fit into uint16 (<= 65_535) - uint256 initialSlashingAmountPWei; - - /// @notice Inactivity penalties amount per one validator to calculate penalties of the validators' balances on the Consensus Layer - /// @dev Represented in the PWei (1^15 Wei). Must fit into uint16 (<= 65_535) - uint256 inactivityPenaltiesAmountPWei; - + /// @notice The max allowed CL balance decrease over the CL_BALANCE_WINDOW as a fraction of the adjusted balance + /// @dev Represented in the Basis Points (100% == 10_000). Must fit into uint16 (<= 65_535) + uint256 maxCLBalanceDecreaseBP; /// @notice The maximum percent on how Second Opinion Oracle reported value could be greater /// than reported by the AccountingOracle. There is an assumption that second opinion oracle CL balance /// can be greater as calculated for the withdrawal credentials. 
/// @dev Represented in the Basis Points (100% == 10_000) uint256 clBalanceOraclesErrorUpperBPLimit; + /// @notice The max possible consolidation ETH amount that might be reported + /// per single day. + /// @dev Must fit into uint32 (<= 4_294_967_295) + uint256 consolidationEthAmountPerDayLimit; + /// @notice Effective ETH amount attributed to a single exited validator + /// in the exited ETH amount per day check. + /// @dev Stored in whole ETH units. Must fit into uint16. + uint256 exitedValidatorEthAmountLimit; } -/// @dev The packed version of the LimitsList struct to be effectively persisted in storage -struct LimitsListPacked { - uint16 exitedValidatorsPerDayLimit; - uint16 appearedValidatorsPerDayLimit; +/// @dev The packed accounting/rebase limits persisted in a single storage slot +struct AccountingCoreLimitsPacked { + uint32 exitedEthAmountPerDayLimit; + uint32 appearedEthAmountPerDayLimit; + uint32 consolidationEthAmountPerDayLimit; uint16 annualBalanceIncreaseBPLimit; uint16 simulatedShareRateDeviationBPLimit; - uint16 maxValidatorExitRequestsPerReport; + uint64 maxPositiveTokenRebase; + uint16 maxCLBalanceDecreaseBP; + uint16 clBalanceOraclesErrorUpperBPLimit; + uint16 exitedValidatorEthAmountLimit; +} + +/// @dev The packed operational limits persisted in a single storage slot +struct OperationalLimitsPacked { + uint16 maxBalanceExitRequestedPerReportInEth; + uint16 maxEffectiveBalanceWeightWCType01; + uint16 maxEffectiveBalanceWeightWCType02; uint16 maxItemsPerExtraDataTransaction; uint16 maxNodeOperatorsPerExtraDataItem; uint32 requestTimestampMargin; - uint64 maxPositiveTokenRebase; - uint16 initialSlashingAmountPWei; - uint16 inactivityPenaltiesAmountPWei; - uint16 clBalanceOraclesErrorUpperBPLimit; } struct ReportData { - uint64 timestamp; - uint64 totalExitedValidators; - uint128 negativeCLRebaseWei; + uint64 timestamp; // Logical report timestamp in seconds + uint128 clBalance; // Total CL balance (validators + pending) in Wei + uint128 
deposits; // Deposits for the period since the last report in Wei + uint128 clWithdrawals; // Actual ETH moved from CL to withdrawal vault this period +} + +struct CLBalanceDecreaseCheckParams { + uint256 maxCLBalanceDecreaseBP; + uint256 clBalanceOraclesErrorUpperBPLimit; + uint256 preCLBalance; + uint256 postCLBalance; + uint256 withdrawalVaultBalance; + uint256 withdrawalsVaultTransfer; + uint256 deposits; + uint256 timeElapsed; +} + +struct CLBalanceChangeCheckParams { + uint256 timeElapsed; + uint256 preCLValidatorsBalance; + uint256 preCLPendingBalance; + uint256 postCLValidatorsBalance; + uint256 postCLPendingBalance; + uint256 deposits; +} + +struct ActivationBalanceCheckResult { + uint256 effectiveTimeElapsed; + uint256 activatedBalanceWithGap; } uint256 constant MAX_BASIS_POINTS = 10_000; uint256 constant SHARE_RATE_PRECISION_E27 = 1e27; -uint256 constant ONE_PWEI = 1e15; /// @title Sanity checks for the Lido's oracle report /// @notice The contracts contain methods to perform sanity checks of the Lido's oracle report /// and lever methods for granular tuning of the params of the checks contract OracleReportSanityChecker is AccessControlEnumerable { using LimitsListPacker for LimitsList; - using LimitsListUnpacker for LimitsListPacked; + using LimitsListUnpacker for AccountingCoreLimitsPacked; using PositiveTokenRebaseLimiter for TokenRebaseLimiterData; bytes32 public constant ALL_LIMITS_MANAGER_ROLE = keccak256("ALL_LIMITS_MANAGER_ROLE"); - bytes32 public constant EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE = - keccak256("EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE"); - bytes32 public constant APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE = - keccak256("APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE"); + bytes32 public constant EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE = + keccak256("EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE"); + bytes32 public constant APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE = + 
keccak256("APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE"); + bytes32 public constant CONSOLIDATION_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE = + keccak256("CONSOLIDATION_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE"); + bytes32 public constant EXITED_VALIDATOR_ETH_AMOUNT_LIMIT_MANAGER_ROLE = + keccak256("EXITED_VALIDATOR_ETH_AMOUNT_LIMIT_MANAGER_ROLE"); bytes32 public constant ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE = keccak256("ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE"); bytes32 public constant SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE = keccak256("SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE"); - bytes32 public constant MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE = - keccak256("MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE"); + bytes32 public constant MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE = + keccak256("MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE"); + bytes32 public constant MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE = + keccak256("MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE"); bytes32 public constant MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE = keccak256("MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE"); bytes32 public constant MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE = @@ -156,21 +204,23 @@ contract OracleReportSanityChecker is AccessControlEnumerable { bytes32 public constant REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE = keccak256("REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE"); bytes32 public constant MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE = keccak256("MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE"); - bytes32 public constant SECOND_OPINION_MANAGER_ROLE = - keccak256("SECOND_OPINION_MANAGER_ROLE"); - bytes32 public constant INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE = - keccak256("INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE"); - + bytes32 public constant SECOND_OPINION_MANAGER_ROLE = keccak256("SECOND_OPINION_MANAGER_ROLE"); + bytes32 public constant MAX_CL_BALANCE_DECREASE_MANAGER_ROLE = + keccak256("MAX_CL_BALANCE_DECREASE_MANAGER_ROLE"); uint256 private constant 
DEFAULT_TIME_ELAPSED = 1 hours; uint256 private constant DEFAULT_CL_BALANCE = 1 gwei; uint256 private constant SECONDS_PER_DAY = 24 * 60 * 60; + uint256 private constant ANNUAL_BALANCE_INCREASE_DENOMINATOR = 365 days * MAX_BASIS_POINTS; + /// @dev Maximum withdrawals ether used for migration bootstrap, bounded by CL churn limit per report window + uint256 private constant MAX_WITHDRAWALS_ETH_BY_CHURN_LIMIT_PER_REPORT = 57_600 ether; + /// @dev Time window for the CL balance decrease check + uint256 private constant CL_BALANCE_WINDOW = 36 days; ILidoLocator private immutable LIDO_LOCATOR; - uint256 private immutable GENESIS_TIME; - uint256 private immutable SECONDS_PER_SLOT; address private immutable ACCOUNTING_ADDRESS; - LimitsListPacked private _limits; + AccountingCoreLimitsPacked private _accountingCoreLimits; + OperationalLimitsPacked private _operationalLimits; /// @dev Historical reports data ReportData[] public reportData; @@ -178,23 +228,30 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// @dev The address of the second opinion oracle ISecondOpinionOracle public secondOpinionOracle; + /// @dev Withdrawal vault balance after the last report's transfer was applied. + /// Used to compute actual CL withdrawals: clWithdrawals = WVB_current - _lastVaultBalanceAfterTransfer + uint256 private _lastVaultBalanceAfterTransfer; + + /// @dev Logical timestamp of the latest stored report snapshot. + /// It is advanced by `_timeElapsed` on each accounting report. + uint256 private _lastReportTimestamp; + + /// @dev Migration flag: false until the first successful accounting report after migration. + /// The per-module validators balance increase check is skipped while the flag is false. 
+ bool private _isPostMigrationFirstReportDone; + /// @param _lidoLocator address of the LidoLocator instance - /// @param _accountingOracle address of the AccountingOracle instance /// @param _accounting address of the Accounting instance /// @param _admin address to grant DEFAULT_ADMIN_ROLE of the AccessControl contract /// @param _limitsList initial values to be set for the limits list constructor( address _lidoLocator, - address _accountingOracle, address _accounting, address _admin, LimitsList memory _limitsList ) { if (_admin == address(0)) revert AdminCannotBeZero(); LIDO_LOCATOR = ILidoLocator(_lidoLocator); - - GENESIS_TIME = IBaseOracle(_accountingOracle).GENESIS_TIME(); - SECONDS_PER_SLOT = IBaseOracle(_accountingOracle).SECONDS_PER_SLOT(); ACCOUNTING_ADDRESS = _accounting; _updateLimits(_limitsList); @@ -214,7 +271,19 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// @notice Returns the limits list for the Lido's oracle report sanity checks function getOracleReportLimits() public view returns (LimitsList memory) { - return _limits.unpack(); + return _accountingCoreLimits.unpack(_operationalLimits); + } + + function getMaxCLBalanceDecreaseBP() external view returns (uint256) { + return _accountingCoreLimits.maxCLBalanceDecreaseBP; + } + + function getMaxEffectiveBalanceWeightWCType01() external view returns (uint256) { + return _operationalLimits.maxEffectiveBalanceWeightWCType01; + } + + function getMaxEffectiveBalanceWeightWCType02() external view returns (uint256) { + return _operationalLimits.maxEffectiveBalanceWeightWCType02; } /// @notice Returns max positive token rebase value with 1e9 precision: @@ -241,13 +310,16 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// NB: The value is not set by default (explicit initialization required), /// the recommended sane values are from 0.05% to 0.1%. 
function getMaxPositiveTokenRebase() public view returns (uint256) { - return _limits.maxPositiveTokenRebase; + return _accountingCoreLimits.maxPositiveTokenRebase; } /// @notice Sets the new values for the limits list and second opinion oracle /// @param _limitsList new limits list /// @param _secondOpinionOracle negative rebase oracle. - function setOracleReportLimits(LimitsList calldata _limitsList, ISecondOpinionOracle _secondOpinionOracle) external onlyRole(ALL_LIMITS_MANAGER_ROLE) { + function setOracleReportLimits( + LimitsList calldata _limitsList, + ISecondOpinionOracle _secondOpinionOracle + ) external onlyRole(ALL_LIMITS_MANAGER_ROLE) { _updateLimits(_limitsList); if (_secondOpinionOracle != secondOpinionOracle) { secondOpinionOracle = _secondOpinionOracle; @@ -255,47 +327,58 @@ contract OracleReportSanityChecker is AccessControlEnumerable { } } - /// @notice Sets the new value for the exitedValidatorsPerDayLimit - /// - /// NB: AccountingOracle reports validators as exited once they passed the `EXIT_EPOCH` on Consensus Layer - /// therefore, the value should be set in accordance to the consensus layer churn limit - /// - /// @param _exitedValidatorsPerDayLimit new exitedValidatorsPerDayLimit value - function setExitedValidatorsPerDayLimit(uint256 _exitedValidatorsPerDayLimit) - external - onlyRole(EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.exitedValidatorsPerDayLimit = _exitedValidatorsPerDayLimit; - _updateLimits(limitsList); + /// @notice Sets the new value for the exitedEthAmountPerDayLimit + /// @param _exitedEthAmountPerDayLimit new exitedEthAmountPerDayLimit value + function setExitedEthAmountPerDayLimit( + uint256 _exitedEthAmountPerDayLimit + ) public onlyRole(EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE) { + _checkLimitValue(_exitedEthAmountPerDayLimit, 0, type(uint32).max); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + 
limits.exitedEthAmountPerDayLimit = SafeCast.toUint32(_exitedEthAmountPerDayLimit); + _updateAccountingCoreLimits(limits); } - /// @notice Sets the new value for the appearedValidatorsPerDayLimit - /// - /// NB: AccountingOracle reports validators as appeared once they become `pending` - /// (might be not `activated` yet). Thus, this limit should be high enough because consensus layer - /// has no intrinsic churn limit for the amount of `pending` validators (only for `activated` instead). - /// For Lido it depends on the amount of deposits that can be made via DepositSecurityModule daily. - /// - /// @param _appearedValidatorsPerDayLimit new appearedValidatorsPerDayLimit value - function setAppearedValidatorsPerDayLimit(uint256 _appearedValidatorsPerDayLimit) - external - onlyRole(APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.appearedValidatorsPerDayLimit = _appearedValidatorsPerDayLimit; - _updateLimits(limitsList); + /// @notice Sets the new value for the appearedEthAmountPerDayLimit + /// @param _appearedEthAmountPerDayLimit new appearedEthAmountPerDayLimit value + function setAppearedEthAmountPerDayLimit( + uint256 _appearedEthAmountPerDayLimit + ) public onlyRole(APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE) { + _checkLimitValue(_appearedEthAmountPerDayLimit, 0, type(uint32).max); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.appearedEthAmountPerDayLimit = SafeCast.toUint32(_appearedEthAmountPerDayLimit); + _updateAccountingCoreLimits(limits); + } + + /// @notice Sets the new value for the consolidationEthAmountPerDayLimit + /// @param _consolidationEthAmountPerDayLimit new consolidationEthAmountPerDayLimit value + function setConsolidationEthAmountPerDayLimit( + uint256 _consolidationEthAmountPerDayLimit + ) external onlyRole(CONSOLIDATION_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE) { + _checkLimitValue(_consolidationEthAmountPerDayLimit, 0, type(uint32).max); + 
AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.consolidationEthAmountPerDayLimit = SafeCast.toUint32(_consolidationEthAmountPerDayLimit); + _updateAccountingCoreLimits(limits); + } + + /// @notice Sets exited validator ETH amount limiter value. + function setExitedValidatorEthAmountLimit( + uint256 _exitedValidatorEthAmountLimit + ) external onlyRole(EXITED_VALIDATOR_ETH_AMOUNT_LIMIT_MANAGER_ROLE) { + _checkLimitValue(_exitedValidatorEthAmountLimit, 1, type(uint16).max); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.exitedValidatorEthAmountLimit = SafeCast.toUint16(_exitedValidatorEthAmountLimit); + _updateAccountingCoreLimits(limits); } /// @notice Sets the new value for the annualBalanceIncreaseBPLimit /// @param _annualBalanceIncreaseBPLimit new annualBalanceIncreaseBPLimit value - function setAnnualBalanceIncreaseBPLimit(uint256 _annualBalanceIncreaseBPLimit) - external - onlyRole(ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.annualBalanceIncreaseBPLimit = _annualBalanceIncreaseBPLimit; - _updateLimits(limitsList); + function setAnnualBalanceIncreaseBPLimit( + uint256 _annualBalanceIncreaseBPLimit + ) external onlyRole(ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE) { + _checkLimitValue(_annualBalanceIncreaseBPLimit, 0, MAX_BASIS_POINTS); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.annualBalanceIncreaseBPLimit = LimitsListPacker.toBasisPoints(_annualBalanceIncreaseBPLimit); + _updateAccountingCoreLimits(limits); } /// @notice Sets the new value for the simulatedShareRateDeviationBPLimit @@ -304,31 +387,55 @@ contract OracleReportSanityChecker is AccessControlEnumerable { external onlyRole(SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE) { - LimitsList memory limitsList = _limits.unpack(); - limitsList.simulatedShareRateDeviationBPLimit = _simulatedShareRateDeviationBPLimit; - _updateLimits(limitsList); + 
_checkLimitValue(_simulatedShareRateDeviationBPLimit, 0, MAX_BASIS_POINTS); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.simulatedShareRateDeviationBPLimit = LimitsListPacker.toBasisPoints(_simulatedShareRateDeviationBPLimit); + _updateAccountingCoreLimits(limits); } - /// @notice Sets the new value for the maxValidatorExitRequestsPerReport - /// @param _maxValidatorExitRequestsPerReport new maxValidatorExitRequestsPerReport value - function setMaxExitRequestsPerOracleReport(uint256 _maxValidatorExitRequestsPerReport) + /// @notice Sets the new value for the maxBalanceExitRequestedPerReportInEth + /// @param _maxBalanceExitRequestedPerReportInEth new maxBalanceExitRequestedPerReportInEth value + function setMaxBalanceExitRequestedPerReportInEth(uint256 _maxBalanceExitRequestedPerReportInEth) external - onlyRole(MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE) + onlyRole(MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE) { - LimitsList memory limitsList = _limits.unpack(); - limitsList.maxValidatorExitRequestsPerReport = _maxValidatorExitRequestsPerReport; - _updateLimits(limitsList); + _checkLimitValue(_maxBalanceExitRequestedPerReportInEth, 0, type(uint16).max); + OperationalLimitsPacked memory limits = _operationalLimits; + limits.maxBalanceExitRequestedPerReportInEth = SafeCast.toUint16(_maxBalanceExitRequestedPerReportInEth); + _updateOperationalLimits(limits); } - /// @notice Sets the new value for the requestTimestampMargin - /// @param _requestTimestampMargin new requestTimestampMargin value - function setRequestTimestampMargin(uint256 _requestTimestampMargin) + /// @notice Sets the new WC 0x01 max effective balance equivalent weight in ETH + function setMaxEffectiveBalanceWeightWCType01(uint256 _maxEffectiveBalanceWeightWCType01) external - onlyRole(REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE) + onlyRole(MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE) { - LimitsList memory limitsList = _limits.unpack(); - limitsList.requestTimestampMargin 
= _requestTimestampMargin; - _updateLimits(limitsList); + _checkLimitValue(_maxEffectiveBalanceWeightWCType01, 1, type(uint16).max); + OperationalLimitsPacked memory limits = _operationalLimits; + limits.maxEffectiveBalanceWeightWCType01 = SafeCast.toUint16(_maxEffectiveBalanceWeightWCType01); + _updateOperationalLimits(limits); + } + + /// @notice Sets the new WC 0x02 max effective balance equivalent weight in ETH + function setMaxEffectiveBalanceWeightWCType02(uint256 _maxEffectiveBalanceWeightWCType02) + external + onlyRole(MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE) + { + _checkLimitValue(_maxEffectiveBalanceWeightWCType02, 1, type(uint16).max); + OperationalLimitsPacked memory limits = _operationalLimits; + limits.maxEffectiveBalanceWeightWCType02 = SafeCast.toUint16(_maxEffectiveBalanceWeightWCType02); + _updateOperationalLimits(limits); + } + + /// @notice Sets the new value for the requestTimestampMargin + /// @param _requestTimestampMargin new requestTimestampMargin value + function setRequestTimestampMargin( + uint256 _requestTimestampMargin + ) external onlyRole(REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE) { + _checkLimitValue(_requestTimestampMargin, 0, type(uint32).max); + OperationalLimitsPacked memory limits = _operationalLimits; + limits.requestTimestampMargin = SafeCast.toUint32(_requestTimestampMargin); + _updateOperationalLimits(limits); } /// @notice Set max positive token rebase allowed per single oracle report token rebase happens @@ -338,35 +445,35 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// e.g.: 1e6 - 0.1%; 1e9 - 100% /// - passing zero value is prohibited /// - to allow unlimited rebases, pass max uint64, i.e.: type(uint64).max - function setMaxPositiveTokenRebase(uint256 _maxPositiveTokenRebase) - external - onlyRole(MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.maxPositiveTokenRebase = _maxPositiveTokenRebase; - _updateLimits(limitsList); + function 
setMaxPositiveTokenRebase( + uint256 _maxPositiveTokenRebase + ) external onlyRole(MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE) { + _checkLimitValue(_maxPositiveTokenRebase, 1, type(uint64).max); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.maxPositiveTokenRebase = SafeCast.toUint64(_maxPositiveTokenRebase); + _updateAccountingCoreLimits(limits); } /// @notice Sets the new value for the maxItemsPerExtraDataTransaction /// @param _maxItemsPerExtraDataTransaction new maxItemsPerExtraDataTransaction value - function setMaxItemsPerExtraDataTransaction(uint256 _maxItemsPerExtraDataTransaction) - external - onlyRole(MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.maxItemsPerExtraDataTransaction = _maxItemsPerExtraDataTransaction; - _updateLimits(limitsList); + function setMaxItemsPerExtraDataTransaction( + uint256 _maxItemsPerExtraDataTransaction + ) external onlyRole(MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE) { + _checkLimitValue(_maxItemsPerExtraDataTransaction, 0, type(uint16).max); + OperationalLimitsPacked memory limits = _operationalLimits; + limits.maxItemsPerExtraDataTransaction = SafeCast.toUint16(_maxItemsPerExtraDataTransaction); + _updateOperationalLimits(limits); } /// @notice Sets the new value for the max maxNodeOperatorsPerExtraDataItem /// @param _maxNodeOperatorsPerExtraDataItem new maxNodeOperatorsPerExtraDataItem value - function setMaxNodeOperatorsPerExtraDataItem(uint256 _maxNodeOperatorsPerExtraDataItem) - external - onlyRole(MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.maxNodeOperatorsPerExtraDataItem = _maxNodeOperatorsPerExtraDataItem; - _updateLimits(limitsList); + function setMaxNodeOperatorsPerExtraDataItem( + uint256 _maxNodeOperatorsPerExtraDataItem + ) external onlyRole(MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE) { + _checkLimitValue(_maxNodeOperatorsPerExtraDataItem, 0, 
type(uint16).max); + OperationalLimitsPacked memory limits = _operationalLimits; + limits.maxNodeOperatorsPerExtraDataItem = SafeCast.toUint16(_maxNodeOperatorsPerExtraDataItem); + _updateOperationalLimits(limits); } /// @notice Sets the address of the second opinion oracle and clBalanceOraclesErrorUpperBPLimit value @@ -374,30 +481,59 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// If it's zero address — oracle is disabled. /// Default value is zero address. /// @param _clBalanceOraclesErrorUpperBPLimit new clBalanceOraclesErrorUpperBPLimit value - function setSecondOpinionOracleAndCLBalanceUpperMargin(ISecondOpinionOracle _secondOpinionOracle, uint256 _clBalanceOraclesErrorUpperBPLimit) - external - onlyRole(SECOND_OPINION_MANAGER_ROLE) - { - LimitsList memory limitsList = _limits.unpack(); - limitsList.clBalanceOraclesErrorUpperBPLimit = _clBalanceOraclesErrorUpperBPLimit; - _updateLimits(limitsList); + function setSecondOpinionOracleAndCLBalanceUpperMargin( + ISecondOpinionOracle _secondOpinionOracle, + uint256 _clBalanceOraclesErrorUpperBPLimit + ) external onlyRole(SECOND_OPINION_MANAGER_ROLE) { + _checkLimitValue(_clBalanceOraclesErrorUpperBPLimit, 0, MAX_BASIS_POINTS); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.clBalanceOraclesErrorUpperBPLimit = LimitsListPacker.toBasisPoints(_clBalanceOraclesErrorUpperBPLimit); + _updateAccountingCoreLimits(limits); if (_secondOpinionOracle != secondOpinionOracle) { secondOpinionOracle = ISecondOpinionOracle(_secondOpinionOracle); emit SecondOpinionOracleChanged(_secondOpinionOracle); } } - /// @notice Sets the initial slashing and penalties amounts - /// @param _initialSlashingAmountPWei - initial slashing amount (in PWei) - /// @param _inactivityPenaltiesAmountPWei - penalties amount (in PWei) - function setInitialSlashingAndPenaltiesAmount(uint256 _initialSlashingAmountPWei, uint256 _inactivityPenaltiesAmountPWei) + /// @notice Sets the max allowed CL balance 
decrease in basis points + /// @param _maxCLBalanceDecreaseBP max CL balance decrease over the sliding window (in BP, e.g. 360 = 3.6%) + function setMaxCLBalanceDecreaseBP(uint256 _maxCLBalanceDecreaseBP) external - onlyRole(INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE) + onlyRole(MAX_CL_BALANCE_DECREASE_MANAGER_ROLE) { - LimitsList memory limitsList = _limits.unpack(); - limitsList.initialSlashingAmountPWei = _initialSlashingAmountPWei; - limitsList.inactivityPenaltiesAmountPWei = _inactivityPenaltiesAmountPWei; - _updateLimits(limitsList); + _checkLimitValue(_maxCLBalanceDecreaseBP, 0, MAX_BASIS_POINTS); + AccountingCoreLimitsPacked memory limits = _accountingCoreLimits; + limits.maxCLBalanceDecreaseBP = LimitsListPacker.toBasisPoints(_maxCLBalanceDecreaseBP); + _updateAccountingCoreLimits(limits); + } + + /// @notice One-time migration: seeds initial snapshots into reportData + /// so that the sliding-window CL decrease check has a valid starting point. + /// @dev Permissionless by design: after the first successful call, further calls revert. + function migrateBaselineSnapshot() external { + if (reportData.length != 0) revert MigrationAlreadyDone(); + + address lidoAddr = LIDO_LOCATOR.lido(); + uint256 lidoVersion = IVersioned(lidoAddr).getContractVersion(); + if (lidoVersion != 4) revert UnexpectedLidoVersion(lidoVersion, 4); + + (uint256 migrationCLValidatorsBalance, uint256 migrationCLPendingBalance,, uint256 migrationDeposits) = ILido(lidoAddr) + .getBalanceStats(); + uint256 migrationCLBalance = migrationCLValidatorsBalance + migrationCLPendingBalance; + uint256 migrationCLWithdrawals = MAX_WITHDRAWALS_ETH_BY_CHURN_LIMIT_PER_REPORT; + // Initialize vault state: vault is not drained during migration, + // so after-transfer balance equals current vault balance + _lastVaultBalanceAfterTransfer = LIDO_LOCATOR.withdrawalVault().balance; + + // The decrease formula uses baseline report B[X-k] and sums flows from reports [X-k+1..X]. 
+ // To include migration-time deposits/withdrawals without any special-case branch in formula code: + // 1) store pure baseline point with zero flows; + // 2) store bootstrap flow chunk at the same CL balance right after baseline. + uint256 migrationReportTimestamp = _lastReportTimestamp; + _addReportData(migrationReportTimestamp, migrationCLBalance, 0, 0); + _addReportData(migrationReportTimestamp, migrationCLBalance, migrationDeposits, migrationCLWithdrawals); + + emit BaselineSnapshotMigrated(migrationCLBalance, migrationDeposits, migrationCLWithdrawals); } /// @notice Returns the allowed ETH amount that might be taken from the withdrawal vault and EL @@ -427,12 +563,7 @@ contract OracleReportSanityChecker is AccessControlEnumerable { uint256 _sharesRequestedToBurn, uint256 _etherToLockForWithdrawals, uint256 _newSharesToBurnForWithdrawals - ) external view returns ( - uint256 withdrawals, - uint256 elRewards, - uint256 sharesFromWQToBurn, - uint256 sharesToBurn - ) { + ) external view returns (uint256 withdrawals, uint256 elRewards, uint256 sharesFromWQToBurn, uint256 sharesToBurn) { TokenRebaseLimiterData memory tokenRebaseLimiter = PositiveTokenRebaseLimiter.initLimiterState( getMaxPositiveTokenRebase(), _preInternalEther, @@ -466,91 +597,199 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// @notice Applies sanity checks to the accounting params of Lido's oracle report /// WARNING. The function has side effects and modifies the state of the contract. - /// It's needed to keep information about exited validators counts and negative rebase values over time. - /// The function called from Lido contract that uses the 'old' Solidity version (0.4.24) and will do a correct - /// call to this method even it's declared as "view" in interface there. + /// It's needed to keep CL balance snapshots for the balance decrease check over a sliding window. 
/// @param _timeElapsed time elapsed since the previous oracle report - /// @param _preCLBalance sum of all Lido validators' balances on the Consensus Layer before the - /// current oracle report (NB: also include the initial balance of newly appeared validators) - /// @param _postCLBalance sum of all Lido validators' balances on the Consensus Layer after the - /// current oracle report + /// @param _preCLValidatorsBalance sum of all Lido validators' balances on the Consensus Layer + /// before the current oracle report + /// @param _preCLPendingBalance pending deposits balance on the Consensus Layer before the current oracle report + /// @param _postCLValidatorsBalance sum of all Lido validators' balances on the Consensus Layer + /// after the current oracle report + /// @param _postCLPendingBalance pending deposits balance on the Consensus Layer after the current oracle report /// @param _withdrawalVaultBalance withdrawal vault balance on Execution Layer for the report reference slot /// @param _elRewardsVaultBalance el rewards vault balance on Execution Layer for the report reference slot /// @param _sharesRequestedToBurn shares requested to burn for the report reference slot - /// @param _preCLValidators Lido-participating validators on the CL side before the current oracle report - /// @param _postCLValidators Lido-participating validators on the CL side after the current oracle report + /// @param _deposits deposits to the Beacon Chain since the previous oracle report in Wei + /// @param _withdrawalsVaultTransfer ETH amount transferred from withdrawal vault this report function checkAccountingOracleReport( uint256 _timeElapsed, - uint256 _preCLBalance, - uint256 _postCLBalance, + uint256 _preCLValidatorsBalance, + uint256 _preCLPendingBalance, + uint256 _postCLValidatorsBalance, + uint256 _postCLPendingBalance, uint256 _withdrawalVaultBalance, uint256 _elRewardsVaultBalance, uint256 _sharesRequestedToBurn, - uint256 _preCLValidators, - uint256 
_postCLValidators + uint256 _deposits, + uint256 _withdrawalsVaultTransfer ) external { if (msg.sender != ACCOUNTING_ADDRESS) { revert CalledNotFromAccounting(); } - LimitsList memory limitsList = _limits.unpack(); - uint256 refSlot = IBaseOracle(LIDO_LOCATOR.accountingOracle()).getLastProcessingRefSlot(); - - address withdrawalVault = LIDO_LOCATOR.withdrawalVault(); // 1. Withdrawals vault reported balance - _checkWithdrawalVaultBalance(withdrawalVault.balance, _withdrawalVaultBalance); - - address elRewardsVault = LIDO_LOCATOR.elRewardsVault(); + _checkWithdrawalVaultBalance(LIDO_LOCATOR.withdrawalVault().balance, _withdrawalVaultBalance); // 2. EL rewards vault reported balance - _checkELRewardsVaultBalance(elRewardsVault.balance, _elRewardsVaultBalance); - + _checkELRewardsVaultBalance(LIDO_LOCATOR.elRewardsVault().balance, _elRewardsVaultBalance); // 3. Burn requests _checkSharesRequestedToBurn(_sharesRequestedToBurn); + CLBalanceChangeCheckParams memory checkParams = CLBalanceChangeCheckParams({ + timeElapsed: _timeElapsed, + preCLValidatorsBalance: _preCLValidatorsBalance, + preCLPendingBalance: _preCLPendingBalance, + postCLValidatorsBalance: _postCLValidatorsBalance, + postCLPendingBalance: _postCLPendingBalance, + deposits: _deposits + }); + _checkAccountingOracleReportCLBalances( + checkParams, + _withdrawalVaultBalance, + _withdrawalsVaultTransfer + ); + } + function _checkAccountingOracleReportCLBalances( + CLBalanceChangeCheckParams memory _checkParams, + uint256 _withdrawalVaultBalance, + uint256 _withdrawalsVaultTransfer + ) internal { + AccountingCoreLimitsPacked memory limitsList = _accountingCoreLimits; + CLBalanceDecreaseCheckParams memory decreaseCheckParams; + decreaseCheckParams.maxCLBalanceDecreaseBP = limitsList.maxCLBalanceDecreaseBP; + decreaseCheckParams.clBalanceOraclesErrorUpperBPLimit = limitsList.clBalanceOraclesErrorUpperBPLimit; + decreaseCheckParams.preCLBalance = + _checkParams.preCLValidatorsBalance + 
_checkParams.preCLPendingBalance + _checkParams.deposits; + decreaseCheckParams.postCLBalance = _checkParams.postCLValidatorsBalance + _checkParams.postCLPendingBalance; + decreaseCheckParams.withdrawalVaultBalance = _withdrawalVaultBalance; + decreaseCheckParams.withdrawalsVaultTransfer = _withdrawalsVaultTransfer; + decreaseCheckParams.deposits = _checkParams.deposits; + decreaseCheckParams.timeElapsed = _checkParams.timeElapsed; + uint256 clWithdrawals = _getCLWithdrawals(_withdrawalVaultBalance); + _checkWithdrawalsVaultTransfer(_withdrawalVaultBalance, _withdrawalsVaultTransfer); + _checkCLPendingBalanceIncrease(limitsList, _checkParams, clWithdrawals); // 4. Consensus Layer balance decrease - _checkCLBalanceDecrease(limitsList, _preCLBalance, - _postCLBalance, _withdrawalVaultBalance, _postCLValidators, refSlot); - + _checkCLBalanceDecrease(decreaseCheckParams, clWithdrawals); // 5. Consensus Layer annual balances increase - _checkAnnualBalancesIncrease(limitsList, _preCLBalance, _postCLBalance, _timeElapsed); + _checkAnnualBalancesIncrease( + limitsList, + decreaseCheckParams.preCLBalance, + decreaseCheckParams.postCLBalance, + _checkParams.timeElapsed + ); + _finalizePostReportState(_withdrawalVaultBalance, _withdrawalsVaultTransfer); + } + + /// @notice Check total pending CL balance from the current report against protocol state and growth limits. 
+ function checkCLPendingBalanceIncrease( + uint256 _timeElapsed, + uint256 _preCLValidatorsBalance, + uint256 _preCLPendingBalance, + uint256 _postCLValidatorsBalance, + uint256 _postCLPendingBalance, + uint256 _withdrawalVaultBalance, + uint256 _deposits + ) external view { + CLBalanceChangeCheckParams memory checkParams = CLBalanceChangeCheckParams({ + timeElapsed: _timeElapsed, + preCLValidatorsBalance: _preCLValidatorsBalance, + preCLPendingBalance: _preCLPendingBalance, + postCLValidatorsBalance: _postCLValidatorsBalance, + postCLPendingBalance: _postCLPendingBalance, + deposits: _deposits + }); + _checkCLPendingBalanceIncrease(_accountingCoreLimits, checkParams, _getCLWithdrawals(_withdrawalVaultBalance)); + } - // 6. Appeared validators increase - if (_postCLValidators > _preCLValidators) { - _checkAppearedValidatorsChurnLimit(limitsList, (_postCLValidators - _preCLValidators), _timeElapsed); + /// @notice Check that per-module validators CL balances in wei are consistent with reported total validators balance. + function checkCLBalancesConsistency( + uint256[] calldata _stakingModuleIdsWithUpdatedBalance, + uint256[] calldata _validatorBalancesWeiByStakingModule, + uint256 _clValidatorsBalanceWei + ) external pure { + _checkCLBalancesConsistency( + _stakingModuleIdsWithUpdatedBalance, + _validatorBalancesWeiByStakingModule, + _clValidatorsBalanceWei + ); + } + + /// @notice Check per-module validators balances consistency and global CL growth budget derived from protocol pending, all in wei. 
+ function checkModuleAndCLBalancesChangeRates( + uint256[] calldata _stakingModuleIdsWithUpdatedBalance, + uint256[] calldata _validatorBalancesWeiByStakingModule, + uint256 _preCLValidatorsBalanceWei, + uint256 _preCLPendingBalanceWei, + uint256 _postCLValidatorsBalanceWei, + uint256 _postCLPendingBalanceWei, + uint256 _depositsWei, + uint256 _timeElapsed + ) external view { + CLBalanceChangeCheckParams memory checkParams = CLBalanceChangeCheckParams({ + timeElapsed: _timeElapsed, + preCLValidatorsBalance: _preCLValidatorsBalanceWei, + preCLPendingBalance: _preCLPendingBalanceWei, + postCLValidatorsBalance: _postCLValidatorsBalanceWei, + postCLPendingBalance: _postCLPendingBalanceWei, + deposits: _depositsWei + }); + _checkCLBalancesConsistency( + _stakingModuleIdsWithUpdatedBalance, + _validatorBalancesWeiByStakingModule, + checkParams.postCLValidatorsBalance + ); + + // StakingRouter migration seeds per-module validators balances from active validators count + // using the max effective balance, so those migration values may be higher than the first + // oracle-reported balances. Skip the module validators balance increase check until the + // first report overwrites the migrated accounting state with the actual per-module values. 
+ if (!_isPostMigrationFirstReportDone) { + return; } + + _checkModuleValidatorsBalanceIncrease( + IStakingRouter(LIDO_LOCATOR.stakingRouter()), + _accountingCoreLimits, + _stakingModuleIdsWithUpdatedBalance, + _validatorBalancesWeiByStakingModule, + checkParams + ); } /// @notice Applies sanity checks to the number of validator exit requests supplied to ValidatorExitBusOracle - /// @param _exitRequestsCount Number of validator exit requests supplied per oracle report - function checkExitBusOracleReport(uint256 _exitRequestsCount) + /// @notice Checks the total balance of validator exit requests supplied per oracle report + /// @param _maxBalanceExitRequestedPerReportInEth Total balance in ETH of all validators requested to exit in the oracle report + function checkExitBusOracleReport(uint256 _maxBalanceExitRequestedPerReportInEth) external view { - uint256 limit = _limits.unpack().maxValidatorExitRequestsPerReport; - if (_exitRequestsCount > limit) { - revert IncorrectNumberOfExitRequestsPerReport(limit); + uint256 limit = _operationalLimits.maxBalanceExitRequestedPerReportInEth; + if (_maxBalanceExitRequestedPerReportInEth > limit) { + revert IncorrectSumOfExitBalancePerReport(_maxBalanceExitRequestedPerReportInEth); } } - /// @notice Check rate of exited validators per day - /// @param _exitedValidatorsCount Number of validator exited per oracle report - function checkExitedValidatorsRatePerDay(uint256 _exitedValidatorsCount) - external - view - { - uint256 exitedValidatorsLimit = _limits.unpack().exitedValidatorsPerDayLimit; - if (_exitedValidatorsCount > exitedValidatorsLimit) { - revert ExitedValidatorsLimitExceeded(exitedValidatorsLimit, _exitedValidatorsCount); - } + /// @notice Check exited ETH amount rate per day based on exited validators count. + /// @param _newlyExitedValidatorsCount Number of newly exited validators since previous report. + /// @param _timeElapsed Time elapsed in seconds since previous report. 
+ function checkExitedEthAmountPerDay( + uint256 _newlyExitedValidatorsCount, + uint256 _timeElapsed + ) external view { + AccountingCoreLimitsPacked memory limitsList = _accountingCoreLimits; + uint256 exitedEthAmount = _newlyExitedValidatorsCount * uint256(limitsList.exitedValidatorEthAmountLimit) * 1 ether; + uint256 exitedEthAmountPerDay = _normalizePerDay(exitedEthAmount, _timeElapsed); + _checkExitedEthAmountPerDay(limitsList, exitedEthAmountPerDay); + } + + /// @notice Check appeared ETH amount rate per day. + /// @param _appearedEthAmountPerDay Appeared ETH amount per day in Wei. + function checkAppearedEthAmountPerDay(uint256 _appearedEthAmountPerDay) external view { + _checkAppearedEthAmountPerDay(_accountingCoreLimits, _appearedEthAmountPerDay); } /// @notice check the number of node operators reported per extra data item in the accounting oracle report. /// @param _itemIndex Index of item in extra data /// @param _nodeOperatorsCount Number of validator exit requests supplied per oracle report - function checkNodeOperatorsPerExtraDataItemCount(uint256 _itemIndex, uint256 _nodeOperatorsCount) - external - view - { - uint256 limit = _limits.unpack().maxNodeOperatorsPerExtraDataItem; + function checkNodeOperatorsPerExtraDataItemCount(uint256 _itemIndex, uint256 _nodeOperatorsCount) external view { + uint256 limit = _operationalLimits.maxNodeOperatorsPerExtraDataItem; if (_nodeOperatorsCount > limit) { revert TooManyNodeOpsPerExtraDataItem(_itemIndex, _nodeOperatorsCount); } @@ -558,11 +797,8 @@ contract OracleReportSanityChecker is AccessControlEnumerable { /// @notice Check the number of extra data list items per transaction in the accounting oracle report. 
/// @param _extraDataListItemsCount Number of items per single transaction in the accounting oracle report - function checkExtraDataItemsCountPerTransaction(uint256 _extraDataListItemsCount) - external - view - { - uint256 limit = _limits.unpack().maxItemsPerExtraDataTransaction; + function checkExtraDataItemsCountPerTransaction(uint256 _extraDataListItemsCount) external view { + uint256 limit = _operationalLimits.maxItemsPerExtraDataTransaction; if (_extraDataListItemsCount > limit) { revert TooManyItemsPerExtraDataTransaction(limit, _extraDataListItemsCount); } @@ -574,11 +810,8 @@ contract OracleReportSanityChecker is AccessControlEnumerable { function checkWithdrawalQueueOracleReport( uint256 _lastFinalizableRequestId, uint256 _reportTimestamp - ) - external - view - { - LimitsList memory limitsList = _limits.unpack(); + ) external view { + OperationalLimitsPacked memory limitsList = _operationalLimits; address withdrawalQueue = LIDO_LOCATOR.withdrawalQueue(); _checkLastFinalizableId(limitsList, withdrawalQueue, _lastFinalizableRequestId, _reportTimestamp); @@ -597,7 +830,7 @@ contract OracleReportSanityChecker is AccessControlEnumerable { uint256 _sharesToBurnForWithdrawals, uint256 _simulatedShareRate ) external view { - LimitsList memory limitsList = _limits.unpack(); + AccountingCoreLimitsPacked memory limitsList = _accountingCoreLimits; // Pretending that withdrawals were not processed // virtually return locked ether back to `_postTotalPooledEther` @@ -610,6 +843,226 @@ contract OracleReportSanityChecker is AccessControlEnumerable { ); } + function _checkCLBalancesConsistency( + uint256[] calldata _stakingModuleIdsWithUpdatedBalance, + uint256[] calldata _validatorBalancesWeiByStakingModule, + uint256 _clValidatorsBalanceWei + ) internal pure { + uint256 modulesCount = _stakingModuleIdsWithUpdatedBalance.length; + if (modulesCount != _validatorBalancesWeiByStakingModule.length) { + revert InvalidClBalancesData(); + } + + uint256 validatorBalancesSum; + 
for (uint256 i = 0; i < modulesCount;) { + validatorBalancesSum += _validatorBalancesWeiByStakingModule[i]; + unchecked { + ++i; + } + } + + if (validatorBalancesSum != _clValidatorsBalanceWei) { + revert InconsistentValidatorsBalanceByModule(_clValidatorsBalanceWei, validatorBalancesSum); + } + } + + function _checkExitedEthAmountPerDay( + AccountingCoreLimitsPacked memory _limitsList, + uint256 _exitedEthAmountPerDay + ) internal pure { + uint256 exitedEthLimitWithConsolidation = + (uint256(_limitsList.exitedEthAmountPerDayLimit) + uint256(_limitsList.consolidationEthAmountPerDayLimit)) * + 1 ether; + if (_exitedEthAmountPerDay > exitedEthLimitWithConsolidation) { + revert ExitedEthAmountPerDayLimitExceeded(exitedEthLimitWithConsolidation, _exitedEthAmountPerDay); + } + } + + function _checkAppearedEthAmountPerDay( + AccountingCoreLimitsPacked memory _limitsList, + uint256 _appearedEthAmountPerDay + ) internal pure { + uint256 appearedEthLimitWithConsolidation = + (uint256(_limitsList.appearedEthAmountPerDayLimit) + uint256(_limitsList.consolidationEthAmountPerDayLimit)) * + 1 ether; + if (_appearedEthAmountPerDay > appearedEthLimitWithConsolidation) { + revert AppearedEthAmountPerDayLimitExceeded(appearedEthLimitWithConsolidation, _appearedEthAmountPerDay); + } + } + + function _normalizePerDay(uint256 _amount, uint256 _timeElapsed) internal pure returns (uint256) { + return (_amount * SECONDS_PER_DAY) / _getTimeElapsedForRateNormalization(_timeElapsed); + } + + function _getTimeElapsedForRateNormalization(uint256 _timeElapsed) internal pure returns (uint256) { + return _timeElapsed == 0 ? 1 : _timeElapsed; + } + + function _getTimeElapsedForAllowanceChecks(uint256 _timeElapsed) internal pure returns (uint256) { + return _timeElapsed == 0 ? 
DEFAULT_TIME_ELAPSED : _timeElapsed; + } + + function _calculateAmountForPeriod( + uint256 _amountPerDay, + uint256 _effectiveTimeElapsed + ) internal pure returns (uint256) { + return (_amountPerDay * _effectiveTimeElapsed) / SECONDS_PER_DAY; + } + + function _calculateAprAndGiftSafetyCap( + uint256 _preCLValidatorsBalance, + uint256 _annualBalanceIncreaseMultiplier + ) internal pure returns (uint256) { + return (_preCLValidatorsBalance * _annualBalanceIncreaseMultiplier) / ANNUAL_BALANCE_INCREASE_DENOMINATOR; + } + + function _checkCLPendingBalanceAndCalculateActivatedBalanceWithGap( + AccountingCoreLimitsPacked memory _limitsList, + CLBalanceChangeCheckParams memory _checkParams + ) internal pure returns (ActivationBalanceCheckResult memory result) { + result.effectiveTimeElapsed = _getTimeElapsedForAllowanceChecks(_checkParams.timeElapsed); + + uint256 pendingBalanceWithDeposits = _checkParams.preCLPendingBalance + _checkParams.deposits; + if (_checkParams.postCLPendingBalance > pendingBalanceWithDeposits) { + revert IncorrectTotalPendingBalance(pendingBalanceWithDeposits, _checkParams.postCLPendingBalance); + } + + uint256 activatedBalance = pendingBalanceWithDeposits - _checkParams.postCLPendingBalance; + uint256 appearedEthLimitPerPeriod = _calculateAmountForPeriod( + uint256(_limitsList.appearedEthAmountPerDayLimit) * 1 ether, + result.effectiveTimeElapsed + ); + if (activatedBalance > appearedEthLimitPerPeriod) { + revert IncorrectTotalActivatedBalance(appearedEthLimitPerPeriod, activatedBalance); + } + + result.activatedBalanceWithGap = + activatedBalance + + _calculateAprAndGiftSafetyCap( + _checkParams.preCLValidatorsBalance, + uint256(_limitsList.annualBalanceIncreaseBPLimit) * result.effectiveTimeElapsed + ); + } + + function _checkCLPendingBalanceIncrease( + AccountingCoreLimitsPacked memory _limitsList, + CLBalanceChangeCheckParams memory _checkParams, + uint256 _clWithdrawals + ) internal pure { + if (_clWithdrawals > 
_checkParams.preCLValidatorsBalance) { + revert InvalidClBalancesData(); + } + + ActivationBalanceCheckResult memory activationCheckResult = _checkCLPendingBalanceAndCalculateActivatedBalanceWithGap( + _limitsList, + _checkParams + ); + uint256 preCLValidatorsBalanceAfterWithdrawals = _checkParams.preCLValidatorsBalance - _clWithdrawals; + if (_checkParams.postCLValidatorsBalance > preCLValidatorsBalanceAfterWithdrawals) { + uint256 validatorsBalanceIncrease = + _checkParams.postCLValidatorsBalance - preCLValidatorsBalanceAfterWithdrawals; + if (validatorsBalanceIncrease > activationCheckResult.activatedBalanceWithGap) { + revert IncorrectTotalCLBalanceIncrease( + activationCheckResult.activatedBalanceWithGap, + validatorsBalanceIncrease + ); + } + } + } + + function _checkModuleValidatorsBalanceIncrease( + IStakingRouter _stakingRouter, + AccountingCoreLimitsPacked memory _limitsList, + uint256[] calldata _stakingModuleIdsWithUpdatedBalance, + uint256[] calldata _validatorBalancesWeiByStakingModule, + CLBalanceChangeCheckParams memory _checkParams + ) internal view { + ActivationBalanceCheckResult memory activationCheckResult = _checkCLPendingBalanceAndCalculateActivatedBalanceWithGap( + _limitsList, + _checkParams + ); + + if (_checkParams.postCLValidatorsBalance > _checkParams.preCLValidatorsBalance) { + uint256 validatorsBalanceIncrease = + _checkParams.postCLValidatorsBalance - _checkParams.preCLValidatorsBalance; + if (validatorsBalanceIncrease > activationCheckResult.activatedBalanceWithGap) { + revert IncorrectTotalCLBalanceIncrease( + activationCheckResult.activatedBalanceWithGap, + validatorsBalanceIncrease + ); + } + } + + uint256 totalActivatedInClByModules = _calculateTotalActivatedInClByModules( + _stakingRouter, + _stakingModuleIdsWithUpdatedBalance, + _validatorBalancesWeiByStakingModule + ); + + uint256 consolidationLimitPerPeriodWei = _calculateAmountForPeriod( + uint256(_limitsList.consolidationEthAmountPerDayLimit) * 1 ether, + 
activationCheckResult.effectiveTimeElapsed + ); + uint256 totalActivatedInClByModulesLimit = + activationCheckResult.activatedBalanceWithGap + consolidationLimitPerPeriodWei; + if (totalActivatedInClByModules > totalActivatedInClByModulesLimit) { + revert IncorrectTotalModuleValidatorsBalanceIncrease( + totalActivatedInClByModulesLimit, + totalActivatedInClByModules + ); + } + } + + function _calculateTotalActivatedInClByModules( + IStakingRouter _stakingRouter, + uint256[] calldata _stakingModuleIdsWithUpdatedBalance, + uint256[] calldata _validatorBalancesWeiByStakingModule + ) internal view returns (uint256 totalActivatedInClByModules) { + uint256 modulesCount = _stakingModuleIdsWithUpdatedBalance.length; + for (uint256 i = 0; i < modulesCount;) { + (bool hasPreviousAccounting, uint64 previousModuleValidatorsBalanceGwei,) = + _getModuleAccountingState(_stakingRouter, _stakingModuleIdsWithUpdatedBalance[i]); + uint256 previousModuleValidatorsBalanceWei = uint256(previousModuleValidatorsBalanceGwei) * 1 gwei; + // Skip module-delta aggregation until the module has previous accounting baseline. + if (hasPreviousAccounting && _validatorBalancesWeiByStakingModule[i] > previousModuleValidatorsBalanceWei) { + totalActivatedInClByModules += + _validatorBalancesWeiByStakingModule[i] - previousModuleValidatorsBalanceWei; + } + + unchecked { + ++i; + } + } + } + + /// @notice Returns stored module accounting state and whether it can be used as previous baseline in sanity checks. + /// @dev All modules existing at release activation get their initial accounting baseline via StakingRouter migration. + /// @dev Modules added after the release have no previous baseline in the first report, so module-delta + /// aggregation is skipped for them until `reportValidatorBalancesByStakingModule(...)` seeds their accounting state. + /// @param _stakingRouter StakingRouter contract used as the source of module accounting state. + /// @param _moduleId Staking module id. 
+ /// @return hasPreviousAccounting True if previous accounting baseline is available for sanity checks. + /// @return previousValidatorsBalanceGwei Previous module validators balance in gwei. + /// @return exitedValidatorsCount Previous module exited validators count. + function _getModuleAccountingState( + IStakingRouter _stakingRouter, + uint256 _moduleId + ) + internal + view + returns ( + bool hasPreviousAccounting, + uint64 previousValidatorsBalanceGwei, + uint64 exitedValidatorsCount + ) + { + (previousValidatorsBalanceGwei, exitedValidatorsCount) = + _stakingRouter.getStakingModuleStateAccounting(_moduleId); + hasPreviousAccounting = + previousValidatorsBalanceGwei != 0 || + exitedValidatorsCount != 0; + } + function _checkWithdrawalVaultBalance( uint256 _actualWithdrawalVaultBalance, uint256 _reportedWithdrawalVaultBalance @@ -636,97 +1089,186 @@ contract OracleReportSanityChecker is AccessControlEnumerable { } } - function _addReportData(uint256 _timestamp, uint256 _exitedValidatorsCount, uint256 _negativeCLRebase) internal { - reportData.push(ReportData( - SafeCast.toUint64(_timestamp), - SafeCast.toUint64(_exitedValidatorsCount), - SafeCast.toUint128(_negativeCLRebase) - )); + function _addReportData( + uint256 _timestamp, + uint256 _clBalance, + uint256 _deposits, + uint256 _clWithdrawals + ) internal { + reportData.push( + ReportData({ + timestamp: SafeCast.toUint64(_timestamp), + clBalance: SafeCast.toUint128(_clBalance), + deposits: SafeCast.toUint128(_deposits), + clWithdrawals: SafeCast.toUint128(_clWithdrawals) + }) + ); } - function _sumNegativeRebasesNotOlderThan(uint256 _timestamp) internal view returns (uint256) { - uint256 sum; - for (int256 index = int256(reportData.length) - 1; index >= 0; index--) { - if (reportData[uint256(index)].timestamp > SafeCast.toUint64(_timestamp)) { - sum += reportData[uint256(index)].negativeCLRebaseWei; - } else { - break; + function _checkCLBalanceDecrease( + CLBalanceDecreaseCheckParams memory _checkParams, 
+ uint256 _clWithdrawals + ) internal { + // Compute actual CL withdrawals for this period: + // clWithdrawals = current vault balance - vault balance after last report's transfer + uint256 reportTimestamp = _lastReportTimestamp + _checkParams.timeElapsed; + _addReportData(reportTimestamp, _checkParams.postCLBalance, _checkParams.deposits, _clWithdrawals); + _lastReportTimestamp = reportTimestamp; + + // If the CL balance didn't decrease accounting for withdrawals, skip the window check + if (_checkParams.preCLBalance <= _checkParams.postCLBalance) return; + if (_checkParams.preCLBalance - _checkParams.postCLBalance <= _clWithdrawals) return; + + uint256 len = reportData.length; + // Need at least two snapshots to build a window: baseline B[X-k] and current point B[X]. + // With migration we seed them upfront (baseline + bootstrap flow chunk), so checks work immediately. + // Without migration this still works, but the very first report cannot be checked and pre-deploy + // state is not part of the window until enough post-deploy snapshots are accumulated. 
+ if (len < 2) return; + + (uint256 actualCLBalanceDiff, uint256 maxAllowedCLBalanceDiff) = _calcWindowDiff( + _checkParams.maxCLBalanceDecreaseBP, + _checkParams.postCLBalance, + len + ); + + if (actualCLBalanceDiff == 0) return; + uint256 refSlot = IBaseOracle(LIDO_LOCATOR.accountingOracle()).getLastProcessingRefSlot(); + + if (actualCLBalanceDiff > maxAllowedCLBalanceDiff) { + if (address(secondOpinionOracle) == address(0)) { + revert IncorrectCLBalanceDecrease(actualCLBalanceDiff, maxAllowedCLBalanceDiff); } + _askSecondOpinion( + refSlot, + _checkParams.postCLBalance, + _checkParams.withdrawalVaultBalance, + _checkParams.clBalanceOraclesErrorUpperBPLimit + ); + return; } - return sum; + + emit NegativeCLRebaseAccepted( + refSlot, + _checkParams.postCLBalance, + actualCLBalanceDiff, + maxAllowedCLBalanceDiff + ); } - function _exitedValidatorsAtTimestamp(uint256 _timestamp) internal view returns (uint256) { - for (int256 index = int256(reportData.length) - 1; index >= 0; index--) { - if (reportData[uint256(index)].timestamp <= SafeCast.toUint64(_timestamp)) { - return reportData[uint256(index)].totalExitedValidators; - } + function _getCLWithdrawals(uint256 _withdrawalVaultBalance) internal view returns (uint256) { + if (_withdrawalVaultBalance < _lastVaultBalanceAfterTransfer) { + revert IncorrectCLWithdrawalsVaultBalance(_withdrawalVaultBalance, _lastVaultBalanceAfterTransfer); } - return 0; + return _withdrawalVaultBalance - _lastVaultBalanceAfterTransfer; } - function _checkCLBalanceDecrease( - LimitsList memory _limitsList, - uint256 _preCLBalance, - uint256 _postCLBalance, + function _checkWithdrawalsVaultTransfer( uint256 _withdrawalVaultBalance, - uint256 _postCLValidators, - uint256 _refSlot - ) internal { - uint256 reportTimestamp = GENESIS_TIME + _refSlot * SECONDS_PER_SLOT; + uint256 _withdrawalsVaultTransfer + ) internal pure { + // In the current Accounting flow `withdrawalsVaultTransfer` comes from `smoothenTokenRebase()`, + // where it is capped 
by `_withdrawalVaultBalance`, so the subtraction below cannot underflow. + // Keep this explicit guard anyway because `checkAccountingOracleReport` still receives it as an external input. + if (_withdrawalsVaultTransfer > _withdrawalVaultBalance) { + revert IncorrectWithdrawalsVaultTransfer(_withdrawalVaultBalance, _withdrawalsVaultTransfer); + } + } - // Checking exitedValidators against StakingRouter - StakingRouter stakingRouter = StakingRouter(payable(LIDO_LOCATOR.stakingRouter())); - uint256[] memory ids = stakingRouter.getStakingModuleIds(); + /// @notice Finalizes sanity-check state after a successful accounting report. + /// @dev Stores the withdrawals vault balance after the current report transfer so the next report can derive + /// actual CL withdrawals as `current vault balance - last vault balance after transfer`. + /// @dev Marks the post-migration first report as completed so subsequent reports stop skipping + /// `_checkModuleValidatorsBalanceIncrease(...)`; this is needed because StakingRouter migration can seed + /// per-module validators balances above the first oracle-reported values. + /// @param _withdrawalVaultBalance Withdrawal vault balance reported for the current report, before transfer. + /// @param _withdrawalsVaultTransfer ETH amount transferred from the withdrawal vault during the current report. 
+ function _finalizePostReportState( + uint256 _withdrawalVaultBalance, + uint256 _withdrawalsVaultTransfer + ) internal { + _lastVaultBalanceAfterTransfer = _withdrawalVaultBalance - _withdrawalsVaultTransfer; + _isPostMigrationFirstReportDone = true; + } - uint256 stakingRouterExitedValidators; - for (uint256 i = 0; i < ids.length; i++) { - StakingRouter.StakingModule memory module = stakingRouter.getStakingModule(ids[i]); - stakingRouterExitedValidators += module.exitedValidatorsCount; + function _calcWindowDiff( + uint256 _maxDecreaseBP, + uint256 _postCLBalance, + uint256 _reportCount + ) internal view returns (uint256 actualCLBalanceDiff, uint256 maxAllowedCLBalanceDiff) { + // Window formula: + // adjustedBase = B[baseline] + sum(deposits) - sum(clWithdrawals) + // actualDiff = abs(B[baseline] - B[current]) + // maxAllowed = adjustedBase * limitBP / 10_000 + uint256 lastIndex = _reportCount - 1; + uint256 lastTimestamp = reportData[lastIndex].timestamp; + uint256 windowStart = lastTimestamp > CL_BALANCE_WINDOW ? lastTimestamp - CL_BALANCE_WINDOW : 0; + uint256 baselineIndex = _findWindowStartIndex(lastIndex, windowStart); + + uint256 baselineBalance = reportData[baselineIndex].clBalance; + actualCLBalanceDiff = baselineBalance > _postCLBalance + ? 
baselineBalance - _postCLBalance + : _postCLBalance - baselineBalance; + + uint256 totalDeposits; + uint256 totalCLWithdrawals; + for (uint256 i = baselineIndex + 1; i <= lastIndex; ++i) { + totalDeposits += reportData[i].deposits; + totalCLWithdrawals += reportData[i].clWithdrawals; } - if (_preCLBalance <= _postCLBalance + _withdrawalVaultBalance) { - _addReportData(reportTimestamp, stakingRouterExitedValidators, 0); - // If the CL balance is not decreased, we don't need to check anything here - return; + uint256 adjustedBase = baselineBalance + totalDeposits; + if (adjustedBase < totalCLWithdrawals) { + revert IncorrectCLBalanceDecreaseWindowData(baselineBalance, totalDeposits, totalCLWithdrawals); } - _addReportData(reportTimestamp, stakingRouterExitedValidators, _preCLBalance - (_postCLBalance + _withdrawalVaultBalance)); + adjustedBase -= totalCLWithdrawals; - // NOTE. Values of 18 and 54 days are taken from spec. Check the details here - // https://github.com/lidofinance/lido-improvement-proposals/blob/develop/LIPS/lip-23.md - uint256 negativeCLRebaseSum = _sumNegativeRebasesNotOlderThan(reportTimestamp - 18 days); - uint256 maxAllowedCLRebaseNegativeSum = - _limitsList.initialSlashingAmountPWei * ONE_PWEI * (_postCLValidators - _exitedValidatorsAtTimestamp(reportTimestamp - 18 days)) + - _limitsList.inactivityPenaltiesAmountPWei * ONE_PWEI * (_postCLValidators - _exitedValidatorsAtTimestamp(reportTimestamp - 54 days)); - - if (negativeCLRebaseSum <= maxAllowedCLRebaseNegativeSum) { - // If the rebase diff is less or equal max allowed sum, we accept the report - emit NegativeCLRebaseAccepted(_refSlot, _postCLBalance + _withdrawalVaultBalance, negativeCLRebaseSum, maxAllowedCLRebaseNegativeSum); - return; - } + maxAllowedCLBalanceDiff = (adjustedBase * _maxDecreaseBP) / MAX_BASIS_POINTS; + } - // If there is no negative rebase oracle, then we don't need to check it's report - if (address(secondOpinionOracle) == address(0)) { - // If there is no oracle and the 
diff is more than limit, we revert - revert IncorrectCLBalanceDecrease(negativeCLRebaseSum, maxAllowedCLRebaseNegativeSum); + function _findWindowStartIndex( + uint256 _lastIndex, + uint256 _windowStart + ) internal view returns (uint256 windowStartIndex) { + windowStartIndex = _lastIndex; + while (windowStartIndex > 0 && reportData[windowStartIndex - 1].timestamp >= _windowStart) { + --windowStartIndex; } - _askSecondOpinion(_refSlot, _postCLBalance, _withdrawalVaultBalance, _limitsList); } - function _askSecondOpinion(uint256 _refSlot, uint256 _postCLBalance, uint256 _withdrawalVaultBalance, LimitsList memory _limitsList) internal { - (bool success, uint256 clOracleBalanceGwei, uint256 oracleWithdrawalVaultBalanceWei,,) = secondOpinionOracle.getReport(_refSlot); + function _askSecondOpinion( + uint256 _refSlot, + uint256 _postCLBalance, + uint256 _withdrawalVaultBalance, + uint256 _clBalanceOraclesErrorUpperBPLimit + ) internal { + (bool success, uint256 clOracleBalanceGwei, uint256 oracleWithdrawalVaultBalanceWei, , ) = secondOpinionOracle + .getReport(_refSlot); if (success) { uint256 clBalanceWei = clOracleBalanceGwei * 1 gwei; if (clBalanceWei < _postCLBalance) { - revert NegativeRebaseFailedCLBalanceMismatch(_postCLBalance, clBalanceWei, _limitsList.clBalanceOraclesErrorUpperBPLimit); + revert NegativeRebaseFailedCLBalanceMismatch( + _postCLBalance, + clBalanceWei, + _clBalanceOraclesErrorUpperBPLimit + ); } - if (MAX_BASIS_POINTS * (clBalanceWei - _postCLBalance) > - _limitsList.clBalanceOraclesErrorUpperBPLimit * clBalanceWei) { - revert NegativeRebaseFailedCLBalanceMismatch(_postCLBalance, clBalanceWei, _limitsList.clBalanceOraclesErrorUpperBPLimit); + if ( + MAX_BASIS_POINTS * (clBalanceWei - _postCLBalance) > + _clBalanceOraclesErrorUpperBPLimit * clBalanceWei + ) { + revert NegativeRebaseFailedCLBalanceMismatch( + _postCLBalance, + clBalanceWei, + _clBalanceOraclesErrorUpperBPLimit + ); } if (oracleWithdrawalVaultBalanceWei != _withdrawalVaultBalance) 
{ - revert NegativeRebaseFailedWithdrawalVaultBalanceMismatch(_withdrawalVaultBalance, oracleWithdrawalVaultBalanceWei); + revert NegativeRebaseFailedWithdrawalVaultBalanceMismatch( + _withdrawalVaultBalance, + oracleWithdrawalVaultBalanceWei + ); } emit NegativeCLRebaseConfirmed(_refSlot, _postCLBalance, _withdrawalVaultBalance); } else { @@ -735,7 +1277,7 @@ contract OracleReportSanityChecker is AccessControlEnumerable { } function _checkAnnualBalancesIncrease( - LimitsList memory _limitsList, + AccountingCoreLimitsPacked memory _limitsList, uint256 _preCLBalance, uint256 _postCLBalance, uint256 _timeElapsed @@ -748,13 +1290,10 @@ contract OracleReportSanityChecker is AccessControlEnumerable { if (_preCLBalance >= _postCLBalance) return; - if (_timeElapsed == 0) { - _timeElapsed = DEFAULT_TIME_ELAPSED; - } + _timeElapsed = _getTimeElapsedForAllowanceChecks(_timeElapsed); uint256 balanceIncrease = _postCLBalance - _preCLBalance; - uint256 annualBalanceIncrease = ((365 days * MAX_BASIS_POINTS * balanceIncrease) / - _preCLBalance) / + uint256 annualBalanceIncrease = (ANNUAL_BALANCE_INCREASE_DENOMINATOR * balanceIncrease) / _preCLBalance / _timeElapsed; if (annualBalanceIncrease > _limitsList.annualBalanceIncreaseBPLimit) { @@ -762,22 +1301,8 @@ contract OracleReportSanityChecker is AccessControlEnumerable { } } - function _checkAppearedValidatorsChurnLimit( - LimitsList memory _limitsList, - uint256 _appearedValidators, - uint256 _timeElapsed - ) internal pure { - if (_timeElapsed == 0) { - _timeElapsed = DEFAULT_TIME_ELAPSED; - } - - uint256 appearedLimit = (_limitsList.appearedValidatorsPerDayLimit * _timeElapsed) / SECONDS_PER_DAY; - - if (_appearedValidators > appearedLimit) revert IncorrectAppearedValidators(_appearedValidators); - } - function _checkLastFinalizableId( - LimitsList memory _limitsList, + OperationalLimitsPacked memory _limitsList, address _withdrawalQueue, uint256 _lastFinalizableId, uint256 _reportTimestamp @@ -787,12 +1312,12 @@ contract 
OracleReportSanityChecker is AccessControlEnumerable { IWithdrawalQueue.WithdrawalRequestStatus[] memory statuses = IWithdrawalQueue(_withdrawalQueue) .getWithdrawalStatus(requestIds); - if (_reportTimestamp < statuses[0].timestamp + _limitsList.requestTimestampMargin) + if (_reportTimestamp < statuses[0].timestamp + uint256(_limitsList.requestTimestampMargin)) revert IncorrectRequestFinalization(statuses[0].timestamp); } function _checkSimulatedShareRate( - LimitsList memory _limitsList, + AccountingCoreLimitsPacked memory _limitsList, uint256 _noWithdrawalsPostInternalEther, uint256 _noWithdrawalsPostInternalShares, uint256 _simulatedShareRate @@ -837,92 +1362,135 @@ contract OracleReportSanityChecker is AccessControlEnumerable { } function _updateLimits(LimitsList memory _newLimitsList) internal { - LimitsList memory _oldLimitsList = _limits.unpack(); - if (_oldLimitsList.exitedValidatorsPerDayLimit != _newLimitsList.exitedValidatorsPerDayLimit) { - _checkLimitValue(_newLimitsList.exitedValidatorsPerDayLimit, 0, type(uint16).max); - emit ExitedValidatorsPerDayLimitSet(_newLimitsList.exitedValidatorsPerDayLimit); - } - if (_oldLimitsList.appearedValidatorsPerDayLimit != _newLimitsList.appearedValidatorsPerDayLimit) { - _checkLimitValue(_newLimitsList.appearedValidatorsPerDayLimit, 0, type(uint16).max); - emit AppearedValidatorsPerDayLimitSet(_newLimitsList.appearedValidatorsPerDayLimit); - } - if (_oldLimitsList.annualBalanceIncreaseBPLimit != _newLimitsList.annualBalanceIncreaseBPLimit) { - _checkLimitValue(_newLimitsList.annualBalanceIncreaseBPLimit, 0, MAX_BASIS_POINTS); - emit AnnualBalanceIncreaseBPLimitSet(_newLimitsList.annualBalanceIncreaseBPLimit); + _validateLimitsList(_newLimitsList); + _updateAccountingCoreLimits(_newLimitsList.packAccountingCore()); + _updateOperationalLimits(_newLimitsList.packOperational()); + } + + function _checkLimitValue(uint256 _value, uint256 _minAllowedValue, uint256 _maxAllowedValue) internal pure { + if (_value > 
_maxAllowedValue || _value < _minAllowedValue) { + revert IncorrectLimitValue(_value, _minAllowedValue, _maxAllowedValue); } - if (_oldLimitsList.simulatedShareRateDeviationBPLimit != _newLimitsList.simulatedShareRateDeviationBPLimit) { - _checkLimitValue(_newLimitsList.simulatedShareRateDeviationBPLimit, 0, MAX_BASIS_POINTS); - emit SimulatedShareRateDeviationBPLimitSet(_newLimitsList.simulatedShareRateDeviationBPLimit); + } + + function _validateLimitsList(LimitsList memory _limitsList) internal pure { + _checkLimitValue(_limitsList.exitedEthAmountPerDayLimit, 0, type(uint32).max); + _checkLimitValue(_limitsList.appearedEthAmountPerDayLimit, 0, type(uint32).max); + _checkLimitValue(_limitsList.consolidationEthAmountPerDayLimit, 0, type(uint32).max); + _checkLimitValue(_limitsList.exitedValidatorEthAmountLimit, 1, type(uint16).max); + _checkLimitValue(_limitsList.annualBalanceIncreaseBPLimit, 0, MAX_BASIS_POINTS); + _checkLimitValue(_limitsList.simulatedShareRateDeviationBPLimit, 0, MAX_BASIS_POINTS); + _checkLimitValue(_limitsList.maxBalanceExitRequestedPerReportInEth, 0, type(uint16).max); + _checkLimitValue(_limitsList.maxEffectiveBalanceWeightWCType01, 1, type(uint16).max); + _checkLimitValue(_limitsList.maxEffectiveBalanceWeightWCType02, 1, type(uint16).max); + _checkLimitValue(_limitsList.maxItemsPerExtraDataTransaction, 0, type(uint16).max); + _checkLimitValue(_limitsList.maxNodeOperatorsPerExtraDataItem, 0, type(uint16).max); + _checkLimitValue(_limitsList.requestTimestampMargin, 0, type(uint32).max); + _checkLimitValue(_limitsList.maxPositiveTokenRebase, 1, type(uint64).max); + _checkLimitValue(_limitsList.maxCLBalanceDecreaseBP, 0, MAX_BASIS_POINTS); + _checkLimitValue(_limitsList.clBalanceOraclesErrorUpperBPLimit, 0, MAX_BASIS_POINTS); + } + + function _updateAccountingCoreLimits(AccountingCoreLimitsPacked memory _newLimits) internal { + AccountingCoreLimitsPacked memory _oldLimits = _accountingCoreLimits; + + if (_oldLimits.exitedEthAmountPerDayLimit 
!= _newLimits.exitedEthAmountPerDayLimit) { + emit ExitedEthAmountPerDayLimitSet(_newLimits.exitedEthAmountPerDayLimit); } - if (_oldLimitsList.maxValidatorExitRequestsPerReport != _newLimitsList.maxValidatorExitRequestsPerReport) { - _checkLimitValue(_newLimitsList.maxValidatorExitRequestsPerReport, 0, type(uint16).max); - emit MaxValidatorExitRequestsPerReportSet(_newLimitsList.maxValidatorExitRequestsPerReport); + if (_oldLimits.appearedEthAmountPerDayLimit != _newLimits.appearedEthAmountPerDayLimit) { + emit AppearedEthAmountPerDayLimitSet(_newLimits.appearedEthAmountPerDayLimit); } - if (_oldLimitsList.maxItemsPerExtraDataTransaction != _newLimitsList.maxItemsPerExtraDataTransaction) { - _checkLimitValue(_newLimitsList.maxItemsPerExtraDataTransaction, 0, type(uint16).max); - emit MaxItemsPerExtraDataTransactionSet(_newLimitsList.maxItemsPerExtraDataTransaction); + if (_oldLimits.consolidationEthAmountPerDayLimit != _newLimits.consolidationEthAmountPerDayLimit) { + emit ConsolidationEthAmountPerDayLimitSet(_newLimits.consolidationEthAmountPerDayLimit); } - if (_oldLimitsList.maxNodeOperatorsPerExtraDataItem != _newLimitsList.maxNodeOperatorsPerExtraDataItem) { - _checkLimitValue(_newLimitsList.maxNodeOperatorsPerExtraDataItem, 0, type(uint16).max); - emit MaxNodeOperatorsPerExtraDataItemSet(_newLimitsList.maxNodeOperatorsPerExtraDataItem); + if (_oldLimits.exitedValidatorEthAmountLimit != _newLimits.exitedValidatorEthAmountLimit) { + emit ExitedValidatorEthAmountLimitSet(_newLimits.exitedValidatorEthAmountLimit); } - if (_oldLimitsList.requestTimestampMargin != _newLimitsList.requestTimestampMargin) { - _checkLimitValue(_newLimitsList.requestTimestampMargin, 0, type(uint32).max); - emit RequestTimestampMarginSet(_newLimitsList.requestTimestampMargin); + if (_oldLimits.annualBalanceIncreaseBPLimit != _newLimits.annualBalanceIncreaseBPLimit) { + emit AnnualBalanceIncreaseBPLimitSet(_newLimits.annualBalanceIncreaseBPLimit); } - if 
(_oldLimitsList.maxPositiveTokenRebase != _newLimitsList.maxPositiveTokenRebase) { - _checkLimitValue(_newLimitsList.maxPositiveTokenRebase, 1, type(uint64).max); - emit MaxPositiveTokenRebaseSet(_newLimitsList.maxPositiveTokenRebase); + if (_oldLimits.simulatedShareRateDeviationBPLimit != _newLimits.simulatedShareRateDeviationBPLimit) { + emit SimulatedShareRateDeviationBPLimitSet(_newLimits.simulatedShareRateDeviationBPLimit); } - if (_oldLimitsList.initialSlashingAmountPWei != _newLimitsList.initialSlashingAmountPWei) { - _checkLimitValue(_newLimitsList.initialSlashingAmountPWei, 0, type(uint16).max); - emit InitialSlashingAmountSet(_newLimitsList.initialSlashingAmountPWei); + if (_oldLimits.maxPositiveTokenRebase != _newLimits.maxPositiveTokenRebase) { + emit MaxPositiveTokenRebaseSet(_newLimits.maxPositiveTokenRebase); } - if (_oldLimitsList.inactivityPenaltiesAmountPWei != _newLimitsList.inactivityPenaltiesAmountPWei) { - _checkLimitValue(_newLimitsList.inactivityPenaltiesAmountPWei, 0, type(uint16).max); - emit InactivityPenaltiesAmountSet(_newLimitsList.inactivityPenaltiesAmountPWei); + if (_oldLimits.maxCLBalanceDecreaseBP != _newLimits.maxCLBalanceDecreaseBP) { + emit MaxCLBalanceDecreaseBPSet(_newLimits.maxCLBalanceDecreaseBP); } - if (_oldLimitsList.clBalanceOraclesErrorUpperBPLimit != _newLimitsList.clBalanceOraclesErrorUpperBPLimit) { - _checkLimitValue(_newLimitsList.clBalanceOraclesErrorUpperBPLimit, 0, MAX_BASIS_POINTS); - emit CLBalanceOraclesErrorUpperBPLimitSet(_newLimitsList.clBalanceOraclesErrorUpperBPLimit); + if (_oldLimits.clBalanceOraclesErrorUpperBPLimit != _newLimits.clBalanceOraclesErrorUpperBPLimit) { + emit CLBalanceOraclesErrorUpperBPLimitSet(_newLimits.clBalanceOraclesErrorUpperBPLimit); } - _limits = _newLimitsList.pack(); + + _accountingCoreLimits = _newLimits; } - function _checkLimitValue(uint256 _value, uint256 _minAllowedValue, uint256 _maxAllowedValue) internal pure { - if (_value > _maxAllowedValue || _value < 
_minAllowedValue) { - revert IncorrectLimitValue(_value, _minAllowedValue, _maxAllowedValue); + function _updateOperationalLimits(OperationalLimitsPacked memory _newLimits) internal { + OperationalLimitsPacked memory _oldLimits = _operationalLimits; + + if (_oldLimits.maxBalanceExitRequestedPerReportInEth != _newLimits.maxBalanceExitRequestedPerReportInEth) { + emit MaxBalanceExitRequestedPerReportInEthSet(_newLimits.maxBalanceExitRequestedPerReportInEth); + } + if (_oldLimits.maxEffectiveBalanceWeightWCType01 != _newLimits.maxEffectiveBalanceWeightWCType01) { + emit MaxEffectiveBalanceWeightWCType01Set(_newLimits.maxEffectiveBalanceWeightWCType01); + } + if (_oldLimits.maxEffectiveBalanceWeightWCType02 != _newLimits.maxEffectiveBalanceWeightWCType02) { + emit MaxEffectiveBalanceWeightWCType02Set(_newLimits.maxEffectiveBalanceWeightWCType02); + } + if (_oldLimits.maxItemsPerExtraDataTransaction != _newLimits.maxItemsPerExtraDataTransaction) { + emit MaxItemsPerExtraDataTransactionSet(_newLimits.maxItemsPerExtraDataTransaction); + } + if (_oldLimits.maxNodeOperatorsPerExtraDataItem != _newLimits.maxNodeOperatorsPerExtraDataItem) { + emit MaxNodeOperatorsPerExtraDataItemSet(_newLimits.maxNodeOperatorsPerExtraDataItem); } + if (_oldLimits.requestTimestampMargin != _newLimits.requestTimestampMargin) { + emit RequestTimestampMarginSet(_newLimits.requestTimestampMargin); + } + + _operationalLimits = _newLimits; } - event ExitedValidatorsPerDayLimitSet(uint256 exitedValidatorsPerDayLimit); - event AppearedValidatorsPerDayLimitSet(uint256 appearedValidatorsPerDayLimit); + event ExitedEthAmountPerDayLimitSet(uint256 exitedEthAmountPerDayLimit); + event AppearedEthAmountPerDayLimitSet(uint256 appearedEthAmountPerDayLimit); + event ConsolidationEthAmountPerDayLimitSet(uint256 consolidationEthAmountPerDayLimit); + event ExitedValidatorEthAmountLimitSet(uint256 exitedValidatorEthAmountLimit); event SecondOpinionOracleChanged(ISecondOpinionOracle indexed secondOpinionOracle); 
event AnnualBalanceIncreaseBPLimitSet(uint256 annualBalanceIncreaseBPLimit); event SimulatedShareRateDeviationBPLimitSet(uint256 simulatedShareRateDeviationBPLimit); event MaxPositiveTokenRebaseSet(uint256 maxPositiveTokenRebase); - event MaxValidatorExitRequestsPerReportSet(uint256 maxValidatorExitRequestsPerReport); + event MaxBalanceExitRequestedPerReportInEthSet(uint256 maxBalanceExitRequestedPerReportInEth); + event MaxEffectiveBalanceWeightWCType01Set(uint256 maxEffectiveBalanceWeightWCType01); + event MaxEffectiveBalanceWeightWCType02Set(uint256 maxEffectiveBalanceWeightWCType02); event MaxItemsPerExtraDataTransactionSet(uint256 maxItemsPerExtraDataTransaction); event MaxNodeOperatorsPerExtraDataItemSet(uint256 maxNodeOperatorsPerExtraDataItem); event RequestTimestampMarginSet(uint256 requestTimestampMargin); - event InitialSlashingAmountSet(uint256 initialSlashingAmountPWei); - event InactivityPenaltiesAmountSet(uint256 inactivityPenaltiesAmountPWei); + event MaxCLBalanceDecreaseBPSet(uint256 maxCLBalanceDecreaseBP); event CLBalanceOraclesErrorUpperBPLimitSet(uint256 clBalanceOraclesErrorUpperBPLimit); event NegativeCLRebaseConfirmed(uint256 refSlot, uint256 clBalanceWei, uint256 withdrawalVaultBalance); - event NegativeCLRebaseAccepted(uint256 refSlot, uint256 clTotalBalance, uint256 clBalanceDecrease, uint256 maxAllowedCLRebaseNegativeSum); + event NegativeCLRebaseAccepted( + uint256 refSlot, + uint256 clTotalBalance, + uint256 clBalanceDecrease, + uint256 maxAllowedDecrease + ); error IncorrectLimitValue(uint256 value, uint256 minAllowedValue, uint256 maxAllowedValue); error IncorrectWithdrawalsVaultBalance(uint256 actualWithdrawalVaultBalance); error IncorrectELRewardsVaultBalance(uint256 actualELRewardsVaultBalance); error IncorrectSharesRequestedToBurn(uint256 actualSharesToBurn); error IncorrectCLBalanceIncrease(uint256 annualBalanceDiff); - error IncorrectAppearedValidators(uint256 appearedValidatorsLimit); - error 
IncorrectNumberOfExitRequestsPerReport(uint256 maxRequestsCount); - error IncorrectExitedValidators(uint256 exitedValidatorsLimit); + error InvalidClBalancesData(); + error InconsistentValidatorsBalanceByModule(uint256 expected, uint256 actual); + error IncorrectTotalPendingBalance(uint256 maxAllowed, uint256 actual); + error IncorrectTotalActivatedBalance(uint256 maxAllowed, uint256 actual); + error IncorrectTotalCLBalanceIncrease(uint256 maxAllowed, uint256 actual); + error IncorrectTotalModuleValidatorsBalanceIncrease(uint256 maxAllowed, uint256 actual); + error AppearedEthAmountPerDayLimitExceeded(uint256 limitPerDay, uint256 appearedPerDay); + error IncorrectSumOfExitBalancePerReport(uint256 maxBalanceSum); error IncorrectRequestFinalization(uint256 requestCreationBlock); error IncorrectSimulatedShareRate(uint256 simulatedShareRate, uint256 actualShareRate); error TooManyItemsPerExtraDataTransaction(uint256 maxItemsCount, uint256 receivedItemsCount); - error ExitedValidatorsLimitExceeded(uint256 limitPerDay, uint256 exitedPerDay); + error ExitedEthAmountPerDayLimitExceeded(uint256 limitPerDay, uint256 exitedPerDay); error TooManyNodeOpsPerExtraDataItem(uint256 itemIndex, uint256 nodeOpsCount); error AdminCannotBeZero(); @@ -931,27 +1499,51 @@ contract OracleReportSanityChecker is AccessControlEnumerable { error NegativeRebaseFailedWithdrawalVaultBalanceMismatch(uint256 reportedValue, uint256 provedValue); error NegativeRebaseFailedSecondOpinionReportIsNotReady(); error CalledNotFromAccounting(); + error IncorrectCLWithdrawalsVaultBalance( + uint256 withdrawalVaultBalance, + uint256 lastWithdrawalVaultBalanceAfterTransfer + ); + error IncorrectWithdrawalsVaultTransfer(uint256 withdrawalVaultBalance, uint256 withdrawalsVaultTransfer); + error IncorrectCLBalanceDecreaseWindowData( + uint256 baselineBalance, + uint256 totalDeposits, + uint256 totalCLWithdrawals + ); + error MigrationAlreadyDone(); + error UnexpectedLidoVersion(uint256 actual, uint256 expected); + 
+ event BaselineSnapshotMigrated(uint256 clBalance, uint256 deposits, uint256 clWithdrawals); } library LimitsListPacker { error BasisPointsOverflow(uint256 value, uint256 maxValue); - function pack(LimitsList memory _limitsList) internal pure returns (LimitsListPacked memory res) { - res.exitedValidatorsPerDayLimit = SafeCast.toUint16(_limitsList.exitedValidatorsPerDayLimit); - res.appearedValidatorsPerDayLimit = SafeCast.toUint16(_limitsList.appearedValidatorsPerDayLimit); - res.annualBalanceIncreaseBPLimit = _toBasisPoints(_limitsList.annualBalanceIncreaseBPLimit); - res.simulatedShareRateDeviationBPLimit = _toBasisPoints(_limitsList.simulatedShareRateDeviationBPLimit); - res.requestTimestampMargin = SafeCast.toUint32(_limitsList.requestTimestampMargin); + function packAccountingCore( + LimitsList memory _limitsList + ) internal pure returns (AccountingCoreLimitsPacked memory res) { + res.exitedEthAmountPerDayLimit = SafeCast.toUint32(_limitsList.exitedEthAmountPerDayLimit); + res.appearedEthAmountPerDayLimit = SafeCast.toUint32(_limitsList.appearedEthAmountPerDayLimit); + res.consolidationEthAmountPerDayLimit = SafeCast.toUint32(_limitsList.consolidationEthAmountPerDayLimit); + res.annualBalanceIncreaseBPLimit = toBasisPoints(_limitsList.annualBalanceIncreaseBPLimit); + res.simulatedShareRateDeviationBPLimit = toBasisPoints(_limitsList.simulatedShareRateDeviationBPLimit); res.maxPositiveTokenRebase = SafeCast.toUint64(_limitsList.maxPositiveTokenRebase); - res.maxValidatorExitRequestsPerReport = SafeCast.toUint16(_limitsList.maxValidatorExitRequestsPerReport); + res.maxCLBalanceDecreaseBP = toBasisPoints(_limitsList.maxCLBalanceDecreaseBP); + res.clBalanceOraclesErrorUpperBPLimit = toBasisPoints(_limitsList.clBalanceOraclesErrorUpperBPLimit); + res.exitedValidatorEthAmountLimit = SafeCast.toUint16(_limitsList.exitedValidatorEthAmountLimit); + } + + function packOperational( + LimitsList memory _limitsList + ) internal pure returns (OperationalLimitsPacked 
memory res) { + res.maxBalanceExitRequestedPerReportInEth = SafeCast.toUint16(_limitsList.maxBalanceExitRequestedPerReportInEth); + res.maxEffectiveBalanceWeightWCType01 = SafeCast.toUint16(_limitsList.maxEffectiveBalanceWeightWCType01); + res.maxEffectiveBalanceWeightWCType02 = SafeCast.toUint16(_limitsList.maxEffectiveBalanceWeightWCType02); res.maxItemsPerExtraDataTransaction = SafeCast.toUint16(_limitsList.maxItemsPerExtraDataTransaction); res.maxNodeOperatorsPerExtraDataItem = SafeCast.toUint16(_limitsList.maxNodeOperatorsPerExtraDataItem); - res.initialSlashingAmountPWei = SafeCast.toUint16(_limitsList.initialSlashingAmountPWei); - res.inactivityPenaltiesAmountPWei = SafeCast.toUint16(_limitsList.inactivityPenaltiesAmountPWei); - res.clBalanceOraclesErrorUpperBPLimit = _toBasisPoints(_limitsList.clBalanceOraclesErrorUpperBPLimit); + res.requestTimestampMargin = SafeCast.toUint32(_limitsList.requestTimestampMargin); } - function _toBasisPoints(uint256 _value) private pure returns (uint16) { + function toBasisPoints(uint256 _value) internal pure returns (uint16) { if (_value > MAX_BASIS_POINTS) { revert BasisPointsOverflow(_value, MAX_BASIS_POINTS); } @@ -960,18 +1552,24 @@ library LimitsListPacker { } library LimitsListUnpacker { - function unpack(LimitsListPacked memory _limitsList) internal pure returns (LimitsList memory res) { - res.exitedValidatorsPerDayLimit = _limitsList.exitedValidatorsPerDayLimit; - res.appearedValidatorsPerDayLimit = _limitsList.appearedValidatorsPerDayLimit; - res.annualBalanceIncreaseBPLimit = _limitsList.annualBalanceIncreaseBPLimit; - res.simulatedShareRateDeviationBPLimit = _limitsList.simulatedShareRateDeviationBPLimit; - res.requestTimestampMargin = _limitsList.requestTimestampMargin; - res.maxPositiveTokenRebase = _limitsList.maxPositiveTokenRebase; - res.maxValidatorExitRequestsPerReport = _limitsList.maxValidatorExitRequestsPerReport; - res.maxItemsPerExtraDataTransaction = _limitsList.maxItemsPerExtraDataTransaction; - 
res.maxNodeOperatorsPerExtraDataItem = _limitsList.maxNodeOperatorsPerExtraDataItem; - res.initialSlashingAmountPWei = _limitsList.initialSlashingAmountPWei; - res.inactivityPenaltiesAmountPWei = _limitsList.inactivityPenaltiesAmountPWei; - res.clBalanceOraclesErrorUpperBPLimit = _limitsList.clBalanceOraclesErrorUpperBPLimit; + function unpack( + AccountingCoreLimitsPacked memory _accountingLimits, + OperationalLimitsPacked memory _operationalLimitsPacked + ) internal pure returns (LimitsList memory res) { + res.exitedEthAmountPerDayLimit = _accountingLimits.exitedEthAmountPerDayLimit; + res.appearedEthAmountPerDayLimit = _accountingLimits.appearedEthAmountPerDayLimit; + res.annualBalanceIncreaseBPLimit = _accountingLimits.annualBalanceIncreaseBPLimit; + res.simulatedShareRateDeviationBPLimit = _accountingLimits.simulatedShareRateDeviationBPLimit; + res.maxBalanceExitRequestedPerReportInEth = _operationalLimitsPacked.maxBalanceExitRequestedPerReportInEth; + res.maxEffectiveBalanceWeightWCType01 = _operationalLimitsPacked.maxEffectiveBalanceWeightWCType01; + res.maxEffectiveBalanceWeightWCType02 = _operationalLimitsPacked.maxEffectiveBalanceWeightWCType02; + res.maxItemsPerExtraDataTransaction = _operationalLimitsPacked.maxItemsPerExtraDataTransaction; + res.maxNodeOperatorsPerExtraDataItem = _operationalLimitsPacked.maxNodeOperatorsPerExtraDataItem; + res.requestTimestampMargin = _operationalLimitsPacked.requestTimestampMargin; + res.maxPositiveTokenRebase = _accountingLimits.maxPositiveTokenRebase; + res.maxCLBalanceDecreaseBP = _accountingLimits.maxCLBalanceDecreaseBP; + res.clBalanceOraclesErrorUpperBPLimit = _accountingLimits.clBalanceOraclesErrorUpperBPLimit; + res.consolidationEthAmountPerDayLimit = _accountingLimits.consolidationEthAmountPerDayLimit; + res.exitedValidatorEthAmountLimit = _accountingLimits.exitedValidatorEthAmountLimit; } } diff --git a/contracts/common/interfaces/ILido.sol b/contracts/common/interfaces/ILido.sol index 3293823f7b..a285688b3d 
100644 --- a/contracts/common/interfaces/ILido.sol +++ b/contracts/common/interfaces/ILido.sol @@ -41,21 +41,30 @@ interface ILido is IERC20, IVersioned { view returns (uint256 depositedValidators, uint256 beaconValidators, uint256 beaconBalance); + function getBalanceStats() + external + view + returns ( + uint256 clValidatorsBalanceAtLastReport, + uint256 clPendingBalanceAtLastReport, + uint256 depositedSinceLastReport, + uint256 depositedForCurrentReport + ); + function processClStateUpdate( uint256 _reportTimestamp, - uint256 _preClValidators, - uint256 _reportClValidators, - uint256 _reportClBalance + uint256 _clValidatorsBalance, + uint256 _clPendingBalance ) external; function collectRewardsAndProcessWithdrawals( uint256 _reportTimestamp, uint256 _reportClBalance, - uint256 _adjustedPreCLBalance, + uint256 _principalCLBalance, uint256 _withdrawalsToWithdraw, uint256 _elRewardsToWithdraw, uint256 _lastWithdrawalRequestToFinalize, - uint256 _simulatedShareRate, + uint256 _withdrawalsShareRate, uint256 _etherToLockOnWithdrawalQueue ) external; diff --git a/contracts/common/interfaces/ILidoLocator.sol b/contracts/common/interfaces/ILidoLocator.sol index 84b7239964..92f7973f5f 100644 --- a/contracts/common/interfaces/ILidoLocator.sol +++ b/contracts/common/interfaces/ILidoLocator.sol @@ -26,6 +26,10 @@ interface ILidoLocator { function vaultFactory() external view returns (address); function lazyOracle() external view returns (address); function operatorGrid() external view returns (address); + function topUpGateway() external view returns (address); + function validatorExitDelayVerifier() external view returns (address); + function triggerableWithdrawalsGateway() external view returns (address); + function consolidationGateway() external view returns (address); /// @notice Returns core Lido protocol component addresses in a single call /// @dev This function provides a gas-efficient way to fetch multiple component addresses in a single call diff --git 
a/contracts/common/interfaces/IOracleReportSanityChecker.sol b/contracts/common/interfaces/IOracleReportSanityChecker.sol index a32d8d8162..107f52f9dd 100644 --- a/contracts/common/interfaces/IOracleReportSanityChecker.sol +++ b/contracts/common/interfaces/IOracleReportSanityChecker.sol @@ -7,8 +7,8 @@ pragma solidity >=0.4.24; interface IOracleReportSanityChecker { function smoothenTokenRebase( - uint256 _preTotalPooledEther, - uint256 _preTotalShares, + uint256 _preInternalEther, + uint256 _preInternalShares, uint256 _preCLBalance, uint256 _postCLBalance, uint256 _withdrawalVaultBalance, @@ -21,15 +21,28 @@ interface IOracleReportSanityChecker { // function checkAccountingOracleReport( uint256 _timeElapsed, - uint256 _preCLBalance, - uint256 _postCLBalance, + uint256 _preCLValidatorsBalance, + uint256 _preCLPendingBalance, + uint256 _postCLValidatorsBalance, + uint256 _postCLPendingBalance, uint256 _withdrawalVaultBalance, uint256 _elRewardsVaultBalance, uint256 _sharesRequestedToBurn, - uint256 _preCLValidators, - uint256 _postCLValidators + uint256 _deposits, + uint256 _withdrawalsVaultTransfer ) external; + // + function checkCLPendingBalanceIncrease( + uint256 _timeElapsed, + uint256 _preCLValidatorsBalance, + uint256 _preCLPendingBalance, + uint256 _postCLValidatorsBalance, + uint256 _postCLPendingBalance, + uint256 _withdrawalVaultBalance, + uint256 _deposits + ) external view; + // function checkWithdrawalQueueOracleReport( uint256 _lastFinalizableRequestId, @@ -38,10 +51,10 @@ interface IOracleReportSanityChecker { // function checkSimulatedShareRate( - uint256 _postTotalPooledEther, - uint256 _postTotalShares, - uint256 _etherLockedOnWithdrawalQueue, - uint256 _sharesBurntDueToWithdrawals, + uint256 _postInternalEther, + uint256 _postInternalShares, + uint256 _etherToFinalizeWQ, + uint256 _sharesToBurnForWithdrawals, uint256 _simulatedShareRate ) external view; } diff --git a/contracts/common/interfaces/IStakingModule.sol 
b/contracts/common/interfaces/IStakingModule.sol index 6641aa6a02..b075e23021 100644 --- a/contracts/common/interfaces/IStakingModule.sol +++ b/contracts/common/interfaces/IStakingModule.sol @@ -75,11 +75,10 @@ interface IStakingModule { /// official Deposit Contract. This value is a cumulative counter: even when the validator /// goes into EXITED state this counter is not decreasing /// @return depositableValidatorsCount number of validators in the set available for deposit - function getStakingModuleSummary() external view returns ( - uint256 totalExitedValidators, - uint256 totalDepositedValidators, - uint256 depositableValidatorsCount - ); + function getStakingModuleSummary() + external + view + returns (uint256 totalExitedValidators, uint256 totalDepositedValidators, uint256 depositableValidatorsCount); /// @notice Returns all-validators summary belonging to the node operator with the given id /// @param _nodeOperatorId id of the operator to return report for @@ -96,16 +95,21 @@ interface IStakingModule { /// Deposit Contract. 
This value is a cumulative counter: even when the validator goes into /// EXITED state this counter is not decreasing /// @return depositableValidatorsCount number of validators in the set available for deposit - function getNodeOperatorSummary(uint256 _nodeOperatorId) external view returns ( - uint256 targetLimitMode, - uint256 targetValidatorsCount, - uint256 stuckValidatorsCount, - uint256 refundedValidatorsCount, - uint256 stuckPenaltyEndTimestamp, - uint256 totalExitedValidators, - uint256 totalDepositedValidators, - uint256 depositableValidatorsCount - ); + function getNodeOperatorSummary( + uint256 _nodeOperatorId + ) + external + view + returns ( + uint256 targetLimitMode, + uint256 targetValidatorsCount, + uint256 stuckValidatorsCount, + uint256 refundedValidatorsCount, + uint256 stuckPenaltyEndTimestamp, + uint256 totalExitedValidators, + uint256 totalDepositedValidators, + uint256 depositableValidatorsCount + ); /// @notice Returns a counter that MUST change its value whenever the deposit data set changes. /// Below is the typical list of actions that requires an update of the nonce: @@ -134,11 +138,10 @@ interface IStakingModule { /// the returned ids is not defined and might change between calls. /// @dev This view must not revert in case of invalid data passed. When `_offset` exceeds the /// total node operators count or when `_limit` is equal to 0 MUST be returned empty array. - function getNodeOperatorIds(uint256 _offset, uint256 _limit) - external - view - returns (uint256[] memory nodeOperatorIds); - + function getNodeOperatorIds( + uint256 _offset, + uint256 _limit + ) external view returns (uint256[] memory nodeOperatorIds); /// @notice Called by StakingRouter to signal that stETH rewards were minted for this module. /// @param _totalShares Amount of stETH shares that were minted to reward all node operators. 
@@ -176,10 +179,7 @@ interface IStakingModule { /// 'unsafely' means that this method can both increase and decrease exited and stuck counters /// @param _nodeOperatorId Id of the node operator /// @param _exitedValidatorsCount New number of EXITED validators for the node operator - function unsafeUpdateValidatorsCount( - uint256 _nodeOperatorId, - uint256 _exitedValidatorsCount - ) external; + function unsafeUpdateValidatorsCount(uint256 _nodeOperatorId, uint256 _exitedValidatorsCount) external; /// @notice Obtains deposit data to be used by StakingRouter to deposit to the Ethereum Deposit /// contract @@ -189,9 +189,10 @@ interface IStakingModule { /// IMPORTANT: _depositCalldata MUST NOT modify the deposit data set of the staking module /// @return publicKeys Batch of the concatenated public validators keys /// @return signatures Batch of the concatenated deposit signatures for returned public keys - function obtainDepositData(uint256 _depositsCount, bytes calldata _depositCalldata) - external - returns (bytes memory publicKeys, bytes memory signatures); + function obtainDepositData( + uint256 _depositsCount, + bytes calldata _depositCalldata + ) external returns (bytes memory publicKeys, bytes memory signatures); /// @notice Called by StakingRouter after it finishes updating exited and stuck validators /// counts for this module's node operators. 
diff --git a/contracts/common/interfaces/IStakingModuleV2.sol b/contracts/common/interfaces/IStakingModuleV2.sol new file mode 100644 index 0000000000..a891c3569a --- /dev/null +++ b/contracts/common/interfaces/IStakingModuleV2.sol @@ -0,0 +1,30 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +// solhint-disable-next-line lido/fixed-compiler-version +pragma solidity >=0.8.9 <0.9.0; + +interface IStakingModuleV2 { + // Top ups + /// @notice Validates provided keys and calculates deposit allocations for top-up + /// @dev Reverts if any key doesn't belong to the module or data is invalid + /// @param depositAmount Total ether amount available for top-up (must be multiple of 1 gwei) + /// @param pubkeys List of validator public keys to top up + /// @param keyIndices Indices of keys within their respective operators + /// @param operatorIds Node operator IDs that own the keys + /// @param topUpLimits Maximum amount that can be deposited per key based on Consensus Layer data and SR internal logic. 
+ /// @return allocations Amount to deposit to each corresponding key + /// @dev allocations list can contain zero values + /// @dev sum of allocations can be less or equal to maxDepositAmount + /// @dev Values depositAmount, topUpLimits, allocations are denominated in wei + function allocateDeposits( + uint256 depositAmount, + bytes[] calldata pubkeys, + uint256[] calldata keyIndices, + uint256[] calldata operatorIds, + uint256[] calldata topUpLimits + ) external returns (uint256[] memory allocations); + + /// @notice returns the total amount of ETH staked in the module, in wei + function getTotalModuleStake() external view returns (uint256); +} diff --git a/contracts/common/interfaces/ReportValues.sol b/contracts/common/interfaces/ReportValues.sol index db2293a1b8..0a04fafb65 100644 --- a/contracts/common/interfaces/ReportValues.sol +++ b/contracts/common/interfaces/ReportValues.sol @@ -10,10 +10,10 @@ struct ReportValues { uint256 timestamp; /// @notice seconds elapsed since the previous report uint256 timeElapsed; - /// @notice total number of Lido validators on Consensus Layers (exited included) - uint256 clValidators; - /// @notice sum of all Lido validators' balances on Consensus Layer - uint256 clBalance; + /// @notice Validators balance without pending deposits + uint256 clValidatorsBalance; + /// @notice Pending deposits balance on Consensus Layer + uint256 clPendingBalance; /// @notice withdrawal vault balance uint256 withdrawalVaultBalance; /// @notice elRewards vault balance diff --git a/contracts/common/interfaces/TopUpWitness.sol b/contracts/common/interfaces/TopUpWitness.sol new file mode 100644 index 0000000000..ada957af0c --- /dev/null +++ b/contracts/common/interfaces/TopUpWitness.sol @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +// solhint-disable-next-line +pragma solidity >=0.8.9; + +import {BeaconRootData, ValidatorWitness} from "contracts/common/interfaces/ValidatorWitness.sol"; + +struct 
TopUpData { + uint256 moduleId; + uint256[] keyIndices; + uint256[] operatorIds; + uint256[] validatorIndices; + BeaconRootData beaconRootData; + ValidatorWitness[] validatorWitness; + uint256[] pendingBalanceGwei; +} diff --git a/contracts/common/interfaces/ValidatorWitness.sol b/contracts/common/interfaces/ValidatorWitness.sol new file mode 100644 index 0000000000..c89728dc75 --- /dev/null +++ b/contracts/common/interfaces/ValidatorWitness.sol @@ -0,0 +1,24 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +// solhint-disable-next-line +pragma solidity >=0.8.9; + +struct BeaconRootData { + uint64 childBlockTimestamp; // for EIP-4788 lookup + uint64 slot; // header slot + uint64 proposerIndex; // header proposer +} + +struct ValidatorWitness { + // Merkle path: Validator[i] → … → state_root → beacon_block_root + bytes32[] proofValidator; + // Full Validator container fields (minus WC) + bytes pubkey; + uint64 effectiveBalance; + uint64 activationEligibilityEpoch; + uint64 activationEpoch; + uint64 exitEpoch; + uint64 withdrawableEpoch; + bool slashed; +} diff --git a/contracts/common/lib/RateLimit.sol b/contracts/common/lib/RateLimit.sol new file mode 100644 index 0000000000..434ec83e9a --- /dev/null +++ b/contracts/common/lib/RateLimit.sol @@ -0,0 +1,124 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +/* solhint-disable one-contract-per-file */ + +/* See contracts/COMPILERS.md */ +// solhint-disable-next-line lido/fixed-compiler-version +pragma solidity >=0.8.9 <0.9.0; + +struct LimitData { + uint32 maxLimit; // Maximum limit + uint32 prevLimit; // Limit left after previous operations + uint32 prevTimestamp; // Timestamp of the last update + uint32 frameDurationInSec; // Seconds that should pass to restore part of the limit + uint32 itemsPerFrame; // Restored items per frame +} +library RateLimitStorage { + struct DataStorage { + LimitData _limitData; + } + + function getStorageLimit(bytes32 
_position) internal view returns (LimitData memory) { + return _getDataStorage(_position)._limitData; + } + + function setStorageLimit(bytes32 _position, LimitData memory _data) internal { + _getDataStorage(_position)._limitData = _data; + } + + function _getDataStorage(bytes32 _position) private pure returns (DataStorage storage $) { + assembly { + $.slot := _position + } + } +} +// A replenishing quota per time frame +library RateLimit { + /// @notice Error when new value for remaining limit exceeds maximum limit. + error LimitExceeded(); + + /// @notice Error when max limit exceeds uint32 max. + error TooLargeMaxLimit(); + + /// @notice Error when frame duration exceeds uint32 max. + error TooLargeFrameDuration(); + + /// @notice Error when items per frame exceed the maximum limit. + error TooLargeItemsPerFrame(); + + /// @notice Error when frame duration is zero. + error ZeroFrameDuration(); + + function calculateCurrentLimit( + LimitData memory _data, + uint256 timestamp + ) internal pure returns (uint256 currentLimit) { + uint256 secondsPassed = timestamp - _data.prevTimestamp; + + if (secondsPassed < _data.frameDurationInSec || _data.itemsPerFrame == 0) { + return _data.prevLimit; + } + + uint256 framesPassed = secondsPassed / _data.frameDurationInSec; + uint256 restoredLimit = framesPassed * _data.itemsPerFrame; + + uint256 newLimit = _data.prevLimit + restoredLimit; + if (newLimit > _data.maxLimit) { + newLimit = _data.maxLimit; + } + + return newLimit; + } + + function updatePrevLimit( + LimitData memory _data, + uint256 newLimit, + uint256 timestamp + ) internal pure returns (LimitData memory) { + if (_data.maxLimit < newLimit) revert LimitExceeded(); + + uint256 secondsPassed = timestamp - _data.prevTimestamp; + uint256 framesPassed = secondsPassed / _data.frameDurationInSec; + uint32 passedTime = uint32(framesPassed) * _data.frameDurationInSec; + + _data.prevLimit = uint32(newLimit); + _data.prevTimestamp += passedTime; + + return _data; + } + + 
function setLimits( + LimitData memory _data, + uint256 maxLimit, + uint256 itemsPerFrame, + uint256 frameDurationInSec, + uint256 timestamp + ) internal pure returns (LimitData memory) { + if (maxLimit > type(uint32).max) revert TooLargeMaxLimit(); + if (frameDurationInSec > type(uint32).max) revert TooLargeFrameDuration(); + if (itemsPerFrame > maxLimit) revert TooLargeItemsPerFrame(); + if (frameDurationInSec == 0) revert ZeroFrameDuration(); + + _data.itemsPerFrame = uint32(itemsPerFrame); + _data.frameDurationInSec = uint32(frameDurationInSec); + + if ( + // new maxLimit is smaller than prev remaining limit + maxLimit < _data.prevLimit || + // previously items were unlimited + _data.maxLimit == 0 + ) { + _data.prevLimit = uint32(maxLimit); + } + + _data.maxLimit = uint32(maxLimit); + _data.prevTimestamp = uint32(timestamp); + + return _data; + } + + function isLimitSet(LimitData memory _data) internal pure returns (bool) { + return _data.maxLimit != 0; + } +} diff --git a/contracts/common/lib/WithdrawalCredentials.sol b/contracts/common/lib/WithdrawalCredentials.sol new file mode 100644 index 0000000000..c016093ca7 --- /dev/null +++ b/contracts/common/lib/WithdrawalCredentials.sol @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-3.0 +// solhint-disable-next-line lido/fixed-compiler-version +pragma solidity >=0.8.9 <0.9.0; + +/** + * @title Withdrawal credentials helpers. 
+ * @author KRogLA + * @notice Provides functionality for managing withdrawal credentials + * @dev WC bytes layout: [0] = prefix (0x00/0x01/0x02), [1..11] = zero, [12..31] = execution address (20b) + */ +library WithdrawalCredentials { + // Withdrawal Credentials types + uint8 public constant WC_TYPE_01 = 0x01; + uint8 public constant WC_TYPE_02 = 0x02; + + /// @notice Get the current prefix (0x00/0x01/0x02) + function getType(bytes32 wc) internal pure returns (uint8) { + return uint8(uint256(wc) >> 248); + } + + /// @notice Extract the execution address from the WC (low 20 bytes) + function getAddr(bytes32 wc) internal pure returns (address) { + return address(uint160(uint256(wc))); + } + + /// @notice Set 1st byte to wcType (0x00/0x01/0x02), keep the rest + function setType(bytes32 wc, uint8 wcType) internal pure returns (bytes32) { + return bytes32((uint256(wc) & type(uint248).max) | (uint256(wcType) << 248)); + } + + function isTypeValid(uint256 wcType) internal pure returns (bool) { + return isType1(wcType) || isType2(wcType); + } + + function isType1(bytes32 wc) internal pure returns (bool) { + return isType1(getType(wc)); + } + + function isType2(bytes32 wc) internal pure returns (bool) { + return isType2(getType(wc)); + } + + function isType1(uint256 wcType) internal pure returns (bool) { + return wcType == WC_TYPE_01; + } + + function isType2(uint256 wcType) internal pure returns (bool) { + return wcType == WC_TYPE_02; + } +} diff --git a/contracts/upgrade/V3Addresses.sol b/contracts/upgrade/V3Addresses.sol index 4021b654fc..0dc4642b6b 100644 --- a/contracts/upgrade/V3Addresses.sol +++ b/contracts/upgrade/V3Addresses.sol @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-3.0 pragma solidity 0.8.25; -import {IAccessControlEnumerable} from "@openzeppelin/contracts-v4.4/access/AccessControlEnumerable.sol"; +import {IAccessControlEnumerable as IAccessControlEnumerableV5} from + "@openzeppelin/contracts-v5.2/access/extensions/IAccessControlEnumerable.sol"; + 
import {ILidoLocator} from "contracts/common/interfaces/ILidoLocator.sol"; + interface IVaultsAdapter { function evmScriptExecutor() external view returns (address); } -interface IStakingRouter is IAccessControlEnumerable { +interface IStakingRouter is IAccessControlEnumerableV5 { struct StakingModule { uint24 id; address stakingModuleAddress; @@ -23,9 +26,10 @@ interface IStakingRouter is IAccessControlEnumerable { uint16 priorityExitShareThreshold; uint64 maxDepositsPerBlock; uint64 minDepositBlockDistance; + uint8 withdrawalCredentialsType; } - function getStakingModules() external view returns (StakingModule[] memory res); + function getStakingModules() external view returns (StakingModule[] memory); } interface ICSModule { @@ -38,7 +42,6 @@ interface ICSModule { * This contract centralizes address management for V3Template and V3VoteScript. */ contract V3Addresses { - struct V3AddressesParams { // Old implementations address oldLocatorImpl; @@ -163,9 +166,7 @@ contract V3Addresses { address public immutable ORACLE_DAEMON_CONFIG; address public immutable RESEAL_MANAGER; - constructor( - V3AddressesParams memory params - ) { + constructor(V3AddressesParams memory params) { if (params.newLocatorImpl == params.oldLocatorImpl) { revert NewAndOldLocatorImplementationsMustBeDifferent(); } @@ -177,7 +178,6 @@ contract V3Addresses { // // Set directly from passed parameters // - ILidoLocator newLocatorImpl = ILidoLocator(params.newLocatorImpl); OLD_LOCATOR_IMPL = params.oldLocatorImpl; OLD_ACCOUNTING_ORACLE_IMPL = params.oldAccountingOracleImpl; @@ -214,7 +214,6 @@ contract V3Addresses { // // Discovered via other contracts // - OLD_BURNER = ILidoLocator(params.oldLocatorImpl).burner(); LIDO = newLocatorImpl.lido(); diff --git a/contracts/upgrade/V3Template.sol b/contracts/upgrade/V3Template.sol index f6d549d054..685f0642ea 100644 --- a/contracts/upgrade/V3Template.sol +++ b/contracts/upgrade/V3Template.sol @@ -5,6 +5,7 @@ pragma solidity 0.8.25; import 
{IAccessControlEnumerable} from "@openzeppelin/contracts-v4.4/access/AccessControlEnumerable.sol"; + import {UpgradeableBeacon} from "@openzeppelin/contracts-v5.2/proxy/beacon/UpgradeableBeacon.sol"; import {IBurner as IBurnerWithoutAccessControl} from "contracts/common/interfaces/IBurner.sol"; diff --git a/contracts/upgrade/V3TemporaryAdmin.sol b/contracts/upgrade/V3TemporaryAdmin.sol index bf2c5ac507..1a85d398c6 100644 --- a/contracts/upgrade/V3TemporaryAdmin.sol +++ b/contracts/upgrade/V3TemporaryAdmin.sol @@ -40,6 +40,7 @@ interface IStakingRouter { uint16 priorityExitShareThreshold; uint64 maxDepositsPerBlock; uint64 minDepositBlockDistance; + uint8 withdrawalCredentialsType; } function getStakingModules() external view returns (StakingModule[] memory res); diff --git a/foundry.lock b/foundry.lock new file mode 100644 index 0000000000..fa8a276a48 --- /dev/null +++ b/foundry.lock @@ -0,0 +1,5 @@ +{ + "foundry/lib/forge-std": { + "rev": "662ae0d6936654c5d1fb79fc15f521de28edb60e" + } +} \ No newline at end of file diff --git a/foundry.toml b/foundry.toml index f379f00057..bbc8491b3b 100644 --- a/foundry.toml +++ b/foundry.toml @@ -22,6 +22,8 @@ match_path = '**/test/**/*.t.sol' # Enable latest EVM features evm_version = "prague" +optimizer = true +optimizer_runs = 200 # https://book.getfoundry.sh/reference/config/testing#fuzz # fuzz = { runs = 256 } @@ -33,15 +35,25 @@ evm_version = "prague" fmt = { int_types = 'long' } -# add via_ir profile +# profiles required by compilation_restrictions additional_compiler_profiles = [ - { name = "v3", version = "0.8.25", optimizer = true, optimizer_runs = 200, via_ir = true, evm_version = "cancun" }, - { name = "vaultHub", version = "0.8.25", optimizer = true, optimizer_runs = 100, via_ir = true, evm_version = "cancun" }, + { name = "solc0424", optimizer = true, optimizer_runs = 200, evm_version = "constantinople" }, + { name = "solc06x_089", optimizer = true, optimizer_runs = 200, evm_version = "istanbul" }, + { name = 
"solc0825", optimizer = true, optimizer_runs = 200, via_ir = true, evm_version = "cancun" }, + { name = "v3", optimizer = true, optimizer_runs = 200, via_ir = true, evm_version = "cancun" }, + { name = "vaultHub", optimizer = true, optimizer_runs = 100, via_ir = true, evm_version = "cancun" }, ] -# enforce compiling 0.8.25 contract with via_ir +# align compiler settings with hardhat.config.ts compilation_restrictions = [ - { paths = "contracts/0.8.25/**", optimizer_runs = 200, via_ir = true }, - { paths = "contracts/0.8.25/vaults/VaultHub.sol", optimizer_runs = 100, via_ir = true }, - { paths = "contracts/upgrade/**", optimizer_runs = 200, via_ir = true }, + { paths = "contracts/0.4.24/**", optimizer_runs = 200, evm_version = "constantinople" }, + { paths = "contracts/0.6.11/**", optimizer_runs = 200, evm_version = "istanbul" }, + { paths = "contracts/0.6.12/**", optimizer_runs = 200, evm_version = "istanbul" }, + { paths = "contracts/0.8.9/**", optimizer_runs = 200, evm_version = "istanbul" }, + { paths = "contracts/0.8.25/**", optimizer_runs = 200, via_ir = true, evm_version = "cancun" }, + { paths = "contracts/upgrade/**", optimizer_runs = 200, via_ir = true, evm_version = "cancun" }, + # NB: Foundry cannot safely mirror Hardhat's strict VaultHub runs=100 override + # together with a runs=200 rule on the same import graph, so we keep a merge-safe + # range here for compatibility. 
+ { paths = "contracts/0.8.25/vaults/VaultHub.sol", min_optimizer_runs = 100, max_optimizer_runs = 200, via_ir = true, evm_version = "cancun" }, ] diff --git a/foundry/lib/forge-std b/foundry/lib/forge-std index 8f24d6b04c..ffa2ee0d92 160000 --- a/foundry/lib/forge-std +++ b/foundry/lib/forge-std @@ -1 +1 @@ -Subproject commit 8f24d6b04c92975e0795b5868aa0d783251cdeaa +Subproject commit ffa2ee0d921b4163b7abd0f1122df93ead205805 diff --git a/hardhat.config.ts b/hardhat.config.ts index 8237b57020..b3fa372b7f 100644 --- a/hardhat.config.ts +++ b/hardhat.config.ts @@ -65,6 +65,7 @@ const config: HardhatUserConfig = { "local-devnet": { url: process.env.LOCAL_RPC_URL || RPC_URL, accounts: [process.env.LOCAL_DEVNET_PK || ZERO_PK], + chainId: parseInt(process.env.LOCAL_DEVNET_CHAIN_ID || "32382", 10), }, // testnets "sepolia": { @@ -147,7 +148,9 @@ const config: HardhatUserConfig = { }, }, ], - apiKey: process.env.LOCAL_DEVNET_EXPLORER_API_URL ? "local-devnet" : process.env.ETHERSCAN_API_KEY || "", + apiKey: process.env.LOCAL_DEVNET_EXPLORER_API_URL + ? 
{ "local-devnet": "local-devnet" } + : process.env.ETHERSCAN_API_KEY || "", }, solidity: { compilers: [ diff --git a/lib/config-schemas.ts b/lib/config-schemas.ts index 3c33c968bb..2f059358d0 100644 --- a/lib/config-schemas.ts +++ b/lib/config-schemas.ts @@ -79,6 +79,44 @@ const TriggerableWithdrawalsGatewaySchema = z.object({ frameDurationInSec: PositiveIntSchema, }); +// Consolidation gateway schema +const ConsolidationGatewaySchema = z.object({ + maxConsolidationRequestsLimit: PositiveIntSchema, + consolidationsPerFrame: PositiveIntSchema, + frameDurationInSec: PositiveIntSchema, + gIFirstValidatorPrev: HexStringSchema, + gIFirstValidatorCurr: HexStringSchema, + pivotSlot: NonNegativeIntSchema, +}); + +const ConsolidationBusSchema = z.object({ + initialBatchSize: PositiveIntSchema, + initialMaxGroupsInBatch: PositiveIntSchema, + initialExecutionDelay: NonNegativeIntSchema, +}); + +const ConsolidationMigratorSchema = z.object({ + sourceModuleId: PositiveIntSchema, + targetModuleId: PositiveIntSchema, +}); + +// Top-up gateway schema +const TopUpGatewaySchema = z.object({ + maxValidatorsPerTopUp: PositiveIntSchema, + minBlockDistance: PositiveIntSchema, + maxRootAge: PositiveIntSchema, + targetBalanceGwei: PositiveIntSchema, + minTopUpGwei: PositiveIntSchema, + gIFirstValidatorPrev: HexStringSchema, + gIFirstValidatorCurr: HexStringSchema, + pivotSlot: NonNegativeIntSchema, +}); + +const StakingRouterSchema = z.object({ + maxEBType1: BigIntStringSchema, + maxEBType2: BigIntStringSchema, +}); + // Easy track schema const EasyTrackSchema = z.object({ VaultsAdapter: EthereumAddressSchema, @@ -134,6 +172,7 @@ export const UpgradeParametersSchema = z.object({ burner: BurnerSchema, oracleVersions: OracleVersionsSchema.optional(), aragonAppVersions: AragonAppVersionsSchema.optional(), + consolidationGateway: ConsolidationGatewaySchema, v3VoteScript: V3VoteScriptSchema, }); @@ -203,19 +242,22 @@ const DepositSecurityModuleSchema = z.object({ // Oracle report sanity 
checker schema const OracleReportSanityCheckerSchema = z.object({ - exitedValidatorsPerDayLimit: PositiveIntSchema, - appearedValidatorsPerDayLimit: PositiveIntSchema, + exitedEthAmountPerDayLimit: PositiveIntSchema, + appearedEthAmountPerDayLimit: PositiveIntSchema, deprecatedOneOffCLBalanceDecreaseBPLimit: BasisPointsSchema, annualBalanceIncreaseBPLimit: BasisPointsSchema, simulatedShareRateDeviationBPLimit: BasisPointsSchema, - maxValidatorExitRequestsPerReport: PositiveIntSchema, + maxBalanceExitRequestedPerReportInEth: PositiveIntSchema, + maxEffectiveBalanceWeightWCType01: PositiveIntSchema, + maxEffectiveBalanceWeightWCType02: PositiveIntSchema, maxItemsPerExtraDataTransaction: PositiveIntSchema, maxNodeOperatorsPerExtraDataItem: PositiveIntSchema, requestTimestampMargin: PositiveIntSchema, maxPositiveTokenRebase: PositiveIntSchema, - initialSlashingAmountPWei: PositiveIntSchema, - inactivityPenaltiesAmountPWei: PositiveIntSchema, + maxCLBalanceDecreaseBP: BasisPointsSchema, clBalanceOraclesErrorUpperBPLimit: BasisPointsSchema, + consolidationEthAmountPerDayLimit: NonNegativeIntSchema, + exitedValidatorEthAmountLimit: PositiveIntSchema, }); // Oracle daemon config schema @@ -278,8 +320,13 @@ export const ScratchParametersSchema = z.object({ withdrawalQueueERC721: WithdrawalQueueERC721Schema, validatorExitDelayVerifier: ValidatorExitDelayVerifierSchema, triggerableWithdrawalsGateway: TriggerableWithdrawalsGatewaySchema, + consolidationGateway: ConsolidationGatewaySchema, + consolidationBus: ConsolidationBusSchema, + consolidationMigrator: ConsolidationMigratorSchema, predepositGuarantee: PredepositGuaranteeSchema.omit({ genesisForkVersion: true }), operatorGrid: OperatorGridSchema, + topUpGateway: TopUpGatewaySchema, + stakingRouter: StakingRouterSchema, }); // Inferred types from zod schemas diff --git a/lib/constants.ts b/lib/constants.ts index 2c5cd8723f..dbca56ac83 100644 --- a/lib/constants.ts +++ b/lib/constants.ts @@ -64,8 +64,27 @@ export const 
TOTAL_BASIS_POINTS = 100_00n; export const ABNORMALLY_HIGH_FEE_THRESHOLD_BP = 1_00n; export const MAX_FEE_BP = 65_535n; + export const MAX_RESERVE_RATIO_BP = 99_99n; export const LIMITER_PRECISION_BASE = 10n ** 9n; export const DISCONNECT_NOT_INITIATED = 2n ** 48n - 1n; + +export const WITHDRAWAL_CREDENTIALS_TYPE_01 = 0x01; +export const WITHDRAWAL_CREDENTIALS_TYPE_02 = 0x02; + +export const MAX_EFFECTIVE_BALANCE_WC_TYPE_01 = 32n * 10n ** 18n; // 32 ETH +export const MAX_EFFECTIVE_BALANCE_WC_TYPE_02 = 2048n * 10n ** 18n; // 2048 ETH + +export enum WithdrawalCredentialsType { + WC0x01 = WITHDRAWAL_CREDENTIALS_TYPE_01, + WC0x02 = WITHDRAWAL_CREDENTIALS_TYPE_02, +} + +export enum StakingModuleStatus { + Active = 0, + DepositsPaused = 1, + Stopped = 2, +} + export const MAX_SANE_SETTLED_GROWTH = MAX_INT104; diff --git a/lib/deploy.ts b/lib/deploy.ts index f7d5e41ddf..4a92f3acc9 100644 --- a/lib/deploy.ts +++ b/lib/deploy.ts @@ -120,11 +120,12 @@ export async function deployWithoutProxy( constructorArgs: ConvertibleToString[] = [], addressFieldName = "address", withStateFile = true, + signerOrOptions?: Signer | FactoryOptions, fields: Record = {}, ): Promise { logWithConstructorArgs(`Deploying: ${yl(artifactName)} (without proxy)`, constructorArgs); - const contract = await deployContract(artifactName, constructorArgs, deployer, withStateFile); + const contract = await deployContract(artifactName, constructorArgs, deployer, withStateFile, signerOrOptions); if (withStateFile) { const contractPath = await getContractPath(artifactName); @@ -257,14 +258,15 @@ async function getLocatorConfig(locatorAddress: string) { "oracleDaemonConfig", "validatorExitDelayVerifier", "triggerableWithdrawalsGateway", + "consolidationGateway", "accounting", - "wstETH", "predepositGuarantee", + "wstETH", "vaultHub", "vaultFactory", "lazyOracle", "operatorGrid", - "vaultFactory", + "topUpGateway", ]) as (keyof LidoLocator.ConfigStruct)[]; const config = await Promise.all(locatorKeys.map((name) 
=> locator[name]())); diff --git a/lib/index.ts b/lib/index.ts index 65bad93d60..0756f20fea 100644 --- a/lib/index.ts +++ b/lib/index.ts @@ -26,3 +26,4 @@ export * from "./string"; export * from "./storage"; export * from "./time"; export * from "./units"; +export * from "./wc"; diff --git a/lib/oracle.ts b/lib/oracle.ts index 7677a0002b..6d55319106 100644 --- a/lib/oracle.ts +++ b/lib/oracle.ts @@ -35,10 +35,12 @@ export const EXTRA_DATA_TYPE_EXITED_VALIDATORS = 2n; export const DEFAULT_REPORT_FIELDS: OracleReport = { consensusVersion: 1n, refSlot: 0n, - numValidators: 0n, - clBalanceGwei: 0n, + clValidatorsBalanceGwei: 0n, + clPendingBalanceGwei: 0n, stakingModuleIdsWithNewlyExitedValidators: [], numExitedValidatorsByStakingModule: [], + stakingModuleIdsWithUpdatedBalance: [], + validatorBalancesGweiByStakingModule: [], withdrawalVaultBalance: 0n, elRewardsVaultBalance: 0n, sharesRequestedToBurn: 0n, @@ -56,10 +58,12 @@ export function getReportDataItems(r: OracleReport) { return [ r.consensusVersion, r.refSlot, - r.numValidators, - r.clBalanceGwei, + r.clValidatorsBalanceGwei, + r.clPendingBalanceGwei, r.stakingModuleIdsWithNewlyExitedValidators, r.numExitedValidatorsByStakingModule, + r.stakingModuleIdsWithUpdatedBalance, + r.validatorBalancesGweiByStakingModule, r.withdrawalVaultBalance, r.elRewardsVaultBalance, r.sharesRequestedToBurn, @@ -77,7 +81,7 @@ export function getReportDataItems(r: OracleReport) { export function calcReportDataHash(reportItems: ReportAsArray) { const data = ethers.AbiCoder.defaultAbiCoder().encode( [ - "(uint256, uint256, uint256, uint256, uint256[], uint256[], uint256, uint256, uint256, uint256[], uint256, bool, bytes32, string, uint256, bytes32, uint256)", + "(uint256, uint256, uint256, uint256, uint256[], uint256[], uint256[], uint256[], uint256, uint256, uint256, uint256[], uint256, bool, bytes32, string, uint256, bytes32, uint256)", ], [reportItems], ); diff --git a/lib/protocol/discover.ts b/lib/protocol/discover.ts index 
3580876608..726a90360b 100644 --- a/lib/protocol/discover.ts +++ b/lib/protocol/discover.ts @@ -123,6 +123,12 @@ const getCoreContracts = async ( "TriggerableWithdrawalsGateway", config.get("triggerableWithdrawalsGateway") || (await locator.triggerableWithdrawalsGateway()), ), + consolidationGateway: loadContract( + "ConsolidationGateway", + config.get("consolidationGateway") || (await locator.consolidationGateway()), + ), + consolidationBus: loadContract("ConsolidationBus", config.get("consolidationBus")), + consolidationMigrator: loadContract("ConsolidationMigrator", config.get("consolidationMigrator")), accounting: loadContract("Accounting", config.get("accounting") || (await locator.accounting())), }), })) as CoreContracts; @@ -246,6 +252,9 @@ export async function discover(skipV3Contracts: boolean) { "Burner": foundationContracts.burner.address, "wstETH": contracts.wstETH.address, "Triggered Withdrawal Gateway": contracts.triggerableWithdrawalsGateway?.address, + "Consolidation Gateway": contracts.consolidationGateway?.address, + "Consolidation Bus": contracts.consolidationBus?.address, + "Consolidation Migrator": contracts.consolidationMigrator?.address, // Vaults "Staking Vault Factory": contracts.stakingVaultFactory?.address, "Staking Vault Beacon": contracts.stakingVaultBeacon?.address, diff --git a/lib/protocol/helpers/accounting.ts b/lib/protocol/helpers/accounting.ts index 0e34ab8309..b7f36e59c7 100644 --- a/lib/protocol/helpers/accounting.ts +++ b/lib/protocol/helpers/accounting.ts @@ -5,11 +5,10 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { AccountingOracle } from "typechain-types"; -import { ReportValuesStruct } from "typechain-types/contracts/0.8.9/Accounting"; +import { ReportValuesStruct } from "typechain-types/contracts/0.8.9/Accounting.sol/Accounting"; import { advanceChainTime, - BigIntMath, certainAddress, ether, EXTRA_DATA_FORMAT_EMPTY, @@ -27,6 +26,7 @@ import { 
ProtocolContext } from "../types"; export type OracleReportParams = { clDiff?: bigint; clAppearedValidators?: bigint; + clPendingBalanceGwei?: bigint; elRewardsVaultBalance?: bigint | null; withdrawalVaultBalance?: bigint | null; sharesRequestedToBurn?: bigint | null; @@ -43,6 +43,8 @@ export type OracleReportParams = { extraDataList?: Uint8Array; stakingModuleIdsWithNewlyExitedValidators?: bigint[]; numExitedValidatorsByStakingModule?: bigint[]; + stakingModuleIdsWithUpdatedBalance?: bigint[]; + validatorBalancesGweiByStakingModule?: bigint[]; reportElVault?: boolean; reportWithdrawalsVault?: boolean; reportBurner?: boolean; @@ -59,6 +61,54 @@ type OracleReportResults = { export const ZERO_HASH = new Uint8Array(32).fill(0); const ZERO_BYTES32 = "0x" + Buffer.from(ZERO_HASH).toString("hex"); const SHARE_RATE_PRECISION = 10n ** 27n; +const CL_BALANCE_DECREASE_WINDOW_RESET_SECONDS = 37n * 24n * 60n * 60n; + +type StakingModuleWithBalanceGwei = { + moduleId: bigint; + moduleBalanceGwei: bigint; +}; + +type StakingModuleWithReportedBalanceGwei = { + moduleId: bigint; + moduleReportedBalanceGwei: bigint; +}; + +/** + * Build module balances in gwei with exact total conservation. + * Uses proportional split over remaining totals; the last module gets the remainder. 
+ */ +const buildConservedModuleBalancesGwei = ( + totalBalanceGwei: bigint, + modulesWithBalance: StakingModuleWithBalanceGwei[], +): StakingModuleWithReportedBalanceGwei[] => { + if (modulesWithBalance.length === 0) return []; + + const totalModulesBalanceGwei = modulesWithBalance.reduce((sum, module) => sum + module.moduleBalanceGwei, 0n); + if (totalModulesBalanceGwei === 0n) { + return modulesWithBalance.map(({ moduleId }) => ({ moduleId, moduleReportedBalanceGwei: 0n })); + } + + let remainingTotalBalanceGwei = totalBalanceGwei; + let remainingModulesBalanceGwei = totalModulesBalanceGwei; + const modulesWithReportedBalances: StakingModuleWithReportedBalanceGwei[] = []; + + for (let index = 0; index < modulesWithBalance.length; ++index) { + const { moduleId, moduleBalanceGwei } = modulesWithBalance[index]; + const isLastModule = index === modulesWithBalance.length - 1; + + const moduleReportedBalanceGwei = + isLastModule || remainingModulesBalanceGwei === 0n + ? remainingTotalBalanceGwei + : (remainingTotalBalanceGwei * moduleBalanceGwei) / remainingModulesBalanceGwei; + + modulesWithReportedBalances.push({ moduleId, moduleReportedBalanceGwei }); + + remainingTotalBalanceGwei -= moduleReportedBalanceGwei; + remainingModulesBalanceGwei -= moduleBalanceGwei; + } + + return modulesWithReportedBalances; +}; /** * Prepare and push oracle report. 
@@ -66,8 +116,9 @@ const SHARE_RATE_PRECISION = 10n ** 27n; export const report = async ( ctx: ProtocolContext, { - clDiff = ether("0.01"), + clDiff, clAppearedValidators = 0n, + clPendingBalanceGwei = 0n, elRewardsVaultBalance = null, withdrawalVaultBalance = null, sharesRequestedToBurn = null, @@ -83,6 +134,8 @@ export const report = async ( extraDataList = new Uint8Array(), stakingModuleIdsWithNewlyExitedValidators = [], numExitedValidatorsByStakingModule = [], + stakingModuleIdsWithUpdatedBalance = [], + validatorBalancesGweiByStakingModule = [], reportElVault = true, reportWithdrawalsVault = true, reportBurner = true, @@ -90,7 +143,8 @@ export const report = async ( vaultsDataTreeCid = "", }: OracleReportParams = {}, ): Promise => { - const { hashConsensus, lido, elRewardsVault, withdrawalVault, burner, accountingOracle } = ctx.contracts; + const { hashConsensus, lido, elRewardsVault, withdrawalVault, burner, accountingOracle, oracleReportSanityChecker } = + ctx.contracts; if (waitNextReportTime) { await waitNextAvailableReportTime(ctx); @@ -98,14 +152,10 @@ export const report = async ( refSlot = refSlot ?? (await hashConsensus.getCurrentFrame()).refSlot; - const { beaconValidators, beaconBalance } = await lido.getBeaconStat(); - const postCLBalance = beaconBalance + clDiff; - const postBeaconValidators = beaconValidators + clAppearedValidators; - - log.debug("Beacon", { - "Beacon validators": postBeaconValidators, - "Beacon balance": formatEther(postCLBalance), - }); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport, depositedSinceLastReport } = + await lido.getBalanceStats(); + clDiff = clDiff ?? depositedSinceLastReport; + const preCLBalance = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport; elRewardsVaultBalance = elRewardsVaultBalance ?? (await ethers.provider.getBalance(elRewardsVault.address)); withdrawalVaultBalance = withdrawalVaultBalance ?? 
(await ethers.provider.getBalance(withdrawalVault.address)); @@ -126,6 +176,30 @@ export const report = async ( withdrawalVaultBalance = reportWithdrawalsVault ? withdrawalVaultBalance : 0n; elRewardsVaultBalance = reportElVault ? elRewardsVaultBalance : 0n; + if (reportWithdrawalsVault) { + const lastVaultBalanceAfterTransfer = BigInt(await ethers.provider.getStorage(oracleReportSanityChecker, 4n)); + if (withdrawalVaultBalance < lastVaultBalanceAfterTransfer) { + throw new Error("Reported withdrawal vault balance is below last vault balance after transfer"); + } + // Sync _lastVaultBalanceAfterTransfer with the current vault balance so the pending check + // does not interpret test-funded vault balance as CL withdrawals (zero-sum rebalancing). + // The contract will update _lastVaultBalanceAfterTransfer = vaultBalance - transfer after the report. + if (withdrawalVaultBalance > lastVaultBalanceAfterTransfer) { + await ethers.provider.send("hardhat_setStorageAt", [ + await oracleReportSanityChecker.getAddress(), + ethers.toBeHex(4n, 32), + ethers.toBeHex(withdrawalVaultBalance, 32), + ]); + } + } + + const postCLBalance = preCLBalance + clDiff; + + log.debug("Beacon", { + "Beacon validators delta": clAppearedValidators, + "Beacon balance": formatEther(postCLBalance), + }); + if (sharesRequestedToBurn === null && reportBurner) { const [coverShares, nonCoverShares] = await burner.getSharesRequestedToBurn(); sharesRequestedToBurn = coverShares + nonCoverShares; @@ -141,8 +215,8 @@ export const report = async ( const simulatedReport = await simulateReport(ctx, { refSlot, - beaconValidators: postBeaconValidators, - clBalance: postCLBalance, + clValidatorsBalance: postCLBalance, + clPendingBalance: 0n, withdrawalVaultBalance, elRewardsVaultBalance, }); @@ -173,17 +247,41 @@ export const report = async ( } isBunkerMode = (await lido.getTotalPooledEther()) > postTotalPooledEther; - log.debug("Bunker Mode", { "Is Active": isBunkerMode }); } + if 
(stakingModuleIdsWithUpdatedBalance.length === 0) { + validatorBalancesGweiByStakingModule = []; + const moduleIds = await ctx.contracts.stakingRouter.getStakingModuleIds(); + + const modulesWithBalance: StakingModuleWithBalanceGwei[] = []; + for (const moduleId of moduleIds) { + const moduleBalance = await ctx.contracts.stakingRouter.getModuleValidatorsBalance(moduleId); + modulesWithBalance.push({ moduleId, moduleBalanceGwei: moduleBalance / ONE_GWEI }); + } + + const activeModulesWithBalance = modulesWithBalance.filter(({ moduleBalanceGwei }) => moduleBalanceGwei > 0n); + const modulesWithReportedBalance = new Map( + buildConservedModuleBalancesGwei(postCLBalance / ONE_GWEI, activeModulesWithBalance).map( + ({ moduleId, moduleReportedBalanceGwei }) => [moduleId, moduleReportedBalanceGwei], + ), + ); + + for (const { moduleId } of modulesWithBalance) { + stakingModuleIdsWithUpdatedBalance.push(moduleId); + validatorBalancesGweiByStakingModule.push(modulesWithReportedBalance.get(moduleId) ?? 0n); + } + } + const reportData = { consensusVersion: await accountingOracle.getConsensusVersion(), refSlot, - numValidators: postBeaconValidators, - clBalanceGwei: postCLBalance / ONE_GWEI, + clValidatorsBalanceGwei: postCLBalance / ONE_GWEI - clPendingBalanceGwei, + clPendingBalanceGwei, stakingModuleIdsWithNewlyExitedValidators, numExitedValidatorsByStakingModule, + stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule, withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn: sharesRequestedToBurn ?? 0n, @@ -209,17 +307,59 @@ export const report = async ( }); }; +export const getDepositedSinceLastReport = async (ctx: ProtocolContext): Promise => { + const { depositedSinceLastReport } = await ctx.contracts.lido.getBalanceStats(); + return depositedSinceLastReport; +}; + +/** + * Submit report with an effective CL delta between reports. + * + * `report()` expects `clDiff` as raw `postCLBalance - preCLBalance`. 
+ * Since `preCLBalance` is based on last report snapshot, deposits made after that + * snapshot must be added to preserve the intended effective delta. + */ +export const reportWithEffectiveClDiff = async ( + ctx: ProtocolContext, + effectiveClDiff: bigint, + params: Omit = {}, +): Promise => { + const depositedSinceLastReport = await getDepositedSinceLastReport(ctx); + return report(ctx, { ...params, clDiff: depositedSinceLastReport + effectiveClDiff }); +}; + +export const resetCLBalanceDecreaseWindow = async ( + ctx: ProtocolContext, + params: Omit = {}, +): Promise => { + // Move report timestamp beyond the 36-day window and submit an effective neutral report. + await advanceChainTime(CL_BALANCE_DECREASE_WINDOW_RESET_SECONDS); + return reportWithEffectiveClDiff(ctx, 0n, { + excludeVaultsBalances: true, + skipWithdrawals: true, + ...params, + }); +}; + export async function reportWithoutExtraData( ctx: ProtocolContext, numExitedValidatorsByStakingModule: bigint[], stakingModuleIdsWithNewlyExitedValidators: bigint[], extraData: ReturnType, + { + effectiveClDiff, + }: { + effectiveClDiff?: bigint; + } = {}, ) { const { accountingOracle } = ctx.contracts; const { extraDataItemsCount, extraDataChunks, extraDataChunkHashes } = extraData; + const clDiff = effectiveClDiff === undefined ? undefined : (await getDepositedSinceLastReport(ctx)) + effectiveClDiff; + const reportData: Partial = { + ...(clDiff === undefined ? 
{} : { clDiff }), excludeVaultsBalances: true, extraDataFormat: EXTRA_DATA_FORMAT_LIST, extraDataHash: extraDataChunkHashes[0], @@ -292,6 +432,22 @@ export const getReportTimeElapsed = async (ctx: ProtocolContext) => { }; }; +export const getNextReportContext = async ( + ctx: ProtocolContext, +): Promise<{ nextReportRefSlot: bigint; reportTimeElapsed: bigint }> => { + const { accountingOracle, hashConsensus } = ctx.contracts; + + const lastProcessingRefSlot = await accountingOracle.getLastProcessingRefSlot(); + const currentFrame = await hashConsensus.getCurrentFrame(); + const frameConfig = await hashConsensus.getFrameConfig(); + const chainConfig = await hashConsensus.getChainConfig(); + + const nextReportRefSlot = currentFrame.refSlot + frameConfig.epochsPerFrame * chainConfig.slotsPerEpoch; + const reportTimeElapsed = (nextReportRefSlot - lastProcessingRefSlot) * chainConfig.secondsPerSlot; + + return { nextReportRefSlot, reportTimeElapsed }; +}; + /** * Wait for the next available report time. * Returns the report timestamp and the ref slot of the next frame. 
@@ -330,8 +486,8 @@ export const waitNextAvailableReportTime = async ( type SimulateReportParams = { refSlot: bigint; - beaconValidators: bigint; - clBalance: bigint; + clValidatorsBalance: bigint; + clPendingBalance: bigint; withdrawalVaultBalance: bigint; elRewardsVaultBalance: bigint; }; @@ -348,7 +504,13 @@ type SimulateReportResult = { */ export const simulateReport = async ( ctx: ProtocolContext, - { refSlot, beaconValidators, clBalance, withdrawalVaultBalance, elRewardsVaultBalance }: SimulateReportParams, + { + refSlot, + clValidatorsBalance, + clPendingBalance, + withdrawalVaultBalance, + elRewardsVaultBalance, + }: SimulateReportParams, ): Promise => { const { hashConsensus, accounting } = ctx.contracts; @@ -357,17 +519,18 @@ export const simulateReport = async ( log.debug("Simulating oracle report", { "Ref Slot": refSlot, - "Beacon Validators": beaconValidators, - "CL Balance": formatEther(clBalance), + "CL Validators Balance": formatEther(clValidatorsBalance), + "CL Pending Balance": formatEther(clPendingBalance), "Withdrawal Vault Balance": formatEther(withdrawalVaultBalance), "El Rewards Vault Balance": formatEther(elRewardsVaultBalance), }); const reportValues: ReportValuesStruct = { timestamp: reportTimestamp, + // timeElapsed: (await getReportTimeElapsed(ctx)).timeElapsed, timeElapsed: /* 1 day */ 86_400n, - clValidators: beaconValidators, - clBalance, + clValidatorsBalance, + clPendingBalance, withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn: 0n, @@ -393,7 +556,6 @@ export const simulateReport = async ( }; type HandleOracleReportParams = { - beaconValidators: bigint; clBalance: bigint; sharesRequestedToBurn: bigint; withdrawalVaultBalance: bigint; @@ -405,7 +567,6 @@ type HandleOracleReportParams = { export const handleOracleReport = async ( ctx: ProtocolContext, { - beaconValidators, clBalance, sharesRequestedToBurn, withdrawalVaultBalance, @@ -425,7 +586,6 @@ export const handleOracleReport = async ( try { log.debug("Handle 
oracle report", { "Ref Slot": refSlot, - "Beacon Validators": beaconValidators, "CL Balance": formatEther(clBalance), "Withdrawal Vault Balance": formatEther(withdrawalVaultBalance), "El Rewards Vault Balance": formatEther(elRewardsVaultBalance), @@ -435,8 +595,8 @@ export const handleOracleReport = async ( await accounting.connect(accountingOracleAccount).handleOracleReport({ timestamp: reportTimestamp, timeElapsed, // 1 day - clValidators: beaconValidators, - clBalance, + clValidatorsBalance: clBalance, + clPendingBalance: 0n, withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn, @@ -472,8 +632,7 @@ const getFinalizationBatches = async ( const bufferedEther = await lido.getBufferedEther(); const unfinalizedSteth = await withdrawalQueue.unfinalizedStETH(); - - const reservedBuffer = BigIntMath.min(bufferedEther, unfinalizedSteth); + const reservedBuffer = await lido.getWithdrawalsReserve(); const availableEth = limitedWithdrawalVaultBalance + limitedElRewardsVaultBalance + reservedBuffer; const blockTimestamp = await getCurrentBlockTimestamp(); @@ -545,12 +704,14 @@ const getFinalizationBatches = async ( export type OracleReportSubmitParams = { refSlot: bigint; clBalance: bigint; - numValidators: bigint; + clPendingBalanceGwei?: bigint; withdrawalVaultBalance: bigint; elRewardsVaultBalance: bigint; sharesRequestedToBurn: bigint; stakingModuleIdsWithNewlyExitedValidators?: bigint[]; numExitedValidatorsByStakingModule?: bigint[]; + stakingModuleIdsWithUpdatedBalance?: bigint[]; + validatorBalancesGweiByStakingModule?: bigint[]; withdrawalFinalizationBatches?: bigint[]; simulatedShareRate?: bigint; isBunkerMode?: boolean; @@ -568,6 +729,43 @@ type OracleReportSubmitResult = { extraDataTx: ContractTransactionResponse; }; +export const submitReportDataWithConsensus = async ( + ctx: ProtocolContext, + data: AccountingOracle.ReportDataStruct, +): Promise => { + const { accountingOracle } = ctx.contracts; + + const reportHash = 
calcReportDataHash(getReportDataItems(data)); + const submitter = await reachConsensus(ctx, { + refSlot: BigInt(data.refSlot), + reportHash, + consensusVersion: BigInt(data.consensusVersion), + }); + const oracleVersion = await accountingOracle.getContractVersion(); + + return accountingOracle.connect(submitter).submitReportData(data, oracleVersion); +}; + +export const submitReportDataWithConsensusAndEmptyExtraData = async ( + ctx: ProtocolContext, + data: AccountingOracle.ReportDataStruct, +): Promise<{ reportTx: ContractTransactionResponse; extraDataTx: ContractTransactionResponse }> => { + const { accountingOracle } = ctx.contracts; + + const reportHash = calcReportDataHash(getReportDataItems(data)); + const submitter = await reachConsensus(ctx, { + refSlot: BigInt(data.refSlot), + reportHash, + consensusVersion: BigInt(data.consensusVersion), + }); + const oracleVersion = await accountingOracle.getContractVersion(); + + const reportTx = await accountingOracle.connect(submitter).submitReportData(data, oracleVersion); + const extraDataTx = await accountingOracle.connect(submitter).submitReportExtraDataEmpty(); + + return { reportTx, extraDataTx }; +}; + /** * Main function to push oracle report to the protocol. 
*/ @@ -576,12 +774,14 @@ const submitReport = async ( { refSlot, clBalance, - numValidators, + clPendingBalanceGwei = 0n, withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn, stakingModuleIdsWithNewlyExitedValidators = [], numExitedValidatorsByStakingModule = [], + stakingModuleIdsWithUpdatedBalance = [], + validatorBalancesGweiByStakingModule = [], withdrawalFinalizationBatches = [], simulatedShareRate = 0n, isBunkerMode = false, @@ -598,12 +798,15 @@ const submitReport = async ( log.debug("Pushing oracle report", { "Ref slot": refSlot, "CL balance": formatEther(clBalance), - "Validators": numValidators, + // TODO: Add proper validator count logging "Withdrawal vault": formatEther(withdrawalVaultBalance), "El rewards vault": formatEther(elRewardsVaultBalance), "Shares requested to burn": sharesRequestedToBurn, "Staking module ids with newly exited validators": stakingModuleIdsWithNewlyExitedValidators, "Num exited validators by staking module": numExitedValidatorsByStakingModule, + "Staking module ids with updated active balance": stakingModuleIdsWithUpdatedBalance, + "Validator balances by staking module": validatorBalancesGweiByStakingModule, + "CL pending balance (gwei)": clPendingBalanceGwei, "Withdrawal finalization batches": withdrawalFinalizationBatches, "Is bunker mode": isBunkerMode, "Vaults data tree root": vaultsDataTreeRoot, @@ -616,17 +819,23 @@ const submitReport = async ( const consensusVersion = await accountingOracle.getConsensusVersion(); const oracleVersion = await accountingOracle.getContractVersion(); + const clBalanceGwei = clBalance / ONE_GWEI; + if (clPendingBalanceGwei > clBalanceGwei) { + throw new Error("Reported pending CL balance exceeds total CL balance"); + } const data = { consensusVersion, refSlot, - clBalanceGwei: clBalance / ONE_GWEI, - numValidators, + clValidatorsBalanceGwei: clBalanceGwei - clPendingBalanceGwei, + clPendingBalanceGwei, withdrawalVaultBalance, elRewardsVaultBalance, sharesRequestedToBurn, 
stakingModuleIdsWithNewlyExitedValidators, numExitedValidatorsByStakingModule, + stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule, withdrawalFinalizationBatches, simulatedShareRate, isBunkerMode, @@ -746,10 +955,12 @@ const reachConsensus = async ( export const getReportDataItems = (data: AccountingOracle.ReportDataStruct) => [ data.consensusVersion, data.refSlot, - data.numValidators, - data.clBalanceGwei, + data.clValidatorsBalanceGwei, + data.clPendingBalanceGwei, data.stakingModuleIdsWithNewlyExitedValidators, data.numExitedValidatorsByStakingModule, + data.stakingModuleIdsWithUpdatedBalance, + data.validatorBalancesGweiByStakingModule, data.withdrawalVaultBalance, data.elRewardsVaultBalance, data.sharesRequestedToBurn, @@ -770,10 +981,13 @@ export const calcReportDataHash = (items: ReturnType) const types = [ "uint256", // consensusVersion "uint256", // refSlot - "uint256", // numValidators - "uint256", // clBalanceGwei + // TODO: Update types to match new balance-based structure + "uint256", // clValidatorsBalanceGwei + "uint256", // clPendingBalanceGwei "uint256[]", // stakingModuleIdsWithNewlyExitedValidators "uint256[]", // numExitedValidatorsByStakingModule + "uint256[]", // stakingModuleIdsWithUpdatedBalance + "uint256[]", // validatorBalancesGweiByStakingModule "uint256", // withdrawalVaultBalance "uint256", // elRewardsVaultBalance "uint256", // sharesRequestedToBurn @@ -825,7 +1039,9 @@ export const ensureOracleCommitteeMembers = async (ctx: ProtocolContext, minMemb log(`Adding oracle committee member ${count}`); const address = getOracleCommitteeMemberAddress(count); - await hashConsensus.connect(agentSigner).addMember(address, quorum); + if (!(await hashConsensus.getIsMember(address))) { + await hashConsensus.connect(agentSigner).addMember(address, quorum); + } addresses.push(address); diff --git a/lib/protocol/helpers/index.ts b/lib/protocol/helpers/index.ts index ba41a60322..e357076135 100644 --- 
a/lib/protocol/helpers/index.ts +++ b/lib/protocol/helpers/index.ts @@ -1,25 +1,44 @@ -export { depositAndReportValidators, ensureStakeLimit, unpauseStaking } from "./staking"; +export { + depositAndReportValidators, + getCurrentModuleAccountingReportParams, + depositValidatorsWithoutReport, + ensureStakeLimit, + seedProtocolPendingBaseline, + getStakingModuleBalances, + unpauseStaking, +} from "./staking"; export { finalizeWQViaElVault, finalizeWQViaSubmit, unpauseWithdrawalQueue } from "./withdrawal"; -export { setMaxPositiveTokenRebase } from "./sanity-checker"; +export { setMaxPositiveTokenRebase, updateOracleReportLimits } from "./sanity-checker"; export { calcReportDataHash, ensureHashConsensusInitialEpoch, ensureOracleCommitteeMembers, getReportDataItems, + getNextReportContext, getReportTimeElapsed, waitNextAvailableReportTime, handleOracleReport, OracleReportParams, OracleReportSubmitParams, report, + reportWithEffectiveClDiff, + resetCLBalanceDecreaseWindow, + submitReportDataWithConsensus, + submitReportDataWithConsensusAndEmptyExtraData, + getDepositedSinceLastReport, } from "./accounting"; export { ensureDsmGuardians } from "./dsm"; +export { + norSdvtEnsureOperators, + norSdvtAddNodeOperator, + norSdvtAddOperatorKeys, + norSdvtSetOperatorStakingLimit, +} from "./nor-sdvt"; export { ensurePredepositGuaranteeUnpaused } from "./pdg"; -export { norSdvtEnsureOperators } from "./nor-sdvt"; export { calcNodeOperatorRewards } from "./staking-module"; export * from "./vaults"; diff --git a/lib/protocol/helpers/sanity-checker.ts b/lib/protocol/helpers/sanity-checker.ts index 7a0a8c54eb..6dfd0829c5 100644 --- a/lib/protocol/helpers/sanity-checker.ts +++ b/lib/protocol/helpers/sanity-checker.ts @@ -12,3 +12,45 @@ export const setMaxPositiveTokenRebase = async (ctx: ProtocolContext, maxPositiv await sanityChecker.connect(agent).revokeRole(MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE, agent.address); return initialMaxPositiveTokenRebase; }; + +export const 
updateOracleReportLimits = async ( + ctx: ProtocolContext, + patch: Partial< + Awaited> + >, +) => { + const { oracleReportSanityChecker: sanityChecker } = ctx.contracts; + const agent = await ctx.getSigner("agent"); + const currentLimits = await sanityChecker.getOracleReportLimits(); + const secondOpinionOracle = await sanityChecker.secondOpinionOracle(); + const role = await sanityChecker.ALL_LIMITS_MANAGER_ROLE(); + const nextLimits = { + exitedEthAmountPerDayLimit: patch.exitedEthAmountPerDayLimit ?? currentLimits.exitedEthAmountPerDayLimit, + appearedEthAmountPerDayLimit: patch.appearedEthAmountPerDayLimit ?? currentLimits.appearedEthAmountPerDayLimit, + annualBalanceIncreaseBPLimit: patch.annualBalanceIncreaseBPLimit ?? currentLimits.annualBalanceIncreaseBPLimit, + simulatedShareRateDeviationBPLimit: + patch.simulatedShareRateDeviationBPLimit ?? currentLimits.simulatedShareRateDeviationBPLimit, + maxBalanceExitRequestedPerReportInEth: + patch.maxBalanceExitRequestedPerReportInEth ?? currentLimits.maxBalanceExitRequestedPerReportInEth, + maxEffectiveBalanceWeightWCType01: + patch.maxEffectiveBalanceWeightWCType01 ?? currentLimits.maxEffectiveBalanceWeightWCType01, + maxEffectiveBalanceWeightWCType02: + patch.maxEffectiveBalanceWeightWCType02 ?? currentLimits.maxEffectiveBalanceWeightWCType02, + maxItemsPerExtraDataTransaction: + patch.maxItemsPerExtraDataTransaction ?? currentLimits.maxItemsPerExtraDataTransaction, + maxNodeOperatorsPerExtraDataItem: + patch.maxNodeOperatorsPerExtraDataItem ?? currentLimits.maxNodeOperatorsPerExtraDataItem, + requestTimestampMargin: patch.requestTimestampMargin ?? currentLimits.requestTimestampMargin, + maxPositiveTokenRebase: patch.maxPositiveTokenRebase ?? currentLimits.maxPositiveTokenRebase, + maxCLBalanceDecreaseBP: patch.maxCLBalanceDecreaseBP ?? currentLimits.maxCLBalanceDecreaseBP, + clBalanceOraclesErrorUpperBPLimit: + patch.clBalanceOraclesErrorUpperBPLimit ?? 
currentLimits.clBalanceOraclesErrorUpperBPLimit, + consolidationEthAmountPerDayLimit: + patch.consolidationEthAmountPerDayLimit ?? currentLimits.consolidationEthAmountPerDayLimit, + exitedValidatorEthAmountLimit: patch.exitedValidatorEthAmountLimit ?? currentLimits.exitedValidatorEthAmountLimit, + }; + + await sanityChecker.connect(agent).grantRole(role, agent.address); + await sanityChecker.connect(agent).setOracleReportLimits(nextLimits, secondOpinionOracle); + await sanityChecker.connect(agent).revokeRole(role, agent.address); +}; diff --git a/lib/protocol/helpers/share-rate.ts b/lib/protocol/helpers/share-rate.ts index 88a56495f2..b13832b187 100644 --- a/lib/protocol/helpers/share-rate.ts +++ b/lib/protocol/helpers/share-rate.ts @@ -28,16 +28,14 @@ async function changeInternalEther(ctx: ProtocolContext, internalEtherDelta: big const accountingSigner = await impersonate(accounting, ether("1")); - const { beaconValidators, beaconBalance } = await lido.getBeaconStat(); - - await lido - .connect(accountingSigner) - .processClStateUpdate( - await getCurrentBlockTimestamp(), - beaconValidators, - beaconValidators, - beaconBalance + internalEtherDelta, - ); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = await lido.getBalanceStats(); + const beaconBalance = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport; + + await lido.connect(accountingSigner).processClStateUpdate( + await getCurrentBlockTimestamp(), + beaconBalance + internalEtherDelta, // new clValidatorsBalance + 0n, // new clPendingBalance + ); } export const ensureExactShareRate = async (ctx: ProtocolContext, targetShareRate: bigint) => { diff --git a/lib/protocol/helpers/staking.ts b/lib/protocol/helpers/staking.ts index 0354ef00ce..c27c00d5de 100644 --- a/lib/protocol/helpers/staking.ts +++ b/lib/protocol/helpers/staking.ts @@ -1,16 +1,33 @@ import { ethers, ZeroAddress } from "ethers"; -import { BigIntMath, certainAddress, ether, impersonate, log } from "lib"; 
-import { TOTAL_BASIS_POINTS } from "lib/constants"; - -import { ZERO_HASH } from "test/deploy"; +import { + BigIntMath, + certainAddress, + ether, + impersonate, + log, + ONE_GWEI, + StakingModuleStatus, + TOTAL_BASIS_POINTS, +} from "lib"; + +import { ZERO_HASH } from "test/suite"; import { ProtocolContext } from "../types"; -import { report } from "./accounting"; +import { report, submitReportDataWithConsensusAndEmptyExtraData } from "./accounting"; const DEPOSIT_SIZE = ether("32"); +export type StakingModuleBalances = { + validatorsBalanceGwei: bigint; +}; + +export type ModuleAccountingReportParams = { + stakingModuleIdsWithUpdatedBalance: bigint[]; + validatorBalancesGweiByStakingModule: bigint[]; +}; + export const unpauseStaking = async (ctx: ProtocolContext) => { const { lido } = ctx.contracts; if (await lido.isStakingPaused()) { @@ -21,12 +38,6 @@ export const unpauseStaking = async (ctx: ProtocolContext) => { } }; -export enum StakingModuleStatus { - Active = 0, - DepositsPaused = 1, - Stopped = 2, -} - export const getStakingModuleStatuses = async ( ctx: ProtocolContext, ): Promise<{ [moduleId: number]: StakingModuleStatus }> => { @@ -54,6 +65,41 @@ export const getStakingModuleManagerSigner = async (ctx: ProtocolContext) => { return await impersonate(await stakingRouter.getRoleMember(role, 0n), ether("100000")); }; +export const getStakingModuleBalances = async ( + ctx: ProtocolContext, + moduleId: bigint, +): Promise => { + const [validatorsBalanceGwei] = await ctx.contracts.stakingRouter.getStakingModuleStateAccounting(moduleId); + return { validatorsBalanceGwei }; +}; + +const buildModuleAccountingReportParams = async ( + ctx: ProtocolContext, + { + validatorsDeltaGweiByModule = new Map(), + }: { + validatorsDeltaGweiByModule?: Map; + } = {}, +): Promise => { + const { stakingRouter } = ctx.contracts; + + const stakingModuleIds = await stakingRouter.getStakingModuleIds(); + // Router balance reporting now requires all registered modules in router 
order. + const stakingModuleIdsWithUpdatedBalance = [...stakingModuleIds]; + const validatorBalancesGweiByStakingModule: bigint[] = []; + + for (const moduleId of stakingModuleIds) { + const [currentValidatorsBalanceGwei] = await stakingRouter.getStakingModuleStateAccounting(moduleId); + const validatorsBalanceGwei = currentValidatorsBalanceGwei + (validatorsDeltaGweiByModule.get(moduleId) ?? 0n); + validatorBalancesGweiByStakingModule.push(validatorsBalanceGwei); + } + + return { + stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule, + }; +}; + export const setModuleStakeShareLimit = async (ctx: ProtocolContext, moduleId: bigint, stakeShareLimit: bigint) => { const { stakingRouter } = ctx.contracts; @@ -120,13 +166,127 @@ export const setStakingLimit = async ( await acl.connect(agentSigner).revokePermission(agentAddress, lido.address, role); }; +export const ensureCanDeposit = async (ctx: ProtocolContext) => { + const { accountingOracle } = ctx.contracts; + const { extraDataSubmitted } = await accountingOracle.getProcessingState(); + const getLastProcessingRefSlot = await accountingOracle.getLastProcessingRefSlot(); + + if (!extraDataSubmitted && getLastProcessingRefSlot != 0n) { + // send dummy report to shift last processing ref slot and unlock deposits + await report(ctx, { skipWithdrawals: true }); + } +}; + +const depositValidatorsViaRouter = async (ctx: ProtocolContext, moduleId: bigint, depositsCount: bigint) => { + const { depositSecurityModule, stakingRouter } = ctx.contracts; + + const managerSigner = await getStakingModuleManagerSigner(ctx); + if (!managerSigner) { + throw new Error("staking module manager signer is required for deposit setup"); + } + + const moduleConfig = await stakingRouter.getStakingModule(moduleId); + const shouldRestoreMaxDepositsPerBlock = moduleConfig.maxDepositsPerBlock > depositsCount; + + if (shouldRestoreMaxDepositsPerBlock) { + await stakingRouter + .connect(managerSigner) + .updateStakingModule( + 
moduleId, + moduleConfig.stakeShareLimit, + moduleConfig.priorityExitShareThreshold, + moduleConfig.stakingModuleFee, + moduleConfig.treasuryFee, + depositsCount, + moduleConfig.minDepositBlockDistance, + ); + } + + try { + const dsmSigner = await impersonate(await depositSecurityModule.getAddress(), ether("1")); + await stakingRouter.connect(dsmSigner).deposit(moduleId, ZERO_HASH); + } finally { + if (shouldRestoreMaxDepositsPerBlock) { + await stakingRouter + .connect(managerSigner) + .updateStakingModule( + moduleId, + moduleConfig.stakeShareLimit, + moduleConfig.priorityExitShareThreshold, + moduleConfig.stakingModuleFee, + moduleConfig.treasuryFee, + moduleConfig.maxDepositsPerBlock, + moduleConfig.minDepositBlockDistance, + ); + } + } +}; + +export const depositValidatorsWithoutReport = async (ctx: ProtocolContext, moduleId: bigint, depositsCount: bigint) => { + const { lido } = ctx.contracts; + + const ethToDeposit = depositsCount * DEPOSIT_SIZE; + const depositableEther = await lido.getDepositableEther(); + if (depositableEther < ethToDeposit) { + throw new Error(`Not enough depositable ether for staking module ${moduleId}`); + } + + await ensureCanDeposit(ctx); + await setModuleStakeShareLimit(ctx, moduleId, TOTAL_BASIS_POINTS); + + const { validatorsBalanceGwei: validatorsBefore } = await getStakingModuleBalances(ctx, moduleId); + const depositedBefore = (await lido.getBalanceStats()).depositedSinceLastReport; + + await depositValidatorsViaRouter(ctx, moduleId, depositsCount); + + const { validatorsBalanceGwei: validatorsAfter } = await getStakingModuleBalances(ctx, moduleId); + const { depositedSinceLastReport } = await lido.getBalanceStats(); + + if (depositedSinceLastReport - depositedBefore !== ethToDeposit) { + throw new Error(`Deposited ${depositedSinceLastReport - depositedBefore} wei, expected ${ethToDeposit}`); + } + + if (validatorsAfter !== validatorsBefore) { + throw new Error(`Validators balance changed before report: ${validatorsAfter} != 
${validatorsBefore}`); + } +}; + +export const getCurrentModuleAccountingReportParams = async ( + ctx: ProtocolContext, +): Promise => { + return buildModuleAccountingReportParams(ctx); +}; + +export const seedProtocolPendingBaseline = async ( + ctx: ProtocolContext, + moduleId: bigint, + depositsCount: bigint = 1n, +) => { + await depositValidatorsWithoutReport(ctx, moduleId, depositsCount); + + const { depositedSinceLastReport } = await ctx.contracts.lido.getBalanceStats(); + const { data } = await report(ctx, { + clDiff: depositedSinceLastReport, + dryRun: true, + excludeVaultsBalances: true, + skipWithdrawals: true, + ...(await buildModuleAccountingReportParams(ctx)), + }); + + const pendingBaselineGwei = depositedSinceLastReport / ONE_GWEI; + return submitReportDataWithConsensusAndEmptyExtraData(ctx, { + ...data, + clValidatorsBalanceGwei: BigInt(data.clValidatorsBalanceGwei) - pendingBaselineGwei, + clPendingBalanceGwei: pendingBaselineGwei, + }); +}; + export const depositAndReportValidators = async (ctx: ProtocolContext, moduleId: bigint, depositsCount: bigint) => { - const { lido, depositSecurityModule, withdrawalQueue, stakingRouter } = ctx.contracts; + const { lido, withdrawalQueue, stakingRouter } = ctx.contracts; const ethToDeposit = depositsCount * DEPOSIT_SIZE; const submitValue = (await withdrawalQueue.unfinalizedStETH()) + ethToDeposit; const ethHolder = await impersonate(certainAddress("provision:eth:whale"), submitValue + ether("1")); - const dsmSigner = await impersonate(depositSecurityModule.address, ether("100000")); const managerSigner = await getStakingModuleManagerSigner(ctx); await lido.connect(ethHolder).submit(ZeroAddress, { value: submitValue }); @@ -137,7 +297,7 @@ export const depositAndReportValidators = async (ctx: ProtocolContext, moduleId: } const isMaxDepositsCountNotEnough = async () => { - const maxDepositsCount = await stakingRouter.getStakingModuleMaxDepositsCount(moduleId, depositableEther); + const maxDepositsCount = await 
stakingRouter.getStakingModuleMaxDepositsCount(moduleId, ethToDeposit); return maxDepositsCount < depositsCount; }; @@ -159,12 +319,19 @@ export const depositAndReportValidators = async (ctx: ProtocolContext, moduleId: throw new Error(`Not enough max deposits count for staking module ${moduleId}`); } - const numDepositedBefore = (await lido.getBeaconStat()).depositedValidators; + const getTotalDepositedValidators = async () => { + const moduleDigests = await stakingRouter.getAllStakingModuleDigests(); + return moduleDigests.reduce((sum, digest) => sum + digest.summary.totalDepositedValidators, 0n); + }; + + const numDepositedBefore = await getTotalDepositedValidators(); + + await ensureCanDeposit(ctx); - // Deposit validators - await lido.connect(dsmSigner).deposit(depositsCount, moduleId, ZERO_HASH); + // Deposit validators via StakingRouter (DSM calls SR which pulls ETH from Lido) + await depositValidatorsViaRouter(ctx, moduleId, depositsCount); - const numDepositedAfter = (await lido.getBeaconStat()).depositedValidators; + const numDepositedAfter = await getTotalDepositedValidators(); if (numDepositedAfter !== numDepositedBefore + depositsCount) { throw new Error(`Deposited ${numDepositedAfter} validators, expected ${numDepositedBefore + depositsCount}`); @@ -177,28 +344,30 @@ export const depositAndReportValidators = async (ctx: ProtocolContext, moduleId: await stakingRouter.connect(managerSigner).setStakingModuleStatus(mId, originalStatus); } - const before = await lido.getBeaconStat(); + const before = await lido.getBalanceStats(); log.debug("Validators on beacon chain before provisioning", { "Module ID to deposit": moduleId, - "Deposited": before.depositedValidators, - "Total": before.beaconValidators, - "Balance": before.beaconBalance, + "Deposited": before.depositedSinceLastReport, + "Active": before.clValidatorsBalanceAtLastReport, + "Pending": before.clPendingBalanceAtLastReport, }); // Add new validators to beacon chain + const 
validatorsDeltaGweiByModule = new Map([[moduleId, ethToDeposit / ONE_GWEI]]); await report(ctx, { clDiff: ethToDeposit, clAppearedValidators: depositsCount, skipWithdrawals: true, + ...(await buildModuleAccountingReportParams(ctx, { validatorsDeltaGweiByModule })), }); - const after = await lido.getBeaconStat(); + const after = await lido.getBalanceStats(); log.debug("Validators on beacon chain after depositing", { "Module ID deposited": moduleId, - "Deposited": after.depositedValidators, - "Total": after.beaconValidators, - "Balance": after.beaconBalance, + "Deposited": after.depositedSinceLastReport, + "Active": after.clValidatorsBalanceAtLastReport, + "Pending": after.clPendingBalanceAtLastReport, }); }; diff --git a/lib/protocol/networks.ts b/lib/protocol/networks.ts index f67bf96721..1ea292c060 100644 --- a/lib/protocol/networks.ts +++ b/lib/protocol/networks.ts @@ -81,6 +81,9 @@ const defaultEnv = { stakingVaultFactory: "STAKING_VAULT_FACTORY_ADDRESS", stakingVaultBeacon: "STAKING_VAULT_BEACON_ADDRESS", validatorConsolidationRequests: "VALIDATOR_CONSOLIDATION_REQUESTS_ADDRESS", + // consolidation + consolidationBus: "CONSOLIDATION_BUS_ADDRESS", + consolidationMigrator: "CONSOLIDATION_MIGRATOR_ADDRESS", } as ProtocolNetworkItems; const getPrefixedEnv = (prefix: string, obj: ProtocolNetworkItems) => @@ -101,6 +104,8 @@ async function getLocalNetworkConfig(network: string, source: "fork" | "scratch" stakingVaultBeacon: config[Sk.stakingVaultBeacon].address, operatorGrid: config[Sk.operatorGrid].proxy.address, validatorConsolidationRequests: config[Sk.validatorConsolidationRequests].address, + consolidationBus: config[Sk.consolidationBus].proxy.address, + consolidationMigrator: config[Sk.consolidationMigrator].proxy.address, }; return new ProtocolNetworkConfig(getPrefixedEnv(network.toUpperCase(), defaultEnv), defaults, `${network}-${source}`); } @@ -146,6 +151,7 @@ async function getForkingNetworkConfig(): Promise { export async function 
getNetworkConfig(network: string): Promise { switch (network) { case "hardhat": + case "localhost": if (getMode() === "scratch") { return getLocalNetworkConfig(network, "scratch"); } diff --git a/lib/protocol/provision.ts b/lib/protocol/provision.ts index 45f5f88527..36902cdc7f 100644 --- a/lib/protocol/provision.ts +++ b/lib/protocol/provision.ts @@ -1,3 +1,5 @@ +import { ZeroAddress } from "ethers"; + import { certainAddress, ether, impersonate, log } from "lib"; import { ensureEIP4788BeaconBlockRootContractPresent, @@ -20,6 +22,30 @@ import { ProtocolContext } from "./types"; let alreadyProvisioned = false; +const ensureNonZeroDepositsReserveTarget = async (ctx: ProtocolContext, target: bigint = ether("8")) => { + const { acl, lido } = ctx.contracts; + if ((await lido.getDepositsReserveTarget()) > 0n) return; + + const role = await lido.BUFFER_RESERVE_MANAGER_ROLE(); + const agent = await ctx.getSigner("agent"); + const hasRole = await acl["hasPermission(address,address,bytes32)"](agent.address, lido.address, role); + if (!hasRole) { + const permissionManager = await acl.getPermissionManager(lido.address, role); + if (permissionManager === ZeroAddress) { + const voting = await ctx.getSigner("voting"); + await acl.connect(voting).createPermission(agent.address, lido.address, role, agent.address); + } else { + if (permissionManager.toLowerCase() !== agent.address.toLowerCase()) { + throw new Error(`BUFFER_RESERVE_MANAGER_ROLE manager must be agent, got: ${permissionManager}`); + } + await acl.connect(agent).grantPermission(agent.address, lido.address, role); + } + } + + await lido.connect(agent).setDepositsReserveTarget(target); + log.debug("Set non-zero deposits reserve target", { target: target.toString() }); +}; + /** * In order to make the protocol fully operational from scratch deploy, the additional steps are required: */ @@ -56,6 +82,7 @@ export const provision = async (ctx: ProtocolContext) => { // await ethHolder.sendTransaction({ to: 
ctx.contracts.lido.address, value: ether("100000") }); await ensureStakeLimit(ctx); + await ensureNonZeroDepositsReserveTarget(ctx); await ensureDsmGuardians(ctx, 3n, 2n); diff --git a/lib/protocol/types.ts b/lib/protocol/types.ts index c4de561d2e..2d61740e78 100644 --- a/lib/protocol/types.ts +++ b/lib/protocol/types.ts @@ -7,6 +7,9 @@ import { AccountingOracle, ACL, Burner, + ConsolidationBus, + ConsolidationGateway, + ConsolidationMigrator, DepositSecurityModule, HashConsensus, ICSModule, @@ -56,6 +59,9 @@ export type ProtocolNetworkItems = { validatorExitDelayVerifier: string; validatorsExitBusOracle: string; triggerableWithdrawalsGateway: string; + consolidationGateway: string; + consolidationBus: string; + consolidationMigrator: string; withdrawalQueue: string; withdrawalVault: string; oracleDaemonConfig: string; @@ -102,6 +108,9 @@ export interface ContractTypes { ICSModule: ICSModule; WstETH: WstETH; TriggerableWithdrawalsGateway: TriggerableWithdrawalsGateway; + ConsolidationGateway: ConsolidationGateway; + ConsolidationBus: ConsolidationBus; + ConsolidationMigrator: ConsolidationMigrator; VaultFactory: VaultFactory; UpgradeableBeacon: UpgradeableBeacon; VaultHub: VaultHub; @@ -136,6 +145,9 @@ export type CoreContracts = { oracleDaemonConfig: LoadedContract; wstETH: LoadedContract; triggerableWithdrawalsGateway: LoadedContract; + consolidationGateway: LoadedContract; + consolidationBus: LoadedContract; + consolidationMigrator: LoadedContract; }; export type AragonContracts = { diff --git a/lib/state-file.ts b/lib/state-file.ts index b16dda074f..608127a39c 100644 --- a/lib/state-file.ts +++ b/lib/state-file.ts @@ -98,6 +98,9 @@ export enum Sk { // Triggerable withdrawals validatorExitDelayVerifier = "validatorExitDelayVerifier", triggerableWithdrawalsGateway = "triggerableWithdrawalsGateway", + consolidationGateway = "consolidationGateway", + consolidationBus = "consolidationBus", + consolidationMigrator = "consolidationMigrator", // Vaults 
predepositGuarantee = "predepositGuarantee", stakingVaultImplementation = "stakingVaultImplementation", @@ -110,10 +113,14 @@ export enum Sk { operatorGrid = "operatorGrid", validatorConsolidationRequests = "validatorConsolidationRequests", lazyOracle = "lazyOracle", + topUpGateway = "topUpGateway", v3TemporaryAdmin = "v3TemporaryAdmin", // Dual Governance dgDualGovernance = "dg:dualGovernance", dgEmergencyProtectedTimelock = "dg:emergencyProtectedTimelock", + depositsTempStorage = "depositsTempStorage", + beaconChainDepositor = "beaconChainDepositor", + srLib = "srLib", // Easy Track easyTrack = "easyTrack", easyTrackEVMScriptExecutor = "easyTrackEVMScriptExecutor", @@ -152,6 +159,7 @@ export function getAddress(contractKey: Sk, state: DeploymentState): string { case Sk.vaultHub: case Sk.dgDualGovernance: case Sk.dgEmergencyProtectedTimelock: + case Sk.topUpGateway: return state[contractKey].proxy.address; case Sk.apmRegistryFactory: case Sk.callsScript: @@ -180,10 +188,16 @@ export function getAddress(contractKey: Sk, state: DeploymentState): string { case Sk.tokenRebaseNotifierV3: case Sk.validatorExitDelayVerifier: case Sk.triggerableWithdrawalsGateway: + case Sk.consolidationGateway: + case Sk.consolidationBus: + case Sk.consolidationMigrator: case Sk.stakingVaultFactory: case Sk.minFirstAllocationStrategy: case Sk.validatorConsolidationRequests: case Sk.v3VoteScript: + case Sk.depositsTempStorage: + case Sk.beaconChainDepositor: + case Sk.vaultsAdapter: case Sk.easyTrack: case Sk.gateSealFactory: return state[contractKey].address; diff --git a/lib/storage.ts b/lib/storage.ts index 627559b018..398c935fcf 100644 --- a/lib/storage.ts +++ b/lib/storage.ts @@ -4,6 +4,13 @@ import { getStorageAt } from "@nomicfoundation/hardhat-network-helpers"; import { streccak } from "lib"; +const MASK_128_BITS = (1n << 128n) - 1n; + +export type Uint128Pair = { + low: bigint; + high: bigint; +}; + /** * @dev Get the storage at a given position for a given contract * @param 
contract - The contract to get the storage at @@ -13,3 +20,29 @@ import { streccak } from "lib"; export async function getStorageAtPosition(contract: AddressLike, positionTag: string): Promise { return getStorageAt(await resolveAddress(contract), streccak(positionTag)); } + +/** + * @dev Splits a uint256 slot value into low/high uint128 parts. + * @param value - Raw value returned by getStorageAtPosition (hex string or bigint) + * @returns Parsed low and high 128-bit values + */ +export function splitStorageUint256ToUint128Pair(value: string | bigint): Uint128Pair { + const rawValue = typeof value === "bigint" ? value : BigInt(value); + return { + low: rawValue & MASK_128_BITS, + high: rawValue >> 128n, + }; +} + +/** + * @dev Reads storage at a tagged position and returns low/high uint128 parts. + * @param contract - The contract to read storage from + * @param positionTag - The tag of the position to read + * @returns Parsed low and high 128-bit values + */ +export async function getStorageAtPositionAsUint128Pair( + contract: AddressLike, + positionTag: string, +): Promise { + return splitStorageUint256ToUint128Pair(await getStorageAtPosition(contract, positionTag)); +} diff --git a/lib/top-ups.ts b/lib/top-ups.ts new file mode 100644 index 0000000000..ca901b505a --- /dev/null +++ b/lib/top-ups.ts @@ -0,0 +1,45 @@ +import { ethers } from "hardhat"; + +import { SSZBLSHelpers, SSZValidatorsMerkleTree } from "typechain-types"; + +import { generateValidator } from "lib"; + +const DEFAULT_GI_VALIDATOR_0 = "0x0000000000000000000000000000000000000000000000000096000000000028"; + +export const prepareLocalMerkleTree = async (giValidator0: string = DEFAULT_GI_VALIDATOR_0) => { + // deploy helper tree validators+balances + const stateTree: SSZValidatorsMerkleTree = await ethers.deployContract("SSZValidatorsMerkleTree", [giValidator0], {}); + + // generate first validator + const firstValidator = generateValidator(); + + await 
stateTree.addValidatorsLeaf(firstValidator.container); + + // Index of first validator leafCount-1 + const validatorsLeafCount = await stateTree.validatorsLeafCount(); + + const firstValidatorLeafIndex = validatorsLeafCount - 1n; + + // generalized for validators[firstValidatorLeafIndex] + const gIFirstValidator = await stateTree.getValidatorGeneralizedIndex(firstValidatorLeafIndex); + if (BigInt(gIFirstValidator) >> 8n === 0n) throw new Error("Broken GIndex setup"); + + const addValidator = async (validator: SSZBLSHelpers.ValidatorStruct) => { + await stateTree.addValidatorsLeaf(validator); + + const newValidatorsLeafCount = await stateTree.validatorsLeafCount(); + const validatorIndex = Number(newValidatorsLeafCount - 1n - firstValidatorLeafIndex); + + return { + validatorIndex, + }; + }; + + return { + stateTree, + gIFirstValidator, + firstValidatorLeafIndex, + firstValidator, + addValidator, + }; +}; diff --git a/lib/wc.ts b/lib/wc.ts new file mode 100644 index 0000000000..d392ca3207 --- /dev/null +++ b/lib/wc.ts @@ -0,0 +1,36 @@ +import { + MAX_EFFECTIVE_BALANCE_WC_TYPE_01, + MAX_EFFECTIVE_BALANCE_WC_TYPE_02, + WithdrawalCredentialsType, +} from "./constants"; +import { de0x, en0x, randomString } from "./string"; + +/** + * Returns the max effective balance for the given withdrawal credentials type + */ +export const wcTypeMaxEB = (withdrawalType: WithdrawalCredentialsType): bigint => { + switch (withdrawalType) { + case WithdrawalCredentialsType.WC0x01: + return MAX_EFFECTIVE_BALANCE_WC_TYPE_01; + case WithdrawalCredentialsType.WC0x02: + return MAX_EFFECTIVE_BALANCE_WC_TYPE_02; + default: { + const _exhaustive: never = withdrawalType; + return _exhaustive; + } + } +}; + +/** + * Generates random Withdrawal Credentials of type 0x01 + */ +export const randomWCType1 = () => { + return en0x(WithdrawalCredentialsType.WC0x01) + de0x(randomString(31)); +}; + +/** + * Generates random Withdrawal Credentials of type 0x02 + */ +export const randomWCType2 = () => { 
return en0x(WithdrawalCredentialsType.WC0x02) + de0x(randomString(31)); +}; diff --git a/package.json b/package.json index 7dc605ffff..8440b8a2ba 100644 --- a/package.json +++ b/package.json @@ -18,10 +18,10 @@ "format": "prettier . --check", "format:fix": "prettier . --write", "check": "yarn lint && yarn format && yarn typecheck", - "test": "hardhat test test/**/*.test.ts --parallel", + "test": "NODE_OPTIONS='--max-old-space-size=10240' hardhat test test/**/*.test.ts --parallel", "test:bls:blst": "SKIP_INTERFACES_CHECK=true SKIP_LINT_SOLIDITY=true SKIP_GAS_REPORT=true hardhat test test/common/bls.blst.e2e.fuzz.test.ts", "test:forge": "forge test", - "test:coverage": "COVERAGE=unit hardhat coverage", + "test:coverage": "NODE_OPTIONS='--max-old-space-size=16384' COVERAGE=unit hardhat coverage", "test:coverage:integration": "COVERAGE=integration MODE=scratch hardhat coverage", "test:coverage:full": "COVERAGE=full MODE=scratch hardhat coverage", "test:sequential": "hardhat test test/**/*.test.ts", @@ -39,6 +39,7 @@ "test:integration:scratch:trace": "MODE=scratch hardhat test test/integration/**/*.ts --trace --disabletracer", "test:integration:scratch:fulltrace": "MODE=scratch hardhat test test/integration/**/*.ts --fulltrace --disabletracer", "test:integration:fork:local": "MODE=scratch hardhat test test/integration/**/*.ts --network local", + "scratch:deploy:localhost": "SKIP_INTERFACES_CHECK=true SKIP_CONTRACT_SIZE=true SKIP_GAS_REPORT=true GENESIS_TIME=1639659600 GAS_PRIORITY_FEE=1 GAS_MAX_FEE=100 DEPLOYER=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 MODE=scratch hardhat run scripts/scratch/scratch-deploy.ts --network localhost", "test:fork:pdg-validator": "MODE=forking hardhat test test/integration/vaults/scenario/pdg-specific-validator.integration.ts", "validate:configs": "yarn hardhat validate-configs", "typecheck": "tsc --noEmit", diff --git a/remappings.txt b/remappings.txt new file mode 100644 index 0000000000..4b57a1c03c --- /dev/null +++ b/remappings.txt @@ 
-0,0 +1,9 @@ +@aragon/=node_modules/@aragon/ +@openzeppelin/=node_modules/@openzeppelin/ +ens/=node_modules/@aragon/os/contracts/lib/ens/ +forge-std/=foundry/lib/forge-std/src/ +hardhat/=node_modules/hardhat/ +math/=node_modules/@aragon/os/contracts/lib/math/ +misc/=node_modules/@aragon/os/contracts/lib/misc/ +openzeppelin-solidity/=node_modules/openzeppelin-solidity/ +token/=node_modules/@aragon/os/contracts/lib/token/ diff --git a/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc2.ts b/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc2.ts index 3096a7067c..d0763af7a8 100644 --- a/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc2.ts +++ b/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc2.ts @@ -118,6 +118,7 @@ export async function main(): Promise { oracleDaemonConfig: await locator.oracleDaemonConfig(), validatorExitDelayVerifier: await locator.validatorExitDelayVerifier(), triggerableWithdrawalsGateway: await locator.triggerableWithdrawalsGateway(), + consolidationGateway: await locator.consolidationGateway(), accounting: await locator.accounting(), predepositGuarantee: await locator.predepositGuarantee(), wstETH: wstethAddress, @@ -125,6 +126,7 @@ export async function main(): Promise { vaultFactory: newVaultFactoryAddress, lazyOracle: await locator.lazyOracle(), operatorGrid: await locator.operatorGrid(), + topUpGateway: await locator.topUpGateway(), }; const lidoLocatorImpl = await deployImplementation(Sk.lidoLocator, "LidoLocator", deployer, [locatorConfig]); const newLocatorAddress = await lidoLocatorImpl.getAddress(); diff --git a/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc3.ts b/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc3.ts index 04bf18628a..3df8d8aa9f 100644 --- a/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc3.ts +++ 
b/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc3.ts @@ -108,6 +108,8 @@ export async function main(): Promise { vaultFactory: newVaultFactoryAddress, lazyOracle: await locator.lazyOracle(), operatorGrid: await locator.operatorGrid(), + consolidationGateway: await locator.consolidationGateway(), + topUpGateway: await locator.topUpGateway(), }; const lidoLocatorImpl = await deployImplementation(Sk.lidoLocator, "LidoLocator", deployer, [locatorConfig]); const newLocatorAddress = await lidoLocatorImpl.getAddress(); diff --git a/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc5.ts b/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc5.ts index 6562b48993..5e25aa7718 100644 --- a/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc5.ts +++ b/scripts/archive/stvaults-hoodi/upgrade/steps/0100-upgrade-hoodi-to-v3-rc5.ts @@ -103,6 +103,8 @@ export async function main(): Promise { vaultFactory: newVaultFactoryAddress, lazyOracle: await locator.lazyOracle(), operatorGrid: await locator.operatorGrid(), + consolidationGateway: await locator.consolidationGateway(), + topUpGateway: await locator.topUpGateway(), }; const lidoLocatorImpl = await deployImplementation(Sk.lidoLocator, "LidoLocator", deployer, [locatorConfig]); const newLocatorAddress = await lidoLocatorImpl.getAddress(); diff --git a/scripts/archive/stvaults/steps/0100-deploy-v3-contracts.ts b/scripts/archive/stvaults/steps/0100-deploy-v3-contracts.ts index 891a3e6f54..e29ca6db60 100644 --- a/scripts/archive/stvaults/steps/0100-deploy-v3-contracts.ts +++ b/scripts/archive/stvaults/steps/0100-deploy-v3-contracts.ts @@ -62,6 +62,8 @@ export async function main() { const accounting = await deployBehindOssifiableProxy(Sk.accounting, "Accounting", proxyContractsOwner, deployer, [ locatorAddress, lidoAddress, + Number(chainSpec.secondsPerSlot), + Number(chainSpec.genesisTime), ]); // @@ -311,6 +313,20 @@ export async function main() { 
]); console.log("VaultFactory address", await vaultFactory.getAddress()); + const consolidationGateway = await deployWithoutProxy(Sk.consolidationGateway, "ConsolidationGateway", deployer, [ + agentAddress, // TODO: check + locator.address, + // ToDo: Replace dummy parameters with real ones + 10, // maxConsolidationRequestsLimit, + 1, // consolidationsPerFrame, + 60, // frameDurationInSec + pdgDeployParams.gIndex, // gIFirstValidatorPrev + pdgDeployParams.gIndexAfterChange, // gIFirstValidatorCurr + pdgDeployParams.changeSlot, // pivotSlot + ]); + + console.log("ConsolidationGateway address", await consolidationGateway.getAddress()); + // // Deploy new LidoLocator implementation // @@ -330,6 +346,7 @@ export async function main() { oracleDaemonConfig: await locator.oracleDaemonConfig(), validatorExitDelayVerifier: getAddress(Sk.validatorExitDelayVerifier, state), triggerableWithdrawalsGateway: getAddress(Sk.triggerableWithdrawalsGateway, state), + consolidationGateway: consolidationGateway.address, accounting: accounting.address, predepositGuarantee: predepositGuarantee.address, wstETH: wstethAddress, @@ -337,6 +354,7 @@ export async function main() { vaultFactory: vaultFactory.address, lazyOracle: lazyOracle.address, operatorGrid: operatorGrid.address, + topUpGateway: getAddress(Sk.topUpGateway, state), }; const lidoLocatorImpl = await deployImplementation(Sk.lidoLocator, "LidoLocator", deployer, [locatorConfig]); diff --git a/scripts/defaults/local-devnet-defaults.json b/scripts/defaults/local-devnet-defaults.json index 3bacca983d..57f21c30a9 100644 --- a/scripts/defaults/local-devnet-defaults.json +++ b/scripts/defaults/local-devnet-defaults.json @@ -111,19 +111,20 @@ }, "oracleReportSanityChecker": { "deployParameters": { - "exitedValidatorsPerDayLimit": 1500, - "appearedValidatorsPerDayLimit": 1500, + "exitedEthAmountPerDayLimit": 57600, + "appearedEthAmountPerDayLimit": 57600, "deprecatedOneOffCLBalanceDecreaseBPLimit": 500, "annualBalanceIncreaseBPLimit": 
1000, "simulatedShareRateDeviationBPLimit": 250, - "maxValidatorExitRequestsPerReport": 2000, + "maxBalanceExitRequestedPerReportInEth": 64000, "maxItemsPerExtraDataTransaction": 8, "maxNodeOperatorsPerExtraDataItem": 24, "requestTimestampMargin": 128, "maxPositiveTokenRebase": 5000000, - "initialSlashingAmountPWei": 1000, - "inactivityPenaltiesAmountPWei": 101, - "clBalanceOraclesErrorUpperBPLimit": 50 + "maxCLBalanceDecreaseBP": 360, + "clBalanceOraclesErrorUpperBPLimit": 50, + "consolidationEthAmountPerDayLimit": 57600, + "exitedValidatorEthAmountLimit": 32 } }, "oracleDaemonConfig": { diff --git a/scripts/scratch/deploy-params-testnet.toml b/scripts/scratch/deploy-params-testnet.toml index 86e7c04b48..5138e680d9 100644 --- a/scripts/scratch/deploy-params-testnet.toml +++ b/scripts/scratch/deploy-params-testnet.toml @@ -102,19 +102,22 @@ pauseIntentValidityPeriodBlocks = 6646 # Pause intent validity period in blocks # Oracle report sanity checker configuration [oracleReportSanityChecker] -exitedValidatorsPerDayLimit = 1500 # Exited validators per day limit -appearedValidatorsPerDayLimit = 1500 # Appeared validators per day limit +exitedEthAmountPerDayLimit = 57600 # Exited ETH amount per day limit +appearedEthAmountPerDayLimit = 57600 # Appeared ETH amount per day limit deprecatedOneOffCLBalanceDecreaseBPLimit = 500 # Deprecated one-off CL balance decrease limit (BP) annualBalanceIncreaseBPLimit = 1000 # Annual balance increase limit (BP) simulatedShareRateDeviationBPLimit = 250 # Simulated share rate deviation limit (BP) -maxValidatorExitRequestsPerReport = 2000 # Maximum validator exit requests per report +maxBalanceExitRequestedPerReportInEth = 64000 # Maximum exit ETH per report +maxEffectiveBalanceWeightWCType01 = 32 # maxEB equivalent weight for WC type 1 +maxEffectiveBalanceWeightWCType02 = 2048 # maxEB equivalent weight for WC type 2 maxItemsPerExtraDataTransaction = 8 # Maximum items per extra data transaction maxNodeOperatorsPerExtraDataItem = 24 # 
Maximum node operators per extra data item requestTimestampMargin = 128 # Request timestamp margin maxPositiveTokenRebase = 5000000 # Maximum positive token rebase -initialSlashingAmountPWei = 1000 # Initial slashing amount (pWei) -inactivityPenaltiesAmountPWei = 101 # Inactivity penalties amount (pWei) +maxCLBalanceDecreaseBP = 360 # Max CL balance decrease over sliding window (BP, 360 = 3.6%) clBalanceOraclesErrorUpperBPLimit = 50 # CL balance oracles error upper limit (BP) +consolidationEthAmountPerDayLimit = 57600 # Consolidation ETH amount per day limit +exitedValidatorEthAmountLimit = 32 # Exited validator ETH amount limit in ETH units # Oracle daemon configuration [oracleDaemonConfig] @@ -169,6 +172,24 @@ maxExitRequestsLimit = 13000 # Maximum number of exit requests that can exitsPerFrame = 1 # Number of exits processed per frame frameDurationInSec = 48 # Duration of each processing frame in seconds +[consolidationGateway] +maxConsolidationRequestsLimit = 8000 # Maximum number of consolidations requests that can be processed +consolidationsPerFrame = 1 # Number of consolidations processed per frame +frameDurationInSec = 48 # Duration of each processing frame in seconds +gIFirstValidatorPrev = "0x0000000000000000000000000000000000000000000000000096000000000028" # Generalized index for first validator (before fork) +gIFirstValidatorCurr = "0x0000000000000000000000000000000000000000000000000096000000000028" # Generalized index for first validator (after fork) +pivotSlot = 0 # Pivot slot for fork-aware gIndex selection + +[consolidationBus] +initialBatchSize = 350 # Max number of requests in a batch +initialMaxGroupsInBatch = 10 # Max source groups in a batch +initialExecutionDelay = 0 # Delay before pending batch execution + +[consolidationMigrator] +sourceModuleId = 1 # Source staking module ID +targetModuleId = 1 # Target staking module ID, for scratch deploy testing, we use moduleId=1 which corresponds to NOR. 
+ + # Predeposit guarantee configuration for validator deposit guarantees [predepositGuarantee] gIndex = "0x0000000000000000000000000000000000000000000000000096000000000028" # Generalized index for state verification @@ -185,3 +206,20 @@ forcedRebalanceThresholdBP = 4975 # Threshold for forced rebalancing in basi infraFeeBP = 100 # Infrastructure fee in basis points (5%) liquidityFeeBP = 650 # Liquidity provision fee in basis points (4%) reservationFeeBP = 0 # Reservation fee in basis points (1%) + +# Top-up gateway configuration for validator top-ups via Merkle proofs +[topUpGateway] +maxValidatorsPerTopUp = 100 # Maximum validators per top-up call +minBlockDistance = 1 # Minimum block distance between top-ups +maxRootAge = 300 # Maximum allowed age of beacon root relative to current block timestamp +targetBalanceGwei = 2046750000000 +minTopUpGwei = 1000000000 +# Generalized indices for validator/balance/pending state verification +gIFirstValidatorPrev = "0x0000000000000000000000000000000000000000000000000096000000000028" +gIFirstValidatorCurr = "0x0000000000000000000000000000000000000000000000000096000000000028" +pivotSlot = 0 # Pivot slot for fork-aware gIndex selection + +# StakingRouter configuration +[stakingRouter] +maxEBType1 = "32000000000000000000" # Max EB value for WC type 1 +maxEBType2 = "2048000000000000000000" # Max EB value for WC type 2 diff --git a/scripts/scratch/scratch-deploy.ts b/scripts/scratch/scratch-deploy.ts new file mode 100644 index 0000000000..3bcf7762f1 --- /dev/null +++ b/scripts/scratch/scratch-deploy.ts @@ -0,0 +1,12 @@ +import { getProtocolContext } from "lib/protocol/context"; + +async function main() { + console.log("Starting scratch deploy..."); + await getProtocolContext(); + console.log("Scratch deploy complete!"); +} + +main().catch((error) => { + console.error(error); + process.exit(1); +}); diff --git a/scripts/scratch/steps/0020-deploy-aragon-env.ts b/scripts/scratch/steps/0020-deploy-aragon-env.ts index 
f7436fdae6..57d994c114 100644 --- a/scripts/scratch/steps/0020-deploy-aragon-env.ts +++ b/scripts/scratch/steps/0020-deploy-aragon-env.ts @@ -147,7 +147,7 @@ export async function main() { if (state[Sk.miniMeTokenFactory].address) { log(`Using pre-deployed MiniMeTokenFactory: ${cy(state[Sk.miniMeTokenFactory].address)}`); } else { - await deployWithoutProxy(Sk.miniMeTokenFactory, "MiniMeTokenFactory", deployer, [], "address", true, { + await deployWithoutProxy(Sk.miniMeTokenFactory, "MiniMeTokenFactory", deployer, [], "address", true, undefined, { contractName: "MiniMeTokenFactory", }); } diff --git a/scripts/scratch/steps/0083-deploy-core.ts b/scripts/scratch/steps/0083-deploy-core.ts index f09153734a..5c0ddd5eff 100644 --- a/scripts/scratch/steps/0083-deploy-core.ts +++ b/scripts/scratch/steps/0083-deploy-core.ts @@ -1,6 +1,13 @@ import { ethers } from "hardhat"; -import { StakingRouter, TriggerableWithdrawalsGateway } from "typechain-types"; +import { + ConsolidationBus, + ConsolidationGateway, + ConsolidationMigrator, + StakingRouter, + TopUpGateway, + TriggerableWithdrawalsGateway, +} from "typechain-types"; import { getContractPath, loadContract } from "lib/contract"; import { @@ -10,6 +17,8 @@ import { deployWithoutProxy, makeTx, } from "lib/deploy"; +import { EIP7002_ADDRESS } from "lib/eips/eip7002"; +import { EIP7251_ADDRESS } from "lib/eips/eip7251"; import { log } from "lib/log"; import { readNetworkState, Sk, updateObjectInState } from "lib/state-file"; import { en0x } from "lib/string"; @@ -37,8 +46,8 @@ export async function main() { const hashConsensusForAccountingParams = state[Sk.hashConsensusForAccountingOracle].deployParameters; const hashConsensusForExitBusParams = state[Sk.hashConsensusForValidatorsExitBusOracle].deployParameters; const withdrawalQueueERC721Params = state[Sk.withdrawalQueueERC721].deployParameters; - const minFirstAllocationStrategyAddress = state[Sk.minFirstAllocationStrategy].address; const validatorExitDelayVerifierParams = 
state[Sk.validatorExitDelayVerifier].deployParameters; + const stakingRouterParams = state[Sk.stakingRouter].deployParameters; const proxyContractsOwner = deployer; const admin = deployer; @@ -149,24 +158,38 @@ export async function main() { // Deploy StakingRouter // + // deploy beacon chain depositor + const beaconChainDepositor = await deployWithoutProxy(Sk.beaconChainDepositor, "BeaconChainDepositor", deployer); + + // deploy SRLib + const minFirstAllocationStrategy = await deployWithoutProxy( + Sk.minFirstAllocationStrategy, + "MinFirstAllocationStrategy", + deployer, + ); + + const srLib = await deployWithoutProxy(Sk.srLib, "SRLib", deployer, [], "address", true, { + libraries: { + MinFirstAllocationStrategy: minFirstAllocationStrategy.address, + }, + }); + const stakingRouter_ = await deployBehindOssifiableProxy( Sk.stakingRouter, "StakingRouter", proxyContractsOwner, deployer, - [depositContract], + [depositContract, lidoAddress, locator.address, stakingRouterParams.maxEBType1, stakingRouterParams.maxEBType2], null, true, { - libraries: { MinFirstAllocationStrategy: minFirstAllocationStrategyAddress }, + libraries: { + BeaconChainDepositor: beaconChainDepositor.address, + SRLib: srLib.address, + }, }, ); - const withdrawalCredentials = `0x010000000000000000000000${withdrawalsManagerProxy.address.slice(2)}`; - const stakingRouterAdmin = deployer; const stakingRouter = await loadContract("StakingRouter", stakingRouter_.address); - await makeTx(stakingRouter, "initialize", [stakingRouterAdmin, lidoAddress, withdrawalCredentials], { - from: deployer, - }); // // Deploy or use predefined DepositSecurityModule @@ -178,7 +201,7 @@ export async function main() { await deployWithoutProxy(Sk.depositSecurityModule, "DepositSecurityModule", deployer, [ lidoAddress, depositContract, - stakingRouter.address, + stakingRouter_.address, depositSecurityModuleParams.pauseIntentValidityPeriodBlocks, depositSecurityModuleParams.maxOperatorsPerUnvetting, ]) @@ -189,14 +212,60 @@ 
export async function main() { ); } + // + // Deploy TopUpGateway behind OssifiableProxy (before StakingRouter initialization) + // + + const topUpGatewayParams = state[Sk.topUpGateway].deployParameters; + const topUpGateway_ = await deployBehindOssifiableProxy( + Sk.topUpGateway, + "TopUpGateway", + proxyContractsOwner, + deployer, + [ + locator.address, + topUpGatewayParams.gIFirstValidatorPrev, + topUpGatewayParams.gIFirstValidatorCurr, + topUpGatewayParams.pivotSlot, + chainSpec.slotsPerEpoch, + ], + ); + const topUpGateway = await loadContract("TopUpGateway", topUpGateway_.address); + await makeTx( + topUpGateway, + "initialize", + [ + admin, + topUpGatewayParams.maxValidatorsPerTopUp, + topUpGatewayParams.minBlockDistance, + topUpGatewayParams.maxRootAge, + topUpGatewayParams.targetBalanceGwei, + topUpGatewayParams.minTopUpGwei, + ], + { from: deployer }, + ); + + // + // Initialize StakingRouter with all required parameters + // + + const withdrawalCredentials = `0x010000000000000000000000${withdrawalsManagerProxy.address.slice(2)}`; + const stakingRouterAdmin = deployer; + await makeTx(stakingRouter, "initialize", [stakingRouterAdmin, withdrawalCredentials], { from: deployer }); + // // Deploy Accounting // - const accounting = await deployBehindOssifiableProxy(Sk.accounting, "Accounting", proxyContractsOwner, deployer, [ - locator.address, - lidoAddress, - ]); + const accounting = await deployBehindOssifiableProxy( + Sk.accounting, + "Accounting", + proxyContractsOwner, + deployer, + [locator.address, lidoAddress], + null, + true, + ); // // Deploy AccountingOracle and its HashConsensus @@ -306,6 +375,109 @@ export async function main() { { from: deployer }, ); + // + // Deploy Consolidation Gateway + // + + const consolidationGatewayParams = state[Sk.consolidationGateway].deployParameters; + const consolidationGateway_ = await deployWithoutProxy(Sk.consolidationGateway, "ConsolidationGateway", deployer, [ + admin, + locator.address, + 
consolidationGatewayParams.maxConsolidationRequestsLimit, + consolidationGatewayParams.consolidationsPerFrame, + consolidationGatewayParams.frameDurationInSec, + consolidationGatewayParams.gIFirstValidatorPrev, + consolidationGatewayParams.gIFirstValidatorCurr, + consolidationGatewayParams.pivotSlot, + ]); + + const consolidationGateway = await loadContract( + "ConsolidationGateway", + consolidationGateway_.address, + ); + + // + // Deploy Consolidation Bus + // + + const consolidationBusParams = state[Sk.consolidationBus].deployParameters; + const consolidationBus_ = await deployBehindOssifiableProxy( + Sk.consolidationBus, + "ConsolidationBus", + proxyContractsOwner, + deployer, + [consolidationGateway_.address], + ); + + const consolidationBus = await loadContract("ConsolidationBus", consolidationBus_.address); + + await makeTx( + consolidationBus, + "initialize", + [ + admin, + consolidationBusParams.initialBatchSize, + consolidationBusParams.initialMaxGroupsInBatch, + consolidationBusParams.initialExecutionDelay, + ], + { from: deployer }, + ); + + // Grant MANAGE_ROLE to deployer for testing + await makeTx(consolidationBus, "grantRole", [await consolidationBus.MANAGE_ROLE(), deployer], { from: deployer }); + + // Grant ADD_CONSOLIDATION_REQUEST_ROLE on Gateway to Bus + await makeTx( + consolidationGateway, + "grantRole", + [await consolidationGateway.ADD_CONSOLIDATION_REQUEST_ROLE(), consolidationBus_.address], + { from: deployer }, + ); + + // Also grant ADD_CONSOLIDATION_REQUEST_ROLE to deployer for direct testing + await makeTx( + consolidationGateway, + "grantRole", + [await consolidationGateway.ADD_CONSOLIDATION_REQUEST_ROLE(), deployer], + { from: deployer }, + ); + + // + // Deploy Consolidation Migrator + // + const consolidationMigratorParams = state[Sk.consolidationMigrator].deployParameters; + + const consolidationMigrator_ = await deployBehindOssifiableProxy( + Sk.consolidationMigrator, + "ConsolidationMigrator", + proxyContractsOwner, + deployer, 
+ [ + stakingRouter_.address, + consolidationBus_.address, + consolidationMigratorParams.sourceModuleId, + consolidationMigratorParams.targetModuleId, + ], + ); + + const consolidationMigrator = await loadContract( + "ConsolidationMigrator", + consolidationMigrator_.address, + ); + + await makeTx(consolidationMigrator, "initialize", [admin], { from: deployer }); + + // Grant ALLOW_PAIR_ROLE to deployer for testing + await makeTx(consolidationMigrator, "grantRole", [await consolidationMigrator.ALLOW_PAIR_ROLE(), deployer], { + from: deployer, + }); + + // Register ConsolidationMigrator as publisher on ConsolidationBus + + await makeTx(consolidationBus, "grantRole", [await consolidationBus.PUBLISH_ROLE(), consolidationMigrator_.address], { + from: deployer, + }); + // // Deploy ValidatorExitDelayVerifier // @@ -344,6 +516,9 @@ export async function main() { lidoAddress, treasuryAddress, triggerableWithdrawalsGateway.address, + consolidationGateway.address, + EIP7002_ADDRESS, + EIP7251_ADDRESS, ]); await makeTx(withdrawalsManagerProxy, "proxy_upgradeTo", [withdrawalVaultImpl.address, "0x"], { from: deployer }); @@ -365,26 +540,30 @@ export async function main() { // const sanityCheckerParams = state["oracleReportSanityChecker"].deployParameters; - const oracleReportSanityCheckerArgs = [ - locator.address, - accountingOracle.address, - accounting.address, - admin, - [ - sanityCheckerParams.exitedValidatorsPerDayLimit, - sanityCheckerParams.appearedValidatorsPerDayLimit, - sanityCheckerParams.annualBalanceIncreaseBPLimit, - sanityCheckerParams.simulatedShareRateDeviationBPLimit, - sanityCheckerParams.maxValidatorExitRequestsPerReport, - sanityCheckerParams.maxItemsPerExtraDataTransaction, - sanityCheckerParams.maxNodeOperatorsPerExtraDataItem, - sanityCheckerParams.requestTimestampMargin, - sanityCheckerParams.maxPositiveTokenRebase, - sanityCheckerParams.initialSlashingAmountPWei, - sanityCheckerParams.inactivityPenaltiesAmountPWei, - 
sanityCheckerParams.clBalanceOraclesErrorUpperBPLimit, - ], - ]; + // TODO: set final NEW sanity limits in deploy params before release deployment: + // - exitedEthAmountPerDayLimit + // - appearedEthAmountPerDayLimit + // - consolidationEthAmountPerDayLimit + // - exitedValidatorEthAmountLimit + const sanityLimits = { + exitedEthAmountPerDayLimit: sanityCheckerParams.exitedEthAmountPerDayLimit, + appearedEthAmountPerDayLimit: sanityCheckerParams.appearedEthAmountPerDayLimit, + annualBalanceIncreaseBPLimit: sanityCheckerParams.annualBalanceIncreaseBPLimit, + simulatedShareRateDeviationBPLimit: sanityCheckerParams.simulatedShareRateDeviationBPLimit, + maxBalanceExitRequestedPerReportInEth: sanityCheckerParams.maxBalanceExitRequestedPerReportInEth, + maxEffectiveBalanceWeightWCType01: sanityCheckerParams.maxEffectiveBalanceWeightWCType01, + maxEffectiveBalanceWeightWCType02: sanityCheckerParams.maxEffectiveBalanceWeightWCType02, + maxItemsPerExtraDataTransaction: sanityCheckerParams.maxItemsPerExtraDataTransaction, + maxNodeOperatorsPerExtraDataItem: sanityCheckerParams.maxNodeOperatorsPerExtraDataItem, + requestTimestampMargin: sanityCheckerParams.requestTimestampMargin, + maxPositiveTokenRebase: sanityCheckerParams.maxPositiveTokenRebase, + maxCLBalanceDecreaseBP: sanityCheckerParams.maxCLBalanceDecreaseBP, + clBalanceOraclesErrorUpperBPLimit: sanityCheckerParams.clBalanceOraclesErrorUpperBPLimit, + consolidationEthAmountPerDayLimit: sanityCheckerParams.consolidationEthAmountPerDayLimit, + exitedValidatorEthAmountLimit: sanityCheckerParams.exitedValidatorEthAmountLimit, + }; + + const oracleReportSanityCheckerArgs = [locator.address, accounting.address, admin, sanityLimits]; await deployWithoutProxy( Sk.oracleReportSanityChecker, diff --git a/scripts/scratch/steps/0090-upgrade-locator.ts b/scripts/scratch/steps/0090-upgrade-locator.ts index 62c2881e7c..3b3f60ed79 100644 --- a/scripts/scratch/steps/0090-upgrade-locator.ts +++ 
b/scripts/scratch/steps/0090-upgrade-locator.ts @@ -29,6 +29,7 @@ export async function main() { withdrawalVault: getAddress(Sk.withdrawalVault, state), validatorExitDelayVerifier: getAddress(Sk.validatorExitDelayVerifier, state), triggerableWithdrawalsGateway: getAddress(Sk.triggerableWithdrawalsGateway, state), + consolidationGateway: getAddress(Sk.consolidationGateway, state), oracleDaemonConfig: getAddress(Sk.oracleDaemonConfig, state), accounting: getAddress(Sk.accounting, state), predepositGuarantee: getAddress(Sk.predepositGuarantee, state), @@ -37,7 +38,7 @@ export async function main() { vaultFactory: getAddress(Sk.stakingVaultFactory, state), lazyOracle: getAddress(Sk.lazyOracle, state), operatorGrid: getAddress(Sk.operatorGrid, state), + topUpGateway: getAddress(Sk.topUpGateway, state), }; - await updateProxyImplementation(Sk.lidoLocator, "LidoLocator", locatorAddress, proxyContractsOwner, [locatorConfig]); } diff --git a/scripts/scratch/steps/0140-plug-staking-modules.ts b/scripts/scratch/steps/0140-plug-staking-modules.ts index 3b15da7ad6..e360a577bd 100644 --- a/scripts/scratch/steps/0140-plug-staking-modules.ts +++ b/scripts/scratch/steps/0140-plug-staking-modules.ts @@ -1,5 +1,6 @@ import { ethers } from "hardhat"; +import { WithdrawalCredentialsType } from "lib"; import { loadContract } from "lib/contract"; import { makeTx } from "lib/deploy"; import { streccak } from "lib/keccak"; @@ -13,6 +14,7 @@ const NOR_STAKING_MODULE_MODULE_FEE_BP = 500; // 5% const NOR_STAKING_MODULE_TREASURY_FEE_BP = 500; // 5% const NOR_STAKING_MODULE_MAX_DEPOSITS_PER_BLOCK = 150; const NOR_STAKING_MODULE_MIN_DEPOSIT_BLOCK_DISTANCE = 25; +const NOR_WITHDRAWAL_TYPE = WithdrawalCredentialsType.WC0x01; const SDVT_STAKING_MODULE_TARGET_SHARE_BP = 400; // 4% const SDVT_STAKING_MODULE_PRIORITY_EXIT_SHARE_THRESHOLD_BP = 10000; // 100% @@ -20,6 +22,7 @@ const SDVT_STAKING_MODULE_MODULE_FEE_BP = 800; // 8% const SDVT_STAKING_MODULE_TREASURY_FEE_BP = 200; // 2% const 
SDVT_STAKING_MODULE_MAX_DEPOSITS_PER_BLOCK = 150; const SDVT_STAKING_MODULE_MIN_DEPOSIT_BLOCK_DISTANCE = 25; +const SDVT_WITHDRAWAL_TYPE = WithdrawalCredentialsType.WC0x01; export async function main() { const deployer = (await ethers.provider.getSigner()).address; @@ -38,12 +41,15 @@ export async function main() { [ state.nodeOperatorsRegistry.deployParameters.stakingModuleName, state[Sk.appNodeOperatorsRegistry].proxy.address, - NOR_STAKING_MODULE_STAKE_SHARE_LIMIT_BP, - NOR_STAKING_MODULE_PRIORITY_EXIT_SHARE_THRESHOLD_BP, - NOR_STAKING_MODULE_MODULE_FEE_BP, - NOR_STAKING_MODULE_TREASURY_FEE_BP, - NOR_STAKING_MODULE_MAX_DEPOSITS_PER_BLOCK, - NOR_STAKING_MODULE_MIN_DEPOSIT_BLOCK_DISTANCE, + { + stakeShareLimit: NOR_STAKING_MODULE_STAKE_SHARE_LIMIT_BP, + priorityExitShareThreshold: NOR_STAKING_MODULE_PRIORITY_EXIT_SHARE_THRESHOLD_BP, + stakingModuleFee: NOR_STAKING_MODULE_MODULE_FEE_BP, + treasuryFee: NOR_STAKING_MODULE_TREASURY_FEE_BP, + maxDepositsPerBlock: NOR_STAKING_MODULE_MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: NOR_STAKING_MODULE_MIN_DEPOSIT_BLOCK_DISTANCE, + withdrawalCredentialsType: NOR_WITHDRAWAL_TYPE, + }, ], { from: deployer }, ); @@ -55,12 +61,15 @@ export async function main() { [ state.simpleDvt.deployParameters.stakingModuleName, state[Sk.appSimpleDvt].proxy.address, - SDVT_STAKING_MODULE_TARGET_SHARE_BP, - SDVT_STAKING_MODULE_PRIORITY_EXIT_SHARE_THRESHOLD_BP, - SDVT_STAKING_MODULE_MODULE_FEE_BP, - SDVT_STAKING_MODULE_TREASURY_FEE_BP, - SDVT_STAKING_MODULE_MAX_DEPOSITS_PER_BLOCK, - SDVT_STAKING_MODULE_MIN_DEPOSIT_BLOCK_DISTANCE, + { + stakeShareLimit: SDVT_STAKING_MODULE_TARGET_SHARE_BP, + priorityExitShareThreshold: SDVT_STAKING_MODULE_PRIORITY_EXIT_SHARE_THRESHOLD_BP, + stakingModuleFee: SDVT_STAKING_MODULE_MODULE_FEE_BP, + treasuryFee: SDVT_STAKING_MODULE_TREASURY_FEE_BP, + maxDepositsPerBlock: SDVT_STAKING_MODULE_MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: SDVT_STAKING_MODULE_MIN_DEPOSIT_BLOCK_DISTANCE, + 
withdrawalCredentialsType: SDVT_WITHDRAWAL_TYPE, + }, ], { from: deployer }, ); diff --git a/scripts/scratch/steps/0150-transfer-roles.ts b/scripts/scratch/steps/0150-transfer-roles.ts index 067e85a065..e51cd1a788 100644 --- a/scripts/scratch/steps/0150-transfer-roles.ts +++ b/scripts/scratch/steps/0150-transfer-roles.ts @@ -25,6 +25,9 @@ export async function main() { { name: "OracleDaemonConfig", address: state[Sk.oracleDaemonConfig].address }, { name: "OracleReportSanityChecker", address: state[Sk.oracleReportSanityChecker].address }, { name: "TriggerableWithdrawalsGateway", address: state[Sk.triggerableWithdrawalsGateway].address }, + { name: "ConsolidationGateway", address: state[Sk.consolidationGateway].address }, + { name: "ConsolidationBus", address: state[Sk.consolidationBus].proxy.address }, + { name: "ConsolidationMigrator", address: state[Sk.consolidationMigrator].proxy.address }, { name: "VaultHub", address: state[Sk.vaultHub].proxy.address }, { name: "PredepositGuarantee", address: state[Sk.predepositGuarantee].proxy.address }, { name: "OperatorGrid", address: state[Sk.operatorGrid].proxy.address }, diff --git a/scripts/top-up/__pycache__/top-up.cpython-310.pyc b/scripts/top-up/__pycache__/top-up.cpython-310.pyc new file mode 100644 index 0000000000..0f939146b1 Binary files /dev/null and b/scripts/top-up/__pycache__/top-up.cpython-310.pyc differ diff --git a/scripts/top-up/top-up.py b/scripts/top-up/top-up.py new file mode 100755 index 0000000000..2c0d4ce083 --- /dev/null +++ b/scripts/top-up/top-up.py @@ -0,0 +1,1675 @@ +#!/usr/bin/env python3 +""" +Build Merkle proofs for validator top-up verification via EIP-4788. 
+ +Usage examples: + + # Build proofs and write to default file + python top-up.py prove \ + -e http://localhost:8545 -c http://localhost:5052 \ + -v 12345 -v 67890 -o + + # Build proofs to custom file + python top-up.py prove \ + -e http://localhost:8545 -c http://localhost:5052 \ + -v 12345 -o my_proofs.json + + # Send top-up transaction (reads PRIVATE_KEY from .env) + python top-up.py top-up \ + -e http://localhost:8545 \ + -g 0xYourGatewayAddress \ + -v 12345 -k 0 --operator-id 1 \ + -v 67890 -k 1 --operator-id 1 \ + -m 1 + + # Build proofs and send top-up in one step (reads PRIVATE_KEY from .env) + python top-up.py top-up-prove \ + -e http://localhost:8545 -c http://localhost:5052 \ + -g 0xYourGatewayAddress \ + -v 12345 -k 0 --operator-id 1 \ + -v 67890 -k 1 --operator-id 1 \ + -m 1 + +Output: + JSON to stdout matching TopUpData struct for on-chain verification. + +Flow: + 1. Fetch latest block from EL (eth_getBlockByNumber) + 2. Extract parentBeaconBlockRoot and timestamp from EL block + - parentBeaconBlockRoot = beacon root verifiable via 4788 precompile + - timestamp = key to query 4788 on-chain + 3. Fetch beacon block by root from CL + 4. Build validator proofs against that beacon block's state + 5. Verify proofs locally before output + +On-chain verification: + Call 4788 precompile at 0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02 + with childBlockTimestamp to get beacon_root, then verify proofs against it. 
+""" + +import argparse +import hashlib +import json +import os +import sys +import time +from typing import List, Tuple, Any + +import requests +from dotenv import load_dotenv +from web3 import Web3 + +# ============================================ +# py-ssz imports +# ============================================ +from ssz.sedes import ( + uint8, + uint64, + uint256, + boolean, + Bitvector, + Vector, + List as SSZList, + Container, + ByteVector, +) +import ssz + + +# ============================================ +# Constants (Mainnet Electra) +# ============================================ +VALIDATOR_REGISTRY_LIMIT = 2**40 +SLOTS_PER_HISTORICAL_ROOT = 8192 +EPOCHS_PER_HISTORICAL_VECTOR = 65536 +EPOCHS_PER_SLASHINGS_VECTOR = 8192 +HISTORICAL_ROOTS_LIMIT = 16777216 +ETH1_DATA_VOTES_LIMIT = 2048 +JUSTIFICATION_BITS_LENGTH = 4 +SYNC_COMMITTEE_SIZE = 512 +EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256 +MAX_PROPOSER_SLASHINGS = 16 +MAX_ATTESTER_SLASHINGS = 2 +MAX_ATTESTATIONS = 128 +MAX_DEPOSITS = 16 +MAX_VOLUNTARY_EXITS = 16 +MAX_BLS_TO_EXECUTION_CHANGES = 16 +MAX_WITHDRAWALS_PER_PAYLOAD = 16 +MAX_BLOB_COMMITMENTS_PER_BLOCK = 4096 + +MIN_SEED_LOOKAHEAD = 1 +SLOTS_PER_EPOCH = 32 +PROPOSER_LOOKAHEAD_SIZE = (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH # = 64 + + +# ============================================ +# SSZ Type Definitions (py-ssz style) +# ============================================ + +# Custom byte vector types +bytes4 = ByteVector(4) +bytes20 = ByteVector(20) +bytes32 = ByteVector(32) +bytes48 = ByteVector(48) +bytes96 = ByteVector(96) + +# Electra-specific constants +PENDING_DEPOSITS_LIMIT = 134217728 +PENDING_PARTIAL_WITHDRAWALS_LIMIT = 134217728 +PENDING_CONSOLIDATIONS_LIMIT = 262144 + +from ssz.sedes import ( + uint8, + uint64, + uint256, + boolean, + Bitvector, + Vector, + List as SSZList, + ByteVector, + Container, +) + +# Custom byte vector types +bytes4 = ByteVector(4) +bytes20 = ByteVector(20) +bytes32 = ByteVector(32) +bytes48 = ByteVector(48) +bytes96 = 
ByteVector(96) + +# Fork: previous_version, current_version, epoch +Fork = Container(field_sedes=(bytes4, bytes4, uint64)) + +# Checkpoint: epoch, root +Checkpoint = Container(field_sedes=(uint64, bytes32)) + +# Validator: pubkey, withdrawal_credentials, effective_balance, slashed, +# activation_eligibility_epoch, activation_epoch, exit_epoch, withdrawable_epoch +Validator = Container( + field_sedes=( + bytes48, # 0: pubkey + bytes32, # 1: withdrawal_credentials + uint64, # 2: effective_balance + boolean, # 3: slashed + uint64, # 4: activation_eligibility_epoch + uint64, # 5: activation_epoch + uint64, # 6: exit_epoch + uint64, # 7: withdrawable_epoch + ) +) + +# Eth1Data: deposit_root, deposit_count, block_hash +Eth1Data = Container(field_sedes=(bytes32, uint64, bytes32)) + +# BeaconBlockHeader: slot, proposer_index, parent_root, state_root, body_root +BeaconBlockHeader = Container( + field_sedes=( + uint64, # 0: slot + uint64, # 1: proposer_index + bytes32, # 2: parent_root + bytes32, # 3: state_root + bytes32, # 4: body_root + ) +) + +# SyncCommittee: pubkeys, aggregate_pubkey +SyncCommittee = Container( + field_sedes=( + Vector(bytes48, SYNC_COMMITTEE_SIZE), # 0: pubkeys + bytes48, # 1: aggregate_pubkey + ) +) + +# ExecutionPayloadHeader (Deneb) +ExecutionPayloadHeader = Container( + field_sedes=( + bytes32, # 0: parent_hash + bytes20, # 1: fee_recipient + bytes32, # 2: state_root + bytes32, # 3: receipts_root + ByteVector(256), # 4: logs_bloom + bytes32, # 5: prev_randao + uint64, # 6: block_number + uint64, # 7: gas_limit + uint64, # 8: gas_used + uint64, # 9: timestamp + SSZList(uint8, 32), # 10: extra_data + uint256, # 11: base_fee_per_gas + bytes32, # 12: block_hash + bytes32, # 13: transactions_root + bytes32, # 14: withdrawals_root + uint64, # 15: blob_gas_used + uint64, # 16: excess_blob_gas + ) +) + +# HistoricalSummary: block_summary_root, state_summary_root +HistoricalSummary = Container(field_sedes=(bytes32, bytes32)) + +# PendingDeposit: pubkey, 
withdrawal_credentials, amount, signature, slot +PendingDeposit = Container( + field_sedes=( + bytes48, # 0: pubkey + bytes32, # 1: withdrawal_credentials + uint64, # 2: amount + bytes96, # 3: signature + uint64, # 4: slot + ) +) + +# PendingPartialWithdrawal: validator_index, amount, withdrawable_epoch +PendingPartialWithdrawal = Container(field_sedes=(uint64, uint64, uint64)) + +# PendingConsolidation: source_index, target_index +PendingConsolidation = Container(field_sedes=(uint64, uint64)) + +# BeaconState (Electra with Fulu proposer_lookahead) +BeaconState = Container( + field_sedes=( + # Versioning [0-3] + uint64, # 0: genesis_time + bytes32, # 1: genesis_validators_root + uint64, # 2: slot + Fork, # 3: fork + # History [4-7] + BeaconBlockHeader, # 4: latest_block_header + Vector(bytes32, SLOTS_PER_HISTORICAL_ROOT), # 5: block_roots + Vector(bytes32, SLOTS_PER_HISTORICAL_ROOT), # 6: state_roots + SSZList(bytes32, HISTORICAL_ROOTS_LIMIT), # 7: historical_roots + # Eth1 [8-10] + Eth1Data, # 8: eth1_data + SSZList(Eth1Data, ETH1_DATA_VOTES_LIMIT), # 9: eth1_data_votes + uint64, # 10: eth1_deposit_index + # Registry [11-12] + SSZList(Validator, VALIDATOR_REGISTRY_LIMIT), # 11: validators + SSZList(uint64, VALIDATOR_REGISTRY_LIMIT), # 12: balances + # Randomness [13] + Vector(bytes32, EPOCHS_PER_HISTORICAL_VECTOR), # 13: randao_mixes + # Slashings [14] + Vector(uint64, EPOCHS_PER_SLASHINGS_VECTOR), # 14: slashings + # Participation [15-16] + SSZList(uint8, VALIDATOR_REGISTRY_LIMIT), # 15: previous_epoch_participation + SSZList(uint8, VALIDATOR_REGISTRY_LIMIT), # 16: current_epoch_participation + # Finality [17-20] + Bitvector(JUSTIFICATION_BITS_LENGTH), # 17: justification_bits + Checkpoint, # 18: previous_justified_checkpoint + Checkpoint, # 19: current_justified_checkpoint + Checkpoint, # 20: finalized_checkpoint + # Inactivity [21] + SSZList(uint64, VALIDATOR_REGISTRY_LIMIT), # 21: inactivity_scores + # Sync committees [22-23] + SyncCommittee, # 22: 
current_sync_committee + SyncCommittee, # 23: next_sync_committee + # Execution [24] + ExecutionPayloadHeader, # 24: latest_execution_payload_header + # Withdrawals [25-26] + uint64, # 25: next_withdrawal_index + uint64, # 26: next_withdrawal_validator_index + # Deep history [27] + SSZList(HistoricalSummary, HISTORICAL_ROOTS_LIMIT), # 27: historical_summaries + # Electra [28-36] + uint64, # 28: deposit_requests_start_index + uint64, # 29: deposit_balance_to_consume + uint64, # 30: exit_balance_to_consume + uint64, # 31: earliest_exit_epoch + uint64, # 32: consolidation_balance_to_consume + uint64, # 33: earliest_consolidation_epoch + SSZList(PendingDeposit, PENDING_DEPOSITS_LIMIT), # 34: pending_deposits + SSZList( + PendingPartialWithdrawal, PENDING_PARTIAL_WITHDRAWALS_LIMIT + ), # 35: pending_partial_withdrawals + SSZList( + PendingConsolidation, PENDING_CONSOLIDATIONS_LIMIT + ), # 36: pending_consolidations + # Fulu (not active yet) + Vector(uint64, PROPOSER_LOOKAHEAD_SIZE), # 37: proposer_lookahead + ) +) + +# BeaconState field indices +STATE_GENESIS_TIME = 0 +STATE_GENESIS_VALIDATORS_ROOT = 1 +STATE_SLOT = 2 +STATE_FORK = 3 +STATE_LATEST_BLOCK_HEADER = 4 +STATE_BLOCK_ROOTS = 5 +STATE_STATE_ROOTS = 6 +STATE_HISTORICAL_ROOTS = 7 +STATE_ETH1_DATA = 8 +STATE_ETH1_DATA_VOTES = 9 +STATE_ETH1_DEPOSIT_INDEX = 10 +STATE_VALIDATORS = 11 +STATE_BALANCES = 12 +STATE_RANDAO_MIXES = 13 +STATE_SLASHINGS = 14 +STATE_PREVIOUS_EPOCH_PARTICIPATION = 15 +STATE_CURRENT_EPOCH_PARTICIPATION = 16 +STATE_JUSTIFICATION_BITS = 17 +STATE_PREVIOUS_JUSTIFIED_CHECKPOINT = 18 +STATE_CURRENT_JUSTIFIED_CHECKPOINT = 19 +STATE_FINALIZED_CHECKPOINT = 20 +STATE_INACTIVITY_SCORES = 21 +STATE_CURRENT_SYNC_COMMITTEE = 22 +STATE_NEXT_SYNC_COMMITTEE = 23 +STATE_LATEST_EXECUTION_PAYLOAD_HEADER = 24 +STATE_NEXT_WITHDRAWAL_INDEX = 25 +STATE_NEXT_WITHDRAWAL_VALIDATOR_INDEX = 26 +STATE_HISTORICAL_SUMMARIES = 27 +STATE_DEPOSIT_REQUESTS_START_INDEX = 28 +STATE_DEPOSIT_BALANCE_TO_CONSUME = 29 
+STATE_EXIT_BALANCE_TO_CONSUME = 30 +STATE_EARLIEST_EXIT_EPOCH = 31 +STATE_CONSOLIDATION_BALANCE_TO_CONSUME = 32 +STATE_EARLIEST_CONSOLIDATION_EPOCH = 33 +STATE_PENDING_DEPOSITS = 34 +STATE_PENDING_PARTIAL_WITHDRAWALS = 35 +STATE_PENDING_CONSOLIDATIONS = 36 +STATE_PROPOSER_LOOKAHEAD = 37 + +# Validator field indices +VALIDATOR_PUBKEY = 0 +VALIDATOR_WITHDRAWAL_CREDENTIALS = 1 +VALIDATOR_EFFECTIVE_BALANCE = 2 +VALIDATOR_SLASHED = 3 +VALIDATOR_ACTIVATION_ELIGIBILITY_EPOCH = 4 +VALIDATOR_ACTIVATION_EPOCH = 5 +VALIDATOR_EXIT_EPOCH = 6 +VALIDATOR_WITHDRAWABLE_EPOCH = 7 + +# BeaconBlockHeader field indices +HEADER_SLOT = 0 +HEADER_PROPOSER_INDEX = 1 +HEADER_PARENT_ROOT = 2 +HEADER_STATE_ROOT = 3 +HEADER_BODY_ROOT = 4 + + +# ============================================ +# Merkle Tree Utilities (py-ssz doesn't provide backing tree navigation) +# ============================================ + + +def sha256(data: bytes) -> bytes: + return hashlib.sha256(data).digest() + + +def hash_concat(left: bytes, right: bytes) -> bytes: + return sha256(left + right) + + +ZERO_HASHES: List[bytes] = [] + + +def _init_zero_hashes(depth: int = 64): + """Pre-compute zero hashes for empty subtrees.""" + global ZERO_HASHES + if len(ZERO_HASHES) >= depth: + return + ZERO_HASHES = [b"\x00" * 32] + for _ in range(1, depth): + ZERO_HASHES.append(sha256(ZERO_HASHES[-1] + ZERO_HASHES[-1])) + + +_init_zero_hashes(64) + + +def next_power_of_two(n: int) -> int: + """Return the smallest power of 2 >= n.""" + if n <= 1: + return 1 + return 1 << (n - 1).bit_length() + + +def mix_in_length(root: bytes, length: int) -> bytes: + """Mix in the length for List types.""" + length_bytes = length.to_bytes(32, "little") + return hash_concat(root, length_bytes) + + +class MerkleTree: + """ + A Merkle tree built from leaf chunks, supporting proof extraction. + py-ssz doesn't provide a backing tree, so we build one ourselves. 
+ """ + + def __init__(self, chunks: List[bytes], limit: int = None): + """ + Build a Merkle tree from 32-byte chunks. + If limit is provided, this is a List type and we pad to next_power_of_two(limit). + """ + self.limit = limit + self.original_length = len(chunks) + + if limit is not None: + target_len = next_power_of_two(limit) + else: + target_len = next_power_of_two(len(chunks)) if chunks else 1 + + self.depth = target_len.bit_length() - 1 if target_len > 1 else 0 + + # Pad with zero chunks + self.leaves = list(chunks) + [ZERO_HASHES[0]] * (target_len - len(chunks)) + + # Build all layers (layer 0 = leaves, last layer = root) + self.layers: List[List[bytes]] = [self.leaves] + current = self.leaves + while len(current) > 1: + next_layer = [] + for i in range(0, len(current), 2): + left = current[i] + right = current[i + 1] if i + 1 < len(current) else ZERO_HASHES[0] + next_layer.append(hash_concat(left, right)) + self.layers.append(next_layer) + current = next_layer + + self.root = current[0] if current else ZERO_HASHES[0] + + def get_proof(self, index: int) -> List[bytes]: + """ + Get Merkle proof for leaf at index. + Returns proof in bottom-up order (leaf to root). + """ + proof = [] + idx = index + for layer in self.layers[:-1]: # Exclude root layer + sibling_idx = idx ^ 1 # XOR to get sibling index + if sibling_idx < len(layer): + proof.append(layer[sibling_idx]) + else: + # Use appropriate zero hash for this depth + proof.append(ZERO_HASHES[0]) + idx //= 2 + return proof + + +def build_merkle_tree_for_list( + items: list, item_sedes, limit: int +) -> Tuple[bytes, MerkleTree]: + """ + Build a Merkle tree for an SSZ List. + Returns (root with length mixed in, MerkleTree of data). 
+ """ + chunks = [] + for item in items: + chunk = ssz.get_hash_tree_root(item, item_sedes) + chunks.append(chunk) + + tree = MerkleTree(chunks, limit=limit) + # List root = mix_in_length(data_root, len) + final_root = mix_in_length(tree.root, len(items)) + return final_root, tree + + +def build_merkle_tree_for_container(obj, container_sedes) -> Tuple[bytes, MerkleTree]: + """ + Build a Merkle tree for an SSZ Container. + Returns (root, MerkleTree of field roots). + """ + field_roots = [] + for field_name, field_sedes in container_sedes.fields: + field_value = getattr(obj, field_name) + field_root = ssz.get_hash_tree_root(field_value, field_sedes) + field_roots.append(field_root) + + tree = MerkleTree(field_roots) + return tree.root, tree + + +# ============================================ +# Generalized Index Calculation +# ============================================ + + +def compute_gindex_for_validator(validator_index: int) -> int: + """ + Compute generalized index for validator[index] in BeaconState. + Proves the entire Validator object (hash_tree_root). + """ + STATE_TREE_DEPTH = 6 # 37 fields (Electra) -> pad to 64 + VALIDATORS_FIELD_INDEX = 11 + VALIDATORS_LIST_DEPTH = 40 + + validators_gindex = (1 << STATE_TREE_DEPTH) + VALIDATORS_FIELD_INDEX + validators_data_gindex = validators_gindex * 2 + final_gindex = ( + validators_data_gindex * (1 << VALIDATORS_LIST_DEPTH) + validator_index + ) + + return final_gindex + + +def compute_gindex_for_state_root_in_header() -> int: + """ + Compute gindex for state_root inside BeaconBlockHeader. + 5 fields -> pad to 8 -> depth 3, state_root is field 3. 
+ """ + HEADER_DEPTH = 3 + STATE_ROOT_INDEX = 3 + return (1 << HEADER_DEPTH) + STATE_ROOT_INDEX + + +# ============================================ +# Merkle Proof Functions +# ============================================ + + +def extract_proof_by_gindex( + tree: MerkleTree, gindex: int, tree_depth: int +) -> List[bytes]: + """ + Extract proof from a MerkleTree using generalized index. + Returns proof in bottom-up order (leaf to root). + + gindex encodes the path: binary representation (excluding leading 1) + gives the path from root to leaf (0=left, 1=right). + """ + if gindex <= 1: + return [] + + # Path bits from root to leaf + path_bits = [int(b) for b in bin(gindex)[3:]] + + # We need to pad path_bits to tree_depth + if len(path_bits) < tree_depth: + path_bits = [0] * (tree_depth - len(path_bits)) + path_bits + + # Navigate tree and collect siblings + proof_top_down = [] + idx = 0 + for i, bit in enumerate(path_bits): + layer_idx = i + if layer_idx >= len(tree.layers) - 1: + # Beyond tree depth, use zero hashes + proof_top_down.append(ZERO_HASHES[tree_depth - i - 1]) + continue + + layer = tree.layers[layer_idx] + if bit == 0: + # Going left, sibling is right + sibling_idx = idx * 2 + 1 + else: + # Going right, sibling is left + sibling_idx = idx * 2 + + if sibling_idx < len(layer): + proof_top_down.append(layer[sibling_idx]) + else: + proof_top_down.append(ZERO_HASHES[0]) + + idx = idx * 2 + bit + + # Reverse for bottom-up order + return list(reversed(proof_top_down)) + + +def extract_proof_for_field( + container_tree: MerkleTree, field_index: int +) -> List[bytes]: + """ + Extract proof for a field in a container. + Returns proof in bottom-up order. + """ + return container_tree.get_proof(field_index) + + +def extract_proof_for_list_item( + list_tree: MerkleTree, item_index: int, list_depth: int +) -> List[bytes]: + """ + Extract proof for an item in a list. + Returns proof from item to list data root (not including length mixin). 
+ """ + # For lists, we need to handle the virtual tree expansion + # The list tree only contains actual items, but proof must account for full depth + proof = [] + + # Navigate through the tree + idx = item_index + for d in range(list_depth): + if d < len(list_tree.layers) - 1: + layer = list_tree.layers[d] + sibling_idx = idx ^ 1 + if sibling_idx < len(layer): + proof.append(layer[sibling_idx]) + else: + proof.append(ZERO_HASHES[d]) + else: + # Virtual levels - use zero hashes + proof.append(ZERO_HASHES[d]) + idx //= 2 + + return proof + + +def verify_merkle_proof( + leaf: bytes, proof: List[bytes], gindex: int, root: bytes +) -> bool: + """ + Verify with proof ordered leaf->root (bottom-up). + Uses gindex path bits bottom-up (LSB-first). + """ + if gindex <= 1: + return leaf == root + + path_bits = [int(b) for b in bin(gindex)[3:]] # root->leaf bits + path_bits_bottom_up = list(reversed(path_bits)) + + if len(proof) != len(path_bits_bottom_up): + return False + + computed = leaf + for bit, sibling in zip(path_bits_bottom_up, proof): + if bit == 0: + computed = hash_concat(computed, sibling) + else: + computed = hash_concat(sibling, computed) + + return computed == root + + +def verify_merkle_proof_by_index( + leaf: bytes, proof: List[bytes], index: int, root: bytes +) -> bool: + """ + Verify Merkle proof using simple index (not gindex). + Proof is ordered leaf->root (bottom-up). 
+ """ + computed = leaf + idx = index + + for sibling in proof: + if idx % 2 == 0: + # We're on the left, sibling is on the right + computed = hash_concat(computed, sibling) + else: + # We're on the right, sibling is on the left + computed = hash_concat(sibling, computed) + idx //= 2 + + return computed == root + + +# ============================================ +# Multi-level proof extraction for BeaconState +# ============================================ + + +def build_sparse_list_proof(chunks: List[bytes], index: int, depth: int) -> List[bytes]: + """ + Build Merkle proof for item at index. + Efficient: computes only sibling subtree roots needed for the path. + """ + n = len(chunks) + memo = {} + + def node_hash(level: int, pos: int) -> bytes: + """ + Return hash of node at `level` (0 = leaf level), position `pos`. + Tree is virtually padded with zero chunks up to `depth`. + """ + key = (level, pos) + if key in memo: + return memo[key] + + # This subtree starts beyond available leaves -> pure zero subtree. + if (pos << level) >= n: + return ZERO_HASHES[level] + + if level == 0: + h = chunks[pos] + else: + left = node_hash(level - 1, pos * 2) + right = node_hash(level - 1, pos * 2 + 1) + h = hash_concat(left, right) + + memo[key] = h + return h + + proof = [] + for level in range(depth): + sibling_pos = (index >> level) ^ 1 + proof.append(node_hash(level, sibling_pos)) + + return proof + + +def extract_validator_proof( + state_field_roots: List[bytes], + validators_list: list, + validator_index: int, +) -> List[bytes]: + """ + Extract full proof for a validator from BeaconState. + Optimized to avoid building full tree for 2^40 limit. 
+ """ + VALIDATORS_FIELD_INDEX = 11 + VALIDATORS_LIST_DEPTH = 40 # log2(VALIDATOR_REGISTRY_LIMIT) + + # Step 1: Compute validator roots + validator_chunks = [] + for v in validators_list: + chunk = Validator.get_hash_tree_root(v) + validator_chunks.append(chunk) + + # Step 2: Build sparse proof from validator to list data root + validator_proof = build_sparse_list_proof( + chunks=validator_chunks, + index=validator_index, + depth=VALIDATORS_LIST_DEPTH, + ) + + # Step 3: Add length mixin - the sibling is the length + length_bytes = len(validators_list).to_bytes(32, "little") + validator_proof.append(length_bytes) + + # Step 4: Build state tree and get proof from validators field to state root + state_tree = MerkleTree(state_field_roots) + field_proof = state_tree.get_proof(VALIDATORS_FIELD_INDEX) + validator_proof.extend(field_proof) + + return validator_proof + + +def compute_merkle_root_sparse(chunks: List[bytes], depth: int) -> bytes: + """Compute merkle root of chunks with given tree depth using zero hashes.""" + if not chunks: + return ZERO_HASHES[depth] + + layer = list(chunks) + + for d in range(depth): + next_layer = [] + for i in range(0, len(layer), 2): + left = layer[i] + right = layer[i + 1] if i + 1 < len(layer) else ZERO_HASHES[d] + next_layer.append(hash_concat(left, right)) + + if not next_layer: + next_layer = [ZERO_HASHES[d + 1]] + + layer = next_layer + + return layer[0] + + +# def extract_validator_proof( +# state_field_roots: List[bytes], +# validators_list: list, +# validator_index: int, +# ) -> List[bytes]: +# """ +# Extract full proof for a validator from BeaconState. + +# The proof consists of: +# 1. Proof from validator to validators list data root +# 2. Length mixin proof (single hash) +# 3. Proof from validators field to state root + +# Returns proof in bottom-up order. 
+# """ +# STATE_TREE_DEPTH = 6 # 37 fields -> pad to 64 +# VALIDATORS_FIELD_INDEX = 11 +# VALIDATORS_LIST_DEPTH = 40 + +# # Step 1: Build tree for validators list +# validator_chunks = [] +# for v in validators_list: +# chunk = Validator.get_hash_tree_root(v) # Validator - это Container +# validator_chunks.append(chunk) + +# validators_tree = MerkleTree(validator_chunks, limit=VALIDATOR_REGISTRY_LIMIT) + +# # Step 2: Get proof from validator to list data root +# validator_proof = extract_proof_for_list_item( +# validators_tree, validator_index, VALIDATORS_LIST_DEPTH +# ) + +# # Step 3: Add length mixin - the sibling is the length +# length_bytes = len(validators_list).to_bytes(32, "little") +# validator_proof.append(length_bytes) + +# # Step 4: Build state tree and get proof from validators field to state root +# state_tree = MerkleTree(state_field_roots) +# field_proof = state_tree.get_proof(VALIDATORS_FIELD_INDEX) +# validator_proof.extend(field_proof) + +# return validator_proof + + +def extract_header_proof(header) -> List[bytes]: + """ + Extract proof for state_root field in BeaconBlockHeader. + Returns proof in bottom-up order. + """ + HEADER_DEPTH = 3 + STATE_ROOT_INDEX = 3 + + # Build header tree + field_roots = [] + for i, sedes in enumerate(BeaconBlockHeader.field_sedes): + field_value = header[i] + field_root = sedes.get_hash_tree_root(field_value) + field_roots.append(field_root) + + header_tree = MerkleTree(field_roots) + return header_tree.get_proof(STATE_ROOT_INDEX) + + +# ============================================ +# EL + CL Client +# ============================================ + + +def get_latest_el_block(el_url: str) -> dict: + """ + Step 1: Get latest EL block via eth_getBlockByNumber("latest", false). + Returns dict with 'timestamp' and 'parentBeaconBlockRoot'. 
+ """ + resp = requests.post( + el_url, + json={ + "jsonrpc": "2.0", + "method": "eth_getBlockByNumber", + "params": ["latest", False], + "id": 1, + }, + timeout=30, + ) + resp.raise_for_status() + result = resp.json()["result"] + return { + "timestamp": int(result["timestamp"], 16), + "parentBeaconBlockRoot": bytes.fromhex(result["parentBeaconBlockRoot"][2:]), + } + + +def get_beacon_block_header_by_root(cl_url: str, block_root: bytes) -> dict: + """ + Step 3: GET /eth/v2/beacon/blocks/{root} and extract header fields. + """ + root_hex = "0x" + block_root.hex() + url = f"{cl_url}/eth/v2/beacon/blocks/{root_hex}" + resp = requests.get(url, timeout=30) + resp.raise_for_status() + header = resp.json()["data"]["message"] + return { + "slot": int(header["slot"]), + "proposer_index": int(header["proposer_index"]), + } + + +def get_beacon_state_ssz(cl_url: str, slot: int) -> bytes: + """ + Step 4: GET /eth/v2/debug/beacon/states/{slot} in SSZ format. + """ + url = f"{cl_url}/eth/v2/debug/beacon/states/{slot}" + headers = {"Accept": "application/octet-stream"} + print(f"Fetching BeaconState for slot {slot}...", file=sys.stderr) + resp = requests.get(url, headers=headers, timeout=300) + resp.raise_for_status() + print(f"Received {len(resp.content):,} bytes", file=sys.stderr) + return resp.content + + +# ============================================ +# Proof Builder +# ============================================ + + +def compute_state_field_roots(state) -> List[bytes]: + """Compute hash_tree_root for each field of BeaconState.""" + field_roots = [] + for i, sedes in enumerate(BeaconState.field_sedes): + field_value = state[i] + field_root = sedes.get_hash_tree_root(field_value) + field_roots.append(field_root) + + return field_roots + + +# def build_top_up_data(el_url: str, cl_url: str, validator_indices: List[int]) -> dict: +# """ +# Build TopUpData for the given validator indices. + +# Flow: +# 1. Get latest EL block -> timestamp, parentBeaconBlockRoot +# 2. 
parentBeaconBlockRoot IS the beacon root from 4788 +# 3. Get beacon block header by root -> slot, proposerIndex +# 4. Get state for that slot -> build proofs +# 5. Verify proofs locally +# """ +# # Step 1-2: Get latest EL block +# print("Step 1: Fetching latest EL block...", file=sys.stderr) +# el_block = get_latest_el_block(el_url) +# child_block_timestamp = el_block["timestamp"] +# parent_beacon_block_root = el_block["parentBeaconBlockRoot"] +# print(f" timestamp: {child_block_timestamp}", file=sys.stderr) +# print( +# f" parentBeaconBlockRoot: 0x{parent_beacon_block_root.hex()}", file=sys.stderr +# ) + +# # Step 3: Get beacon block header by root +# print("Step 3: Fetching beacon block header...", file=sys.stderr) +# beacon_header = get_beacon_block_header_by_root(cl_url, parent_beacon_block_root) +# slot = beacon_header["slot"] +# proposer_index = beacon_header["proposer_index"] +# print(f" slot: {slot}, proposerIndex: {proposer_index}", file=sys.stderr) + +# # Step 4: Get state SSZ +# print("Step 4: Fetching beacon state...", file=sys.stderr) +# ssz_bytes = get_beacon_state_ssz(cl_url, slot) + +# print("Deserializing BeaconState...", file=sys.stderr) +# state = ssz.decode(ssz_bytes, BeaconState) + +# # Compute state root +# state_root = BeaconState.get_hash_tree_root(state) +# print(f" state_root: 0x{state_root.hex()}", file=sys.stderr) + +# # Fetch and build header object for proof extraction +# header_url = f"{cl_url}/eth/v1/beacon/headers/{slot}" +# header_resp = requests.get(header_url, timeout=30) +# header_resp.raise_for_status() +# header_msg = header_resp.json()["data"]["header"]["message"] + +# header = ( +# int(header_msg["slot"]), +# int(header_msg["proposer_index"]), +# bytes.fromhex(header_msg["parent_root"][2:]), +# bytes.fromhex(header_msg["state_root"][2:]), +# bytes.fromhex(header_msg["body_root"][2:]), +# ) + +# beacon_block_root = BeaconBlockHeader.get_hash_tree_root(header) +# print(f" beacon_block_root: 0x{beacon_block_root.hex()}", 
file=sys.stderr) + +# # Verify beacon_block_root matches parentBeaconBlockRoot from EL +# if beacon_block_root != parent_beacon_block_root: +# raise ValueError( +# f"beacon_block_root mismatch!\n" +# f" computed: 0x{beacon_block_root.hex()}\n" +# f" expected: 0x{parent_beacon_block_root.hex()}" +# ) +# print(" beacon_block_root matches parentBeaconBlockRoot", file=sys.stderr) + +# # Build header proof (state_root -> beacon_block_root) +# header_proof = extract_header_proof(header) +# state_root_gindex = compute_gindex_for_state_root_in_header() + +# # Pre-compute state field roots for efficiency +# print("Computing state field roots...", file=sys.stderr) +# state_field_roots = compute_state_field_roots(state) + +# # Build proofs for each validator +# validator_witnesses = [] +# validators = state[STATE_VALIDATORS] + +# for vi in validator_indices: +# print(f"Step 5: Building proof for validator {vi}...", file=sys.stderr) + +# validator = validators[vi] + +# validator_root = Validator.get_hash_tree_root(validator) + +# # Proof: validator[i] -> state_root +# validator_gindex = compute_gindex_for_validator(vi) +# validator_proof = extract_validator_proof( +# state_field_roots, list(validators), vi +# ) + +# # Full proof: validator_proof + header_proof +# full_proof = validator_proof + header_proof + +# # Verification: walk from leaf to state_root +# print(f" Verifying validator proof (leaf -> state_root)...", file=sys.stderr) +# if not verify_merkle_proof( +# validator_root, validator_proof, validator_gindex, state_root +# ): +# raise ValueError( +# f"Validator proof verification FAILED for index {vi}!\n" +# f" leaf (validator_root): 0x{validator_root.hex()}\n" +# f" expected state_root: 0x{state_root.hex()}" +# ) +# print(f" Validator proof verified OK", file=sys.stderr) + +# # Verification: walk from state_root to beacon_block_root +# print( +# f" Verifying header proof (state_root -> beacon_block_root)...", +# file=sys.stderr, +# ) +# if not verify_merkle_proof( +# 
state_root, header_proof, state_root_gindex, beacon_block_root +# ): +# raise ValueError( +# f"Header proof verification FAILED!\n" +# f" leaf (state_root): 0x{state_root.hex()}\n" +# f" expected beacon_block_root: 0x{beacon_block_root.hex()}" +# ) +# print(f" Header proof verified OK", file=sys.stderr) + +# validator_witnesses.append( +# { +# "validatorIndex": vi, +# "pubkey": "0x" + bytes(validator[VALIDATOR_PUBKEY]).hex(), +# "effectiveBalance": int(validator[VALIDATOR_EFFECTIVE_BALANCE]), +# "activationEligibilityEpoch": int( +# validator[VALIDATOR_ACTIVATION_ELIGIBILITY_EPOCH] +# ), +# "activationEpoch": int(validator[VALIDATOR_ACTIVATION_EPOCH]), +# "exitEpoch": int(validator[VALIDATOR_EXIT_EPOCH]), +# "withdrawableEpoch": int(validator[VALIDATOR_WITHDRAWABLE_EPOCH]), +# "slashed": bool(validator[VALIDATOR_SLASHED]), +# "proofs": ["0x" + p.hex() for p in full_proof], +# } +# ) + +# return { +# "beaconRootData": { +# "childBlockTimestamp": child_block_timestamp, +# "slot": slot, +# "proposerIndex": proposer_index, +# }, +# "validatorWitnesses": validator_witnesses, +# } + + +def build_top_up_data(el_url: str, cl_url: str, validator_indices: List[int]) -> dict: + """ + Build TopUpData for the given validator indices. + + Flow: + 1. Get latest EL block -> timestamp, parentBeaconBlockRoot + 2. parentBeaconBlockRoot IS the beacon root from 4788 + 3. Get beacon block header by root -> slot, proposerIndex + 4. Get state for that slot -> build proofs + 5. 
Verify proofs locally + """ + # Step 1-2: Get latest EL block + print("Step 1: Fetching latest EL block...", file=sys.stderr) + el_block = get_latest_el_block(el_url) + child_block_timestamp = el_block["timestamp"] + parent_beacon_block_root = el_block["parentBeaconBlockRoot"] + print(f" timestamp: {child_block_timestamp}", file=sys.stderr) + print( + f" parentBeaconBlockRoot: 0x{parent_beacon_block_root.hex()}", file=sys.stderr + ) + + # Step 3: Get beacon block header by root + print("Step 3: Fetching beacon block header...", file=sys.stderr) + beacon_header = get_beacon_block_header_by_root(cl_url, parent_beacon_block_root) + slot = beacon_header["slot"] + proposer_index = beacon_header["proposer_index"] + print(f" slot: {slot}, proposerIndex: {proposer_index}", file=sys.stderr) + + # Step 4: Get state SSZ + print("Step 4: Fetching beacon state...", file=sys.stderr) + ssz_bytes = get_beacon_state_ssz(cl_url, slot) + + print("Deserializing BeaconState...", file=sys.stderr) + state = ssz.decode(ssz_bytes, BeaconState) + + # Compute state root + state_root = BeaconState.get_hash_tree_root(state) + print(f" state_root: 0x{state_root.hex()}", file=sys.stderr) + + # Fetch and build header object for proof extraction + header_url = f"{cl_url}/eth/v1/beacon/headers/{slot}" + header_resp = requests.get(header_url, timeout=30) + header_resp.raise_for_status() + header_msg = header_resp.json()["data"]["header"]["message"] + + header = ( + int(header_msg["slot"]), + int(header_msg["proposer_index"]), + bytes.fromhex(header_msg["parent_root"][2:]), + bytes.fromhex(header_msg["state_root"][2:]), + bytes.fromhex(header_msg["body_root"][2:]), + ) + + beacon_block_root = BeaconBlockHeader.get_hash_tree_root(header) + print(f" beacon_block_root: 0x{beacon_block_root.hex()}", file=sys.stderr) + + # Verify beacon_block_root matches parentBeaconBlockRoot from EL + if beacon_block_root != parent_beacon_block_root: + raise ValueError( + f"beacon_block_root mismatch!\n" + f" computed: 
0x{beacon_block_root.hex()}\n" + f" expected: 0x{parent_beacon_block_root.hex()}" + ) + print(" beacon_block_root matches parentBeaconBlockRoot", file=sys.stderr) + + # Build header proof (state_root -> beacon_block_root) + header_proof = extract_header_proof(header) + + # Pre-compute state field roots for efficiency + print("Computing state field roots...", file=sys.stderr) + state_field_roots = compute_state_field_roots(state) + + # Build proofs for each validator + validator_witnesses = [] + validators = state[STATE_VALIDATORS] + validators_list = list(validators) + + # Pre-compute validator chunks and validators_data_root once + print("Computing validator chunks...", file=sys.stderr) + validator_chunks = [Validator.get_hash_tree_root(v) for v in validators_list] + + VALIDATORS_LIST_DEPTH = 40 + print("Computing validators_data_root...", file=sys.stderr) + validators_data_root = compute_merkle_root_sparse( + validator_chunks, VALIDATORS_LIST_DEPTH + ) + + for vi in validator_indices: + print(f"Step 5: Building proof for validator {vi}...", file=sys.stderr) + + validator = validators[vi] + validator_root = Validator.get_hash_tree_root(validator) + + # Build validator proof + validator_proof = extract_validator_proof( + state_field_roots, validators_list, vi + ) + + # Full proof: validator_proof + header_proof + full_proof = validator_proof + header_proof + + # === ПОЭТАПНАЯ ВЕРИФИКАЦИЯ === + + # 1. Проверяем proof от validator до validators_data_root + print(f" Verifying validator -> validators_data_root...", file=sys.stderr) + list_proof = validator_proof[:VALIDATORS_LIST_DEPTH] + + if not verify_merkle_proof_by_index( + validator_root, list_proof, vi, validators_data_root + ): + raise ValueError( + f"Validator list proof FAILED for index {vi}!\n" + f" validator_root: 0x{validator_root.hex()}\n" + f" validators_data_root: 0x{validators_data_root.hex()}" + ) + print(f" Validator list proof OK", file=sys.stderr) + + # 2. 
Проверяем length mixin + print(f" Verifying length mixin...", file=sys.stderr) + length_bytes = validator_proof[VALIDATORS_LIST_DEPTH] + validators_root = hash_concat(validators_data_root, length_bytes) + + expected_validators_root = state_field_roots[STATE_VALIDATORS] + if validators_root != expected_validators_root: + raise ValueError( + f"Validators root mismatch!\n" + f" computed: 0x{validators_root.hex()}\n" + f" expected: 0x{expected_validators_root.hex()}" + ) + print(f" Length mixin OK", file=sys.stderr) + + # 3. Проверяем proof от validators field до state_root + print(f" Verifying validators_root -> state_root...", file=sys.stderr) + field_proof = validator_proof[VALIDATORS_LIST_DEPTH + 1 :] + + if not verify_merkle_proof_by_index( + validators_root, field_proof, STATE_VALIDATORS, state_root + ): + raise ValueError( + f"State field proof FAILED!\n" + f" validators_root: 0x{validators_root.hex()}\n" + f" state_root: 0x{state_root.hex()}" + ) + print(f" State field proof OK", file=sys.stderr) + + # 4. 
Проверяем header proof + print(f" Verifying state_root -> beacon_block_root...", file=sys.stderr) + if not verify_merkle_proof_by_index( + state_root, header_proof, HEADER_STATE_ROOT, beacon_block_root + ): + raise ValueError( + f"Header proof FAILED!\n" + f" state_root: 0x{state_root.hex()}\n" + f" beacon_block_root: 0x{beacon_block_root.hex()}" + ) + print(f" Header proof OK", file=sys.stderr) + + validator_witnesses.append( + { + "validatorIndex": vi, + "pubkey": "0x" + bytes(validator[VALIDATOR_PUBKEY]).hex(), + "effectiveBalance": int(validator[VALIDATOR_EFFECTIVE_BALANCE]), + "activationEligibilityEpoch": int( + validator[VALIDATOR_ACTIVATION_ELIGIBILITY_EPOCH] + ), + "activationEpoch": int(validator[VALIDATOR_ACTIVATION_EPOCH]), + "exitEpoch": int(validator[VALIDATOR_EXIT_EPOCH]), + "withdrawableEpoch": int(validator[VALIDATOR_WITHDRAWABLE_EPOCH]), + "slashed": bool(validator[VALIDATOR_SLASHED]), + "proofs": ["0x" + p.hex() for p in full_proof], + } + ) + + return { + "beaconRootData": { + "childBlockTimestamp": child_block_timestamp, + "slot": slot, + "proposerIndex": proposer_index, + }, + "validatorWitnesses": validator_witnesses, + } + + +# ============================================ +# TopUpGateway ABI (topUp function only) +# ============================================ + +TOP_UP_GATEWAY_ABI = json.loads( + """[ + { + "inputs": [ + { + "components": [ + {"internalType": "uint256", "name": "moduleId", "type": "uint256"}, + {"internalType": "uint256[]", "name": "keyIndices", "type": "uint256[]"}, + {"internalType": "uint256[]", "name": "operatorIds", "type": "uint256[]"}, + {"internalType": "uint256[]", "name": "validatorIndices", "type": "uint256[]"}, + { + "components": [ + {"internalType": "uint64", "name": "childBlockTimestamp", "type": "uint64"}, + {"internalType": "uint64", "name": "slot", "type": "uint64"}, + {"internalType": "uint64", "name": "proposerIndex", "type": "uint64"} + ], + "internalType": "struct BeaconRootData", + "name": 
"beaconRootData", + "type": "tuple" + }, + { + "components": [ + {"internalType": "bytes32[]", "name": "proofValidator", "type": "bytes32[]"}, + {"internalType": "bytes", "name": "pubkey", "type": "bytes"}, + {"internalType": "uint64", "name": "effectiveBalance", "type": "uint64"}, + {"internalType": "uint64", "name": "activationEligibilityEpoch", "type": "uint64"}, + {"internalType": "uint64", "name": "activationEpoch", "type": "uint64"}, + {"internalType": "uint64", "name": "exitEpoch", "type": "uint64"}, + {"internalType": "uint64", "name": "withdrawableEpoch", "type": "uint64"}, + {"internalType": "bool", "name": "slashed", "type": "bool"} + ], + "internalType": "struct ValidatorWitness[]", + "name": "validatorWitness", + "type": "tuple[]" + }, + {"internalType": "uint256[]", "name": "pendingBalanceGwei", "type": "uint256[]"} + ], + "internalType": "struct TopUpData", + "name": "_topUps", + "type": "tuple" + } + ], + "name": "topUp", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + } +]""" +) + +DEFAULT_WITNESS_FILE = "validatorWitness.json" + + +# ============================================ +# CLI +# ============================================ + + +def cmd_prove(args): + """Build proofs and optionally write to file.""" + try: + result = build_top_up_data( + el_url=args.el_url, + cl_url=args.cl_url, + validator_indices=args.validator_indices, + ) + + json_output = json.dumps(result, indent=2) + + if args.output: + with open(args.output, "w") as f: + f.write(json_output) + print(f"Proof data written to {args.output}", file=sys.stderr) + else: + print(json_output) + + except Exception as e: + print(f"ERROR: {type(e).__name__}: {e}", file=sys.stderr) + import traceback + + traceback.print_exc(file=sys.stderr) + sys.exit(1) + + +def send_top_up_tx( + witness_data: dict, + validator_indices: List[int], + key_indices: List[int], + operator_ids: List[int], + module_id: int, + el_url: str, + gateway_address: str, + gas_limit: int, +): + 
"""Shared logic: build and send a topUp transaction from witness data.""" + load_dotenv() + + private_key = os.environ.get("PRIVATE_KEY") + if not private_key: + print("ERROR: PRIVATE_KEY not found in .env file", file=sys.stderr) + sys.exit(1) + + if len(validator_indices) != len(key_indices) or len(validator_indices) != len( + operator_ids + ): + print( + "ERROR: --validator-index, --key-index, and --operator-id must have the same count", + file=sys.stderr, + ) + sys.exit(1) + + w3 = Web3(Web3.HTTPProvider(el_url)) + if not w3.is_connected(): + print("ERROR: Cannot connect to EL node", file=sys.stderr) + sys.exit(1) + + account = w3.eth.account.from_key(private_key) + print(f"Sender: {account.address}", file=sys.stderr) + + beacon_root_data = witness_data["beaconRootData"] + validators = witness_data["validatorWitnesses"] + + witness_by_index = {v["validatorIndex"]: v for v in validators} + ordered_witnesses = [] + for vi in validator_indices: + if vi not in witness_by_index: + print( + f"ERROR: Validator index {vi} not found in witness data", + file=sys.stderr, + ) + sys.exit(1) + ordered_witnesses.append(witness_by_index[vi]) + + validator_witnesses_tuples = [ + ( + [bytes.fromhex(p[2:]) for p in vw["proofs"]], + bytes.fromhex(vw["pubkey"][2:]), + vw["effectiveBalance"], + vw["activationEligibilityEpoch"], + vw["activationEpoch"], + vw["exitEpoch"], + vw["withdrawableEpoch"], + vw["slashed"], + ) + for vw in ordered_witnesses + ] + + pending_balances = [0] * len(validator_indices) + + top_up_data = ( + module_id, + list(key_indices), + list(operator_ids), + list(validator_indices), + ( + beacon_root_data["childBlockTimestamp"], + beacon_root_data["slot"], + beacon_root_data["proposerIndex"], + ), + validator_witnesses_tuples, + pending_balances, + ) + + contract = w3.eth.contract( + address=Web3.to_checksum_address(gateway_address), + abi=TOP_UP_GATEWAY_ABI, + ) + + print("Building transaction...", file=sys.stderr) + tx = 
contract.functions.topUp(top_up_data).build_transaction( + { + "from": account.address, + "nonce": w3.eth.get_transaction_count(account.address), + "gas": gas_limit, + "maxFeePerGas": w3.eth.gas_price * 2, + "maxPriorityFeePerGas": w3.to_wei(1, "gwei"), + } + ) + + # Dry-run via eth_call to get revert reason before sending + print("Simulating transaction (eth_call)...", file=sys.stderr) + try: + contract.functions.topUp(top_up_data).call({"from": account.address}) + print("Simulation OK", file=sys.stderr) + except Exception as sim_err: + print(f"Simulation REVERTED: {sim_err}", file=sys.stderr) + print("Sending anyway to get on-chain receipt...", file=sys.stderr) + + print("Signing and sending transaction...", file=sys.stderr) + signed_tx = w3.eth.account.sign_transaction(tx, private_key) + tx_hash = w3.eth.send_raw_transaction(signed_tx.raw_transaction) + print(f"Transaction sent: 0x{tx_hash.hex()}", file=sys.stderr) + + print("Waiting for receipt...", file=sys.stderr) + receipt = w3.eth.wait_for_transaction_receipt(tx_hash, timeout=300) + if receipt["status"] == 1: + print( + f"Transaction confirmed in block {receipt['blockNumber']}", file=sys.stderr + ) + else: + print( + f"Transaction REVERTED in block {receipt['blockNumber']}", file=sys.stderr + ) + print(f" gasUsed: {receipt['gasUsed']}", file=sys.stderr) + print(f" tx hash: 0x{tx_hash.hex()}", file=sys.stderr) + # Try to extract revert reason via debug_traceTransaction or replay + try: + w3.eth.call( + {"to": receipt["to"], "from": receipt["from"], "data": tx["data"]}, + receipt["blockNumber"], + ) + except Exception as revert_err: + print(f" revert reason: {revert_err}", file=sys.stderr) + sys.exit(1) + + +def cmd_top_up(args): + """Read witness file and send topUp transaction to TopUpGateway.""" + input_file = args.input + print(f"Reading witness data from {input_file}...", file=sys.stderr) + with open(input_file, "r") as f: + witness_data = json.load(f) + + send_top_up_tx( + witness_data=witness_data, + 
validator_indices=args.validator_index, + key_indices=args.key_index, + operator_ids=args.operator_id, + module_id=args.module_id, + el_url=args.el_url, + gateway_address=args.gateway_address, + gas_limit=args.gas_limit, + ) + + +def cmd_top_up_prove(args): + """Build proofs and immediately send topUp transaction to TopUpGateway.""" + try: + t0 = time.time() + + witness_data = build_top_up_data( + el_url=args.el_url, + cl_url=args.cl_url, + validator_indices=args.validator_indices, + ) + + proof_elapsed = time.time() - t0 + print(f"Proofs built in {proof_elapsed:.2f}s, sending tx to TopUpGateway...") + + send_top_up_tx( + witness_data=witness_data, + validator_indices=args.validator_indices, + key_indices=args.key_index, + operator_ids=args.operator_id, + module_id=args.module_id, + el_url=args.el_url, + gateway_address=args.gateway_address, + gas_limit=args.gas_limit, + ) + + total_elapsed = time.time() - t0 + print( + f"Total time: {total_elapsed:.2f}s (proof: {proof_elapsed:.2f}s, tx: {total_elapsed - proof_elapsed:.2f}s)" + ) + except Exception as e: + print(f"ERROR: {type(e).__name__}: {e}", file=sys.stderr) + import traceback + + traceback.print_exc(file=sys.stderr) + sys.exit(1) + + +def main(): + parser = argparse.ArgumentParser( + description="Build Merkle proofs and send top-up transactions for validator verification via EIP-4788.", + ) + subparsers = parser.add_subparsers(dest="command", required=True) + + # ---- prove command ---- + prove_parser = subparsers.add_parser( + "prove", + help="Build Merkle proofs for validators", + ) + prove_parser.add_argument( + "--el-url", + "-e", + required=True, + help="EL RPC endpoint, e.g. http://localhost:8545", + ) + prove_parser.add_argument( + "--cl-url", + "-c", + required=True, + help="CL API endpoint, e.g. 
http://localhost:5052", + ) + prove_parser.add_argument( + "--validator-index", + "-v", + type=int, + action="append", + required=True, + dest="validator_indices", + help="Validator index (can be specified multiple times)", + ) + prove_parser.add_argument( + "--output", + "-o", + default=None, + help=f"Output file path (default: stdout, use -o without value for '{DEFAULT_WITNESS_FILE}')", + nargs="?", + const=DEFAULT_WITNESS_FILE, + ) + + # ---- top-up command ---- + topup_parser = subparsers.add_parser( + "top-up", + help="Send topUp transaction to TopUpGateway", + ) + topup_parser.add_argument( + "--el-url", + "-e", + required=True, + help="EL RPC endpoint, e.g. http://localhost:8545", + ) + topup_parser.add_argument( + "--gateway-address", + "-g", + required=True, + help="TopUpGateway contract address", + ) + topup_parser.add_argument( + "--validator-index", + "-v", + type=int, + action="append", + required=True, + dest="validator_index", + help="Validator index (can be specified multiple times, order matters)", + ) + topup_parser.add_argument( + "--key-index", + "-k", + type=int, + action="append", + required=True, + dest="key_index", + help="Key index for each validator (same order as -v)", + ) + topup_parser.add_argument( + "--operator-id", + type=int, + action="append", + required=True, + dest="operator_id", + help="Operator ID for each validator (same order as -v)", + ) + topup_parser.add_argument( + "--module-id", + "-m", + type=int, + required=True, + dest="module_id", + help="Staking module ID", + ) + topup_parser.add_argument( + "--input", + "-i", + default=DEFAULT_WITNESS_FILE, + help=f"Input witness file (default: {DEFAULT_WITNESS_FILE})", + ) + topup_parser.add_argument( + "--gas-limit", + type=int, + default=1_000_000, + help="Gas limit for transaction (default: 1000000)", + ) + + # ---- top-up-prove command ---- + topup_prove_parser = subparsers.add_parser( + "top-up-prove", + help="Build proofs and immediately send topUp transaction", + ) + 
topup_prove_parser.add_argument( + "--el-url", + "-e", + required=True, + help="EL RPC endpoint, e.g. http://localhost:8545", + ) + topup_prove_parser.add_argument( + "--cl-url", + "-c", + required=True, + help="CL API endpoint, e.g. http://localhost:5052", + ) + topup_prove_parser.add_argument( + "--gateway-address", + "-g", + required=True, + help="TopUpGateway contract address", + ) + topup_prove_parser.add_argument( + "--validator-index", + "-v", + type=int, + action="append", + required=True, + dest="validator_indices", + help="Validator index (can be specified multiple times)", + ) + topup_prove_parser.add_argument( + "--key-index", + "-k", + type=int, + action="append", + required=True, + dest="key_index", + help="Key index for each validator (same order as -v)", + ) + topup_prove_parser.add_argument( + "--operator-id", + type=int, + action="append", + required=True, + dest="operator_id", + help="Operator ID for each validator (same order as -v)", + ) + topup_prove_parser.add_argument( + "--module-id", + "-m", + type=int, + required=True, + dest="module_id", + help="Staking module ID", + ) + topup_prove_parser.add_argument( + "--gas-limit", + type=int, + default=1_000_000, + help="Gas limit for transaction (default: 1000000)", + ) + + args = parser.parse_args() + + if args.command == "prove": + cmd_prove(args) + elif args.command == "top-up": + cmd_top_up(args) + elif args.command == "top-up-prove": + cmd_top_up_prove(args) + + +if __name__ == "__main__": + main() diff --git a/scripts/upgrade/upgrade-params-mainnet.toml b/scripts/upgrade/upgrade-params-mainnet.toml index ff7fd8323d..5e7093219f 100644 --- a/scripts/upgrade/upgrade-params-mainnet.toml +++ b/scripts/upgrade/upgrade-params-mainnet.toml @@ -7,3 +7,8 @@ genesisForkVersion = "0x00000000" # Ethereum mainnet genesis fork version: h gIndex = "0x0000000000000000000000000000000000000000000000000096000000000028" # Generalized index for state verification: 
https://research.lido.fi/t/lip-27-ensuring-compatibility-with-ethereum-s-pectra-upgrade/9444#p-20086-update-gindexes-5 gIndexAfterChange = "0x0000000000000000000000000000000000000000000000000096000000000028" # Required for hardfork upgrades, the same for now changeSlot = 0 # Slot number when the change takes effect + +[consolidationGateway] +maxConsolidationRequestsLimit = 8000 # Maximum number of consolidations requests that can be processed +consolidationsPerFrame = 1 # Number of consolidations processed per frame +frameDurationInSec = 48 # Duration of each processing frame in seconds diff --git a/scripts/utils/scratch.ts b/scripts/utils/scratch.ts index 5b34cb048b..f0ebed8320 100644 --- a/scripts/utils/scratch.ts +++ b/scripts/utils/scratch.ts @@ -115,11 +115,26 @@ export function scratchParametersToDeploymentState(params: ScratchParameters): R triggerableWithdrawalsGateway: { deployParameters: params.triggerableWithdrawalsGateway, }, + consolidationGateway: { + deployParameters: params.consolidationGateway, + }, + consolidationBus: { + deployParameters: params.consolidationBus, + }, + consolidationMigrator: { + deployParameters: params.consolidationMigrator, + }, predepositGuarantee: { deployParameters: params.predepositGuarantee, }, operatorGrid: { deployParameters: params.operatorGrid, }, + topUpGateway: { + deployParameters: params.topUpGateway, + }, + stakingRouter: { + deployParameters: params.stakingRouter, + }, }; } diff --git a/tasks/check-interfaces.ts b/tasks/check-interfaces.ts index 726de62595..29950863f1 100644 --- a/tasks/check-interfaces.ts +++ b/tasks/check-interfaces.ts @@ -53,6 +53,21 @@ const PAIRS_TO_SKIP: { "function transferFrom(address sender, address recipient, uint256 amount) returns (bool)", ], }, + { + interfaceFqn: "contracts/0.4.24/Lido.sol:IAccountingOracle", + contractFqn: "contracts/0.8.9/oracle/AccountingOracle.sol:AccountingOracle", + reason: "Fixing requires Lido redeploy", + }, + { + interfaceFqn: 
"contracts/0.4.24/Lido.sol:IStakingRouter", + contractFqn: "contracts/0.8.25/sr/StakingRouter.sol:StakingRouter", + reason: "only var names/state modifiers are diff., can be safely ignored", + }, + { + interfaceFqn: "contracts/0.8.25/sr/SRTypes.sol:IAccountingOracle", + contractFqn: "contracts/0.8.9/oracle/AccountingOracle.sol:AccountingOracle", + reason: "Optimization to avoid memory struct allocation on each deposit.", + }, ]; task("check-interfaces").setAction(async (_, hre) => { diff --git a/tasks/index.ts b/tasks/index.ts index 73c5f20bea..68bb0d854d 100644 --- a/tasks/index.ts +++ b/tasks/index.ts @@ -6,3 +6,4 @@ import "./compile"; import "./check-interfaces"; import "./validate-configs"; import "./lint-solidity"; +import "./protocol-get-addresses"; diff --git a/tasks/protocol-get-addresses.ts b/tasks/protocol-get-addresses.ts new file mode 100644 index 0000000000..92b56e0bcb --- /dev/null +++ b/tasks/protocol-get-addresses.ts @@ -0,0 +1,7 @@ +import { task } from "hardhat/config"; + +task("protocol:get-addresses", "Get deployed protocol contract addresses").setAction(async () => { + const { readNetworkState } = await import("lib/state-file"); + const state = readNetworkState(); + console.log(JSON.stringify(state, null, 2)); +}); diff --git a/test/0.4.24/contracts/AccountingOracle__MockForStakingRouter.sol b/test/0.4.24/contracts/AccountingOracle__MockForStakingRouter.sol new file mode 100644 index 0000000000..7e980ffd3e --- /dev/null +++ b/test/0.4.24/contracts/AccountingOracle__MockForStakingRouter.sol @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +contract AccountingOracle__MockForStakingRouter { + uint256 currentFrameRefSlot; + uint256 lastProcessingRefSlot; + bool mainDataSubmitted; + bool extraDataSubmitted; + + uint256 public constant SECONDS_PER_SLOT = 4; + uint256 public constant GENESIS_TIME = 100; + + constructor() { + currentFrameRefSlot = 0; + mainDataSubmitted = false; + 
extraDataSubmitted = false; + } + + struct ProcessingState { + uint256 currentFrameRefSlot; + uint256 processingDeadlineTime; + bytes32 mainDataHash; + bool mainDataSubmitted; + bytes32 extraDataHash; + uint256 extraDataFormat; + bool extraDataSubmitted; + uint256 extraDataItemsCount; + uint256 extraDataItemsSubmitted; + } + + function getProcessingState() external view returns (ProcessingState memory result) { + result.currentFrameRefSlot = currentFrameRefSlot; + result.mainDataSubmitted = mainDataSubmitted; + result.extraDataSubmitted = extraDataSubmitted; + } + + function getLastProcessingRefSlot() external view returns (uint256) { + return lastProcessingRefSlot; + } + + function getCurrentFrame() external view returns (uint256 refSlot, uint256 refSlotTimestamp) { + refSlot = currentFrameRefSlot; + refSlotTimestamp = GENESIS_TIME + refSlot * SECONDS_PER_SLOT; + } + + function mock_setProcessingState(uint256 _refSlot, bool _mainDataSubmitted, bool _extraDataSubmitted) external { + currentFrameRefSlot = _refSlot; + lastProcessingRefSlot = _refSlot; + mainDataSubmitted = _mainDataSubmitted; + extraDataSubmitted = _extraDataSubmitted; + } +} diff --git a/test/0.4.24/contracts/Lido__HarnessForFinalizeUpgradeV3.sol b/test/0.4.24/contracts/Lido__HarnessForFinalizeUpgradeV3.sol deleted file mode 100644 index c6ee34ced4..0000000000 --- a/test/0.4.24/contracts/Lido__HarnessForFinalizeUpgradeV3.sol +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: UNLICENSED -// for testing purposes only - -pragma solidity 0.4.24; - -import {Lido} from "contracts/0.4.24/Lido.sol"; -import {UnstructuredStorage} from "@aragon/os/contracts/apps/AragonApp.sol"; - -contract Lido__HarnessForFinalizeUpgradeV3 is Lido { - using UnstructuredStorage for bytes32; - - bytes32 constant LIDO_LOCATOR_POSITION = keccak256("lido.Lido.lidoLocator"); - bytes32 constant TOTAL_SHARES_POSITION = keccak256("lido.StETH.totalShares"); - bytes32 constant BUFFERED_ETHER_POSITION = 
keccak256("lido.Lido.bufferedEther"); - bytes32 constant CL_VALIDATORS_POSITION = keccak256("lido.Lido.beaconValidators"); - bytes32 constant CL_BALANCE_POSITION = keccak256("lido.Lido.beaconBalance"); - bytes32 constant DEPOSITED_VALIDATORS_POSITION = keccak256("lido.Lido.depositedValidators"); - - bytes32 internal constant TOTAL_SHARES_POSITION_V3 = - 0x6038150aecaa250d524370a0fdcdec13f2690e0723eaf277f41d7cae26b359e6; - - function harness_initialize_v2(address _lidoLocator) external payable { - _bootstrapInitialHolder(); // stone in the elevator - - initialized(); - - _resume(); - - _setContractVersion(2); - - BUFFERED_ETHER_POSITION.setStorageUint256(msg.value); - LIDO_LOCATOR_POSITION.setStorageAddress(_lidoLocator); - TOTAL_SHARES_POSITION.setStorageUint256(TOTAL_SHARES_POSITION_V3.getStorageUint256()); - CL_VALIDATORS_POSITION.setStorageUint256(100); - CL_BALANCE_POSITION.setStorageUint256(101); - DEPOSITED_VALIDATORS_POSITION.setStorageUint256(102); - } - - function harness_setContractVersion(uint256 _version) external { - _setContractVersion(_version); - } - - function harness_mintShares_v2(address _to, uint256 _sharesAmount) external { - _mintShares(_to, _sharesAmount); - _emitTransferAfterMintingShares(_to, _sharesAmount); - TOTAL_SHARES_POSITION.setStorageUint256(TOTAL_SHARES_POSITION_V3.getStorageUint256()); - } -} diff --git a/test/0.4.24/contracts/Lido__HarnessForFinalizeUpgradeV4.sol b/test/0.4.24/contracts/Lido__HarnessForFinalizeUpgradeV4.sol new file mode 100644 index 0000000000..be905985c7 --- /dev/null +++ b/test/0.4.24/contracts/Lido__HarnessForFinalizeUpgradeV4.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.4.24; + +import {Lido} from "contracts/0.4.24/Lido.sol"; +import {UnstructuredStorage} from "@aragon/os/contracts/apps/AragonApp.sol"; +import {UnstructuredStorageExt} from "contracts/0.4.24/utils/UnstructuredStorageExt.sol"; + +contract Lido__HarnessForFinalizeUpgradeV4 is 
Lido { + using UnstructuredStorage for bytes32; + using UnstructuredStorageExt for bytes32; + + // v3 storage positions + bytes32 internal constant BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION = + keccak256("lido.Lido.bufferedEtherAndDepositedValidators"); + bytes32 internal constant CL_BALANCE_AND_CL_VALIDATORS_POSITION = keccak256("lido.Lido.clBalanceAndClValidators"); + + function harness_initialize_v3(address _lidoLocator) external payable { + _bootstrapInitialHolder(); // stone in the elevator + + _setLidoLocator(_lidoLocator); + emit LidoLocatorSet(_lidoLocator); + + initialized(); + + _resume(); + + _setContractVersion(3); + + BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.setLowUint128(msg.value); + BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION.setHighUint128(120); + + CL_BALANCE_AND_CL_VALIDATORS_POSITION.setLowUint128(100 * 32 ether); + CL_BALANCE_AND_CL_VALIDATORS_POSITION.setHighUint128(100); + } + + function harness_setContractVersion(uint256 _version) external { + _setContractVersion(_version); + } +} diff --git a/test/0.4.24/contracts/StakingRouter__MockForLidoAccounting.sol b/test/0.4.24/contracts/StakingRouter__MockForLidoAccounting.sol index 9b5e9b87e6..20abf3ec60 100644 --- a/test/0.4.24/contracts/StakingRouter__MockForLidoAccounting.sol +++ b/test/0.4.24/contracts/StakingRouter__MockForLidoAccounting.sol @@ -34,6 +34,20 @@ contract StakingRouter__MockForLidoAccounting { emit Mock__MintedRewardsReported(); } + function receiveDepositableEther() external payable { + // Mock implementation - no-op + } + + uint256 private depositAmountFromLastSlot__mocked; + + function onAccountingReport(uint256) external { + // Mock implementation - no-op + } + + function getDepositAmountFromLastSlot(uint256) external view returns (uint256) { + return depositAmountFromLastSlot__mocked; + } + function mock__getStakingRewardsDistribution( address[] calldata _recipients, uint256[] calldata _stakingModuleIds, @@ -47,4 +61,8 @@ contract 
StakingRouter__MockForLidoAccounting { totalFee__mocked = _totalFee; precisionPoint__mocked = _precisionPoints; } + + function mock__setDepositAmountFromLastSlot(uint256 _amount) external { + depositAmountFromLastSlot__mocked = _amount; + } } diff --git a/test/0.4.24/contracts/StakingRouter__MockForLidoMisc.sol b/test/0.4.24/contracts/StakingRouter__MockForLidoMisc.sol index d046ec24c9..36565a4cda 100644 --- a/test/0.4.24/contracts/StakingRouter__MockForLidoMisc.sol +++ b/test/0.4.24/contracts/StakingRouter__MockForLidoMisc.sol @@ -6,7 +6,10 @@ pragma solidity 0.8.9; contract StakingRouter__MockForLidoMisc { event Mock__DepositCalled(); + uint256 public constant INITIAL_DEPOSIT_SIZE = 32 ether; + uint256 private stakingModuleMaxDepositsCount; + uint256 private stakingModuleMaxInitialDepositsAmount; function getWithdrawalCredentials() external pure returns (bytes32) { return 0x010000000000000000000000b9d7934878b5fb9610b3fe8a5e441e8fad7e293f; // Lido Withdrawal Creds @@ -29,6 +32,13 @@ contract StakingRouter__MockForLidoMisc { modulesFee = 500; } + function getStakingModuleMaxInitialDepositsAmount( + uint256 stakingModuleId, + uint256 eth + ) external view returns (uint256, uint256) { + return (stakingModuleMaxInitialDepositsAmount, stakingModuleMaxDepositsCount); + } + function getStakingModuleMaxDepositsCount( uint256, // _stakingModuleId, uint256 // _maxDepositsValue @@ -37,14 +47,22 @@ contract StakingRouter__MockForLidoMisc { } function deposit( - uint256, // _depositsCount, uint256, // _stakingModuleId, bytes calldata // _depositCalldata ) external payable { emit Mock__DepositCalled(); } + function receiveDepositableEther() external payable { + // Mock function to receive ETH from Lido.withdrawDepositableEther + } + function mock__getStakingModuleMaxDepositsCount(uint256 newValue) external { stakingModuleMaxDepositsCount = newValue; + stakingModuleMaxInitialDepositsAmount = newValue * INITIAL_DEPOSIT_SIZE; + } + + function 
mock__setStakingModuleMaxInitialDepositsAmount(uint256 newValue) external { + stakingModuleMaxInitialDepositsAmount = newValue; } } diff --git a/test/0.4.24/contracts/StakingRouter__MockForLidoTopUp.sol b/test/0.4.24/contracts/StakingRouter__MockForLidoTopUp.sol new file mode 100644 index 0000000000..1f708a7dea --- /dev/null +++ b/test/0.4.24/contracts/StakingRouter__MockForLidoTopUp.sol @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +contract StakingRouter__MockForLidoTopUp { + event Mock__TopUpCalled(uint256 stakingModuleId, bytes pubkeysPacked, uint256[] topUpLimitsGwei); + + uint256 private _amount; + bytes private _pubkeysPacked; + uint256[] private _topUpAmounts; + + uint256 public topUpCalls; + bool public shouldRevert; + + function getTopUpDepositAmount( + uint256, + uint256, + uint256[] calldata, + uint256[] calldata, + bytes calldata, + uint256[] calldata + ) external view returns (uint256 amount, bytes memory pubkeysPacked, uint256[] memory allocations) { + amount = _amount; + pubkeysPacked = _pubkeysPacked; + allocations = _topUpAmounts; + } + + function topUp( + uint256 stakingModuleId, + bytes calldata pubkeysPacked, + uint256[] calldata topUpAmountsGwei + ) external payable { + require(!shouldRevert, "StakingRouter: revert"); + ++topUpCalls; + emit Mock__TopUpCalled(stakingModuleId, pubkeysPacked, topUpAmountsGwei); + } + + function mock__setTopUpAmount( + uint256 topUpDepositAmount, + bytes calldata pubkeysPacked, + uint256[] calldata topUpAmounts + ) external { + _amount = topUpDepositAmount; + _pubkeysPacked = pubkeysPacked; + _topUpAmounts = topUpAmounts; + } + + function mock__setShouldRevert(bool value) external { + shouldRevert = value; + } +} diff --git a/test/0.4.24/contracts/WithdrawalQueue__MockForAccounting.sol b/test/0.4.24/contracts/WithdrawalQueue__MockForAccounting.sol index 6811039b20..5dd465c6ee 100644 --- 
a/test/0.4.24/contracts/WithdrawalQueue__MockForAccounting.sol +++ b/test/0.4.24/contracts/WithdrawalQueue__MockForAccounting.sol @@ -13,6 +13,8 @@ contract WithdrawalQueue__MockForAccounting { ); bool public isPaused; + bool public isBunkerModeActive; + uint256 public unfinalizedStETH; uint256 private ethToLock_; uint256 private sharesToBurn_; @@ -32,6 +34,12 @@ contract WithdrawalQueue__MockForAccounting { function finalize(uint256 _lastRequestIdToBeFinalized, uint256 _maxShareRate) external payable { _maxShareRate; + if (unfinalizedStETH > msg.value) { + unfinalizedStETH -= msg.value; + } else { + unfinalizedStETH = 0; + } + // some random fake event values uint256 firstRequestIdToFinalize = 0; uint256 sharesToBurn = msg.value; @@ -53,4 +61,8 @@ contract WithdrawalQueue__MockForAccounting { ethToLock_ = _ethToLock; sharesToBurn_ = _sharesToBurn; } + + function mock__unfinalizedStETH(uint256 _unfinalizedStETH) external { + unfinalizedStETH = _unfinalizedStETH; + } } diff --git a/test/0.4.24/lido/lido.accounting.test.ts b/test/0.4.24/lido/lido.accounting.test.ts index 0fda2c4d7f..63a4eea0e0 100644 --- a/test/0.4.24/lido/lido.accounting.test.ts +++ b/test/0.4.24/lido/lido.accounting.test.ts @@ -4,6 +4,8 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { + AccountingOracle__MockForStakingRouter, + AccountingOracle__MockForStakingRouter__factory, ACL, Burner__MockForAccounting, Burner__MockForAccounting__factory, @@ -37,16 +39,18 @@ describe("Lido:accounting", () => { let burner: Burner__MockForAccounting; let elRewardsVault: LidoExecutionLayerRewardsVault__MockForLidoAccounting; let withdrawalVault: WithdrawalVault__MockForLidoAccounting; + let accountingOracle: AccountingOracle__MockForStakingRouter; beforeEach(async () => { [deployer, stranger] = await ethers.getSigners(); - [stakingRouter, withdrawalQueue, burner, elRewardsVault, withdrawalVault] = await Promise.all([ + [stakingRouter, 
withdrawalQueue, burner, elRewardsVault, withdrawalVault, accountingOracle] = await Promise.all([ new StakingRouter__MockForLidoAccounting__factory(deployer).deploy(), new WithdrawalQueue__MockForAccounting__factory(deployer).deploy(), new Burner__MockForAccounting__factory(deployer).deploy(), new LidoExecutionLayerRewardsVault__MockForLidoAccounting__factory(deployer).deploy(), new WithdrawalVault__MockForLidoAccounting__factory(deployer).deploy(), + new AccountingOracle__MockForStakingRouter__factory(deployer).deploy(), ]); ({ lido, acl } = await deployLidoDao({ @@ -58,13 +62,13 @@ describe("Lido:accounting", () => { burner, elRewardsVault, withdrawalVault, + accountingOracle, }, })); locator = LidoLocator__factory.connect(await lido.getLidoLocator(), deployer); await acl.createPermission(deployer, lido, await lido.RESUME_ROLE(), deployer); await acl.createPermission(deployer, lido, await lido.PAUSE_ROLE(), deployer); - await acl.createPermission(deployer, lido, await lido.UNSAFE_CHANGE_DEPOSITED_VALIDATORS_ROLE(), deployer); await lido.resume(); }); @@ -84,36 +88,42 @@ describe("Lido:accounting", () => { await expect( lido.processClStateUpdate( ...args({ - postClValidators: 100n, - postClBalance: 100n, + clValidatorsBalance: 100n, + clPendingBalance: 50n, }), ), ) - .to.emit(lido, "CLValidatorsUpdated") - .withArgs(0n, 0n, 100n); + .to.emit(lido, "CLBalancesUpdated") + .withArgs(0n, 100n, 50n); }); - type ArgsTuple = [bigint, bigint, bigint, bigint]; + type ArgsTuple = [bigint, bigint, bigint]; interface Args { reportTimestamp: bigint; - preClValidators: bigint; - postClValidators: bigint; - postClBalance: bigint; + clValidatorsBalance: bigint; + clPendingBalance: bigint; } function args(overrides?: Partial): ArgsTuple { return Object.values({ reportTimestamp: 0n, - preClValidators: 0n, - postClValidators: 0n, - postClBalance: 0n, + clValidatorsBalance: 0n, + clPendingBalance: 0n, ...overrides, }) as ArgsTuple; } }); 
context("collectRewardsAndProcessWithdrawals", async () => { + async function getAccountingSigner() { + return impersonate(await locator.accounting(), ether("100.0")); + } + + async function getStakingRouterSigner() { + return impersonate(await locator.stakingRouter(), ether("1.0")); + } + it("Reverts when contract is stopped", async () => { await lido.connect(deployer).stop(); await expect(lido.collectRewardsAndProcessWithdrawals(...args())).to.be.revertedWith("CONTRACT_IS_STOPPED"); @@ -134,7 +144,7 @@ describe("Lido:accounting", () => { expect(initialBufferedEther).greaterThanOrEqual(ethToLock); await withdrawalQueue.mock__prefinalizeReturn(ethToLock, 0n); - const accountingSigner = await impersonate(await locator.accounting(), ether("100.0")); + const accountingSigner = await getAccountingSigner(); lido = lido.connect(accountingSigner); await lido.collectRewardsAndProcessWithdrawals(...args({ etherToLockOnWithdrawalQueue: ethToLock })); @@ -147,7 +157,7 @@ describe("Lido:accounting", () => { await updateBalance(await elRewardsVault.getAddress(), elRewardsToWithdraw); - const accountingSigner = await impersonate(await locator.accounting(), ether("100.0")); + const accountingSigner = await getAccountingSigner(); lido = lido.connect(accountingSigner); await expect(lido.collectRewardsAndProcessWithdrawals(...args({ elRewardsToWithdraw }))) @@ -166,7 +176,7 @@ describe("Lido:accounting", () => { await updateBalance(await withdrawalVault.getAddress(), withdrawalsToWithdraw); - const accountingSigner = await impersonate(await locator.accounting(), ether("100.0")); + const accountingSigner = await getAccountingSigner(); lido = lido.connect(accountingSigner); await expect(lido.collectRewardsAndProcessWithdrawals(...args({ withdrawalsToWithdraw }))) @@ -187,7 +197,7 @@ describe("Lido:accounting", () => { await updateBalance(await elRewardsVault.getAddress(), elRewardsToWithdraw); await updateBalance(await withdrawalVault.getAddress(), withdrawalsToWithdraw); - const 
accountingSigner = await impersonate(await locator.accounting(), ether("100.0")); + const accountingSigner = await getAccountingSigner(); lido = lido.connect(accountingSigner); await expect(lido.collectRewardsAndProcessWithdrawals(...args({ elRewardsToWithdraw, withdrawalsToWithdraw }))) @@ -224,7 +234,7 @@ describe("Lido:accounting", () => { const precisionPoints = 10n ** 20n; await stakingRouter.mock__getStakingRewardsDistribution([], [], [], totalFee, precisionPoints); - const accountingSigner = await impersonate(await locator.accounting(), ether("100.0")); + const accountingSigner = await getAccountingSigner(); lido = lido.connect(accountingSigner); await expect( lido.collectRewardsAndProcessWithdrawals( @@ -238,6 +248,155 @@ describe("Lido:accounting", () => { .withArgs(reportTimestamp, preCLBalance, clBalance, withdrawals, elRewards, bufferedEther); }); + it("Resyncs deposits reserve to target on report processing when reserve was spent", async () => { + await acl.createPermission(deployer, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + const reserveTarget = ether("3.0"); + await lido.setDepositsReserveTarget(reserveTarget); + expect(await lido.getDepositsReserve()).to.equal(0n); + + await accountingOracle.mock_setProcessingState(1, true, true); + + const accountingSigner = await getAccountingSigner(); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(...args()); + + const bufferedAfterSync = await lido.getBufferedEther(); + const expectedReserveAfterSync = bufferedAfterSync < reserveTarget ? 
bufferedAfterSync : reserveTarget; + + expect(await lido.getDepositsReserve()).to.equal(expectedReserveAfterSync); + await lido.submit(await deployer.getAddress(), { value: ether("10.0") }); + + const stakingRouterSigner = await getStakingRouterSigner(); + const spendAmount = ether("1.0"); + await lido.connect(stakingRouterSigner).withdrawDepositableEther(spendAmount, 1n); + + expect(await lido.getDepositsReserveTarget()).to.equal(reserveTarget); + expect(await lido.getDepositsReserve()).to.equal(reserveTarget - spendAmount); + + await expect(lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(...args())) + .to.emit(lido, "DepositsReserveSet") + .withArgs(reserveTarget); + + expect(await lido.getDepositsReserve()).to.equal(reserveTarget); + }); + + it("Does not emit DepositsReserveSet on report processing when reserve already matches target", async () => { + await acl.createPermission(deployer, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + const reserveTarget = ether("2.0"); + await lido.setDepositsReserveTarget(reserveTarget); + expect(await lido.getDepositsReserveTarget()).to.equal(reserveTarget); + + const accountingSigner = await getAccountingSigner(); + // First report syncs reserve to target after target increase. + await expect(lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(...args())) + .to.emit(lido, "DepositsReserveSet") + .withArgs(reserveTarget); + + const bufferedAfterSync = await lido.getBufferedEther(); + const expectedReserveAfterSync = bufferedAfterSync < reserveTarget ? 
bufferedAfterSync : reserveTarget; + expect(await lido.getDepositsReserve()).to.equal(expectedReserveAfterSync); + + await expect(lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(...args())).not.to.emit( + lido, + "DepositsReserveSet", + ); + }); + + it("Keeps effective deposits reserve capped by buffered ether after report sync", async () => { + await acl.createPermission(deployer, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + const reserveTarget = ether("100.0"); + await lido.setDepositsReserveTarget(reserveTarget); + expect(await lido.getDepositsReserveTarget()).to.equal(reserveTarget); + + const bufferedBefore = await lido.getBufferedEther(); + expect(bufferedBefore).to.be.gt(0n); + + const reserveBefore = await lido.getDepositsReserve(); + expect(reserveBefore).to.equal(0n); + + // Target increase is deferred until report processing. + expect(await lido.getDepositsReserve()).to.equal(reserveBefore); + + const submitted = ether("1.0"); + await lido.submit(await deployer.getAddress(), { value: submitted }); + + const bufferedAfterSubmit = await lido.getBufferedEther(); + expect(bufferedAfterSubmit).to.equal(bufferedBefore + submitted); + expect(await lido.getDepositsReserve()).to.equal(reserveBefore); + + const accountingSigner = await getAccountingSigner(); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(...args()); + + const bufferedAfter = await lido.getBufferedEther(); + expect(bufferedAfter).to.equal(bufferedAfterSubmit); + const expectedReserveAfterSync = bufferedAfter < reserveTarget ? 
bufferedAfter : reserveTarget; + expect(await lido.getDepositsReserve()).to.equal(expectedReserveAfterSync); + }); + + it("Consumes withdrawals reserve on withdrawal finalization (when deposits reserve = 0)", async () => { + await acl.createPermission(deployer, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + await lido.setDepositsReserveTarget(0n); + + await lido.submit(await deployer.getAddress(), { value: ether("10.0") }); + + const unfinalizedBefore = ether("6.0"); + await withdrawalQueue.mock__unfinalizedStETH(unfinalizedBefore); + + const bufferedBefore = await lido.getBufferedEther(); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getWithdrawalsReserve()).to.equal(unfinalizedBefore); + + const lockAmount = ether("2.0"); + const accountingSigner = await getAccountingSigner(); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals( + ...args({ + lastWithdrawalRequestToFinalize: 1n, + simulatedShareRate: 1n, + etherToLockOnWithdrawalQueue: lockAmount, + }), + ); + + const bufferedAfter = await lido.getBufferedEther(); + expect(bufferedAfter).to.equal(bufferedBefore - lockAmount); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getWithdrawalsReserve()).to.equal(unfinalizedBefore - lockAmount); + }); + + it("Consumes withdrawals reserve on withdrawal finalization (when deposits reserve > 0)", async () => { + await acl.createPermission(deployer, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + const reserveTarget = ether("3.0"); + await lido.setDepositsReserveTarget(reserveTarget); + + await lido.submit(await deployer.getAddress(), { value: ether("10.0") }); + await withdrawalQueue.mock__unfinalizedStETH(ether("6.0")); + + const accountingSigner = await getAccountingSigner(); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(...args()); + + const depositsReserveBefore = await lido.getDepositsReserve(); + const withdrawalsReserveBefore = 
await lido.getWithdrawalsReserve(); + const bufferedBefore = await lido.getBufferedEther(); + + expect(depositsReserveBefore).to.be.gt(0n); + expect(withdrawalsReserveBefore).to.be.gt(0n); + + const lockAmount = ether("2.0"); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals( + ...args({ + lastWithdrawalRequestToFinalize: 1n, + simulatedShareRate: 1n, + etherToLockOnWithdrawalQueue: lockAmount, + }), + ); + + expect(await lido.getBufferedEther()).to.equal(bufferedBefore - lockAmount); + expect(await lido.getDepositsReserve()).to.equal(depositsReserveBefore); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore - lockAmount); + }); + type ArgsTuple = [bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint]; interface Args { diff --git a/test/0.4.24/lido/lido.finalizeUpgrade_v3.test.ts b/test/0.4.24/lido/lido.finalizeUpgrade_v3.test.ts deleted file mode 100644 index 1914a98c3b..0000000000 --- a/test/0.4.24/lido/lido.finalizeUpgrade_v3.test.ts +++ /dev/null @@ -1,211 +0,0 @@ -import { expect } from "chai"; -import { MaxUint256, ZeroAddress } from "ethers"; -import { ethers } from "hardhat"; - -import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { time } from "@nomicfoundation/hardhat-network-helpers"; - -import { - Burner, - Burner__MockForMigration, - ICSModule__factory, - Lido__HarnessForFinalizeUpgradeV3, - LidoLocator, - OssifiableProxy__factory, -} from "typechain-types"; - -import { certainAddress, ether, getStorageAtPosition, impersonate, proxify, TOTAL_BASIS_POINTS } from "lib"; - -import { deployLidoLocator } from "test/deploy"; -import { Snapshot } from "test/suite"; - -describe("Lido.sol:finalizeUpgrade_v3", () => { - let deployer: HardhatEthersSigner; - - let impl: Lido__HarnessForFinalizeUpgradeV3; - let lido: Lido__HarnessForFinalizeUpgradeV3; - let locator: LidoLocator; - - const initialValue = 1n; - const finalizeVersion = 3n; - - let withdrawalQueueAddress: 
string; - let burner: Burner; - let oldBurner: Burner__MockForMigration; - - const dummyLocatorAddress = certainAddress("dummy-locator"); - let simpleDvtAddress: string; - let nodeOperatorsRegistryAddress: string; - let csmAccountingAddress: string; - - const oldCoverSharesBurnRequested = 100n; - const oldNonCoverSharesBurnRequested = 200n; - const oldTotalCoverSharesBurnt = 300n; - const oldTotalNonCoverSharesBurnt = 400n; - const sharesOnOldBurner = 1000n; - - let originalState: string; - - before(async () => { - [deployer] = await ethers.getSigners(); - impl = await ethers.deployContract("Lido__HarnessForFinalizeUpgradeV3"); - [lido] = await proxify({ impl, admin: deployer }); - - burner = await ethers.deployContract("Burner", [dummyLocatorAddress, lido]); - - const proxyFactory = new OssifiableProxy__factory(deployer); - const burnerProxy = await proxyFactory.deploy(burner, deployer, new Uint8Array()); - burner = burner.attach(burnerProxy) as Burner; - - const isMigrationAllowed = true; - await burner.connect(deployer).initialize(deployer, isMigrationAllowed); - const stakingRouter = await ethers.deployContract("StakingRouter__MockForLidoUpgrade"); - - nodeOperatorsRegistryAddress = (await stakingRouter.getStakingModule(1)).stakingModuleAddress; - simpleDvtAddress = (await stakingRouter.getStakingModule(2)).stakingModuleAddress; - csmAccountingAddress = await ICSModule__factory.connect( - (await stakingRouter.getStakingModule(3)).stakingModuleAddress, - deployer, - ).accounting(); - - locator = await deployLidoLocator({ burner, stakingRouter }); - - withdrawalQueueAddress = await locator.withdrawalQueue(); - - oldBurner = await ethers.deployContract("Burner__MockForMigration", []); - await oldBurner - .connect(deployer) - .setSharesRequestedToBurn(oldCoverSharesBurnRequested, oldNonCoverSharesBurnRequested); - await oldBurner.connect(deployer).setSharesBurnt(oldTotalCoverSharesBurnt, oldTotalNonCoverSharesBurnt); - - await lido.connect(await 
impersonate(nodeOperatorsRegistryAddress, ether("1"))).approve(oldBurner, MaxUint256); - await lido.connect(await impersonate(simpleDvtAddress, ether("1"))).approve(oldBurner, MaxUint256); - await lido.connect(await impersonate(csmAccountingAddress, ether("1"))).approve(oldBurner, MaxUint256); - await lido.connect(await impersonate(withdrawalQueueAddress, ether("1"))).approve(oldBurner, MaxUint256); - }); - - beforeEach(async () => (originalState = await Snapshot.take())); - afterEach(async () => await Snapshot.restore(originalState)); - - it("Reverts if not initialized", async () => { - await expect(lido.finalizeUpgrade_v3(ZeroAddress, [], 0)).to.be.revertedWith("NOT_INITIALIZED"); - }); - - context("initialized", () => { - before(async () => { - const latestBlock = BigInt(await time.latestBlock()); - - await lido.connect(deployer).harness_initialize_v2(locator, { value: initialValue }); - - expect(await impl.getInitializationBlock()).to.equal(MaxUint256); - expect(await lido.getInitializationBlock()).to.equal(latestBlock + 1n); - }); - - it("Reverts if contract version does not equal 2", async () => { - const unexpectedVersion = 1n; - await lido.harness_setContractVersion(unexpectedVersion); - await expect( - lido.finalizeUpgrade_v3( - oldBurner, - [nodeOperatorsRegistryAddress, simpleDvtAddress, csmAccountingAddress, withdrawalQueueAddress], - 0, - ), - ).to.be.revertedWith("UNEXPECTED_CONTRACT_VERSION"); - }); - - it("Reverts if old burner is the same as new burner", async () => { - await expect(lido.finalizeUpgrade_v3(burner, [], 0)).to.be.revertedWith("OLD_BURNER_SAME_AS_NEW"); - }); - - it("Reverts if old burner is zero address", async () => { - await expect(lido.finalizeUpgrade_v3(ZeroAddress, [], 0)).to.be.revertedWith("OLD_BURNER_ADDRESS_ZERO"); - }); - - it("Sets contract version to 3 and max external ratio to 10", async () => { - await expect( - lido.finalizeUpgrade_v3( - oldBurner, - [nodeOperatorsRegistryAddress, simpleDvtAddress, 
csmAccountingAddress, withdrawalQueueAddress], - 10, - ), - ) - .to.emit(lido, "ContractVersionSet") - .withArgs(finalizeVersion) - .and.emit(lido, "MaxExternalRatioBPSet") - .withArgs(10); - expect(await lido.getContractVersion()).to.equal(finalizeVersion); - expect(await lido.getMaxExternalRatioBP()).to.equal(10); - }); - - it("Reverts if initial max external ratio is greater than total basis points", async () => { - await expect( - lido.finalizeUpgrade_v3( - oldBurner, - [nodeOperatorsRegistryAddress, simpleDvtAddress, csmAccountingAddress, withdrawalQueueAddress], - TOTAL_BASIS_POINTS + 1n, - ), - ).to.be.revertedWith("INVALID_MAX_EXTERNAL_RATIO"); - }); - - it("Migrates storage successfully", async () => { - const totalShares = await getStorageAtPosition(lido, "lido.StETH.totalShares"); - const bufferedEther = await getStorageAtPosition(lido, "lido.Lido.bufferedEther"); - - const beaconValidators = await getStorageAtPosition(lido, "lido.Lido.beaconValidators"); - const beaconBalance = await getStorageAtPosition(lido, "lido.Lido.beaconBalance"); - const depositedValidators = await getStorageAtPosition(lido, "lido.Lido.depositedValidators"); - - await expect( - lido.finalizeUpgrade_v3( - oldBurner, - [nodeOperatorsRegistryAddress, simpleDvtAddress, csmAccountingAddress, withdrawalQueueAddress], - 0, - ), - ).to.not.be.reverted; - - expect(await lido.getLidoLocator()).to.equal(locator); - expect(await lido.getTotalShares()).to.equal(totalShares); - expect(await lido.getBufferedEther()).to.equal(bufferedEther); - - expect((await lido.getBeaconStat()).beaconBalance).to.equal(beaconBalance); - expect((await lido.getBeaconStat()).beaconValidators).to.equal(beaconValidators); - expect((await lido.getBeaconStat()).depositedValidators).to.equal(depositedValidators); - }); - - it("Migrates burner successfully", async () => { - await lido.harness_mintShares_v2(oldBurner, sharesOnOldBurner); - expect(await lido.sharesOf(oldBurner)).to.equal(sharesOnOldBurner); - - await 
expect( - lido.finalizeUpgrade_v3( - oldBurner, - [nodeOperatorsRegistryAddress, simpleDvtAddress, csmAccountingAddress, withdrawalQueueAddress], - 0, - ), - ) - .to.emit(lido, "TransferShares") - .withArgs(oldBurner, burner, sharesOnOldBurner); - - expect(await lido.sharesOf(oldBurner)).to.equal(0n); - expect(await lido.sharesOf(burner)).to.equal(sharesOnOldBurner); - - expect(await burner.getCoverSharesBurnt()).to.equal(oldTotalCoverSharesBurnt); - expect(await burner.getNonCoverSharesBurnt()).to.equal(oldTotalNonCoverSharesBurnt); - const [coverShares, nonCoverShares] = await burner.getSharesRequestedToBurn(); - expect(coverShares).to.equal(oldCoverSharesBurnRequested); - expect(nonCoverShares).to.equal(oldNonCoverSharesBurnRequested); - - // Check old burner allowances are revoked - expect(await lido.allowance(nodeOperatorsRegistryAddress, oldBurner)).to.equal(0n); - expect(await lido.allowance(simpleDvtAddress, oldBurner)).to.equal(0n); - expect(await lido.allowance(csmAccountingAddress, oldBurner)).to.equal(0n); - expect(await lido.allowance(withdrawalQueueAddress, oldBurner)).to.equal(0n); - - // Check new burner allowances are set - expect(await lido.allowance(nodeOperatorsRegistryAddress, burner)).to.equal(MaxUint256); - expect(await lido.allowance(simpleDvtAddress, burner)).to.equal(MaxUint256); - expect(await lido.allowance(csmAccountingAddress, burner)).to.equal(MaxUint256); - expect(await lido.allowance(withdrawalQueueAddress, burner)).to.equal(MaxUint256); - }); - }); -}); diff --git a/test/0.4.24/lido/lido.finalizeUpgrade_v4.test.ts b/test/0.4.24/lido/lido.finalizeUpgrade_v4.test.ts new file mode 100644 index 0000000000..78e603d8dd --- /dev/null +++ b/test/0.4.24/lido/lido.finalizeUpgrade_v4.test.ts @@ -0,0 +1,102 @@ +import { expect } from "chai"; +import { MaxUint256 } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; +import { time } from 
"@nomicfoundation/hardhat-network-helpers"; + +import { + AccountingOracle__MockForStakingRouter, + Lido__HarnessForFinalizeUpgradeV4, + LidoLocator, +} from "typechain-types"; + +import { ether, getStorageAtPositionAsUint128Pair, proxify } from "lib"; + +import { deployLidoLocator } from "test/deploy/locator"; +import { Snapshot } from "test/suite"; + +describe("Lido.sol:finalizeUpgrade_v4", () => { + let deployer: HardhatEthersSigner; + + let impl: Lido__HarnessForFinalizeUpgradeV4; + let lido: Lido__HarnessForFinalizeUpgradeV4; + let accountingOracle: AccountingOracle__MockForStakingRouter; + let locator: LidoLocator; + + const initialValue = 1n; + const finalizeVersion = 4n; + + let originalState: string; + + before(async () => { + [deployer] = await ethers.getSigners(); + impl = await ethers.deployContract("Lido__HarnessForFinalizeUpgradeV4", { + signer: deployer, + }); + [lido] = await proxify({ impl, admin: deployer }); + accountingOracle = await ethers.deployContract("AccountingOracle__MockForStakingRouter", deployer); + locator = await deployLidoLocator({ lido, accountingOracle }, deployer); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + afterEach(async () => await Snapshot.restore(originalState)); + + it("Reverts if not initialized", async () => { + await expect(lido.finalizeUpgrade_v4()).to.be.revertedWith("NOT_INITIALIZED"); + }); + + context("initialized", () => { + before(async () => { + const latestBlock = BigInt(await time.latestBlock()); + + await lido.connect(deployer).harness_initialize_v3(locator, { value: initialValue }); + + expect(await impl.getInitializationBlock()).to.equal(MaxUint256); + expect(await lido.getInitializationBlock()).to.equal(latestBlock + 1n); + }); + + it("Reverts if contract version does not equal 3", async () => { + const unexpectedVersion = 1n; + await lido.harness_setContractVersion(unexpectedVersion); + await 
expect(lido.finalizeUpgrade_v4()).to.be.revertedWith("UNEXPECTED_CONTRACT_VERSION"); + }); + + it("Sets contract version to 4", async () => { + await expect(lido.finalizeUpgrade_v4()).to.emit(lido, "ContractVersionSet").withArgs(finalizeVersion); + expect(await lido.getContractVersion()).to.equal(finalizeVersion); + }); + + it("Reverts upgrade if occurred before report", async () => { + // simulate no report + await accountingOracle.mock_setProcessingState(1, false, false); + await expect(lido.finalizeUpgrade_v4()).to.be.revertedWith("NO_REPORT"); + }); + + it("Migrates storage successfully after report and before next frame", async () => { + // simulate report + await accountingOracle.mock_setProcessingState(1, true, true); + const { low: bufferedEther, high: depositedValidators } = await getStorageAtPositionAsUint128Pair( + lido, + "lido.Lido.bufferedEtherAndDepositedValidators", + ); + const { low: clBalance, high: clValidators } = await getStorageAtPositionAsUint128Pair( + lido, + "lido.Lido.clBalanceAndClValidators", + ); + + const depositedBalance = (depositedValidators - clValidators) * ether("32"); + + await expect(lido.finalizeUpgrade_v4()).to.not.be.reverted; + + expect(await lido.getBufferedEther()).to.equal(bufferedEther); + expect((await lido.getBeaconStat()).beaconBalance).to.equal(clBalance); + expect((await lido.getBeaconStat()).beaconValidators).to.equal(depositedValidators); + expect((await lido.getBeaconStat()).depositedValidators).to.equal(depositedValidators); + expect((await lido.getBalanceStats()).clValidatorsBalanceAtLastReport).to.equal(clBalance); + expect((await lido.getBalanceStats()).clPendingBalanceAtLastReport).to.equal(0); + expect((await lido.getBalanceStats()).depositedSinceLastReport).to.equal(depositedBalance); + expect((await lido.getBalanceStats()).depositedForCurrentReport).to.equal(0); + }); + }); +}); diff --git a/test/0.4.24/lido/lido.initialize.test.ts b/test/0.4.24/lido/lido.initialize.test.ts index 4f2238b385..dda86a8aff 
100644 --- a/test/0.4.24/lido/lido.initialize.test.ts +++ b/test/0.4.24/lido/lido.initialize.test.ts @@ -21,7 +21,9 @@ describe("Lido.sol:initialize", () => { before(async () => { [deployer] = await ethers.getSigners(); - const impl = await ethers.deployContract("Lido", deployer); + const impl = await ethers.deployContract("Lido", { + signer: deployer, + }); expect(await impl.getInitializationBlock()).to.equal(MaxUint256); [lido] = await proxify({ impl, admin: deployer }); @@ -33,7 +35,7 @@ context("initialize", () => { const initialValue = 1n; - const contractVersion = 3n; + const contractVersion = 4n; let withdrawalQueueAddress: string; let burnerAddress: string; diff --git a/test/0.4.24/lido/lido.misc.test.ts b/test/0.4.24/lido/lido.misc.test.ts index ac108f49a0..06a254de56 100644 --- a/test/0.4.24/lido/lido.misc.test.ts +++ b/test/0.4.24/lido/lido.misc.test.ts @@ -5,6 +5,8 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { + Accounting__MockForAccountingOracle, + AccountingOracle__MockForStakingRouter, ACL, Lido, LidoLocator, @@ -29,6 +31,8 @@ describe("Lido.sol:misc", () => { let locator: LidoLocator; let withdrawalQueue: WithdrawalQueue__MockForLidoMisc; let stakingRouter: StakingRouter__MockForLidoMisc; + let accounting: Accounting__MockForAccountingOracle; + let accountingOracle: AccountingOracle__MockForStakingRouter; const elRewardsVaultBalance = ether("100.0"); const withdrawalsVaultBalance = ether("100.0"); @@ -39,6 +43,8 @@ describe("Lido.sol:misc", () => { withdrawalQueue = await ethers.deployContract("WithdrawalQueue__MockForLidoMisc", deployer); stakingRouter = await ethers.deployContract("StakingRouter__MockForLidoMisc", deployer); + accounting = await ethers.deployContract("Accounting__MockForAccountingOracle", deployer); + accountingOracle = 
await ethers.deployContract("AccountingOracle__MockForStakingRouter", deployer); ({ lido, acl } = await deployLidoDao({ rootAccount: deployer, @@ -47,12 +54,14 @@ describe("Lido.sol:misc", () => { withdrawalQueue, stakingRouter, depositSecurityModule, + accounting, + accountingOracle, }, })); + await acl.createPermission(user, lido, await lido.STAKING_CONTROL_ROLE(), deployer); await acl.createPermission(user, lido, await lido.RESUME_ROLE(), deployer); await acl.createPermission(user, lido, await lido.PAUSE_ROLE(), deployer); - await acl.createPermission(user, lido, await lido.UNSAFE_CHANGE_DEPOSITED_VALIDATORS_ROLE(), deployer); lido = lido.connect(user); locator = await ethers.getContractAt("LidoLocator", await lido.getLidoLocator(), user); @@ -143,9 +152,10 @@ describe("Lido.sol:misc", () => { }); context("canDeposit", () => { - it("Returns true if Lido is not stopped and bunkerMode is disabled", async () => { + it("Returns true if Lido is not stopped and bunkerMode is disabled, and report is submitted", async () => { await lido.resume(); await withdrawalQueue.mock__bunkerMode(false); + await accountingOracle.mock_setProcessingState(1, true, true); expect(await lido.canDeposit()).to.equal(true); }); @@ -168,23 +178,11 @@ describe("Lido.sol:misc", () => { expect(await lido.canDeposit()).to.equal(false); }); - }); - - context("unsafeChangeDepositedValidators", () => { - it("Sets the number of deposited validators", async () => { - const { depositedValidators } = await lido.getBeaconStat(); - - const updatedDepositedValidators = depositedValidators + 50n; - await expect(lido.unsafeChangeDepositedValidators(updatedDepositedValidators)) - .to.emit(lido, "DepositedValidatorsChanged") - .withArgs(updatedDepositedValidators); - - expect((await lido.getBeaconStat()).depositedValidators).to.equal(updatedDepositedValidators); - }); + it("Returns false if main phase of report is not submitted", async () => { + await accountingOracle.mock_setProcessingState(1, false, false); 
- it("Reverts if the caller is unauthorized", async () => { - await expect(lido.connect(stranger).unsafeChangeDepositedValidators(100n)).to.be.revertedWith("APP_AUTH_FAILED"); + expect(await lido.canDeposit()).to.equal(false); }); }); @@ -221,9 +219,11 @@ describe("Lido.sol:misc", () => { }); context("getDepositableEther", () => { - it("Returns the amount of ether eligible for deposits", async () => { + it("Returns the amount of ether eligible for deposits (deposits reserve = 0)", async () => { await lido.resume(); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getDepositsReserveTarget()).to.equal(0n); const bufferedEtherBefore = await lido.getBufferedEther(); // top up buffer @@ -233,9 +233,11 @@ describe("Lido.sol:misc", () => { expect(await lido.getDepositableEther()).to.equal(bufferedEtherBefore + deposit); }); - it("Returns 0 if reserved by the buffered ether is fully reserved for withdrawals", async () => { + it("Returns 0 if buffered ether is fully reserved for withdrawals (deposits reserve = 0)", async () => { await lido.resume(); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getDepositsReserveTarget()).to.equal(0n); const bufferedEther = await lido.getBufferedEther(); // reserve all buffered ether for withdrawals @@ -244,9 +246,11 @@ describe("Lido.sol:misc", () => { expect(await lido.getDepositableEther()).to.equal(0); }); - it("Returns the difference if the buffered ether is partially reserved", async () => { + it("Returns buffered-minus-withdrawals reserve (deposits reserve = 0)", async () => { await lido.resume(); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getDepositsReserveTarget()).to.equal(0n); const bufferedEther = await lido.getBufferedEther(); // reserve half of buffered ether for withdrawals @@ -255,56 +259,524 @@ describe("Lido.sol:misc", () => { expect(await lido.getDepositableEther()).to.equal(bufferedEther - reservedForWithdrawals); }); + + 
it("Spending depositable ether does not affect withdrawals reserve", async () => { + await lido.resume(); + await acl.createPermission(user, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + const accountingSigner = await impersonate(await locator.accounting(), ether("1.0")); + const stakingRouterSigner = await impersonate(await locator.stakingRouter(), ether("1.0")); + + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("25.0")); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + + const unfinalized = ether("50.0"); + await withdrawalQueue.mock__unfinalizedStETH(unfinalized); + + const bufferedBefore = await lido.getBufferedEther(); + const depositsReserveBefore = await lido.getDepositsReserve(); + const expectedWithdrawalsReserveBefore = + bufferedBefore - depositsReserveBefore < unfinalized ? bufferedBefore - depositsReserveBefore : unfinalized; + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + expect(withdrawalsReserveBefore).to.equal(expectedWithdrawalsReserveBefore); + expect(withdrawalsReserveBefore).to.be.gt(0n); + const depositableBefore = await lido.getDepositableEther(); + expect(depositableBefore).to.be.gt(1n); + + await lido.connect(stakingRouterSigner).withdrawDepositableEther(depositableBefore / 2n, 0n); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + + const remainingDepositable = await lido.getDepositableEther(); + await lido.connect(stakingRouterSigner).withdrawDepositableEther(remainingDepositable, 0n); + + expect(await lido.getDepositableEther()).to.equal(0n); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + }); + + it("Returns deposits reserve when withdrawals demand saturates remaining buffer", async () => { + await lido.resume(); + await acl.createPermission(user, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + 
const accountingSigner = await impersonate(await locator.accounting(), ether("1.0")); + + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("25.0")); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + + const depositsReserve = await lido.getDepositsReserve(); + expect(depositsReserve).to.equal(ether("25.0")); + expect(await lido.getDepositableEther()).to.be.gt(depositsReserve); + + await withdrawalQueue.mock__unfinalizedStETH(ether("1000.0")); + expect(await lido.getDepositableEther()).to.equal(depositsReserve); + }); + + it("Keeps depositable unchanged on reserve target increase before report sync", async () => { + await lido.resume(); + await acl.createPermission(user, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + + const accountingSigner = await impersonate(await locator.accounting(), ether("1.0")); + + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("10.0")); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + await withdrawalQueue.mock__unfinalizedStETH(ether("100.0")); + + const depositableBefore = await lido.getDepositableEther(); + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + const depositsReserveBefore = await lido.getDepositsReserve(); + + await lido.setDepositsReserveTarget(ether("50.0")); + + expect(await lido.getDepositsReserve()).to.equal(depositsReserveBefore); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + expect(await lido.getDepositableEther()).to.equal(depositableBefore); + }); }); - context("deposit", () => { - const maxDepositsCount = 100n; - const stakingModuleId = 1n; - const depositCalldata = new Uint8Array(); + context("depositsReserve", () => { + let stakingRouterSigner: HardhatEthersSigner; + let accountingSigner: HardhatEthersSigner; + + 
const syncReserveWithOracleReport = async () => { + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + }; + + const assertDepositsReserveInvariants = async () => { + const buffered = await lido.getBufferedEther(); + const depositsReserve = await lido.getDepositsReserve(); + const withdrawalsReserve = await lido.getWithdrawalsReserve(); + const depositable = await lido.getDepositableEther(); + + expect(depositsReserve).to.be.lte(buffered); + expect(withdrawalsReserve).to.be.lte(buffered); + expect(depositable).to.be.lte(buffered); + expect(depositable).to.equal(buffered - withdrawalsReserve); + expect(depositsReserve + withdrawalsReserve).to.be.lte(buffered); + }; beforeEach(async () => { await lido.resume(); - lido = lido.connect(depositSecurityModule); + await acl.createPermission(user, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + stakingRouterSigner = await impersonate(await locator.stakingRouter(), ether("1.0")); + accountingSigner = await impersonate(await locator.accounting(), ether("1.0")); + }); + + it("Reverts if caller has no BUFFER_RESERVE_MANAGER_ROLE", async () => { + await expect(lido.connect(stranger).setDepositsReserveTarget(ether("1.0"))).to.be.revertedWith("APP_AUTH_FAILED"); + }); + + it("Calculates allocation consistently with withdrawals reserve and target", async () => { + const deposit = ether("100.0"); + const reserveTarget = ether("30.0"); + await lido.submit(ZeroAddress, { value: deposit }); + await lido.setDepositsReserveTarget(reserveTarget); + await syncReserveWithOracleReport(); + + const buffered = await lido.getBufferedEther(); + const unfinalized = ether("40.0"); + await withdrawalQueue.mock__unfinalizedStETH(unfinalized); + + const expectedDepositsReserve = buffered < reserveTarget ? buffered : reserveTarget; + const remainingAfterDeposits = buffered - expectedDepositsReserve; + const expectedWithdrawalsReserve = remainingAfterDeposits < unfinalized ? 
remainingAfterDeposits : unfinalized; + + expect(await lido.getDepositsReserve()).to.equal(expectedDepositsReserve); + expect(await lido.getWithdrawalsReserve()).to.equal(expectedWithdrawalsReserve); + expect(await lido.getDepositableEther()).to.equal(buffered - expectedWithdrawalsReserve); }); - it("Reverts if the caller is not `DepositSecurityModule`", async () => { - lido = lido.connect(stranger); + it("Does not increase current reserve immediately when target is increased", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("10.0")); + await syncReserveWithOracleReport(); + expect(await lido.getDepositsReserve()).to.equal(ether("10.0")); + + await lido.setDepositsReserveTarget(ether("60.0")); + // Reserve increase is deferred until report processing. + expect(await lido.getDepositsReserve()).to.equal(ether("10.0")); + await syncReserveWithOracleReport(); + expect(await lido.getDepositsReserve()).to.equal(ether("60.0")); + }); + + it("Keeps depositable unchanged on target increase before report sync", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("10.0")); + await syncReserveWithOracleReport(); + await withdrawalQueue.mock__unfinalizedStETH(ether("100.0")); + + const depositableBefore = await lido.getDepositableEther(); + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + + await lido.setDepositsReserveTarget(ether("50.0")); + + expect(await lido.getDepositableEther()).to.equal(depositableBefore); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + }); + + it("Caps current reserve immediately when target is lowered", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("70.0")); + await syncReserveWithOracleReport(); + expect(await lido.getDepositsReserve()).to.equal(ether("70.0")); + + await 
lido.setDepositsReserveTarget(ether("20.0")); + expect(await lido.getDepositsReserve()).to.equal(ether("20.0")); + }); + + it("Decreases depositable immediately on target decrease in saturated withdrawals demand", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("40.0")); + await syncReserveWithOracleReport(); + await withdrawalQueue.mock__unfinalizedStETH(ether("1000.0")); + + const buffered = await lido.getBufferedEther(); + expect(await lido.getDepositableEther()).to.equal(ether("40.0")); + expect(await lido.getWithdrawalsReserve()).to.equal(buffered - ether("40.0")); + + await lido.setDepositsReserveTarget(ether("20.0")); + + expect(await lido.getDepositableEther()).to.equal(ether("20.0")); + expect(await lido.getWithdrawalsReserve()).to.equal(buffered - ether("20.0")); + }); - await expect(lido.deposit(maxDepositsCount, stakingModuleId, depositCalldata)).to.be.revertedWith( - "APP_AUTH_DSM_FAILED", + it("Updates depositable immediately when unfinalized withdrawals demand changes", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("30.0")); + await syncReserveWithOracleReport(); + + const buffered = await lido.getBufferedEther(); + const depositsReserve = await lido.getDepositsReserve(); + + await withdrawalQueue.mock__unfinalizedStETH(ether("10.0")); + expect(await lido.getDepositableEther()).to.equal(buffered - ether("10.0")); + + await withdrawalQueue.mock__unfinalizedStETH(ether("50.0")); + expect(await lido.getDepositableEther()).to.equal(buffered - ether("50.0")); + + await withdrawalQueue.mock__unfinalizedStETH(ether("1000.0")); + expect(await lido.getDepositableEther()).to.equal(depositsReserve); + }); + + it("Keeps depositable at deposits reserve when unfinalized demand reaches allocation boundary", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await 
lido.setDepositsReserveTarget(ether("25.0")); + await syncReserveWithOracleReport(); + + const buffered = await lido.getBufferedEther(); + const depositsReserve = await lido.getDepositsReserve(); + const boundary = buffered - depositsReserve; + + await withdrawalQueue.mock__unfinalizedStETH(boundary); + expect(await lido.getWithdrawalsReserve()).to.equal(boundary); + expect(await lido.getDepositableEther()).to.equal(depositsReserve); + + await withdrawalQueue.mock__unfinalizedStETH(boundary + 1n); + expect(await lido.getWithdrawalsReserve()).to.equal(boundary); + expect(await lido.getDepositableEther()).to.equal(depositsReserve); + }); + + it("Handles setting reserve target to zero", async () => { + const deposit = ether("100.0"); + await lido.submit(ZeroAddress, { value: deposit }); + await lido.setDepositsReserveTarget(ether("40.0")); + await syncReserveWithOracleReport(); + expect(await lido.getDepositsReserve()).to.equal(ether("40.0")); + + const unfinalized = ether("30.0"); + await withdrawalQueue.mock__unfinalizedStETH(unfinalized); + + await lido.setDepositsReserveTarget(0n); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getWithdrawalsReserve()).to.equal(unfinalized); + const buffered = await lido.getBufferedEther(); + expect(await lido.getDepositableEther()).to.equal(buffered - unfinalized); + }); + + it("Consumes deposits reserve once when CL-depositable ether is spent and reserve target exceeds buffer", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + const bufferedBefore = await lido.getBufferedEther(); + + // Keep all buffered ether depositable and make stored reserve larger than the buffer. 
+ await withdrawalQueue.mock__unfinalizedStETH(0n); + await lido.setDepositsReserveTarget(bufferedBefore + ether("100.0")); + await syncReserveWithOracleReport(); + + const spentDepositableEther = ether("10.0"); + await lido.connect(stakingRouterSigner).withdrawDepositableEther(spentDepositableEther, 1n); + + const bufferedAfter = await lido.getBufferedEther(); + expect(bufferedAfter).to.equal(bufferedBefore - spentDepositableEther); + expect(await lido.getDepositsReserve()).to.equal(bufferedAfter); + expect(await lido.getDepositableEther()).to.equal(bufferedAfter); + }); + + it("Does not decrease withdrawals reserve when all depositable ether is withdrawn", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("30.0")); + await syncReserveWithOracleReport(); + await withdrawalQueue.mock__unfinalizedStETH(ether("50.0")); + + const bufferedBefore = await lido.getBufferedEther(); + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + const depositableBefore = await lido.getDepositableEther(); + expect(depositableBefore).to.equal(bufferedBefore - withdrawalsReserveBefore); + + await lido.connect(stakingRouterSigner).withdrawDepositableEther(depositableBefore, 0n); + + expect(await lido.getDepositableEther()).to.equal(0n); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + }); + + it("Emits only target event on target increase and emits reserve update on target decrease", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + + const increasedTarget = ether("25.0"); + await expect(lido.setDepositsReserveTarget(increasedTarget)) + .to.emit(lido, "DepositsReserveTargetSet") + .withArgs(increasedTarget) + .and.not.to.emit(lido, "DepositsReserveSet"); + + await syncReserveWithOracleReport(); + expect(await lido.getDepositsReserve()).to.equal(increasedTarget); + + const loweredTarget = ether("10.0"); + await 
expect(lido.setDepositsReserveTarget(loweredTarget)) + .to.emit(lido, "DepositsReserveTargetSet") + .withArgs(loweredTarget) + .and.to.emit(lido, "DepositsReserveSet") + .withArgs(loweredTarget); + }); + + it("Keeps deposits reserve at zero when buffer is empty and target is positive", async () => { + await withdrawalQueue.mock__unfinalizedStETH(0n); + const depositableBefore = await lido.getDepositableEther(); + if (depositableBefore > 0n) { + await lido.connect(stakingRouterSigner).withdrawDepositableEther(depositableBefore, 0n); + } + + expect(await lido.getBufferedEther()).to.equal(0n); + + const target = ether("50.0"); + await lido.setDepositsReserveTarget(target); + expect(await lido.getDepositsReserveTarget()).to.equal(target); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getDepositableEther()).to.equal(0n); + + await syncReserveWithOracleReport(); + expect(await lido.getDepositsReserve()).to.equal(0n); + expect(await lido.getDepositableEther()).to.equal(0n); + }); + + it("Reverts withdraw when requested amount is above depositable with withdrawals reserve present", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("40.0")); + await syncReserveWithOracleReport(); + await withdrawalQueue.mock__unfinalizedStETH(ether("80.0")); + + const depositable = await lido.getDepositableEther(); + await expect(lido.connect(stakingRouterSigner).withdrawDepositableEther(depositable + 1n, 0n)).to.be.revertedWith( + "NOT_ENOUGH_ETHER", ); }); - it("Reverts if the contract is stopped", async () => { - await lido.connect(user).stop(); + it("Syncs reserve to target after non-zero accounting buffer movements", async () => { + await lido.connect(elRewardsVault).receiveELRewards({ value: ether("3.0") }); + await lido.submit(ZeroAddress, { value: ether("100.0") }); + + await lido.setDepositsReserveTarget(ether("20.0")); + await syncReserveWithOracleReport(); + await 
lido.connect(stakingRouterSigner).withdrawDepositableEther(ether("5.0"), 0n); + expect(await lido.getDepositsReserve()).to.equal(ether("15.0")); + + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); - await expect(lido.deposit(maxDepositsCount, stakingModuleId, depositCalldata)).to.be.revertedWith( - "CAN_NOT_DEPOSIT", + expect(await lido.getDepositsReserveTarget()).to.equal(ether("20.0")); + expect(await lido.getDepositsReserve()).to.equal(ether("20.0")); + }); + + it("Exhausts CL-depositable ether via multiple withdrawDepositableEther() calls and then reverts", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("20.0")); + await syncReserveWithOracleReport(); + await withdrawalQueue.mock__unfinalizedStETH(ether("70.0")); + + const chunk = ether("5.0"); + + while ((await lido.getDepositableEther()) >= chunk) { + await lido.connect(stakingRouterSigner).withdrawDepositableEther(chunk, 0n); + await assertDepositsReserveInvariants(); + } + + const remaining = await lido.getDepositableEther(); + expect(remaining).to.be.lt(chunk); + await expect(lido.connect(stakingRouterSigner).withdrawDepositableEther(chunk, 0n)).to.be.revertedWith( + "NOT_ENOUGH_ETHER", ); + await assertDepositsReserveInvariants(); + }); + + it("Preserves reserve invariants over submit/withdraw/target-update/report sequence", async () => { + await lido.submit(ZeroAddress, { value: ether("50.0") }); + await lido.setDepositsReserveTarget(ether("15.0")); + await syncReserveWithOracleReport(); + await withdrawalQueue.mock__unfinalizedStETH(ether("20.0")); + await assertDepositsReserveInvariants(); + + await lido.connect(stakingRouterSigner).withdrawDepositableEther(ether("10.0"), 0n); + await assertDepositsReserveInvariants(); + + await lido.connect(elRewardsVault).receiveELRewards({ value: ether("7.0") }); + await assertDepositsReserveInvariants(); + + await 
lido.setDepositsReserveTarget(ether("30.0")); + // target increased, reserve increase is deferred until report + await assertDepositsReserveInvariants(); + + await syncReserveWithOracleReport(); + await assertDepositsReserveInvariants(); }); + }); + + context("withdrawalsReserve", () => { + let stakingRouterSigner: HardhatEthersSigner; + let accountingSigner: HardhatEthersSigner; + + beforeEach(async () => { + await lido.resume(); + await acl.createPermission(user, lido, await lido.BUFFER_RESERVE_MANAGER_ROLE(), deployer); + stakingRouterSigner = await impersonate(await locator.stakingRouter(), ether("1.0")); + accountingSigner = await impersonate(await locator.accounting(), ether("1.0")); + }); + + it("Returns 0 when unfinalizedStETH is zero", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("30.0")); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + await withdrawalQueue.mock__unfinalizedStETH(0n); - it("Emits `Unbuffered` and `DepositedValidatorsChanged` events if there are deposits", async () => { + expect(await lido.getWithdrawalsReserve()).to.equal(0n); + }); + + it("Is capped by remaining buffer after deposits reserve", async () => { + const deposit = ether("100.0"); + await lido.submit(ZeroAddress, { value: deposit }); + await lido.setDepositsReserveTarget(ether("40.0")); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + await withdrawalQueue.mock__unfinalizedStETH(ether("80.0")); + + const buffered = await lido.getBufferedEther(); + const depositsReserve = await lido.getDepositsReserve(); + expect(await lido.getWithdrawalsReserve()).to.equal(buffered - depositsReserve); + }); + + it("Decreases when deposits reserve target increases (priority to deposits reserve)", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + const buffered = await 
lido.getBufferedEther(); + // Make withdrawals demand effectively unbounded so withdrawalsReserve == buffered - depositsReserve. + await withdrawalQueue.mock__unfinalizedStETH(buffered); + + const lowTarget = ether("10.0"); + const highTarget = ether("50.0"); + + await lido.setDepositsReserveTarget(lowTarget); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + const withdrawalsReserveWithLowTarget = await lido.getWithdrawalsReserve(); + expect(withdrawalsReserveWithLowTarget).to.equal(buffered - lowTarget); + + await lido.setDepositsReserveTarget(highTarget); + // target increase is deferred until report, so withdrawals reserve is unchanged before sync + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveWithLowTarget); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + const withdrawalsReserveWithHighTarget = await lido.getWithdrawalsReserve(); + expect(withdrawalsReserveWithHighTarget).to.equal(buffered - highTarget); + expect(withdrawalsReserveWithHighTarget).to.be.lt(withdrawalsReserveWithLowTarget); + }); + + it("Does not change on oracle report when no withdrawals are finalized", async () => { + await lido.submit(ZeroAddress, { value: ether("100.0") }); + await lido.setDepositsReserveTarget(ether("30.0")); + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + await withdrawalQueue.mock__unfinalizedStETH(ether("40.0")); + const before = await lido.getWithdrawalsReserve(); + + await lido.connect(accountingSigner).collectRewardsAndProcessWithdrawals(0n, 0n, 0n, 0n, 0n, 0n, 0n, 0n); + + expect(await lido.getWithdrawalsReserve()).to.equal(before); + }); + + it("Returns 0 when buffer is empty even if unfinalizedStETH is non-zero", async () => { + await withdrawalQueue.mock__unfinalizedStETH(0n); + const depositableBefore = await lido.getDepositableEther(); + if 
(depositableBefore > 0n) { + await lido.connect(stakingRouterSigner).withdrawDepositableEther(depositableBefore, 0n); + } + + expect(await lido.getBufferedEther()).to.equal(0n); + + await withdrawalQueue.mock__unfinalizedStETH(ether("100.0")); + expect(await lido.getWithdrawalsReserve()).to.equal(0n); + }); + }); + + context("withdrawDepositableEther", () => { + let stakingRouterSigner: HardhatEthersSigner; + + beforeEach(async () => { + await lido.resume(); + // Get stakingRouter signer to call withdrawDepositableEther + const stakingRouterAddress = await locator.stakingRouter(); + stakingRouterSigner = await impersonate(stakingRouterAddress, ether("1.0")); + // simulate success report + await accountingOracle.mock_setProcessingState(1, true, true); + }); + + it("Reverts if the caller is not `StakingRouter`", async () => { const oneDepositWorthOfEther = ether("32.0"); - // top up Lido buffer enough for 1 deposit of 32 ether await lido.submit(ZeroAddress, { value: oneDepositWorthOfEther }); - expect(await lido.getDepositableEther()).to.be.greaterThanOrEqual(oneDepositWorthOfEther); + await expect(lido.connect(stranger).withdrawDepositableEther(oneDepositWorthOfEther, 1n)).to.be.revertedWith( + "APP_AUTH_FAILED", + ); + }); + + it("Reverts if amount is zero", async () => { + await expect(lido.connect(stakingRouterSigner).withdrawDepositableEther(0n, 0n)).to.be.revertedWith( + "ZERO_AMOUNT", + ); + }); + + it("Reverts if not enough depositable ether", async () => { + const tooMuchEther = ether("1000.0"); + await expect(lido.connect(stakingRouterSigner).withdrawDepositableEther(tooMuchEther, 1n)).to.be.revertedWith( + "NOT_ENOUGH_ETHER", + ); + }); - // mock StakingRouter.getStakingModuleMaxDepositsCount returning 1 deposit - await stakingRouter.mock__getStakingModuleMaxDepositsCount(1); + it("Emits `Unbuffered`, `DepositedValidatorsChanged` events when withdrawing ether", async () => { + const depositAmount = ether("32.0"); + // top up Lido buffer enough for deposit 
+ await lido.submit(ZeroAddress, { value: depositAmount }); + + // Get actual depositable ether which may be less due to withdrawal reservations + const depositableEther = await lido.getDepositableEther(); + expect(depositableEther).to.be.greaterThan(0n); const beforeDeposit = await batch({ lidoBalance: ethers.provider.getBalance(lido), stakingRouterBalance: ethers.provider.getBalance(stakingRouter), beaconStat: lido.getBeaconStat(), + balanceStats: lido.getBalanceStats(), }); - await expect(lido.deposit(maxDepositsCount, stakingModuleId, depositCalldata)) + // Use actual depositable amount + const amountToWithdraw = depositableEther; + await expect(lido.connect(stakingRouterSigner).withdrawDepositableEther(amountToWithdraw, 1n)) .to.emit(lido, "Unbuffered") - .withArgs(oneDepositWorthOfEther) + .withArgs(amountToWithdraw) .and.to.emit(lido, "DepositedValidatorsChanged") - .withArgs(beforeDeposit.beaconStat.depositedValidators + 1n) - .and.to.emit(stakingRouter, "Mock__DepositCalled"); + .withArgs(beforeDeposit.beaconStat.depositedValidators + 1n); const afterDeposit = await batch({ lidoBalance: ethers.provider.getBalance(lido), @@ -313,19 +785,19 @@ describe("Lido.sol:misc", () => { }); expect(afterDeposit.beaconStat.depositedValidators).to.equal(beforeDeposit.beaconStat.depositedValidators + 1n); - expect(afterDeposit.lidoBalance).to.equal(beforeDeposit.lidoBalance - oneDepositWorthOfEther); - expect(afterDeposit.stakingRouterBalance).to.equal(beforeDeposit.stakingRouterBalance + oneDepositWorthOfEther); + // Verify ETH moved from Lido to StakingRouter + expect(afterDeposit.lidoBalance).to.be.lessThan(beforeDeposit.lidoBalance); + expect(afterDeposit.stakingRouterBalance).to.be.greaterThan(beforeDeposit.stakingRouterBalance); }); - it("Does not emit `Unbuffered` and `DepositedValidatorsChanged` events if the staking module cannot accomodate new deposit", async () => { - const oneDepositWorthOfEther = ether("32.0"); - // top up Lido buffer enough for 1 deposit of 
32 ether - await lido.submit(ZeroAddress, { value: oneDepositWorthOfEther }); - - expect(await lido.getDepositableEther()).to.be.greaterThanOrEqual(oneDepositWorthOfEther); + it("Does not emit `DepositedValidatorsChanged` event when depositsCount is 0 (top-up scenario)", async () => { + const depositAmount = ether("10.0"); + // top up Lido buffer + await lido.submit(ZeroAddress, { value: depositAmount }); - // mock StakingRouter.getStakingModuleMaxDepositsCount returning 1 deposit - await stakingRouter.mock__getStakingModuleMaxDepositsCount(0); + // Get actual depositable ether + const depositableEther = await lido.getDepositableEther(); + expect(depositableEther).to.be.greaterThan(0n); const beforeDeposit = await batch({ lidoBalance: ethers.provider.getBalance(lido), @@ -333,9 +805,13 @@ describe("Lido.sol:misc", () => { beaconStat: lido.getBeaconStat(), }); - await expect(lido.deposit(maxDepositsCount, stakingModuleId, depositCalldata)) - .to.emit(stakingRouter, "Mock__DepositCalled") - .not.to.emit(lido, "Unbuffered") + // Use a smaller amount that's definitely available + const amountToWithdraw = depositableEther < depositAmount ? 
depositableEther : depositAmount; + + // depositsCount = 0 for top-up scenario (existing validators, not new ones) + await expect(lido.connect(stakingRouterSigner).withdrawDepositableEther(amountToWithdraw, 0n)) + .to.emit(lido, "Unbuffered") + .withArgs(amountToWithdraw) .and.not.to.emit(lido, "DepositedValidatorsChanged"); const afterDeposit = await batch({ @@ -344,9 +820,11 @@ describe("Lido.sol:misc", () => { beaconStat: lido.getBeaconStat(), }); + // depositedValidators should not change for top-ups expect(afterDeposit.beaconStat.depositedValidators).to.equal(beforeDeposit.beaconStat.depositedValidators); - expect(afterDeposit.lidoBalance).to.equal(beforeDeposit.lidoBalance); - expect(afterDeposit.stakingRouterBalance).to.equal(beforeDeposit.stakingRouterBalance); + // Verify ETH moved from Lido to StakingRouter + expect(afterDeposit.lidoBalance).to.be.lessThan(beforeDeposit.lidoBalance); + expect(afterDeposit.stakingRouterBalance).to.be.greaterThan(beforeDeposit.stakingRouterBalance); }); }); }); diff --git a/test/0.8.9/beaconChainDepositor.t.sol b/test/0.8.25/beaconChainDepositor.t.sol similarity index 95% rename from test/0.8.9/beaconChainDepositor.t.sol rename to test/0.8.25/beaconChainDepositor.t.sol index 93def7cada..0128fe2094 100644 --- a/test/0.8.9/beaconChainDepositor.t.sol +++ b/test/0.8.25/beaconChainDepositor.t.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: UNLICENSED // for testing purposes only -pragma solidity 0.8.9; +pragma solidity 0.8.25; import {Test} from "forge-std/Test.sol"; import {CommonBase} from "forge-std/Base.sol"; @@ -9,7 +9,10 @@ import {StdUtils} from "forge-std/StdUtils.sol"; import {StdAssertions} from "forge-std/StdAssertions.sol"; import {IERC165} from "forge-std/interfaces/IERC165.sol"; -import {BeaconChainDepositor as BCDepositor} from "contracts/0.8.9/BeaconChainDepositor.sol"; +import { + BeaconChainDepositor as BCDepositor, + IDepositContract as IDepositContractLib +} from 
"contracts/0.8.25/lib/BeaconChainDepositor.sol"; // The following invariants are formulated and enforced for the `BeaconChainDepositor` contract: // - exactly 32 ETH gets attached with every single deposit @@ -171,8 +174,13 @@ contract BCDepositorHandler is CommonBase, StdAssertions, StdUtils { } } -contract BCDepositorHarness is BCDepositor { - constructor(address _depositContract) BCDepositor(_depositContract) {} +contract BCDepositorHarness { + IDepositContractLib public immutable DEPOSIT_CONTRACT; + uint256 internal constant INITIAL_DEPOSIT_SIZE = 32 ether; + + constructor(address _depositContract) { + DEPOSIT_CONTRACT = IDepositContractLib(_depositContract); + } /// @dev Exposed version of the _makeBeaconChainDeposits32ETH /// @param _keysCount amount of keys to deposit @@ -185,7 +193,13 @@ contract BCDepositorHarness is BCDepositor { bytes memory _publicKeysBatch, bytes memory _signaturesBatch ) external { - _makeBeaconChainDeposits32ETH(_keysCount, _withdrawalCredentials, _publicKeysBatch, _signaturesBatch); + BCDepositor.makeBeaconChainDeposits32ETH( + DEPOSIT_CONTRACT, + _keysCount, + _withdrawalCredentials, + _publicKeysBatch, + _signaturesBatch + ); } } diff --git a/test/0.8.25/consolidation-helpers.ts b/test/0.8.25/consolidation-helpers.ts new file mode 100644 index 0000000000..19e79f871e --- /dev/null +++ b/test/0.8.25/consolidation-helpers.ts @@ -0,0 +1,36 @@ +/** + * Shared test helpers for ConsolidationGateway, ConsolidationBus, and ConsolidationMigrator tests. + */ + +/** Sample 48-byte validator public keys for testing. 
*/ +export const PUBKEYS = [ + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", +]; + +/** Creates dummy (empty-proof) validator witnesses for use in ConsolidationGateway tests. */ +export const witnessesForTargets = (targets: string[]) => + targets.map((pubkey) => ({ + proof: [] as string[], + pubkey, + validatorIndex: 0, + childBlockTimestamp: 0, + slot: 0, + proposerIndex: 0, + })); + +/** Creates ConsolidationWitnessGroup[] for ConsolidationBus.executeConsolidation */ +export const buildWitnessGroups = (sourcePubkeysGroups: string[][], targetPubkeys: string[]) => + sourcePubkeysGroups.map((sourcePubkeys, i) => ({ + sourcePubkeys, + targetWitness: { + proof: [] as string[], + pubkey: targetPubkeys[i], + validatorIndex: 0, + childBlockTimestamp: 0, + slot: 0, + proposerIndex: 0, + }, + })); diff --git a/test/0.8.25/consolidationBus/consolidationBus.delay.test.ts b/test/0.8.25/consolidationBus/consolidationBus.delay.test.ts new file mode 100644 index 0000000000..76b92ba4a5 --- /dev/null +++ b/test/0.8.25/consolidationBus/consolidationBus.delay.test.ts @@ -0,0 +1,263 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { ConsolidationBus, ConsolidationGateway__MockForConsolidationBus } from "typechain-types"; + +import { advanceChainTime, getCurrentBlockTimestamp } from "lib"; +import { proxify } from "lib/proxy"; + +import { Snapshot } from "test/suite"; + +import { buildWitnessGroups, PUBKEYS } from "../consolidation-helpers"; + +describe("ConsolidationBus.sol: execution delay", () => { + let 
consolidationBus: ConsolidationBus; + let consolidationGateway: ConsolidationGateway__MockForConsolidationBus; + let admin: HardhatEthersSigner; + let manager: HardhatEthersSigner; + let publisher: HardhatEthersSigner; + let executor: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let MANAGE_ROLE: string; + let PUBLISH_ROLE: string; + + const EXECUTION_DELAY = 3600; // 1 hour + + let originalState: string; + + before(async () => { + [admin, manager, publisher, executor, stranger] = await ethers.getSigners(); + + consolidationGateway = await ethers.deployContract("ConsolidationGateway__MockForConsolidationBus"); + + const impl = await ethers.deployContract("ConsolidationBus", [await consolidationGateway.getAddress()]); + [consolidationBus] = await proxify({ impl, admin }); + await consolidationBus.initialize(admin.address, 100, 100, EXECUTION_DELAY); + + MANAGE_ROLE = await consolidationBus.MANAGE_ROLE(); + PUBLISH_ROLE = await consolidationBus.PUBLISH_ROLE(); + + await consolidationBus.connect(admin).grantRole(MANAGE_ROLE, manager.address); + await consolidationBus.connect(admin).grantRole(PUBLISH_ROLE, publisher.address); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("constructor", () => { + it("should set the initial execution delay", async () => { + expect(await consolidationBus.executionDelay()).to.equal(EXECUTION_DELAY); + }); + + it("should emit ExecutionDelayUpdated during initialization", async () => { + const gatewayAddr = await consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + + await expect(bus.initialize(admin.address, 100, 100, 7200)) + .to.emit(bus, "ExecutionDelayUpdated") + .withArgs(7200); + }); + + it("should allow zero execution delay in initializer", async () => { + const gatewayAddr = await 
consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + await bus.initialize(admin.address, 100, 100, 0); + expect(await bus.executionDelay()).to.equal(0); + }); + }); + + context("setExecutionDelay", () => { + it("should set execution delay", async () => { + await expect(consolidationBus.connect(manager).setExecutionDelay(7200)) + .to.emit(consolidationBus, "ExecutionDelayUpdated") + .withArgs(7200); + + expect(await consolidationBus.executionDelay()).to.equal(7200); + }); + + it("should allow setting delay to zero", async () => { + await expect(consolidationBus.connect(manager).setExecutionDelay(0)) + .to.emit(consolidationBus, "ExecutionDelayUpdated") + .withArgs(0); + + expect(await consolidationBus.executionDelay()).to.equal(0); + }); + + it("should revert without MANAGE_ROLE", async () => { + await expect(consolidationBus.connect(stranger).setExecutionDelay(100)).to.be.reverted; + }); + }); + + context("execution delay enforcement", () => { + let sourcePubkeysGroups: string[][]; + let targetPubkeys: string[]; + let batchHash: string; + + beforeEach(async () => { + sourcePubkeysGroups = [[PUBKEYS[0]]]; + targetPubkeys = [PUBKEYS[1]]; + + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups]), + ); + }); + + it("should revert when execution delay has not passed", async () => { + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + const executeAfter = batchInfo.addedAt + BigInt(EXECUTION_DELAY); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ) + .to.be.revertedWithCustomError(consolidationBus, 
"ExecutionDelayNotPassed") + .withArgs((await getCurrentBlockTimestamp()) + 1n, executeAfter); + }); + + it("should allow execution after delay has passed", async () => { + await advanceChainTime(BigInt(EXECUTION_DELAY)); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ).to.emit(consolidationBus, "RequestsExecuted"); + }); + + it("should allow execution exactly at the delay boundary", async () => { + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + const currentTimestamp = await getCurrentBlockTimestamp(); + const timeToAdvance = batchInfo.addedAt + BigInt(EXECUTION_DELAY) - currentTimestamp; + + await advanceChainTime(timeToAdvance); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ).to.emit(consolidationBus, "RequestsExecuted"); + }); + + it("should allow immediate execution when delay is zero", async () => { + await consolidationBus.connect(manager).setExecutionDelay(0); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ).to.emit(consolidationBus, "RequestsExecuted"); + }); + + it("should enforce delay per batch independently", async () => { + // Add second batch after some time + await advanceChainTime(BigInt(EXECUTION_DELAY / 2)); + + const sourcePubkeysGroups2 = [[PUBKEYS[2]]]; + const targetPubkeys2 = [PUBKEYS[3]]; + await consolidationBus + .connect(publisher) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[2]], targetPubkey: PUBKEYS[3] }]); + + // Advance enough for batch 1 but not batch 2 + await advanceChainTime(BigInt(EXECUTION_DELAY / 2)); + + // Batch 1 should be executable + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { 
value: 0 }), + ).to.emit(consolidationBus, "RequestsExecuted"); + + // Batch 2 should still be blocked + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups2, targetPubkeys2), { value: 0 }), + ).to.be.revertedWithCustomError(consolidationBus, "ExecutionDelayNotPassed"); + }); + + it("should use the current delay setting at execution time", async () => { + // Increase delay after batch was added + const longerDelay = EXECUTION_DELAY * 2; + await consolidationBus.connect(manager).setExecutionDelay(longerDelay); + + // Advance the original delay + await advanceChainTime(BigInt(EXECUTION_DELAY)); + + // Should still revert because the new longer delay hasn't passed + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ).to.be.revertedWithCustomError(consolidationBus, "ExecutionDelayNotPassed"); + + // Advance the remaining time + await advanceChainTime(BigInt(EXECUTION_DELAY)); + + // Now should succeed + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ).to.emit(consolidationBus, "RequestsExecuted"); + }); + }); + + context("getBatchInfo", () => { + it("should return zero values for non-existent batch", async () => { + const fakeBatchHash = ethers.keccak256(ethers.toUtf8Bytes("fake")); + const batchInfo = await consolidationBus.getBatchInfo(fakeBatchHash); + expect(batchInfo.publisher).to.equal(ethers.ZeroAddress); + expect(batchInfo.addedAt).to.equal(0); + }); + + it("should return correct info after adding batch", async () => { + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes 
targetPubkey)[]"], [groups]), + ); + + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + const blockTimestamp = await getCurrentBlockTimestamp(); + expect(batchInfo.publisher).to.equal(publisher.address); + expect(batchInfo.addedAt).to.equal(blockTimestamp); + }); + + it("should return zero values after batch is executed", async () => { + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups]), + ); + + // Advance past delay + await advanceChainTime(BigInt(EXECUTION_DELAY)); + + await consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups([[PUBKEYS[0]]], [PUBKEYS[1]]), { value: 0 }); + + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + expect(batchInfo.publisher).to.equal(ethers.ZeroAddress); + expect(batchInfo.addedAt).to.equal(0); + }); + }); +}); diff --git a/test/0.8.25/consolidationBus/consolidationBus.deploy.test.ts b/test/0.8.25/consolidationBus/consolidationBus.deploy.test.ts new file mode 100644 index 0000000000..f8784d548c --- /dev/null +++ b/test/0.8.25/consolidationBus/consolidationBus.deploy.test.ts @@ -0,0 +1,98 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { ConsolidationGateway__MockForConsolidationBus } from "typechain-types"; + +import { proxify } from "lib/proxy"; + +describe("ConsolidationBus.sol: deployment", () => { + let consolidationGateway: ConsolidationGateway__MockForConsolidationBus; + + before(async () => { + consolidationGateway = await ethers.deployContract("ConsolidationGateway__MockForConsolidationBus"); + }); + + it("should deploy and initialize successfully with valid parameters", async () => { + const [admin] = await ethers.getSigners(); + const gatewayAddr = await 
consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + await bus.initialize(admin.address, 100, 100, 0); + + const adminRole = await bus.DEFAULT_ADMIN_ROLE(); + expect(await bus.hasRole(adminRole, admin.address)).to.be.true; + expect(await bus.batchSize()).to.equal(100); + expect(await bus.maxGroupsInBatch()).to.equal(100); + expect(await bus.getConsolidationGateway()).to.equal(gatewayAddr); + }); + + it("should revert if admin is zero address on initialize", async () => { + const [admin] = await ethers.getSigners(); + const gatewayAddr = await consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + + await expect(bus.initialize(ethers.ZeroAddress, 100, 100, 0)).to.be.revertedWithCustomError( + bus, + "AdminCannotBeZero", + ); + }); + + it("should revert if consolidationGateway is zero address", async () => { + await expect(ethers.deployContract("ConsolidationBus", [ethers.ZeroAddress])) + .to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationBus"), "ZeroArgument") + .withArgs("consolidationGateway"); + }); + + it("should revert zero batch size on initialize", async () => { + const [admin] = await ethers.getSigners(); + const gatewayAddr = await consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + + await expect(bus.initialize(admin.address, 0, 100, 0)) + .to.be.revertedWithCustomError(bus, "ZeroArgument") + .withArgs("batchSizeLimit"); + }); + + it("should revert zero max groups in batch on initialize", async () => { + const [admin] = await ethers.getSigners(); + const gatewayAddr = await consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const 
[bus] = await proxify({ impl, admin }); + + await expect(bus.initialize(admin.address, 100, 0, 0)) + .to.be.revertedWithCustomError(bus, "ZeroArgument") + .withArgs("maxGroupsInBatchLimit"); + }); + + it("should revert if maxGroupsInBatch exceeds batchSize on initialize", async () => { + const [admin] = await ethers.getSigners(); + const gatewayAddr = await consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + + await expect(bus.initialize(admin.address, 10, 20, 0)) + .to.be.revertedWithCustomError(bus, "MaxGroupsExceedsBatchSize") + .withArgs(20, 10); + }); + + it("should revert on double initialization", async () => { + const [admin] = await ethers.getSigners(); + const gatewayAddr = await consolidationGateway.getAddress(); + + const impl = await ethers.deployContract("ConsolidationBus", [gatewayAddr]); + const [bus] = await proxify({ impl, admin }); + await bus.initialize(admin.address, 100, 100, 0); + + await expect(bus.initialize(admin.address, 100, 100, 0)).to.be.revertedWithCustomError( + bus, + "InvalidInitialization", + ); + }); +}); diff --git a/test/0.8.25/consolidationBus/consolidationBus.executor.test.ts b/test/0.8.25/consolidationBus/consolidationBus.executor.test.ts new file mode 100644 index 0000000000..1249eee69d --- /dev/null +++ b/test/0.8.25/consolidationBus/consolidationBus.executor.test.ts @@ -0,0 +1,245 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { ConsolidationBus, ConsolidationGateway__MockForConsolidationBus } from "typechain-types"; + +import { proxify } from "lib/proxy"; + +import { Snapshot } from "test/suite"; + +import { buildWitnessGroups, PUBKEYS } from "../consolidation-helpers"; + +describe("ConsolidationBus.sol: executor", () => { + let consolidationBus: ConsolidationBus; + let consolidationGateway: 
ConsolidationGateway__MockForConsolidationBus; + let admin: HardhatEthersSigner; + let manager: HardhatEthersSigner; + let publisher: HardhatEthersSigner; + let executor: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let MANAGE_ROLE: string; + let PUBLISH_ROLE: string; + let REMOVE_ROLE: string; + + let originalState: string; + + before(async () => { + [admin, manager, publisher, executor, stranger] = await ethers.getSigners(); + + consolidationGateway = await ethers.deployContract("ConsolidationGateway__MockForConsolidationBus"); + + const impl = await ethers.deployContract("ConsolidationBus", [await consolidationGateway.getAddress()]); + [consolidationBus] = await proxify({ impl, admin }); + await consolidationBus.initialize(admin.address, 100, 100, 0); + + MANAGE_ROLE = await consolidationBus.MANAGE_ROLE(); + PUBLISH_ROLE = await consolidationBus.PUBLISH_ROLE(); + REMOVE_ROLE = await consolidationBus.REMOVE_ROLE(); + + // Grant roles + await consolidationBus.connect(admin).grantRole(MANAGE_ROLE, manager.address); + await consolidationBus.connect(admin).grantRole(PUBLISH_ROLE, publisher.address); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("executeConsolidation", () => { + let sourcePubkeysGroups: string[][]; + let targetPubkeys: string[]; + let batchHash: string; + + beforeEach(async () => { + sourcePubkeysGroups = [[PUBKEYS[0]]]; + targetPubkeys = [PUBKEYS[1]]; + + // Add a batch + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups]), + ); + }); + + it("should execute consolidation", async () => { + const fee = 10n; + + await expect( + consolidationBus + .connect(executor) + 
.executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: fee }), + ) + .to.emit(consolidationBus, "RequestsExecuted") + .withArgs(batchHash, fee); + + // Verify batch is removed from storage after execution + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + expect(batchInfo.publisher).to.equal(ethers.ZeroAddress); + expect(batchInfo.addedAt).to.equal(0); + }); + + it("should forward call to ConsolidationGateway", async () => { + const fee = 10n; + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: fee }), + ) + .to.emit(consolidationGateway, "AddConsolidationRequestsCalled") + .withArgs(sourcePubkeysGroups.length, executor.address, fee); + }); + + it("should allow anyone to execute consolidation", async () => { + const fee = 10n; + + await expect( + consolidationBus + .connect(stranger) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: fee }), + ) + .to.emit(consolidationBus, "RequestsExecuted") + .withArgs(batchHash, fee); + }); + + it("should revert if batch not found", async () => { + const fakeSources = [[PUBKEYS[2]]]; + const fakeTargets = [PUBKEYS[0]]; + + const fakeBatchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [[{ sourcePubkeys: [PUBKEYS[2]], targetPubkey: PUBKEYS[0] }]], + ), + ); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(fakeSources, fakeTargets), { value: 10 }), + ) + .to.be.revertedWithCustomError(consolidationBus, "BatchNotFound") + .withArgs(fakeBatchHash); + }); + + it("should revert if batch already executed", async () => { + // Execute first time + await consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 10 }); + + // Try to execute again — batch was 
deleted, so it's not found + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 10 }), + ) + .to.be.revertedWithCustomError(consolidationBus, "BatchNotFound") + .withArgs(batchHash); + }); + + it("should revert if batch was removed", async () => { + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + // Remove the batch + await consolidationBus.connect(manager).removeBatches([batchHash]); + + // Try to execute + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 10 }), + ) + .to.be.revertedWithCustomError(consolidationBus, "BatchNotFound") + .withArgs(batchHash); + }); + + it("should execute multiple batches sequentially", async () => { + // Add second batch + const sourcePubkeysGroups2 = [[PUBKEYS[1]]]; + const targetPubkeys2 = [PUBKEYS[2]]; + const groups2 = [{ sourcePubkeys: [PUBKEYS[1]], targetPubkey: PUBKEYS[2] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups2); + + const batchHash2 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups2]), + ); + + // Execute first batch + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 10 }), + ) + .to.emit(consolidationBus, "RequestsExecuted") + .withArgs(batchHash, 10); + + // Execute second batch + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups2, targetPubkeys2), { value: 15 }), + ) + .to.emit(consolidationBus, "RequestsExecuted") + .withArgs(batchHash2, 15); + }); + + it("should work with zero value (if gateway allows)", async () => { + await expect( + consolidationBus + .connect(executor) + 
.executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 0 }), + ) + .to.emit(consolidationBus, "RequestsExecuted") + .withArgs(batchHash, 0); + }); + + it("should forward exact msg.value to gateway", async () => { + const exactValue = 12345n; + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: exactValue }), + ) + .to.emit(consolidationGateway, "AddConsolidationRequestsCalled") + .withArgs(sourcePubkeysGroups.length, executor.address, exactValue); + }); + + it("should pass caller as refundRecipient", async () => { + await expect( + consolidationBus + .connect(stranger) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 10 }), + ) + .to.emit(consolidationGateway, "AddConsolidationRequestsCalled") + .withArgs(sourcePubkeysGroups.length, stranger.address, 10); + }); + }); + + context("ETH balance", () => { + it("should not hold ETH after execution", async () => { + const sourcePubkeysGroups = [[PUBKEYS[0]]]; + const targetPubkeys = [PUBKEYS[1]]; + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + const balanceBefore = await ethers.provider.getBalance(await consolidationBus.getAddress()); + + await consolidationBus + .connect(executor) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 100 }); + + const balanceAfter = await ethers.provider.getBalance(await consolidationBus.getAddress()); + + expect(balanceAfter).to.equal(balanceBefore); + }); + }); +}); diff --git a/test/0.8.25/consolidationBus/consolidationBus.management.test.ts b/test/0.8.25/consolidationBus/consolidationBus.management.test.ts new file mode 100644 index 0000000000..c14aca0990 --- /dev/null +++ b/test/0.8.25/consolidationBus/consolidationBus.management.test.ts @@ -0,0 +1,198 @@ +import { expect 
} from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { ConsolidationBus, ConsolidationGateway__MockForConsolidationBus } from "typechain-types"; + +import { proxify } from "lib/proxy"; + +import { Snapshot } from "test/suite"; + +import { buildWitnessGroups, PUBKEYS } from "../consolidation-helpers"; + +describe("ConsolidationBus.sol: management", () => { + let consolidationBus: ConsolidationBus; + let consolidationGateway: ConsolidationGateway__MockForConsolidationBus; + let admin: HardhatEthersSigner; + let manager: HardhatEthersSigner; + let publisher: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let MANAGE_ROLE: string; + let PUBLISH_ROLE: string; + let REMOVE_ROLE: string; + + let originalState: string; + + before(async () => { + [admin, manager, publisher, stranger] = await ethers.getSigners(); + + consolidationGateway = await ethers.deployContract("ConsolidationGateway__MockForConsolidationBus"); + + const impl = await ethers.deployContract("ConsolidationBus", [await consolidationGateway.getAddress()]); + [consolidationBus] = await proxify({ impl, admin }); + await consolidationBus.initialize(admin.address, 100, 100, 0); + + MANAGE_ROLE = await consolidationBus.MANAGE_ROLE(); + PUBLISH_ROLE = await consolidationBus.PUBLISH_ROLE(); + REMOVE_ROLE = await consolidationBus.REMOVE_ROLE(); + + // Grant manager role + await consolidationBus.connect(admin).grantRole(MANAGE_ROLE, manager.address); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("setBatchSize", () => { + it("should set batch size", async () => { + await expect(consolidationBus.connect(manager).setBatchSize(200)) + .to.emit(consolidationBus, "BatchLimitUpdated") + .withArgs(200); + + expect(await consolidationBus.batchSize()).to.equal(200); + }); + + it("should revert setting batch size to 
zero", async () => { + await expect(consolidationBus.connect(manager).setBatchSize(0)) + .to.be.revertedWithCustomError(consolidationBus, "ZeroArgument") + .withArgs("batchSizeLimit"); + }); + + it("should revert if new batch size is less than current maxGroupsInBatch", async () => { + // maxGroupsInBatch is 100, try to set batchSize to 50 + await expect(consolidationBus.connect(manager).setBatchSize(50)) + .to.be.revertedWithCustomError(consolidationBus, "MaxGroupsExceedsBatchSize") + .withArgs(100, 50); + }); + + it("should revert if caller does not have MANAGE_ROLE", async () => { + await expect(consolidationBus.connect(stranger).setBatchSize(200)) + .to.be.revertedWithCustomError(consolidationBus, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, MANAGE_ROLE); + }); + }); + + context("setMaxGroupsInBatch", () => { + it("should set max groups in batch", async () => { + await expect(consolidationBus.connect(manager).setMaxGroupsInBatch(50)) + .to.emit(consolidationBus, "MaxGroupsInBatchUpdated") + .withArgs(50); + + expect(await consolidationBus.maxGroupsInBatch()).to.equal(50); + }); + + it("should revert setting max groups in batch to zero", async () => { + await expect(consolidationBus.connect(manager).setMaxGroupsInBatch(0)) + .to.be.revertedWithCustomError(consolidationBus, "ZeroArgument") + .withArgs("maxGroupsInBatchLimit"); + }); + + it("should revert if maxGroupsInBatch exceeds batchSize", async () => { + // batchSize is 100, try to set maxGroupsInBatch to 200 + await expect(consolidationBus.connect(manager).setMaxGroupsInBatch(200)) + .to.be.revertedWithCustomError(consolidationBus, "MaxGroupsExceedsBatchSize") + .withArgs(200, 100); + }); + + it("should revert if caller does not have MANAGE_ROLE", async () => { + await expect(consolidationBus.connect(stranger).setMaxGroupsInBatch(50)) + .to.be.revertedWithCustomError(consolidationBus, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, MANAGE_ROLE); + }); + }); + + 
context("removeBatches", () => { + let batchHash: string; + + beforeEach(async () => { + // Register publisher and add a batch + await consolidationBus.connect(admin).grantRole(PUBLISH_ROLE, publisher.address); + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, publisher.address); + + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + // Compute batch hash + batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups]), + ); + }); + + it("should remove batches", async () => { + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + expect((await consolidationBus.getBatchInfo(batchHash)).publisher).to.not.equal(ethers.ZeroAddress); + + await expect(consolidationBus.connect(manager).removeBatches([batchHash])) + .to.emit(consolidationBus, "BatchesRemoved") + .withArgs([batchHash]); + + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + expect(batchInfo.publisher).to.equal(ethers.ZeroAddress); + expect(batchInfo.addedAt).to.equal(0); + }); + + it("should revert if caller does not have REMOVE_ROLE", async () => { + await expect(consolidationBus.connect(stranger).removeBatches([batchHash])) + .to.be.revertedWithCustomError(consolidationBus, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, REMOVE_ROLE); + }); + + it("should revert if batch not found", async () => { + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + + const fakeBatchHash = ethers.keccak256(ethers.toUtf8Bytes("fake")); + + await expect(consolidationBus.connect(manager).removeBatches([fakeBatchHash])) + .to.be.revertedWithCustomError(consolidationBus, "BatchNotFound") + .withArgs(fakeBatchHash); + }); + + it("should revert if batchHashes is empty", async () => { + await 
consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + + await expect(consolidationBus.connect(manager).removeBatches([])).to.be.revertedWithCustomError( + consolidationBus, + "EmptyBatchHashes", + ); + }); + + it("should revert if batch already executed", async () => { + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + + const sourcePubkeysGroups = [[PUBKEYS[0]]]; + const targetPubkeys = [PUBKEYS[1]]; + + await consolidationBus + .connect(manager) + .executeConsolidation(buildWitnessGroups(sourcePubkeysGroups, targetPubkeys), { value: 10 }); + + // Try to remove the executed batch — batch was deleted, so it's not found + await expect(consolidationBus.connect(manager).removeBatches([batchHash])) + .to.be.revertedWithCustomError(consolidationBus, "BatchNotFound") + .withArgs(batchHash); + }); + + it("should remove multiple batches", async () => { + // Add another batch + const groups2 = [{ sourcePubkeys: [PUBKEYS[1]], targetPubkey: PUBKEYS[0] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups2); + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + + const batchHash2 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups2]), + ); + + await expect(consolidationBus.connect(manager).removeBatches([batchHash, batchHash2])) + .to.emit(consolidationBus, "BatchesRemoved") + .withArgs([batchHash, batchHash2]); + + expect((await consolidationBus.getBatchInfo(batchHash)).publisher).to.equal(ethers.ZeroAddress); + expect((await consolidationBus.getBatchInfo(batchHash2)).publisher).to.equal(ethers.ZeroAddress); + }); + }); +}); diff --git a/test/0.8.25/consolidationBus/consolidationBus.publisher.test.ts b/test/0.8.25/consolidationBus/consolidationBus.publisher.test.ts new file mode 100644 index 0000000000..6605b35411 --- /dev/null +++ 
b/test/0.8.25/consolidationBus/consolidationBus.publisher.test.ts @@ -0,0 +1,325 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { ConsolidationBus, ConsolidationGateway__MockForConsolidationBus } from "typechain-types"; + +import { proxify } from "lib/proxy"; + +import { Snapshot } from "test/suite"; + +import { buildWitnessGroups, PUBKEYS } from "../consolidation-helpers"; + +describe("ConsolidationBus.sol: publisher", () => { + let consolidationBus: ConsolidationBus; + let consolidationGateway: ConsolidationGateway__MockForConsolidationBus; + let admin: HardhatEthersSigner; + let manager: HardhatEthersSigner; + let publisher: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let MANAGE_ROLE: string; + let PUBLISH_ROLE: string; + + let originalState: string; + + before(async () => { + [admin, manager, publisher, stranger] = await ethers.getSigners(); + + consolidationGateway = await ethers.deployContract("ConsolidationGateway__MockForConsolidationBus"); + + const impl = await ethers.deployContract("ConsolidationBus", [await consolidationGateway.getAddress()]); + [consolidationBus] = await proxify({ impl, admin }); + await consolidationBus.initialize(admin.address, 10, 10, 0); + + MANAGE_ROLE = await consolidationBus.MANAGE_ROLE(); + PUBLISH_ROLE = await consolidationBus.PUBLISH_ROLE(); + + // Grant roles + await consolidationBus.connect(admin).grantRole(MANAGE_ROLE, manager.address); + await consolidationBus.connect(admin).grantRole(PUBLISH_ROLE, publisher.address); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("addConsolidationRequests", () => { + it("should add consolidation requests", async () => { + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + const batchData = 
ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [groups], + ); + const batchHash = ethers.keccak256(batchData); + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.emit(consolidationBus, "RequestsAdded") + .withArgs(publisher.address, batchData); + + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + expect(batchInfo.publisher).to.equal(publisher.address); + expect(batchInfo.addedAt).to.be.greaterThan(0); + }); + + it("should add multiple requests in a batch", async () => { + const groups = [ + { sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }, + { sourcePubkeys: [PUBKEYS[1]], targetPubkey: PUBKEYS[2] }, + ]; + + const batchData = ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [groups], + ); + const batchHash = ethers.keccak256(batchData); + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.emit(consolidationBus, "RequestsAdded") + .withArgs(publisher.address, batchData); + + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + expect(batchInfo.publisher).to.not.equal(ethers.ZeroAddress); + }); + + it("should revert if caller does not have PUBLISH_ROLE", async () => { + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + await expect(consolidationBus.connect(stranger).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, PUBLISH_ROLE); + }); + + it("should revert if batch is empty", async () => { + await expect(consolidationBus.connect(publisher).addConsolidationRequests([])).to.be.revertedWithCustomError( + consolidationBus, + "EmptyBatch", + ); + }); + + it("should revert if a source group is empty", async () => { + // First group is non-empty, second group is empty + const groups = [ + { sourcePubkeys: 
[PUBKEYS[0]], targetPubkey: PUBKEYS[1] }, + { sourcePubkeys: [] as string[], targetPubkey: PUBKEYS[2] }, + ]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "EmptyGroup") + .withArgs(1); + }); + + it("should revert with EmptyGroup at first index if first group is empty", async () => { + const groups = [ + { sourcePubkeys: [] as string[], targetPubkey: PUBKEYS[1] }, + { sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[2] }, + ]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "EmptyGroup") + .withArgs(0); + }); + + it("should revert if batch size exceeds limit", async () => { + // Create a batch with total source pubkeys exceeding the limit (10) + // Use fewer groups but with multiple source keys each to avoid TooManyGroups + const groups = [ + { sourcePubkeys: Array(6).fill(PUBKEYS[0]), targetPubkey: PUBKEYS[1] }, + { sourcePubkeys: Array(6).fill(PUBKEYS[0]), targetPubkey: PUBKEYS[2] }, + ]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "BatchTooLarge") + .withArgs(12, 10); + }); + + it("should allow batch at exact limit", async () => { + const groups = Array(10).fill({ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }); + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)).to.not.be.reverted; + }); + + it("should revert if groups count exceeds max groups in batch", async () => { + // Set maxGroupsInBatch to 3 (batchSize stays at 10) + await consolidationBus.connect(manager).setMaxGroupsInBatch(3); + + // Create 4 groups, each with 1 source pubkey (total size 4 <= batchSize 10, but groups 4 > maxGroups 3) + const groups = Array(4).fill({ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }); + + await 
expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "TooManyGroups") + .withArgs(4, 3); + }); + + it("should allow batch at exact max groups limit", async () => { + // Set maxGroupsInBatch to 3 + await consolidationBus.connect(manager).setMaxGroupsInBatch(3); + + const groups = Array(3).fill({ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }); + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)).to.not.be.reverted; + }); + + it("should check both batch size and max groups limits independently", async () => { + // Set maxGroupsInBatch to 5, batchSize stays at 10 + await consolidationBus.connect(manager).setMaxGroupsInBatch(5); + + // 3 groups with 4 source pubkeys each = 12 total > batchSize 10 + // but groups 3 <= maxGroups 5 + // TooManyGroups check comes first, but this should pass it and fail on BatchTooLarge + const groups = [ + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1], PUBKEYS[2], PUBKEYS[0]], targetPubkey: PUBKEYS[1] }, + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1], PUBKEYS[2], PUBKEYS[0]], targetPubkey: PUBKEYS[2] }, + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1], PUBKEYS[2], PUBKEYS[0]], targetPubkey: PUBKEYS[0] }, + ]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "BatchTooLarge") + .withArgs(12, 10); + }); + + it("should revert if batch already added", async () => { + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + // Add first time + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups]), + ); + + // Try to add again + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, 
"BatchAlreadyPending") + .withArgs(batchHash); + }); + + it("should revert if source equals target pubkey", async () => { + const samePubkey = PUBKEYS[0]; + const groups = [{ sourcePubkeys: [samePubkey], targetPubkey: samePubkey }]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "SourceEqualsTarget") + .withArgs(0); + }); + + it("should revert if source equals target pubkey at any index", async () => { + // First group is valid, second group has source == target + const groups = [ + { sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[2] }, + { sourcePubkeys: [PUBKEYS[1]], targetPubkey: PUBKEYS[1] }, // PUBKEYS[1] == PUBKEYS[1] at group index 1 + ]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "SourceEqualsTarget") + .withArgs(1); + }); + + it("should revert if any source in a multi-source group equals the target", async () => { + // Group has multiple sources, one of which matches the target + const groups = [ + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetPubkey: PUBKEYS[1] }, // PUBKEYS[1] is both source and target + ]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "SourceEqualsTarget") + .withArgs(0); + }); + + it("should allow re-adding batch after removal", async () => { + const REMOVE_ROLE = await consolidationBus.REMOVE_ROLE(); + await consolidationBus.connect(admin).grantRole(REMOVE_ROLE, manager.address); + + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + // Add first time + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups]), + ); + + // Remove + await 
consolidationBus.connect(manager).removeBatches([batchHash]); + + // Batch should be cleared + const batchInfo = await consolidationBus.getBatchInfo(batchHash); + expect(batchInfo.publisher).to.equal(ethers.ZeroAddress); + + // Re-add should succeed + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)).to.emit( + consolidationBus, + "RequestsAdded", + ); + }); + + it("should allow re-adding batch after execution", async () => { + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + + // Add first time + await consolidationBus.connect(publisher).addConsolidationRequests(groups); + + // Execute + await consolidationBus.executeConsolidation(buildWitnessGroups([[PUBKEYS[0]]], [PUBKEYS[1]]), { + value: 10, + }); + + // Re-add should succeed + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)).to.emit( + consolidationBus, + "RequestsAdded", + ); + }); + + it("should revert if target pubkey length is not 48 bytes", async () => { + const invalidTargetPubkey = "0x1234"; + const groups = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: invalidTargetPubkey }]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "InvalidTargetPubkeyLength") + .withArgs(0, 2); + }); + + it("should revert if source pubkey length is not 48 bytes", async () => { + const invalidSourcePubkey = "0x1234"; + const groups = [{ sourcePubkeys: [invalidSourcePubkey], targetPubkey: PUBKEYS[1] }]; + + await expect(consolidationBus.connect(publisher).addConsolidationRequests(groups)) + .to.be.revertedWithCustomError(consolidationBus, "InvalidSourcePubkeyLength") + .withArgs(0, 0, 2); + }); + + it("should allow different publishers to add different batches", async () => { + // Register another publisher + const [, , , , publisher2] = await ethers.getSigners(); + await consolidationBus.connect(admin).grantRole(PUBLISH_ROLE, 
publisher2.address); + + const groups1 = [{ sourcePubkeys: [PUBKEYS[0]], targetPubkey: PUBKEYS[1] }]; + const groups2 = [{ sourcePubkeys: [PUBKEYS[1]], targetPubkey: PUBKEYS[2] }]; + + await consolidationBus.connect(publisher).addConsolidationRequests(groups1); + await consolidationBus.connect(publisher2).addConsolidationRequests(groups2); + + const batchHash1 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups1]), + ); + const batchHash2 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], [groups2]), + ); + + expect((await consolidationBus.getBatchInfo(batchHash1)).publisher).to.equal(publisher.address); + expect((await consolidationBus.getBatchInfo(batchHash2)).publisher).to.equal(publisher2.address); + }); + }); + + context("view methods", () => { + it("getBatchInfo should return zero values for non-existent batch", async () => { + const fakeBatchHash = ethers.keccak256(ethers.toUtf8Bytes("fake")); + const batchInfo = await consolidationBus.getBatchInfo(fakeBatchHash); + expect(batchInfo.publisher).to.equal(ethers.ZeroAddress); + expect(batchInfo.addedAt).to.equal(0); + }); + }); +}); diff --git a/test/0.8.25/consolidationGateway/consolidationGateway.addConsolidationRequests.test.ts b/test/0.8.25/consolidationGateway/consolidationGateway.addConsolidationRequests.test.ts new file mode 100644 index 0000000000..8cbbb93318 --- /dev/null +++ b/test/0.8.25/consolidationGateway/consolidationGateway.addConsolidationRequests.test.ts @@ -0,0 +1,554 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + ConsolidationGateway, + DepositSecurityModule__MockForConsolidationGateway, + Lido__MockForConsolidationGateway, + WithdrawalVault__MockForConsolidationGateway, +} from "typechain-types"; + +import { addressToWC, 
advanceChainTime, generateValidator, prepareLocalMerkleTree } from "lib"; + +import { deployLidoLocator, updateLidoLocatorImplementation } from "test/deploy"; +import { Snapshot } from "test/suite"; + +import { PUBKEYS } from "../consolidation-helpers"; + +const ZERO_ADDRESS = ethers.ZeroAddress; + +// Helper to create a dummy witness (no real CL proof) for tests that don't need proof verification +const dummyWitness = (pubkey: string) => ({ + proof: [] as string[], + pubkey, + validatorIndex: 0, + childBlockTimestamp: 0, + slot: 0, + proposerIndex: 0, +}); + +// Helper functions +const grantConsolidationRequestRole = async ( + consolidationGateway: ConsolidationGateway, + account: HardhatEthersSigner, +) => { + const role = await consolidationGateway.ADD_CONSOLIDATION_REQUEST_ROLE(); + await consolidationGateway.grantRole(role, account); +}; + +const grantLimitManagerRole = async (consolidationGateway: ConsolidationGateway, account: HardhatEthersSigner) => { + const role = await consolidationGateway.EXIT_LIMIT_MANAGER_ROLE(); + await consolidationGateway.grantRole(role, account); +}; + +const setConsolidationLimit = async ( + consolidationGateway: ConsolidationGateway, + signer: HardhatEthersSigner, + maxRequests: number, + requestsPerFrame: number, + frameDuration: number, +) => { + return consolidationGateway + .connect(signer) + .setConsolidationRequestLimit(maxRequests, requestsPerFrame, frameDuration); +}; + +describe("ConsolidationGateway.sol: addConsolidationRequests", () => { + let consolidationGateway: ConsolidationGateway; + let withdrawalVault: WithdrawalVault__MockForConsolidationGateway; + let dsm: DepositSecurityModule__MockForConsolidationGateway; + let lido: Lido__MockForConsolidationGateway; + let admin: HardhatEthersSigner; + let authorizedEntity: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + // Pre-built valid witnesses with CL proofs for target validators + let validWitnesses: { + proof: string[]; + pubkey: string; + 
validatorIndex: number; + childBlockTimestamp: number; + slot: number; + proposerIndex: number; + }[]; + let validatorPubkeys: string[]; + + let originalState: string; + + before(async () => { + [admin, authorizedEntity, stranger] = await ethers.getSigners(); + + const locator = await deployLidoLocator(); + const locatorAddr = await locator.getAddress(); + + withdrawalVault = await ethers.deployContract("WithdrawalVault__MockForConsolidationGateway"); + dsm = await ethers.deployContract("DepositSecurityModule__MockForConsolidationGateway"); + lido = await ethers.deployContract("Lido__MockForConsolidationGateway"); + + await updateLidoLocatorImplementation(locatorAddr, { + withdrawalVault: await withdrawalVault.getAddress(), + depositSecurityModule: await dsm.getAddress(), + lido: await lido.getAddress(), + }); + + // Set up merkle tree for CL proof verification + const localMerkle = await prepareLocalMerkleTree(); + const withdrawalCredentials = addressToWC(await withdrawalVault.getAddress(), 2); + + // Generate 3 validators with matching withdrawal credentials + const validators = []; + const validatorIndices: number[] = []; + for (let i = 0; i < 3; i++) { + const validator = generateValidator(withdrawalCredentials); + const { validatorIndex } = await localMerkle.addValidator(validator.container); + validators.push(validator); + validatorIndices.push(validatorIndex); + } + + // Commit merkle tree to beacon block root + const { childBlockTimestamp, beaconBlockHeader } = await localMerkle.commitChangesToBeaconRoot(); + + // Build valid witnesses for all validators + validWitnesses = []; + validatorPubkeys = []; + for (let i = 0; i < validators.length; i++) { + const proof = await localMerkle.buildProof(validatorIndices[i], beaconBlockHeader); + validWitnesses.push({ + proof, + pubkey: String(validators[i].container.pubkey), + validatorIndex: validatorIndices[i], + childBlockTimestamp, + slot: beaconBlockHeader.slot as number, + proposerIndex: 
beaconBlockHeader.proposerIndex as number, + }); + validatorPubkeys.push(String(validators[i].container.pubkey)); + } + + consolidationGateway = await ethers.deployContract("ConsolidationGateway", [ + admin, + locatorAddr, + 100, // maxConsolidationRequestsLimit + 1, // consolidationsPerFrame + 48, // frameDurationInSec + localMerkle.gIFirstValidator, + localMerkle.gIFirstValidator, + 0, + ]); + + await grantConsolidationRequestRole(consolidationGateway, authorizedEntity); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("authorization", () => { + it("should revert if caller does not have the ADD_CONSOLIDATION_REQUEST_ROLE", async () => { + const role = await consolidationGateway.ADD_CONSOLIDATION_REQUEST_ROLE(); + + await expect( + consolidationGateway + .connect(stranger) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0]], targetWitness: dummyWitness(PUBKEYS[1]) }], + ZERO_ADDRESS, + { value: 2 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, role); + }); + }); + + context("input validation", () => { + it("should revert with ZeroArgument error if msg.value == 0", async () => { + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0]], targetWitness: dummyWitness(PUBKEYS[1]) }], + ZERO_ADDRESS, + { value: 0 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "ZeroArgument") + .withArgs("msg.value"); + }); + + it("should revert with ZeroArgument error if groups count is zero", async () => { + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests([], ZERO_ADDRESS, { value: 10 }), + ) + .to.be.revertedWithCustomError(consolidationGateway, "ZeroArgument") + .withArgs("groups"); + }); + + it("should revert with EmptyGroup error if a source group is 
empty", async () => { + // Second group is empty + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests( + [ + { sourcePubkeys: [PUBKEYS[0]], targetWitness: dummyWitness(PUBKEYS[1]) }, + { sourcePubkeys: [], targetWitness: dummyWitness(PUBKEYS[2]) }, + ], + ZERO_ADDRESS, + { value: 10 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "EmptyGroup") + .withArgs(1); + }); + + it("should revert with EmptyGroup at first index if first group is empty", async () => { + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests( + [ + { sourcePubkeys: [], targetWitness: dummyWitness(PUBKEYS[1]) }, + { sourcePubkeys: [PUBKEYS[0]], targetWitness: dummyWitness(PUBKEYS[2]) }, + ], + ZERO_ADDRESS, + { value: 10 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "EmptyGroup") + .withArgs(0); + }); + }); + + context("preconditions", () => { + it("should revert with DSMDepositsPaused error if DSM deposits are paused", async () => { + await dsm.mock__setDepositsPaused(true); + + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }), + ).to.be.revertedWithCustomError(consolidationGateway, "DSMDepositsPaused"); + }); + + it("should revert with LidoDepositsPaused error if Lido deposits are paused", async () => { + await lido.mock__setCanDeposit(false); + + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }), + ).to.be.revertedWithCustomError(consolidationGateway, "LidoDepositsPaused"); + }); + + it("should not revert when DSM deposits are not paused and Lido deposits are enabled", async () => { + await dsm.mock__setDepositsPaused(false); + await lido.mock__setCanDeposit(true); + + const tx = await 
consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }); + + await expect(tx).to.emit(withdrawalVault, "AddConsolidationRequestsCalled"); + }); + }); + + context("CL proof verification", () => { + it("should revert with RootNotFound when validator witness beacon root is missing", async () => { + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests( + [ + { + sourcePubkeys: [PUBKEYS[0]], + targetWitness: { + ...validWitnesses[0], + childBlockTimestamp: validWitnesses[0].childBlockTimestamp + 1, + }, + }, + ], + ZERO_ADDRESS, + { value: 2 }, + ), + ).to.be.revertedWithCustomError(consolidationGateway, "RootNotFound"); + }); + + it("should revert with InvalidProof when validator witness proof is malformed", async () => { + // InvalidProof is defined in the SSZ library , not on ConsolidationGateway itself. + // The CLProofVerifier calls SSZ.verifyProof() which reverts with SSZ.InvalidProof(), + // but since the error is on the library, it doesn't appear in ConsolidationGateway's ABI. 
+ await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests( + [ + { + sourcePubkeys: [PUBKEYS[0]], + targetWitness: { + ...validWitnesses[0], + proof: [ + "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + ...validWitnesses[0].proof.slice(1), + ], + }, + }, + ], + ZERO_ADDRESS, + { value: 2 }, + ), + ).to.be.revertedWithCustomError({ interface: ethers.Interface.from(["error InvalidProof()"]) }, "InvalidProof"); + }); + }); + + context("rate limiting", () => { + it("should consume limit when processing requests", async () => { + const dataBefore = await consolidationGateway.getConsolidationRequestLimitFullInfo(); + expect(dataBefore[4]).to.equal(100); // currentConsolidationRequestsLimit + + // 2 total requests: [source0, source1] -> target0 + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }], + ZERO_ADDRESS, + { value: 3 }, + ); + + const dataAfter = await consolidationGateway.getConsolidationRequestLimitFullInfo(); + expect(dataAfter[3]).to.equal(98); // prevConsolidationRequestsLimit + expect(dataAfter[4]).to.equal(98); // currentConsolidationRequestsLimit + + await advanceChainTime(48n); + + const dataRestored = await consolidationGateway.getConsolidationRequestLimitFullInfo(); + expect(dataRestored[3]).to.equal(98); // prevConsolidationRequestsLimit + expect(dataRestored[4]).to.equal(99); // currentConsolidationRequestsLimit (restored by 1) + }); + + it("should revert if limit doesn't cover requests count", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + await setConsolidationLimit(consolidationGateway, authorizedEntity, 2, 1, 48); + + // 3 total requests across groups + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests( + [ + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }, + { 
sourcePubkeys: [PUBKEYS[2]], targetWitness: validWitnesses[1] }, + ], + ZERO_ADDRESS, + { value: 4 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "ConsolidationRequestsLimitExceeded") + .withArgs(3, 2); + }); + + it("should succeed when limit covers all requests and exhaust remaining limit", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + await setConsolidationLimit(consolidationGateway, authorizedEntity, 3, 1, 48); + + // 3 total requests: [source0, source1] -> target0, [source2] -> target1 + const groups = [ + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }, + { sourcePubkeys: [PUBKEYS[2]], targetWitness: validWitnesses[1] }, + ]; + + const tx = await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests(groups, ZERO_ADDRESS, { value: 4 }); + + const flatSources = [PUBKEYS[0], PUBKEYS[1], PUBKEYS[2]]; + const flatTargets = [validatorPubkeys[0], validatorPubkeys[0], validatorPubkeys[1]]; + await expect(tx).to.emit(withdrawalVault, "AddConsolidationRequestsCalled").withArgs(flatSources, flatTargets); + + // Limit fully consumed — next request should fail + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests(groups, ZERO_ADDRESS, { value: 4 }), + ) + .to.be.revertedWithCustomError(consolidationGateway, "ConsolidationRequestsLimitExceeded") + .withArgs(3, 0); + + // Restore limit after frame advancement + await advanceChainTime(48n * 3n); + + await expect( + consolidationGateway.connect(authorizedEntity).addConsolidationRequests(groups, ZERO_ADDRESS, { value: 4 }), + ) + .to.emit(withdrawalVault, "AddConsolidationRequestsCalled") + .withArgs(flatSources, flatTargets); + }); + }); + + context("fee handling", () => { + it("should revert if total fee is insufficient", async () => { + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], 
targetWitness: validWitnesses[0] }], + ZERO_ADDRESS, + { value: 1 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "InsufficientFee") + .withArgs(2, 1); + }); + + it("should use the current consolidation fee for insufficient fee checks", async () => { + await withdrawalVault.mock__setFee(3); + + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }], + ZERO_ADDRESS, + { value: 5 }, + ), + ) + .to.be.revertedWithCustomError(consolidationGateway, "InsufficientFee") + .withArgs(6, 5); + }); + + it("should forward the configured fee to withdrawal vault and refund the remainder", async () => { + await withdrawalVault.mock__setFee(4); + + const withdrawalVaultBalanceBefore = await ethers.provider.getBalance(withdrawalVault); + const recipientBalanceBefore = await ethers.provider.getBalance(stranger); + + await consolidationGateway.connect(authorizedEntity).addConsolidationRequests( + [ + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }, + { sourcePubkeys: [PUBKEYS[2]], targetWitness: validWitnesses[1] }, + ], + stranger, + { value: 15 }, + ); + + const withdrawalVaultBalanceAfter = await ethers.provider.getBalance(withdrawalVault); + const recipientBalanceAfter = await ethers.provider.getBalance(stranger); + + expect(withdrawalVaultBalanceAfter).to.equal(withdrawalVaultBalanceBefore + 12n); + expect(recipientBalanceAfter).to.equal(recipientBalanceBefore + 3n); + }); + + it("should preserve gateway eth balance (no stuck funds)", async () => { + const balanceBefore = await ethers.provider.getBalance(consolidationGateway); + + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }); + + const balanceAfter = await ethers.provider.getBalance(consolidationGateway); + 
expect(balanceAfter).to.equal(balanceBefore); + }); + + it("should refund fee to recipient address", async () => { + const prevBalance = await ethers.provider.getBalance(stranger); + + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], stranger, { + value: 1 + 7, + }); + + const newBalance = await ethers.provider.getBalance(stranger); + + expect(newBalance).to.equal(prevBalance + 7n); + }); + + it("should refund fee to sender address when refundRecipient is zero", async () => { + const SENDER_ADDR = authorizedEntity.address; + const prevBalance = await ethers.provider.getBalance(SENDER_ADDR); + + const tx = await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 1 + 7, + }); + + const receipt = await tx.wait(); + const gasUsed = receipt!.gasUsed * receipt!.gasPrice; + + const newBalance = await ethers.provider.getBalance(SENDER_ADDR); + expect(newBalance).to.equal(prevBalance - gasUsed - 1n); + }); + + it("should revert with FeeRefundFailed if refund recipient refuses ETH", async () => { + const refundReverter = await ethers.deployContract("RefundReverter"); + + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], + await refundReverter.getAddress(), + { value: 2 }, + ), + ).to.be.revertedWithCustomError(consolidationGateway, "FeeRefundFailed"); + }); + + it("should not make refund if refund is zero", async () => { + const recipientBalanceBefore = await ethers.provider.getBalance(stranger); + + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], stranger, { + value: 1, + }); + + const recipientBalanceAfter = await 
ethers.provider.getBalance(stranger); + expect(recipientBalanceAfter).to.equal(recipientBalanceBefore); + }); + + it("should refund ETH if refund > 0", async () => { + const recipientBalanceBefore = await ethers.provider.getBalance(stranger); + + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], stranger, { + value: 5, + }); + + const recipientBalanceAfter = await ethers.provider.getBalance(stranger); + expect(recipientBalanceAfter).to.equal(recipientBalanceBefore + 4n); // 5 - 1 fee = 4 refund + }); + }); + + context("request forwarding", () => { + it("should expand grouped sources to flat source-target pairs", async () => { + // Grouped: [source0, source1] -> target0, i.e. two sources to one target + const tx = await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }], + ZERO_ADDRESS, + { value: 3 }, + ); + + const flatSources = [PUBKEYS[0], PUBKEYS[1]]; + const flatTargets = [validatorPubkeys[0], validatorPubkeys[0]]; + await expect(tx).to.emit(withdrawalVault, "AddConsolidationRequestsCalled").withArgs(flatSources, flatTargets); + }); + + it("should expand multiple groups with multiple sources each", async () => { + // Group 0: [source0, source1] -> target0 (2 pairs) + // Group 1: [source2] -> target1 (1 pair) + const groups = [ + { sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }, + { sourcePubkeys: [PUBKEYS[2]], targetWitness: validWitnesses[1] }, + ]; + + const tx = await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests(groups, ZERO_ADDRESS, { value: 4 }); + + const flatSources = [PUBKEYS[0], PUBKEYS[1], PUBKEYS[2]]; + const flatTargets = [validatorPubkeys[0], validatorPubkeys[0], validatorPubkeys[1]]; + await expect(tx).to.emit(withdrawalVault, 
"AddConsolidationRequestsCalled").withArgs(flatSources, flatTargets); + }); + }); +}); diff --git a/test/0.8.25/consolidationGateway/consolidationGateway.deploy.test.ts b/test/0.8.25/consolidationGateway/consolidationGateway.deploy.test.ts new file mode 100644 index 0000000000..620383aba9 --- /dev/null +++ b/test/0.8.25/consolidationGateway/consolidationGateway.deploy.test.ts @@ -0,0 +1,121 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { WithdrawalVault__MockForConsolidationGateway } from "typechain-types"; + +import { deployLidoLocator, updateLidoLocatorImplementation } from "test/deploy"; + +const DUMMY_GI = "0x0000000000000000000000000000000000000000000000000096000000000028"; + +describe("ConsolidationGateway.sol: deployment", () => { + let withdrawalVault: WithdrawalVault__MockForConsolidationGateway; + + before(async () => { + const locator = await deployLidoLocator(); + const locatorAddr = await locator.getAddress(); + + withdrawalVault = await ethers.deployContract("WithdrawalVault__MockForConsolidationGateway"); + + await updateLidoLocatorImplementation(locatorAddr, { + withdrawalVault: await withdrawalVault.getAddress(), + }); + }); + + it("should deploy successfully with valid admin and verify initial state", async () => { + const [admin] = await ethers.getSigners(); + const locatorAddr = (await deployLidoLocator()).getAddress(); + + const gateway = await ethers.deployContract("ConsolidationGateway", [ + admin.address, + locatorAddr, + 100, + 1, + 48, + DUMMY_GI, + DUMMY_GI, + 0, + ]); + + const adminRole = await gateway.DEFAULT_ADMIN_ROLE(); + expect(await gateway.hasRole(adminRole, admin.address)).to.be.true; + }); + + it("should initialize rate limit config during deployment", async () => { + const [admin] = await ethers.getSigners(); + const locatorAddr = (await deployLidoLocator()).getAddress(); + + const gateway = await ethers.deployContract("ConsolidationGateway", [ + admin.address, + locatorAddr, + 50, + 5, + 100, 
+ DUMMY_GI, + DUMMY_GI, + 0, + ]); + + const data = await gateway.getConsolidationRequestLimitFullInfo(); + expect(data[0]).to.equal(50); // maxConsolidationRequestsLimit + expect(data[1]).to.equal(5); // consolidationsPerFrame + expect(data[2]).to.equal(100); // frameDurationInSec + expect(data[3]).to.equal(50); // prevConsolidationRequestsLimit + expect(data[4]).to.equal(50); // currentConsolidationRequestsLimit + }); + + it("should emit ConsolidationRequestsLimitSet during deployment", async () => { + const [admin] = await ethers.getSigners(); + const locatorAddr = (await deployLidoLocator()).getAddress(); + + const gateway = await ethers.deployContract("ConsolidationGateway", [ + admin.address, + locatorAddr, + 100, + 1, + 48, + DUMMY_GI, + DUMMY_GI, + 0, + ]); + + await expect(gateway.deploymentTransaction()) + .to.emit(gateway, "ConsolidationRequestsLimitSet") + .withArgs(100, 1, 48); + }); + + it("should revert if admin is zero address", async () => { + const locatorAddr = (await deployLidoLocator()).getAddress(); + + await expect( + ethers.deployContract("ConsolidationGateway", [ + ethers.ZeroAddress, + locatorAddr, + 100, + 1, + 48, + DUMMY_GI, + DUMMY_GI, + 0, + ]), + ).to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationGateway"), "AdminCannotBeZero"); + }); + + it("should revert if lidoLocator is zero address", async () => { + const [admin] = await ethers.getSigners(); + + await expect( + ethers.deployContract("ConsolidationGateway", [ + admin.address, + ethers.ZeroAddress, + 100, + 1, + 48, + DUMMY_GI, + DUMMY_GI, + 0, + ]), + ) + .to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationGateway"), "ZeroArgument") + .withArgs("lidoLocator"); + }); +}); diff --git a/test/0.8.25/consolidationGateway/consolidationGateway.pausable.test.ts b/test/0.8.25/consolidationGateway/consolidationGateway.pausable.test.ts new file mode 100644 index 0000000000..3e3a9c4d5b --- /dev/null +++ 
b/test/0.8.25/consolidationGateway/consolidationGateway.pausable.test.ts @@ -0,0 +1,396 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + ConsolidationGateway, + DepositSecurityModule__MockForConsolidationGateway, + Lido__MockForConsolidationGateway, + WithdrawalVault__MockForConsolidationGateway, +} from "typechain-types"; + +import { + addressToWC, + advanceChainTime, + generateValidator, + getCurrentBlockTimestamp, + prepareLocalMerkleTree, +} from "lib"; + +import { deployLidoLocator, updateLidoLocatorImplementation } from "test/deploy"; +import { Snapshot } from "test/suite"; + +import { PUBKEYS } from "../consolidation-helpers"; + +const dummyWitness = (pubkey: string) => ({ + proof: [] as string[], + pubkey, + validatorIndex: 0, + childBlockTimestamp: 0, + slot: 0, + proposerIndex: 0, +}); + +const ZERO_ADDRESS = ethers.ZeroAddress; + +describe("ConsolidationGateway.sol: pausable", () => { + let consolidationGateway: ConsolidationGateway; + let withdrawalVault: WithdrawalVault__MockForConsolidationGateway; + let dsm: DepositSecurityModule__MockForConsolidationGateway; + let admin: HardhatEthersSigner; + let authorizedEntity: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let PAUSE_ROLE: string; + let RESUME_ROLE: string; + + // Pre-built valid witnesses with CL proofs for target validators + let validWitnesses: { + proof: string[]; + pubkey: string; + validatorIndex: number; + childBlockTimestamp: number; + slot: number; + proposerIndex: number; + }[]; + + let originalState: string; + + before(async () => { + [admin, authorizedEntity, stranger] = await ethers.getSigners(); + + const locator = await deployLidoLocator(); + const locatorAddr = await locator.getAddress(); + + withdrawalVault = await ethers.deployContract("WithdrawalVault__MockForConsolidationGateway"); + dsm = await 
ethers.deployContract("DepositSecurityModule__MockForConsolidationGateway"); + const lido: Lido__MockForConsolidationGateway = await ethers.deployContract("Lido__MockForConsolidationGateway"); + + await updateLidoLocatorImplementation(locatorAddr, { + withdrawalVault: await withdrawalVault.getAddress(), + depositSecurityModule: await dsm.getAddress(), + lido: await lido.getAddress(), + }); + + // Set up merkle tree for CL proof verification + const localMerkle = await prepareLocalMerkleTree(); + const withdrawalCredentials = addressToWC(await withdrawalVault.getAddress(), 2); + + // Generate a validator with matching withdrawal credentials + const validator = generateValidator(withdrawalCredentials); + const { validatorIndex } = await localMerkle.addValidator(validator.container); + + // Commit merkle tree to beacon block root + const { childBlockTimestamp, beaconBlockHeader } = await localMerkle.commitChangesToBeaconRoot(); + + // Build valid witness + const proof = await localMerkle.buildProof(validatorIndex, beaconBlockHeader); + validWitnesses = [ + { + proof, + pubkey: String(validator.container.pubkey), + validatorIndex, + childBlockTimestamp, + slot: beaconBlockHeader.slot as number, + proposerIndex: beaconBlockHeader.proposerIndex as number, + }, + ]; + + consolidationGateway = await ethers.deployContract("ConsolidationGateway", [ + admin, + locatorAddr, + 100, + 1, + 48, + localMerkle.gIFirstValidator, + localMerkle.gIFirstValidator, + 0, + ]); + + const role = await consolidationGateway.ADD_CONSOLIDATION_REQUEST_ROLE(); + await consolidationGateway.grantRole(role, authorizedEntity); + + PAUSE_ROLE = await consolidationGateway.PAUSE_ROLE(); + RESUME_ROLE = await consolidationGateway.RESUME_ROLE(); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("pausable until", () => { + beforeEach(async () => { + // set up necessary roles + await 
consolidationGateway.connect(admin).grantRole(PAUSE_ROLE, admin); + await consolidationGateway.connect(admin).grantRole(RESUME_ROLE, admin); + }); + + context("resume", () => { + it("should revert if the sender does not have the RESUME_ROLE", async () => { + // First pause the contract + await consolidationGateway.connect(admin).pauseFor(1000n); + + // Try to resume without the RESUME_ROLE + await expect(consolidationGateway.connect(stranger).resume()) + .to.be.revertedWithCustomError(consolidationGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, RESUME_ROLE); + }); + + it("should revert if the contract is not paused", async () => { + // Contract is initially not paused + await expect(consolidationGateway.connect(admin).resume()).to.be.revertedWithCustomError( + consolidationGateway, + "PausedExpected", + ); + }); + + it("should resume the contract when paused and emit Resumed event", async () => { + // First pause the contract + await consolidationGateway.connect(admin).pauseFor(1000n); + expect(await consolidationGateway.isPaused()).to.equal(true); + + // Resume the contract + await expect(consolidationGateway.connect(admin).resume()).to.emit(consolidationGateway, "Resumed"); + + // Verify contract is resumed + expect(await consolidationGateway.isPaused()).to.equal(false); + }); + }); + + context("pauseFor", () => { + it("should revert if the sender does not have the PAUSE_ROLE", async () => { + await expect(consolidationGateway.connect(stranger).pauseFor(1000n)) + .to.be.revertedWithCustomError(consolidationGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, PAUSE_ROLE); + }); + + it("should revert if the contract is already paused", async () => { + // First pause the contract + await consolidationGateway.connect(admin).pauseFor(1000n); + + // Try to pause again + await expect(consolidationGateway.connect(admin).pauseFor(500n)).to.be.revertedWithCustomError( + consolidationGateway, + "ResumedExpected", + ); + }); + 
+ it("should revert if pause duration is zero", async () => { + await expect(consolidationGateway.connect(admin).pauseFor(0n)).to.be.revertedWithCustomError( + consolidationGateway, + "ZeroPauseDuration", + ); + }); + + it("should pause the contract for the specified duration and emit Paused event", async () => { + await expect(consolidationGateway.connect(admin).pauseFor(1000n)) + .to.emit(consolidationGateway, "Paused") + .withArgs(1000n); + + expect(await consolidationGateway.isPaused()).to.equal(true); + }); + + it("should pause the contract indefinitely with PAUSE_INFINITELY", async () => { + const pauseInfinitely = await consolidationGateway.PAUSE_INFINITELY(); + + // Pause the contract indefinitely + await expect(consolidationGateway.connect(admin).pauseFor(pauseInfinitely)) + .to.emit(consolidationGateway, "Paused") + .withArgs(pauseInfinitely); + + // Verify contract is paused + expect(await consolidationGateway.isPaused()).to.equal(true); + + // Advance time significantly + await advanceChainTime(1_000_000_000n); + + // Contract should still be paused + expect(await consolidationGateway.isPaused()).to.equal(true); + }); + + it("should automatically resume after the pause duration passes", async () => { + // Pause the contract for 100 seconds + await consolidationGateway.connect(admin).pauseFor(100n); + expect(await consolidationGateway.isPaused()).to.equal(true); + + // Advance time by 101 seconds + await advanceChainTime(101n); + + // Contract should be automatically resumed + expect(await consolidationGateway.isPaused()).to.equal(false); + }); + }); + + context("pauseUntil", () => { + it("should revert if the sender does not have the PAUSE_ROLE", async () => { + const timestamp = await getCurrentBlockTimestamp(); + await expect(consolidationGateway.connect(stranger).pauseUntil(timestamp + 1000n)) + .to.be.revertedWithCustomError(consolidationGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, PAUSE_ROLE); + }); + + it("should 
revert if the contract is already paused", async () => { + const timestamp = await getCurrentBlockTimestamp(); + + // First pause the contract + await consolidationGateway.connect(admin).pauseFor(1000n); + + // Try to pause again with pauseUntil + await expect(consolidationGateway.connect(admin).pauseUntil(timestamp + 1000n)).to.be.revertedWithCustomError( + consolidationGateway, + "ResumedExpected", + ); + }); + + it("should revert if timestamp is in the past", async () => { + const timestamp = await getCurrentBlockTimestamp(); + + await expect(consolidationGateway.connect(admin).pauseUntil(timestamp - 1000n)).to.be.revertedWithCustomError( + consolidationGateway, + "PauseUntilMustBeInFuture", + ); + }); + + it("should pause the contract until the specified timestamp and emit Paused event", async () => { + const timestamp = await getCurrentBlockTimestamp(); + const pauseUntil = timestamp + 1000n; + + await expect(consolidationGateway.connect(admin).pauseUntil(pauseUntil)) + .to.emit(consolidationGateway, "Paused") + .withArgs(pauseUntil - timestamp); + + expect(await consolidationGateway.isPaused()).to.equal(true); + }); + + it("should pause the contract indefinitely with PAUSE_INFINITELY", async () => { + const pauseInfinitely = await consolidationGateway.PAUSE_INFINITELY(); + + // Pause the contract indefinitely + await expect(consolidationGateway.connect(admin).pauseUntil(pauseInfinitely)) + .to.emit(consolidationGateway, "Paused") + .withArgs(pauseInfinitely); + + // Verify contract is paused + expect(await consolidationGateway.isPaused()).to.equal(true); + + // Advance time significantly + await advanceChainTime(1_000_000_000n); + + // Contract should still be paused + expect(await consolidationGateway.isPaused()).to.equal(true); + }); + + it("should automatically resume after the pause timestamp passes", async () => { + const timestamp = await getCurrentBlockTimestamp(); + const pauseUntil = timestamp + 100n; + + // Pause the contract until timestamp + 100 + 
await consolidationGateway.connect(admin).pauseUntil(pauseUntil); + expect(await consolidationGateway.isPaused()).to.equal(true); + + // Advance time by 101 seconds + await advanceChainTime(101n); + + // Contract should be automatically resumed + expect(await consolidationGateway.isPaused()).to.equal(false); + }); + }); + + context("Interaction with addConsolidationRequests", () => { + it("pauseFor: should prevent consolidation requests immediately after pausing", async () => { + // Pause the contract + await consolidationGateway.connect(admin).pauseFor(1000n); + + // Should prevent consolidation requests + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0]], targetWitness: dummyWitness(PUBKEYS[1]) }], + ZERO_ADDRESS, + { value: 2 }, + ), + ).to.be.revertedWithCustomError(consolidationGateway, "ResumedExpected"); + }); + + it("pauseUntil: should prevent consolidation requests immediately after pausing", async () => { + const timestamp = await getCurrentBlockTimestamp(); + + // Pause the contract + await consolidationGateway.connect(admin).pauseUntil(timestamp + 1000n); + + // Should prevent consolidation requests + await expect( + consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0]], targetWitness: dummyWitness(PUBKEYS[1]) }], + ZERO_ADDRESS, + { value: 2 }, + ), + ).to.be.revertedWithCustomError(consolidationGateway, "ResumedExpected"); + }); + + it("pauseFor: should allow consolidation requests immediately after resuming", async () => { + // Pause and then resume the contract + await consolidationGateway.connect(admin).pauseFor(1000n); + await consolidationGateway.connect(admin).resume(); + + // Should allow consolidation requests + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }); + }); + + 
it("pauseUntil: should allow consolidation requests immediately after resuming", async () => { + const timestamp = await getCurrentBlockTimestamp(); + + // Pause and then resume the contract + await consolidationGateway.connect(admin).pauseUntil(timestamp + 1000n); + await consolidationGateway.connect(admin).resume(); + + // Should allow consolidation requests + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }); + }); + + it("pauseFor: should allow consolidation requests after pause duration automatically expires", async () => { + // Pause the contract for 100 seconds + await consolidationGateway.connect(admin).pauseFor(100n); + + // Advance time by 101 seconds + await advanceChainTime(101n); + + // Should allow consolidation requests + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }); + }); + + it("pauseUntil: should allow consolidation requests after pause duration automatically expires", async () => { + const timestamp = await getCurrentBlockTimestamp(); + + // Pause the contract until timestamp + 100 + await consolidationGateway.connect(admin).pauseUntil(timestamp + 100n); + + // Advance time by 101 seconds + await advanceChainTime(101n); + + // Should allow consolidation requests + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests([{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], ZERO_ADDRESS, { + value: 2, + }); + }); + }); + }); +}); diff --git a/test/0.8.25/consolidationGateway/consolidationGateway.rateLimit.test.ts b/test/0.8.25/consolidationGateway/consolidationGateway.rateLimit.test.ts new file mode 100644 index 0000000000..b6ff43f5e4 --- /dev/null +++ b/test/0.8.25/consolidationGateway/consolidationGateway.rateLimit.test.ts @@ -0,0 
+1,251 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + ConsolidationGateway, + DepositSecurityModule__MockForConsolidationGateway, + Lido__MockForConsolidationGateway, + WithdrawalVault__MockForConsolidationGateway, +} from "typechain-types"; + +import { addressToWC, advanceChainTime, generateValidator, prepareLocalMerkleTree } from "lib"; + +import { deployLidoLocator, updateLidoLocatorImplementation } from "test/deploy"; +import { Snapshot } from "test/suite"; + +import { PUBKEYS } from "../consolidation-helpers"; + +// Helper functions +const grantLimitManagerRole = async (consolidationGateway: ConsolidationGateway, account: HardhatEthersSigner) => { + const role = await consolidationGateway.EXIT_LIMIT_MANAGER_ROLE(); + await consolidationGateway.grantRole(role, account); +}; + +const setConsolidationLimit = async ( + consolidationGateway: ConsolidationGateway, + signer: HardhatEthersSigner, + maxRequests: number, + requestsPerFrame: number, + frameDuration: number, +) => { + return consolidationGateway + .connect(signer) + .setConsolidationRequestLimit(maxRequests, requestsPerFrame, frameDuration); +}; + +const expectLimitData = async ( + consolidationGateway: ConsolidationGateway, + expectedMaxRequests: number, + expectedPerFrame: number, + expectedFrameDuration: number, + expectedPrevLimit: number, + expectedCurrentLimit: number | typeof ethers.MaxUint256, +) => { + const data = await consolidationGateway.getConsolidationRequestLimitFullInfo(); + expect(data[0]).to.equal(expectedMaxRequests); // maxConsolidationRequestsLimit + expect(data[1]).to.equal(expectedPerFrame); // consolidationsPerFrame + expect(data[2]).to.equal(expectedFrameDuration); // frameDurationInSec + expect(data[3]).to.equal(expectedPrevLimit); // prevConsolidationRequestsLimit + expect(data[4]).to.equal(expectedCurrentLimit); // currentConsolidationRequestsLimit +}; + 
+describe("ConsolidationGateway.sol: rate limit management", () => { + let consolidationGateway: ConsolidationGateway; + let withdrawalVault: WithdrawalVault__MockForConsolidationGateway; + let admin: HardhatEthersSigner; + let authorizedEntity: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let validWitnesses: { + proof: string[]; + pubkey: string; + validatorIndex: number; + childBlockTimestamp: number; + slot: number; + proposerIndex: number; + }[]; + + let originalState: string; + + before(async () => { + [admin, authorizedEntity, stranger] = await ethers.getSigners(); + + const locator = await deployLidoLocator(); + const locatorAddr = await locator.getAddress(); + + withdrawalVault = await ethers.deployContract("WithdrawalVault__MockForConsolidationGateway"); + const dsm: DepositSecurityModule__MockForConsolidationGateway = await ethers.deployContract( + "DepositSecurityModule__MockForConsolidationGateway", + ); + const lido: Lido__MockForConsolidationGateway = await ethers.deployContract("Lido__MockForConsolidationGateway"); + + await updateLidoLocatorImplementation(locatorAddr, { + withdrawalVault: await withdrawalVault.getAddress(), + depositSecurityModule: await dsm.getAddress(), + lido: await lido.getAddress(), + }); + + // Set up merkle tree for CL proof verification + const localMerkle = await prepareLocalMerkleTree(); + const withdrawalCredentials = addressToWC(await withdrawalVault.getAddress(), 2); + + // Generate validators with matching withdrawal credentials + const validators = []; + const validatorIndices: number[] = []; + for (let i = 0; i < 3; i++) { + const validator = generateValidator(withdrawalCredentials); + const { validatorIndex } = await localMerkle.addValidator(validator.container); + validators.push(validator); + validatorIndices.push(validatorIndex); + } + + const { childBlockTimestamp, beaconBlockHeader } = await localMerkle.commitChangesToBeaconRoot(); + + validWitnesses = []; + for (let i = 0; i < 
validators.length; i++) { + const proof = await localMerkle.buildProof(validatorIndices[i], beaconBlockHeader); + validWitnesses.push({ + proof, + pubkey: String(validators[i].container.pubkey), + validatorIndex: validatorIndices[i], + childBlockTimestamp, + slot: beaconBlockHeader.slot as number, + proposerIndex: beaconBlockHeader.proposerIndex as number, + }); + } + + consolidationGateway = await ethers.deployContract("ConsolidationGateway", [ + admin, + locatorAddr, + 100, // maxConsolidationRequestsLimit + 1, // consolidationsPerFrame + 48, // frameDurationInSec + localMerkle.gIFirstValidator, + localMerkle.gIFirstValidator, + 0, + ]); + + const role = await consolidationGateway.ADD_CONSOLIDATION_REQUEST_ROLE(); + await consolidationGateway.grantRole(role, authorizedEntity); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("setConsolidationRequestLimit", () => { + it("should revert without EXIT_LIMIT_MANAGER_ROLE", async () => { + const limitManagerRole = await consolidationGateway.EXIT_LIMIT_MANAGER_ROLE(); + + await expect(consolidationGateway.connect(stranger).setConsolidationRequestLimit(4, 1, 48)) + .to.be.revertedWithCustomError(consolidationGateway, "AccessControlUnauthorizedAccount") + .withArgs(await stranger.getAddress(), limitManagerRole); + }); + + it("should set consolidation limit and emit event", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + + const limitTx = await setConsolidationLimit(consolidationGateway, authorizedEntity, 4, 1, 48); + await expect(limitTx).to.emit(consolidationGateway, "ConsolidationRequestsLimitSet").withArgs(4, 1, 48); + }); + + it("should revert if consolidationsPerFrame bigger than maxConsolidationRequestsLimit", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + + await expect( + setConsolidationLimit(consolidationGateway, authorizedEntity, 0, 
1, 48), + ).to.be.revertedWithCustomError(consolidationGateway, "TooLargeItemsPerFrame"); + }); + + it("should update limit config values", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + + await setConsolidationLimit(consolidationGateway, authorizedEntity, 50, 5, 100); + + await expectLimitData(consolidationGateway, 50, 5, 100, 50, 50); + }); + + it("should allow decreasing limit mid-usage", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + + // Consume some limit + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0]], targetWitness: validWitnesses[0] }], + ethers.ZeroAddress, + { value: 2 }, + ); + + // Decrease limit — should succeed + await setConsolidationLimit(consolidationGateway, authorizedEntity, 10, 1, 48); + await expectLimitData(consolidationGateway, 10, 1, 48, 10, 10); + }); + }); + + context("getConsolidationRequestLimitFullInfo", () => { + it("should return initial limit data", async () => { + await expectLimitData(consolidationGateway, 100, 1, 48, 100, 100); + }); + + it("should reflect limit consumption after requests", async () => { + // 2 total requests: [source0, source1] -> target0 + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }], + ethers.ZeroAddress, + { value: 3 }, + ); + + await expectLimitData(consolidationGateway, 100, 1, 48, 98, 98); + }); + + it("should restore limit after frame advancement", async () => { + // Consume 2 + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests( + [{ sourcePubkeys: [PUBKEYS[0], PUBKEYS[1]], targetWitness: validWitnesses[0] }], + ethers.ZeroAddress, + { value: 3 }, + ); + + await expectLimitData(consolidationGateway, 100, 1, 48, 98, 98); + + // Advance one frame → restores 1 + await advanceChainTime(48n); + await 
expectLimitData(consolidationGateway, 100, 1, 48, 98, 99); + + // Advance another frame → restores another 1 + await advanceChainTime(48n); + await expectLimitData(consolidationGateway, 100, 1, 48, 98, 100); + }); + + it("should return currentConsolidationRequestsLimit as MaxUint256 when limit is 0 (unlimited)", async () => { + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + + await setConsolidationLimit(consolidationGateway, authorizedEntity, 0, 0, 48); + + await expectLimitData(consolidationGateway, 0, 0, 48, 0, ethers.MaxUint256); + }); + + it("should allow unlimited consolidation requests when limit is 0", async () => { + // Default limit is 100, but limit 0 means unlimited — deploy with 0 + await grantLimitManagerRole(consolidationGateway, authorizedEntity); + await setConsolidationLimit(consolidationGateway, authorizedEntity, 0, 0, 48); + + // 3 total requests grouped into pairs + const groups = Array(3) + .fill(0) + .map((_, i) => ({ sourcePubkeys: [PUBKEYS[i % 3]], targetWitness: validWitnesses[i % 3] })); + + // Should not revert even with many requests when limit is 0 (unlimited) + await consolidationGateway + .connect(authorizedEntity) + .addConsolidationRequests(groups, ethers.ZeroAddress, { value: 10 }); + }); + }); +}); diff --git a/test/0.8.25/consolidationMigrator/consolidationMigrator.allowlist.test.ts b/test/0.8.25/consolidationMigrator/consolidationMigrator.allowlist.test.ts new file mode 100644 index 0000000000..1d7a055b7b --- /dev/null +++ b/test/0.8.25/consolidationMigrator/consolidationMigrator.allowlist.test.ts @@ -0,0 +1,276 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + ConsolidationBus__MockForConsolidationMigrator, + ConsolidationMigrator, + StakingRouter__MockForConsolidationMigrator, +} from "typechain-types"; + +import { proxify } from "lib/proxy"; + +import { Snapshot } from "test/suite"; + 
+describe("ConsolidationMigrator.sol: allowlist", () => { + let consolidationMigrator: ConsolidationMigrator; + let stakingRouter: StakingRouter__MockForConsolidationMigrator; + let consolidationBus: ConsolidationBus__MockForConsolidationMigrator; + let admin: HardhatEthersSigner; + let allowPairManager: HardhatEthersSigner; + let disallowPairManager: HardhatEthersSigner; + let submitter: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let ALLOW_PAIR_ROLE: string; + let DISALLOW_PAIR_ROLE: string; + + let originalState: string; + + before(async () => { + [admin, allowPairManager, disallowPairManager, submitter, stranger] = await ethers.getSigners(); + + stakingRouter = await ethers.deployContract("StakingRouter__MockForConsolidationMigrator"); + consolidationBus = await ethers.deployContract("ConsolidationBus__MockForConsolidationMigrator"); + + const impl = await ethers.deployContract("ConsolidationMigrator", [ + await stakingRouter.getAddress(), + await consolidationBus.getAddress(), + 1, // sourceModuleId + 2, // targetModuleId + ]); + [consolidationMigrator] = await proxify({ impl, admin }); + await consolidationMigrator.initialize(admin.address); + + ALLOW_PAIR_ROLE = await consolidationMigrator.ALLOW_PAIR_ROLE(); + DISALLOW_PAIR_ROLE = await consolidationMigrator.DISALLOW_PAIR_ROLE(); + + // Grant roles + await consolidationMigrator.connect(admin).grantRole(ALLOW_PAIR_ROLE, allowPairManager.address); + await consolidationMigrator.connect(admin).grantRole(DISALLOW_PAIR_ROLE, disallowPairManager.address); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("allowPair", () => { + it("should allow a pair with submitter", async () => { + const sourceOpId = 1; + const targetOpId = 10; + + await expect(consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId, submitter.address)) + .to.emit(consolidationMigrator, 
"ConsolidationPairAllowed") + .withArgs(sourceOpId, targetOpId, submitter.address); + + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId)).to.be.true; + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId)).to.equal(submitter.address); + }); + + it("should revert if caller does not have ALLOW_PAIR_ROLE", async () => { + await expect(consolidationMigrator.connect(stranger).allowPair(1, 10, submitter.address)) + .to.be.revertedWithCustomError(consolidationMigrator, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, ALLOW_PAIR_ROLE); + }); + + it("should revert if submitter is zero address", async () => { + await expect(consolidationMigrator.connect(allowPairManager).allowPair(1, 10, ethers.ZeroAddress)) + .to.be.revertedWithCustomError(consolidationMigrator, "ZeroArgument") + .withArgs("submitter"); + }); + + it("should allow updating submitter for existing pair (idempotent)", async () => { + const sourceOpId = 1; + const targetOpId = 10; + + // First allow with submitter + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId, submitter.address); + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId)).to.equal(submitter.address); + + // Update submitter to stranger + await expect(consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId, stranger.address)) + .to.emit(consolidationMigrator, "ConsolidationPairAllowed") + .withArgs(sourceOpId, targetOpId, stranger.address); + + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId)).to.be.true; + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId)).to.equal(stranger.address); + }); + + it("should allow multiple targets for same source with different submitters", async () => { + const sourceOpId = 1; + const targetOpId1 = 10; + const targetOpId2 = 20; + const targetOpId3 = 30; + + await 
consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId1, submitter.address); + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId2, stranger.address); + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId3, admin.address); + + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId1)).to.be.true; + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId2)).to.be.true; + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId3)).to.be.true; + + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId1)).to.equal(submitter.address); + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId2)).to.equal(stranger.address); + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId3)).to.equal(admin.address); + + const targets = await consolidationMigrator.getAllowedTargets(sourceOpId); + expect(targets.length).to.equal(3); + expect(targets).to.include(BigInt(targetOpId1)); + expect(targets).to.include(BigInt(targetOpId2)); + expect(targets).to.include(BigInt(targetOpId3)); + }); + }); + + context("disallowPair", () => { + beforeEach(async () => { + await consolidationMigrator.connect(allowPairManager).allowPair(1, 10, submitter.address); + }); + + it("should disallow a pair and clear submitter", async () => { + const sourceOpId = 1; + const targetOpId = 10; + + // Verify submitter is set before disallow + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId)).to.equal(submitter.address); + + await expect(consolidationMigrator.connect(disallowPairManager).disallowPair(sourceOpId, targetOpId)) + .to.emit(consolidationMigrator, "ConsolidationPairDisallowed") + .withArgs(sourceOpId, targetOpId, submitter.address); + + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId)).to.be.false; + expect(await consolidationMigrator.getSubmitter(sourceOpId, 
targetOpId)).to.equal(ethers.ZeroAddress); + }); + + it("should revert if caller does not have DISALLOW_PAIR_ROLE", async () => { + await expect(consolidationMigrator.connect(stranger).disallowPair(1, 10)) + .to.be.revertedWithCustomError(consolidationMigrator, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, DISALLOW_PAIR_ROLE); + }); + + it("should revert if caller has ALLOW_PAIR_ROLE but not DISALLOW_PAIR_ROLE", async () => { + await expect(consolidationMigrator.connect(allowPairManager).disallowPair(1, 10)) + .to.be.revertedWithCustomError(consolidationMigrator, "AccessControlUnauthorizedAccount") + .withArgs(allowPairManager.address, DISALLOW_PAIR_ROLE); + }); + + it("should revert if pair not in allowlist", async () => { + const sourceOpId = 999; + const targetOpId = 888; + + await expect(consolidationMigrator.connect(disallowPairManager).disallowPair(sourceOpId, targetOpId)) + .to.be.revertedWithCustomError(consolidationMigrator, "PairNotInAllowlist") + .withArgs(sourceOpId, targetOpId); + }); + }); + + context("selfDisallowPair", () => { + const sourceOpId = 1; + const targetOpId = 10; + + beforeEach(async () => { + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, targetOpId, submitter.address); + }); + + it("should allow submitter to self-disallow their pair", async () => { + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId)).to.be.true; + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId)).to.equal(submitter.address); + + await expect(consolidationMigrator.connect(submitter).selfDisallowPair(sourceOpId, targetOpId)) + .to.emit(consolidationMigrator, "ConsolidationPairDisallowed") + .withArgs(sourceOpId, targetOpId, submitter.address); + + expect(await consolidationMigrator.isPairAllowed(sourceOpId, targetOpId)).to.be.false; + expect(await consolidationMigrator.getSubmitter(sourceOpId, targetOpId)).to.equal(ethers.ZeroAddress); + }); + + it("should revert if caller is 
not the submitter", async () => { + await expect(consolidationMigrator.connect(stranger).selfDisallowPair(sourceOpId, targetOpId)) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(stranger.address, sourceOpId, targetOpId); + }); + + it("should revert if pair does not exist", async () => { + const unknownSourceOpId = 999; + const unknownTargetOpId = 888; + + await expect(consolidationMigrator.connect(submitter).selfDisallowPair(unknownSourceOpId, unknownTargetOpId)) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, unknownSourceOpId, unknownTargetOpId); + }); + + it("should remove pair from getAllowedTargets", async () => { + // Add another pair + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, 20, submitter.address); + + let targets = await consolidationMigrator.getAllowedTargets(sourceOpId); + expect(targets.length).to.equal(2); + + await consolidationMigrator.connect(submitter).selfDisallowPair(sourceOpId, targetOpId); + + targets = await consolidationMigrator.getAllowedTargets(sourceOpId); + expect(targets.length).to.equal(1); + expect(targets[0]).to.be.equal(20n); + }); + + it("should revert if called twice for the same pair", async () => { + await consolidationMigrator.connect(submitter).selfDisallowPair(sourceOpId, targetOpId); + + await expect(consolidationMigrator.connect(submitter).selfDisallowPair(sourceOpId, targetOpId)) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, sourceOpId, targetOpId); + }); + + it("should not require any role", async () => { + // submitter has no roles granted, but is the designated submitter for the pair + expect(await consolidationMigrator.hasRole(ALLOW_PAIR_ROLE, submitter.address)).to.be.false; + expect(await consolidationMigrator.hasRole(DISALLOW_PAIR_ROLE, submitter.address)).to.be.false; + + await 
expect(consolidationMigrator.connect(submitter).selfDisallowPair(sourceOpId, targetOpId)).to.emit( + consolidationMigrator, + "ConsolidationPairDisallowed", + ); + }); + }); + + context("view methods", () => { + it("isPairAllowed should return false for non-existent pair", async () => { + expect(await consolidationMigrator.isPairAllowed(999, 888)).to.be.false; + }); + + it("getAllowedTargets should return empty array for new source", async () => { + const targets = await consolidationMigrator.getAllowedTargets(999); + expect(targets.length).to.equal(0); + }); + + it("getSubmitter should return zero address for non-existent pair", async () => { + expect(await consolidationMigrator.getSubmitter(999, 888)).to.equal(ethers.ZeroAddress); + }); + + it("getAllowedTargets should return correct list after adding and removing", async () => { + const sourceOpId = 1; + + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, 10, submitter.address); + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, 20, stranger.address); + await consolidationMigrator.connect(allowPairManager).allowPair(sourceOpId, 30, admin.address); + + let targets = await consolidationMigrator.getAllowedTargets(sourceOpId); + expect(targets.length).to.equal(3); + + await consolidationMigrator.connect(disallowPairManager).disallowPair(sourceOpId, 20); + + targets = await consolidationMigrator.getAllowedTargets(sourceOpId); + expect(targets.length).to.equal(2); + expect(targets).to.include(BigInt(10)); + expect(targets).to.include(BigInt(30)); + expect(targets).to.not.include(BigInt(20)); + + // Verify submitter was cleared for removed pair + expect(await consolidationMigrator.getSubmitter(sourceOpId, 20)).to.equal(ethers.ZeroAddress); + // Verify remaining submitters are intact + expect(await consolidationMigrator.getSubmitter(sourceOpId, 10)).to.equal(submitter.address); + expect(await consolidationMigrator.getSubmitter(sourceOpId, 30)).to.equal(admin.address); 
+ }); + }); +}); diff --git a/test/0.8.25/consolidationMigrator/consolidationMigrator.deploy.test.ts b/test/0.8.25/consolidationMigrator/consolidationMigrator.deploy.test.ts new file mode 100644 index 0000000000..7bed7404bb --- /dev/null +++ b/test/0.8.25/consolidationMigrator/consolidationMigrator.deploy.test.ts @@ -0,0 +1,93 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { + ConsolidationBus__MockForConsolidationMigrator, + StakingRouter__MockForConsolidationMigrator, +} from "typechain-types"; + +import { proxify } from "lib/proxy"; + +describe("ConsolidationMigrator.sol: deployment", () => { + let stakingRouter: StakingRouter__MockForConsolidationMigrator; + let consolidationBus: ConsolidationBus__MockForConsolidationMigrator; + + before(async () => { + stakingRouter = await ethers.deployContract("StakingRouter__MockForConsolidationMigrator"); + consolidationBus = await ethers.deployContract("ConsolidationBus__MockForConsolidationMigrator"); + }); + + it("should deploy and initialize successfully with valid parameters", async () => { + const [admin] = await ethers.getSigners(); + const stakingRouterAddr = await stakingRouter.getAddress(); + const consolidationBusAddr = await consolidationBus.getAddress(); + + const impl = await ethers.deployContract("ConsolidationMigrator", [stakingRouterAddr, consolidationBusAddr, 1, 2]); + const [migrator] = await proxify({ impl, admin }); + await migrator.initialize(admin.address); + + const adminRole = await migrator.DEFAULT_ADMIN_ROLE(); + expect(await migrator.hasRole(adminRole, admin.address)).to.be.true; + expect(await migrator.getStakingRouter()).to.equal(stakingRouterAddr); + expect(await migrator.getConsolidationBus()).to.equal(consolidationBusAddr); + expect(await migrator.sourceModuleId()).to.equal(1); + expect(await migrator.targetModuleId()).to.equal(2); + }); + + it("should revert if admin is zero address on initialize", async () => { + const [admin] = await ethers.getSigners(); 
+ const stakingRouterAddr = await stakingRouter.getAddress(); + const consolidationBusAddr = await consolidationBus.getAddress(); + + const impl = await ethers.deployContract("ConsolidationMigrator", [stakingRouterAddr, consolidationBusAddr, 1, 2]); + const [migrator] = await proxify({ impl, admin }); + + await expect(migrator.initialize(ethers.ZeroAddress)).to.be.revertedWithCustomError(migrator, "AdminCannotBeZero"); + }); + + it("should revert if stakingRouter is zero address", async () => { + const consolidationBusAddr = await consolidationBus.getAddress(); + + await expect(ethers.deployContract("ConsolidationMigrator", [ethers.ZeroAddress, consolidationBusAddr, 1, 2])) + .to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationMigrator"), "ZeroArgument") + .withArgs("stakingRouter"); + }); + + it("should revert if consolidationBus is zero address", async () => { + const stakingRouterAddr = await stakingRouter.getAddress(); + + await expect(ethers.deployContract("ConsolidationMigrator", [stakingRouterAddr, ethers.ZeroAddress, 1, 2])) + .to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationMigrator"), "ZeroArgument") + .withArgs("consolidationBus"); + }); + + it("should revert if sourceModuleId is zero", async () => { + const stakingRouterAddr = await stakingRouter.getAddress(); + const consolidationBusAddr = await consolidationBus.getAddress(); + + await expect(ethers.deployContract("ConsolidationMigrator", [stakingRouterAddr, consolidationBusAddr, 0, 2])) + .to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationMigrator"), "ZeroArgument") + .withArgs("sourceModuleId"); + }); + + it("should revert if targetModuleId is zero", async () => { + const stakingRouterAddr = await stakingRouter.getAddress(); + const consolidationBusAddr = await consolidationBus.getAddress(); + + await expect(ethers.deployContract("ConsolidationMigrator", [stakingRouterAddr, consolidationBusAddr, 1, 0])) + 
.to.be.revertedWithCustomError(await ethers.getContractFactory("ConsolidationMigrator"), "ZeroArgument") + .withArgs("targetModuleId"); + }); + + it("should revert on double initialization", async () => { + const [admin] = await ethers.getSigners(); + const stakingRouterAddr = await stakingRouter.getAddress(); + const consolidationBusAddr = await consolidationBus.getAddress(); + + const impl = await ethers.deployContract("ConsolidationMigrator", [stakingRouterAddr, consolidationBusAddr, 1, 2]); + const [migrator] = await proxify({ impl, admin }); + await migrator.initialize(admin.address); + + await expect(migrator.initialize(admin.address)).to.be.revertedWithCustomError(migrator, "InvalidInitialization"); + }); +}); diff --git a/test/0.8.25/consolidationMigrator/consolidationMigrator.submit.test.ts b/test/0.8.25/consolidationMigrator/consolidationMigrator.submit.test.ts new file mode 100644 index 0000000000..8ba422ccaa --- /dev/null +++ b/test/0.8.25/consolidationMigrator/consolidationMigrator.submit.test.ts @@ -0,0 +1,282 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + ConsolidationBus__MockForConsolidationMigrator, + ConsolidationMigrator, + SourceModule__MockForConsolidationMigrator, + StakingRouter__MockForConsolidationMigrator, + TargetModule__MockForConsolidationMigrator, +} from "typechain-types"; + +import { proxify } from "lib/proxy"; + +import { Snapshot } from "test/suite"; + +import { PUBKEYS } from "../consolidation-helpers"; + +describe("ConsolidationMigrator.sol: submit", () => { + let consolidationMigrator: ConsolidationMigrator; + let stakingRouter: StakingRouter__MockForConsolidationMigrator; + let sourceModule: SourceModule__MockForConsolidationMigrator; + let targetModule: TargetModule__MockForConsolidationMigrator; + let consolidationBus: ConsolidationBus__MockForConsolidationMigrator; + let admin: HardhatEthersSigner; + let 
allowPairManager: HardhatEthersSigner; + let submitter: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + const SOURCE_MODULE_ID = 1; + const TARGET_MODULE_ID = 2; + const SOURCE_OPERATOR_ID = 100; + const TARGET_OPERATOR_ID = 200; + + let originalState: string; + + before(async () => { + [admin, allowPairManager, submitter, stranger] = await ethers.getSigners(); + + // Deploy mocks + stakingRouter = await ethers.deployContract("StakingRouter__MockForConsolidationMigrator"); + sourceModule = await ethers.deployContract("SourceModule__MockForConsolidationMigrator"); + targetModule = await ethers.deployContract("TargetModule__MockForConsolidationMigrator"); + consolidationBus = await ethers.deployContract("ConsolidationBus__MockForConsolidationMigrator"); + + // Set up staking router to return module addresses + await stakingRouter.mock__setStakingModule(SOURCE_MODULE_ID, await sourceModule.getAddress()); + await stakingRouter.mock__setStakingModule(TARGET_MODULE_ID, await targetModule.getAddress()); + + // Deploy ConsolidationMigrator + const impl = await ethers.deployContract("ConsolidationMigrator", [ + await stakingRouter.getAddress(), + await consolidationBus.getAddress(), + SOURCE_MODULE_ID, + TARGET_MODULE_ID, + ]); + [consolidationMigrator] = await proxify({ impl, admin }); + await consolidationMigrator.initialize(admin.address); + + const ALLOW_PAIR_ROLE = await consolidationMigrator.ALLOW_PAIR_ROLE(); + const DISALLOW_PAIR_ROLE = await consolidationMigrator.DISALLOW_PAIR_ROLE(); + await consolidationMigrator.connect(admin).grantRole(ALLOW_PAIR_ROLE, allowPairManager.address); + await consolidationMigrator.connect(admin).grantRole(DISALLOW_PAIR_ROLE, allowPairManager.address); + + // Allow the test pair with submitter + await consolidationMigrator + .connect(allowPairManager) + .allowPair(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, submitter.address); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => 
await Snapshot.restore(originalState)); + + context("submitConsolidationBatch", () => { + beforeEach(async () => { + // Set up source module with deposited keys (totalDeposited=2) + await sourceModule.mock__setOperatorData(SOURCE_OPERATOR_ID, 2, [PUBKEYS[0], PUBKEYS[1]]); + + // Set up target module with deposited keys (totalDeposited=2) + await targetModule.mock__setOperatorData(TARGET_OPERATOR_ID, 2, [PUBKEYS[2], PUBKEYS[3]]); + }); + + it("should submit consolidation batch from designated submitter", async () => { + const groups = [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + { sourceKeyIndices: [1], targetKeyIndex: 1 }, + ]; + + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, groups), + ) + .to.emit(consolidationMigrator, "ConsolidationSubmitted") + .withArgs( + SOURCE_OPERATOR_ID, + TARGET_OPERATOR_ID, + groups.map((g) => [g.sourceKeyIndices, g.targetKeyIndex]), + ); + + // Verify ConsolidationBus was called + expect(await consolidationBus.callCount()).to.equal(1); + expect(await consolidationBus.lastCaller()).to.equal(await consolidationMigrator.getAddress()); + expect(await consolidationBus.getLastTotalPairsCount()).to.equal(2); + }); + + it("should forward correct pubkeys to ConsolidationBus", async () => { + const groups = [{ sourceKeyIndices: [0], targetKeyIndex: 0 }]; + + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, groups); + + // Verify the pubkeys + const sourcePubkey = await consolidationBus.getLastSourcePubkeyFromGroup(0, 0); + const targetPubkey = await consolidationBus.getLastTargetPubkey(0); + + expect(sourcePubkey.toLowerCase()).to.equal(PUBKEYS[0].toLowerCase()); + expect(targetPubkey.toLowerCase()).to.equal(PUBKEYS[2].toLowerCase()); + }); + + it("should revert if caller is not the designated submitter", async () => { + await expect( + consolidationMigrator + .connect(stranger) + 
.submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(stranger.address, SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID); + }); + + it("should revert if pair is not allowed (no submitter set)", async () => { + const unknownTargetOpId = 999; + + // When pair is not allowed, there's no submitter set (address(0)) + // So caller will fail authorization check first + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, unknownTargetOpId, [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, SOURCE_OPERATOR_ID, unknownTargetOpId); + }); + + it("should revert if source key is not deposited", async () => { + // Key at index 2 exists but is not deposited (totalDeposited=2, 3 keys total) + await sourceModule.mock__setOperatorData(SOURCE_OPERATOR_ID, 2, [PUBKEYS[0], PUBKEYS[1], PUBKEYS[2]]); + // Add more target keys and make index 2 deposited + await targetModule.mock__setOperatorData(TARGET_OPERATOR_ID, 3, [PUBKEYS[2], PUBKEYS[3], PUBKEYS[0]]); + + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [2], targetKeyIndex: 2 }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "KeyNotDeposited") + .withArgs(SOURCE_MODULE_ID, SOURCE_OPERATOR_ID, 2); + }); + + it("should revert if target key is not deposited", async () => { + // totalDepositedValidators = 1, so key at index 0 is deposited, but index 1 is NOT + await targetModule.mock__setOperatorData(TARGET_OPERATOR_ID, 1, [PUBKEYS[2], PUBKEYS[3]]); + + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [0], 
targetKeyIndex: 1 }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "KeyNotDeposited") + .withArgs(TARGET_MODULE_ID, TARGET_OPERATOR_ID, 1); + }); + + it("should emit ConsolidationBus event", async () => { + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + ]), + ).to.emit(consolidationBus, "AddConsolidationRequestsCalled"); + }); + + it("should handle multiple validators in a batch", async () => { + const groups = [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + { sourceKeyIndices: [1], targetKeyIndex: 1 }, + ]; + + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, groups); + + expect(await consolidationBus.getLastTotalPairsCount()).to.equal(2); + + const sourcePubkey0 = await consolidationBus.getLastSourcePubkeyFromGroup(0, 0); + const sourcePubkey1 = await consolidationBus.getLastSourcePubkeyFromGroup(1, 0); + const targetPubkey0 = await consolidationBus.getLastTargetPubkey(0); + const targetPubkey1 = await consolidationBus.getLastTargetPubkey(1); + + expect(sourcePubkey0.toLowerCase()).to.equal(PUBKEYS[0].toLowerCase()); + expect(sourcePubkey1.toLowerCase()).to.equal(PUBKEYS[1].toLowerCase()); + expect(targetPubkey0.toLowerCase()).to.equal(PUBKEYS[2].toLowerCase()); + expect(targetPubkey1.toLowerCase()).to.equal(PUBKEYS[3].toLowerCase()); + }); + + it("should handle multi-source group consolidation (multiple sources to one target)", async () => { + // Two source keys consolidated to one target + const groups = [{ sourceKeyIndices: [0, 1], targetKeyIndex: 0 }]; + + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, groups); + + // Should produce 2 pairs in 1 group + expect(await consolidationBus.getLastTotalPairsCount()).to.equal(2); + expect(await 
consolidationBus.getLastGroupsCount()).to.equal(1); + expect(await consolidationBus.getLastGroupSize(0)).to.equal(2); + + const sourcePubkey0 = await consolidationBus.getLastSourcePubkeyFromGroup(0, 0); + const sourcePubkey1 = await consolidationBus.getLastSourcePubkeyFromGroup(0, 1); + const targetPubkey = await consolidationBus.getLastTargetPubkey(0); + + expect(sourcePubkey0.toLowerCase()).to.equal(PUBKEYS[0].toLowerCase()); + expect(sourcePubkey1.toLowerCase()).to.equal(PUBKEYS[1].toLowerCase()); + expect(targetPubkey.toLowerCase()).to.equal(PUBKEYS[2].toLowerCase()); + }); + + it("should allow new submitter to submit after allowPair update", async () => { + // Update the pair with a new submitter (stranger) + await consolidationMigrator + .connect(allowPairManager) + .allowPair(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, stranger.address); + + // Old submitter should now fail + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID); + + // New submitter should succeed + await expect( + consolidationMigrator + .connect(stranger) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + ]), + ).to.emit(consolidationMigrator, "ConsolidationSubmitted"); + }); + + it("should revert after pair is disallowed", async () => { + // Disallow the pair + await consolidationMigrator.connect(allowPairManager).disallowPair(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID); + + // Submitter should no longer be able to submit + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID, [ + { sourceKeyIndices: [0], targetKeyIndex: 0 }, + ]), + ) + 
.to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, SOURCE_OPERATOR_ID, TARGET_OPERATOR_ID); + }); + }); +}); diff --git a/test/0.8.25/contracts/ConsolidationBus__MockForConsolidationMigrator.sol b/test/0.8.25/contracts/ConsolidationBus__MockForConsolidationMigrator.sol new file mode 100644 index 0000000000..dae1c7a90f --- /dev/null +++ b/test/0.8.25/contracts/ConsolidationBus__MockForConsolidationMigrator.sol @@ -0,0 +1,72 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +/** + * @dev Mock for ConsolidationBus for ConsolidationMigrator tests + */ +contract ConsolidationBus__MockForConsolidationMigrator { + struct ConsolidationGroup { + bytes[] sourcePubkeys; + bytes targetPubkey; + } + + event AddConsolidationRequestsCalled(uint256 groupsCount, address caller); + + ConsolidationGroup[] internal _lastGroups; + address public lastCaller; + uint256 public callCount; + + bool internal _shouldRevert; + string internal _revertReason; + + function addConsolidationRequests(ConsolidationGroup[] calldata groups) external { + if (_shouldRevert) { + revert(_revertReason); + } + + delete _lastGroups; + + for (uint256 i = 0; i < groups.length; ++i) { + _lastGroups.push(); + _lastGroups[i].targetPubkey = groups[i].targetPubkey; + for (uint256 j = 0; j < groups[i].sourcePubkeys.length; ++j) { + _lastGroups[i].sourcePubkeys.push(groups[i].sourcePubkeys[j]); + } + } + lastCaller = msg.sender; + callCount++; + + emit AddConsolidationRequestsCalled(groups.length, msg.sender); + } + + function mock__setRevert(bool shouldRevert, string calldata reason) external { + _shouldRevert = shouldRevert; + _revertReason = reason; + } + + function getLastSourcePubkeyFromGroup(uint256 groupIndex, uint256 keyIndex) external view returns (bytes memory) { + return _lastGroups[groupIndex].sourcePubkeys[keyIndex]; + } + + function getLastTargetPubkey(uint256 index) external view returns (bytes 
memory) { + return _lastGroups[index].targetPubkey; + } + + function getLastGroupsCount() external view returns (uint256) { + return _lastGroups.length; + } + + function getLastGroupSize(uint256 groupIndex) external view returns (uint256) { + return _lastGroups[groupIndex].sourcePubkeys.length; + } + + function getLastTotalPairsCount() external view returns (uint256) { + uint256 total = 0; + for (uint256 i = 0; i < _lastGroups.length; ++i) { + total += _lastGroups[i].sourcePubkeys.length; + } + return total; + } +} diff --git a/test/0.8.25/contracts/ConsolidationGateway__MockForConsolidationBus.sol b/test/0.8.25/contracts/ConsolidationGateway__MockForConsolidationBus.sol new file mode 100644 index 0000000000..49d2def147 --- /dev/null +++ b/test/0.8.25/contracts/ConsolidationGateway__MockForConsolidationBus.sol @@ -0,0 +1,65 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +import {IPredepositGuarantee} from "contracts/0.8.25/vaults/interfaces/IPredepositGuarantee.sol"; + +interface IConsolidationGateway { + struct ConsolidationWitnessGroup { + bytes[] sourcePubkeys; + IPredepositGuarantee.ValidatorWitness targetWitness; + } + + function addConsolidationRequests( + ConsolidationWitnessGroup[] calldata groups, + address refundRecipient + ) external payable; +} + +contract ConsolidationGateway__MockForConsolidationBus { + event AddConsolidationRequestsCalled(uint256 groupsCount, address refundRecipient, uint256 value); + + uint256 internal _fee; + bool internal _shouldRevert; + string internal _revertReason; + + constructor() { + _fee = 1; + } + + function addConsolidationRequests( + IConsolidationGateway.ConsolidationWitnessGroup[] calldata groups, + address refundRecipient + ) external payable { + if (_shouldRevert) { + revert(_revertReason); + } + + emit AddConsolidationRequestsCalled(groups.length, refundRecipient, msg.value); + + // Count total requests and simulate refund if excess ETH was sent + uint256 
totalRequests = 0; + for (uint256 i = 0; i < groups.length; ++i) { + totalRequests += groups[i].sourcePubkeys.length; + } + uint256 totalFee = totalRequests * _fee; + if (msg.value > totalFee) { + (bool success, ) = refundRecipient.call{value: msg.value - totalFee}(""); + require(success, "Refund failed"); + } + } + + function mock__setFee(uint256 fee) external { + _fee = fee; + } + + function mock__setRevert(bool shouldRevert, string calldata reason) external { + _shouldRevert = shouldRevert; + _revertReason = reason; + } + + function mock__getFee() external view returns (uint256) { + return _fee; + } +} diff --git a/test/0.8.25/contracts/DepositCallerWrapper__MockForStakingRouter.sol b/test/0.8.25/contracts/DepositCallerWrapper__MockForStakingRouter.sol new file mode 100644 index 0000000000..1c77fc0975 --- /dev/null +++ b/test/0.8.25/contracts/DepositCallerWrapper__MockForStakingRouter.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only +pragma solidity ^0.8.25; + +interface IStakingRouter { + function deposit(uint256 _stakingModuleId, bytes calldata _depositCalldata) external payable; + function getStakingModuleMaxDepositsCount( + uint256 _stakingModuleId, + uint256 _depositableEth + ) external view returns (uint256); +} + +/// @notice Test-only wrapper that must be set as the authorized Lido caller in the router. +contract DepositCallerWrapper__MockForStakingRouter { + IStakingRouter public immutable stakingRouter; + + constructor(IStakingRouter _router) { + stakingRouter = _router; + } + + /// @notice Store temp values as operators and number of deposits per operator + deposit + /// No refund logic; requires exact msg.value. 
+ function deposit(uint256 stakingModuleId) external payable { + stakingRouter.deposit{value: msg.value}(stakingModuleId, bytes("")); + } +} diff --git a/test/0.8.25/contracts/DepositSecurityModule__MockForConsolidationGateway.sol b/test/0.8.25/contracts/DepositSecurityModule__MockForConsolidationGateway.sol new file mode 100644 index 0000000000..6a36208138 --- /dev/null +++ b/test/0.8.25/contracts/DepositSecurityModule__MockForConsolidationGateway.sol @@ -0,0 +1,12 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +contract DepositSecurityModule__MockForConsolidationGateway { + bool public isDepositsPaused; + + function mock__setDepositsPaused(bool _paused) external { + isDepositsPaused = _paused; + } +} diff --git a/test/0.8.25/contracts/Lido__MockForConsolidationGateway.sol b/test/0.8.25/contracts/Lido__MockForConsolidationGateway.sol new file mode 100644 index 0000000000..2b898c487d --- /dev/null +++ b/test/0.8.25/contracts/Lido__MockForConsolidationGateway.sol @@ -0,0 +1,16 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +contract Lido__MockForConsolidationGateway { + bool public canDepositFlag = true; + + function mock__setCanDeposit(bool _value) external { + canDepositFlag = _value; + } + + function canDeposit() external view returns (bool) { + return canDepositFlag; + } +} diff --git a/test/0.8.25/contracts/Lido__MockForStakingRouter.sol b/test/0.8.25/contracts/Lido__MockForStakingRouter.sol new file mode 100644 index 0000000000..7f3b95e29f --- /dev/null +++ b/test/0.8.25/contracts/Lido__MockForStakingRouter.sol @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.25; + +interface IStakingRouter { + function receiveDepositableEther() external payable; +} + +contract Lido__MockForStakingRouter { + uint256 internal depositableEther__mocked; + address public stakingRouter; + + event 
WithdrawDepositableEtherCalled(uint256 amount, uint256 depositsCount); + + constructor() payable {} + + receive() external payable {} + + function setStakingRouter(address _stakingRouter) external { + stakingRouter = _stakingRouter; + } + + function setDepositableEther(uint256 _depositableEther) external { + depositableEther__mocked = _depositableEther; + } + + function getDepositableEther() external view returns (uint256) { + return depositableEther__mocked; + } + + function withdrawDepositableEther(uint256 _amount, uint256 _depositsCount) external { + require(msg.sender == stakingRouter, "ONLY_STAKING_ROUTER"); + require(_amount <= depositableEther__mocked, "NOT_ENOUGH_ETHER"); + + depositableEther__mocked -= _amount; + + emit WithdrawDepositableEtherCalled(_amount, _depositsCount); + + // Send ETH to staking router via receiveDepositableEther + IStakingRouter(stakingRouter).receiveDepositableEther{value: _amount}(); + } + + // Utility to fund the mock with ETH + function fund() external payable {} +} diff --git a/test/0.8.25/contracts/Lido__MockForTopUpGateway.sol b/test/0.8.25/contracts/Lido__MockForTopUpGateway.sol new file mode 100644 index 0000000000..2ce776a5fa --- /dev/null +++ b/test/0.8.25/contracts/Lido__MockForTopUpGateway.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.25; + +contract Lido__MockForTopUpGateway { + bool public canDepositFlag = true; + + function setCanDeposit(bool value) external { + canDepositFlag = value; + } + + function canDeposit() external view returns (bool) { + return canDepositFlag; + } +} diff --git a/test/0.8.25/contracts/SourceModule__MockForConsolidationMigrator.sol b/test/0.8.25/contracts/SourceModule__MockForConsolidationMigrator.sol new file mode 100644 index 0000000000..a13dca1151 --- /dev/null +++ b/test/0.8.25/contracts/SourceModule__MockForConsolidationMigrator.sol @@ -0,0 +1,80 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +/** 
+ * @dev Mock for source staking module for ConsolidationMigrator tests. + * Implements the IStakingModule interface (getSigningKeys + getNodeOperatorSummary). + */ +contract SourceModule__MockForConsolidationMigrator { + uint256 public constant PUBKEY_LENGTH = 48; + uint256 public constant SIGNATURE_LENGTH = 96; + + struct NodeOperatorData { + uint256 totalDepositedValidators; + bytes[] pubkeys; + } + + // operatorId => data + mapping(uint256 => NodeOperatorData) internal _operators; + + function mock__setOperatorData( + uint256 operatorId, + uint256 totalDepositedValidators, + bytes[] calldata pubkeys + ) external { + _operators[operatorId].totalDepositedValidators = totalDepositedValidators; + delete _operators[operatorId].pubkeys; + for (uint256 i = 0; i < pubkeys.length; ++i) { + _operators[operatorId].pubkeys.push(pubkeys[i]); + } + } + + function getNodeOperatorSummary( + uint256 _nodeOperatorId + ) + external + view + returns ( + uint256 targetLimitMode, + uint256 targetValidatorsCount, + uint256 stuckValidatorsCount, + uint256 refundedValidatorsCount, + uint256 stuckPenaltyEndTimestamp, + uint256 totalExitedValidators, + uint256 totalDepositedValidators, + uint256 depositableValidatorsCount + ) + { + NodeOperatorData storage op = _operators[_nodeOperatorId]; + totalDepositedValidators = op.totalDepositedValidators; + return (0, 0, 0, 0, 0, 0, totalDepositedValidators, 0); + } + + // NOR interface + function getSigningKeys( + uint256 _nodeOperatorId, + uint256 _offset, + uint256 _limit + ) external view returns (bytes memory pubkeys, bytes memory signatures, bool[] memory used) { + NodeOperatorData storage op = _operators[_nodeOperatorId]; + + pubkeys = new bytes(_limit * PUBKEY_LENGTH); + signatures = new bytes(_limit * SIGNATURE_LENGTH); + used = new bool[](_limit); + + for (uint256 i = 0; i < _limit; ++i) { + uint256 keyIndex = _offset + i; + if (keyIndex < op.pubkeys.length) { + bytes storage key = op.pubkeys[keyIndex]; + for (uint256 j = 0; j < 
PUBKEY_LENGTH; ++j) { + pubkeys[i * PUBKEY_LENGTH + j] = key[j]; + } + used[i] = keyIndex < op.totalDepositedValidators; + } + } + + return (pubkeys, signatures, used); + } +} diff --git a/test/0.8.25/contracts/StakingModuleV2__MockForStakingRouter.sol b/test/0.8.25/contracts/StakingModuleV2__MockForStakingRouter.sol new file mode 100644 index 0000000000..0f1654dbcf --- /dev/null +++ b/test/0.8.25/contracts/StakingModuleV2__MockForStakingRouter.sol @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.25; + +import {IStakingModule} from "contracts/common/interfaces/IStakingModule.sol"; +import {IStakingModuleV2} from "contracts/common/interfaces/IStakingModuleV2.sol"; + +contract StakingModuleV2__MockForStakingRouter is IStakingModule, IStakingModuleV2 { + event Mock__TargetValidatorsLimitsUpdated(uint256 _nodeOperatorId, uint256 _targetLimitMode, uint256 _targetLimit); + event Mock__RefundedValidatorsCountUpdated(uint256 _nodeOperatorId, uint256 _refundedValidatorsCount); + event Mock__OnRewardsMinted(uint256 _totalShares); + event Mock__ExitedValidatorsCountUpdated(bytes _nodeOperatorIds, bytes _stuckValidatorsCounts); + + event Mock__reportValidatorExitDelay( + uint256 nodeOperatorId, + uint256 proofSlotTimestamp, + bytes publicKeys, + uint256 eligibleToExitInSec + ); + + event Mock__onValidatorExitTriggered( + uint256 _nodeOperatorId, + bytes publicKeys, + uint256 withdrawalRequestPaidFee, + uint256 exitType + ); + + function getType() external view returns (bytes32) { + return keccak256(abi.encodePacked("staking.module")); + } + + uint256 private totalExitedValidators__mocked; + uint256 private totalDepositedValidators__mocked; + uint256 private depositableValidatorsCount__mocked; + + function getStakingModuleSummary() + external + view + returns (uint256 totalExitedValidators, uint256 totalDepositedValidators, uint256 depositableValidatorsCount) + { + totalExitedValidators = 
totalExitedValidators__mocked; + totalDepositedValidators = totalDepositedValidators__mocked; + depositableValidatorsCount = depositableValidatorsCount__mocked; + } + + function mock__getStakingModuleSummary( + uint256 totalExitedValidators, + uint256 totalDepositedValidators, + uint256 depositableValidatorsCount + ) external { + totalExitedValidators__mocked = totalExitedValidators; + totalDepositedValidators__mocked = totalDepositedValidators; + depositableValidatorsCount__mocked = depositableValidatorsCount; + } + + uint256 private nodeOperatorTargetLimitMode__mocked; + uint256 private nodeOperatorTargetValidatorsCount__mocked; + uint256 private nodeOperatorStuckValidatorsCount__mocked; + uint256 private nodeOperatorRefundedValidatorsCount__mocked; + uint256 private nodeOperatorStuckPenaltyEndTimestamp__mocked; + uint256 private nodeOperatorNodeOperatorTotalExitedValidators__mocked; + uint256 private nodeOperatorNodeOperatorTotalDepositedValidators__mocked; + uint256 private nodeOperatorNodeOperatorDepositableValidatorsCount__mocked; + + function getNodeOperatorSummary( + uint256 + ) + external + view + returns ( + uint256 targetLimitMode, + uint256 targetValidatorsCount, + uint256 stuckValidatorsCount, + uint256 refundedValidatorsCount, + uint256 stuckPenaltyEndTimestamp, + uint256 totalExitedValidators, + uint256 totalDepositedValidators, + uint256 depositableValidatorsCount + ) + { + targetLimitMode = nodeOperatorTargetLimitMode__mocked; + targetValidatorsCount = nodeOperatorTargetValidatorsCount__mocked; + stuckValidatorsCount = nodeOperatorStuckValidatorsCount__mocked; + refundedValidatorsCount = nodeOperatorRefundedValidatorsCount__mocked; + stuckPenaltyEndTimestamp = nodeOperatorStuckPenaltyEndTimestamp__mocked; + totalExitedValidators = nodeOperatorNodeOperatorTotalExitedValidators__mocked; + totalDepositedValidators = nodeOperatorNodeOperatorTotalDepositedValidators__mocked; + depositableValidatorsCount = 
nodeOperatorNodeOperatorDepositableValidatorsCount__mocked; + } + + function mock__getNodeOperatorSummary( + uint256 targetLimitMode, + uint256 targetValidatorsCount, + uint256 stuckValidatorsCount, + uint256 refundedValidatorsCount, + uint256 stuckPenaltyEndTimestamp, + uint256 totalExitedValidators, + uint256 totalDepositedValidators, + uint256 depositableValidatorsCount + ) external { + nodeOperatorTargetLimitMode__mocked = targetLimitMode; + nodeOperatorTargetValidatorsCount__mocked = targetValidatorsCount; + nodeOperatorStuckValidatorsCount__mocked = stuckValidatorsCount; + nodeOperatorRefundedValidatorsCount__mocked = refundedValidatorsCount; + nodeOperatorStuckPenaltyEndTimestamp__mocked = stuckPenaltyEndTimestamp; + nodeOperatorNodeOperatorTotalExitedValidators__mocked = totalExitedValidators; + nodeOperatorNodeOperatorTotalDepositedValidators__mocked = totalDepositedValidators; + nodeOperatorNodeOperatorDepositableValidatorsCount__mocked = depositableValidatorsCount; + } + + uint256 private nonce; + + function getNonce() external view returns (uint256) { + return nonce; + } + + function mock__getNonce(uint256 newNonce) external { + nonce = newNonce; + } + + uint256 private nodeOperatorsCount__mocked; + uint256 private activeNodeOperatorsCount__mocked; + + function getNodeOperatorsCount() external view returns (uint256) { + return nodeOperatorsCount__mocked; + } + + function getActiveNodeOperatorsCount() external view returns (uint256) { + return activeNodeOperatorsCount__mocked; + } + + function mock__nodeOperatorsCount(uint256 total, uint256 active) external { + nodeOperatorsCount__mocked = total; + activeNodeOperatorsCount__mocked = active; + } + + function getNodeOperatorIsActive(uint256) external view returns (bool) { + return true; + } + + uint256[] private nodeOperatorsIds__mocked; + + function getNodeOperatorIds(uint256, uint256) external view returns (uint256[] memory nodeOperatorIds) { + return nodeOperatorsIds__mocked; + } + + function 
mock__getNodeOperatorIds(uint256[] calldata nodeOperatorsIds) external { + nodeOperatorsIds__mocked = nodeOperatorsIds; + } + + bool private onRewardsMintedShouldRevert = false; + bool private onRewardsMintedShouldRunOutGas = false; + + function onRewardsMinted(uint256 _totalShares) external { + require(!onRewardsMintedShouldRevert, "revert reason"); + + if (onRewardsMintedShouldRunOutGas) { + revert(); + } + + emit Mock__OnRewardsMinted(_totalShares); + } + + function mock__revertOnRewardsMinted(bool shouldRevert, bool shouldRunOutOfGas) external { + onRewardsMintedShouldRevert = shouldRevert; + onRewardsMintedShouldRunOutGas = shouldRunOutOfGas; + } + + event Mock__VettedSigningKeysCountDecreased(bytes _nodeOperatorIds, bytes _stuckValidatorsCounts); + + function decreaseVettedSigningKeysCount( + bytes calldata _nodeOperatorIds, + bytes calldata _vettedSigningKeysCounts + ) external { + emit Mock__VettedSigningKeysCountDecreased(_nodeOperatorIds, _vettedSigningKeysCounts); + } + + event Mock__StuckValidatorsCountUpdated(bytes _nodeOperatorIds, bytes _stuckValidatorsCounts); + + function updateStuckValidatorsCount( + bytes calldata _nodeOperatorIds, + bytes calldata _stuckValidatorsCounts + ) external { + emit Mock__StuckValidatorsCountUpdated(_nodeOperatorIds, _stuckValidatorsCounts); + } + + function updateExitedValidatorsCount( + bytes calldata _nodeOperatorIds, + bytes calldata _stuckValidatorsCounts + ) external { + emit Mock__ExitedValidatorsCountUpdated(_nodeOperatorIds, _stuckValidatorsCounts); + } + + function updateTargetValidatorsLimits( + uint256 _nodeOperatorId, + uint256 _targetLimitMode, + uint256 _targetLimit + ) external { + emit Mock__TargetValidatorsLimitsUpdated(_nodeOperatorId, _targetLimitMode, _targetLimit); + } + + event Mock__ValidatorsCountUnsafelyUpdated(uint256 _nodeOperatorId, uint256 _exitedValidatorsCount); + + function unsafeUpdateValidatorsCount(uint256 _nodeOperatorId, uint256 _exitedValidatorsCount) external { + emit 
Mock__ValidatorsCountUnsafelyUpdated(_nodeOperatorId, _exitedValidatorsCount); + } + + function obtainDepositData( + uint256 _depositsCount, + bytes calldata + ) external returns (bytes memory publicKeys, bytes memory signatures) { + publicKeys = new bytes(48 * _depositsCount); + signatures = new bytes(96 * _depositsCount); + } + + // --- Top-up mock data --- + + bytes[] private topUpPubkeys__mocked; + uint256[] private topUpAmounts__mocked; + bool private useCustomTopUpData__mocked; + bool private shouldRevert__mocked; + + function mock__setShouldRevert(bool shouldRevert) external { + shouldRevert__mocked = shouldRevert; + } + + function mock__setTopUpDepositData(uint256[] calldata amounts) external { + delete topUpAmounts__mocked; + + for (uint256 i = 0; i < amounts.length; ++i) { + topUpAmounts__mocked.push(amounts[i]); + } + + useCustomTopUpData__mocked = true; + } + + function mock__clearTopUpDepositData() external { + delete topUpAmounts__mocked; + useCustomTopUpData__mocked = false; + } + + // *** TOP-UP (used by topUp()) *** + function allocateDeposits( + uint256, + bytes[] calldata, + uint256[] calldata, + uint256[] calldata, + uint256[] calldata _topUpLimits + ) external returns (uint256[] memory topUpAmounts) { + require(!shouldRevert__mocked, "Mock: revert requested"); + + if (useCustomTopUpData__mocked) { + return topUpAmounts__mocked; + } + + return _topUpLimits; + } + + event Mock__onExitedAndStuckValidatorsCountsUpdated(); + + bool private onExitedAndStuckValidatorsCountsUpdatedShouldRevert = false; + bool private onExitedAndStuckValidatorsCountsUpdatedShouldRunOutGas = false; + + function onExitedAndStuckValidatorsCountsUpdated() external { + require(!onExitedAndStuckValidatorsCountsUpdatedShouldRevert, "revert reason"); + + if (onExitedAndStuckValidatorsCountsUpdatedShouldRunOutGas) { + revert(); + } + + emit Mock__onExitedAndStuckValidatorsCountsUpdated(); + } + + function mock__onExitedAndStuckValidatorsCountsUpdated(bool shouldRevert, bool 
shouldRunOutGas) external { + onExitedAndStuckValidatorsCountsUpdatedShouldRevert = shouldRevert; + onExitedAndStuckValidatorsCountsUpdatedShouldRunOutGas = shouldRunOutGas; + } + + event Mock__WithdrawalCredentialsChanged(); + + bool private onWithdrawalCredentialsChangedShouldRevert = false; + bool private onWithdrawalCredentialsChangedShouldRunOutGas = false; + + function onWithdrawalCredentialsChanged() external { + require(!onWithdrawalCredentialsChangedShouldRevert, "revert reason"); + + if (onWithdrawalCredentialsChangedShouldRunOutGas) { + revert(); + } + + emit Mock__WithdrawalCredentialsChanged(); + } + + function mock__onWithdrawalCredentialsChanged(bool shouldRevert, bool shouldRunOutGas) external { + onWithdrawalCredentialsChangedShouldRevert = shouldRevert; + onWithdrawalCredentialsChangedShouldRunOutGas = shouldRunOutGas; + } + + bool private shouldBePenalized__mocked; + + function reportValidatorExitDelay( + uint256 _nodeOperatorId, + uint256 _proofSlotTimestamp, + bytes calldata _publicKeys, + uint256 _eligibleToExitInSec + ) external { + emit Mock__reportValidatorExitDelay(_nodeOperatorId, _proofSlotTimestamp, _publicKeys, _eligibleToExitInSec); + } + + function onValidatorExitTriggered( + uint256 _nodeOperatorId, + bytes calldata _publicKeys, + uint256 _withdrawalRequestPaidFee, + uint256 _exitType + ) external { + emit Mock__onValidatorExitTriggered(_nodeOperatorId, _publicKeys, _withdrawalRequestPaidFee, _exitType); + } + + function isValidatorExitDelayPenaltyApplicable( + uint256 _nodeOperatorId, + uint256 _proofSlotTimestamp, + bytes calldata _publicKey, + uint256 _eligibleToExitInSec + ) external view returns (bool) { + return shouldBePenalized__mocked; + } + + function mock__isValidatorExitDelayPenaltyApplicable(bool _shouldBePenalized) external { + shouldBePenalized__mocked = _shouldBePenalized; + } + + uint256 private exitDeadlineThreshold__mocked; + + function exitDeadlineThreshold(uint256 _nodeOperatorId) external view returns (uint256) 
{ + return exitDeadlineThreshold__mocked; + } + + function mock__exitDeadlineThreshold(uint256 _threshold) external { + exitDeadlineThreshold__mocked = _threshold; + } + + uint256 private totalModuleStake_mocked; + + function getTotalModuleStake() external view returns (uint256) { + return totalModuleStake_mocked; + } + + function mock__getTotalModuleStake(uint256 _totalModuleStake) external { + totalModuleStake_mocked = _totalModuleStake; + } +} diff --git a/test/0.8.9/contracts/StakingModule__MockForStakingRouter.sol b/test/0.8.25/contracts/StakingModule__MockForStakingRouter.sol similarity index 92% rename from test/0.8.9/contracts/StakingModule__MockForStakingRouter.sol rename to test/0.8.25/contracts/StakingModule__MockForStakingRouter.sol index c7371a9268..65f3653a04 100644 --- a/test/0.8.9/contracts/StakingModule__MockForStakingRouter.sol +++ b/test/0.8.25/contracts/StakingModule__MockForStakingRouter.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: UNLICENSED // for testing purposes only -pragma solidity 0.8.9; +pragma solidity 0.8.25; import {IStakingModule} from "contracts/common/interfaces/IStakingModule.sol"; @@ -205,12 +205,31 @@ contract StakingModule__MockForStakingRouter is IStakingModule { emit Mock__ValidatorsCountUnsafelyUpdated(_nodeOperatorId, _exitedValidatorsCount); } + bytes private obtainDepositData_publicKeys__mocked; + bytes private obtainDepositData_signatures__mocked; + bool private obtainDepositData_useCustom__mocked; + function obtainDepositData( uint256 _depositsCount, bytes calldata ) external returns (bytes memory publicKeys, bytes memory signatures) { - publicKeys = new bytes(48 * _depositsCount); - signatures = new bytes(96 * _depositsCount); + if (obtainDepositData_useCustom__mocked) { + publicKeys = obtainDepositData_publicKeys__mocked; + signatures = obtainDepositData_signatures__mocked; + } else { + publicKeys = new bytes(48 * _depositsCount); + signatures = new bytes(96 * _depositsCount); + } + } + + function 
mock__obtainDepositData(bytes calldata publicKeys, bytes calldata signatures) external { + obtainDepositData_publicKeys__mocked = publicKeys; + obtainDepositData_signatures__mocked = signatures; + obtainDepositData_useCustom__mocked = true; + } + + function mock__obtainDepositDataReset() external { + obtainDepositData_useCustom__mocked = false; } event Mock__onExitedAndStuckValidatorsCountsUpdated(); diff --git a/test/0.8.25/contracts/StakingRouter__Harness.sol b/test/0.8.25/contracts/StakingRouter__Harness.sol new file mode 100644 index 0000000000..e8ac5a8d6b --- /dev/null +++ b/test/0.8.25/contracts/StakingRouter__Harness.sol @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.25; + +import {StakingRouter} from "contracts/0.8.25/sr/StakingRouter.sol"; +import {SRLib} from "contracts/0.8.25/sr/SRLib.sol"; +import {SRStorage} from "contracts/0.8.25/sr/SRStorage.sol"; +import {StakingModuleStatus, ModuleStateAccounting, RouterStateAccounting} from "contracts/0.8.25/sr/SRTypes.sol"; +import {StorageSlot} from "@openzeppelin/contracts-v5.2/utils/StorageSlot.sol"; +import {EnumerableSet} from "@openzeppelin/contracts-v5.2/utils/structs/EnumerableSet.sol"; + +contract StakingRouter__Harness is StakingRouter { + using StorageSlot for bytes32; + using EnumerableSet for EnumerableSet.AddressSet; + + // Old storage slots (must match constants in old 0.8.9 StakingRouter and SRLib) + bytes32 internal constant WITHDRAWAL_CREDENTIALS_POSITION = keccak256("lido.StakingRouter.withdrawalCredentials"); + bytes32 internal constant LIDO_POSITION = keccak256("lido.StakingRouter.lido"); + bytes32 internal constant LAST_STAKING_MODULE_ID_POSITION = keccak256("lido.StakingRouter.lastStakingModuleId"); + bytes32 internal constant STAKING_MODULES_COUNT_POSITION = keccak256("lido.StakingRouter.stakingModulesCount"); + bytes32 internal constant CONTRACT_VERSION_POSITION = keccak256("lido.Versioned.contractVersion"); + + // New 
storage slots + // keccak256(abi.encode(uint256(keccak256("openzeppelin.storage.Initializable")) - 1)) & ~bytes32(uint256(0xff)) + bytes32 private constant INITIALIZABLE_STORAGE = 0xf0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00; + + /// Mock values matching old 0.8.9 StakingRouter state + bytes32 public constant WC_01_MOCK = bytes32(0x0100000000000000000000001111111111111111111111111111111111111111); + address public constant LIDO_ADDRESS_MOCK = 0x2222222222222222222222222222222222222222; + uint256 public constant LAST_STAKING_MODULE_ID_MOCK = 1; + uint256 public constant STAKING_MODULES_COUNT_MOCK = 0; + uint256 public constant CONTRACT_VERSION_V3 = 3; + + constructor( + address _depositContract, + address _lido, + address _lidoLocator, + uint256 _maxEBType1, + uint256 _maxEBType2 + ) StakingRouter(_depositContract, _lido, _lidoLocator, _maxEBType1, _maxEBType2) {} + + /// @notice Simulates old 0.8.9 StakingRouter state before v4 migration. + /// Sets all old unstructured storage slots that _migrateStorage() reads and cleans up. 
+ function testing_initializeV3() external { + WITHDRAWAL_CREDENTIALS_POSITION.getBytes32Slot().value = WC_01_MOCK; + LIDO_POSITION.getAddressSlot().value = LIDO_ADDRESS_MOCK; + LAST_STAKING_MODULE_ID_POSITION.getUint256Slot().value = LAST_STAKING_MODULE_ID_MOCK; + STAKING_MODULES_COUNT_POSITION.getUint256Slot().value = STAKING_MODULES_COUNT_MOCK; + CONTRACT_VERSION_POSITION.getUint256Slot().value = CONTRACT_VERSION_V3; + } + + /// @notice Checks that old storage slots are cleaned up after migration + function testing_getOldLidoPosition() external view returns (address) { + return LIDO_POSITION.getAddressSlot().value; + } + + function testing_getOldWcPosition() external view returns (bytes32) { + return WITHDRAWAL_CREDENTIALS_POSITION.getBytes32Slot().value; + } + + function testing_getOldContractVersion() external view returns (uint256) { + return CONTRACT_VERSION_POSITION.getUint256Slot().value; + } + + function testing_getOldLastModuleIdPosition() external view returns (uint256) { + return LAST_STAKING_MODULE_ID_POSITION.getUint256Slot().value; + } + + function testing_getOldModulesCountPosition() external view returns (uint256) { + return STAKING_MODULES_COUNT_POSITION.getUint256Slot().value; + } + + /// @notice Grant a role inside the OLD AccessControl storage (OZ v4.4) + function testing_grantRoleOld(bytes32 role, address account) external { + _storageRoles()[role].members[account] = true; + _storageRoleMembers()[role].add(account); + } + + /// @notice Read a role grant from the OLD AccessControl storage (OZ v4.4) + function testing_hasRoleOld(bytes32 role, address account) external view returns (bool) { + return _storageRoles()[role].members[account]; + } + + function testing_getLastModuleId() public view returns (uint256) { + return SRStorage.getRouterState().lastModuleId; + } + + function testing_setVersion(uint256 version) public { + _getInitializableStorage_Mock()._initialized = uint64(version); + } + + function testing_setStakingModuleStatus(uint256 
_stakingModuleId, StakingModuleStatus _status) external { + SRLib._setModuleStatus(_stakingModuleId, _status); + } + + function testing_setStakingModuleAccounting( + uint256 _stakingModuleId, + uint64 validatorsBalanceGwei, + uint64 exitedValidatorsCount + ) external { + ModuleStateAccounting storage moduleAcc = SRStorage.getModuleState(_stakingModuleId).accounting; + RouterStateAccounting storage routerAcc = SRStorage.getRouterState().accounting; + + uint64 totalValidatorsBalanceGwei = routerAcc.validatorsBalanceGwei; + + // update totals incrementally as we iterate through the part of modules in general case + // 1. subtract old values + unchecked { + totalValidatorsBalanceGwei -= moduleAcc.validatorsBalanceGwei; + } + // 2. validate and add new values + + unchecked { + totalValidatorsBalanceGwei += validatorsBalanceGwei; + } + + routerAcc.validatorsBalanceGwei = totalValidatorsBalanceGwei; + + moduleAcc.validatorsBalanceGwei = validatorsBalanceGwei; + moduleAcc.exitedValidatorsCount = exitedValidatorsCount; + } + + function _getInitializableStorage_Mock() private pure returns (InitializableStorage storage $) { + assembly { + $.slot := INITIALIZABLE_STORAGE + } + } + + // OZ AccessControl v.4.4 + + struct RoleDataOld { + mapping(address => bool) members; + bytes32 adminRole; + } + + /// @dev OZ AccessControlEnumerable _roleMembers mapping storage reference + function _storageRoleMembers() private pure returns (mapping(bytes32 => EnumerableSet.AddressSet) storage $) { + bytes32 position = keccak256("openzeppelin.AccessControlEnumerable._roleMembers"); + assembly { + $.slot := position + } + } + + /// @dev OZ AccessControl _roles mapping storage reference + function _storageRoles() private pure returns (mapping(bytes32 => RoleDataOld) storage $) { + bytes32 position = keccak256("openzeppelin.AccessControl._roles"); + assembly { + $.slot := position + } + } +} diff --git a/test/0.8.25/contracts/StakingRouter__MockForConsolidationMigrator.sol 
b/test/0.8.25/contracts/StakingRouter__MockForConsolidationMigrator.sol new file mode 100644 index 0000000000..44fb9b29f6 --- /dev/null +++ b/test/0.8.25/contracts/StakingRouter__MockForConsolidationMigrator.sol @@ -0,0 +1,48 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +contract StakingRouter__MockForConsolidationMigrator { + struct StakingModule { + uint24 id; + address stakingModuleAddress; + uint16 stakingModuleFee; + uint16 treasuryFee; + uint16 stakeShareLimit; + uint8 status; + string name; + uint64 lastDepositAt; + uint256 lastDepositBlock; + uint256 exitedValidatorsCount; + uint16 priorityExitShareThreshold; + uint64 maxDepositsPerBlock; + uint64 minDepositBlockDistance; + uint8 withdrawalCredentialsType; + } + + mapping(uint256 => StakingModule) internal _modules; + + function mock__setStakingModule(uint256 moduleId, address moduleAddress) external { + _modules[moduleId] = StakingModule({ + id: uint24(moduleId), + stakingModuleAddress: moduleAddress, + stakingModuleFee: 0, + treasuryFee: 0, + stakeShareLimit: 0, + status: 0, + name: "", + lastDepositAt: 0, + lastDepositBlock: 0, + exitedValidatorsCount: 0, + priorityExitShareThreshold: 0, + maxDepositsPerBlock: 0, + minDepositBlockDistance: 0, + withdrawalCredentialsType: 0 + }); + } + + function getStakingModule(uint256 _stakingModuleId) external view returns (StakingModule memory) { + return _modules[_stakingModuleId]; + } +} diff --git a/test/0.8.25/contracts/StakingRouter__MockForTopUpGateway.sol b/test/0.8.25/contracts/StakingRouter__MockForTopUpGateway.sol new file mode 100644 index 0000000000..949b272031 --- /dev/null +++ b/test/0.8.25/contracts/StakingRouter__MockForTopUpGateway.sol @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.25; + +contract StakingRouter__MockForTopUpGateway { + mapping(uint256 => bytes32) internal withdrawalCredentials; + mapping(uint256 => bool) internal moduleExists; + mapping(uint256 => 
bool) internal moduleIsActive; + + event TopUpCalled( + uint256 stakingModuleId, + uint256[] keyIndices, + uint256[] operatorIds, + bytes[] pubkeys, + uint256[] topUpLimits + ); + + uint256 public topUpCalls; + + function setWithdrawalCredentials(uint256 moduleId, bytes32 wc) external { + withdrawalCredentials[moduleId] = wc; + moduleExists[moduleId] = true; + moduleIsActive[moduleId] = true; + } + + function setModuleActive(uint256 moduleId, bool active) external { + moduleExists[moduleId] = true; + moduleIsActive[moduleId] = active; + } + + function getStakingModuleWithdrawalCredentials(uint256 moduleId) external view returns (bytes32) { + return withdrawalCredentials[moduleId]; + } + + function hasStakingModule(uint256 moduleId) public view returns (bool) { + return moduleExists[moduleId]; + } + + function canDeposit(uint256 _stakingModuleId) external view returns (bool) { + return hasStakingModule(_stakingModuleId) && getStakingModuleIsActive(_stakingModuleId); + } + + function getStakingModuleIsActive(uint256 moduleId) public view returns (bool) { + return moduleIsActive[moduleId]; + } + + function topUp( + uint256 _stakingModuleId, + uint256[] calldata _keyIndices, + uint256[] calldata _operatorIds, + bytes[] calldata _pubkeys, + uint256[] calldata _topUpLimits + ) external { + unchecked { + ++topUpCalls; + } + + emit TopUpCalled(_stakingModuleId, _keyIndices, _operatorIds, _pubkeys, _topUpLimits); + } +} diff --git a/test/0.8.25/contracts/TargetModule__MockForConsolidationMigrator.sol b/test/0.8.25/contracts/TargetModule__MockForConsolidationMigrator.sol new file mode 100644 index 0000000000..1b53e90d8e --- /dev/null +++ b/test/0.8.25/contracts/TargetModule__MockForConsolidationMigrator.sol @@ -0,0 +1,76 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +/** + * @dev Mock for target staking module (CMv2/NOR) for ConsolidationMigrator tests + */ +contract TargetModule__MockForConsolidationMigrator { + 
uint256 public constant PUBKEY_LENGTH = 48; + + struct NodeOperatorData { + uint256 totalDepositedValidators; + bytes[] pubkeys; + } + + // operatorId => data + mapping(uint256 => NodeOperatorData) internal _operators; + + function mock__setOperatorData( + uint256 operatorId, + uint256 totalDepositedValidators, + bytes[] calldata pubkeys + ) external { + _operators[operatorId].totalDepositedValidators = totalDepositedValidators; + delete _operators[operatorId].pubkeys; + for (uint256 i = 0; i < pubkeys.length; ++i) { + _operators[operatorId].pubkeys.push(pubkeys[i]); + } + } + + function getNodeOperatorSummary( + uint256 _nodeOperatorId + ) + external + view + returns ( + uint256 targetLimitMode, + uint256 targetValidatorsCount, + uint256 stuckValidatorsCount, + uint256 refundedValidatorsCount, + uint256 stuckPenaltyEndTimestamp, + uint256 totalExitedValidators, + uint256 totalDepositedValidators, + uint256 depositableValidatorsCount + ) + { + NodeOperatorData storage op = _operators[_nodeOperatorId]; + totalDepositedValidators = op.totalDepositedValidators; + // Other values are zero by default + return (0, 0, 0, 0, 0, 0, totalDepositedValidators, 0); + } + + // CMv2 interface + function getSigningKeys( + uint256 _nodeOperatorId, + uint256 _offset, + uint256 _limit + ) external view returns (bytes memory pubkeys) { + NodeOperatorData storage op = _operators[_nodeOperatorId]; + + pubkeys = new bytes(_limit * PUBKEY_LENGTH); + + for (uint256 i = 0; i < _limit; ++i) { + uint256 keyIndex = _offset + i; + if (keyIndex < op.pubkeys.length) { + bytes storage key = op.pubkeys[keyIndex]; + for (uint256 j = 0; j < PUBKEY_LENGTH; ++j) { + pubkeys[i * PUBKEY_LENGTH + j] = key[j]; + } + } + } + + return pubkeys; + } +} diff --git a/test/0.8.25/contracts/TopUpGateway__Harness.sol b/test/0.8.25/contracts/TopUpGateway__Harness.sol new file mode 100644 index 0000000000..cd4026e308 --- /dev/null +++ b/test/0.8.25/contracts/TopUpGateway__Harness.sol @@ -0,0 +1,49 @@ +// 
SPDX-License-Identifier: MIT +pragma solidity 0.8.25; + +import {TopUpGateway} from "contracts/0.8.25/TopUpGateway.sol"; +import {GIndex} from "contracts/common/lib/GIndex.sol"; +import {BeaconRootData, ValidatorWitness} from "contracts/common/interfaces/TopUpWitness.sol"; + +contract TopUpGateway__Harness is TopUpGateway { + constructor( + address _lidoLocator, + GIndex _gIFirstValidatorPrev, + GIndex _gIFirstValidatorCurr, + uint64 _pivotSlot, + uint256 _slotsPerEpoch + ) TopUpGateway(_lidoLocator, _gIFirstValidatorPrev, _gIFirstValidatorCurr, _pivotSlot, _slotsPerEpoch) {} + + function harness_setLastTopUpData() external { + _setLastTopUpData(); + } + + function harness_setLastTopUpTimestamp(uint256 _timestamp) external { + _gatewayStorage().lastTopUpTimestamp = uint32(_timestamp); + } + + function harness_setLastTopUpBlock(uint256 _block) external { + _gatewayStorage().lastTopUpBlock = uint32(_block); + } + + function harness_setMaxValidatorsPerTopUp(uint256 newValue) external { + _setMaxValidatorsPerTopUp(newValue); + } + + function harness_setMinBlockDistance(uint256 newValue) external { + _setMinBlockDistance(newValue); + } + + function harness_getLocator() external view returns (address) { + return address(LOCATOR); + } + + function _verifyValidator( + BeaconRootData calldata, + ValidatorWitness calldata, + uint256, + bytes32 + ) internal view override { + // no-op for harness; verification is covered separately + } +} diff --git a/test/0.8.25/contracts/WithdrawalVault__MockForConsolidationGateway.sol b/test/0.8.25/contracts/WithdrawalVault__MockForConsolidationGateway.sol new file mode 100644 index 0000000000..bc4c4b0c8f --- /dev/null +++ b/test/0.8.25/contracts/WithdrawalVault__MockForConsolidationGateway.sol @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity 0.8.25; + +contract WithdrawalVault__MockForConsolidationGateway { + event AddConsolidationRequestsCalled(bytes[] sourcePubkeys, bytes[] 
targetPubkeys); + + uint256 internal _fee; + + constructor() { + _fee = 1; + } + + function addConsolidationRequests(bytes[] calldata sourcePubkeys, bytes[] calldata targetPubkeys) external payable { + emit AddConsolidationRequestsCalled(sourcePubkeys, targetPubkeys); + } + + function getConsolidationRequestFee() external view returns (uint256) { + return _fee; + } + + function mock__setFee(uint256 fee) external { + _fee = fee; + } +} diff --git a/test/0.8.25/srv3/clValidatorProofVerifier.test.ts b/test/0.8.25/srv3/clValidatorProofVerifier.test.ts new file mode 100644 index 0000000000..556dfaf74e --- /dev/null +++ b/test/0.8.25/srv3/clValidatorProofVerifier.test.ts @@ -0,0 +1,523 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { CLValidatorVerifier__Harness, SSZValidatorsMerkleTree } from "typechain-types"; + +import { generateBeaconHeader, generateValidator, randomBytes32, setBeaconBlockRoot } from "lib/pdg"; +import { prepareLocalMerkleTree } from "lib/top-ups"; + +const STATIC_VALIDATOR = { + blockRoot: "0xbe928e3a9fa76b916df79d78a8b67237f9b133269bb421f37490b7624abad452", + gIFirstValidator: "0x0000000000000000000000000000000000000000000000000096000000000028", + beaconRootData: { + childBlockTimestamp: 1769723675n, + slot: 13574970n, + proposerIndex: 1704508n, + }, + validators: [ + { + index: 12345, + witness: { + proofValidator: [ + "0x216b6e8fa6cc4f005b56c12afdf98fad45ece56133c8e460fa4141d4003776aa", + "0xc9cd3df16c39ee2ab805653e93aa7c66dfa8b4313b42367e0e2c93b97c467a7c", + "0xbccc857f25b04e4ffbfb3bb4a739f2ee21668f9ac5e6d6ffe243a83bd53773dd", + "0x9428eb489f519010c69549cec7acc9e93ed5be99de26feda1434d36821ae325d", + "0x286483026731535ec459bbe6299db5d838261f2da5cbafd85630bca4e8ebebb0", + "0x8b6f3cb97fe65b7cdbda7ee19b403bc148a6f4c185b2e06aa24f26696edf9274", + "0x2502294ada8a819553c36c45e10f6d37230b1bfe4a60c3b122e25ec7687e7b06", + "0x71d33773e8b437e94c30b30980472ef59686fc07c79eb513dae455c2b3feeddb", + 
"0x4fa851a66a442c140c6cb5d038ee03e9f2538780d455c57cb752eca56e874f2a", + "0x9e4db5b11d21e0d57de169caa2a129555275cf59e612cefea0da82d9f2a9b56a", + "0x7d95d434555b5cbfac0c34585b232314a53cc11a3f80cab5a3bd3c8824247e08", + "0x94d35de0bc90861fef95220f1dc8bd90738f2057ac801454ca7a81ecdc2f5a0e", + "0xf9161f3c69d468aae3ae78deae56e59e4a9722dae2dab2d8427e16ec401acafa", + "0xfdfde41dd4fbee3943abf104c54689f1587e821f018ddee1ce6838d4f1fe3024", + "0x6f4a3562c9b16e8e63d5b956a3305b37efb4e716777867bbf22825e9185b67b4", + "0x2ba6010fd77fc624970171c55647a13f75680122802f168fe16553a4bf251d33", + "0xb8a2b7d9d041154028a82877bdc2b4adb76475d4797f3ac586d319c76644309e", + "0xfb2f06c2b4c43f7252844db5fff60e0bfc207bdcecaae2fc53c37f1b9e03e50a", + "0x32c59b5c8c804d2a3c4c72415f1afc3d0db5c80c0bd5a8e404150121ad340abe", + "0x9a07eeffcc8578a939d457d107ec733bf3b121a7ff9f84e179931ee9237be7cb", + "0xf302fc1c45667fe834ab5537774ae4679dd6d9d4fca3e0a6b6dc6d6dd84d48ba", + "0xff6fa857e6a6b00c6f71ea4c5bf522535561ca25abd32389677b26c5a4b140df", + "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", + "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", + "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", + "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", + "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", + "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", + "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", + "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", + "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", + "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7", + "0xc6f67e02e6e4e1bdefb994c6098953f34636ba2b6ca20a4721d2b26a886722ff", + "0x1c9a7e5ff1cf48b4ad1582d3f4e4a1004f3b20d8c5a2b71387a4254ad933ebc5", + "0x2f075ae229646b6f6aed19a5e372cf295081401eb893ff599b3f9acc0c0d3e7d", + 
"0x328921deb59612076801e8cd61592107b5c67c79b846595cc6320c395b46362c", + "0xbfb909fdb236ad2411b4e4883810a074b840464689986c3f8a8091827e17c327", + "0x55d8fb3687ba3ba49f342c77f5a1f89bec83d811446e1a467139213d640b6a74", + "0xf7210d4f8e7e1039790e7bf4efa207555a10a6db1dd4b95da313aaa88b88fe76", + "0xad21b516cbc645ffe34ab5de1c8aef8cd4e7f8d2b51e8e1456adc7563cda206f", + "0x5e8d210000000000000000000000000000000000000000000000000000000000", + "0xc6341f0000000000000000000000000000000000000000000000000000000000", + "0x797d496cea42b783b4ada624d44fa8d0fd7ff09214509c1fab9dd7618dec8db3", + "0xd492b2a4246027ef1a1fa848bbae345f077680e86b5fe0a394251b63da1f9381", + "0x044cd392c78edc7bbda4544fa482c11effa29ac38ea4c87a4dd11bb0b4f5e0b5", + "0x4db7cb7fae529d04f8c42467d5ab71190aba9ef6982e8c5aecfb682eaaf0024e", + "0x79a3cf55bfd7c33308555b76aec5b6b6dfa1c4b628773cd0657a5bd00c9255d5", + "0xa78bc2eae77405eb3badf1a31e7c5b46cf44e0fb90b25c1ac3e39d9368c73ac3", + "0x4a4eb09f597003c58696430554b7154a878f31c09422870e70aa3b34c928e30c", + "0x420db8b9116cae945235fb92dd224c30bda527f31b71e859e8be5ad8b33f83ba", + ], + pubkey: "0x80773a007f9e496a196b8f28fae04ddaa72fa65c0f8a98145a1e192082c3edcf7cee891ccf1d6b6fee0abe0045b9f61b", + effectiveBalance: 0n, + activationEligibilityEpoch: 0n, + activationEpoch: 0n, + exitEpoch: 400136n, + withdrawableEpoch: 400392n, + slashed: false, + }, + }, + { + index: 67890, + witness: { + proofValidator: [ + "0x48c9ab2d18314cc8b31d343abaf430e32165ffece1333c4b30598c3e653bb8c4", + "0xe1150bdb10f20186ac2d48c874bcd8ee07201d1082351e56ad6b232b6ede0ed9", + "0x0e51272c8f40696dd2a1af27f1b4941676e778bd015fe03aee65901ba576da74", + "0x8ee28dca9c22ac9c0ffe72524012dde0e36ae3b421768fa08bcfb78231515aeb", + "0x6ac30c0e3188ecdb2c7f7ad4c27e936bb449d0c2d89d43a6f0c4348b5d3f8da9", + "0x2ef2d2287454ef3bb5066e139c4dfb3042b423c3c05e379d8885fd5feb205827", + "0x9ac6b7bba6408f9e9be2f9a0fca03771d3a5f1c817314c90a0402b868319fb5f", + "0x00acd7dfea9c4c686a6ccd697ddd9d3dbe44b771c6bc7fa73943b23256320b34", + 
"0x88e66715b98a5621b58dc5d9baa5fb4a9c07cd8e28a2ff771e02d1ddc7b1af52", + "0xc3c233aa48ac7d546942cd42a162e12276fe8a061df5bb07adc8c0d1f1e5f94a", + "0xdc76934849f2b932a88326e3ff1aa140c042bc541a3453e5d0909c2f4378850a", + "0x57455c42a8f749c6c849c9ba01b2279d192ab1e18ee8319ace30bd549c375349", + "0x99276db9294041d58d89ce524a33a4d60bc2b5019bf6bc3d6de02e952f6e34bc", + "0xfb73dc74945a6f29dd4b3f1b0fa938f77926e4a1256ea3407067cf66ff284af4", + "0x19641f85d86a45b1bff5d9792c0a622328f645815e32184e1b0c13bd2eedd0f0", + "0x261abd8edbccfb08a970621e9330115f97177619f9f657c091d6b05b2056a59d", + "0x2f015c6b4fc7f03cbd3366bbbf96574901b81742af5e98b0f01cc50705b25ceb", + "0xfb2f06c2b4c43f7252844db5fff60e0bfc207bdcecaae2fc53c37f1b9e03e50a", + "0x32c59b5c8c804d2a3c4c72415f1afc3d0db5c80c0bd5a8e404150121ad340abe", + "0x9a07eeffcc8578a939d457d107ec733bf3b121a7ff9f84e179931ee9237be7cb", + "0xf302fc1c45667fe834ab5537774ae4679dd6d9d4fca3e0a6b6dc6d6dd84d48ba", + "0xff6fa857e6a6b00c6f71ea4c5bf522535561ca25abd32389677b26c5a4b140df", + "0xfeb3c337d7a51a6fbf00b9e34c52e1c9195c969bd4e7a0bfd51d5c5bed9c1167", + "0xe71f0aa83cc32edfbefa9f4d3e0174ca85182eec9f3a09f6a6c0df6377a510d7", + "0x31206fa80a50bb6abe29085058f16212212a60eec8f049fecb92d8c8e0a84bc0", + "0x21352bfecbeddde993839f614c3dac0a3ee37543f9b412b16199dc158e23b544", + "0x619e312724bb6d7c3153ed9de791d764a366b389af13c58bf8a8d90481a46765", + "0x7cdd2986268250628d0c10e385c58c6191e6fbe05191bcc04f133f2cea72c1c4", + "0x848930bd7ba8cac54661072113fb278869e07bb8587f91392933374d017bcbe1", + "0x8869ff2c22b28cc10510d9853292803328be4fb0e80495e8bb8d271f5b889636", + "0xb5fe28e79f1b850f8658246ce9b6a1e7b49fc06db7143e8fe0b4f2b0c5523a5c", + "0x985e929f70af28d0bdd1a90a808f977f597c7c778c489e98d3bd8910d31ac0f7", + "0xc6f67e02e6e4e1bdefb994c6098953f34636ba2b6ca20a4721d2b26a886722ff", + "0x1c9a7e5ff1cf48b4ad1582d3f4e4a1004f3b20d8c5a2b71387a4254ad933ebc5", + "0x2f075ae229646b6f6aed19a5e372cf295081401eb893ff599b3f9acc0c0d3e7d", + 
"0x328921deb59612076801e8cd61592107b5c67c79b846595cc6320c395b46362c", + "0xbfb909fdb236ad2411b4e4883810a074b840464689986c3f8a8091827e17c327", + "0x55d8fb3687ba3ba49f342c77f5a1f89bec83d811446e1a467139213d640b6a74", + "0xf7210d4f8e7e1039790e7bf4efa207555a10a6db1dd4b95da313aaa88b88fe76", + "0xad21b516cbc645ffe34ab5de1c8aef8cd4e7f8d2b51e8e1456adc7563cda206f", + "0x5e8d210000000000000000000000000000000000000000000000000000000000", + "0xc6341f0000000000000000000000000000000000000000000000000000000000", + "0x797d496cea42b783b4ada624d44fa8d0fd7ff09214509c1fab9dd7618dec8db3", + "0xd492b2a4246027ef1a1fa848bbae345f077680e86b5fe0a394251b63da1f9381", + "0x044cd392c78edc7bbda4544fa482c11effa29ac38ea4c87a4dd11bb0b4f5e0b5", + "0x4db7cb7fae529d04f8c42467d5ab71190aba9ef6982e8c5aecfb682eaaf0024e", + "0x79a3cf55bfd7c33308555b76aec5b6b6dfa1c4b628773cd0657a5bd00c9255d5", + "0xa78bc2eae77405eb3badf1a31e7c5b46cf44e0fb90b25c1ac3e39d9368c73ac3", + "0x4a4eb09f597003c58696430554b7154a878f31c09422870e70aa3b34c928e30c", + "0x420db8b9116cae945235fb92dd224c30bda527f31b71e859e8be5ad8b33f83ba", + ], + pubkey: "0x85c12b9cd79c0fd7712db78245d14583c465e7c4cf4045b83ca34b1f148d85a1fe16dd2004f3332e8dc6312793f5db4a", + effectiveBalance: 0n, + activationEligibilityEpoch: 7074n, + activationEpoch: 11751n, + exitEpoch: 195058n, + withdrawableEpoch: 195314n, + slashed: false, + }, + }, + ], +}; + +describe("CLTopUpProofVerifier", () => { + let sszMerkleTree: SSZValidatorsMerkleTree; + let gIFirstValidator: string; + let firstValidatorLeafIndex: bigint; + let verifier: CLValidatorVerifier__Harness; + + before(async () => { + // 1) Build a local SSZ tree once + const localTree = await prepareLocalMerkleTree(); + sszMerkleTree = localTree.stateTree; + gIFirstValidator = localTree.gIFirstValidator; + firstValidatorLeafIndex = localTree.firstValidatorLeafIndex; + + // populate merkle tree with validators + for (let i = 1; i < 100; i++) { + const v = generateValidator().container; + await 
sszMerkleTree.addValidatorsLeaf(v); + } + + // 2) Deploy the verifier (same GI for prev/curr, no pivot) + verifier = await ethers.deployContract("CLValidatorVerifier__Harness", [ + gIFirstValidator, // GI_FIRST_VALIDATOR_PREV + gIFirstValidator, // GI_FIRST_VALIDATOR_CURR + 0, // PIVOT_SLOT + ]); + }); + + it("verifies full Validator container at head under EIP-4788", async () => { + // 1) Create an 'active' validator at the target epoch + const v = generateValidator(); + const FAR_FUTURE = (1n << 64n) - 1n; + + v.container.slashed = false; + v.container.activationEligibilityEpoch = 1n; + v.container.activationEpoch = 2n; + v.container.exitEpoch = FAR_FUTURE; + v.container.withdrawableEpoch = FAR_FUTURE; + + const expectedWC = v.container.withdrawalCredentials; + + // Insert validator into the local SSZ tree + await sszMerkleTree.addValidatorsLeaf(v.container); + + // Compute its index in validators[i] + const leafCount = await sszMerkleTree.validatorsLeafCount(); + // Index = (current leaves - 1) - firstValidatorLeafIndex + const validatorIndex = Number(leafCount - 1n - firstValidatorLeafIndex); + + // Anchor the current state_root into EIP-4788 via a header at SLOT + const SLOT = 3200; // epoch = 100 (greater than activationEpoch) + const stateRoot = await sszMerkleTree.getStateRoot(); + const beaconBlockHeader = await generateBeaconHeader(stateRoot, SLOT); + const headerHash = await sszMerkleTree.beaconBlockHeaderHashTreeRoot(beaconBlockHeader); + const childBlockTimestamp = await setBeaconBlockRoot(headerHash); + + // Build proof: + // - stateProof: validators[i] → validators_root → state_root + // - headerProof: state_root → … → beacon_block_root (contains parent(slot, proposer) node) + const validator_proofs = await sszMerkleTree.getValidatorProof(firstValidatorLeafIndex + BigInt(validatorIndex)); + + // state_root -> beacon_block_root + const headerMerkle = await sszMerkleTree.getBeaconBlockHeaderProof(beaconBlockHeader); + const proofValidator = 
[...validator_proofs, ...headerMerkle.proof]; + + const beaconRootData = { + childBlockTimestamp, + slot: beaconBlockHeader.slot, + proposerIndex: beaconBlockHeader.proposerIndex, + }; + + // 2) Validator witness (validator container only) + const validatorWitness = { + proofValidator, + pubkey: v.container.pubkey, + effectiveBalance: v.container.effectiveBalance, + slashed: v.container.slashed, + exitEpoch: v.container.exitEpoch, + activationEligibilityEpoch: v.container.activationEligibilityEpoch, + activationEpoch: v.container.activationEpoch, + withdrawableEpoch: v.container.withdrawableEpoch, + }; + + // 4) Call harness + await verifier.TEST_verifyValidator(beaconRootData, validatorWitness, validatorIndex, expectedWC); + + // 5) Negative: wrong WC must fail + const wrongWC = "0x" + "11".repeat(32); + await expect(verifier.TEST_verifyValidator(beaconRootData, validatorWitness, validatorIndex, wrongWC)).to.be + .reverted; + }); + + it("don't revert with ValidatorIsSlashed when slashed = true", async () => { + const v = generateValidator(); + const FAR_FUTURE = (1n << 64n) - 1n; + + v.container.slashed = true; + v.container.activationEligibilityEpoch = 1n; + v.container.activationEpoch = 2n; + v.container.exitEpoch = FAR_FUTURE; + v.container.withdrawableEpoch = FAR_FUTURE; + + const expectedWC = v.container.withdrawalCredentials; + + await sszMerkleTree.addValidatorsLeaf(v.container); + + const leafCount = await sszMerkleTree.validatorsLeafCount(); + const validatorIndex = Number(leafCount - 1n - firstValidatorLeafIndex); + + const SLOT = 3200; // epoch = 100 + const stateRoot = await sszMerkleTree.getStateRoot(); + const header = await generateBeaconHeader(stateRoot, SLOT); + const headerHash = await sszMerkleTree.beaconBlockHeaderHashTreeRoot(header); + const childBlockTimestamp = await setBeaconBlockRoot(headerHash); + + // validator[i] -> validators_root -> state_root' + const validator_proofs = await sszMerkleTree.getValidatorProof(firstValidatorLeafIndex + 
BigInt(validatorIndex)); + + const headerMerkle = await sszMerkleTree.getBeaconBlockHeaderProof(header); + + const proofValidator = [...validator_proofs, ...headerMerkle.proof]; + + const beaconRootData = { + childBlockTimestamp, + slot: header.slot, + proposerIndex: header.proposerIndex, + }; + + const validatorWitness = { + proofValidator, + pubkey: v.container.pubkey, + effectiveBalance: v.container.effectiveBalance, + slashed: v.container.slashed, + exitEpoch: v.container.exitEpoch, + activationEligibilityEpoch: v.container.activationEligibilityEpoch, + activationEpoch: v.container.activationEpoch, + withdrawableEpoch: v.container.withdrawableEpoch, + }; + + await expect(verifier.TEST_verifyValidator(beaconRootData, validatorWitness, validatorIndex, expectedWC)).to.not.be + .rejected; + }); + + it("don't revert when activationEpoch > epoch(slot)", async () => { + const v = generateValidator(); + const FAR_FUTURE = (1n << 64n) - 1n; + + v.container.slashed = false; + v.container.activationEligibilityEpoch = 1n; + v.container.activationEpoch = 101n; // > epoch(slot=3200)=100 + v.container.exitEpoch = FAR_FUTURE; + v.container.withdrawableEpoch = FAR_FUTURE; + + const expectedWC = v.container.withdrawalCredentials; + + await sszMerkleTree.addValidatorsLeaf(v.container); + + const leafCount = await sszMerkleTree.validatorsLeafCount(); + const validatorIndex = Number(leafCount - 1n - firstValidatorLeafIndex); + + const SLOT = 3200; + const stateRoot = await sszMerkleTree.getStateRoot(); + const header = await generateBeaconHeader(stateRoot, SLOT); + const headerHash = await sszMerkleTree.beaconBlockHeaderHashTreeRoot(header); + const childBlockTimestamp = await setBeaconBlockRoot(headerHash); + + const validator_proofs = await sszMerkleTree.getValidatorProof(firstValidatorLeafIndex + BigInt(validatorIndex)); + const headerMerkle = await sszMerkleTree.getBeaconBlockHeaderProof(header); + + const proofValidator = [...validator_proofs, ...headerMerkle.proof]; + + const 
beaconRootData = { + childBlockTimestamp, + slot: header.slot, + proposerIndex: header.proposerIndex, + }; + + const validatorWitness = { + proofValidator, + pubkey: v.container.pubkey, + effectiveBalance: v.container.effectiveBalance, + slashed: v.container.slashed, + exitEpoch: v.container.exitEpoch, + activationEligibilityEpoch: v.container.activationEligibilityEpoch, + activationEpoch: v.container.activationEpoch, + withdrawableEpoch: v.container.withdrawableEpoch, + }; + + await verifier.TEST_verifyValidator(beaconRootData, validatorWitness, validatorIndex, expectedWC); + }); + + it("don't reverts when activationEpoch == epoch(slot)", async () => { + const v = generateValidator(); + const FAR_FUTURE = (1n << 64n) - 1n; + v.container.slashed = false; + v.container.activationEligibilityEpoch = 1n; + v.container.activationEpoch = 100n; // == epoch(slot) + v.container.exitEpoch = FAR_FUTURE; + v.container.withdrawableEpoch = FAR_FUTURE; + const expectedWC = v.container.withdrawalCredentials; + await sszMerkleTree.addValidatorsLeaf(v.container); + const leafCount = await sszMerkleTree.validatorsLeafCount(); + const validatorIndex = Number(leafCount - 1n - firstValidatorLeafIndex); + const SLOT = 3200; // epoch=100 + const stateRoot = await sszMerkleTree.getStateRoot(); + const header = await generateBeaconHeader(stateRoot, SLOT); + const headerHash = await sszMerkleTree.beaconBlockHeaderHashTreeRoot(header); + const childBlockTimestamp = await setBeaconBlockRoot(headerHash); + const validator_proofs = await sszMerkleTree.getValidatorProof(firstValidatorLeafIndex + BigInt(validatorIndex)); + const headerMerkle = await sszMerkleTree.getBeaconBlockHeaderProof(header); + const proofValidator = [...validator_proofs, ...headerMerkle.proof]; + const beaconRootData = { + childBlockTimestamp, + slot: header.slot, + proposerIndex: header.proposerIndex, + }; + const validatorWitness = { + proofValidator, + pubkey: v.container.pubkey, + effectiveBalance: 
v.container.effectiveBalance, + slashed: v.container.slashed, + exitEpoch: v.container.exitEpoch, + activationEligibilityEpoch: v.container.activationEligibilityEpoch, + activationEpoch: v.container.activationEpoch, + withdrawableEpoch: v.container.withdrawableEpoch, + }; + + await verifier.TEST_verifyValidator(beaconRootData, validatorWitness, validatorIndex, expectedWC); + }); + + it("don't revert when a validator with non-FAR_FUTURE exitEpoch (proof mismatch)", async () => { + const v = generateValidator(); + const FAR_FUTURE = (1n << 64n) - 1n; + v.container.slashed = false; + v.container.activationEligibilityEpoch = 70n; + v.container.activationEpoch = 90n; + const SLOT = 3200; // epoch(slot) = 100 + v.container.exitEpoch = 101n; // + v.container.withdrawableEpoch = FAR_FUTURE; + const expectedWC = v.container.withdrawalCredentials; + await sszMerkleTree.addValidatorsLeaf(v.container); + const leafCount = await sszMerkleTree.validatorsLeafCount(); + const validatorIndex = Number(leafCount - 1n - firstValidatorLeafIndex); + const stateRoot = await sszMerkleTree.getStateRoot(); + const header = await generateBeaconHeader(stateRoot, SLOT); + const headerHash = await sszMerkleTree.beaconBlockHeaderHashTreeRoot(header); + const childBlockTimestamp = await setBeaconBlockRoot(headerHash); + const validator_proofs = await sszMerkleTree.getValidatorProof(firstValidatorLeafIndex + BigInt(validatorIndex)); + const headerMerkle = await sszMerkleTree.getBeaconBlockHeaderProof(header); + const proofValidator = [...validator_proofs, ...headerMerkle.proof]; + const beaconRootData = { + childBlockTimestamp, + slot: header.slot, + proposerIndex: header.proposerIndex, + }; + const validatorWitness = { + proofValidator, + pubkey: v.container.pubkey, + effectiveBalance: v.container.effectiveBalance, + slashed: v.container.slashed, + exitEpoch: v.container.exitEpoch, + activationEligibilityEpoch: v.container.activationEligibilityEpoch, + activationEpoch: 
v.container.activationEpoch, + withdrawableEpoch: v.container.withdrawableEpoch, + }; + + await verifier.TEST_verifyValidator(beaconRootData, validatorWitness, validatorIndex, expectedWC); + }); + + it("should verify static validator 12345 with real mainnet proof", async () => { + const staticVerifier = await ethers.deployContract("CLValidatorVerifier__Harness", [ + STATIC_VALIDATOR.gIFirstValidator, + STATIC_VALIDATOR.gIFirstValidator, + 0, + ]); + + const timestamp = await setBeaconBlockRoot(STATIC_VALIDATOR.blockRoot); + + const v = STATIC_VALIDATOR.validators[0]; + const beaconRootData = { + ...STATIC_VALIDATOR.beaconRootData, + childBlockTimestamp: timestamp, + }; + + await staticVerifier.TEST_verifyValidator( + beaconRootData, + v.witness, + v.index, + "0x010000000000000000000000ddc6ed6e6a9c1e55c87b155b9a40bac4721a6dac", + ); + }); + + it("should verify static validator 67890 with real mainnet proof", async () => { + const staticVerifier = await ethers.deployContract("CLValidatorVerifier__Harness", [ + STATIC_VALIDATOR.gIFirstValidator, + STATIC_VALIDATOR.gIFirstValidator, + 0, + ]); + + const timestamp = await setBeaconBlockRoot(STATIC_VALIDATOR.blockRoot); + + const v = STATIC_VALIDATOR.validators[1]; + const beaconRootData = { + ...STATIC_VALIDATOR.beaconRootData, + childBlockTimestamp: timestamp, + }; + + await staticVerifier.TEST_verifyValidator( + beaconRootData, + v.witness, + v.index, + "0x010000000000000000000000210b3cb99fa1de0a64085fa80e18c22fe4722a1b", + ); + }); + + it("should reject static validator with wrong withdrawal credentials", async () => { + const staticVerifier = await ethers.deployContract("CLValidatorVerifier__Harness", [ + STATIC_VALIDATOR.gIFirstValidator, + STATIC_VALIDATOR.gIFirstValidator, + 0, + ]); + + const timestamp = await setBeaconBlockRoot(STATIC_VALIDATOR.blockRoot); + + const v = STATIC_VALIDATOR.validators[0]; + const beaconRootData = { + ...STATIC_VALIDATOR.beaconRootData, + childBlockTimestamp: timestamp, + }; + + 
const wrongWC = "0x" + "11".repeat(32); + await expect(staticVerifier.TEST_verifyValidator(beaconRootData, v.witness, v.index, wrongWC)).to.be.reverted; + }); + + it("should reject static validator with fake proof", async () => { + const staticVerifier = await ethers.deployContract("CLValidatorVerifier__Harness", [ + STATIC_VALIDATOR.gIFirstValidator, + STATIC_VALIDATOR.gIFirstValidator, + 0, + ]); + + const timestamp = await setBeaconBlockRoot(STATIC_VALIDATOR.blockRoot); + + const v = STATIC_VALIDATOR.validators[0]; + const beaconRootData = { + ...STATIC_VALIDATOR.beaconRootData, + childBlockTimestamp: timestamp, + }; + + const tamperedWitness = { + ...v.witness, + proofValidator: [...v.witness.proofValidator], + }; + tamperedWitness.proofValidator[0] = "0x" + "aa".repeat(32); + + await expect( + staticVerifier.TEST_verifyValidator( + beaconRootData, + tamperedWitness, + v.index, + "0x010000000000000000000000ddc6ed6e6a9c1e55c87b155b9a40bac4721a6dac", + ), + ).to.be.reverted; + }); + + it("should change gIndex on pivot slot", async () => { + const pivotSlot = 1000; + const giPrev = randomBytes32(); + const giCurr = randomBytes32(); + + const proofVerifier = await ethers.deployContract("CLValidatorVerifier__Harness", [giPrev, giCurr, pivotSlot], {}); + expect(await proofVerifier.TEST_getValidatorGI(0n, pivotSlot - 1)).to.equal(giPrev); + expect(await proofVerifier.TEST_getValidatorGI(0n, pivotSlot)).to.equal(giCurr); + expect(await proofVerifier.TEST_getValidatorGI(0n, pivotSlot + 1)).to.equal(giCurr); + }); +}); diff --git a/test/0.8.25/srv3/contracts/CLValidatorVerifier__Harness.sol b/test/0.8.25/srv3/contracts/CLValidatorVerifier__Harness.sol new file mode 100644 index 0000000000..129600062a --- /dev/null +++ b/test/0.8.25/srv3/contracts/CLValidatorVerifier__Harness.sol @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.25; + +import {GIndex} from "contracts/common/lib/GIndex.sol"; +import 
{CLValidatorVerifier} from "contracts/0.8.25/CLValidatorVerifier.sol"; +import {BeaconRootData, ValidatorWitness} from "contracts/common/interfaces/TopUpWitness.sol"; + +contract CLValidatorVerifier__Harness is CLValidatorVerifier { + constructor( + GIndex _gIFirstValidatorPrev, + GIndex _gIFirstValidatorCurr, + uint64 _pivotSlot + ) CLValidatorVerifier(_gIFirstValidatorPrev, _gIFirstValidatorCurr, _pivotSlot) {} + + function TEST_verifyValidator( + BeaconRootData calldata beaconData, + ValidatorWitness calldata vw, + uint256 validatorIndex, + bytes32 withdrawalCredentials + ) public view { + _verifyValidator(beaconData, vw, validatorIndex, withdrawalCredentials); + } + + function TEST_getParentBlockRoot(uint64 parentBlockTimestamp) public view returns (bytes32) { + return _getParentBlockRoot(parentBlockTimestamp); + } + + function TEST_getValidatorGI(uint256 offset, uint64 slot) public view returns (GIndex) { + return _getValidatorGI(offset, slot); + } +} diff --git a/test/0.8.25/srv3/contracts/SSZValidatorsMerkleTree.sol b/test/0.8.25/srv3/contracts/SSZValidatorsMerkleTree.sol new file mode 100644 index 0000000000..ac4d4673e8 --- /dev/null +++ b/test/0.8.25/srv3/contracts/SSZValidatorsMerkleTree.sol @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.25; + +import {GIndex, pack, concat, fls} from "contracts/common/lib/GIndex.sol"; +import {SSZ} from "contracts/common/lib/SSZ.sol"; +import {BLS12_381} from "contracts/common/lib/BLS.sol"; +import {SSZBLSHelpers} from "../../vaults/predepositGuarantee/contracts/SSZBLSHelpers.sol"; + +/// Merkle tree implementation +/// NOT gas optimized, for testing purposes only +contract SSZValidatorsMerkleTree is SSZBLSHelpers { + uint256 public immutable VALIDATORS_DEPTH; + + uint256 public validatorsLeafCount = 0; // Number of leaves in the tree + + uint256 public immutable VALIDATORS_BASE_INDEX; + + mapping(uint256 => bytes32) public nodes; + + /// @notice Initializes 
the Merkle tree with a given depth and pre-filled nodes so GIndex can closely match CL + constructor(GIndex validatorsBase) { + uint256 depthValidators = depth(validatorsBase); + + VALIDATORS_DEPTH = depthValidators; + + // allows to simulate middle part of the tree + validatorsLeafCount = validatorsBase.index() - (1 << VALIDATORS_DEPTH); + + VALIDATORS_BASE_INDEX = validatorsBase.index(); + } + + // Below methods copied and adapted versions of methods from ../vaults/SSZMerkleTree.sol + + /// @notice Adds a new leaf to the validators tree + /// @param validator The leaf value + /// @return index The index of the added leaf + function addValidatorsLeaf(SSZBLSHelpers.Validator calldata validator) public returns (uint256) { + bytes32 leaf = validatorHashTreeRootCalldata(validator); + + require(validatorsLeafCount < (1 << VALIDATORS_DEPTH), "Validators tree is full"); + + uint256 gi = VALIDATORS_BASE_INDEX + validatorsLeafCount; + nodes[gi] = leaf; + validatorsLeafCount++; + + _updateTree(gi); // Update the Merkle tree structure + + return gi; + } + + function getStateRoot() public view returns (bytes32) { + return nodes[1]; + } + + function getValidatorProof(uint256 leafIndex) public view returns (bytes32[] memory) { + require(leafIndex < validatorsLeafCount, "Invalid leaf index"); + uint256 gi = VALIDATORS_BASE_INDEX + leafIndex; + return _getMerkleProof(gi); + } + + /// generalized index for validators[position] + function getValidatorGeneralizedIndex(uint256 position) public view returns (GIndex) { + require(position < (1 << VALIDATORS_DEPTH), "Invalid position"); + uint256 gi = VALIDATORS_BASE_INDEX + position; + return pack(gi, uint8(VALIDATORS_DEPTH)); + } + + /// @notice Computes and returns the Merkle proof for a given *global* index + function _getMerkleProof(uint256 index) internal view returns (bytes32[] memory) { + // Use fls(index) to get actual tree depth, not TREE_DEPTH which may be incorrect + // for indices that overflow to next power of 2 + // 
floor(log2) + uint256 actualDepth = fls(index); + bytes32[] memory proof = new bytes32[](actualDepth); + + for (uint256 i = 0; i < actualDepth; ++i) { + uint256 siblingIndex = index % 2 == 0 ? index + 1 : index - 1; + proof[i] = nodes[siblingIndex]; + index /= 2; + } + + return proof; + } + + /// @dev Updates the tree after adding a leaf + /// @param index The index of the new leaf + function _updateTree(uint256 index) internal { + while (index > 1) { + uint256 parentIndex = index / 2; + uint256 siblingIndex = index % 2 == 0 ? index + 1 : index - 1; + + bytes32 left = nodes[index % 2 == 0 ? index : siblingIndex]; + bytes32 right = nodes[index % 2 == 0 ? siblingIndex : index]; + + nodes[parentIndex] = sha256(abi.encodePacked(left, right)); + + index = parentIndex; + } + } +} diff --git a/test/0.8.25/stakingRouter/helpers/index.ts b/test/0.8.25/stakingRouter/helpers/index.ts new file mode 100644 index 0000000000..6d2d2b69d2 --- /dev/null +++ b/test/0.8.25/stakingRouter/helpers/index.ts @@ -0,0 +1,127 @@ +import { expect } from "chai"; +import { randomBytes } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + StakingModule__MockForStakingRouter, + StakingModuleV2__MockForStakingRouter, + StakingRouter__Harness, +} from "typechain-types"; + +import { wcTypeMaxEB } from "lib"; +import { ONE_GWEI, StakingModuleStatus, TOTAL_BASIS_POINTS, WithdrawalCredentialsType } from "lib/constants"; + +export const DEFAULT_CONFIG: ModuleConfig = { + stakeShareLimit: TOTAL_BASIS_POINTS, + priorityExitShareThreshold: TOTAL_BASIS_POINTS, + moduleFee: 5_00n, + treasuryFee: 5_00n, + maxDepositsPerBlock: 150n, + minDepositBlockDistance: 25n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, +}; +export const DEFAULT_MEB = wcTypeMaxEB(DEFAULT_CONFIG.withdrawalCredentialsType); + +type SetupModuleResult = T extends WithdrawalCredentialsType.WC0x02 + ? 
[StakingModuleV2__MockForStakingRouter, bigint] + : T extends WithdrawalCredentialsType.WC0x01 + ? [StakingModule__MockForStakingRouter, bigint] + : [StakingModule__MockForStakingRouter | StakingModuleV2__MockForStakingRouter, bigint]; + +export async function setupModule( + ctx: CtxConfig, + cfg: ModuleConfig & { withdrawalCredentialsType: T }, +): Promise>; + +export async function setupModule( + { stakingRouter, admin, deployer }: CtxConfig, + { + stakeShareLimit, + priorityExitShareThreshold, + moduleFee, + treasuryFee, + maxDepositsPerBlock, + minDepositBlockDistance, + exited = 0n, + deposited = 0n, + depositable = 0n, + status = StakingModuleStatus.Active, + withdrawalCredentialsType = WithdrawalCredentialsType.WC0x01, + validatorsBalanceGwei = 0n, + totalModuleStake = 0n, + }: ModuleConfig, +): Promise<[StakingModule__MockForStakingRouter | StakingModuleV2__MockForStakingRouter, bigint]> { + const modulesCount = await stakingRouter.getStakingModulesCount(); + const moduleId = modulesCount + 1n; + + const stakingModuleConfig = { + stakeShareLimit, + priorityExitShareThreshold, + stakingModuleFee: moduleFee, + treasuryFee, + maxDepositsPerBlock, + minDepositBlockDistance, + withdrawalCredentialsType, + }; + + const initializeModule = async ( + module: StakingModule__MockForStakingRouter | StakingModuleV2__MockForStakingRouter, + ) => { + await stakingRouter + .connect(admin) + .addStakingModule(randomBytes(8).toString(), await module.getAddress(), stakingModuleConfig); + + expect(await stakingRouter.getStakingModulesCount()).to.equal(modulesCount + 1n); + + await module.mock__getStakingModuleSummary(exited, deposited, depositable); + if (validatorsBalanceGwei == 0n && deposited > 0n) { + validatorsBalanceGwei = (deposited * wcTypeMaxEB(withdrawalCredentialsType)) / ONE_GWEI; + } + await stakingRouter.testing_setStakingModuleAccounting(moduleId, validatorsBalanceGwei, exited); + + if (status != StakingModuleStatus.Active) { + await 
stakingRouter.setStakingModuleStatus(moduleId, status); + } + }; + + if (withdrawalCredentialsType === WithdrawalCredentialsType.WC0x02) { + const module = await ethers.deployContract("StakingModuleV2__MockForStakingRouter", deployer); + await initializeModule(module); + + if (totalModuleStake == 0n && deposited > 0n) { + totalModuleStake = deposited * wcTypeMaxEB(WithdrawalCredentialsType.WC0x01); + } + await module.mock__getTotalModuleStake(totalModuleStake); + + return [module, moduleId]; + } + + const module = await ethers.deployContract("StakingModule__MockForStakingRouter", deployer); + await initializeModule(module); + + return [module, moduleId]; +} + +export interface CtxConfig { + deployer: HardhatEthersSigner; + admin: HardhatEthersSigner; + stakingRouter: StakingRouter__Harness; +} + +export interface ModuleConfig { + stakeShareLimit: bigint; + priorityExitShareThreshold: bigint; + moduleFee: bigint; + treasuryFee: bigint; + maxDepositsPerBlock: bigint; + minDepositBlockDistance: bigint; + withdrawalCredentialsType: WithdrawalCredentialsType; + exited?: bigint; + deposited?: bigint; + depositable?: bigint; + status?: StakingModuleStatus; + validatorsBalanceGwei?: bigint; + totalModuleStake?: bigint; +} diff --git a/test/0.8.9/stakingRouter/stakingRouter.exit.test.ts b/test/0.8.25/stakingRouter/stakingRouter.exit.test.ts similarity index 63% rename from test/0.8.9/stakingRouter/stakingRouter.exit.test.ts rename to test/0.8.25/stakingRouter/stakingRouter.exit.test.ts index bf5e78656d..d4746ac8c7 100644 --- a/test/0.8.9/stakingRouter/stakingRouter.exit.test.ts +++ b/test/0.8.25/stakingRouter/stakingRouter.exit.test.ts @@ -1,34 +1,34 @@ import { expect } from "chai"; -import { hexlify, randomBytes } from "ethers"; import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { - DepositContract__MockForBeaconChainDepositor, - StakingModule__MockForTriggerableWithdrawals, - StakingRouter__Harness, -} 
from "typechain-types"; +import { LidoLocator, StakingModule__MockForTriggerableWithdrawals, StakingRouter__Harness } from "typechain-types"; -import { certainAddress, ether, proxify, randomString } from "lib"; +import { certainAddress, ether, randomString, randomWCType1, WithdrawalCredentialsType } from "lib"; +import { deployLidoLocator } from "test/deploy"; import { Snapshot } from "test/suite"; +import { deployStakingRouter } from "../../deploy/stakingRouter"; + describe("StakingRouter.sol:exit", () => { let deployer: HardhatEthersSigner; - let proxyAdmin: HardhatEthersSigner; + let admin: HardhatEthersSigner; let stakingRouterAdmin: HardhatEthersSigner; let user: HardhatEthersSigner; let reporter: HardhatEthersSigner; - let depositContract: DepositContract__MockForBeaconChainDepositor; + let locator: LidoLocator; let stakingRouter: StakingRouter__Harness; let stakingModule: StakingModule__MockForTriggerableWithdrawals; let originalState: string; const lido = certainAddress("test:staking-router:lido"); - const withdrawalCredentials = hexlify(randomBytes(32)); + const topUpGateway = certainAddress("test:staking-router:topUpGateway"); + const depositSecurityModule = certainAddress("test:staking-router:depositSecurityModule"); + const withdrawalCredentials = randomWCType1(); const STAKE_SHARE_LIMIT = 1_00n; const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; const MODULE_FEE = 5_00n; @@ -39,21 +39,19 @@ describe("StakingRouter.sol:exit", () => { const NODE_OPERATOR_ID = 1n; before(async () => { - [deployer, proxyAdmin, stakingRouterAdmin, user, reporter] = await ethers.getSigners(); - - depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor", deployer); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter__Harness", { - libraries: { - 
["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, + [deployer, admin, stakingRouterAdmin, user, reporter] = await ethers.getSigners(); + + locator = await deployLidoLocator({ + lido, + topUpGateway, + depositSecurityModule, }); - const impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); - [stakingRouter] = await proxify({ impl, admin: proxyAdmin, caller: user }); + // deploy staking router + ({ stakingRouter } = await deployStakingRouter({ deployer, admin }, { lidoLocator: locator })); // Initialize StakingRouter - await stakingRouter.initialize(stakingRouterAdmin.address, lido, withdrawalCredentials); + await stakingRouter.initialize(stakingRouterAdmin.address, withdrawalCredentials); // Deploy mock staking module stakingModule = await ethers.deployContract("StakingModule__MockForTriggerableWithdrawals", deployer); @@ -63,19 +61,36 @@ describe("StakingRouter.sol:exit", () => { .connect(stakingRouterAdmin) .grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), stakingRouterAdmin); + const stakingModuleConfig = { + /// @notice Maximum stake share that can be allocated to a module, in BP. + /// @dev Must be less than or equal to TOTAL_BASIS_POINTS (10_000 BP = 100%). + stakeShareLimit: STAKE_SHARE_LIMIT, + /// @notice Module's share threshold, upon crossing which, exits of validators from the module will be prioritized, in BP. + /// @dev Must be less than or equal to TOTAL_BASIS_POINTS (10_000 BP = 100%) and + /// greater than or equal to `stakeShareLimit`. + priorityExitShareThreshold: PRIORITY_EXIT_SHARE_THRESHOLD, + /// @notice Part of the fee taken from staking rewards that goes to the staking module, in BP. + /// @dev Together with `treasuryFee`, must not exceed TOTAL_BASIS_POINTS. + stakingModuleFee: MODULE_FEE, + /// @notice Part of the fee taken from staking rewards that goes to the treasury, in BP. 
+ /// @dev Together with `stakingModuleFee`, must not exceed TOTAL_BASIS_POINTS. + treasuryFee: TREASURY_FEE, + /// @notice The maximum number of validators that can be deposited in a single block. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// Value must not exceed type(uint64).max. + maxDepositsPerBlock: MAX_DEPOSITS_PER_BLOCK, + /// @notice The minimum distance between deposits in blocks. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// Value must be > 0 and ≤ type(uint64).max. + minDepositBlockDistance: MIN_DEPOSIT_BLOCK_DISTANCE, + /// @notice Withdrawal credential type used by the module. + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + // Add staking module await stakingRouter .connect(stakingRouterAdmin) - .addStakingModule( - randomString(8), - await stakingModule.getAddress(), - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ); + .addStakingModule(randomString(8), await stakingModule.getAddress(), stakingModuleConfig); // Grant necessary roles to reporter await stakingRouter @@ -94,7 +109,7 @@ describe("StakingRouter.sol:exit", () => { context("reportValidatorExitDelay", () => { const proofSlotTimestamp = Math.floor(Date.now() / 1000); const eligibleToExitInSec = 86400; // 1 day - const publicKey = hexlify(randomBytes(48)); + const publicKey = randomString(48); it("calls reportValidatorExitDelay on the staking module", async () => { await expect( @@ -125,16 +140,14 @@ describe("StakingRouter.sol:exit", () => { publicKey, eligibleToExitInSec, ), - ).to.be.revertedWith( - `AccessControl: account ${user.address.toLowerCase()} is missing role ${await stakingRouter.REPORT_VALIDATOR_EXITING_STATUS_ROLE()}`, - ); + ).to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount"); }); }); context("onValidatorExitTriggered", () => { 
const withdrawalRequestPaidFee = ether("0.01"); const exitType = 1n; - const publicKey = hexlify(randomBytes(48)); + const publicKey = randomString(48); it("calls onValidatorExitTriggered on the staking module for each validator", async () => { const validatorExitData = [ @@ -199,9 +212,7 @@ describe("StakingRouter.sol:exit", () => { await expect( stakingRouter.connect(user).onValidatorExitTriggered(validatorExitData, withdrawalRequestPaidFee, exitType), - ).to.be.revertedWith( - `AccessControl: account ${user.address.toLowerCase()} is missing role ${await stakingRouter.REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE()}`, - ); + ).to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount"); }); }); }); diff --git a/test/0.8.25/stakingRouter/stakingRouter.getDepositAllocations.test.ts b/test/0.8.25/stakingRouter/stakingRouter.getDepositAllocations.test.ts new file mode 100644 index 0000000000..d948c4d403 --- /dev/null +++ b/test/0.8.25/stakingRouter/stakingRouter.getDepositAllocations.test.ts @@ -0,0 +1,767 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + AccountingOracle__MockForStakingRouter, + Lido__MockForStakingRouter, + LidoLocator, + StakingRouter__Harness, +} from "typechain-types"; + +import { randomWCType1 } from "lib"; +import { ONE_GWEI, StakingModuleStatus, WithdrawalCredentialsType } from "lib/constants"; + +import { deployLidoLocator, deployStakingRouter } from "test/deploy"; +import { Snapshot } from "test/suite"; + +import { CtxConfig, DEFAULT_CONFIG, DEFAULT_MEB, setupModule } from "./helpers"; + +describe("StakingRouter.sol:getDepositAllocations", () => { + let deployer: HardhatEthersSigner; + let admin: HardhatEthersSigner; + + let locator: LidoLocator; + let stakingRouter: StakingRouter__Harness; + let lidoMock: Lido__MockForStakingRouter; + let accountingOracle: AccountingOracle__MockForStakingRouter; + + let 
originalState: string; + + let ctx: CtxConfig; + + const withdrawalCredentials = randomWCType1(); + const depositSecurityModule = "0x0000000000000000000000000000000000000002"; + + before(async () => { + [deployer, admin] = await ethers.getSigners(); + + lidoMock = await ethers.deployContract("Lido__MockForStakingRouter", deployer); + accountingOracle = await ethers.deployContract("AccountingOracle__MockForStakingRouter", deployer); + + locator = await deployLidoLocator({ + lido: await lidoMock.getAddress(), + depositSecurityModule, + accountingOracle: await accountingOracle.getAddress(), + }); + + ({ stakingRouter } = await deployStakingRouter({ deployer, admin }, { lidoLocator: locator, lido: lidoMock })); + + await lidoMock.setStakingRouter(await stakingRouter.getAddress()); + await stakingRouter.initialize(admin, withdrawalCredentials); + await stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin); + + ctx = { + deployer, + admin, + stakingRouter, + }; + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("getDepositAllocations with _isTopUp = false (initial deposits)", () => { + it("Returns empty arrays when there are no modules registered", async () => { + const result = await stakingRouter.getDepositAllocations(100n, false); + expect(result.totalAllocated).to.equal(0n); + expect(result.allocated).to.deep.equal([]); + expect(result.newAllocations).to.deep.equal([]); + }); + + it("Returns all allocations to a single module if there is only one", async () => { + const config = { + ...DEFAULT_CONFIG, + depositable: 100n, + }; + + await setupModule(ctx, config); + + const ethToDeposit = 150n * DEFAULT_MEB; + const moduleAllocation = config.depositable * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(moduleAllocation); + 
expect(result.newAllocations).to.deep.equal([moduleAllocation]); + expect(result.allocated).to.deep.equal([moduleAllocation]); + }); + + it("Allocates evenly if target shares are equal and capacities allow for that", async () => { + const config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 50n, + }; + + await setupModule(ctx, config); + await setupModule(ctx, config); + + const ethToDeposit = 200n * DEFAULT_MEB; + const moduleAllocation = config.depositable * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(moduleAllocation * 2n); + expect(result.newAllocations).to.deep.equal([moduleAllocation, moduleAllocation]); + expect(result.allocated).to.deep.equal([moduleAllocation, moduleAllocation]); + }); + + it("Does not allocate to non-Active modules", async () => { + const config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 50n, + }; + + await setupModule(ctx, config); + await setupModule(ctx, { ...config, status: StakingModuleStatus.DepositsPaused }); + + const ethToDeposit = 200n * DEFAULT_MEB; + const moduleAllocation = config.depositable * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(moduleAllocation); + expect(result.newAllocations).to.deep.equal([moduleAllocation, 0n]); + expect(result.allocated).to.deep.equal([moduleAllocation, 0n]); + }); + + it("Allocates according to capacities at equal target shares", async () => { + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 100n, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 50n, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, 
module2Config); + + const ethToDeposit = 200n * DEFAULT_MEB; + const module1Allocation = module1Config.depositable * DEFAULT_MEB; + const module2Allocation = module2Config.depositable * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(module1Allocation + module2Allocation); + expect(result.newAllocations).to.deep.equal([module1Allocation, module2Allocation]); + expect(result.allocated).to.deep.equal([module1Allocation, module2Allocation]); + }); + + it("Allocates according to target shares", async () => { + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 60_00n, + priorityExitShareThreshold: 60_00n, + depositable: 100n, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 40_00n, + priorityExitShareThreshold: 40_00n, + depositable: 100n, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + const ethToDeposit = 200n * DEFAULT_MEB; + const module1Allocation = 100n * DEFAULT_MEB; + const module2Allocation = 80n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(module1Allocation + module2Allocation); + expect(result.newAllocations).to.deep.equal([module1Allocation, module2Allocation]); + }); + + it("Allocates with unlimited (100%) and 20% limited share modules", async () => { + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 100_00n, + priorityExitShareThreshold: 100_00n, + depositable: 200n, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 20_00n, + priorityExitShareThreshold: 20_00n, + depositable: 200n, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + // totalValidators = 0 + 0 + 200 = 200 + // Module 1 target: (10000 * 200) / 10000 = 200, cap = min(200, 200) = 200 + // Module 2 target: (2000 * 200) / 10000 = 40, cap = min(40, 200) = 
40 + // MinFirst: [0,0] caps [200,40] + // fill both to 40: cost 80, remaining 120 + // module 2 at cap, module 1 gets 120 + // result: [160, 40], total = 200 + const ethToDeposit = 200n * DEFAULT_MEB; + const module1Allocation = 160n * DEFAULT_MEB; + const module2Allocation = 40n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(module1Allocation + module2Allocation); + expect(result.newAllocations).to.deep.equal([module1Allocation, module2Allocation]); + expect(result.allocated).to.deep.equal([module1Allocation, module2Allocation]); + }); + + it("Unlimited module absorbs excess when 20% module hits share limit with pre-existing deposits", async () => { + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 100_00n, + priorityExitShareThreshold: 100_00n, + depositable: 100n, + deposited: 50n, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 20_00n, + priorityExitShareThreshold: 20_00n, + depositable: 100n, + deposited: 50n, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + // totalValidators = 50 + 50 + 200 = 300 + // Module 1 target: (10000 * 300) / 10000 = 300, cap = min(300, 150) = 150 + // Module 2 target: (2000 * 300) / 10000 = 60, cap = min(60, 150) = 60 + // MinFirst: [50,50] caps [150,60] + // fill both to 60: cost 20, remaining 180 + // module 2 at cap, module 1 gets min(180, 90) = 90 + // result: [150, 60], total allocated = 110 + const ethToDeposit = 200n * DEFAULT_MEB; + const module1Delta = 100n * DEFAULT_MEB; + const module2Delta = 10n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.totalAllocated).to.equal(module1Delta + module2Delta); + expect(result.newAllocations).to.deep.equal([150n * DEFAULT_MEB, 60n * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([module1Delta, module2Delta]); + }); + + it("Returns zero 
allocated array when deposit amount is zero", async () => { + const config = { + ...DEFAULT_CONFIG, + depositable: 50n, + }; + + await setupModule(ctx, config); + + const result = await stakingRouter.getDepositAllocations(0n, false); + expect(result.totalAllocated).to.equal(0n); + expect(result.allocated).to.deep.equal([0n]); + // newAllocations should reflect current allocation state (no deposited = 0) + expect(result.newAllocations).to.deep.equal([0n]); + }); + }); + + context("getDepositAllocations with _isTopUp = true (top-up deposits)", () => { + it("Returns empty arrays when there are no modules registered", async () => { + const result = await stakingRouter.getDepositAllocations(100n, true); + expect(result.totalAllocated).to.equal(0n); + expect(result.allocated).to.deep.equal([]); + expect(result.newAllocations).to.deep.equal([]); + }); + + it("Returns all allocations to a single module if there is only one", async () => { + // For top-up 0x02 modules, capacity = activeValidators * maxEBType2 / maxEBType1 + // We need deposited validators with initial balance (32 ETH each) to create top-up room + const deposited = 10n; + const config = { + ...DEFAULT_CONFIG, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: deposited * 32n * ONE_GWEI, // each validator at initial 32 ETH + }; + + await setupModule(ctx, config); + + // capacity_equiv = 10 * 2048/32 = 640, current_equiv = 10, room = 630 + const ethToDeposit = 631n * DEFAULT_MEB; + const moduleAllocation = 630n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(moduleAllocation); + expect(result.newAllocations).to.deep.equal([(deposited + 630n) * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([moduleAllocation]); + }); + + it("Allocates evenly if target shares are equal and capacities allow for that", async () => { + const deposited = 1n; + const config = { + 
...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: deposited * 32n * ONE_GWEI, + }; + + await setupModule(ctx, config); + await setupModule(ctx, config); + + // capacity_equiv = 1 * 2048/32 = 64, current_equiv = 1, room = 63 + const ethToDeposit = 50n * DEFAULT_MEB; + const moduleAllocation = 25n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(moduleAllocation * 2n); + expect(result.newAllocations).to.deep.equal([(deposited + 25n) * DEFAULT_MEB, (deposited + 25n) * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([moduleAllocation, moduleAllocation]); + }); + + it("Does not allocate to non-Active modules", async () => { + const deposited = 1n; + const config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: deposited * 32n * ONE_GWEI, + }; + + await setupModule(ctx, config); + await setupModule(ctx, { ...config, status: StakingModuleStatus.DepositsPaused }); + + // Module 1: capacity_equiv = 1 * 2048/32 = 64, current_equiv = 1, room = 63 + const ethToDeposit = 200n * DEFAULT_MEB; + const moduleAllocation = deposited * 63n * DEFAULT_MEB; // all to module 1 since module 2 is paused + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(moduleAllocation); + expect(result.newAllocations).to.deep.equal([(deposited + 63n) * DEFAULT_MEB, deposited * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([moduleAllocation, 0n]); + }); + + it("Allocates according to capacities at equal target shares", async () => { + // Module with more active validators has more top-up capacity + const module1Config = { + ...DEFAULT_CONFIG, + 
stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + deposited: 10n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: 10n * 32n * ONE_GWEI, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + deposited: 2n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: 2n * 32n * ONE_GWEI, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + // Module 1: capacity_equiv = 10 * 2048/32 = 640, current_equiv = 10, room = 630 + // Module 2: capacity_equiv = 2 * 2048/32 = 128, current_equiv = 2, room = 126 + // + // cap1_raw = 10*64=640, cap2_raw = 2*64=128 + // total = 10+2+1000 = 1012, target = 506 each + // cap1 = min(506, 640)=506, cap2 = min(506, 128)=128 + // MinFirst: [10,2] caps [506,128] + // fill 2→10: +8, remaining 992 + // fill equally to 128: each +118, remaining 756 + // module 2 at cap, module 1 gets min(756, 506-128)=378 + // total = 8+236+378 = 622 + // module1 delta = 496, module2 delta = 126 + const ethToDeposit = 1000n * DEFAULT_MEB; + const module1Allocation = 496n * DEFAULT_MEB; + const module2Allocation = 126n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(module1Allocation + module2Allocation); + expect(result.newAllocations).to.deep.equal([506n * DEFAULT_MEB, 128n * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([module1Allocation, module2Allocation]); + }); + + it("Allocates according to target shares", async () => { + // Same deposited count, different share limits → allocation driven by target shares + const deposited = 10n; + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 60_00n, + priorityExitShareThreshold: 60_00n, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: deposited * 32n * ONE_GWEI, + 
}; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 40_00n, + priorityExitShareThreshold: 40_00n, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: deposited * 32n * ONE_GWEI, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + // total = 10+10+80 = 100 + // target1 = 60, target2 = 40, cap_raw = 10*64=640 each + // cap1 = min(60,640)=60, cap2 = min(40,640)=40 + // MinFirst: [10,10] caps [60,40] + // fill equally to 40: each +30, remaining 20 + // module 2 at cap, module 1 gets 20 + // total = 80 + const ethToDeposit = 80n * DEFAULT_MEB; + const module1Allocation = 50n * DEFAULT_MEB; + const module2Allocation = 30n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(module1Allocation + module2Allocation); + expect(result.newAllocations).to.deep.equal([60n * DEFAULT_MEB, 40n * DEFAULT_MEB]); + }); + + it("Allocates with unlimited (100%) and 20% limited share modules for top-up", async () => { + const deposited = 10n; + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 100_00n, + priorityExitShareThreshold: 100_00n, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: deposited * 32n * ONE_GWEI, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 20_00n, + priorityExitShareThreshold: 20_00n, + deposited, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: deposited * 32n * ONE_GWEI, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + // Each module: cap_raw = 10 * 2048/32 = 640 equiv validators + // Current: 10 equiv each + // totalValidators = 10 + 10 + 100 = 120 + // Module 1 target: (10000 * 120) / 10000 = 120, cap = min(120, 640) = 120 + // Module 2 target: (2000 * 120) / 10000 = 24, cap = min(24, 640) = 24 + // 
MinFirst: [10,10] caps [120,24] + // fill both to 24: cost 28, remaining 72 + // module 2 at cap, module 1 gets min(72, 96) = 72 + // result: [96, 24], total allocated = 100 + const ethToDeposit = 100n * DEFAULT_MEB; + const module1Delta = 86n * DEFAULT_MEB; + const module2Delta = 14n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(module1Delta + module2Delta); + expect(result.newAllocations).to.deep.equal([96n * DEFAULT_MEB, 24n * DEFAULT_MEB]); + expect(result.allocated).to.deep.equal([module1Delta, module2Delta]); + }); + + it("Unlimited module absorbs excess when 20% module has fewer active validators for top-up", async () => { + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 100_00n, + priorityExitShareThreshold: 100_00n, + deposited: 10n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: 10n * 32n * ONE_GWEI, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 20_00n, + priorityExitShareThreshold: 20_00n, + deposited: 1n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: 1n * 32n * ONE_GWEI, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + // Module 1: cap_raw = 10 * 64 = 640, current = 10 + // Module 2: cap_raw = 1 * 64 = 64, current = 1 + // totalValidators = 10 + 1 + 600 = 611 + // Module 1 target: (10000 * 611) / 10000 = 611, cap = min(611, 640) = 611 + // Module 2 target: (2000 * 611) / 10000 = 122, cap = min(122, 64) = 64 + const ethToDeposit = 600n * DEFAULT_MEB; + const module1Delta = 537n * DEFAULT_MEB; + const module2Delta = 63n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + expect(result.totalAllocated).to.equal(module1Delta + module2Delta); + expect(result.newAllocations).to.deep.equal([547n * DEFAULT_MEB, 64n * DEFAULT_MEB]); + 
expect(result.allocated).to.deep.equal([module1Delta, module2Delta]); + }); + + it("Returns zero allocated array when deposit amount is zero", async () => { + const config = { + ...DEFAULT_CONFIG, + depositable: 50n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }; + + await setupModule(ctx, config); + + const result = await stakingRouter.getDepositAllocations(0n, true); + expect(result.totalAllocated).to.equal(0n); + expect(result.allocated).to.deep.equal([0n]); + }); + }); + + context("multi-module top-up scenarios", () => { + // Module balances from SR accounting (wei) + const MODULE_1_BALANCE_GWEI = 960_006_155_190_000_000_000n / ONE_GWEI; // ~960.006 ETH ~ 31 validators + const MODULE_2_BALANCE_GWEI = 0n; + const MODULE_3_BALANCE_GWEI = 1_600_010_258_650_000_000_000n / ONE_GWEI; // ~1600.01 ETH ~ 51 validators + const MODULE_4_BALANCE_GWEI = 1_988_080_734_502_000_000_000n / ONE_GWEI; // ~1988.08 ETH ~ 63 validators + // in total 145 validators + + const BUFFER = 5_552_649_867_953_000_000_001n; // ~5552.65 ETH + + const sharesDefault = new Map(); + sharesDefault.set(1, { stakeShareLimit: 10000n, priorityExitShareThreshold: 10000n }); + sharesDefault.set(2, { stakeShareLimit: 400n, priorityExitShareThreshold: 10000n }); + sharesDefault.set(3, { stakeShareLimit: 2000n, priorityExitShareThreshold: 2500n }); + sharesDefault.set(4, { stakeShareLimit: 2000n, priorityExitShareThreshold: 2500n }); + + async function setupModules( + shares: Map = sharesDefault, + ) { + // Module 1: Curated (0x01, 100% share limit, 30 deposited, 0 depositable) + await setupModule(ctx, { + ...DEFAULT_CONFIG, + stakeShareLimit: shares.get(1)!.stakeShareLimit, + priorityExitShareThreshold: shares.get(1)!.priorityExitShareThreshold, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + deposited: 30n, + exited: 0n, + depositable: 0n, + validatorsBalanceGwei: MODULE_1_BALANCE_GWEI, + }); + + // Module 2: SimpleDVT (0x01, 4% share limit, 0 deposited, 0 
depositable) + await setupModule(ctx, { + ...DEFAULT_CONFIG, + stakeShareLimit: shares.get(2)!.stakeShareLimit, + priorityExitShareThreshold: shares.get(2)!.priorityExitShareThreshold, + moduleFee: 8_00n, + treasuryFee: 2_00n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + deposited: 0n, + exited: 0n, + depositable: 0n, + validatorsBalanceGwei: MODULE_2_BALANCE_GWEI, + }); + + // Module 3: Community Staking (0x01, 20% share limit, 50 deposited, 0 depositable) + await setupModule(ctx, { + ...DEFAULT_CONFIG, + stakeShareLimit: shares.get(3)!.stakeShareLimit, + priorityExitShareThreshold: shares.get(3)!.priorityExitShareThreshold, + moduleFee: 8_00n, + treasuryFee: 2_00n, + maxDepositsPerBlock: 30n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + deposited: 50n, + exited: 0n, + depositable: 0n, + validatorsBalanceGwei: MODULE_3_BALANCE_GWEI, + }); + + // Module 4: curated-onchain-v2 (0x02, variable share limit, 25 deposited, 0 depositable) + await setupModule(ctx, { + ...DEFAULT_CONFIG, + stakeShareLimit: shares.get(4)!.stakeShareLimit, + priorityExitShareThreshold: shares.get(4)!.priorityExitShareThreshold, + moduleFee: 8_00n, + treasuryFee: 2_00n, + maxDepositsPerBlock: 30n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + deposited: 25n, + exited: 0n, + depositable: 0n, + validatorsBalanceGwei: MODULE_4_BALANCE_GWEI, + totalModuleStake: MODULE_4_BALANCE_GWEI * ONE_GWEI, + }); + } + + it("Returns zero new allocation when 0x01 modules have no depositable keys and 0x02 module is at share limit", async () => { + await setupModules(); + + // allocations - is array containing new allocation per module + already allocated amount of Eth + // allocated - is a total new sum of deposits + // this test expect 0 new allocated Eth + const result = await stakingRouter.getDepositAllocations(BUFFER, true); + expect(result.totalAllocated).to.equal(0n, "totalAllocated should be 0 — no capacity in any modules"); + + // newAllocations 
array returns per-module new total allocations (including existing), + // verify it has an entry per module + expect(result.newAllocations.length).to.equal(4); + + const ETH32 = 32n * 10n ** 18n; + // for type2 modules: newAllocations[i] = ceilDiv(totalModuleStake, 32 ETH) * 32 ETH + const toValidatorETH = (balance: bigint) => ((balance + ETH32 - 1n) / ETH32) * ETH32; + + expect(result.newAllocations[0]).to.equal(30n * ETH32); + expect(result.newAllocations[1]).to.equal(0n); + expect(result.newAllocations[2]).to.equal(50n * ETH32); + expect(result.newAllocations[3]).to.equal(toValidatorETH(MODULE_4_BALANCE_GWEI * ONE_GWEI)); + + // all allocated deltas should be 0 + for (const a of result.allocated) { + expect(a).to.equal(0n); + } + }); + + it("Allocates to 0x02 module when buffer is large enough to push target above current allocation", async () => { + await setupModules(); + + // to make some top up in 4 module -> it should have 64 validators + // 64 * 32 = X * 32 * 20/100 -> X = 320 validators in total + // already have 143 validators (30 + 50 + 63) + // need 177 validators = 320 - 143 + // 177*32 = 5664 eth - minimum buffer + + const INCREASED_BUFFER = 5670n * 10n ** 18n; + + // Snapshot current state for comparison + const resultBefore = await stakingRouter.getDepositAllocations(BUFFER, true); + expect(resultBefore.totalAllocated).to.equal(0n, "sanity check: original buffer gives 0"); + + const result = await stakingRouter.getDepositAllocations(INCREASED_BUFFER, true); + + // Module 4 (0x02) now has capacity — new ETH is allocated + expect(result.totalAllocated).to.be.gt(32n, "totalAllocated should be > 0 with larger buffer"); + + // Modules 1-3 didn't change (0x01, no depositable keys — capacity == current) + expect(result.newAllocations[0]).to.equal(resultBefore.newAllocations[0], "module 1 unchanged"); + expect(result.newAllocations[1]).to.equal(resultBefore.newAllocations[1], "module 2 unchanged"); + 
expect(result.newAllocations[2]).to.equal(resultBefore.newAllocations[2], "module 3 unchanged"); + + // Module 4 grew + expect(result.newAllocations[3]).to.be.gt(resultBefore.newAllocations[3], "module 4 allocation increased"); + + // Delta for module 4 = newAllocation - currentAllocation = totalAllocated (since only module 4 grew) + const module4Delta = result.newAllocations[3] - resultBefore.newAllocations[3]; + expect(module4Delta).to.equal(result.totalAllocated, "all new allocation went to module 4"); + + // Verify allocated array reflects the same delta + expect(result.allocated[0]).to.equal(0n, "module 1 delta is 0"); + expect(result.allocated[1]).to.equal(0n, "module 2 delta is 0"); + expect(result.allocated[2]).to.equal(0n, "module 3 delta is 0"); + expect(result.allocated[3]).to.equal(module4Delta, "module 4 delta matches"); + }); + }); + + context("getDepositAllocations allocated (delta) array", () => { + it("Returns per-module deltas that sum to totalAllocated", async () => { + const config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 50n, + }; + + await setupModule(ctx, config); + await setupModule(ctx, config); + + const ethToDeposit = 200n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + + let allocatedSum = 0n; + for (const a of result.allocated) { + allocatedSum += a; + } + expect(allocatedSum).to.equal(result.totalAllocated); + }); + + it("Delta is zero for modules with no capacity", async () => { + const module1Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 50n, + }; + + const module2Config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 0n, + }; + + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); + + const ethToDeposit = 200n * DEFAULT_MEB; + + const result = await 
stakingRouter.getDepositAllocations(ethToDeposit, false); + expect(result.allocated.length).to.equal(2); + expect(result.allocated[0]).to.equal(module1Config.depositable * DEFAULT_MEB); + expect(result.allocated[1]).to.equal(0n); + }); + + it("Delta reflects newly allocated amount with pre-existing deposits", async () => { + const config = { + ...DEFAULT_CONFIG, + depositable: 50n, + deposited: 100n, + }; + + await setupModule(ctx, config); + + const ethToDeposit = 50n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, false); + + // allocated[0] is the delta (new allocation) + // newAllocations[0] includes existing validators + new + expect(result.allocated[0]).to.equal(result.totalAllocated); + expect(result.newAllocations[0]).to.be.equal(150n * DEFAULT_MEB); // 100 existing + 50 new = 150 total allocation after deposit + }); + + it("Returns per-module deltas that sum to totalAllocated for top-up", async () => { + const config = { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 50n, + }; + + await setupModule(ctx, config); + await setupModule(ctx, config); + + const ethToDeposit = 200n * DEFAULT_MEB; + + const result = await stakingRouter.getDepositAllocations(ethToDeposit, true); + + let allocatedSum = 0n; + for (const a of result.allocated) { + allocatedSum += a; + } + expect(allocatedSum).to.equal(result.totalAllocated); + }); + }); +}); diff --git a/test/0.8.25/stakingRouter/stakingRouter.misc.test.ts b/test/0.8.25/stakingRouter/stakingRouter.misc.test.ts new file mode 100644 index 0000000000..9d78f6a744 --- /dev/null +++ b/test/0.8.25/stakingRouter/stakingRouter.misc.test.ts @@ -0,0 +1,233 @@ +import { expect } from "chai"; +import { ZeroAddress } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { AccountingOracle__MockForStakingRouter, LidoLocator, StakingRouter__Harness } 
from "typechain-types"; + +import { certainAddress, ether, randomAddress, randomBytes32, randomWCType1 } from "lib"; + +import { deployLidoLocator, deployStakingRouter } from "test/deploy"; +import { Snapshot } from "test/suite"; + +describe("StakingRouter.sol:misc", () => { + let deployer: HardhatEthersSigner; + let admin: HardhatEthersSigner; + let stakingRouterAdmin: HardhatEthersSigner; + let user: HardhatEthersSigner; + let locator: LidoLocator; + let accountingOracle: AccountingOracle__MockForStakingRouter; + let stakingRouter: StakingRouter__Harness; + let impl: StakingRouter__Harness; + + let originalState: string; + + const lido = certainAddress("test:staking-router:lido"); + const topUpGateway = certainAddress("test:staking-router:topUpGateway"); + const depositSecurityModule = certainAddress("test:staking-router:depositSecurityModule"); + const accounting = certainAddress("test:staking-router:accounting"); + const withdrawalCredentials = randomWCType1(); + + before(async () => { + [deployer, admin, stakingRouterAdmin, user] = await ethers.getSigners(); + + accountingOracle = await ethers.deployContract("AccountingOracle__MockForStakingRouter", deployer); + locator = await deployLidoLocator({ + lido, + topUpGateway, + depositSecurityModule, + accountingOracle, + }); + + ({ stakingRouter, impl } = await deployStakingRouter( + { deployer, admin, user }, + { + lidoLocator: locator, + }, + )); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + context("initialize", () => { + it("Reverts if admin is zero address", async () => { + await expect(stakingRouter.initialize(ZeroAddress, withdrawalCredentials)).to.be.revertedWithCustomError( + stakingRouter, + "ZeroAddress", + ); + }); + + it("Initializes the contract version, sets up roles and variables", async () => { + await expect(stakingRouter.initialize(stakingRouterAdmin.address, withdrawalCredentials)) + 
.to.emit(stakingRouter, "Initialized") + .withArgs(4) + .and.to.emit(stakingRouter, "RoleGranted") + .withArgs(await stakingRouter.DEFAULT_ADMIN_ROLE(), stakingRouterAdmin.address, user.address) + .and.to.emit(stakingRouter, "WithdrawalCredentialsSet") + .withArgs(withdrawalCredentials, user.address); + + expect(await stakingRouter.getContractVersion()).to.equal(4); + expect(await stakingRouter.LIDO_LOCATOR()).to.equal(locator); + expect(await stakingRouter.getWithdrawalCredentials()).to.equal(withdrawalCredentials); + + // fails with InvalidInitialization error when called after initialize + await expect(stakingRouter.finalizeUpgrade_v4()).to.be.revertedWithCustomError(impl, "InvalidInitialization"); + }); + }); + + context("finalizeUpgrade_v4()", () => { + let DEFAULT_ADMIN_ROLE: string; + let STAKING_MODULE_MANAGE_ROLE: string; + let REPORT_EXITED_VALIDATORS_ROLE: string; + let REPORT_REWARDS_MINTED_ROLE: string; + let MANAGE_WITHDRAWAL_CREDENTIALS_ROLE: string; + let STAKING_MODULE_UNVETTING_ROLE: string; + let REPORT_VALIDATOR_EXITING_STATUS_ROLE: string; + let REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE: string; + let UNSAFE_SET_EXITED_VALIDATORS_ROLE: string; + let roles: string[]; + + beforeEach(async () => { + // Simulate old 0.8.9 StakingRouter state (v3): + // sets WITHDRAWAL_CREDENTIALS_POSITION, LIDO_POSITION, LAST_STAKING_MODULE_ID_POSITION, + // STAKING_MODULES_COUNT_POSITION, CONTRACT_VERSION_POSITION + await stakingRouter.testing_initializeV3(); + + // simulate old OZ v4.4 AccessControl state: admin has DEFAULT_ADMIN_ROLE and STAKING_MODULE_MANAGE_ROLE + DEFAULT_ADMIN_ROLE = await stakingRouter.DEFAULT_ADMIN_ROLE(); + STAKING_MODULE_MANAGE_ROLE = await stakingRouter.STAKING_MODULE_MANAGE_ROLE(); + // AccountingOracle + REPORT_EXITED_VALIDATORS_ROLE = await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE(); + // Accounting + REPORT_REWARDS_MINTED_ROLE = await stakingRouter.REPORT_REWARDS_MINTED_ROLE(); + + MANAGE_WITHDRAWAL_CREDENTIALS_ROLE = await 
stakingRouter.MANAGE_WITHDRAWAL_CREDENTIALS_ROLE(); + // DSM + STAKING_MODULE_UNVETTING_ROLE = await stakingRouter.STAKING_MODULE_UNVETTING_ROLE(); + // VEBO + REPORT_VALIDATOR_EXITING_STATUS_ROLE = await stakingRouter.REPORT_VALIDATOR_EXITING_STATUS_ROLE(); + // TW + REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE = await stakingRouter.REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE(); + UNSAFE_SET_EXITED_VALIDATORS_ROLE = await stakingRouter.UNSAFE_SET_EXITED_VALIDATORS_ROLE(); + + roles = [ + // DEFAULT_ADMIN_ROLE, + STAKING_MODULE_MANAGE_ROLE, + REPORT_EXITED_VALIDATORS_ROLE, + REPORT_REWARDS_MINTED_ROLE, + MANAGE_WITHDRAWAL_CREDENTIALS_ROLE, + STAKING_MODULE_UNVETTING_ROLE, + REPORT_VALIDATOR_EXITING_STATUS_ROLE, + REPORT_VALIDATOR_EXIT_TRIGGERED_ROLE, + UNSAFE_SET_EXITED_VALIDATORS_ROLE, + ]; + + await stakingRouter.testing_grantRoleOld(DEFAULT_ADMIN_ROLE, stakingRouterAdmin.address); + await stakingRouter.testing_grantRoleOld(STAKING_MODULE_MANAGE_ROLE, stakingRouterAdmin.address); + await stakingRouter.testing_grantRoleOld(REPORT_EXITED_VALIDATORS_ROLE, accountingOracle); + await stakingRouter.testing_grantRoleOld(REPORT_REWARDS_MINTED_ROLE, accounting); + + // simulate oracle report + await accountingOracle.mock_setProcessingState(1, true, true); + }); + + it("fails with InvalidInitialization error when called on implementation", async () => { + await expect(impl.finalizeUpgrade_v4()).to.be.revertedWithCustomError(impl, "InvalidInitialization"); + }); + + it("sets correct contract version, withdrawal credentials and admin role", async () => { + // OZ Initializable slot is 0 before migration (old Versioned used a different slot) + expect(await stakingRouter.getContractVersion()).to.equal(0); + // but old Versioned slot has v3 + expect(await stakingRouter.testing_getOldContractVersion()).to.equal(3); + + await expect(stakingRouter.finalizeUpgrade_v4()) + .to.emit(stakingRouter, "Initialized") + .withArgs(4) + .and.to.emit(stakingRouter, "RoleGranted") + .withArgs(await 
stakingRouter.DEFAULT_ADMIN_ROLE(), stakingRouterAdmin.address, user.address); + + // new OZ version is set + expect(await stakingRouter.getContractVersion()).to.be.equal(4); + + // data migrated correctly + expect(await stakingRouter.getWithdrawalCredentials()).to.equal(await stakingRouter.WC_01_MOCK()); + expect(await stakingRouter.testing_getLastModuleId()).to.equal(await stakingRouter.LAST_STAKING_MODULE_ID_MOCK()); + + // admin role granted + expect(await stakingRouter.hasRole(await stakingRouter.DEFAULT_ADMIN_ROLE(), stakingRouterAdmin.address)).to.be + .true; + }); + + it("cleans up old storage slots after migration", async () => { + await stakingRouter.finalizeUpgrade_v4(); + + // all old unstructured storage slots should be zeroed + expect(await stakingRouter.testing_getOldLidoPosition()).to.equal(ZeroAddress); + expect(await stakingRouter.testing_getOldWcPosition()).to.equal(ethers.ZeroHash); + expect(await stakingRouter.testing_getOldContractVersion()).to.equal(0); + expect(await stakingRouter.testing_getOldLastModuleIdPosition()).to.equal(0); + expect(await stakingRouter.testing_getOldModulesCountPosition()).to.equal(0); + }); + + it("migrate all defined AccessControl role and skip undefined", async () => { + const someAccount = randomAddress(); + const someNewRole = randomBytes32(); + + for (const role of roles) { + await stakingRouter.testing_grantRoleOld(role, someAccount); + } + // grant undefined role + await stakingRouter.testing_grantRoleOld(someNewRole, someAccount); + + // old slots are populated + for (const role of roles) { + expect(await stakingRouter.testing_hasRoleOld(role, someAccount)).to.be.true; + } + expect(await stakingRouter.testing_hasRoleOld(someNewRole, someAccount)).to.be.true; + + // but new OZ 5.2 hasRole() reads from a different ERC-7201 slot — roles are invisible + expect(await stakingRouter.hasRole(DEFAULT_ADMIN_ROLE, stakingRouterAdmin.address)).to.be.false; + for (const role of roles) { + expect(await 
stakingRouter.hasRole(role, someAccount)).to.be.false; + } + expect(await stakingRouter.hasRole(someNewRole, someAccount)).to.be.false; + + // migration writes DEFAULT_ADMIN_ROLE to the NEW slot, but does NOT touch old slots + await stakingRouter.finalizeUpgrade_v4(); + + // after migration: all roles should be reassigned + expect(await stakingRouter.hasRole(DEFAULT_ADMIN_ROLE, stakingRouterAdmin.address)).to.be.true; + for (const role of roles) { + expect(await stakingRouter.hasRole(role, someAccount)).to.be.true; + } + // undefined role is not migrated + expect(await stakingRouter.hasRole(someNewRole, someAccount)).to.be.false; + + // old AccessControl slots are NOT cleaned up (orphaned, inaccessible by new code) + for (const role of roles) { + expect(await stakingRouter.testing_hasRoleOld(role, someAccount)).to.be.true; + } + expect(await stakingRouter.testing_hasRoleOld(someNewRole, someAccount)).to.be.true; + }); + + it("cannot be called twice", async () => { + await stakingRouter.finalizeUpgrade_v4(); + await expect(stakingRouter.finalizeUpgrade_v4()).to.be.revertedWithCustomError(impl, "InvalidInitialization"); + }); + }); + + context("receive", () => { + it("Reverts", async () => { + await expect( + user.sendTransaction({ + to: stakingRouter, + value: ether("1.0"), + }), + ).to.be.revertedWithCustomError(stakingRouter, "DirectETHTransfer"); + }); + }); +}); diff --git a/test/0.8.25/stakingRouter/stakingRouter.module-management.test.ts b/test/0.8.25/stakingRouter/stakingRouter.module-management.test.ts new file mode 100644 index 0000000000..ef4cc3ceaf --- /dev/null +++ b/test/0.8.25/stakingRouter/stakingRouter.module-management.test.ts @@ -0,0 +1,679 @@ +import { expect } from "chai"; +import { ZeroAddress } from "ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { LidoLocator, StakingRouter } from "typechain-types"; + +import { certainAddress, getNextBlock, randomString, 
randomWCType1, WithdrawalCredentialsType } from "lib"; + +import { deployLidoLocator } from "test/deploy"; + +import { deployStakingRouter } from "../../deploy/stakingRouter"; + +const UINT64_MAX = 2n ** 64n - 1n; + +describe("StakingRouter.sol:module-management", () => { + let deployer: HardhatEthersSigner; + let admin: HardhatEthersSigner; + let user: HardhatEthersSigner; + + let locator: LidoLocator; + let stakingRouter: StakingRouter; + + const withdrawalCredentials = randomWCType1(); + const lido = certainAddress("test:staking-router-modules:lido"); // mock lido address + const topUpGateway = certainAddress("test:staking-router:topUpGateway"); + const depositSecurityModule = certainAddress("test:staking-router:depositSecurityModule"); + + beforeEach(async () => { + [deployer, admin, user] = await ethers.getSigners(); + + locator = await deployLidoLocator({ + lido, + topUpGateway, + depositSecurityModule, + }); + + ({ stakingRouter } = await deployStakingRouter({ deployer, admin }, { lidoLocator: locator })); + + // initialize staking router + await stakingRouter.initialize(admin, withdrawalCredentials); + + // grant roles + await stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin); + }); + + context("addStakingModule", () => { + const NAME = "StakingModule"; + const ADDRESS = certainAddress("test:staking-router:staking-module"); + const STAKE_SHARE_LIMIT = 1_00n; + const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; + const MODULE_FEE = 5_00n; + const TREASURY_FEE = 5_00n; + const MAX_DEPOSITS_PER_BLOCK = 150n; + const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; + + const stakingModuleConfig = { + /// @notice Maximum stake share that can be allocated to a module, in BP. + /// @dev Must be less than or equal to TOTAL_BASIS_POINTS (10_000 BP = 100%). + stakeShareLimit: STAKE_SHARE_LIMIT, + /// @notice Module's share threshold, upon crossing which, exits of validators from the module will be prioritized, in BP. 
+ /// @dev Must be less than or equal to TOTAL_BASIS_POINTS (10_000 BP = 100%) and + /// greater than or equal to `stakeShareLimit`. + priorityExitShareThreshold: PRIORITY_EXIT_SHARE_THRESHOLD, + /// @notice Part of the fee taken from staking rewards that goes to the staking module, in BP. + /// @dev Together with `treasuryFee`, must not exceed TOTAL_BASIS_POINTS. + stakingModuleFee: MODULE_FEE, + /// @notice Part of the fee taken from staking rewards that goes to the treasury, in BP. + /// @dev Together with `stakingModuleFee`, must not exceed TOTAL_BASIS_POINTS. + treasuryFee: TREASURY_FEE, + /// @notice The maximum number of validators that can be deposited in a single block. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// Value must not exceed type(uint64).max. + maxDepositsPerBlock: MAX_DEPOSITS_PER_BLOCK, + /// @notice The minimum distance between deposits in blocks. + /// @dev Must be harmonized with `OracleReportSanityChecker.appearedValidatorsPerDayLimit`. + /// Value must be > 0 and ≤ type(uint64).max. + minDepositBlockDistance: MIN_DEPOSIT_BLOCK_DISTANCE, + /// @notice The type of withdrawal credentials for creation of validators. + /// @dev 1 = 0x01 withdrawals, 2 = 0x02 withdrawals. 
+ withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + + it("Reverts if the caller does not have the role", async () => { + await expect(stakingRouter.connect(user).addStakingModule(NAME, ADDRESS, stakingModuleConfig)) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); + }); + + it("Reverts if the target share is greater than 100%", async () => { + const STAKE_SHARE_LIMIT_OVER_100 = 100_01; + + await expect( + stakingRouter.addStakingModule(NAME, ADDRESS, { + ...stakingModuleConfig, + stakeShareLimit: STAKE_SHARE_LIMIT_OVER_100, + }), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidStakeShareLimit"); + }); + + it("Reverts if the sum of module and treasury fees is greater than 100%", async () => { + const MODULE_FEE_INVALID = 100_01n - TREASURY_FEE; + + await expect( + stakingRouter.addStakingModule(NAME, ADDRESS, { + ...stakingModuleConfig, + stakingModuleFee: MODULE_FEE_INVALID, + }), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); + + const TREASURY_FEE_INVALID = 100_01n - MODULE_FEE; + + await expect( + stakingRouter.addStakingModule(NAME, ADDRESS, { + ...stakingModuleConfig, + treasuryFee: TREASURY_FEE_INVALID, + }), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); + }); + + it("Reverts if the staking module address is zero address", async () => { + await expect( + stakingRouter.addStakingModule(NAME, ZeroAddress, stakingModuleConfig), + ).to.be.revertedWithCustomError(stakingRouter, "ZeroAddress"); + }); + + it("Reverts if the staking module name is empty string", async () => { + const NAME_EMPTY_STRING = ""; + + await expect( + stakingRouter.addStakingModule(NAME_EMPTY_STRING, ADDRESS, stakingModuleConfig), + ).to.be.revertedWithCustomError(stakingRouter, "StakingModuleWrongName"); + }); + + it("Reverts if the staking module name is too long", async () => { + const 
MAX_STAKING_MODULE_NAME_LENGTH = await stakingRouter.MAX_STAKING_MODULE_NAME_LENGTH(); + const NAME_TOO_LONG = randomString(Number(MAX_STAKING_MODULE_NAME_LENGTH + 1n)); + + await expect( + stakingRouter.addStakingModule(NAME_TOO_LONG, ADDRESS, stakingModuleConfig), + ).to.be.revertedWithCustomError(stakingRouter, "StakingModuleWrongName"); + }); + + it("Reverts if the max number of staking modules is reached", async () => { + const MAX_STAKING_MODULES_COUNT = await stakingRouter.MAX_STAKING_MODULES_COUNT(); + + const moduleConfig = { + stakeShareLimit: 100, + priorityExitShareThreshold: 100, + stakingModuleFee: 100, + treasuryFee: 100, + maxDepositsPerBlock: MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: MIN_DEPOSIT_BLOCK_DISTANCE, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + + for (let i = 0; i < MAX_STAKING_MODULES_COUNT; i++) { + await stakingRouter.addStakingModule( + randomString(8), + certainAddress(`test:staking-router:staking-module-${i}`), + moduleConfig, + ); + } + + expect(await stakingRouter.getStakingModulesCount()).to.equal(MAX_STAKING_MODULES_COUNT); + + await expect(stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig)).to.be.revertedWithCustomError( + stakingRouter, + "StakingModulesLimitExceeded", + ); + }); + + it("Reverts if adding a module with the same address", async () => { + await stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig); + + await expect(stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig)).to.be.revertedWithCustomError( + stakingRouter, + "StakingModuleAddressExists", + ); + }); + + it("Reverts if the module fee sum differs from existing modules", async () => { + await stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig); + + await expect( + stakingRouter.addStakingModule("StakingModule2", certainAddress("test:staking-router:staking-module-2"), { + ...stakingModuleConfig, + stakingModuleFee: MODULE_FEE + 1n, + }), + 
).to.be.revertedWithCustomError(stakingRouter, "InconsistentFeeSum"); + }); + + it("Adds the module to stakingRouter and emits events", async () => { + const stakingModuleId = (await stakingRouter.getStakingModulesCount()) + 1n; + const moduleAddedBlock = await getNextBlock(); + + await expect(stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig)) + .to.be.emit(stakingRouter, "StakingRouterETHDeposited") + .withArgs(stakingModuleId, 0) + .and.to.be.emit(stakingRouter, "StakingModuleAdded") + .withArgs(stakingModuleId, ADDRESS, NAME, admin.address) + .and.to.be.emit(stakingRouter, "StakingModuleShareLimitSet") + .withArgs(stakingModuleId, STAKE_SHARE_LIMIT, PRIORITY_EXIT_SHARE_THRESHOLD, admin.address) + .and.to.be.emit(stakingRouter, "StakingModuleFeesSet") + .withArgs(stakingModuleId, MODULE_FEE, TREASURY_FEE, admin.address); + + expect(await stakingRouter.getStakingModule(stakingModuleId)).to.deep.equal([ + stakingModuleId, + ADDRESS, + MODULE_FEE, + TREASURY_FEE, + STAKE_SHARE_LIMIT, + 0n, // status active + NAME, + moduleAddedBlock.timestamp, + moduleAddedBlock.number, + 0n, // exited validators, + PRIORITY_EXIT_SHARE_THRESHOLD, + MAX_DEPOSITS_PER_BLOCK, + MIN_DEPOSIT_BLOCK_DISTANCE, + WithdrawalCredentialsType.WC0x01, + 0, + ]); + }); + }); + + context("updateStakingModule", () => { + const NAME = "StakingModule"; + const ADDRESS = certainAddress("test:staking-router-modules:staking-module"); + const STAKE_SHARE_LIMIT = 1_00n; + const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; + const MODULE_FEE = 5_00n; + const TREASURY_FEE = 5_00n; + const MAX_DEPOSITS_PER_BLOCK = 150n; + const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; + + let ID: bigint; + + const NEW_STAKE_SHARE_LIMIT = 2_00n; + const NEW_PRIORITY_EXIT_SHARE_THRESHOLD = NEW_STAKE_SHARE_LIMIT; + + const NEW_MODULE_FEE = 6_00n; + const NEW_TREASURY_FEE = 4_00n; + + const NEW_MAX_DEPOSITS_PER_BLOCK = 100n; + const NEW_MIN_DEPOSIT_BLOCK_DISTANCE = 20n; + + const stakingModuleConfig = { + 
stakeShareLimit: STAKE_SHARE_LIMIT, + priorityExitShareThreshold: PRIORITY_EXIT_SHARE_THRESHOLD, + stakingModuleFee: MODULE_FEE, + treasuryFee: TREASURY_FEE, + maxDepositsPerBlock: MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: MIN_DEPOSIT_BLOCK_DISTANCE, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + + beforeEach(async () => { + await stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig); + ID = await stakingRouter.getStakingModulesCount(); + }); + + it("Reverts if the caller does not have the role", async () => { + stakingRouter = stakingRouter.connect(user); + + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); + }); + + it("Reverts if the new target share is greater than 100%", async () => { + const NEW_STAKE_SHARE_LIMIT_OVER_100 = 100_01; + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT_OVER_100, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidStakeShareLimit"); + }); + + it("Reverts if the new priority exit share is greater than 100%", async () => { + const NEW_PRIORITY_EXIT_SHARE_THRESHOLD_OVER_100 = 100_01; + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD_OVER_100, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidPriorityExitShareThreshold"); + }); + + it("Reverts if the new priority exit share is less than stake 
share limit", async () => { + const UPGRADED_STAKE_SHARE_LIMIT = 55_00n; + const UPGRADED_PRIORITY_EXIT_SHARE_THRESHOLD = 50_00n; + await expect( + stakingRouter.updateStakingModule( + ID, + UPGRADED_STAKE_SHARE_LIMIT, + UPGRADED_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidPriorityExitShareThreshold"); + }); + + it("Reverts if the new deposit block distance is zero", async () => { + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + 0n, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidMinDepositBlockDistance"); + }); + + it("Reverts if the new deposit block distance is great then uint64 max", async () => { + await stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + UINT64_MAX, + ); + + expect((await stakingRouter.getStakingModule(ID)).minDepositBlockDistance).to.be.equal(UINT64_MAX); + + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + UINT64_MAX + 1n, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidMinDepositBlockDistance"); + }); + + it("Reverts if the new max deposits per block is great then uint64 max", async () => { + await stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + UINT64_MAX, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ); + + expect((await stakingRouter.getStakingModule(ID)).maxDepositsPerBlock).to.be.equal(UINT64_MAX); + + await expect( + stakingRouter.updateStakingModule( + ID, + 
NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + UINT64_MAX + 1n, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidMaxDepositPerBlockValue"); + }); + + it("Reverts if the sum of the new module and treasury fees is greater than 100%", async () => { + const NEW_MODULE_FEE_INVALID = 100_01n - TREASURY_FEE; + + await expect( + stakingRouter.updateStakingModule( + ID, + STAKE_SHARE_LIMIT, + PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE_INVALID, + TREASURY_FEE, + MAX_DEPOSITS_PER_BLOCK, + MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); + + const NEW_TREASURY_FEE_INVALID = 100_01n - MODULE_FEE; + await expect( + stakingRouter.updateStakingModule( + ID, + STAKE_SHARE_LIMIT, + PRIORITY_EXIT_SHARE_THRESHOLD, + MODULE_FEE, + NEW_TREASURY_FEE_INVALID, + MAX_DEPOSITS_PER_BLOCK, + MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); + }); + + it("Reverts if the new fee sum differs from other modules", async () => { + await stakingRouter.addStakingModule( + "StakingModule2", + certainAddress("test:staking-router-modules:staking-module-2"), + { + ...stakingModuleConfig, + }, + ); + + await expect( + stakingRouter.updateStakingModule( + ID, + STAKE_SHARE_LIMIT, + PRIORITY_EXIT_SHARE_THRESHOLD, + MODULE_FEE + 1n, + TREASURY_FEE, + MAX_DEPOSITS_PER_BLOCK, + MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ).to.be.revertedWithCustomError(stakingRouter, "InconsistentFeeSum"); + }); + + it("Update target share, module and treasury fees and emits events", async () => { + await expect( + stakingRouter.updateStakingModule( + ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + NEW_MODULE_FEE, + NEW_TREASURY_FEE, + NEW_MAX_DEPOSITS_PER_BLOCK, + NEW_MIN_DEPOSIT_BLOCK_DISTANCE, + ), + ) + .to.be.emit(stakingRouter, "StakingModuleShareLimitSet") + .withArgs(ID, NEW_STAKE_SHARE_LIMIT, 
NEW_PRIORITY_EXIT_SHARE_THRESHOLD, admin.address) + .and.to.be.emit(stakingRouter, "StakingModuleFeesSet") + .withArgs(ID, NEW_MODULE_FEE, NEW_TREASURY_FEE, admin.address); + }); + }); + + context("updateModuleShares", () => { + const NAME = "StakingModule"; + const ADDRESS = certainAddress("test:staking-router-modules:staking-module-shares"); + const STAKE_SHARE_LIMIT = 1_00n; + const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; + const MODULE_FEE = 5_00n; + const TREASURY_FEE = 5_00n; + const MAX_DEPOSITS_PER_BLOCK = 150n; + const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; + + let ID: bigint; + + const NEW_STAKE_SHARE_LIMIT = 2_00; + const NEW_PRIORITY_EXIT_SHARE_THRESHOLD = 3_00; + + const stakingModuleConfig = { + stakeShareLimit: STAKE_SHARE_LIMIT, + priorityExitShareThreshold: PRIORITY_EXIT_SHARE_THRESHOLD, + stakingModuleFee: MODULE_FEE, + treasuryFee: TREASURY_FEE, + maxDepositsPerBlock: MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: MIN_DEPOSIT_BLOCK_DISTANCE, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + + beforeEach(async () => { + await stakingRouter.addStakingModule(NAME, ADDRESS, stakingModuleConfig); + ID = await stakingRouter.getStakingModulesCount(); + + // grant the STAKING_MODULE_SHARE_MANAGE_ROLE to admin + await stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_SHARE_MANAGE_ROLE(), admin); + }); + + it("Reverts if the caller does not have the role", async () => { + await expect( + stakingRouter.connect(user).updateModuleShares(ID, NEW_STAKE_SHARE_LIMIT, NEW_PRIORITY_EXIT_SHARE_THRESHOLD), + ) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_SHARE_MANAGE_ROLE()); + }); + + it("Reverts if the staking module id does not exist", async () => { + const NON_EXISTENT_MODULE_ID = 999; + await expect( + stakingRouter.updateModuleShares( + NON_EXISTENT_MODULE_ID, + NEW_STAKE_SHARE_LIMIT, + NEW_PRIORITY_EXIT_SHARE_THRESHOLD, + 
), + ).to.be.revertedWithCustomError(stakingRouter, "StakingModuleUnregistered"); + }); + + it("Reverts if the new stake share limit is greater than 100%", async () => { + const STAKE_SHARE_LIMIT_OVER_100 = 100_01; + await expect( + stakingRouter.updateModuleShares(ID, STAKE_SHARE_LIMIT_OVER_100, STAKE_SHARE_LIMIT_OVER_100), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidStakeShareLimit"); + }); + + it("Reverts if the new priority exit share threshold is greater than 100%", async () => { + const PRIORITY_EXIT_SHARE_THRESHOLD_OVER_100 = 100_01; + await expect( + stakingRouter.updateModuleShares(ID, NEW_STAKE_SHARE_LIMIT, PRIORITY_EXIT_SHARE_THRESHOLD_OVER_100), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidPriorityExitShareThreshold"); + }); + + it("Reverts if the new priority exit share threshold is less than stake share limit", async () => { + const HIGHER_STAKE_SHARE_LIMIT = 55_00; + const LOWER_PRIORITY_EXIT_SHARE_THRESHOLD = 50_00; + await expect( + stakingRouter.updateModuleShares(ID, HIGHER_STAKE_SHARE_LIMIT, LOWER_PRIORITY_EXIT_SHARE_THRESHOLD), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidPriorityExitShareThreshold"); + }); + + it("Updates share params and emits StakingModuleShareLimitSet event", async () => { + await expect(stakingRouter.updateModuleShares(ID, NEW_STAKE_SHARE_LIMIT, NEW_PRIORITY_EXIT_SHARE_THRESHOLD)) + .to.emit(stakingRouter, "StakingModuleShareLimitSet") + .withArgs(ID, NEW_STAKE_SHARE_LIMIT, NEW_PRIORITY_EXIT_SHARE_THRESHOLD, admin.address); + + const moduleAfter = await stakingRouter.getStakingModule(ID); + expect(moduleAfter.stakeShareLimit).to.equal(NEW_STAKE_SHARE_LIMIT); + expect(moduleAfter.priorityExitShareThreshold).to.equal(NEW_PRIORITY_EXIT_SHARE_THRESHOLD); + }); + + it("Does not modify other module params (fees, deposits config)", async () => { + const moduleBefore = await stakingRouter.getStakingModule(ID); + + await stakingRouter.updateModuleShares(ID, NEW_STAKE_SHARE_LIMIT, 
NEW_PRIORITY_EXIT_SHARE_THRESHOLD); + + const moduleAfter = await stakingRouter.getStakingModule(ID); + + // share params should change + expect(moduleAfter.stakeShareLimit).to.equal(NEW_STAKE_SHARE_LIMIT); + expect(moduleAfter.priorityExitShareThreshold).to.equal(NEW_PRIORITY_EXIT_SHARE_THRESHOLD); + + // other params should remain unchanged + expect(moduleAfter.stakingModuleFee).to.equal(moduleBefore.stakingModuleFee); + expect(moduleAfter.treasuryFee).to.equal(moduleBefore.treasuryFee); + expect(moduleAfter.stakingModuleAddress).to.equal(moduleBefore.stakingModuleAddress); + expect(moduleAfter.maxDepositsPerBlock).to.equal(moduleBefore.maxDepositsPerBlock); + expect(moduleAfter.minDepositBlockDistance).to.equal(moduleBefore.minDepositBlockDistance); + }); + + it("Allows setting stake share limit and priority exit share threshold to the same value", async () => { + const SAME_VALUE = 50_00; + await expect(stakingRouter.updateModuleShares(ID, SAME_VALUE, SAME_VALUE)) + .to.emit(stakingRouter, "StakingModuleShareLimitSet") + .withArgs(ID, SAME_VALUE, SAME_VALUE, admin.address); + + const moduleAfter = await stakingRouter.getStakingModule(ID); + expect(moduleAfter.stakeShareLimit).to.equal(SAME_VALUE); + expect(moduleAfter.priorityExitShareThreshold).to.equal(SAME_VALUE); + }); + + it("Allows setting both values to zero", async () => { + await expect(stakingRouter.updateModuleShares(ID, 0, 0)) + .to.emit(stakingRouter, "StakingModuleShareLimitSet") + .withArgs(ID, 0, 0, admin.address); + + const moduleAfter = await stakingRouter.getStakingModule(ID); + expect(moduleAfter.stakeShareLimit).to.equal(0); + expect(moduleAfter.priorityExitShareThreshold).to.equal(0); + }); + + it("Allows setting both values to 100%", async () => { + const MAX_BP = 100_00; + await expect(stakingRouter.updateModuleShares(ID, MAX_BP, MAX_BP)) + .to.emit(stakingRouter, "StakingModuleShareLimitSet") + .withArgs(ID, MAX_BP, MAX_BP, admin.address); + + const moduleAfter = await 
stakingRouter.getStakingModule(ID); + expect(moduleAfter.stakeShareLimit).to.equal(MAX_BP); + expect(moduleAfter.priorityExitShareThreshold).to.equal(MAX_BP); + }); + }); + + context("updateAllStakingModulesFees", () => { + const MODULE_ONE_NAME = "StakingModule1"; + const MODULE_TWO_NAME = "StakingModule2"; + const MODULE_ONE_ADDRESS = certainAddress("test:staking-router-modules:staking-module-batch-1"); + const MODULE_TWO_ADDRESS = certainAddress("test:staking-router-modules:staking-module-batch-2"); + const STAKE_SHARE_LIMIT = 1_00n; + const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; + const MODULE_FEE = 5_00n; + const TREASURY_FEE = 5_00n; + const MAX_DEPOSITS_PER_BLOCK = 150n; + const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; + + const stakingModuleConfig = { + stakeShareLimit: STAKE_SHARE_LIMIT, + priorityExitShareThreshold: PRIORITY_EXIT_SHARE_THRESHOLD, + stakingModuleFee: MODULE_FEE, + treasuryFee: TREASURY_FEE, + maxDepositsPerBlock: MAX_DEPOSITS_PER_BLOCK, + minDepositBlockDistance: MIN_DEPOSIT_BLOCK_DISTANCE, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + + beforeEach(async () => { + await stakingRouter.addStakingModule(MODULE_ONE_NAME, MODULE_ONE_ADDRESS, stakingModuleConfig); + await stakingRouter.addStakingModule(MODULE_TWO_NAME, MODULE_TWO_ADDRESS, stakingModuleConfig); + }); + + it("Reverts if the caller does not have the role", async () => { + await expect(stakingRouter.connect(user).updateAllStakingModulesFees([6_00n, 7_00n], [4_00n, 3_00n])) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); + }); + + it("Reverts if batch arrays length differs from modules count", async () => { + await expect(stakingRouter.updateAllStakingModulesFees([6_00n], [4_00n])).to.be.revertedWithCustomError( + stakingRouter, + "ArraysLengthMismatch", + ); + + await expect(stakingRouter.updateAllStakingModulesFees([6_00n, 7_00n], 
[4_00n])).to.be.revertedWithCustomError( + stakingRouter, + "ArraysLengthMismatch", + ); + }); + + it("Reverts if any fee sum is greater than 100%", async () => { + await expect( + stakingRouter.updateAllStakingModulesFees([100_01n, 7_00n], [0n, 3_00n]), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); + }); + + it("Reverts if fee sums differ inside the batch", async () => { + await expect( + stakingRouter.updateAllStakingModulesFees([6_00n, 7_00n], [4_00n, 4_00n]), + ).to.be.revertedWithCustomError(stakingRouter, "InconsistentFeeSum"); + }); + + it("Updates fees for all modules atomically and emits events", async () => { + await expect(stakingRouter.updateAllStakingModulesFees([6_00n, 7_00n], [4_00n, 3_00n])) + .to.be.emit(stakingRouter, "StakingModuleFeesSet") + .withArgs(1n, 6_00n, 4_00n, admin.address) + .and.to.be.emit(stakingRouter, "StakingModuleFeesSet") + .withArgs(2n, 7_00n, 3_00n, admin.address); + + const moduleOne = await stakingRouter.getStakingModule(1n); + expect(moduleOne.stakingModuleFee).to.equal(6_00n); + expect(moduleOne.treasuryFee).to.equal(4_00n); + expect(moduleOne.stakeShareLimit).to.equal(STAKE_SHARE_LIMIT); + expect(moduleOne.maxDepositsPerBlock).to.equal(MAX_DEPOSITS_PER_BLOCK); + expect(moduleOne.minDepositBlockDistance).to.equal(MIN_DEPOSIT_BLOCK_DISTANCE); + + const moduleTwo = await stakingRouter.getStakingModule(2n); + expect(moduleTwo.stakingModuleFee).to.equal(7_00n); + expect(moduleTwo.treasuryFee).to.equal(3_00n); + expect(moduleTwo.stakeShareLimit).to.equal(STAKE_SHARE_LIMIT); + expect(moduleTwo.maxDepositsPerBlock).to.equal(MAX_DEPOSITS_PER_BLOCK); + expect(moduleTwo.minDepositBlockDistance).to.equal(MIN_DEPOSIT_BLOCK_DISTANCE); + }); + }); +}); diff --git a/test/0.8.9/stakingRouter/stakingRouter.module-sync.test.ts b/test/0.8.25/stakingRouter/stakingRouter.module-sync.test.ts similarity index 71% rename from test/0.8.9/stakingRouter/stakingRouter.module-sync.test.ts rename to 
test/0.8.25/stakingRouter/stakingRouter.module-sync.test.ts index 85a4a3015d..590cb486ae 100644 --- a/test/0.8.9/stakingRouter/stakingRouter.module-sync.test.ts +++ b/test/0.8.25/stakingRouter/stakingRouter.module-sync.test.ts @@ -1,29 +1,46 @@ import { bigintToHex, bufToHex } from "bigint-conversion"; import { expect } from "chai"; -import { hexlify, randomBytes } from "ethers"; import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { + AccountingOracle__MockForStakingRouter, DepositContract__MockForBeaconChainDepositor, + Lido__MockForStakingRouter, + LidoLocator, StakingModule__MockForStakingRouter, - StakingRouter, + StakingRouter__Harness, } from "typechain-types"; +import { ValidatorsCountsCorrectionStruct } from "typechain-types/contracts/0.8.25/sr/StakingRouter"; -import { ether, getNextBlock, proxify } from "lib"; - +import { + ether, + getNextBlock, + impersonate, + randomString, + randomWCType1, + StakingModuleStatus, + wcTypeMaxEB, + WithdrawalCredentialsType, +} from "lib"; + +import { deployLidoLocator, deployStakingRouter } from "test/deploy"; import { Snapshot } from "test/suite"; describe("StakingRouter.sol:module-sync", () => { let deployer: HardhatEthersSigner; let admin: HardhatEthersSigner; let user: HardhatEthersSigner; - let lido: HardhatEthersSigner; + let dsmSigner: HardhatEthersSigner; - let stakingRouter: StakingRouter; + let stakingRouter: StakingRouter__Harness; let stakingModule: StakingModule__MockForStakingRouter; let depositContract: DepositContract__MockForBeaconChainDepositor; + let accountingOracle: AccountingOracle__MockForStakingRouter; + + let locator: LidoLocator; + let lidoMock: Lido__MockForStakingRouter; let moduleId: bigint; let stakingModuleAddress: string; @@ -34,34 +51,45 @@ describe("StakingRouter.sol:module-sync", () => { const name = "myStakingModule"; const stakingModuleFee = 5_00n; const treasuryFee = 5_00n; - const stakeShareLimit = 1_00n; - const 
priorityExitShareThreshold = 2_00n; + const stakeShareLimit = 100_00n; + const priorityExitShareThreshold = 100_00n; const maxDepositsPerBlock = 150n; const minDepositBlockDistance = 25n; + const withdrawalCredentials = randomWCType1(); + const topUpGateway = "0x0000000000000000000000000000000000000001"; + const depositSecurityModule = "0x0000000000000000000000000000000000000002"; + let originalState: string; before(async () => { - [deployer, admin, user, lido] = await ethers.getSigners(); + [deployer, admin, user] = await ethers.getSigners(); + + // Deploy Lido mock + lidoMock = await ethers.deployContract("Lido__MockForStakingRouter", deployer); - depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor", deployer); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, + accountingOracle = await ethers.deployContract("AccountingOracle__MockForStakingRouter", deployer); + + locator = await deployLidoLocator({ + lido: lidoMock, + topUpGateway, + depositSecurityModule, + accountingOracle, }); - const impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); + ({ stakingRouter, depositContract } = await deployStakingRouter( + { deployer, admin }, + { lidoLocator: locator, lido: lidoMock }, + )); - [stakingRouter] = await proxify({ impl, admin }); + // initialize staking router with Lido mock + await stakingRouter.initialize(admin, withdrawalCredentials); - // initialize staking router - await stakingRouter.initialize( - admin, - lido, - hexlify(randomBytes(32)), // mock withdrawal credentials - ); + // Set staking router address on Lido mock so it can send ETH + await lidoMock.setStakingRouter(await stakingRouter.getAddress()); + + // Get DSM signer for deposit tests 
+ dsmSigner = await impersonate(depositSecurityModule, ether("10.0")); // grant roles @@ -81,16 +109,17 @@ describe("StakingRouter.sol:module-sync", () => { lastDepositAt = timestamp; lastDepositBlock = number; - await stakingRouter.addStakingModule( - name, - stakingModuleAddress, + const stakingModuleConfig = { stakeShareLimit, priorityExitShareThreshold, stakingModuleFee, treasuryFee, maxDepositsPerBlock, minDepositBlockDistance, - ); + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + + await stakingRouter.addStakingModule(name, stakingModuleAddress, stakingModuleConfig); moduleId = await stakingRouter.getStakingModulesCount(); }); @@ -114,24 +143,39 @@ describe("StakingRouter.sol:module-sync", () => { bigint, bigint, bigint, + number, + bigint, ]; // module mock state + const exitedValidators = 100n; + const depositedValidators = 1000n; + const depositableValidators = 200n; const stakingModuleSummary: Parameters = [ - 100n, // exitedValidators - 1000, // depositedValidators - 200, // depositableValidators + exitedValidators, // exitedValidators + depositedValidators, // depositedValidators + depositableValidators, // depositableValidators + ]; + + const balance = _getBalanceByValidatorsCount( + WithdrawalCredentialsType.WC0x01, + depositedValidators - exitedValidators, + ); + const stakingModuleAccounting: Parameters = [ + 0n, // moduleId + balance, // effectiveBalanceGwei + exitedValidators, // exitedValidators ]; const nodeOperatorSummary: Parameters = [ - 1, // targetLimitMode - 100n, // targetValidatorsCount + 0, // targetLimitMode + 0n, // targetValidatorsCount 0n, // stuckValidatorsCount 0n, // refundedValidatorsCount 0n, // stuckPenaltyEndTimestamp - 50, // totalExitedValidators - 1000n, // totalDepositedValidators - 200n, // depositableValidatorsCount + exitedValidators, // totalExitedValidators + depositedValidators, // totalDepositedValidators + depositableValidators, // depositableValidatorsCount ]; const nodeOperatorsCounts: 
Parameters = [ @@ -148,18 +192,22 @@ describe("StakingRouter.sol:module-sync", () => { stakingModuleFee, treasuryFee, stakeShareLimit, - Status.Active, + StakingModuleStatus.Active, name, lastDepositAt, lastDepositBlock, - 0n, // exitedValidatorsCount, + exitedValidators, priorityExitShareThreshold, maxDepositsPerBlock, minDepositBlockDistance, + WithdrawalCredentialsType.WC0x01, + balance, ]; // mocking module state await stakingModule.mock__getStakingModuleSummary(...stakingModuleSummary); + stakingModuleAccounting[0] = moduleId; + await stakingRouter.testing_setStakingModuleAccounting(...stakingModuleAccounting); await stakingModule.mock__getNodeOperatorSummary(...nodeOperatorSummary); await stakingModule.mock__nodeOperatorsCount(...nodeOperatorsCounts); await stakingModule.mock__getNodeOperatorIds(nodeOperatorsIds); @@ -289,10 +337,10 @@ describe("StakingRouter.sol:module-sync", () => { context("getStakingModuleActiveValidatorsCount", () => { it("Returns the number of active validators in the module", async () => { - const [exitedValidators, depositedValidators] = stakingModuleSummary; + const [exited, deposited] = stakingModuleSummary; expect(await stakingRouter.getStakingModuleActiveValidatorsCount(moduleId)).to.equal( - Number(depositedValidators) - Number(exitedValidators), + Number(deposited) - Number(exited), ); }); }); @@ -300,13 +348,19 @@ describe("StakingRouter.sol:module-sync", () => { context("setWithdrawalCredentials", () => { it("Reverts if the caller does not have the role", async () => { + await expect(stakingRouter.connect(user).setWithdrawalCredentials(randomWCType1())) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.MANAGE_WITHDRAWAL_CREDENTIALS_ROLE()); + }); + + it("Reverts if withdrawal credentials are empty", async () => { await expect( - stakingRouter.connect(user).setWithdrawalCredentials(hexlify(randomBytes(32))), - 
).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.MANAGE_WITHDRAWAL_CREDENTIALS_ROLE()); + stakingRouter.connect(admin).setWithdrawalCredentials(bigintToHex(0n, true, 32)), + ).to.be.revertedWithCustomError(stakingRouter, "ZeroAddress"); }); it("Set new withdrawal credentials and informs modules", async () => { - const newWithdrawalCredentials = hexlify(randomBytes(32)); + const newWithdrawalCredentials = randomWCType1(); await expect(stakingRouter.setWithdrawalCredentials(newWithdrawalCredentials)) .to.emit(stakingRouter, "WithdrawalCredentialsSet") @@ -326,7 +380,7 @@ describe("StakingRouter.sol:module-sync", () => { "72657665727420726561736f6e00000000000000000000000000000000000000", ].join(""); - await expect(stakingRouter.setWithdrawalCredentials(hexlify(randomBytes(32)))) + await expect(stakingRouter.setWithdrawalCredentials(randomWCType1())) .to.emit(stakingRouter, "WithdrawalsCredentialsChangeFailed") .withArgs(moduleId, revertReasonEncoded); }); @@ -335,7 +389,7 @@ describe("StakingRouter.sol:module-sync", () => { const shouldRunOutOfGas = true; await stakingModule.mock__onWithdrawalCredentialsChanged(false, shouldRunOutOfGas); - await expect(stakingRouter.setWithdrawalCredentials(hexlify(randomBytes(32)))).to.be.revertedWithCustomError( + await expect(stakingRouter.setWithdrawalCredentials(randomWCType1())).to.be.revertedWithCustomError( stakingRouter, "UnrecoverableModuleError", ); @@ -352,7 +406,9 @@ describe("StakingRouter.sol:module-sync", () => { stakingRouter .connect(user) .updateTargetValidatorsLimits(moduleId, NODE_OPERATOR_ID, TARGET_LIMIT_MODE, TARGET_LIMIT), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); + ) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); }); it("Redirects the call to the staking module", async () => { @@ -366,15 +422,16 @@ 
describe("StakingRouter.sol:module-sync", () => { context("reportRewardsMinted", () => { it("Reverts if the caller does not have the role", async () => { - await expect( - stakingRouter.connect(user).reportRewardsMinted([moduleId], [0n]), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.REPORT_REWARDS_MINTED_ROLE()); + await expect(stakingRouter.connect(user).reportRewardsMinted([moduleId], [0n])) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.REPORT_REWARDS_MINTED_ROLE()); }); it("Reverts if the arrays have different lengths", async () => { - await expect(stakingRouter.reportRewardsMinted([moduleId], [0n, 1n])) - .to.be.revertedWithCustomError(stakingRouter, "ArraysLengthMismatch") - .withArgs(1n, 2n); + await expect(stakingRouter.reportRewardsMinted([moduleId], [0n, 1n])).to.be.revertedWithCustomError( + stakingRouter, + "ArraysLengthMismatch", + ); }); it("Does nothing if the total shares is 0", async () => { @@ -425,17 +482,60 @@ describe("StakingRouter.sol:module-sync", () => { }); }); + context("validateReportValidatorBalancesByStakingModule", () => { + it("reverts if the report does not include all registered modules", async () => { + const secondStakingModule = await ethers.deployContract("StakingModule__MockForStakingRouter", deployer); + await stakingRouter.addStakingModule(name + "-2", await secondStakingModule.getAddress(), { + stakeShareLimit, + priorityExitShareThreshold, + stakingModuleFee, + treasuryFee, + maxDepositsPerBlock, + minDepositBlockDistance, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }); + + await expect( + stakingRouter.validateReportValidatorBalancesByStakingModule([moduleId], [1n]), + ).to.be.revertedWithCustomError(stakingRouter, "ArraysLengthMismatch"); + }); + + it("reverts if the report module ids are not in router order", async () => { + const secondStakingModule = await 
ethers.deployContract("StakingModule__MockForStakingRouter", deployer); + await stakingRouter.addStakingModule(name + "-2", await secondStakingModule.getAddress(), { + stakeShareLimit, + priorityExitShareThreshold, + stakingModuleFee, + treasuryFee, + maxDepositsPerBlock, + minDepositBlockDistance, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }); + const secondModuleId = await stakingRouter.getStakingModulesCount(); + + await expect(stakingRouter.validateReportValidatorBalancesByStakingModule([secondModuleId, moduleId], [1n, 2n])) + .to.be.revertedWithCustomError(stakingRouter, "UnexpectedModuleId") + .withArgs(moduleId, secondModuleId); + }); + + it("reverts if a reported balance exceeds the allowed gwei range", async () => { + await expect( + stakingRouter.validateReportValidatorBalancesByStakingModule([moduleId], [10n ** 27n]), + ).to.be.revertedWithCustomError(stakingRouter, "InvalidAmountGwei"); + }); + }); + context("updateExitedValidatorsCountByStakingModule", () => { it("Reverts if the caller does not have the role", async () => { - await expect( - stakingRouter.connect(user).updateExitedValidatorsCountByStakingModule([moduleId], [0n]), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE()); + await expect(stakingRouter.connect(user).updateExitedValidatorsCountByStakingModule([moduleId], [0n])) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE()); }); it("Reverts if the array lengths are different", async () => { - await expect(stakingRouter.updateExitedValidatorsCountByStakingModule([moduleId], [0n, 1n])) - .to.be.revertedWithCustomError(stakingRouter, "ArraysLengthMismatch") - .withArgs(1n, 2n); + await expect( + stakingRouter.updateExitedValidatorsCountByStakingModule([moduleId], [0n, 1n]), + ).to.be.revertedWithCustomError(stakingRouter, "ArraysLengthMismatch"); }); 
it("Reverts if the new number of exited validators is less than the previous one", async () => { @@ -531,7 +631,9 @@ describe("StakingRouter.sol:module-sync", () => { stakingRouter .connect(user) .reportStakingModuleExitedValidatorsCountByNodeOperator(moduleId, NODE_OPERATOR_IDS, VALIDATORS_COUNTS), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE()); + ) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE()); }); it("Reverts if the node operators ids are packed incorrectly", async () => { @@ -619,8 +721,8 @@ describe("StakingRouter.sol:module-sync", () => { }; const operatorSummary = { - targetLimitMode: 1, - targetValidatorsCount: 100n, + targetLimitMode: 0, + targetValidatorsCount: 0n, stuckValidatorsCount: 0n, refundedValidatorsCount: 0n, stuckPenaltyEndTimestamp: 0n, @@ -629,7 +731,7 @@ describe("StakingRouter.sol:module-sync", () => { depositableValidatorsCount: 1n, }; - const correction: StakingRouter.ValidatorsCountsCorrectionStruct = { + const correction: ValidatorsCountsCorrectionStruct = { currentModuleExitedValidatorsCount: moduleSummary.totalExitedValidators, currentNodeOperatorExitedValidatorsCount: operatorSummary.totalExitedValidators, newModuleExitedValidatorsCount: moduleSummary.totalExitedValidators, @@ -642,6 +744,11 @@ describe("StakingRouter.sol:module-sync", () => { moduleSummary.totalDepositedValidators, moduleSummary.depositableValidatorsCount, ); + const balance = _getBalanceByValidatorsCount( + WithdrawalCredentialsType.WC0x01, + moduleSummary.totalDepositedValidators - moduleSummary.totalExitedValidators, + ); + await stakingRouter.testing_setStakingModuleAccounting(moduleId, balance, moduleSummary.totalExitedValidators); const nodeOperatorSummary: Parameters = [ operatorSummary.targetLimitMode, @@ -662,7 +769,9 @@ describe("StakingRouter.sol:module-sync", () => { 
it("Reverts if the caller does not have the role", async () => { await expect( stakingRouter.connect(user).unsafeSetExitedValidatorsCount(moduleId, nodeOperatorId, true, correction), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.UNSAFE_SET_EXITED_VALIDATORS_ROLE()); + ) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.UNSAFE_SET_EXITED_VALIDATORS_ROLE()); }); it("Reverts if the number of exited validators in the module does not match what is stored on the contract", async () => { @@ -731,9 +840,9 @@ describe("StakingRouter.sol:module-sync", () => { context("onValidatorsCountsByNodeOperatorReportingFinished", () => { it("Reverts if the caller does not have the role", async () => { - await expect( - stakingRouter.connect(user).onValidatorsCountsByNodeOperatorReportingFinished(), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE()); + await expect(stakingRouter.connect(user).onValidatorsCountsByNodeOperatorReportingFinished()) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.REPORT_EXITED_VALIDATORS_ROLE()); }); it("Calls the hook on the staking module", async () => { @@ -789,7 +898,9 @@ describe("StakingRouter.sol:module-sync", () => { stakingRouter .connect(user) .decreaseStakingModuleVettedKeysCountByNodeOperator(moduleId, NODE_OPERATOR_IDS, VETTED_KEYS_COUNTS), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.STAKING_MODULE_UNVETTING_ROLE()); + ) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_UNVETTING_ROLE()); }); it("Reverts if the node operators ids are packed incorrectly", async () => { @@ -869,69 +980,105 @@ describe("StakingRouter.sol:module-sync", () => { context("deposit", () => { 
beforeEach(async () => { - stakingRouter = stakingRouter.connect(lido); - }); - - it("Reverts if the caller is not Lido", async () => { - await expect(stakingRouter.connect(user).deposit(100n, moduleId, "0x")).to.be.revertedWithCustomError( - stakingRouter, - "AppAuthLidoFailed", + // Set up Lido mock with depositable ether and fund it + const depositableAmount = ether("320.0"); // Enough for 10 deposits + await lidoMock.setDepositableEther(depositableAmount); + await lidoMock.fund({ value: depositableAmount }); + + // Set up staking module with depositable validators + await stakingModule.mock__getStakingModuleSummary(0n, 100n, 10n); // 10 depositable validators + const balance = _getBalanceByValidatorsCount( + WithdrawalCredentialsType.WC0x01, + 100n, // active validators ); + await stakingRouter.testing_setStakingModuleAccounting(moduleId, balance, 0); }); - it("Reverts if withdrawal credentials are not set", async () => { - await stakingRouter.connect(admin).setWithdrawalCredentials(bigintToHex(0n, true, 32)); - - await expect(stakingRouter.deposit(100n, moduleId, "0x")).to.be.revertedWithCustomError( + it("Reverts if the caller is not DSM", async () => { + await expect(stakingRouter.connect(user).deposit(moduleId, "0x")).to.be.revertedWithCustomError( stakingRouter, - "EmptyWithdrawalsCredentials", + "NotAuthorized", ); }); it("Reverts if the staking module is not active", async () => { - await stakingRouter.connect(admin).setStakingModuleStatus(moduleId, Status.DepositsPaused); + await stakingRouter.connect(admin).setStakingModuleStatus(moduleId, StakingModuleStatus.DepositsPaused); - await expect(stakingRouter.deposit(100n, moduleId, "0x")).to.be.revertedWithCustomError( + await expect(stakingRouter.connect(dsmSigner).deposit(moduleId, "0x")).to.be.revertedWithCustomError( stakingRouter, "StakingModuleNotActive", ); }); - it("Reverts if ether does correspond to the number of deposits", async () => { - const deposits = 2n; - const depositValue = 
ether("32.0"); - const correctAmount = deposits * depositValue; - const etherToSend = correctAmount + 1n; + it("Revert when 0 deposits", async () => { + // Set depositable ether to 0 + await lidoMock.setDepositableEther(0n); + await expect(stakingRouter.connect(dsmSigner).deposit(moduleId, "0x")).to.be.revertedWithCustomError( + stakingRouter, + "ZeroDeposits", + ); + }); - await expect( - stakingRouter.deposit(deposits, moduleId, "0x", { - value: etherToSend, - }), - ) - .to.be.revertedWithCustomError(stakingRouter, "InvalidDepositsValue") - .withArgs(etherToSend, deposits); + it("Successfully deposits when depositable ether is available", async () => { + await expect(stakingRouter.connect(dsmSigner).deposit(moduleId, "0x")).to.emit( + depositContract, + "Deposited__MockEvent", + ); }); - it("Does not submit 0 deposits", async () => { - await expect(stakingRouter.deposit(0n, moduleId, "0x")).not.to.emit(depositContract, "Deposited__MockEvent"); + it("Successfully deposits for module type 0x02 (New)", async () => { + const stakingRouterAsAdmin = stakingRouter.connect(admin); + + const newStakingModule = await ethers.deployContract("StakingModuleV2__MockForStakingRouter", deployer); + const newStakingModuleAddress = await newStakingModule.getAddress(); + const withdrawalCredentialsType = WithdrawalCredentialsType.WC0x02; + const stakingModuleConfigNew = { + stakeShareLimit, + priorityExitShareThreshold, + stakingModuleFee, + treasuryFee, + maxDepositsPerBlock, + minDepositBlockDistance, + withdrawalCredentialsType, + }; + + await stakingRouterAsAdmin.addStakingModule(`${name}-new`, newStakingModuleAddress, stakingModuleConfigNew); + + const newModuleId = await stakingRouter.getStakingModulesCount(); + + // Set up the new module with depositable validators + const exitedValidators = 0n; + const depositedValidators = 0n; + const depositableValidators = 10n; + await newStakingModule.mock__getStakingModuleSummary( + exitedValidators, + depositedValidators, + 
depositableValidators, + ); // 10 depositable validators + const validatorsBalanceGwei = _getBalanceByValidatorsCount(withdrawalCredentialsType, depositedValidators); + await stakingRouter.testing_setStakingModuleAccounting(newModuleId, validatorsBalanceGwei, exitedValidators); + + await expect(stakingRouter.connect(dsmSigner).deposit(newModuleId, "0x")).to.emit( + depositContract, + "Deposited__MockEvent", + ); }); - it("Reverts if ether does correspond to the number of deposits", async () => { - const deposits = 2n; - const depositValue = ether("32.0"); - const correctAmount = deposits * depositValue; + it("Reverts if module returns pubkeys with invalid length (not divisible by 48)", async () => { + // Mock the module to return pubkeys with invalid length (47 bytes instead of 48) + const invalidPubkeys = randomString(47); // Not divisible by PUBKEY_LENGTH (48) + const signatures = randomString(96); // Valid signature length - await expect( - stakingRouter.deposit(deposits, moduleId, "0x", { - value: correctAmount, - }), - ).to.emit(depositContract, "Deposited__MockEvent"); + await stakingModule.mock__obtainDepositData(invalidPubkeys, signatures); + + await expect(stakingRouter.connect(dsmSigner).deposit(moduleId, "0x")).to.be.revertedWithCustomError( + stakingRouter, + "WrongPubkeyLength", + ); }); }); }); -enum Status { - Active, - DepositsPaused, - Stopped, +function _getBalanceByValidatorsCount(wcType: WithdrawalCredentialsType, validatorsCount: bigint): bigint { + return (validatorsCount * wcTypeMaxEB(wcType)) / 1_000_000_000n; // in gwei } diff --git a/test/0.8.9/stakingRouter/stakingRouter.rewards.test.ts b/test/0.8.25/stakingRouter/stakingRouter.rewards.test.ts similarity index 63% rename from test/0.8.9/stakingRouter/stakingRouter.rewards.test.ts rename to test/0.8.25/stakingRouter/stakingRouter.rewards.test.ts index 04a60586c0..45099066c8 100644 --- a/test/0.8.9/stakingRouter/stakingRouter.rewards.test.ts +++ 
b/test/0.8.25/stakingRouter/stakingRouter.rewards.test.ts @@ -1,59 +1,62 @@ import { expect } from "chai"; -import { hexlify, randomBytes } from "ethers"; import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { StakingModule__MockForStakingRouter, StakingRouter } from "typechain-types"; +import { LidoLocator, StakingRouter__Harness } from "typechain-types"; -import { certainAddress, ether, proxify } from "lib"; -import { TOTAL_BASIS_POINTS } from "lib/constants"; +import { certainAddress, ether, randomWCType1 } from "lib"; +import { StakingModuleStatus, WithdrawalCredentialsType } from "lib/constants"; +import { deployLidoLocator } from "test/deploy"; import { Snapshot } from "test/suite"; +import { deployStakingRouter } from "../../deploy/stakingRouter"; + +import { CtxConfig, DEFAULT_CONFIG, setupModule } from "./helpers"; + describe("StakingRouter.sol:rewards", () => { let deployer: HardhatEthersSigner; let admin: HardhatEthersSigner; - let stakingRouter: StakingRouter; + let locator: LidoLocator; + let stakingRouter: StakingRouter__Harness; let originalState: string; + let ctx: CtxConfig; + const DEPOSIT_VALUE = ether("32.0"); - const DEFAULT_CONFIG: ModuleConfig = { - stakeShareLimit: TOTAL_BASIS_POINTS, - priorityExitShareThreshold: TOTAL_BASIS_POINTS, - moduleFee: 5_00n, - treasuryFee: 5_00n, - maxDepositsPerBlock: 150n, - minDepositBlockDistance: 25n, - }; + + const withdrawalCredentials = randomWCType1(); + const lido = certainAddress("test:staking-router-modules:lido"); // mock lido address + + const topUpGateway = certainAddress("test:staking-router:topUpGateway"); + const depositSecurityModule = certainAddress("test:staking-router:depositSecurityModule"); before(async () => { [deployer, admin] = await ethers.getSigners(); - const depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor", deployer); - const allocLib = await 
ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, + locator = await deployLidoLocator({ + lido, + topUpGateway, + depositSecurityModule, }); - const impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); - - [stakingRouter] = await proxify({ impl, admin }); + ({ stakingRouter } = await deployStakingRouter({ deployer, admin }, { lidoLocator: locator })); // initialize staking router - await stakingRouter.initialize( - admin, - certainAddress("test:staking-router-modules:lido"), // mock lido address - hexlify(randomBytes(32)), // mock withdrawal credentials - ); + await stakingRouter.initialize(admin, withdrawalCredentials); // grant roles await Promise.all([stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin)]); + + ctx = { + deployer, + admin, + stakingRouter, + }; }); beforeEach(async () => (originalState = await Snapshot.take())); @@ -76,110 +79,61 @@ describe("StakingRouter.sol:rewards", () => { depositable: 100n, }; - const [, id] = await setupModule(config); + const [, id] = await setupModule(ctx, config); expect(await stakingRouter.getStakingModuleMaxDepositsCount(id, maxDeposits * DEPOSIT_VALUE)).to.equal( config.depositable, ); }); - it("Returns even allocation between modules if target shares are equal and capacities allow for that", async () => { - const maxDeposits = 200n; + it("Returns the maximum allocation to a single module based on the value and module capacity for new module", async () => { + const maxDeposits = 150n; const config = { ...DEFAULT_CONFIG, - stakeShareLimit: 50_00n, - depositable: 50n, + depositable: 100n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, }; - const [, id1] = await setupModule(config); - const [, id2] = await setupModule(config); 
+ const [, id] = await setupModule(ctx, config); - expect(await stakingRouter.getStakingModuleMaxDepositsCount(id1, maxDeposits * DEPOSIT_VALUE)).to.equal( - config.depositable, - ); - expect(await stakingRouter.getStakingModuleMaxDepositsCount(id2, maxDeposits * DEPOSIT_VALUE)).to.equal( + expect(await stakingRouter.getStakingModuleMaxDepositsCount(id, maxDeposits * DEPOSIT_VALUE)).to.equal( config.depositable, ); }); - }); - - context("getDepositsAllocation", () => { - it("Returns 0 allocated and empty allocations when there are no modules registered", async () => { - expect(await stakingRouter.getDepositsAllocation(100n)).to.deep.equal([0, []]); - }); - - it("Returns all allocations to a single module if there is only one", async () => { - const config = { - ...DEFAULT_CONFIG, - depositable: 100n, - }; - - await setupModule(config); - expect(await stakingRouter.getDepositsAllocation(150n)).to.deep.equal([config.depositable, [config.depositable]]); - }); + it("Returns the maximum allocation based on the value and module capacity if one module on pause", async () => { + const depositableEther = ether("32") * 100n + 10n; - it("Allocates evenly if target shares are equal and capacities allow for that", async () => { const config = { ...DEFAULT_CONFIG, - stakeShareLimit: 50_00n, - priorityExitShareThreshold: 50_00n, - depositable: 50n, + depositable: 150n, }; - await setupModule(config); - await setupModule(config); + const [, id] = await setupModule(ctx, config); + await setupModule(ctx, { ...config, status: StakingModuleStatus.DepositsPaused }); - expect(await stakingRouter.getDepositsAllocation(200n)).to.deep.equal([ - config.depositable * 2n, - [config.depositable, config.depositable], - ]); + expect(await stakingRouter.getStakingModuleMaxDepositsCount(id, depositableEther)).to.equal(100n); }); - it("Allocates according to capacities at equal target shares", async () => { - const module1Config = { - ...DEFAULT_CONFIG, - stakeShareLimit: 50_00n, - 
priorityExitShareThreshold: 50_00n, - depositable: 100n, - }; + it("Returns even allocation between modules if target shares are equal and capacities allow for that", async () => { + const maxDeposits = 200n; - const module2Config = { + const config = { ...DEFAULT_CONFIG, stakeShareLimit: 50_00n, - priorityExitShareThreshold: 50_00n, depositable: 50n, }; - await setupModule(module1Config); - await setupModule(module2Config); - - expect(await stakingRouter.getDepositsAllocation(200n)).to.deep.equal([ - module1Config.depositable + module2Config.depositable, - [module1Config.depositable, module2Config.depositable], - ]); - }); - - it("Allocates according to target shares", async () => { - const module1Config = { - ...DEFAULT_CONFIG, - stakeShareLimit: 60_00n, - priorityExitShareThreshold: 60_00n, - depositable: 100n, - }; - - const module2Config = { - ...DEFAULT_CONFIG, - stakeShareLimit: 40_00n, - priorityExitShareThreshold: 40_00n, - depositable: 100n, - }; - - await setupModule(module1Config); - await setupModule(module2Config); + const [, id1] = await setupModule(ctx, config); + const [, id2] = await setupModule(ctx, config); - expect(await stakingRouter.getDepositsAllocation(200n)).to.deep.equal([180n, [100n, 80n]]); + expect(await stakingRouter.getStakingModuleMaxDepositsCount(id1, maxDeposits * DEPOSIT_VALUE)).to.equal( + config.depositable, + ); + expect(await stakingRouter.getStakingModuleMaxDepositsCount(id2, maxDeposits * DEPOSIT_VALUE)).to.equal( + config.depositable, + ); }); }); @@ -195,7 +149,7 @@ describe("StakingRouter.sol:rewards", () => { }); it("Returns empty values if there are modules but no active validators", async () => { - await setupModule(DEFAULT_CONFIG); + await setupModule(ctx, DEFAULT_CONFIG); expect(await stakingRouter.getStakingRewardsDistribution()).to.deep.equal([ [], @@ -212,7 +166,7 @@ describe("StakingRouter.sol:rewards", () => { deposited: 1000n, }; - const [module, id] = await setupModule(config); + const [module, id] = await 
setupModule(ctx, config); const precision = await stakingRouter.FEE_PRECISION_POINTS(); const basisPoints = await stakingRouter.TOTAL_BASIS_POINTS(); @@ -235,8 +189,8 @@ describe("StakingRouter.sol:rewards", () => { deposited: 1000n, }; - const [module1, id1] = await setupModule(config); - const [module2, id2] = await setupModule(config); + const [module1, id1] = await setupModule(ctx, config); + const [module2, id2] = await setupModule(ctx, config); const precision = await stakingRouter.FEE_PRECISION_POINTS(); const basisPoints = await stakingRouter.TOTAL_BASIS_POINTS(); @@ -270,8 +224,8 @@ describe("StakingRouter.sol:rewards", () => { deposited: 0n, }; - const [module1, id1] = await setupModule(module1Config); - await setupModule(module2Config); + const [module1, id1] = await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); const precision = await stakingRouter.FEE_PRECISION_POINTS(); const basisPoints = await stakingRouter.TOTAL_BASIS_POINTS(); @@ -293,10 +247,10 @@ describe("StakingRouter.sol:rewards", () => { const config = { ...DEFAULT_CONFIG, deposited: 1000n, - status: Status.Stopped, + status: StakingModuleStatus.Stopped, }; - const [module, id] = await setupModule(config); + const [module, id] = await setupModule(ctx, config); const precision = await stakingRouter.FEE_PRECISION_POINTS(); const basisPoints = await stakingRouter.TOTAL_BASIS_POINTS(); @@ -330,8 +284,8 @@ describe("StakingRouter.sol:rewards", () => { deposited: 1000n, }; - const [module1, id1] = await setupModule(module1Config); - const [module2, id2] = await setupModule(module2Config); + const [module1, id1] = await setupModule(ctx, module1Config); + const [module2, id2] = await setupModule(ctx, module2Config); const precision = await stakingRouter.FEE_PRECISION_POINTS(); const basisPoints = await stakingRouter.TOTAL_BASIS_POINTS(); @@ -381,8 +335,8 @@ describe("StakingRouter.sol:rewards", () => { deposited: 1000n, }; - await setupModule(module1Config); - await 
setupModule(module2Config); + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); const precision = await stakingRouter.FEE_PRECISION_POINTS(); @@ -418,8 +372,8 @@ describe("StakingRouter.sol:rewards", () => { deposited: 1000n, }; - await setupModule(module1Config); - await setupModule(module2Config); + await setupModule(ctx, module1Config); + await setupModule(ctx, module2Config); expect(await stakingRouter.getStakingFeeAggregateDistributionE4Precision()).to.deep.equal([500n, 500n]); }); @@ -440,68 +394,9 @@ describe("StakingRouter.sol:rewards", () => { deposited: 1000n, }; - await setupModule(module1Config); + await setupModule(ctx, module1Config); expect(await stakingRouter.getTotalFeeE4Precision()).to.equal(10_00n); }); }); - - async function setupModule({ - stakeShareLimit, - priorityExitShareThreshold, - moduleFee, - treasuryFee, - maxDepositsPerBlock, - minDepositBlockDistance, - exited = 0n, - deposited = 0n, - depositable = 0n, - status = Status.Active, - }: ModuleConfig): Promise<[StakingModule__MockForStakingRouter, bigint]> { - const modulesCount = await stakingRouter.getStakingModulesCount(); - const module = await ethers.deployContract("StakingModule__MockForStakingRouter", deployer); - - await stakingRouter - .connect(admin) - .addStakingModule( - randomBytes(8).toString(), - await module.getAddress(), - stakeShareLimit, - priorityExitShareThreshold, - moduleFee, - treasuryFee, - maxDepositsPerBlock, - minDepositBlockDistance, - ); - - const moduleId = modulesCount + 1n; - expect(await stakingRouter.getStakingModulesCount()).to.equal(modulesCount + 1n); - - await module.mock__getStakingModuleSummary(exited, deposited, depositable); - - if (status != Status.Active) { - await stakingRouter.setStakingModuleStatus(moduleId, status); - } - - return [module, moduleId]; - } }); - -enum Status { - Active, - DepositsPaused, - Stopped, -} - -interface ModuleConfig { - stakeShareLimit: bigint; - priorityExitShareThreshold: bigint; - 
moduleFee: bigint; - treasuryFee: bigint; - maxDepositsPerBlock: bigint; - minDepositBlockDistance: bigint; - exited?: bigint; - deposited?: bigint; - depositable?: bigint; - status?: Status; -} diff --git a/test/0.8.9/stakingRouter/stakingRouter.status-control.test.ts b/test/0.8.25/stakingRouter/stakingRouter.status-control.test.ts similarity index 75% rename from test/0.8.9/stakingRouter/stakingRouter.status-control.test.ts rename to test/0.8.25/stakingRouter/stakingRouter.status-control.test.ts index a023e4410a..220b596028 100644 --- a/test/0.8.9/stakingRouter/stakingRouter.status-control.test.ts +++ b/test/0.8.25/stakingRouter/stakingRouter.status-control.test.ts @@ -1,16 +1,16 @@ import { expect } from "chai"; -import { randomBytes } from "crypto"; -import { hexlify } from "ethers"; import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { StakingRouter__Harness } from "typechain-types"; +import { LidoLocator, StakingRouter__Harness } from "typechain-types"; -import { certainAddress, proxify } from "lib"; +import { certainAddress, randomWCType1, WithdrawalCredentialsType } from "lib"; +import { deployLidoLocator } from "test/deploy"; import { Snapshot } from "test/suite"; +import { deployStakingRouter } from "../../deploy/stakingRouter"; enum Status { Active, DepositsPaused, @@ -22,46 +22,49 @@ context("StakingRouter.sol:status-control", () => { let admin: HardhatEthersSigner; let user: HardhatEthersSigner; + let locator: LidoLocator; let stakingRouter: StakingRouter__Harness; let moduleId: bigint; let originalState: string; + const lido = certainAddress("test:staking-router-status:lido"); + const withdrawalCredentials = randomWCType1(); + const topUpGateway = certainAddress("test:staking-router:topUpGateway"); + const depositSecurityModule = certainAddress("test:staking-router:depositSecurityModule"); + before(async () => { [deployer, admin, user] = await ethers.getSigners(); - // deploy staking 
router - const depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor", deployer); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter__Harness", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, + locator = await deployLidoLocator({ + lido, + topUpGateway, + depositSecurityModule, }); - const impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); + // deploy staking router + ({ stakingRouter } = await deployStakingRouter({ deployer, admin }, { lidoLocator: locator })); - [stakingRouter] = await proxify({ impl, admin }); - - await stakingRouter.initialize( - admin, - certainAddress("test:staking-router-status:lido"), // mock lido address - hexlify(randomBytes(32)), // mock withdrawal credentials - ); + await stakingRouter.initialize(admin, withdrawalCredentials); // give the necessary role to the admin await stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin); + const stakingModuleConfig = { + stakeShareLimit: 1_00, + priorityExitShareThreshold: 1_00, + stakingModuleFee: 5_00, + treasuryFee: 5_00, + maxDepositsPerBlock: 150, + minDepositBlockDistance: 25, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }; + // add staking module await stakingRouter.addStakingModule( "myStakingModule", certainAddress("test:staking-router-status:staking-module"), // mock staking module address - 1_00, // target share - 1_00, // target share - 5_00, // module fee - 5_00, // treasury fee - 150, // max deposits per block - 25, // min deposit block distance + stakingModuleConfig, ); moduleId = await stakingRouter.getStakingModulesCount(); @@ -73,9 +76,9 @@ context("StakingRouter.sol:status-control", () => { context("setStakingModuleStatus", () => { it("Reverts if the caller does not have 
the role", async () => { - await expect( - stakingRouter.connect(user).setStakingModuleStatus(moduleId, Status.DepositsPaused), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); + await expect(stakingRouter.connect(user).setStakingModuleStatus(moduleId, Status.DepositsPaused)) + .to.be.revertedWithCustomError(stakingRouter, "AccessControlUnauthorizedAccount") + .withArgs(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); }); it("Reverts if the new status is the same", async () => { diff --git a/test/0.8.25/stakingRouter/stakingRouter.topUp.test.ts b/test/0.8.25/stakingRouter/stakingRouter.topUp.test.ts new file mode 100644 index 0000000000..6c26c407ee --- /dev/null +++ b/test/0.8.25/stakingRouter/stakingRouter.topUp.test.ts @@ -0,0 +1,327 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + AccountingOracle__MockForStakingRouter, + DepositContract__MockForBeaconChainDepositor, + Lido__MockForStakingRouter, + LidoLocator, + StakingModuleV2__MockForStakingRouter, + StakingRouter__Harness, +} from "typechain-types"; + +import { findEventsWithInterfaces, randomString, randomWCType1, wcTypeMaxEB } from "lib"; +import { ONE_GWEI, WithdrawalCredentialsType } from "lib/constants"; + +import { deployLidoLocator, deployStakingRouter } from "test/deploy"; +import { Snapshot } from "test/suite"; + +import { CtxConfig, DEFAULT_CONFIG, setupModule } from "./helpers"; + +describe("StakingRouter.sol:topUp", () => { + let deployer: HardhatEthersSigner; + let admin: HardhatEthersSigner; + let topUpGatewaySigner: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + let locator: LidoLocator; + let stakingRouter: StakingRouter__Harness; + let depositContract: DepositContract__MockForBeaconChainDepositor; + let lidoMock: Lido__MockForStakingRouter; + let accountingOracle: 
AccountingOracle__MockForStakingRouter; + + let originalState: string; + + let ctx: CtxConfig; + + const NEW_MEB = wcTypeMaxEB(WithdrawalCredentialsType.WC0x02); + const WEI_PER_GWEI = 1_000_000_000n; + const withdrawalCredentials = randomWCType1(); + const depositSecurityModule = "0x0000000000000000000000000000000000000002"; + + before(async () => { + [deployer, admin, topUpGatewaySigner, stranger] = await ethers.getSigners(); + // Deploy Lido mock + lidoMock = await ethers.deployContract("Lido__MockForStakingRouter", deployer); + + // deploy oracle + accountingOracle = await ethers.deployContract("AccountingOracle__MockForStakingRouter", deployer); + + locator = await deployLidoLocator({ + lido: lidoMock, + topUpGateway: await topUpGatewaySigner.getAddress(), + depositSecurityModule, + accountingOracle, + }); + + // deploy staking router + ({ stakingRouter, depositContract } = await deployStakingRouter( + { deployer, admin }, + { lidoLocator: locator, lido: lidoMock }, + )); + + await lidoMock.setStakingRouter(await stakingRouter.getAddress()); + + // initialize staking router with the mock lido and topUpGateway as a signer + await stakingRouter.initialize(admin, withdrawalCredentials); + + // grant roles + await Promise.all([stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin)]); + + ctx = { + deployer, + admin, + stakingRouter, + }; + }); + + beforeEach(async () => { + originalState = await Snapshot.take(); + }); + + afterEach(async () => { + await Snapshot.restore(originalState); + }); + + context("topUp", () => { + const KEY_INDEX = 0n; + const OPERATOR_ID = 1n; + const TOP_UP_LIMIT_GWEI = 10n * ONE_GWEI; // 10 ETH in ONE_GWEI + + function makeValidTopUpData() { + const keyIndices = [KEY_INDEX]; + const operatorIds = [OPERATOR_ID]; + // topUpLimits are now in wei (TOP_UP_LIMIT_GWEI is already 10 ETH in gwei, convert to wei) + const topUpLimits = [TOP_UP_LIMIT_GWEI * WEI_PER_GWEI]; + const pubkeys = [randomString(48)]; + + return { 
keyIndices, operatorIds, topUpLimits, pubkeys }; + } + + it("Reverts if caller is not TopUpGateway", async () => { + const config = { + ...DEFAULT_CONFIG, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }; + + const [, id] = await setupModule(ctx, config); + const { keyIndices, operatorIds, topUpLimits, pubkeys } = makeValidTopUpData(); + + await expect( + stakingRouter.connect(stranger).topUp(id, keyIndices, operatorIds, pubkeys, topUpLimits), + ).to.be.revertedWithCustomError(stakingRouter, "NotAuthorized"); + }); + + it("Reverts if the module does not exist", async () => { + const { keyIndices, operatorIds, topUpLimits, pubkeys } = makeValidTopUpData(); + + await expect( + stakingRouter.connect(topUpGatewaySigner).topUp(1n, keyIndices, operatorIds, pubkeys, topUpLimits), + ).to.be.revertedWithCustomError(stakingRouter, "StakingModuleUnregistered"); + }); + + it("Reverts if the module is Legacy (top-ups only supported for 0x02)", async () => { + const [, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }); + + const { keyIndices, operatorIds, topUpLimits, pubkeys } = makeValidTopUpData(); + + await expect( + stakingRouter.connect(topUpGatewaySigner).topUp(id, keyIndices, operatorIds, pubkeys, topUpLimits), + ).to.be.revertedWithCustomError(stakingRouter, "WrongWithdrawalCredentialsType"); + }); + + it("Reverts if keyIndices array is empty", async () => { + const [, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + const keyIndices: bigint[] = []; + const operatorIds: bigint[] = []; + const topUpLimits: bigint[] = []; + const pubkeys: string[] = []; + + await expect( + stakingRouter.connect(topUpGatewaySigner).topUp(id, keyIndices, operatorIds, pubkeys, topUpLimits), + ).to.be.revertedWithCustomError(stakingRouter, "EmptyKeysList"); + }); + + it("Reverts if pubkeys array length doesn't match keyIndices 
count", async () => { + const [, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + const keyIndices = [0n, 1n]; + const operatorIds = [0n, 0n]; + const topUpLimits = [10n * ONE_GWEI * WEI_PER_GWEI, 20n * ONE_GWEI * WEI_PER_GWEI]; // in wei + const pubkeys = [randomString(48)]; // Only 1 key, but 2 expected + + await expect( + stakingRouter.connect(topUpGatewaySigner).topUp(id, keyIndices, operatorIds, pubkeys, topUpLimits), + ).to.be.revertedWithCustomError(stakingRouter, "ArraysLengthMismatch"); + }); + + it("Does not perform deposits when module allocation is 0", async () => { + const [stakingModule, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + depositable: 0n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + // Set depositable ether to 0 (no ETH available) + await lidoMock.setDepositableEther(0n); + + const pubkeys = [randomString(48)]; + // Mock module returns 0 allocations + await stakingModule.mock__setTopUpDepositData([0n]); + + const keyIndices = [0n]; + const operatorIds = [0n]; + const topUpLimits = [10n * ONE_GWEI * WEI_PER_GWEI]; // in wei + + const tx = await stakingRouter + .connect(topUpGatewaySigner) + .topUp(id, keyIndices, operatorIds, pubkeys, topUpLimits); + + const receipt = await tx.wait(); + const depositEvents = findEventsWithInterfaces(receipt!, "Deposited__MockEvent", [depositContract.interface]); + + expect(depositEvents.length).to.equal(0); + }); + + it("Performs top-up for a New module for all keys", async () => { + const [stakingModule, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + deposited: 100n, + validatorsBalanceGwei: 100n * 32n * 10n ** 9n, //100 x 32 eth / 1 gwei + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + // topUpLimits are now in wei + const topUpWei = [ + 10n * 10n ** 18n, // 10 ETH in wei + 20n * 10n ** 18n, // 20 ETH in wei + 30n * 10n ** 18n, // 30 ETH in wei + ]; + + const 
pubkeys = [randomString(48), randomString(48), randomString(48)]; + + // Mock module to return these allocations (in wei) + await stakingModule.mock__setTopUpDepositData(topUpWei); + + const totalTopUpWei = topUpWei.reduce((acc, v) => acc + v, 0n); + + // Set depositable ether in lido mock + await lidoMock.setDepositableEther(100n * NEW_MEB); + // Fund lido mock with ETH + await lidoMock.fund({ value: totalTopUpWei }); + + const keyIndices = [0n, 1n, 2n]; + const operatorIds = [0n, 0n, 0n]; + + const tx = await stakingRouter.connect(topUpGatewaySigner).topUp(id, keyIndices, operatorIds, pubkeys, topUpWei); + + const receipt = await tx.wait(); + const depositEvents = findEventsWithInterfaces(receipt!, "Deposited__MockEvent", [depositContract.interface]); + + expect(depositEvents.length).to.equal(topUpWei.length); + }); + + it("Reverts when allocation exceeds module's target", async () => { + const [stakingModule, id] = (await setupModule(ctx, { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, // 50% + priorityExitShareThreshold: 50_00n, + depositable: 2n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + })) as [StakingModuleV2__MockForStakingRouter, bigint]; + + // Add second module to split allocation + await setupModule(ctx, { + ...DEFAULT_CONFIG, + stakeShareLimit: 50_00n, + priorityExitShareThreshold: 50_00n, + depositable: 2n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + const depositableEth = 2n * NEW_MEB; + + // Mock module returns allocations that exceed target (in wei) + const pubkeys = [randomString(48), randomString(48)]; + // These allocations will exceed 50% of depositableEth + const topUpWei = [1500n * ONE_GWEI * WEI_PER_GWEI, 1500n * ONE_GWEI * WEI_PER_GWEI]; // 3000 ETH total, but module only gets 50% = 2048 ETH + await stakingModule.mock__setTopUpDepositData(topUpWei); + + await lidoMock.setDepositableEther(depositableEth); + + const keyIndices = [0n, 1n]; + const operatorIds = [0n, 0n]; + + await expect( + 
stakingRouter.connect(topUpGatewaySigner).topUp(id, keyIndices, operatorIds, pubkeys, topUpWei), + ).to.be.revertedWithCustomError(stakingRouter, "ModuleReturnExceedTarget"); + }); + + it("Reverts when top up amount for key is below 1 ETH", async () => { + const reducedBalanceGwei = (100n * NEW_MEB - 64n * 10n ** 18n) / 10n ** 9n; + + const [stakingModule, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + deposited: 100n, + depositable: 100n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + validatorsBalanceGwei: reducedBalanceGwei, + }); + + const pubkeys = [randomString(48)]; + const topUpWei = [500_000_000n * WEI_PER_GWEI]; // 0.5 ETH in wei + await stakingModule.mock__setTopUpDepositData(topUpWei); + + const depositableEth = 100n * NEW_MEB; + await lidoMock.setDepositableEther(depositableEth); + await lidoMock.fund({ value: depositableEth }); + + const keyIndices = [0n]; + const operatorIds = [0n]; + + const beaconChainDepositor = await ethers.getContractFactory("BeaconChainDepositor"); + await expect( + stakingRouter.connect(topUpGatewaySigner).topUp(id, keyIndices, operatorIds, pubkeys, topUpWei), + ).to.be.revertedWithCustomError(beaconChainDepositor, "DepositAmountTooLow"); + }); + + it("Zero allocations from module result in no deposits", async () => { + const [stakingModule, id] = await setupModule(ctx, { + ...DEFAULT_CONFIG, + depositable: 100n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x02, + }); + + const pubkeys = [randomString(48)]; + // Mock module returns 0 allocation + await stakingModule.mock__setTopUpDepositData([0n]); + + await lidoMock.setDepositableEther(100n * NEW_MEB); + + const keyIndices = [0n]; + const operatorIds = [0n]; + const topUpLimits = [10n * ONE_GWEI * WEI_PER_GWEI]; // in wei + + const tx = await stakingRouter + .connect(topUpGatewaySigner) + .topUp(id, keyIndices, operatorIds, pubkeys, topUpLimits); + + const receipt = await tx.wait(); + const depositEvents = 
findEventsWithInterfaces(receipt!, "Deposited__MockEvent", [depositContract.interface]); + + expect(depositEvents.length).to.equal(0); + }); + }); +}); diff --git a/test/0.8.25/topUpGateway/topUpGateway.test.ts b/test/0.8.25/topUpGateway/topUpGateway.test.ts new file mode 100644 index 0000000000..2e6fa070ba --- /dev/null +++ b/test/0.8.25/topUpGateway/topUpGateway.test.ts @@ -0,0 +1,577 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; +import { time } from "@nomicfoundation/hardhat-network-helpers"; + +import type { TopUpGateway__Harness } from "typechain-types"; +import { Lido__MockForTopUpGateway, LidoLocator, StakingRouter__MockForTopUpGateway } from "typechain-types"; + +import { proxify } from "lib/proxy"; + +import { deployLidoLocator } from "test/deploy"; +import { Snapshot } from "test/suite"; + +describe("TopUpGateway.sol", () => { + let admin: HardhatEthersSigner; + let topUpOperator: HardhatEthersSigner; + let limitsManager: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + let lido: Lido__MockForTopUpGateway; + let locator: LidoLocator; + let stakingRouter: StakingRouter__MockForTopUpGateway; + let topUpGateway: TopUpGateway__Harness; + + let snapshot: string; + let topUpRole: string; + let manageLimitsRole: string; + + const MODULE_ID = 1n; + const FAR_FUTURE_EPOCH = (1n << 64n) - 1n; + const SAMPLE_PUBKEY = `0x${"11".repeat(48)}`; + const DEFAULT_MAX_VALIDATORS = 5n; + const DEFAULT_MIN_BLOCK_DISTANCE = 1n; + const DEFAULT_MAX_ROOT_AGE = 300n; + const G_INDEX = ethers.zeroPadValue("0x01", 32); + const ZERO_BYTES_31 = "00".repeat(31); + const WC_TYPE_02 = `0x02${ZERO_BYTES_31}`; + const WC_TYPE_01 = `0x01${ZERO_BYTES_31}`; + // Mainnet-like values: targetBalance = 2046.75 ETH, minTopUp = 1 ETH + const DEFAULT_TARGET_BALANCE_GWEI = 204675n * 10n ** 7n; // 2046.75 ETH in Gwei + const DEFAULT_MIN_TOP_UP_GWEI = 1n * 10n ** 9n; // 1 ETH in Gwei + 
const SLOTS_PER_EPOCH = 32n; + + type TopUpData = { + moduleId: bigint; + keyIndices: bigint[]; + operatorIds: bigint[]; + validatorIndices: bigint[]; + beaconRootData: { + childBlockTimestamp: bigint; + slot: bigint; + proposerIndex: bigint; + }; + validatorWitness: Array<{ + proofValidator: string[]; + pubkey: string; + effectiveBalance: bigint; + slashed: boolean; + activationEligibilityEpoch: bigint; + activationEpoch: bigint; + exitEpoch: bigint; + withdrawableEpoch: bigint; + }>; + pendingBalanceGwei: bigint[]; + }; + + beforeEach(async () => { + [admin, topUpOperator, limitsManager, stranger] = await ethers.getSigners(); + snapshot = await Snapshot.take(); + lido = await ethers.deployContract("Lido__MockForTopUpGateway"); + stakingRouter = await ethers.deployContract("StakingRouter__MockForTopUpGateway"); + locator = await deployLidoLocator({ + stakingRouter: await stakingRouter.getAddress(), + lido: await lido.getAddress(), + }); + + const impl = await ethers.deployContract("TopUpGateway__Harness", [ + await locator.getAddress(), + G_INDEX, + G_INDEX, + 0, + SLOTS_PER_EPOCH, + ]); + + [topUpGateway] = await proxify({ impl, admin }); + + await topUpGateway.initialize( + admin.address, + DEFAULT_MAX_VALIDATORS, + DEFAULT_MIN_BLOCK_DISTANCE, + DEFAULT_MAX_ROOT_AGE, + DEFAULT_TARGET_BALANCE_GWEI, + DEFAULT_MIN_TOP_UP_GWEI, + ); + + topUpRole = await topUpGateway.TOP_UP_ROLE(); + manageLimitsRole = await topUpGateway.MANAGE_LIMITS_ROLE(); + await topUpGateway.grantRole(topUpRole, topUpOperator.address); + await topUpGateway.grantRole(manageLimitsRole, limitsManager.address); + await stakingRouter.setWithdrawalCredentials(MODULE_ID, WC_TYPE_02); + }); + + afterEach(async () => { + await Snapshot.restore(snapshot); + }); + + const buildTopUpData = async (): Promise => { + const timestamp = BigInt(await time.latest()); + + return { + moduleId: MODULE_ID, + keyIndices: [1n], + operatorIds: [1n], + validatorIndices: [1n], + beaconRootData: { + childBlockTimestamp: 
timestamp, + slot: 123n, + proposerIndex: 1n, + }, + validatorWitness: [ + { + proofValidator: [], + pubkey: SAMPLE_PUBKEY, + effectiveBalance: 32n * 10n ** 9n, + slashed: false, + activationEligibilityEpoch: 0n, + activationEpoch: 0n, + exitEpoch: FAR_FUTURE_EPOCH, + withdrawableEpoch: FAR_FUTURE_EPOCH, + }, + ], + pendingBalanceGwei: [0n], + }; + }; + + describe("initialize", () => { + it("initializes config and roles", async () => { + expect(await topUpGateway.getMaxValidatorsPerTopUp()).to.equal(DEFAULT_MAX_VALIDATORS); + expect(await topUpGateway.getMinBlockDistance()).to.equal(DEFAULT_MIN_BLOCK_DISTANCE); + expect(await topUpGateway.getLastTopUpTimestamp()).to.equal(0n); + expect(await topUpGateway.hasRole(await topUpGateway.DEFAULT_ADMIN_ROLE(), admin.address)).to.be.true; + expect(await topUpGateway.hasRole(topUpRole, admin.address)).to.be.false; + expect(await topUpGateway.harness_getLocator()).to.equal(await locator.getAddress()); + }); + + it("reverts on double initialization", async () => { + await expect( + topUpGateway.initialize( + admin.address, + DEFAULT_MAX_VALIDATORS, + DEFAULT_MIN_BLOCK_DISTANCE, + DEFAULT_MAX_ROOT_AGE, + DEFAULT_TARGET_BALANCE_GWEI, + DEFAULT_MIN_TOP_UP_GWEI, + ), + ).to.be.revertedWithCustomError(topUpGateway, "InvalidInitialization"); + }); + + it("reverts when maxValidatorsPerTopUp is zero", async () => { + const impl = await ethers.deployContract("TopUpGateway__Harness", [ + await locator.getAddress(), + G_INDEX, + G_INDEX, + 0, + SLOTS_PER_EPOCH, + ]); + const [gateway] = await proxify({ impl, admin }); + await expect( + gateway.initialize( + admin.address, + 0n, + DEFAULT_MIN_BLOCK_DISTANCE, + DEFAULT_MAX_ROOT_AGE, + DEFAULT_TARGET_BALANCE_GWEI, + DEFAULT_MIN_TOP_UP_GWEI, + ), + ).to.be.revertedWithCustomError(gateway, "ZeroValue"); + }); + + it("reverts when minBlockDistance is zero", async () => { + const impl = await ethers.deployContract("TopUpGateway__Harness", [ + await locator.getAddress(), + G_INDEX, + G_INDEX, + 
0, + SLOTS_PER_EPOCH, + ]); + const [gateway] = await proxify({ impl, admin }); + await expect( + gateway.initialize( + admin.address, + DEFAULT_MAX_VALIDATORS, + 0n, + DEFAULT_MAX_ROOT_AGE, + DEFAULT_TARGET_BALANCE_GWEI, + DEFAULT_MIN_TOP_UP_GWEI, + ), + ).to.be.revertedWithCustomError(gateway, "ZeroValue"); + }); + + it("reverts when calling initialize on the implementation directly", async () => { + const impl = await ethers.deployContract("TopUpGateway__Harness", [ + await locator.getAddress(), + G_INDEX, + G_INDEX, + 0, + SLOTS_PER_EPOCH, + ]); + await expect( + impl.initialize( + admin.address, + DEFAULT_MAX_VALIDATORS, + DEFAULT_MIN_BLOCK_DISTANCE, + DEFAULT_MAX_ROOT_AGE, + DEFAULT_TARGET_BALANCE_GWEI, + DEFAULT_MIN_TOP_UP_GWEI, + ), + ).to.be.revertedWithCustomError(impl, "InvalidInitialization"); + }); + }); + + describe("limits management", () => { + it("allows manage limits role to set the max validators per top up", async () => { + const newLimit = DEFAULT_MAX_VALIDATORS + 1n; + await expect(topUpGateway.connect(limitsManager).setMaxValidatorsPerTopUp(newLimit)) + .to.emit(topUpGateway, "MaxValidatorsPerTopUpChanged") + .withArgs(newLimit); + expect(await topUpGateway.getMaxValidatorsPerTopUp()).to.equal(newLimit); + }); + + it("reverts when non-manager tries to set the max validators per top up", async () => { + await expect(topUpGateway.connect(stranger).setMaxValidatorsPerTopUp(DEFAULT_MAX_VALIDATORS + 1n)) + .to.be.revertedWithCustomError(topUpGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, manageLimitsRole); + }); + + it("allows manage limits role to set the min block distance", async () => { + const newDistance = DEFAULT_MIN_BLOCK_DISTANCE + 10n; + await expect(topUpGateway.connect(limitsManager).setMinBlockDistance(newDistance)) + .to.emit(topUpGateway, "MinBlockDistanceChanged") + .withArgs(newDistance); + expect(await topUpGateway.getMinBlockDistance()).to.equal(newDistance); + }); + + it("reverts when non-manager 
tries to set the min block distance", async () => { + await expect(topUpGateway.connect(stranger).setMinBlockDistance(DEFAULT_MIN_BLOCK_DISTANCE + 10n)) + .to.be.revertedWithCustomError(topUpGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, manageLimitsRole); + }); + + it("allows manage limits role to set top-up balance limits", async () => { + const newTarget = DEFAULT_TARGET_BALANCE_GWEI + 10n ** 9n; + const newMinTopUp = DEFAULT_MIN_TOP_UP_GWEI + 10n ** 8n; + await expect(topUpGateway.connect(limitsManager).setTopUpBalanceLimits(newTarget, newMinTopUp)) + .to.emit(topUpGateway, "TopUpBalanceLimitsChanged") + .withArgs(newTarget, newMinTopUp); + expect(await topUpGateway.getTargetBalanceGwei()).to.equal(newTarget); + expect(await topUpGateway.getMinTopUpGwei()).to.equal(newMinTopUp); + }); + + it("reverts when non-manager tries to set top-up balance limits", async () => { + await expect( + topUpGateway.connect(stranger).setTopUpBalanceLimits(DEFAULT_TARGET_BALANCE_GWEI, DEFAULT_MIN_TOP_UP_GWEI), + ) + .to.be.revertedWithCustomError(topUpGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, manageLimitsRole); + }); + + it("reverts when minTopUp exceeds targetBalance", async () => { + await expect(topUpGateway.connect(limitsManager).setTopUpBalanceLimits(100n, 200n)).to.be.revertedWithCustomError( + topUpGateway, + "MinTopUpExceedsTarget", + ); + }); + }); + + describe("topUp", () => { + it("reverts when caller lacks the role", async () => { + const data = await buildTopUpData(); + await expect(topUpGateway.connect(stranger).topUp(data)) + .to.be.revertedWithCustomError(topUpGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, topUpRole); + }); + + it("reverts when validator list is empty", async () => { + const data = await buildTopUpData(); + data.validatorIndices = []; + data.keyIndices = []; + data.operatorIds = []; + data.validatorWitness = []; + + await 
expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "WrongArrayLength", + ); + }); + + it("reverts when array lengths mismatch", async () => { + const data = await buildTopUpData(); + data.keyIndices = [1n, 2n]; + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "WrongArrayLength", + ); + }); + + it("reverts when validators count exceeds the limit", async () => { + await topUpGateway.connect(limitsManager).setMaxValidatorsPerTopUp(1n); + const data = await buildTopUpData(); + data.validatorIndices = [1n, 2n]; + data.keyIndices = [1n, 2n]; + data.operatorIds = [1n, 2n]; + const secondPubkey = `0x${"22".repeat(48)}`; + data.validatorWitness = [ + data.validatorWitness[0], + { + ...data.validatorWitness[0], + pubkey: secondPubkey, + }, + ]; + data.pendingBalanceGwei = [0n, 0n]; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "MaxValidatorsPerTopUpExceeded", + ); + }); + it("reverts when validatorIndices contain duplicates", async () => { + const data = await buildTopUpData(); + data.validatorIndices = [1n, 1n]; + data.keyIndices = [1n, 1n]; + data.operatorIds = [1n, 1n]; + const secondPubkey = `0x${"22".repeat(48)}`; + data.validatorWitness = [ + data.validatorWitness[0], + { + ...data.validatorWitness[0], + pubkey: secondPubkey, + }, + ]; + data.pendingBalanceGwei = [0n, 0n]; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "DuplicateValidatorIndex", + ); + }); + + it("reverts when beacon data is too old", async () => { + await time.increase(400); + const now = BigInt(await time.latest()); + const data = await buildTopUpData(); + data.beaconRootData.childBlockTimestamp = now - 400n; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "RootIsTooOld", + ); + }); + 
+ it("reverts when root precedes last top up", async () => { + const timestamp = BigInt(await time.latest()); + await topUpGateway.harness_setLastTopUpTimestamp(timestamp); + const data = await buildTopUpData(); + data.beaconRootData.childBlockTimestamp = timestamp; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "RootPrecedesLastTopUp", + ); + }); + + it("reverts when withdrawal credentials type is not 0x02", async () => { + await stakingRouter.setWithdrawalCredentials(MODULE_ID, WC_TYPE_01); + const data = await buildTopUpData(); + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "WrongWithdrawalCredentials", + ); + }); + + it("reverts when block distance is not met", async () => { + // Set a large min block distance so we can test the revert + await topUpGateway.connect(limitsManager).setMinBlockDistance(100n); + + // First successful top-up sets lastTopUpBlock + const data = await buildTopUpData(); + await topUpGateway.connect(topUpOperator).topUp(data); + + // Immediately try again - should fail since we haven't mined enough blocks + const data2 = await buildTopUpData(); + data2.beaconRootData.slot = data.beaconRootData.slot + 1n; + + await expect(topUpGateway.connect(topUpOperator).topUp(data2)).to.be.revertedWithCustomError( + topUpGateway, + "MinBlockDistanceNotMet", + ); + }); + + it("returns zero top-up limit when balance exceeds target", async () => { + const data = await buildTopUpData(); + data.validatorWitness[0].effectiveBalance = DEFAULT_TARGET_BALANCE_GWEI - DEFAULT_MIN_TOP_UP_GWEI + 1n; + data.pendingBalanceGwei = [0n]; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [0n]); + }); + + it("reverts when pubkey length is invalid", async () => { + const data = await buildTopUpData(); + 
data.validatorWitness[0].pubkey = "0x1234"; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "WrongPubkeyLength", + ); + }); + + it("calls StakingRouter.topUp and updates last timestamp", async () => { + const data = await buildTopUpData(); + data.pendingBalanceGwei = [0n]; + // topUp = targetBalance - currentTotal + const expectedTopUpGwei = DEFAULT_TARGET_BALANCE_GWEI - data.validatorWitness[0].effectiveBalance; + const expectedTopUpWei = expectedTopUpGwei * 1_000_000_000n; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [expectedTopUpWei]) + .and.to.emit(topUpGateway, "LastTopUpChanged"); + + const lastTimestamp = await topUpGateway.getLastTopUpTimestamp(); + expect(lastTimestamp).to.be.gt(0n); + expect(await stakingRouter.topUpCalls()).to.equal(1n); + }); + + it("reduces top-up limit by pending deposit amount", async () => { + const data = await buildTopUpData(); + const pendingAmount = 100n * 10n ** 9n; + data.pendingBalanceGwei = [pendingAmount]; // 100 Gwei + + const expectedTopUpGwei = DEFAULT_TARGET_BALANCE_GWEI - data.validatorWitness[0].effectiveBalance - pendingAmount; + // topUpLimits are now in wei + const expectedTopUpWei = expectedTopUpGwei * 1_000_000_000n; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [expectedTopUpWei]); + }); + + it("returns zero when topUp < minTopUp (balance + pending just below target)", async () => { + const data = await buildTopUpData(); + // Set balance so that topUp = targetBalance - currentTotal < minTopUp + // targetBalance = 2046.75 ETH, minTopUp = 1 ETH → threshold = 2045.75 ETH + data.validatorWitness[0].effectiveBalance = DEFAULT_TARGET_BALANCE_GWEI - DEFAULT_MIN_TOP_UP_GWEI + 1n; + 
data.pendingBalanceGwei = [0n]; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [0n]); + }); + + it("returns zero when balance + pending exactly equals target", async () => { + const data = await buildTopUpData(); + data.validatorWitness[0].effectiveBalance = 2045n * 10n ** 9n; + data.pendingBalanceGwei[0] = DEFAULT_TARGET_BALANCE_GWEI - data.validatorWitness[0].effectiveBalance; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [0n]); + }); + + it("returns exactly minTopUp when balance is at threshold", async () => { + const data = await buildTopUpData(); + // Set balance so topUp = exactly minTopUp (= 1 ETH) + data.validatorWitness[0].effectiveBalance = DEFAULT_TARGET_BALANCE_GWEI - DEFAULT_MIN_TOP_UP_GWEI; + data.pendingBalanceGwei = [0n]; + + const expectedTopUpWei = DEFAULT_MIN_TOP_UP_GWEI * 1_000_000_000n; + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [expectedTopUpWei]); + }); + + it("returns zero when validator is slashed", async () => { + const data = await buildTopUpData(); + data.validatorWitness[0].slashed = true; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [0n]); + }); + + it("returns zero when validator has exitEpoch set", async () => { + const data = await buildTopUpData(); + data.validatorWitness[0].exitEpoch = 1000n; // not FAR_FUTURE_EPOCH + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], 
[0n]); + }); + + it("returns zero when validator has withdrawableEpoch set", async () => { + const data = await buildTopUpData(); + data.validatorWitness[0].withdrawableEpoch = 2000n; // not FAR_FUTURE_EPOCH + + await expect(topUpGateway.connect(topUpOperator).topUp(data)) + .to.emit(stakingRouter, "TopUpCalled") + .withArgs(MODULE_ID, data.keyIndices, data.operatorIds, [SAMPLE_PUBKEY], [0n]); + }); + + it("revert if validator is not active", async () => { + const data = await buildTopUpData(); + const epoch = data.beaconRootData.slot / SLOTS_PER_EPOCH; + // Validator should be activated earlier than current epoch + data.validatorWitness[0].activationEpoch = epoch + 1n; + + await expect(topUpGateway.connect(topUpOperator).topUp(data)).to.be.revertedWithCustomError( + topUpGateway, + "ValidatorIsNotActivated", + ); + }); + }); + + describe("role management", () => { + it("DEFAULT_ADMIN_ROLE can grant roles", async () => { + expect(await topUpGateway.hasRole(topUpRole, stranger.address)).to.be.false; + await topUpGateway.connect(admin).grantRole(topUpRole, stranger.address); + expect(await topUpGateway.hasRole(topUpRole, stranger.address)).to.be.true; + }); + + it("DEFAULT_ADMIN_ROLE can revoke roles", async () => { + await topUpGateway.connect(admin).grantRole(topUpRole, stranger.address); + expect(await topUpGateway.hasRole(topUpRole, stranger.address)).to.be.true; + await topUpGateway.connect(admin).revokeRole(topUpRole, stranger.address); + expect(await topUpGateway.hasRole(topUpRole, stranger.address)).to.be.false; + }); + + it("non-admin cannot grant roles", async () => { + await expect(topUpGateway.connect(stranger).grantRole(topUpRole, stranger.address)) + .to.be.revertedWithCustomError(topUpGateway, "AccessControlUnauthorizedAccount") + .withArgs(stranger.address, await topUpGateway.DEFAULT_ADMIN_ROLE()); + }); + }); + + describe("canTopUp", () => { + it("returns false when module is not registered", async () => { + expect(await 
topUpGateway.canTopUp(999n)).to.equal(false); + }); + + it("returns false when module is inactive", async () => { + await stakingRouter.setModuleActive(MODULE_ID, false); + expect(await topUpGateway.canTopUp(MODULE_ID)).to.equal(false); + }); + + it("returns false when block distance is not met", async () => { + await topUpGateway.connect(limitsManager).setMinBlockDistance(DEFAULT_MIN_BLOCK_DISTANCE + 1n); + await topUpGateway.harness_setLastTopUpData(); + expect(await topUpGateway.canTopUp(MODULE_ID)).to.equal(false); + }); + + it("returns false when Lido cannot deposit", async () => { + await lido.setCanDeposit(false); + expect(await topUpGateway.canTopUp(MODULE_ID)).to.equal(false); + }); + + it("returns false when withdrawal credentials are not 0x02", async () => { + await stakingRouter.setWithdrawalCredentials(MODULE_ID, WC_TYPE_01); + expect(await topUpGateway.canTopUp(MODULE_ID)).to.equal(false); + }); + + it("returns true when all conditions are satisfied", async () => { + expect(await topUpGateway.canTopUp(MODULE_ID)).to.equal(true); + }); + }); +}); diff --git a/test/0.8.9/accounting.handleOracleReport.test.ts b/test/0.8.9/accounting.handleOracleReport.test.ts index 4615d2293b..426771bc0f 100644 --- a/test/0.8.9/accounting.handleOracleReport.test.ts +++ b/test/0.8.9/accounting.handleOracleReport.test.ts @@ -56,6 +56,14 @@ describe("Accounting.sol:report", () => { new VaultHub__MockForAccountingReport__factory(deployer).deploy(), ]); + await stakingRouter.mock__getStakingRewardsDistribution( + [], // recipients + [], // stakingModuleIds + [], // stakingModuleFees + 0, // totalFee + 100n * 10n ** 18n, // precisionPoints = 100% + ); + locator = await deployLidoLocator( { lido, @@ -69,7 +77,8 @@ describe("Accounting.sol:report", () => { deployer, ); - const accountingImpl = await ethers.deployContract("Accounting", [locator, lido], deployer); + const accountingImpl = await ethers.deployContract("Accounting", [locator, lido]); + const accountingProxy = await 
ethers.deployContract( "OssifiableProxy", [accountingImpl, deployer, new Uint8Array()], @@ -83,11 +92,12 @@ describe("Accounting.sol:report", () => { }); function report(overrides?: Partial): ReportValuesStruct { + const now = Math.floor(Date.now() / 1000); return { - timestamp: 0n, - timeElapsed: 0n, - clValidators: 0n, - clBalance: 0n, + timestamp: BigInt(now), + timeElapsed: 12n, + clValidatorsBalance: 0n, + clPendingBalance: 0n, withdrawalVaultBalance: 0n, elRewardsVaultBalance: 0n, sharesRequestedToBurn: 0n, @@ -124,28 +134,32 @@ describe("Accounting.sol:report", () => { }); context("handleOracleReport", () => { - it("Update CL validators count if reported more", async () => { - let depositedValidators = 100n; - await lido.mock__setDepositedValidators(depositedValidators); + it("Update CL balances when reported", async () => { + await lido.mock__setDepositedValidators(100n); + + // Setup deposits mock in StakingRouter + await stakingRouter.mock__setDepositAmountFromLastSlot(ether("150")); - // first report, 100 validators await accounting.handleOracleReport( report({ - clValidators: depositedValidators, + clValidatorsBalance: ether("100"), + clPendingBalance: ether("50"), }), ); - expect(await lido.reportClValidators()).to.equal(depositedValidators); + expect(await lido.reportClValidatorsBalance()).to.equal(ether("100")); + expect(await lido.reportClPendingBalance()).to.equal(ether("50")); - depositedValidators = 101n; - await lido.mock__setDepositedValidators(depositedValidators); + await lido.mock__setDepositedValidators(101n); + await stakingRouter.mock__setDepositAmountFromLastSlot(ether("20")); - // second report, 101 validators await accounting.handleOracleReport( report({ - clValidators: depositedValidators, + clValidatorsBalance: ether("110"), + clPendingBalance: ether("60"), }), ); - expect(await lido.reportClValidators()).to.equal(depositedValidators); + expect(await lido.reportClValidatorsBalance()).to.equal(ether("110")); + expect(await 
lido.reportClPendingBalance()).to.equal(ether("60")); }); it("Reverts if the `checkAccountingOracleReport` sanity check fails", async () => { @@ -181,19 +195,6 @@ describe("Accounting.sol:report", () => { ).to.be.revertedWithCustomError(accounting, "IncorrectReportTimestamp"); }); - it("Reverts if the reported validators count is less than the current count", async () => { - const depositedValidators = 100n; - await expect( - accounting.handleOracleReport( - report({ - clValidators: depositedValidators, - }), - ), - ) - .to.be.revertedWithCustomError(accounting, "IncorrectReportValidators") - .withArgs(100n, 0n, 0n); - }); - it("Does not revert if the `checkWithdrawalQueueOracleReport` sanity check fails but no withdrawal batches were reported", async () => { await oracleReportSanityChecker.mock__checkWithdrawalQueueOracleReportReverts(true); await withdrawalQueue.mock__isPaused(true); @@ -283,7 +284,7 @@ describe("Accounting.sol:report", () => { await expect( accounting.handleOracleReport( report({ - clBalance: 1n, // made 1 wei of profit, triggers reward processing + clValidatorsBalance: 1n, // made 1 wei of profit, triggers reward processing }), ), ).to.be.revertedWithPanic(0x01); // assert @@ -312,7 +313,7 @@ describe("Accounting.sol:report", () => { await expect( accounting.handleOracleReport( report({ - clBalance: 1n, // made 1 wei of profit, triggers reward processing + clValidatorsBalance: 1n, // made 1 wei of profit, triggers reward processing }), ), ).to.be.revertedWithPanic(0x01); // assert @@ -338,7 +339,7 @@ describe("Accounting.sol:report", () => { await expect( accounting.handleOracleReport( report({ - clBalance: 1n, + clValidatorsBalance: 1n, }), ), ).not.to.emit(stakingRouter, "Mock__MintedRewardsReported"); @@ -363,10 +364,10 @@ describe("Accounting.sol:report", () => { precisionPoints, ); - const clBalance = ether("1.0"); + const clValidatorsBalance = ether("1.0"); const expectedSharesToMint = - (clBalance * totalFee * (await 
lido.getTotalShares())) / - (((await lido.getTotalPooledEther()) + clBalance) * precisionPoints - clBalance * totalFee); + (clValidatorsBalance * totalFee * (await lido.getTotalShares())) / + (((await lido.getTotalPooledEther()) + clValidatorsBalance) * precisionPoints - clValidatorsBalance * totalFee); const expectedModuleRewardInShares = expectedSharesToMint / (totalFee / stakingModule.fee); const expectedTreasuryCutInShares = expectedSharesToMint - expectedModuleRewardInShares; @@ -374,7 +375,7 @@ describe("Accounting.sol:report", () => { await expect( accounting.handleOracleReport( report({ - clBalance: ether("1.0"), // 1 ether of profit + clValidatorsBalance: ether("1.0"), // 1 ether of profit }), ), ) @@ -406,18 +407,18 @@ describe("Accounting.sol:report", () => { precisionPoints, ); - const clBalance = ether("1.0"); + const clValidatorsBalance = ether("1.0"); const expectedSharesToMint = - (clBalance * totalFee * (await lido.getTotalShares())) / - (((await lido.getTotalPooledEther()) + clBalance) * precisionPoints - clBalance * totalFee); + (clValidatorsBalance * totalFee * (await lido.getTotalShares())) / + (((await lido.getTotalPooledEther()) + clValidatorsBalance) * precisionPoints - clValidatorsBalance * totalFee); const expectedTreasuryCutInShares = expectedSharesToMint; await expect( accounting.handleOracleReport( report({ - clBalance: ether("1.0"), // 1 ether of profit + clValidatorsBalance: ether("1.0"), // 1 ether of profit }), ), ) diff --git a/test/0.8.9/contracts/AccountingOracle__MockForSanityChecker.sol b/test/0.8.9/contracts/AccountingOracle__MockForSanityChecker.sol index fb4dff79c9..4446170ea8 100644 --- a/test/0.8.9/contracts/AccountingOracle__MockForSanityChecker.sol +++ b/test/0.8.9/contracts/AccountingOracle__MockForSanityChecker.sol @@ -36,8 +36,8 @@ contract AccountingOracle__MockForSanityChecker { ReportValues( data.refSlot * SECONDS_PER_SLOT, slotsElapsed * SECONDS_PER_SLOT, - data.numValidators, - data.clBalanceGwei * 1e9, + 
data.clValidatorsBalanceGwei * 1e9, + data.clPendingBalanceGwei * 1e9, data.withdrawalVaultBalance, data.elRewardsVaultBalance, data.sharesRequestedToBurn, diff --git a/test/0.8.9/contracts/Accounting__MockForAccountingOracle.sol b/test/0.8.9/contracts/Accounting__MockForAccountingOracle.sol index 15ae72c3f5..267ec10460 100644 --- a/test/0.8.9/contracts/Accounting__MockForAccountingOracle.sol +++ b/test/0.8.9/contracts/Accounting__MockForAccountingOracle.sol @@ -13,8 +13,13 @@ contract Accounting__MockForAccountingOracle is IReportReceiver { } HandleOracleReportCallData public lastCall__handleOracleReport; + uint256 public totalDepositsRecorded; function handleOracleReport(ReportValues memory values) external override { lastCall__handleOracleReport = HandleOracleReportCallData(values, ++lastCall__handleOracleReport.callCount); } + + function recordDeposit(uint256 amount) external { + totalDepositsRecorded += amount; + } } diff --git a/test/0.8.9/contracts/EIP7251ConsolidationRequest__Mock.sol b/test/0.8.9/contracts/EIP7251ConsolidationRequest__Mock.sol new file mode 100644 index 0000000000..62d79fc060 --- /dev/null +++ b/test/0.8.9/contracts/EIP7251ConsolidationRequest__Mock.sol @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +/** + * @notice This is a mock of EIP-7251's consolidation request pre-deploy contract. 
+ */ +contract EIP7251ConsolidationRequest__Mock { + uint256[100] __gap; // NB: to avoid storage collision with the predeployed withdrawals contract https://github.com/NomicFoundation/edr/issues/865 + bytes public fee; + bool public mock__failOnAddRequest; + bool public mock__failOnGetFee; + + bool public constant MOCK = true; + + event ConsolidationRequestAdded__Mock(bytes request, uint256 fee); + + function mock__setFailOnAddRequest(bool _failOnAddRequest) external { + mock__failOnAddRequest = _failOnAddRequest; + } + + function mock__setFailOnGetFee(bool _failOnGetFee) external { + mock__failOnGetFee = _failOnGetFee; + } + + function mock__setFee(uint256 _fee) external { + require(_fee > 0, "fee must be greater than 0"); + fee = abi.encode(_fee); + } + + function mock__setFeeRaw(bytes calldata _rawFeeBytes) external { + fee = _rawFeeBytes; + } + + // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-7251.md#add-consolidation-request + fallback(bytes calldata input) external payable returns (bytes memory) { + // calculate the fee path + if (input.length == 0) { + require(!mock__failOnGetFee, "Inhibitor still active"); + return fee; + } + + // add consolidation request path + require(input.length == 48 * 2, "Invalid callData length"); // 48 bytes source + 48 bytes target + require(!mock__failOnAddRequest, "fail on add request"); + + uint256 feeValue = abi.decode(fee, (uint256)); + if (msg.value < feeValue) { + revert("Insufficient value for fee"); + } + + emit ConsolidationRequestAdded__Mock(input, msg.value); + } +} diff --git a/test/0.8.9/contracts/LidoLocator__MockForSanityChecker.sol b/test/0.8.9/contracts/LidoLocator__MockForSanityChecker.sol index 48d7118523..175e225804 100644 --- a/test/0.8.9/contracts/LidoLocator__MockForSanityChecker.sol +++ b/test/0.8.9/contracts/LidoLocator__MockForSanityChecker.sol @@ -23,6 +23,7 @@ contract LidoLocator__MockForSanityChecker is ILidoLocator { address oracleDaemonConfig; address validatorExitDelayVerifier; address 
triggerableWithdrawalsGateway; + address consolidationGateway; address accounting; address predepositGuarantee; address wstETH; @@ -30,6 +31,7 @@ contract LidoLocator__MockForSanityChecker is ILidoLocator { address vaultFactory; address lazyOracle; address operatorGrid; + address topUpGateway; } address public immutable lido; @@ -47,6 +49,7 @@ contract LidoLocator__MockForSanityChecker is ILidoLocator { address public immutable oracleDaemonConfig; address public immutable validatorExitDelayVerifier; address public immutable triggerableWithdrawalsGateway; + address public immutable consolidationGateway; address public immutable accounting; address public immutable predepositGuarantee; address public immutable wstETH; @@ -54,6 +57,7 @@ contract LidoLocator__MockForSanityChecker is ILidoLocator { address public immutable vaultFactory; address public immutable lazyOracle; address public immutable operatorGrid; + address public immutable topUpGateway; constructor(ContractAddresses memory addresses) { lido = addresses.lido; @@ -71,6 +75,7 @@ contract LidoLocator__MockForSanityChecker is ILidoLocator { oracleDaemonConfig = addresses.oracleDaemonConfig; validatorExitDelayVerifier = addresses.validatorExitDelayVerifier; triggerableWithdrawalsGateway = addresses.triggerableWithdrawalsGateway; + consolidationGateway = addresses.consolidationGateway; accounting = addresses.accounting; wstETH = addresses.wstETH; predepositGuarantee = addresses.predepositGuarantee; @@ -78,6 +83,7 @@ contract LidoLocator__MockForSanityChecker is ILidoLocator { vaultFactory = addresses.vaultFactory; lazyOracle = addresses.lazyOracle; operatorGrid = addresses.operatorGrid; + topUpGateway = addresses.topUpGateway; } function coreComponents() external view returns (address, address, address, address, address, address) { diff --git a/test/0.8.9/contracts/LidoLocator__MockMutable.sol b/test/0.8.9/contracts/LidoLocator__MockMutable.sol index 99c4aefaa4..8417d2b013 100644 --- 
a/test/0.8.9/contracts/LidoLocator__MockMutable.sol +++ b/test/0.8.9/contracts/LidoLocator__MockMutable.sol @@ -22,6 +22,7 @@ contract LidoLocator__MockMutable is ILidoLocator { address oracleDaemonConfig; address validatorExitDelayVerifier; address triggerableWithdrawalsGateway; + address consolidationGateway; address accounting; address predepositGuarantee; address wstETH; @@ -29,6 +30,7 @@ contract LidoLocator__MockMutable is ILidoLocator { address vaultFactory; address lazyOracle; address operatorGrid; + address topUpGateway; } error ZeroAddress(); @@ -48,6 +50,7 @@ contract LidoLocator__MockMutable is ILidoLocator { address public immutable oracleDaemonConfig; address public immutable validatorExitDelayVerifier; address public immutable triggerableWithdrawalsGateway; + address public immutable consolidationGateway; address public immutable accounting; address public immutable predepositGuarantee; address public immutable wstETH; @@ -55,6 +58,7 @@ contract LidoLocator__MockMutable is ILidoLocator { address public immutable vaultFactory; address public immutable lazyOracle; address public immutable operatorGrid; + address public immutable topUpGateway; constructor(Config memory _config) { accountingOracle = _assertNonZero(_config.accountingOracle); @@ -72,6 +76,7 @@ contract LidoLocator__MockMutable is ILidoLocator { oracleDaemonConfig = _assertNonZero(_config.oracleDaemonConfig); validatorExitDelayVerifier = _assertNonZero(_config.validatorExitDelayVerifier); triggerableWithdrawalsGateway = _assertNonZero(_config.triggerableWithdrawalsGateway); + consolidationGateway = _assertNonZero(_config.consolidationGateway); accounting = _assertNonZero(_config.accounting); wstETH = _assertNonZero(_config.wstETH); predepositGuarantee = _assertNonZero(_config.predepositGuarantee); @@ -79,6 +84,7 @@ contract LidoLocator__MockMutable is ILidoLocator { vaultFactory = _assertNonZero(_config.vaultFactory); lazyOracle = _assertNonZero(_config.lazyOracle); operatorGrid = 
_assertNonZero(_config.operatorGrid); + topUpGateway = _assertNonZero(_config.topUpGateway); } function coreComponents() external view returns (address, address, address, address, address, address) { diff --git a/test/0.8.9/contracts/Lido__MockForAccounting.sol b/test/0.8.9/contracts/Lido__MockForAccounting.sol index 7e8209971d..5fe4a4ade7 100644 --- a/test/0.8.9/contracts/Lido__MockForAccounting.sol +++ b/test/0.8.9/contracts/Lido__MockForAccounting.sol @@ -7,9 +7,13 @@ contract Lido__MockForAccounting { uint256 public depositedValidatorsValue; uint256 public reportClValidators; uint256 public reportClBalance; + uint256 public reportClValidatorsBalance; + uint256 public reportClPendingBalance; + uint256 public depositedLastReport; + uint256 public depositedCurrentReport; - // Emitted when validators number delivered by the oracle - event CLValidatorsUpdated(uint256 indexed reportTimestamp, uint256 preCLValidators, uint256 postCLValidators); + // Emitted when CL balances are updated by the oracle + event CLBalancesUpdated(uint256 indexed reportTimestamp, uint256 clValidatorsBalance, uint256 clPendingBalance); event Mock__CollectRewardsAndProcessWithdrawals( uint256 _reportTimestamp, uint256 _reportClBalance, @@ -31,14 +35,46 @@ contract Lido__MockForAccounting { depositedValidatorsValue = _amount; } + function mock__setClValidatorsBalance(uint256 _amount) external { + reportClValidatorsBalance = _amount; + } + + function mock__setClPendingBalance(uint256 _amount) external { + reportClPendingBalance = _amount; + } + + function mock__setDepositedLastReportBalance(uint256 _amount) external { + depositedLastReport = _amount; + } + + function mock__setDepositedCurrentReportBalance(uint256 _amount) external { + depositedCurrentReport = _amount; + } + function getBeaconStat() external view returns (uint256 depositedValidators, uint256 beaconValidators, uint256 beaconBalance) { depositedValidators = depositedValidatorsValue; - beaconValidators = reportClValidators; - 
beaconBalance = 0; + beaconValidators = depositedValidators; + beaconBalance = reportClValidatorsBalance + reportClPendingBalance; + } + + function getBalanceStats() + external + view + returns ( + uint256 clValidatorsBalanceAtLastReport, + uint256 clPendingBalanceAtLastReport, + uint256 depositedSinceLastReport, + uint256 depositedForCurrentReport + ) + { + clValidatorsBalanceAtLastReport = reportClValidatorsBalance; + clPendingBalanceAtLastReport = reportClPendingBalance; + depositedSinceLastReport = depositedLastReport; + depositedForCurrentReport = depositedCurrentReport; } function getTotalPooledEther() external pure returns (uint256) { @@ -91,24 +127,15 @@ contract Lido__MockForAccounting { uint256 _sharesMintedAsFees ) external {} - /** - * @notice Process CL related state changes as a part of the report processing - * @dev All data validation was done by Accounting and OracleReportSanityChecker - * @param _reportTimestamp timestamp of the report - * @param _preClValidators number of validators in the previous CL state (for event compatibility) - * @param _reportClValidators number of validators in the current CL state - * @param _reportClBalance total balance of the current CL state - */ function processClStateUpdate( uint256 _reportTimestamp, - uint256 _preClValidators, - uint256 _reportClValidators, - uint256 _reportClBalance + uint256 _clValidatorsBalance, + uint256 _clPendingBalance ) external { - reportClValidators = _reportClValidators; - reportClBalance = _reportClBalance; + reportClValidatorsBalance = _clValidatorsBalance; + reportClPendingBalance = _clPendingBalance; - emit CLValidatorsUpdated(_reportTimestamp, _preClValidators, _reportClValidators); + emit CLBalancesUpdated(_reportTimestamp, _clValidatorsBalance, _clPendingBalance); } function mintShares(address _recipient, uint256 _sharesAmount) external { diff --git a/test/0.8.9/contracts/Lido__MockForDepositSecurityModule.sol b/test/0.8.9/contracts/Lido__MockForDepositSecurityModule.sol index 
f65ceae4fe..76b466ca94 100644 --- a/test/0.8.9/contracts/Lido__MockForDepositSecurityModule.sol +++ b/test/0.8.9/contracts/Lido__MockForDepositSecurityModule.sol @@ -18,12 +18,12 @@ contract Lido__MockForDepositSecurityModule { } function deposit( - uint256 maxDepositsCount, + uint256 maxDepositsAmount, uint256 stakingModuleId, bytes calldata depositCalldata ) external returns (uint256 keysCount) { - emit StakingModuleDeposited(maxDepositsCount, uint24(stakingModuleId), depositCalldata); - return maxDepositsCount; + emit StakingModuleDeposited(maxDepositsAmount, uint24(stakingModuleId), depositCalldata); + return maxDepositsAmount; } function canDeposit() external view returns (bool) { diff --git a/test/0.8.9/contracts/Lido__MockForSanityChecker.sol b/test/0.8.9/contracts/Lido__MockForSanityChecker.sol new file mode 100644 index 0000000000..09c0356f61 --- /dev/null +++ b/test/0.8.9/contracts/Lido__MockForSanityChecker.sol @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +contract Lido__MockForSanityChecker { + uint256 public clValidatorsBalance; + uint256 public clPendingBalance; + uint256 public depositedLastReport; + uint256 public depositedCurrentReport; + uint256 public contractVersion; + + function mock__setBalanceStats( + uint256 _clValidatorsBalance, + uint256 _clPendingBalance, + uint256 _depositedLastReport, + uint256 _depositedCurrentReport + ) external { + clValidatorsBalance = _clValidatorsBalance; + clPendingBalance = _clPendingBalance; + depositedLastReport = _depositedLastReport; + depositedCurrentReport = _depositedCurrentReport; + } + + function mock__setContractVersion(uint256 _version) external { + contractVersion = _version; + } + + function getBalanceStats() + external + view + returns ( + uint256 clValidatorsBalanceAtLastReport, + uint256 clPendingBalanceAtLastReport, + uint256 depositedSinceLastReport, + uint256 depositedForCurrentReport + ) + { + 
clValidatorsBalanceAtLastReport = clValidatorsBalance; + clPendingBalanceAtLastReport = clPendingBalance; + depositedSinceLastReport = depositedLastReport; + depositedForCurrentReport = depositedCurrentReport; + } + + function getContractVersion() external view returns (uint256) { + return contractVersion; + } +} diff --git a/test/0.8.9/contracts/NodeOperatorsRegistry__Mock.sol b/test/0.8.9/contracts/NodeOperatorsRegistry__Mock.sol new file mode 100644 index 0000000000..28de36e2d4 --- /dev/null +++ b/test/0.8.9/contracts/NodeOperatorsRegistry__Mock.sol @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only +pragma solidity 0.8.9; + +/** + * @notice Mock NodeOperatorsRegistry for testing + * @dev This mock is permissive - it accepts any pubkey for any (nodeOpId, keyIndex) combination + * Tests can optionally configure specific keys using setSigningKey() + */ +contract NodeOperatorsRegistry__Mock { + mapping(uint256 => mapping(uint256 => bytes)) public signingKeys; + + // If true, return any non-empty key even if not explicitly set + bool public permissiveMode = true; + + function setSigningKey(uint256 nodeOperatorId, uint256 keyIndex, bytes memory key) external { + signingKeys[nodeOperatorId][keyIndex] = key; + } + + function setPermissiveMode(bool _permissive) external { + permissiveMode = _permissive; + } + + function getSigningKey( + uint256 nodeOperatorId, + uint256 keyIndex + ) external view returns (bytes memory key, bytes memory depositSignature, bool used) { + key = signingKeys[nodeOperatorId][keyIndex]; + + // In permissive mode, return empty key if not explicitly set + // The ValidatorsExitBus contract will skip validation for empty keys + // This allows tests to work without pre-configuring every possible (nodeOpId, keyIndex) combination + // Tests can still explicitly set keys using setSigningKey() if needed + + depositSignature = new bytes(96); + used = false; + } + + function getNodeOperatorsCount() external pure returns 
(uint256) { + return 100; + } +} diff --git a/test/0.8.9/contracts/OracleReportSanityCheckerWrapper.sol b/test/0.8.9/contracts/OracleReportSanityCheckerWrapper.sol index 1be8722d58..02c4f940cb 100644 --- a/test/0.8.9/contracts/OracleReportSanityCheckerWrapper.sol +++ b/test/0.8.9/contracts/OracleReportSanityCheckerWrapper.sol @@ -7,40 +7,57 @@ pragma solidity 0.8.9; import { OracleReportSanityChecker, LimitsList, - LimitsListPacked, - LimitsListPacker + AccountingCoreLimitsPacked, + OperationalLimitsPacked, + LimitsListPacker, + LimitsListUnpacker } from "contracts/0.8.9/sanity_checks/OracleReportSanityChecker.sol"; contract OracleReportSanityCheckerWrapper is OracleReportSanityChecker { using LimitsListPacker for LimitsList; + using LimitsListUnpacker for AccountingCoreLimitsPacked; - LimitsListPacked private _limitsListPacked; + // Test-only storage for codec roundtrip checks; these are not the parent's private slots. + AccountingCoreLimitsPacked private _accountingCoreLimitsPacked; + OperationalLimitsPacked private _operationalLimitsPacked; constructor( address _lidoLocator, - address _accountingOracle, address _accounting, address _admin, - LimitsList memory _limitsList - ) OracleReportSanityChecker(_lidoLocator, _accountingOracle, _accounting, _admin, _limitsList) {} - - function addReportData(uint256 _timestamp, uint256 _exitedValidatorsCount, uint256 _negativeCLRebase) public { - _addReportData(_timestamp, _exitedValidatorsCount, _negativeCLRebase); + LimitsList memory _limitsList, + bool _postMigrationFirstReportDone + ) OracleReportSanityChecker(_lidoLocator, _accounting, _admin, _limitsList) { + if (_postMigrationFirstReportDone) { + _finalizePostReportState(0, 0); + } } - function sumNegativeRebasesNotOlderThan(uint256 _timestamp) public view returns (uint256) { - return _sumNegativeRebasesNotOlderThan(_timestamp); + function addReportData(uint256 _timestamp, uint256 _clBalance, uint256 _deposits, uint256 _clWithdrawals) public { + 
_addReportData(_timestamp, _clBalance, _deposits, _clWithdrawals); } - function exitedValidatorsAtTimestamp(uint256 _timestamp) public view returns (uint256) { - return _exitedValidatorsAtTimestamp(_timestamp); + function exposeAccountingCorePackedLimits() public view returns (AccountingCoreLimitsPacked memory) { + return _accountingCoreLimitsPacked; } - function exposePackedLimits() public view returns (LimitsListPacked memory) { - return _limitsListPacked; + function exposeOperationalPackedLimits() public view returns (OperationalLimitsPacked memory) { + return _operationalLimitsPacked; } function packAndStore() public { - _limitsListPacked = getOracleReportLimits().pack(); + LimitsList memory limits = getOracleReportLimits(); + _accountingCoreLimitsPacked = limits.packAccountingCore(); + _operationalLimitsPacked = limits.packOperational(); + } + + function packRawLimits( + LimitsList memory _limitsList + ) external pure returns (AccountingCoreLimitsPacked memory, OperationalLimitsPacked memory) { + return (_limitsList.packAccountingCore(), _limitsList.packOperational()); + } + + function roundtripRawLimits(LimitsList memory _limitsList) external pure returns (LimitsList memory) { + return _limitsList.packAccountingCore().unpack(_limitsList.packOperational()); } } diff --git a/test/0.8.9/contracts/OracleReportSanityChecker__MockForAccounting.sol b/test/0.8.9/contracts/OracleReportSanityChecker__MockForAccounting.sol index 4d0235eb3d..673064e783 100644 --- a/test/0.8.9/contracts/OracleReportSanityChecker__MockForAccounting.sol +++ b/test/0.8.9/contracts/OracleReportSanityChecker__MockForAccounting.sol @@ -9,7 +9,7 @@ contract OracleReportSanityChecker__MockForAccounting { bool private checkSimulatedShareRateReverts; uint256 private _withdrawals; uint256 private _elRewards; - uint256 private _simulatedSharesToBurn; + uint256 private _sharesFromWQToBurn; uint256 private _sharesToBurn; error CheckAccountingOracleReportReverts(); @@ -18,13 +18,15 @@ contract 
OracleReportSanityChecker__MockForAccounting { function checkAccountingOracleReport( uint256, //_timeElapsed, - uint256, //_preCLBalance, - uint256, //_postCLBalance, + uint256, //_preCLValidatorsBalance, + uint256, //_preCLPendingBalance, + uint256, //_postCLValidatorsBalance, + uint256, //_postCLPendingBalance, uint256, //_withdrawalVaultBalance, uint256, //_elRewardsVaultBalance, uint256, //_sharesRequestedToBurn, - uint256, //_preCLValidators, - uint256 //_postCLValidators + uint256, //_deposits + uint256 //_withdrawalsVaultTransfer ) external view { if (checkAccountingOracleReportReverts) revert CheckAccountingOracleReportReverts(); } @@ -37,31 +39,27 @@ contract OracleReportSanityChecker__MockForAccounting { } function smoothenTokenRebase( - uint256, // _preTotalPooledEther, - uint256, // _preTotalShares, + uint256, // _preInternalEther, + uint256, // _preInternalShares, uint256, // _preCLBalance, uint256, // _postCLBalance, uint256, // _withdrawalVaultBalance, uint256, // _elRewardsVaultBalance, uint256, // _sharesRequestedToBurn, - uint256, // _etherToLockForWithdrawals, - uint256 // _newSharesToBurnForWithdrawals - ) - external - view - returns (uint256 withdrawals, uint256 elRewards, uint256 simulatedSharesToBurn, uint256 sharesToBurn) - { + uint256, // _etherToFinalizeWithdrawals, + uint256 // _sharesToBurnFromWithdrawalQueue + ) external view returns (uint256 withdrawals, uint256 elRewards, uint256 sharesFromWQToBurn, uint256 sharesToBurn) { withdrawals = _withdrawals; elRewards = _elRewards; - simulatedSharesToBurn = _simulatedSharesToBurn; + sharesFromWQToBurn = _sharesFromWQToBurn; sharesToBurn = _sharesToBurn; } function checkSimulatedShareRate( - uint256, // _postTotalPooledEther, - uint256, // _postTotalShares, - uint256, // _etherLockedOnWithdrawalQueue, - uint256, // _sharesBurntDueToWithdrawals, + uint256, // _postInternalEther, + uint256, // _postInternalShares, + uint256, // _etherToFinalizeWithdrawals, + uint256, // 
_sharesToBurnFromWithdrawalQueue, uint256 // _simulatedShareRate ) external view { if (checkSimulatedShareRateReverts) revert CheckSimulatedShareRateReverts(); @@ -84,12 +82,12 @@ contract OracleReportSanityChecker__MockForAccounting { function mock__smoothenTokenRebaseReturn( uint256 withdrawals, uint256 elRewards, - uint256 simulatedSharesToBurn, + uint256 sharesFromWQToBurn, uint256 sharesToBurn ) external { _withdrawals = withdrawals; _elRewards = elRewards; - _simulatedSharesToBurn = simulatedSharesToBurn; + _sharesFromWQToBurn = sharesFromWQToBurn; _sharesToBurn = sharesToBurn; } } diff --git a/test/0.8.9/contracts/OracleReportSanityChecker__MockForExitBusWeights.sol b/test/0.8.9/contracts/OracleReportSanityChecker__MockForExitBusWeights.sol new file mode 100644 index 0000000000..1613464d15 --- /dev/null +++ b/test/0.8.9/contracts/OracleReportSanityChecker__MockForExitBusWeights.sol @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +/// @notice Minimal mock to control MaxEB weights for ValidatorsExitBus tests +contract OracleReportSanityChecker__MockForExitBusWeights { + uint256 private _w1; + uint256 private _w2; + + constructor(uint256 w1, uint256 w2) { + _w1 = w1; + _w2 = w2; + } + + function setWeights(uint256 w1, uint256 w2) external { + _w1 = w1; + _w2 = w2; + } + + function getMaxEffectiveBalanceWeightWCType01() external view returns (uint256) { + return _w1; + } + + function getMaxEffectiveBalanceWeightWCType02() external view returns (uint256) { + return _w2; + } +} diff --git a/test/0.8.9/contracts/StakingModule__MockBadKeys.sol b/test/0.8.9/contracts/StakingModule__MockBadKeys.sol new file mode 100644 index 0000000000..deefcd0014 --- /dev/null +++ b/test/0.8.9/contracts/StakingModule__MockBadKeys.sol @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +/// @notice Minimal mock that returns configurable raw bytes for 
signing keys +contract StakingModule__MockBadKeys { + bytes private _returned; + + function setReturned(bytes calldata data) external { + _returned = data; + } + + function getSigningKeys( + uint256 /* nodeOpId */, + uint256 /* startIndex */, + uint256 /* keysCount */ + ) external view returns (bytes memory) { + return _returned; + } +} diff --git a/test/0.8.9/contracts/StakingModule__MockForKeyVerification.sol b/test/0.8.9/contracts/StakingModule__MockForKeyVerification.sol new file mode 100644 index 0000000000..da9c390eaa --- /dev/null +++ b/test/0.8.9/contracts/StakingModule__MockForKeyVerification.sol @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +/** + * @notice Universal mock for staking modules that returns any requested signing key + * @dev This mock implements both legacy (NOR, SDVT) and new (CSM, CuratedV2) interfaces + * and can be used for key verification testing in ValidatorsExitBus + */ +contract StakingModule__MockForKeyVerification { + // Storage: nodeOpId => keyIndex => pubkey (48 bytes) + mapping(uint256 => mapping(uint256 => bytes)) private _keys; + + /// @notice Configure a signing key for testing + /// @param nodeOpId Node operator ID + /// @param keyIndex Key index + /// @param pubkey Public key (48 bytes) + function setSigningKey(uint256 nodeOpId, uint256 keyIndex, bytes calldata pubkey) external { + require(pubkey.length == 48, "Invalid pubkey length"); + _keys[nodeOpId][keyIndex] = pubkey; + } + + /// @notice Legacy interface (NOR, SDVT): getSigningKeys returns pubkeys, signatures, and used flags + /// @param _nodeOperatorId Node operator ID + /// @param _offset Key index to start from + /// @param _limit Number of keys to return + /// @return pubkeys Concatenated public keys (48 bytes each) + /// @return signatures Empty (not needed for exit verification) + /// @return used Empty (not needed for exit verification) + function getSigningKeys( + uint256 _nodeOperatorId, + 
uint256 _offset, + uint256 _limit + ) external view returns (bytes memory pubkeys, bytes memory signatures, bool[] memory used) { + require(_limit == 1, "Mock only supports _limit=1"); + + bytes memory key = _keys[_nodeOperatorId][_offset]; + if (key.length == 0) { + // Permissive mode: generate a deterministic 48-byte key + // This allows tests to work without explicitly configuring every key + bytes32 hash1 = keccak256(abi.encode(_nodeOperatorId, _offset)); + bytes32 hash2 = keccak256(abi.encode(_nodeOperatorId, _offset, 1)); + key = new bytes(48); + assembly { + // Copy first 32 bytes from hash1 + mstore(add(key, 32), hash1) + // Copy next 16 bytes from hash2 (total 48 bytes) + mstore(add(key, 64), hash2) + } + } + + pubkeys = key; + signatures = new bytes(0); + used = new bool[](1); + } +} diff --git a/test/0.8.9/contracts/StakingRouter__Harness.sol b/test/0.8.9/contracts/StakingRouter__Harness.sol deleted file mode 100644 index 054a39b452..0000000000 --- a/test/0.8.9/contracts/StakingRouter__Harness.sol +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-License-Identifier: UNLICENSED -// for testing purposes only - -pragma solidity 0.8.9; - -import {StakingRouter} from "contracts/0.8.9/StakingRouter.sol"; -import {UnstructuredStorage} from "contracts/0.8.9/lib/UnstructuredStorage.sol"; - -contract StakingRouter__Harness is StakingRouter { - using UnstructuredStorage for bytes32; - - constructor(address _depositContract) StakingRouter(_depositContract) {} - - function getStakingModuleIndexById(uint256 _stakingModuleId) external view returns (uint256) { - return _getStakingModuleIndexById(_stakingModuleId); - } - - function getStakingModuleByIndex(uint256 _stakingModuleIndex) external view returns (StakingModule memory) { - return _getStakingModuleByIndex(_stakingModuleIndex); - } - - function testing_setBaseVersion(uint256 version) external { - CONTRACT_VERSION_POSITION.setStorageUint256(version); - } - - function testing_setStakingModuleStatus(uint256 _stakingModuleId, 
StakingModuleStatus _status) external { - StakingModule storage stakingModule = _getStakingModuleByIndex(_getStakingModuleIndexById(_stakingModuleId)); - _setStakingModuleStatus(stakingModule, _status); - } -} diff --git a/test/0.8.9/contracts/StakingRouter__MockForAccountingOracle.sol b/test/0.8.9/contracts/StakingRouter__MockForAccountingOracle.sol index 83111db9a3..7089be6785 100644 --- a/test/0.8.9/contracts/StakingRouter__MockForAccountingOracle.sol +++ b/test/0.8.9/contracts/StakingRouter__MockForAccountingOracle.sol @@ -6,6 +6,8 @@ pragma solidity 0.8.9; import {IStakingRouter} from "contracts/0.8.9/oracle/AccountingOracle.sol"; contract StakingRouter__MockForAccountingOracle is IStakingRouter { + error InvalidValidatorBalancesReport(); + struct UpdateExitedKeysByModuleCallData { uint256[] moduleIds; uint256[] exitedKeysCounts; @@ -19,6 +21,13 @@ contract StakingRouter__MockForAccountingOracle is IStakingRouter { } mapping(uint256 => uint256) internal _exitedKeysCountsByModuleId; + mapping(uint256 => uint256) internal _moduleBalancesWei; + mapping(uint256 => uint64) internal _validatorBalancesGweiByModuleId; + mapping(uint256 => uint64) internal _pendingBalancesGweiByModuleId; + mapping(uint256 => bool) internal _moduleExistsById; + uint256[] internal _registeredModuleIds; + + uint256 internal _totalStakingModulesBalanceWei; UpdateExitedKeysByModuleCallData internal _lastCall_updateExitedKeysByModule; @@ -38,6 +47,15 @@ contract StakingRouter__MockForAccountingOracle is IStakingRouter { /// IStakingRouter /// + function mock__registerStakingModule(uint256 moduleId) external { + if (_moduleExistsById[moduleId]) { + return; + } + + _moduleExistsById[moduleId] = true; + _registeredModuleIds.push(moduleId); + } + function updateExitedValidatorsCountByStakingModule( uint256[] calldata moduleIds, uint256[] calldata exitedKeysCounts @@ -52,11 +70,60 @@ contract StakingRouter__MockForAccountingOracle is IStakingRouter { uint256 moduleId = moduleIds[i]; 
newlyExitedValidatorsCount += exitedKeysCounts[i] - _exitedKeysCountsByModuleId[moduleId]; _exitedKeysCountsByModuleId[moduleId] = exitedKeysCounts[i]; + _moduleExistsById[moduleId] = true; } return newlyExitedValidatorsCount; } + function reportValidatorBalancesByStakingModule( + uint256[] calldata _stakingModuleIds, + uint256[] calldata _validatorBalancesGwei + ) external { + this.validateReportValidatorBalancesByStakingModule(_stakingModuleIds, _validatorBalancesGwei); + + uint256 totalBalance = _totalStakingModulesBalanceWei; + for (uint256 i = 0; i < _stakingModuleIds.length; ++i) { + uint256 moduleId = _stakingModuleIds[i]; + uint256 previousBalance = _moduleBalancesWei[moduleId]; + uint256 currentBalance = (_validatorBalancesGwei[i]) * 1 gwei; + + if (currentBalance >= previousBalance) { + totalBalance += currentBalance - previousBalance; + } else { + totalBalance -= previousBalance - currentBalance; + } + + _moduleBalancesWei[moduleId] = currentBalance; + _validatorBalancesGweiByModuleId[moduleId] = uint64(_validatorBalancesGwei[i]); + _moduleExistsById[moduleId] = true; + } + _totalStakingModulesBalanceWei = totalBalance; + } + + function validateReportValidatorBalancesByStakingModule( + uint256[] calldata _stakingModuleIds, + uint256[] calldata _validatorBalancesGwei + ) external view { + uint256 modulesCount = _registeredModuleIds.length; + if (_stakingModuleIds.length != modulesCount || _validatorBalancesGwei.length != modulesCount) { + revert InvalidValidatorBalancesReport(); + } + + for (uint256 i = 0; i < modulesCount; ++i) { + if (_stakingModuleIds[i] != _registeredModuleIds[i]) { + revert InvalidValidatorBalancesReport(); + } + if (_validatorBalancesGwei[i] > type(uint64).max) { + revert InvalidValidatorBalancesReport(); + } + } + } + + function getDepositAmountFromLastSlot(uint256) external view returns (uint256) { + return 0; + } + function reportStakingModuleExitedValidatorsCountByNodeOperator( uint256 stakingModuleId, bytes calldata 
nodeOperatorIds, @@ -70,4 +137,22 @@ contract StakingRouter__MockForAccountingOracle is IStakingRouter { function onValidatorsCountsByNodeOperatorReportingFinished() external { ++totalCalls_onValidatorsCountsByNodeOperatorReportingFinished; } + + function getModuleValidatorsBalance(uint256 moduleId) external view returns (uint256) { + return _moduleBalancesWei[moduleId]; + } + + function hasStakingModule(uint256 moduleId) external view returns (bool) { + return _moduleExistsById[moduleId]; + } + + function getStakingModuleStateAccounting( + uint256 moduleId + ) external view returns (uint64 validatorsBalanceGwei, uint64 exitedValidatorsCount) { + return (_validatorBalancesGweiByModuleId[moduleId], uint64(_exitedKeysCountsByModuleId[moduleId])); + } + + function getTotalModulesValidatorsBalance() external view returns (uint256) { + return _totalStakingModulesBalanceWei; + } } diff --git a/test/0.8.9/contracts/StakingRouter__MockForDepositSecurityModule.sol b/test/0.8.9/contracts/StakingRouter__MockForDepositSecurityModule.sol index d489dd29e3..d812dfad9c 100644 --- a/test/0.8.9/contracts/StakingRouter__MockForDepositSecurityModule.sol +++ b/test/0.8.9/contracts/StakingRouter__MockForDepositSecurityModule.sol @@ -1,10 +1,25 @@ // SPDX-License-Identifier: UNLICENSED // for testing purposes only -pragma solidity 0.8.9; +pragma solidity 0.8.25; -import {IStakingRouter} from "contracts/0.8.9/DepositSecurityModule.sol"; -import {StakingRouter} from "contracts/0.8.9/StakingRouter.sol"; +import {StakingModuleStatus} from "contracts/0.8.25/sr/SRTypes.sol"; + +interface IStakingRouter { + function getStakingModuleMinDepositBlockDistance(uint256 _stakingModuleId) external view returns (uint256); + function getStakingModuleMaxDepositsPerBlock(uint256 _stakingModuleId) external view returns (uint256); + function getStakingModuleIsActive(uint256 _stakingModuleId) external view returns (bool); + function getStakingModuleNonce(uint256 _stakingModuleId) external view returns 
(uint256); + function getStakingModuleLastDepositBlock(uint256 _stakingModuleId) external view returns (uint256); + function hasStakingModule(uint256 _stakingModuleId) external view returns (bool); + function decreaseStakingModuleVettedKeysCountByNodeOperator( + uint256 _stakingModuleId, + bytes calldata _nodeOperatorIds, + bytes calldata _vettedSigningKeysCounts + ) external; + function deposit(uint256 _stakingModuleId, bytes calldata _depositCalldata) external; + function canDeposit(uint256 _stakingModuleId) external view returns (bool); +} contract StakingRouter__MockForDepositSecurityModule is IStakingRouter { error StakingModuleUnregistered(); @@ -14,17 +29,14 @@ contract StakingRouter__MockForDepositSecurityModule is IStakingRouter { bytes nodeOperatorIds, bytes vettedSigningKeysCounts ); - event StakingModuleDeposited(uint256 maxDepositsCount, uint24 stakingModuleId, bytes depositCalldata); - event StakingModuleStatusSet( - uint24 indexed stakingModuleId, - StakingRouter.StakingModuleStatus status, - address setBy - ); + event StakingModuleDeposited(uint24 stakingModuleId, bytes depositCalldata); + event StakingModuleStatusSet(uint24 indexed stakingModuleId, StakingModuleStatus status, address setBy); - StakingRouter.StakingModuleStatus private status; + StakingModuleStatus private status; uint256 private stakingModuleNonce; uint256 private stakingModuleLastDepositBlock; uint256 private stakingModuleMaxDepositsPerBlock; + uint256 private stakingModuleMaxDepositsAmountPerBlock; uint256 private stakingModuleMinDepositBlockDistance; uint256 private registeredStakingModuleId; @@ -32,13 +44,15 @@ contract StakingRouter__MockForDepositSecurityModule is IStakingRouter { registeredStakingModuleId = stakingModuleId; } + function receiveDepositableEther() external payable { + // Mock function to receive ETH from Lido.withdrawDepositableEther + } + function deposit( - uint256 maxDepositsCount, uint256 stakingModuleId, bytes calldata depositCalldata - ) external payable 
whenModuleIsRegistered(stakingModuleId) returns (uint256 keysCount) { - emit StakingModuleDeposited(maxDepositsCount, uint24(stakingModuleId), depositCalldata); - return maxDepositsCount; + ) external whenModuleIsRegistered(stakingModuleId) { + emit StakingModuleDeposited(uint24(stakingModuleId), depositCalldata); } function decreaseStakingModuleVettedKeysCountByNodeOperator( @@ -55,13 +69,13 @@ contract StakingRouter__MockForDepositSecurityModule is IStakingRouter { function getStakingModuleStatus( uint256 stakingModuleId - ) external view whenModuleIsRegistered(stakingModuleId) returns (StakingRouter.StakingModuleStatus) { + ) external view whenModuleIsRegistered(stakingModuleId) returns (StakingModuleStatus) { return status; } function setStakingModuleStatus( uint256 _stakingModuleId, - StakingRouter.StakingModuleStatus _status + StakingModuleStatus _status ) external whenModuleIsRegistered(_stakingModuleId) { emit StakingModuleStatusSet(uint24(_stakingModuleId), _status, msg.sender); status = _status; @@ -70,19 +84,23 @@ contract StakingRouter__MockForDepositSecurityModule is IStakingRouter { function getStakingModuleIsStopped( uint256 stakingModuleId ) external view whenModuleIsRegistered(stakingModuleId) returns (bool) { - return status == StakingRouter.StakingModuleStatus.Stopped; + return status == StakingModuleStatus.Stopped; } function getStakingModuleIsDepositsPaused( uint256 stakingModuleId ) external view whenModuleIsRegistered(stakingModuleId) returns (bool) { - return status == StakingRouter.StakingModuleStatus.DepositsPaused; + return status == StakingModuleStatus.DepositsPaused; + } + + function canDeposit(uint256 _stakingModuleId) external view returns (bool) { + return hasStakingModule(_stakingModuleId) && status == StakingModuleStatus.Active; } function getStakingModuleIsActive( uint256 stakingModuleId ) external view whenModuleIsRegistered(stakingModuleId) returns (bool) { - return status == StakingRouter.StakingModuleStatus.Active; + return 
status == StakingModuleStatus.Active; } function getStakingModuleNonce( @@ -111,6 +129,12 @@ contract StakingRouter__MockForDepositSecurityModule is IStakingRouter { return stakingModuleMaxDepositsPerBlock; } + function getStakingModuleMaxDepositsAmountPerBlock( + uint256 stakingModuleId + ) external view whenModuleIsRegistered(stakingModuleId) returns (uint256) { + return stakingModuleMaxDepositsAmountPerBlock; + } + function setStakingModuleMaxDepositsPerBlock(uint256 value) external { stakingModuleMaxDepositsPerBlock = value; } diff --git a/test/0.8.9/contracts/StakingRouter__MockForSanityChecker.sol b/test/0.8.9/contracts/StakingRouter__MockForSanityChecker.sol index e998d50755..9983c1c931 100644 --- a/test/0.8.9/contracts/StakingRouter__MockForSanityChecker.sol +++ b/test/0.8.9/contracts/StakingRouter__MockForSanityChecker.sol @@ -1,19 +1,20 @@ // SPDX-License-Identifier: UNLICENSED // for testing purposes only -pragma solidity 0.8.9; +pragma solidity 0.8.25; -import {StakingRouter} from "contracts/0.8.9/StakingRouter.sol"; +import {StakingModule} from "contracts/0.8.25/sr/SRTypes.sol"; contract StakingRouter__MockForSanityChecker { - mapping(uint256 => StakingRouter.StakingModule) private modules; + mapping(uint256 => StakingModule) private modules; + mapping(uint256 => bool) private moduleExistsById; uint256[] private moduleIds; constructor() {} function mock__addStakingModuleExitedValidators(uint24 moduleId, uint256 exitedValidators) external { - StakingRouter.StakingModule memory module = StakingRouter.StakingModule( + StakingModule memory module = StakingModule( moduleId, address(0), 0, @@ -26,9 +27,12 @@ contract StakingRouter__MockForSanityChecker { exitedValidators, 0, 0, + 0, + 1, // wcType 0 ); modules[moduleId] = module; + moduleExistsById[moduleId] = true; moduleIds.push(moduleId); } @@ -42,13 +46,25 @@ contract StakingRouter__MockForSanityChecker { break; } } + delete modules[moduleId]; + delete moduleExistsById[moduleId]; } function 
getStakingModuleIds() external view returns (uint256[] memory) { return moduleIds; } - function getStakingModule(uint256 stakingModuleId) public view returns (StakingRouter.StakingModule memory module) { + function getStakingModule(uint256 stakingModuleId) public view returns (StakingModule memory module) { return modules[stakingModuleId]; } + + function hasStakingModule(uint256 stakingModuleId) external view returns (bool) { + return moduleExistsById[stakingModuleId]; + } + + function getStakingModuleStateAccounting( + uint256 stakingModuleId + ) external view returns (uint64 validatorsBalanceGwei, uint64 pendingBalanceGwei, uint64 exitedValidatorsCount) { + return (0, 0, uint64(modules[stakingModuleId].exitedValidatorsCount)); + } } diff --git a/test/0.8.9/contracts/StakingRouter__MockForValidatorsExitBus.sol b/test/0.8.9/contracts/StakingRouter__MockForValidatorsExitBus.sol new file mode 100644 index 0000000000..91190e9258 --- /dev/null +++ b/test/0.8.9/contracts/StakingRouter__MockForValidatorsExitBus.sol @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only + +pragma solidity 0.8.9; + +import {IStakingRouter} from "contracts/0.8.9/oracle/ValidatorsExitBus.sol"; + +contract StakingRouter__MockForValidatorsExitBus is IStakingRouter { + error StakingModuleUnregistered(); + + struct StakingModuleData { + uint24 id; + address stakingModuleAddress; + uint16 stakingModuleFee; + uint16 treasuryFee; + uint16 stakeShareLimit; + uint8 status; + string name; + uint64 lastDepositAt; + uint256 lastDepositBlock; + uint256 exitedValidatorsCount; + uint16 priorityExitShareThreshold; + uint64 maxDepositsPerBlock; + uint64 minDepositBlockDistance; + uint8 withdrawalCredentialsType; + } + + mapping(uint256 => StakingModuleData) internal _modules; + mapping(uint256 => bool) internal _moduleConfigured; + + /// @notice Mock function to set up module configuration for tests + /// @param moduleId The module ID + /// @param withdrawalCredentialsType The 
withdrawal credentials type (0x01 or 0x02) + function setStakingModuleWithdrawalCredentialsType(uint256 moduleId, uint8 withdrawalCredentialsType) external { + _modules[moduleId].id = uint24(moduleId); + _modules[moduleId].withdrawalCredentialsType = withdrawalCredentialsType; + _moduleConfigured[moduleId] = true; + // Set a placeholder address - tests can override with setStakingModuleAddress if needed + if (_modules[moduleId].stakingModuleAddress == address(0)) { + _modules[moduleId].stakingModuleAddress = address(uint160(moduleId + 0x1000)); + } + } + + /// @notice Mock function to set staking module address + /// @param moduleId The module ID + /// @param moduleAddress The module address + function setStakingModuleAddress(uint256 moduleId, address moduleAddress) external { + _modules[moduleId].stakingModuleAddress = moduleAddress; + _moduleConfigured[moduleId] = true; + } + + function getStakingModuleStateConfig( + uint256 _stakingModuleId + ) external view returns (ModuleStateConfig memory stateConfig) { + _validateModuleId(_stakingModuleId); + StakingModuleData memory data = _modules[_stakingModuleId]; + return + ModuleStateConfig({ + moduleAddress: data.stakingModuleAddress, + moduleFee: data.stakingModuleFee, + treasuryFee: data.treasuryFee, + stakeShareLimit: data.stakeShareLimit, + priorityExitShareThreshold: data.priorityExitShareThreshold, + status: data.status, + withdrawalCredentialsType: data.withdrawalCredentialsType + }); + } + + function _validateModuleId(uint256 _moduleId) internal view { + /// @dev require module configured and non-zero id + if (_moduleId == 0 || !_moduleConfigured[_moduleId]) { + revert StakingModuleUnregistered(); + } + } + + // Stub implementations for other IStakingRouter methods (not used in ValidatorsExitBus) + function updateExitedValidatorsCountByStakingModule( + uint256[] calldata, + uint256[] calldata + ) external pure returns (uint256) { + revert("Not implemented"); + } + + function 
getDepositAmountFromLastSlot(uint256) external pure returns (uint256) { + revert("Not implemented"); + } + + function reportStakingModuleExitedValidatorsCountByNodeOperator( + uint256, + bytes calldata, + bytes calldata + ) external pure { + revert("Not implemented"); + } + + function onValidatorsCountsByNodeOperatorReportingFinished() external pure { + revert("Not implemented"); + } +} diff --git a/test/0.8.9/contracts/ValidatorsExitBus__Harness.sol b/test/0.8.9/contracts/ValidatorsExitBus__Harness.sol index c7382f9ad8..cd12f64932 100644 --- a/test/0.8.9/contracts/ValidatorsExitBus__Harness.sol +++ b/test/0.8.9/contracts/ValidatorsExitBus__Harness.sol @@ -58,4 +58,28 @@ contract ValidatorsExitBus__Harness is ValidatorsExitBusOracle, ITimeProvider { function getRequestStatus(bytes32 exitRequestHash) external view returns (RequestStatus memory requestStatus) { requestStatus = _storageRequestStatus()[exitRequestHash]; } + + // Expose internal function for unit testing + function calculateTotalExitBalanceEth(bytes calldata data, uint256 dataFormat) external view returns (uint256) { + return _calculateTotalExitBalanceEth(data, dataFormat); + } + + /// @notice Expose base timestamp calculation (without consensus override) for coverage + function callBaseTimestamp() external view returns (uint32) { + return super._getTimestamp(); + } + + /// @notice Expose internal validator data decoder for coverage of unsupported formats + function callGetValidatorData( + bytes calldata data, + uint256 dataFormat, + uint256 index + ) external pure returns (ValidatorData memory) { + return _getValidatorData(data, dataFormat, index); + } + + /// @notice Expose internal dispatcher for direct branch coverage + function callProcessExitRequestsList(bytes calldata data, uint256 dataFormat) external { + _processExitRequestsList(data, dataFormat); + } } diff --git a/test/0.8.9/contracts/VaultHub__MockForAccReport.sol b/test/0.8.9/contracts/VaultHub__MockForAccReport.sol new file mode 100644 
index 0000000000..e4eb7262ee --- /dev/null +++ b/test/0.8.9/contracts/VaultHub__MockForAccReport.sol @@ -0,0 +1,29 @@ +// SPDX-FileCopyrightText: 2025 Lido +// SPDX-License-Identifier: GPL-3.0 +pragma solidity 0.8.9; + +import {IVaultHub} from "contracts/common/interfaces/IVaultHub.sol"; + +contract VaultHub__MockForAccountingReport is IVaultHub { + uint256 private badDebtToInternalize_; + + function mock__badDebtToInternalize() external view returns (uint256) { + return badDebtToInternalize_; + } + + function setBadDebtToInternalize(uint256 _badDebt) external { + badDebtToInternalize_ = _badDebt; + } + + function decreaseInternalizedBadDebt(uint256 _badDebt) external { + badDebtToInternalize_ -= _badDebt; + } + + function badDebtToInternalize() external view override returns (uint256) { + return badDebtToInternalize_; + } + + function badDebtToInternalizeForLastRefSlot() external view override returns (uint256) { + return badDebtToInternalize_; + } +} diff --git a/test/0.8.9/contracts/WithdrawalVault__Harness.sol b/test/0.8.9/contracts/WithdrawalVault__Harness.sol index 8bbefb2f82..59dfb326c2 100644 --- a/test/0.8.9/contracts/WithdrawalVault__Harness.sol +++ b/test/0.8.9/contracts/WithdrawalVault__Harness.sol @@ -9,8 +9,20 @@ contract WithdrawalVault__Harness is WithdrawalVault { constructor( address _lido, address _treasury, - address _triggerableWithdrawalsGateway - ) WithdrawalVault(_lido, _treasury, _triggerableWithdrawalsGateway) {} + address _triggerableWithdrawalsGateway, + address _consolidationGateway, + address _withdrawalRequest, + address _consolidationRequest + ) + WithdrawalVault( + _lido, + _treasury, + _triggerableWithdrawalsGateway, + _consolidationGateway, + _withdrawalRequest, + _consolidationRequest + ) + {} function harness__initializeContractVersionTo(uint256 _version) external { _initializeContractVersionTo(_version); diff --git a/test/0.8.9/depositSecurityModule.test.ts b/test/0.8.9/depositSecurityModule.test.ts index 28dec79a02..749fd9defb 
100644 --- a/test/0.8.9/depositSecurityModule.test.ts +++ b/test/0.8.9/depositSecurityModule.test.ts @@ -30,6 +30,7 @@ import { Snapshot } from "test/suite"; const UNREGISTERED_STAKING_MODULE_ID = 1; const STAKING_MODULE_ID = 100; const MAX_DEPOSITS_PER_BLOCK = 100; +// const MAX_DEPOSITS_AMOUNT_PER_BLOCK_WEI = BigInt(MAX_DEPOSITS_PER_BLOCK) * parseEther("32"); const MIN_DEPOSIT_BLOCK_DISTANCE = 14; const PAUSE_INTENT_VALIDITY_PERIOD_BLOCKS = 10; const MAX_OPERATORS_PER_UNVETTING = 20; @@ -169,8 +170,11 @@ describe("DepositSecurityModule.sol", () => { expect(minDepositBlockDistance).to.equal(MIN_DEPOSIT_BLOCK_DISTANCE); await stakingRouter.setStakingModuleMaxDepositsPerBlock(MAX_DEPOSITS_PER_BLOCK); + // await stakingRouter.setStakingModuleMaxDepositsAmountPerBlock(MAX_DEPOSITS_AMOUNT_PER_BLOCK_WEI); const maxDepositsPerBlock = await stakingRouter.getStakingModuleMaxDepositsPerBlock(STAKING_MODULE_ID); expect(maxDepositsPerBlock).to.equal(MAX_DEPOSITS_PER_BLOCK); + // const maxDepositsAmountPerBlock = await stakingRouter.getStakingModuleMaxDepositsAmountPerBlock(STAKING_MODULE_ID); + // expect(maxDepositsAmountPerBlock).to.equal(MAX_DEPOSITS); await depositContract.set_deposit_root(DEPOSIT_ROOT); expect(await depositContract.get_deposit_root()).to.equal(DEPOSIT_ROOT); @@ -1173,9 +1177,7 @@ describe("DepositSecurityModule.sol", () => { const depositCalldata = encodeBytes32String(""); const tx = await deposit([guardian1], { depositCalldata }); - await expect(tx) - .to.emit(lido, "StakingModuleDeposited") - .withArgs(MAX_DEPOSITS_PER_BLOCK, STAKING_MODULE_ID, depositCalldata); + await expect(tx).to.emit(stakingRouter, "StakingModuleDeposited").withArgs(STAKING_MODULE_ID, depositCalldata); }); }); @@ -1240,9 +1242,7 @@ describe("DepositSecurityModule.sol", () => { const depositCalldata = encodeBytes32String(""); const tx = await deposit([guardian1, guardian2, guardian3], { depositCalldata }); - await expect(tx) - .to.emit(lido, "StakingModuleDeposited") - 
.withArgs(MAX_DEPOSITS_PER_BLOCK, STAKING_MODULE_ID, depositCalldata); + await expect(tx).to.emit(stakingRouter, "StakingModuleDeposited").withArgs(STAKING_MODULE_ID, depositCalldata); }); it("Allow deposit if deposit with guardian's sigs (0,1)", async () => { @@ -1254,9 +1254,7 @@ describe("DepositSecurityModule.sol", () => { const depositCalldata = encodeBytes32String(""); const tx = await deposit([guardian1, guardian2], { depositCalldata }); - await expect(tx) - .to.emit(lido, "StakingModuleDeposited") - .withArgs(MAX_DEPOSITS_PER_BLOCK, STAKING_MODULE_ID, depositCalldata); + await expect(tx).to.emit(stakingRouter, "StakingModuleDeposited").withArgs(STAKING_MODULE_ID, depositCalldata); }); it("Allow deposit if deposit with guardian's sigs (0,2)", async () => { @@ -1268,9 +1266,7 @@ describe("DepositSecurityModule.sol", () => { const depositCalldata = encodeBytes32String(""); const tx = await deposit([guardian1, guardian3], { depositCalldata }); - await expect(tx) - .to.emit(lido, "StakingModuleDeposited") - .withArgs(MAX_DEPOSITS_PER_BLOCK, STAKING_MODULE_ID, depositCalldata); + await expect(tx).to.emit(stakingRouter, "StakingModuleDeposited").withArgs(STAKING_MODULE_ID, depositCalldata); }); it("Allow deposit if deposit with guardian's sigs (1,2)", async () => { @@ -1282,9 +1278,7 @@ describe("DepositSecurityModule.sol", () => { const depositCalldata = encodeBytes32String(""); const tx = await deposit([guardian2, guardian3], { depositCalldata }); - await expect(tx) - .to.emit(lido, "StakingModuleDeposited") - .withArgs(MAX_DEPOSITS_PER_BLOCK, STAKING_MODULE_ID, depositCalldata); + await expect(tx).to.emit(stakingRouter, "StakingModuleDeposited").withArgs(STAKING_MODULE_ID, depositCalldata); }); }); }); diff --git a/test/0.8.9/lidoLocator.test.ts b/test/0.8.9/lidoLocator.test.ts index 00a375baf9..928cf84839 100644 --- a/test/0.8.9/lidoLocator.test.ts +++ b/test/0.8.9/lidoLocator.test.ts @@ -21,6 +21,7 @@ const services = [ "oracleDaemonConfig", 
"validatorExitDelayVerifier", "triggerableWithdrawalsGateway", + "consolidationGateway", "accounting", "predepositGuarantee", "wstETH", @@ -28,8 +29,7 @@ const services = [ "vaultFactory", "lazyOracle", "operatorGrid", - "vaultFactory", - "lazyOracle", + "topUpGateway", ] as const; type ArrayToUnion = A[number]; diff --git a/test/0.8.9/oracle/accountingOracle.accessControl.test.ts b/test/0.8.9/oracle/accountingOracle.accessControl.test.ts index 897cfd9d6e..550ddb0de2 100644 --- a/test/0.8.9/oracle/accountingOracle.accessControl.test.ts +++ b/test/0.8.9/oracle/accountingOracle.accessControl.test.ts @@ -66,10 +66,12 @@ describe("AccountingOracle.sol:accessControl", () => { reportFields = { consensusVersion: AO_CONSENSUS_VERSION, refSlot: refSlot, - numValidators: 10, - clBalanceGwei: 320n * ONE_GWEI, + clValidatorsBalanceGwei: 300n * ONE_GWEI, + clPendingBalanceGwei: 20n * ONE_GWEI, stakingModuleIdsWithNewlyExitedValidators: [1], numExitedValidatorsByStakingModule: [3], + stakingModuleIdsWithUpdatedBalance: [1], + validatorBalancesGweiByStakingModule: [300n * ONE_GWEI], withdrawalVaultBalance: ether("1"), elRewardsVaultBalance: ether("2"), sharesRequestedToBurn: ether("3"), diff --git a/test/0.8.9/oracle/accountingOracle.happyPath.test.ts b/test/0.8.9/oracle/accountingOracle.happyPath.test.ts index c13ac0028c..2d5725b2c4 100644 --- a/test/0.8.9/oracle/accountingOracle.happyPath.test.ts +++ b/test/0.8.9/oracle/accountingOracle.happyPath.test.ts @@ -132,10 +132,12 @@ describe("AccountingOracle.sol:happyPath", () => { reportFields = { consensusVersion: AO_CONSENSUS_VERSION, refSlot: refSlot, - numValidators: 10, - clBalanceGwei: 320n * ONE_GWEI, + clValidatorsBalanceGwei: 300n * ONE_GWEI, + clPendingBalanceGwei: 20n * ONE_GWEI, stakingModuleIdsWithNewlyExitedValidators: [1], numExitedValidatorsByStakingModule: [3], + stakingModuleIdsWithUpdatedBalance: [1], + validatorBalancesGweiByStakingModule: [300n * ONE_GWEI], withdrawalVaultBalance: ether("1"), 
elRewardsVaultBalance: ether("2"), sharesRequestedToBurn: ether("3"), @@ -222,14 +224,12 @@ describe("AccountingOracle.sol:happyPath", () => { it("Accounting got the oracle report", async () => { const lastOracleReportCall = await mockAccounting.lastCall__handleOracleReport(); expect(lastOracleReportCall.callCount).to.equal(1); - expect(lastOracleReportCall.arg.timeElapsed).to.equal( - (reportFields.refSlot - ORACLE_LAST_REPORT_SLOT) * SECONDS_PER_SLOT, - ); - expect(lastOracleReportCall.arg.clValidators).to.equal(reportFields.numValidators); - expect(lastOracleReportCall.arg.clBalance).to.equal(BigInt(reportFields.clBalanceGwei) * ONE_GWEI); - expect(lastOracleReportCall.arg.withdrawalVaultBalance).to.equal(reportFields.withdrawalVaultBalance); - expect(lastOracleReportCall.arg.elRewardsVaultBalance).to.equal(reportFields.elRewardsVaultBalance); - expect(lastOracleReportCall.arg.withdrawalFinalizationBatches.map(Number)).to.have.ordered.members( + expect(lastOracleReportCall.arg[1]).to.equal((reportFields.refSlot - ORACLE_LAST_REPORT_SLOT) * SECONDS_PER_SLOT); + expect(lastOracleReportCall.arg[2]).to.equal(BigInt(reportFields.clValidatorsBalanceGwei) * 1000000000n); + expect(lastOracleReportCall.arg[3]).to.equal(BigInt(reportFields.clPendingBalanceGwei) * 1000000000n); + expect(lastOracleReportCall.arg[4]).to.equal(reportFields.withdrawalVaultBalance); + expect(lastOracleReportCall.arg[5]).to.equal(reportFields.elRewardsVaultBalance); + expect(lastOracleReportCall.arg[7].map(Number)).to.have.ordered.members( reportFields.withdrawalFinalizationBatches.map(Number), ); }); diff --git a/test/0.8.9/oracle/accountingOracle.submitReport.test.ts b/test/0.8.9/oracle/accountingOracle.submitReport.test.ts index 0fdf42e515..f3881b8737 100644 --- a/test/0.8.9/oracle/accountingOracle.submitReport.test.ts +++ b/test/0.8.9/oracle/accountingOracle.submitReport.test.ts @@ -10,6 +10,7 @@ import { Accounting__MockForAccountingOracle, AccountingOracle__Harness, HashConsensus__Harness, 
+ Lido__MockForAccounting, OracleReportSanityChecker, StakingRouter__MockForAccountingOracle, WithdrawalQueue__MockForAccountingOracle, @@ -47,6 +48,7 @@ describe("AccountingOracle.sol:submitReport", () => { let extraDataItems: string[]; let oracleVersion: bigint; let deadline: BigNumberish; + let mockLido: Lido__MockForAccounting; let mockStakingRouter: StakingRouter__MockForAccountingOracle; let extraData: ExtraDataType; let mockAccounting: Accounting__MockForAccountingOracle; @@ -61,10 +63,12 @@ describe("AccountingOracle.sol:submitReport", () => { const getReportFields = (override = {}) => ({ consensusVersion: AO_CONSENSUS_VERSION, refSlot: 0n, - numValidators: 10n, - clBalanceGwei: 320n * ONE_GWEI, + clValidatorsBalanceGwei: 300n * ONE_GWEI, + clPendingBalanceGwei: 20n * ONE_GWEI, stakingModuleIdsWithNewlyExitedValidators: [1], numExitedValidatorsByStakingModule: [3], + stakingModuleIdsWithUpdatedBalance: [1], + validatorBalancesGweiByStakingModule: [300n * ONE_GWEI], withdrawalVaultBalance: ether("1"), elRewardsVaultBalance: ether("2"), sharesRequestedToBurn: ether("3"), @@ -105,6 +109,7 @@ describe("AccountingOracle.sol:submitReport", () => { oracle = deployed.oracle; consensus = deployed.consensus; + mockLido = deployed.lido; mockStakingRouter = deployed.stakingRouter; mockAccounting = deployed.accounting; sanityChecker = deployed.oracleReportSanityChecker; @@ -351,7 +356,7 @@ describe("AccountingOracle.sol:submitReport", () => { it("reverts with UnexpectedDataHash", async () => { const incorrectReportFields = { ...reportFields, - numValidators: Number(reportFields.numValidators) - 1, + clValidatorsBalanceGwei: getBigInt(reportFields.clValidatorsBalanceGwei) - ONE_GWEI, }; const incorrectReportItems = getReportDataItems(incorrectReportFields); @@ -424,24 +429,32 @@ describe("AccountingOracle.sol:submitReport", () => { ).to.be.revertedWithCustomError(oracle, "InvalidExitedValidatorsData"); }); - it("reverts with ExitedValidatorsLimitExceeded if exited 
validators rate limit will be reached", async () => { - // Really simple test here for now - // TODO: Come up with more tests for better coverage of edge-case scenarios that can be accrued - // during calculation `exitedValidatorsPerDay` rate in AccountingOracle:612 - const totalExitedValidators = reportFields.numExitedValidatorsByStakingModule.reduce( - (sum: BigNumberish, curr: BigNumberish) => getBigInt(sum) + getBigInt(curr), - 0, + it("reverts with ExitedEthAmountPerDayLimitExceeded if exited ETH amount per day limit is reached", async () => { + const totalExitedValidators: bigint = reportFields.numExitedValidatorsByStakingModule.reduce( + (sum, curr) => sum + getBigInt(curr), + 0n, ); - const exitingRateLimit = getBigInt(totalExitedValidators) - 1n; + const exitingRateLimit = 0n; await sanityChecker.grantRole( - await sanityChecker.EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), + await sanityChecker.EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), admin.address, ); - await sanityChecker.setExitedValidatorsPerDayLimit(exitingRateLimit); - expect((await sanityChecker.getOracleReportLimits()).exitedValidatorsPerDayLimit).to.equal(exitingRateLimit); + await sanityChecker.setExitedEthAmountPerDayLimit(exitingRateLimit); + + const limits = await sanityChecker.getOracleReportLimits(); + expect(limits.exitedEthAmountPerDayLimit).to.equal(exitingRateLimit); + + const refSlotDelta = reportFields.refSlot - (await oracle.getLastProcessingRefSlot()); + const timeElapsed = refSlotDelta * SECONDS_PER_SLOT; + const exitedEthAmount = totalExitedValidators * limits.exitedValidatorEthAmountLimit * 10n ** 18n; + const exitedEthAmountPerDay = + timeElapsed === 0n ? 
exitedEthAmount * 86_400n : (exitedEthAmount * 86_400n) / timeElapsed; + const exitedEthAmountPerDayLimitWithConsolidation = + (limits.exitedEthAmountPerDayLimit + limits.consolidationEthAmountPerDayLimit) * 10n ** 18n; + await expect(oracle.connect(member1).submitReportData(reportFields, oracleVersion)) - .to.be.revertedWithCustomError(sanityChecker, "ExitedValidatorsLimitExceeded") - .withArgs(exitingRateLimit, totalExitedValidators); + .to.be.revertedWithCustomError(sanityChecker, "ExitedEthAmountPerDayLimitExceeded") + .withArgs(exitedEthAmountPerDayLimitWithConsolidation, exitedEthAmountPerDay); }); }); @@ -463,7 +476,12 @@ describe("AccountingOracle.sol:submitReport", () => { GENESIS_TIME + reportFields.refSlot * SECONDS_PER_SLOT, ); - expect(lastOracleReportToAccounting.arg.clBalance).to.equal(reportFields.clBalanceGwei + "000000000"); + expect(lastOracleReportToAccounting.arg.clValidatorsBalance).to.equal( + reportFields.clValidatorsBalanceGwei + "000000000", + ); + expect(lastOracleReportToAccounting.arg.clPendingBalance).to.equal( + reportFields.clPendingBalanceGwei + "000000000", + ); expect(lastOracleReportToAccounting.arg.withdrawalVaultBalance).to.equal(reportFields.withdrawalVaultBalance); expect(lastOracleReportToAccounting.arg.elRewardsVaultBalance).to.equal(reportFields.elRewardsVaultBalance); expect(lastOracleReportToAccounting.arg.withdrawalFinalizationBatches.map(Number)).to.have.ordered.members( @@ -640,5 +658,212 @@ describe("AccountingOracle.sol:submitReport", () => { expect(data.dataHash).to.equal(reportFields.extraDataHash); }); }); + + context("Balance-based accounting", () => { + it("should revert with InvalidClBalancesData if a staking module id does not exist", async () => { + const { newReportFields } = await prepareNextReportInNextFrame( + getReportFields({ + stakingModuleIdsWithUpdatedBalance: [999], + validatorBalancesGweiByStakingModule: [300n * ONE_GWEI], + }), + ); + + await expect( + 
oracle.connect(member1).submitReportData(newReportFields, oracleVersion), + ).to.be.revertedWithCustomError(mockStakingRouter, "InvalidValidatorBalancesReport"); + }); + + it("should accept different balance values", async () => { + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(reportFields, oracleVersion)).not.to.be.reverted; + }); + + it("should process balance data correctly", async () => { + expect((await mockAccounting.lastCall__handleOracleReport()).callCount).to.equal(0); + + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + + const lastCall = await mockAccounting.lastCall__handleOracleReport(); + expect(lastCall.callCount).to.equal(1); + expect(lastCall.arg.clValidatorsBalance).to.equal(BigInt(reportFields.clValidatorsBalanceGwei) * 1000000000n); + expect(lastCall.arg.clPendingBalance).to.equal(BigInt(reportFields.clPendingBalanceGwei) * 1000000000n); + }); + + it("should accept zero active balance", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + // Router mock stores validators balance only; pending is seeded on the Lido mock. 
+ await mockStakingRouter.reportValidatorBalancesByStakingModule([1], [300n * ONE_GWEI]); + await mockLido.mock__setClValidatorsBalance(300n * 10n ** 18n); + await mockLido.mock__setClPendingBalance(64n * 10n ** 18n); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 0n, + clPendingBalanceGwei: 64n * ONE_GWEI, + validatorBalancesGweiByStakingModule: [0n], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should accept zero pending balance", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + // Seed the previous router balances to the target values; this case checks zero pending itself, not one-frame growth. + await mockStakingRouter.reportValidatorBalancesByStakingModule([1], [1000n * ONE_GWEI]); + await mockLido.mock__setClValidatorsBalance(1000n * 10n ** 18n); + await mockLido.mock__setClPendingBalance(0n); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 1000n * ONE_GWEI, + clPendingBalanceGwei: 0n, + validatorBalancesGweiByStakingModule: [1000n * ONE_GWEI], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should accept large balance values", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await mockStakingRouter.reportValidatorBalancesByStakingModule([1], [60000n * ONE_GWEI]); + await mockLido.mock__setClValidatorsBalance(60000n * 10n ** 18n); + await mockLido.mock__setClPendingBalance(5000n * 10n ** 18n); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 60000n * 
ONE_GWEI, + clPendingBalanceGwei: 5000n * ONE_GWEI, + validatorBalancesGweiByStakingModule: [60000n * ONE_GWEI], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should handle pending larger than active", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await mockStakingRouter.reportValidatorBalancesByStakingModule([1], [300n * ONE_GWEI]); + await mockLido.mock__setClValidatorsBalance(300n * 10n ** 18n); + await mockLido.mock__setClPendingBalance(500n * 10n ** 18n); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 100n * ONE_GWEI, + clPendingBalanceGwei: 500n * ONE_GWEI, + validatorBalancesGweiByStakingModule: [100n * ONE_GWEI], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should convert gwei to wei correctly", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await mockStakingRouter.reportValidatorBalancesByStakingModule([1], [300n * ONE_GWEI]); + await mockLido.mock__setClValidatorsBalance(300n * 10n ** 18n); + await mockLido.mock__setClPendingBalance(456n * 10n ** 18n); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 123n * ONE_GWEI, + clPendingBalanceGwei: 456n * ONE_GWEI, + validatorBalancesGweiByStakingModule: [123n * ONE_GWEI], + }), + ); + + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion); + + const lastCall = await mockAccounting.lastCall__handleOracleReport(); + expect(lastCall.arg.clValidatorsBalance).to.equal(123n * ONE_GWEI * 
1000000000n); + expect(lastCall.arg.clPendingBalance).to.equal(456n * ONE_GWEI * 1000000000n); + }); + + it("should accept both balances zero", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 0n, + clPendingBalanceGwei: 0n, + validatorBalancesGweiByStakingModule: [0n], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should accept minimal gwei values", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 1n, + clPendingBalanceGwei: 1n, + validatorBalancesGweiByStakingModule: [1n], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should handle realistic scenarios", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await mockStakingRouter.reportValidatorBalancesByStakingModule([1], [30000n * ONE_GWEI]); + await mockLido.mock__setClValidatorsBalance(30000n * 10n ** 18n); + await mockLido.mock__setClPendingBalance(1000n * 10n ** 18n); + + const nextReport = await prepareNextReportInNextFrame( + getReportFields({ + clValidatorsBalanceGwei: 30000n * ONE_GWEI, + clPendingBalanceGwei: 1000n * ONE_GWEI, + validatorBalancesGweiByStakingModule: [30000n * ONE_GWEI], + }), + ); + + await consensus.setTime(deadline); + await expect(oracle.connect(member1).submitReportData(nextReport.newReportFields, oracleVersion)).not.to.be + .reverted; + }); + + it("should 
verify ReportValues structure", async () => { + await consensus.setTime(deadline); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + + const lastCall = await mockAccounting.lastCall__handleOracleReport(); + + expect(lastCall.arg).to.be.an("array"); + expect(lastCall.arg).to.have.length(9); + expect(lastCall.arg[0]).to.be.a("bigint"); + expect(lastCall.arg[1]).to.be.a("bigint"); + expect(lastCall.arg[2]).to.be.a("bigint"); + expect(lastCall.arg[3]).to.be.a("bigint"); + expect(lastCall.arg[2]).to.equal(BigInt(reportFields.clValidatorsBalanceGwei) * 1000000000n); + expect(lastCall.arg[3]).to.equal(BigInt(reportFields.clPendingBalanceGwei) * 1000000000n); + }); + }); }); }); diff --git a/test/0.8.9/oracle/accountingOracle.submitReportExtraData.test.ts b/test/0.8.9/oracle/accountingOracle.submitReportExtraData.test.ts index bb1276ec89..329794f6ab 100644 --- a/test/0.8.9/oracle/accountingOracle.submitReportExtraData.test.ts +++ b/test/0.8.9/oracle/accountingOracle.submitReportExtraData.test.ts @@ -50,10 +50,12 @@ const getDefaultExtraData = (): ExtraDataType => ({ const getDefaultReportFields = (override = {}) => ({ consensusVersion: AO_CONSENSUS_VERSION, refSlot: 0, - numValidators: 10, - clBalanceGwei: 320n * ONE_GWEI, + clValidatorsBalanceGwei: 300n * ONE_GWEI, + clPendingBalanceGwei: 20n * ONE_GWEI, stakingModuleIdsWithNewlyExitedValidators: [1], numExitedValidatorsByStakingModule: [3], + stakingModuleIdsWithUpdatedBalance: [1], + validatorBalancesGweiByStakingModule: [300n * ONE_GWEI], withdrawalVaultBalance: ether("1"), elRewardsVaultBalance: ether("2"), sharesRequestedToBurn: ether("3"), @@ -836,62 +838,65 @@ describe("AccountingOracle.sol:submitReportExtraData", () => { }); }); - context("checks data type for UnsupportedExtraDataType reverts (only supported types are `1` and `2`)", () => { - // contextual helper to prepeare wrong typed data - const getExtraWithCustomType = (typeCustom: bigint) => { - const extraData = { - 
exitedKeys: [{ moduleId: 1, nodeOpIds: [1], keysCounts: [2] }], - }; - const item = extraData.exitedKeys[0]; - const extraDataItems = []; - extraDataItems.push(encodeExtraDataItem(0, typeCustom, item.moduleId, item.nodeOpIds, item.keysCounts)); - return { - extraData, - extraDataItems, - wrongTypedIndex: 0, - typeCustom, + context( + "checks data type for UnsupportedExtraDataType reverts (only supported type is `2` and `1` is deprecated)", + () => { + // contextual helper to prepeare wrong typed data + const getExtraWithCustomType = (typeCustom: bigint) => { + const extraData = { + exitedKeys: [{ moduleId: 1, nodeOpIds: [1], keysCounts: [2] }], + }; + const item = extraData.exitedKeys[0]; + const extraDataItems = []; + extraDataItems.push(encodeExtraDataItem(0, typeCustom, item.moduleId, item.nodeOpIds, item.keysCounts)); + return { + extraData, + extraDataItems, + wrongTypedIndex: 0, + typeCustom, + }; }; - }; - it("if type `0` was passed", async () => { - const { extraDataItems, wrongTypedIndex, typeCustom } = getExtraWithCustomType(0n); - await consensus.advanceTimeToNextFrameStart(); - const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); - await oracle.connect(member1).submitReportData(reportFields, oracleVersion); - await expect(oracle.connect(member1).submitReportExtraDataList(extraDataList)) - .to.be.revertedWithCustomError(oracle, "UnsupportedExtraDataType") - .withArgs(wrongTypedIndex, typeCustom); - }); + it("if type `0` was passed", async () => { + const { extraDataItems, wrongTypedIndex, typeCustom } = getExtraWithCustomType(0n); + await consensus.advanceTimeToNextFrameStart(); + const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await expect(oracle.connect(member1).submitReportExtraDataList(extraDataList)) + .to.be.revertedWithCustomError(oracle, "UnsupportedExtraDataType") + 
.withArgs(wrongTypedIndex, typeCustom); + }); - it("if type `3` was passed", async () => { - const { extraDataItems, wrongTypedIndex, typeCustom } = getExtraWithCustomType(3n); - await consensus.advanceTimeToNextFrameStart(); - const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); - await oracle.connect(member1).submitReportData(reportFields, oracleVersion); - await expect(oracle.connect(member1).submitReportExtraDataList(extraDataList)) - .to.be.revertedWithCustomError(oracle, "UnsupportedExtraDataType") - .withArgs(wrongTypedIndex, typeCustom); - }); + it("if type `4` was passed", async () => { + const { extraDataItems, wrongTypedIndex, typeCustom } = getExtraWithCustomType(4n); + await consensus.advanceTimeToNextFrameStart(); + const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await expect(oracle.connect(member1).submitReportExtraDataList(extraDataList)) + .to.be.revertedWithCustomError(oracle, "UnsupportedExtraDataType") + .withArgs(wrongTypedIndex, typeCustom); + }); - it("if type `1` was passed", async () => { - const { extraDataItems, wrongTypedIndex, typeCustom } = getExtraWithCustomType(1n); - await consensus.advanceTimeToNextFrameStart(); - const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); - await oracle.connect(member1).submitReportData(reportFields, oracleVersion); - await expect(oracle.connect(member1).submitReportExtraDataList(extraDataList)) - .to.be.revertedWithCustomError(oracle, "DeprecatedExtraDataType") - .withArgs(wrongTypedIndex, typeCustom); - }); + it("if type `1` was passed", async () => { + const { extraDataItems, wrongTypedIndex, typeCustom } = getExtraWithCustomType(1n); + await consensus.advanceTimeToNextFrameStart(); + const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); + await 
oracle.connect(member1).submitReportData(reportFields, oracleVersion); + await expect(oracle.connect(member1).submitReportExtraDataList(extraDataList)) + .to.be.revertedWithCustomError(oracle, "DeprecatedExtraDataType") + .withArgs(wrongTypedIndex, typeCustom); + }); - it("succeeds if `2` was passed", async () => { - const { extraDataItems } = getExtraWithCustomType(2n); - await consensus.advanceTimeToNextFrameStart(); - const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); - await oracle.connect(member1).submitReportData(reportFields, oracleVersion); - const tx = await oracle.connect(member1).submitReportExtraDataList(extraDataList); - await expect(tx).to.emit(oracle, "ExtraDataSubmitted").withArgs(reportFields.refSlot, anyValue, anyValue); - }); - }); + it("succeeds if `2` was passed", async () => { + const { extraDataItems } = getExtraWithCustomType(2n); + await consensus.advanceTimeToNextFrameStart(); + const { reportFields, extraDataList } = await submitReportHash({ extraData: extraDataItems }); + await oracle.connect(member1).submitReportData(reportFields, oracleVersion); + const tx = await oracle.connect(member1).submitReportExtraDataList(extraDataList); + await expect(tx).to.emit(oracle, "ExtraDataSubmitted").withArgs(reportFields.refSlot, anyValue, anyValue); + }); + }, + ); context("should check node operators processing limits with OracleReportSanityChecker", () => { it("by reverting TooManyNodeOpsPerExtraDataItem if there was too much node operators", async () => { diff --git a/test/0.8.9/oracle/accountingOracle.upgrade.test.ts b/test/0.8.9/oracle/accountingOracle.upgrade.test.ts index 2d0dd92ec2..85d67f7830 100644 --- a/test/0.8.9/oracle/accountingOracle.upgrade.test.ts +++ b/test/0.8.9/oracle/accountingOracle.upgrade.test.ts @@ -8,7 +8,7 @@ import { AccountingOracle__Harness } from "typechain-types"; import { deployAndConfigureAccountingOracle } from "test/deploy"; describe("AccountingOracle.sol:upgrade", () => 
{ - context("finalizeUpgrade_v3", () => { + context("finalizeUpgrade_v5", () => { let admin: HardhatEthersSigner; let oracle: AccountingOracle__Harness; const NEW_CONSENSUS_VERSION = 42n; // Just a test value @@ -17,19 +17,19 @@ describe("AccountingOracle.sol:upgrade", () => { [admin] = await ethers.getSigners(); const deployed = await deployAndConfigureAccountingOracle(admin.address); oracle = deployed.oracle; - await oracle.setContractVersion(3); // Set initial contract version to 3 + await oracle.setContractVersion(4); // Set initial contract version to 4 }); - // TODO: test version increment because finalizeUpgrade_v4 should be called on a v2 contract + // TODO: test version increment because finalizeUpgrade_v5 should be called on a v4 contract it("successfully updates contract and consensus versions", async () => { // Get initial versions const initialContractVersion = await oracle.getContractVersion(); const initialConsensusVersion = await oracle.getConsensusVersion(); - await oracle.connect(admin).finalizeUpgrade_v4(NEW_CONSENSUS_VERSION); + await oracle.connect(admin).finalizeUpgrade_v5(NEW_CONSENSUS_VERSION); const newContractVersion = await oracle.getContractVersion(); - expect(newContractVersion).to.equal(4); + expect(newContractVersion).to.equal(5); expect(newContractVersion).to.not.equal(initialContractVersion); // Verify consensus version updated to the provided value diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.accessControl.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.accessControl.test.ts index 80dead68bb..778e9d6259 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.accessControl.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.accessControl.test.ts @@ -4,11 +4,15 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { HashConsensus__Harness, ValidatorsExitBus__Harness } from "typechain-types"; +import { + HashConsensus__Harness, + 
StakingModule__MockForKeyVerification, + ValidatorsExitBus__Harness, +} from "typechain-types"; import { de0x, numberToHex, VEBO_CONSENSUS_VERSION } from "lib"; -import { DATA_FORMAT_LIST, deployVEBO, initVEBO } from "test/deploy"; +import { DATA_FORMAT_LIST_WITH_KEY_INDEX, deployVEBO, initVEBO, seedMockModuleSigningKeys } from "test/deploy"; import { Snapshot } from "test/suite"; const PUBKEYS = [ @@ -22,6 +26,14 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { let oracle: ValidatorsExitBus__Harness; let admin: HardhatEthersSigner; let originalState: string; + let mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module4: StakingModule__MockForKeyVerification; + module5: StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }; let initTx: ContractTransactionResponse; let oracleVersion: bigint; @@ -39,6 +51,7 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { moduleId: number; nodeOpId: number; valIndex: number; + keyIndex: number; valPubkey: string; } @@ -58,10 +71,16 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { return reportDataHash; }; - const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, keyIndex, valPubkey }: ExitRequest) => { const pubkeyHex = de0x(valPubkey); expect(pubkeyHex.length).to.equal(48 * 2); - return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex, 8) + + pubkeyHex + ); }; const encodeExitRequestsDataList = (requests: ExitRequest[]) => { @@ -72,6 +91,7 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { const deployed = await deployVEBO(admin.address); oracle = 
deployed.oracle; consensus = deployed.consensus; + mockModules = deployed.mockModules; initTx = await initVEBO({ admin: admin.address, oracle, consensus, resumeAfterDeploy: true }); @@ -83,15 +103,16 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { const { refSlot } = await consensus.getCurrentFrame(); exitRequests = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, keyIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, keyIndex: 1, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 1, keyIndex: 2, valPubkey: PUBKEYS[2] }, ]; + await seedMockModuleSigningKeys(mockModules, exitRequests); reportFields = { consensusVersion: VEBO_CONSENSUS_VERSION, refSlot: refSlot, - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, requestsCount: exitRequests.length, data: encodeExitRequestsDataList(exitRequests), }; @@ -118,14 +139,14 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { const { refSlot } = await consensus.getCurrentFrame(); exitRequests = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, keyIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, keyIndex: 1, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 1, keyIndex: 2, valPubkey: PUBKEYS[2] }, ]; reportFields = { consensusVersion: VEBO_CONSENSUS_VERSION, - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, // consensusVersion: CONSENSUS_VERSION, refSlot: refSlot, requestsCount: exitRequests.length, @@ -136,6 +157,7 @@ 
describe("ValidatorsExitBusOracle.sol:accessControl", () => { await consensus.connect(member1).submitReport(refSlot, reportHash, VEBO_CONSENSUS_VERSION); await consensus.connect(member3).submitReport(refSlot, reportHash, VEBO_CONSENSUS_VERSION); + await seedMockModuleSigningKeys(deployed.mockModules, exitRequests); await deploy(); }); @@ -164,7 +186,16 @@ describe("ValidatorsExitBusOracle.sol:accessControl", () => { }); it("should revert without admin address", async () => { await expect( - oracle.initialize(ZeroAddress, await consensus.getAddress(), VEBO_CONSENSUS_VERSION, 0, 600, 13000, 1, 48), + oracle.initialize( + ZeroAddress, + await consensus.getAddress(), + VEBO_CONSENSUS_VERSION, + 0, + 600, + 13_000n, // 13,000 ETH + 32n, // 32 ETH + 48, + ), ).to.be.revertedWithCustomError(oracle, "AdminCannotBeZero"); }); }); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.balanceCalculation.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.balanceCalculation.test.ts new file mode 100644 index 0000000000..1704cf8e98 --- /dev/null +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.balanceCalculation.test.ts @@ -0,0 +1,325 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + OracleReportSanityChecker, + StakingRouter__MockForValidatorsExitBus, + ValidatorsExitBus__Harness, +} from "typechain-types"; + +import { de0x, numberToHex } from "lib"; + +import { DATA_FORMAT_LIST, DATA_FORMAT_LIST_WITH_KEY_INDEX, deployVEBO } from "test/deploy"; + +const PUBKEYS = [ + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + 
"0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", +]; + +// Module IDs used in tests +const LEGACY_MODULE_ID = 1; // Module with 0x01 withdrawal credentials (32 ETH) +const MAXEB_MODULE_ID_1 = 3; // Module with 0x02 withdrawal credentials (2048 ETH) +const MAXEB_MODULE_ID_2 = 5; // Another module with 0x02 withdrawal credentials +const MAXEB_MODULE_ID_3 = 7; // Another module with 0x02 withdrawal credentials + +// Balance constants from WithdrawalCredentials.sol +const LEGACY_MODULE_MAX_BALANCE_ETH = 32n; // 32 ETH +const MAXEB_MODULE_MAX_BALANCE_ETH = 2048n; // 2048 ETH + +describe("ValidatorsExitBusOracle.sol:balanceCalculation", () => { + let oracle: ValidatorsExitBus__Harness; + let admin: HardhatEthersSigner; + let sanityChecker: OracleReportSanityChecker; + let stakingRouter: StakingRouter__MockForValidatorsExitBus; + + interface ExitRequest { + moduleId: number; + nodeOpId: number; + valIndex: number; + valPubkey: string; + keyIndex?: number; // Optional for format 2 + } + + const encodeExitRequestHexV1 = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const pubkeyHex = de0x(valPubkey); + expect(pubkeyHex.length).to.equal(48 * 2); + return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + }; + + const encodeExitRequestHexV2 = ({ moduleId, nodeOpId, valIndex, keyIndex, valPubkey }: ExitRequest) => { + const pubkeyHex = de0x(valPubkey); + expect(pubkeyHex.length).to.equal(48 * 2); + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex || 0, 8) + + pubkeyHex + ); + }; + + const encodeExitRequestsDataList = (requests: ExitRequest[], dataFormat: number) => { + const encoder = dataFormat === DATA_FORMAT_LIST ? 
encodeExitRequestHexV1 : encodeExitRequestHexV2; + return "0x" + requests.map(encoder).join(""); + }; + + before(async () => { + const signers = await ethers.getSigners(); + admin = signers[0]; + + const deployed = await deployVEBO(await admin.getAddress()); + oracle = deployed.oracle; + sanityChecker = deployed.oracleReportSanityChecker; + stakingRouter = deployed.stakingRouter as StakingRouter__MockForValidatorsExitBus; + }); + + describe("_calculateTotalExitBalanceEth", () => { + describe("Format 1 (DATA_FORMAT_LIST)", () => { + it("should calculate balance for single legacy validator (32 ETH)", async () => { + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + expect(totalBalance).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH); + }); + + it("should calculate balance for single MaxEB validator (2048 ETH)", async () => { + const requests: ExitRequest[] = [ + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + expect(totalBalance).to.equal(MAXEB_MODULE_MAX_BALANCE_ETH); + }); + + it("should calculate balance for multiple legacy validators", async () => { + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, valPubkey: PUBKEYS[1] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 12, valPubkey: PUBKEYS[2] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + 
expect(totalBalance).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH * 3n); + }); + + it("should calculate balance for multiple MaxEB validators", async () => { + const requests: ExitRequest[] = [ + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: MAXEB_MODULE_ID_2, nodeOpId: 2, valIndex: 20, valPubkey: PUBKEYS[1] }, + { moduleId: MAXEB_MODULE_ID_3, nodeOpId: 3, valIndex: 30, valPubkey: PUBKEYS[2] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + expect(totalBalance).to.equal(MAXEB_MODULE_MAX_BALANCE_ETH * 3n); + }); + + it("should calculate balance for mixed module types", async () => { + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 2, valIndex: 20, valPubkey: PUBKEYS[1] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, valPubkey: PUBKEYS[2] }, + { moduleId: MAXEB_MODULE_ID_2, nodeOpId: 3, valIndex: 30, valPubkey: PUBKEYS[3] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + const expected = + LEGACY_MODULE_MAX_BALANCE_ETH * 2n + // 2 legacy validators + MAXEB_MODULE_MAX_BALANCE_ETH * 2n; // 2 MaxEB validators + expect(totalBalance).to.equal(expected); + }); + + it("should return zero for empty data", async () => { + const data = "0x"; + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + expect(totalBalance).to.equal(0n); + }); + }); + + describe("Format 2 (DATA_FORMAT_LIST_WITH_KEY_INDEX)", () => { + it("should calculate balance for single legacy validator (32 ETH)", async () => { + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 5, valPubkey: PUBKEYS[0] }, + 
]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + expect(totalBalance).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH); + }); + + it("should calculate balance for single MaxEB validator (2048 ETH)", async () => { + const requests: ExitRequest[] = [ + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 1, valIndex: 10, keyIndex: 7, valPubkey: PUBKEYS[0] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + expect(totalBalance).to.equal(MAXEB_MODULE_MAX_BALANCE_ETH); + }); + + it("should calculate balance for multiple legacy validators", async () => { + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 1, valPubkey: PUBKEYS[0] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, keyIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 12, keyIndex: 3, valPubkey: PUBKEYS[2] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + expect(totalBalance).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH * 3n); + }); + + it("should calculate balance for multiple MaxEB validators", async () => { + const requests: ExitRequest[] = [ + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 1, valIndex: 10, keyIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: MAXEB_MODULE_ID_2, nodeOpId: 2, valIndex: 20, keyIndex: 20, valPubkey: PUBKEYS[1] }, + { moduleId: MAXEB_MODULE_ID_3, nodeOpId: 3, valIndex: 30, keyIndex: 30, valPubkey: PUBKEYS[2] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const totalBalance = await 
oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + expect(totalBalance).to.equal(MAXEB_MODULE_MAX_BALANCE_ETH * 3n); + }); + + it("should calculate balance for mixed module types", async () => { + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 1, valPubkey: PUBKEYS[0] }, + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 2, valIndex: 20, keyIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, keyIndex: 3, valPubkey: PUBKEYS[2] }, + { moduleId: MAXEB_MODULE_ID_2, nodeOpId: 3, valIndex: 30, keyIndex: 4, valPubkey: PUBKEYS[3] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const expected = + LEGACY_MODULE_MAX_BALANCE_ETH * 2n + // 2 legacy validators + MAXEB_MODULE_MAX_BALANCE_ETH * 2n; // 2 MaxEB validators + expect(totalBalance).to.equal(expected); + }); + + it("should return zero for empty data", async () => { + const data = "0x"; + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + expect(totalBalance).to.equal(0n); + }); + + it("should ignore keyIndex when calculating balance", async () => { + // Same module, different keyIndexes should result in same total balance + const requests1: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 1, valPubkey: PUBKEYS[0] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, keyIndex: 2, valPubkey: PUBKEYS[1] }, + ]; + const requests2: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 100, valPubkey: PUBKEYS[0] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, keyIndex: 200, valPubkey: PUBKEYS[1] }, + ]; + + const data1 = encodeExitRequestsDataList(requests1, DATA_FORMAT_LIST_WITH_KEY_INDEX); + const data2 = 
encodeExitRequestsDataList(requests2, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + const totalBalance1 = await oracle.calculateTotalExitBalanceEth(data1, DATA_FORMAT_LIST_WITH_KEY_INDEX); + const totalBalance2 = await oracle.calculateTotalExitBalanceEth(data2, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + expect(totalBalance1).to.equal(totalBalance2); + expect(totalBalance1).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH * 2n); + }); + }); + + describe("Edge cases", () => { + it("should handle large number of validators", async () => { + const requests: ExitRequest[] = []; + for (let i = 0; i < 100; i++) { + requests.push({ + moduleId: LEGACY_MODULE_ID, + nodeOpId: 1, + valIndex: i, + valPubkey: PUBKEYS[i % PUBKEYS.length], + }); + } + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + + expect(totalBalance).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH * 100n); + }); + + it("should handle module with 0x02 withdrawal credentials (MaxEB)", async () => { + const { oracle: newOracle, stakingRouter: localRouter } = await deployVEBO(await admin.getAddress()); + + // Configure module 999 as MaxEB (0x02) + await localRouter.setStakingModuleWithdrawalCredentialsType(999, 0x02); + + const requests: ExitRequest[] = [{ moduleId: 999, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await newOracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + expect(totalBalance).to.equal(MAXEB_MODULE_MAX_BALANCE_ETH); + }); + + it("should handle module with 0x01 withdrawal credentials (Legacy)", async () => { + const { oracle: newOracle, stakingRouter: localRouter } = await deployVEBO(await admin.getAddress()); + + // Configure module 888 as Legacy (0x01) + await localRouter.setStakingModuleWithdrawalCredentialsType(888, 0x01); + + const requests: ExitRequest[] = [{ moduleId: 888, nodeOpId: 1, valIndex: 
10, valPubkey: PUBKEYS[0] }]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + const totalBalance = await newOracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST); + expect(totalBalance).to.equal(LEGACY_MODULE_MAX_BALANCE_ETH); + }); + + it("uses MaxEB weights from sanity checker and applies governance updates", async () => { + await sanityChecker.grantRole(await sanityChecker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), admin.address); + await sanityChecker.setMaxEffectiveBalanceWeightWCType01(40n); + await sanityChecker.setMaxEffectiveBalanceWeightWCType02(4_096n); + + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: MAXEB_MODULE_ID_1, nodeOpId: 2, valIndex: 20, valPubkey: PUBKEYS[1] }, + ]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + expect(await oracle.MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_01()).to.equal(40n); + expect(await oracle.MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_02()).to.equal(4_096n); + expect(await oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST)).to.equal(4_136n); + }); + + it("reverts for unconfigured modules", async () => { + // Module 777 is not configured in the router + const requests: ExitRequest[] = [{ moduleId: 777, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }]; + const data = encodeExitRequestsDataList(requests, DATA_FORMAT_LIST); + + await expect(oracle.calculateTotalExitBalanceEth(data, DATA_FORMAT_LIST)).to.be.revertedWithCustomError( + stakingRouter, + "StakingModuleUnregistered", + ); + }); + }); + }); +}); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.balanceIntegration.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.balanceIntegration.test.ts new file mode 100644 index 0000000000..f771af0f76 --- /dev/null +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.balanceIntegration.test.ts @@ -0,0 +1,240 @@ +import { expect } from "chai"; +import { ethers } from 
"hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + HashConsensus__Harness, + OracleReportSanityChecker, + StakingModule__MockForKeyVerification, + ValidatorsExitBus__Harness, +} from "typechain-types"; + +import { de0x, numberToHex, VEBO_CONSENSUS_VERSION } from "lib"; + +import { DATA_FORMAT_LIST, DATA_FORMAT_LIST_WITH_KEY_INDEX, deployVEBO, initVEBO } from "test/deploy"; +import { Snapshot } from "test/suite"; + +const PUBKEYS = [ + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + "0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd", + "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee", +]; + +// Constants from WithdrawalCredentials.sol +const LEGACY_MODULE_ID = 1; // Module with 0x01 withdrawal credentials (32 ETH) +const LEGACY_MODULE_MAX_BALANCE_ETH = 32n; // 32 ETH +const MAXEB_MODULE_MAX_BALANCE_ETH = 2048n; // 2048 ETH + +describe("ValidatorsExitBusOracle.sol:balanceIntegration", () => { + let consensus: HashConsensus__Harness; + let oracle: ValidatorsExitBus__Harness; + let oracleReportSanityChecker: OracleReportSanityChecker; + let admin: HardhatEthersSigner; + let member1: HardhatEthersSigner; + let member2: HardhatEthersSigner; + let member3: HardhatEthersSigner; + let mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module5: StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }; + + let oracleVersion: bigint; + + interface ExitRequest { + moduleId: number; + nodeOpId: number; + valIndex: 
number; + valPubkey: string; + keyIndex?: number; // Optional for format 2 + } + + const encodeExitRequestHexV1 = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const pubkeyHex = de0x(valPubkey); + return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + }; + + const encodeExitRequestHexV2 = ({ moduleId, nodeOpId, valIndex, keyIndex, valPubkey }: ExitRequest) => { + const pubkeyHex = de0x(valPubkey); + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex || 0, 8) + + pubkeyHex + ); + }; + + const encodeExitRequestsDataList = (requests: ExitRequest[], dataFormat: number) => { + const encoder = dataFormat === DATA_FORMAT_LIST ? encodeExitRequestHexV1 : encodeExitRequestHexV2; + return "0x" + requests.map(encoder).join(""); + }; + + const calcValidatorsExitBusReportDataHash = (items: { + consensusVersion: bigint; + refSlot: bigint; + requestsCount: number; + dataFormat: number; + data: string; + }) => { + const reportData = [items.consensusVersion, items.refSlot, items.requestsCount, items.dataFormat, items.data]; + const reportDataHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["(uint256,uint256,uint256,uint256,bytes)"], [reportData]), + ); + return reportDataHash; + }; + + const triggerConsensusOnHash = async (hash: string) => { + const { refSlot } = await consensus.getCurrentFrame(); + await consensus.connect(member1).submitReport(refSlot, hash, VEBO_CONSENSUS_VERSION); + await consensus.connect(member3).submitReport(refSlot, hash, VEBO_CONSENSUS_VERSION); + expect((await consensus.getConsensusState()).consensusReport).to.equal(hash); + }; + + const prepareReportAndSubmitHash = async ( + requests: ExitRequest[], + dataFormat: number = DATA_FORMAT_LIST, + options = { reportFields: {} }, + ) => { + const { refSlot } = await consensus.getCurrentFrame(); + const reportData = { + consensusVersion: VEBO_CONSENSUS_VERSION, + 
dataFormat, + refSlot, + requestsCount: requests.length, + data: encodeExitRequestsDataList(requests, dataFormat), + ...options.reportFields, + }; + + const reportHash = calcValidatorsExitBusReportDataHash(reportData); + + await triggerConsensusOnHash(reportHash); + + return { reportData, reportHash }; + }; + + before(async () => { + const signers = await ethers.getSigners(); + admin = signers[0]; + member1 = signers[1]; + member2 = signers[2]; + member3 = signers[3]; + + const deployed = await deployVEBO(await admin.getAddress()); + consensus = deployed.consensus; + oracle = deployed.oracle; + oracleReportSanityChecker = deployed.oracleReportSanityChecker; + mockModules = deployed.mockModules; + + // Configure signing keys for Format 2 testing (key verification) + // Set up keys for all combinations used in tests + for (let i = 0; i < PUBKEYS.length; i++) { + // Module 1 (legacy): keys for nodeOpId 1, keyIndex 1-5 + await mockModules.module1.setSigningKey(1, i + 1, PUBKEYS[i]); + + // Module 3 (MaxEB): keys for nodeOpId 1, keyIndex 10-14 + await mockModules.module3.setSigningKey(1, 10 + i, PUBKEYS[i]); + + // Module 3 (MaxEB): keys for nodeOpId 2, keyIndex 20 + // Multiple PUBKEYS can map to the same keyIndex for different tests + await mockModules.module3.setSigningKey(2, 20, PUBKEYS[0]); // Used in mixed validator test + + // Module 5 (MaxEB): keys for nodeOpId 2, keyIndex 20 + await mockModules.module5.setSigningKey(2, 20, PUBKEYS[1]); // Used in MaxEB validator test + } + + // Additional keys for "same balance" comparison test + await mockModules.module1.setSigningKey(1, 100, PUBKEYS[0]); // Format 1 vs Format 2 test + await mockModules.module3.setSigningKey(2, 200, PUBKEYS[1]); // Format 1 vs Format 2 test + + await initVEBO({ + admin: admin.address, + oracle, + consensus, + resumeAfterDeploy: true, + }); + + await consensus.addMember(member1, 1); + await consensus.addMember(member2, 2); + await consensus.addMember(member3, 2); + + oracleVersion = await 
oracle.getContractVersion(); + }); + + describe("Balance calculation integration with sanity checker", () => { + let originalState: string; + + before(async () => { + // Grant the role to admin for setting limits + await oracleReportSanityChecker + .connect(admin) + .grantRole(await oracleReportSanityChecker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), admin.address); + }); + + beforeEach(async () => { + originalState = await Snapshot.take(); + }); + + afterEach(async () => { + await Snapshot.restore(originalState); + }); + + it("should pass sanity check for legacy validators (Format 2)", async () => { + // Set limit to allow 10 legacy validators (320 ETH) + const limit = LEGACY_MODULE_MAX_BALANCE_ETH * 10n; + await oracleReportSanityChecker.connect(admin).setMaxBalanceExitRequestedPerReportInEth(limit); + + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 1, valPubkey: PUBKEYS[0] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, keyIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 12, keyIndex: 3, valPubkey: PUBKEYS[2] }, + ]; + + const { reportData } = await prepareReportAndSubmitHash(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + // Should not revert - balance is within limit + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).not.to.be.reverted; + }); + + it("should pass sanity check for MaxEB validators (Format 2)", async () => { + // Set limit to allow 2 MaxEB validators (4096 ETH) + const limit = MAXEB_MODULE_MAX_BALANCE_ETH * 2n; + await oracleReportSanityChecker.connect(admin).setMaxBalanceExitRequestedPerReportInEth(limit); + + const requests: ExitRequest[] = [ + { moduleId: 3, nodeOpId: 1, valIndex: 10, keyIndex: 10, valPubkey: PUBKEYS[0] }, + { moduleId: 5, nodeOpId: 2, valIndex: 20, keyIndex: 20, valPubkey: PUBKEYS[1] }, + ]; + + const { reportData } = await prepareReportAndSubmitHash(requests, 
DATA_FORMAT_LIST_WITH_KEY_INDEX); + + // Should not revert - balance is within limit + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).not.to.be.reverted; + }); + + it("should pass sanity check for mixed validators (Format 2)", async () => { + // Set limit to allow: 5 legacy (160 ETH) + 1 MaxEB (2048 ETH) = 2208 ETH + const limit = LEGACY_MODULE_MAX_BALANCE_ETH * 5n + MAXEB_MODULE_MAX_BALANCE_ETH * 1n; + await oracleReportSanityChecker.connect(admin).setMaxBalanceExitRequestedPerReportInEth(limit); + + const requests: ExitRequest[] = [ + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 10, keyIndex: 1, valPubkey: PUBKEYS[0] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 11, keyIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 12, keyIndex: 3, valPubkey: PUBKEYS[2] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 13, keyIndex: 4, valPubkey: PUBKEYS[3] }, + { moduleId: LEGACY_MODULE_ID, nodeOpId: 1, valIndex: 14, keyIndex: 5, valPubkey: PUBKEYS[4] }, + { moduleId: 3, nodeOpId: 2, valIndex: 20, keyIndex: 20, valPubkey: PUBKEYS[0] }, + ]; + + const { reportData } = await prepareReportAndSubmitHash(requests, DATA_FORMAT_LIST_WITH_KEY_INDEX); + + // Should not revert - balance is within limit + await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).not.to.be.reverted; + }); + }); +}); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.edgecases.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.edgecases.test.ts new file mode 100644 index 0000000000..fbefeca202 --- /dev/null +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.edgecases.test.ts @@ -0,0 +1,159 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + HashConsensus__Harness, + OracleReportSanityChecker__MockForExitBusWeights, + StakingModule__MockBadKeys, + 
StakingRouter__MockForValidatorsExitBus, + ValidatorsExitBus__Harness, +} from "typechain-types"; + +import { numberToHex } from "lib"; + +import { + DATA_FORMAT_LIST, + DATA_FORMAT_LIST_WITH_KEY_INDEX, + deployVEBO, + initVEBO, + makeMockPubkey, + updateLidoLocatorImplementation, +} from "test/deploy"; + +const PUBKEY_AA = "0x" + "aa".repeat(48); +const PUBKEY_BB = "0x" + "bb".repeat(48); + +const encodeV1 = (moduleId: number, nodeOpId: number, valIndex: number, pubkey: string) => + ("0x" + + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + pubkey.slice(2)) as `0x${string}`; + +const encodeV2 = (moduleId: number, nodeOpId: number, valIndex: number, keyIndex: number, pubkey: string) => + ("0x" + + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex, 8) + + pubkey.slice(2)) as `0x${string}`; + +describe("ValidatorsExitBusOracle.sol:edge coverage", () => { + let oracle: ValidatorsExitBus__Harness; + let stakingRouter: StakingRouter__MockForValidatorsExitBus; + let consensus: HashConsensus__Harness; + let admin: HardhatEthersSigner; + let locatorAddr: string; + + beforeEach(async () => { + [admin] = await ethers.getSigners(); + const deployed = await deployVEBO(admin.address); + oracle = deployed.oracle as ValidatorsExitBus__Harness; + stakingRouter = deployed.stakingRouter as StakingRouter__MockForValidatorsExitBus; + consensus = deployed.consensus as HashConsensus__Harness; + locatorAddr = deployed.locatorAddr; + + await initVEBO({ + admin: admin.address, + oracle, + consensus, + resumeAfterDeploy: true, + lastProcessingRefSlot: 0, + }); + }); + + it("unpackExitRequest happy path + bounds", async () => { + const request = encodeV1(1, 2, 3, PUBKEY_AA); + + const [pubkey, nodeOpId, moduleId, valIndex] = await oracle.unpackExitRequest(request, DATA_FORMAT_LIST, 0); + expect(pubkey).to.equal(PUBKEY_AA); + expect(nodeOpId).to.equal(2n); + expect(moduleId).to.equal(1n); + 
expect(valIndex).to.equal(3n); + + await expect(oracle.unpackExitRequest(request, DATA_FORMAT_LIST, 1)).to.be.revertedWithCustomError( + oracle, + "ExitDataIndexOutOfRange", + ); + }); + + it("base _getTimestamp is reachable", async () => { + const ts = await oracle.callBaseTimestamp(); + expect(ts).to.be.greaterThan(0); + }); + + it("unsupported formats revert in decoder, dispatcher, and balance calc", async () => { + const request = encodeV1(1, 1, 1, PUBKEY_AA); + + await expect(oracle.callGetValidatorData(request, 3, 0)).to.be.revertedWithCustomError( + oracle, + "UnsupportedRequestsDataFormat", + ); + await expect(oracle.callProcessExitRequestsList(request, 3)).to.be.revertedWithCustomError( + oracle, + "UnsupportedRequestsDataFormat", + ); + await expect(oracle.calculateTotalExitBalanceEth(request, 3)).to.be.revertedWithCustomError( + oracle, + "UnsupportedRequestsDataFormat", + ); + }); + + it("processExitRequestsList supports format 2 and reverts on unsorted data", async () => { + const req1 = encodeV2(1, 1, 2, 0, makeMockPubkey(1, 0)); // valIndex 2 + const req2 = encodeV2(1, 1, 1, 1, makeMockPubkey(1, 1)); // valIndex 1 (unordered) + const data = (req1 + req2.slice(2)) as `0x${string}`; + + await expect( + oracle.callProcessExitRequestsList(data, DATA_FORMAT_LIST_WITH_KEY_INDEX), + ).to.be.revertedWithCustomError(oracle, "InvalidRequestsDataSortOrder"); + }); + + it("calculateTotalExitBalanceEth reverts on unexpected WC type", async () => { + await stakingRouter.setStakingModuleWithdrawalCredentialsType(30, 0x03); // unsupported + const req = encodeV1(30, 1, 1, PUBKEY_AA); + + await expect(oracle.calculateTotalExitBalanceEth(req, DATA_FORMAT_LIST)).to.be.revertedWithCustomError( + oracle, + "UnexpectedWCType", + ); + }); + + it("InvalidMaxEBWeight when sanity checker returns zero", async () => { + const mockSanity = (await ethers.deployContract("OracleReportSanityChecker__MockForExitBusWeights", [ + 0n, + 32n, + ])) as 
OracleReportSanityChecker__MockForExitBusWeights; + + await updateLidoLocatorImplementation(locatorAddr, { + oracleReportSanityChecker: await mockSanity.getAddress(), + }); + + const req = encodeV1(1, 1, 1, PUBKEY_AA); + await expect(oracle.calculateTotalExitBalanceEth(req, DATA_FORMAT_LIST)).to.be.revertedWithCustomError( + oracle, + "InvalidMaxEBWeight", + ); + }); + + it("verifyKey detects invalid lengths and mismatched pubkeys", async () => { + const badModule = (await ethers.deployContract("StakingModule__MockBadKeys")) as StakingModule__MockBadKeys; + await stakingRouter.setStakingModuleWithdrawalCredentialsType(40, 0x01); + await stakingRouter.setStakingModuleAddress(40, await badModule.getAddress()); + + // invalid length (empty) + await badModule.setReturned("0x"); + const req = encodeV2(40, 1, 1, 0, PUBKEY_AA); + await expect( + oracle.callProcessExitRequestsList(req, DATA_FORMAT_LIST_WITH_KEY_INDEX), + ).to.be.revertedWithCustomError(oracle, "InvalidRetrievedKeyLength"); + + // mismatched pubkey (returns PUBKEY_BB but request has PUBKEY_AA) + await badModule.setReturned(PUBKEY_BB); + await expect( + oracle.callProcessExitRequestsList(req, DATA_FORMAT_LIST_WITH_KEY_INDEX), + ).to.be.revertedWithCustomError(oracle, "InvalidPublicKey"); + }); +}); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.finalize_v2.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.finalize_v2.test.ts index 87dd6c83e7..779a90e149 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.finalize_v2.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.finalize_v2.test.ts @@ -7,18 +7,27 @@ import { LidoLocator, ValidatorsExitBus__Harness } from "typechain-types"; import { EPOCHS_PER_FRAME, INITIAL_FAST_LANE_LENGTH_SLOTS, SLOTS_PER_EPOCH, VEBO_CONSENSUS_VERSION } from "lib"; -import { deployLidoLocator } from "test/deploy"; +import { deployLidoLocator, updateLidoLocatorImplementation } from "test/deploy"; import { Snapshot } from "test/suite"; 
-describe("ValidatorsExitBusOracle.sol:finalizeUpgrade_v2", () => { +describe("ValidatorsExitBusOracle.sol:finalizeUpgrade_v3", () => { let originalState: string; let locator: LidoLocator; let oracle: ValidatorsExitBus__Harness; let admin: HardhatEthersSigner; + const NEW_CONSENSUS_VERSION = 42n; before(async () => { locator = await deployLidoLocator(); [admin] = await ethers.getSigners(); + + const stakingRouter = await ethers.deployContract("StakingRouter__MockForValidatorsExitBus"); + await stakingRouter.setStakingModuleWithdrawalCredentialsType(1, 0x01); + + await updateLidoLocatorImplementation(await locator.getAddress(), { + stakingRouter: await stakingRouter.getAddress(), + }); + oracle = await ethers.deployContract("ValidatorsExitBus__Harness", [12n, 100n, await locator.getAddress()]); const consensus = await ethers.deployContract("HashConsensus__Harness", [ @@ -31,7 +40,16 @@ describe("ValidatorsExitBusOracle.sol:finalizeUpgrade_v2", () => { await oracle.getAddress(), ]); - await oracle.initialize(admin, await consensus.getAddress(), VEBO_CONSENSUS_VERSION, 0, 10, 100, 1, 48); + await oracle.initialize( + admin, + await consensus.getAddress(), + VEBO_CONSENSUS_VERSION, + 0, + 10, + 100n, // 100 ETH + 32n, // 32 ETH + 48, + ); }); beforeEach(async () => (originalState = await Snapshot.take())); @@ -40,28 +58,42 @@ describe("ValidatorsExitBusOracle.sol:finalizeUpgrade_v2", () => { // contract version it("should revert if set wrong version", async () => { - await expect(oracle.finalizeUpgrade_v2(10, 100, 1, 48)).to.be.revertedWithCustomError( + await expect(oracle.finalizeUpgrade_v3(10, 100n, 32n, 48, NEW_CONSENSUS_VERSION)).to.be.revertedWithCustomError( oracle, "InvalidContractVersionIncrement", ); }); it("should successfully finalize upgrade", async () => { - await oracle.setContractVersion(1); - - await oracle.finalizeUpgrade_v2(15, 150, 1, 48); + // Simulate pre-upgrade state (contract at version 2) + await oracle.setContractVersion(2); + + // Set 
balance limits in ETH (not Gwei, not validator counts) + const maxExitBalanceEth = 150n; // 150 ETH + const balancePerFrameEth = 32n; // 32 ETH (1 legacy validator) + const maxValidatorsPerReport = 15; + const frameDuration = 48; + + await oracle.finalizeUpgrade_v3( + maxValidatorsPerReport, + maxExitBalanceEth, + balancePerFrameEth, + frameDuration, + NEW_CONSENSUS_VERSION, + ); - expect(await oracle.getContractVersion()).to.equal(2); + expect(await oracle.getContractVersion()).to.equal(3); + expect(await oracle.getConsensusVersion()).to.equal(NEW_CONSENSUS_VERSION); const exitRequestLimitData = await oracle.getExitRequestLimitFullInfo(); - expect(exitRequestLimitData.maxExitRequestsLimit).to.equal(150); - expect(exitRequestLimitData.exitsPerFrame).to.equal(1); - expect(exitRequestLimitData.frameDurationInSec).to.equal(48); + expect(exitRequestLimitData.maxExitBalanceEth).to.equal(maxExitBalanceEth); + expect(exitRequestLimitData.balancePerFrameEth).to.equal(balancePerFrameEth); + expect(exitRequestLimitData.frameDurationInSec).to.equal(frameDuration); - expect(await oracle.getMaxValidatorsPerReport()).to.equal(15); + expect(await oracle.getMaxValidatorsPerReport()).to.equal(maxValidatorsPerReport); - // should not allow to run finalizeUpgrade_v2 again - await expect(oracle.finalizeUpgrade_v2(10, 100, 1, 48)).to.be.revertedWithCustomError( + // should not allow finalizeUpgrade_v3 to run again + await expect(oracle.finalizeUpgrade_v3(10, 100, 1, 48, NEW_CONSENSUS_VERSION + 1n)).to.be.revertedWithCustomError( oracle, "InvalidContractVersionIncrement", ); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.gas.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.gas.test.ts index 830bc1692e..21d0d379e3 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.gas.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.gas.test.ts @@ -4,16 +4,22 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; 
-import { HashConsensus__Harness, ValidatorsExitBus__Harness } from "typechain-types"; +import { + HashConsensus__Harness, + StakingModule__MockForKeyVerification, + StakingRouter__MockForValidatorsExitBus, + ValidatorsExitBus__Harness, +} from "typechain-types"; import { de0x, numberToHex, VEBO_CONSENSUS_VERSION } from "lib"; import { computeTimestampAtSlot, - DATA_FORMAT_LIST, + DATA_FORMAT_LIST_WITH_KEY_INDEX, deployVEBO, initVEBO, SECONDS_PER_FRAME, + seedMockModuleSigningKeys, SLOTS_PER_FRAME, } from "test/deploy"; import { Snapshot } from "test/suite"; @@ -29,7 +35,16 @@ const PUBKEYS = [ describe("ValidatorsExitBusOracle.sol:gas", () => { let consensus: HashConsensus__Harness; let oracle: ValidatorsExitBus__Harness; + let stakingRouter: StakingRouter__MockForValidatorsExitBus; let admin: HardhatEthersSigner; + let mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module4: StakingModule__MockForKeyVerification; + module5: StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }; let oracleVersion: bigint; @@ -41,11 +56,13 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { const NODE_OPS_PER_MODULE = 100; let nextValIndex = 1; + let nextKeyIndex = 1; interface ExitRequest { moduleId: number; nodeOpId: number; valIndex: number; + keyIndex: number; valPubkey: string; } @@ -65,10 +82,16 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { return reportDataHash; }; - const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey, keyIndex }: ExitRequest) => { const pubkeyHex = de0x(valPubkey); expect(pubkeyHex.length).to.equal(48 * 2); - return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + 
numberToHex(valIndex, 8) + + numberToHex(keyIndex, 8) + + pubkeyHex + ); }; const encodeExitRequestsDataList = (requests: ExitRequest[]) => { @@ -92,8 +115,9 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { const moduleId = Math.floor(i / requestsPerModule); const nodeOpId = Math.floor((i - moduleId * requestsPerModule) / requestsPerNodeOp); const valIndex = nextValIndex++; + const keyIndex = nextKeyIndex++; const valPubkey = PUBKEYS[valIndex % PUBKEYS.length]; - requests.push({ moduleId: moduleId + 1, nodeOpId, valIndex, valPubkey }); + requests.push({ moduleId: moduleId + 1, nodeOpId, valIndex, keyIndex, valPubkey }); } return { requests, requestsPerModule, requestsPerNodeOp }; @@ -108,6 +132,13 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { const deployed = await deployVEBO(admin.address); oracle = deployed.oracle; consensus = deployed.consensus; + stakingRouter = deployed.stakingRouter as StakingRouter__MockForValidatorsExitBus; + mockModules = deployed.mockModules; + + // Use legacy withdrawal credentials (32 ETH per validator) for all modules exercised in this suite + for (let moduleId = 1; moduleId <= 5; moduleId++) { + await stakingRouter.setStakingModuleWithdrawalCredentialsType(moduleId, 0x01); + } await initVEBO({ admin: admin.address, @@ -133,7 +164,7 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { ); }); - for (const totalRequests of [10, 50, 100, 1000, 2000]) { + for (const totalRequests of [10, 50, 100, 600]) { context(`Total requests: ${totalRequests}`, () => { let exitRequests: { requests: ExitRequest[]; requestsPerModule: number; requestsPerNodeOp: number }; let reportFields: ReportFields; @@ -164,9 +195,10 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { consensusVersion: VEBO_CONSENSUS_VERSION, refSlot: refSlot, requestsCount: exitRequests.requests.length, - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: encodeExitRequestsDataList(exitRequests.requests), }; + await 
seedMockModuleSigningKeys(mockModules, exitRequests.requests); reportHash = calcValidatorsExitBusReportDataHash(reportFields); @@ -225,7 +257,7 @@ describe("ValidatorsExitBusOracle.sol:gas", () => { const procState = await oracle.getProcessingState(); expect(procState.dataHash).to.equal(reportHash); expect(procState.dataSubmitted).to.equal(true); - expect(procState.dataFormat).to.equal(DATA_FORMAT_LIST); + expect(procState.dataFormat).to.equal(DATA_FORMAT_LIST_WITH_KEY_INDEX); expect(procState.requestsCount).to.equal(exitRequests.requests.length); expect(procState.requestsSubmitted).to.equal(exitRequests.requests.length); }); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.happyPath.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.happyPath.test.ts index ebc282a4a2..0b4c4b38b6 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.happyPath.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.happyPath.test.ts @@ -4,16 +4,21 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { HashConsensus__Harness, ValidatorsExitBus__Harness } from "typechain-types"; +import { + HashConsensus__Harness, + StakingModule__MockForKeyVerification, + ValidatorsExitBus__Harness, +} from "typechain-types"; import { de0x, numberToHex, VEBO_CONSENSUS_VERSION } from "lib"; import { computeTimestampAtSlot, - DATA_FORMAT_LIST, + DATA_FORMAT_LIST_WITH_KEY_INDEX, deployVEBO, initVEBO, SECONDS_PER_FRAME, + seedMockModuleSigningKeys, SLOTS_PER_FRAME, } from "test/deploy"; @@ -29,6 +34,14 @@ describe("ValidatorsExitBusOracle.sol:happyPath", () => { let consensus: HashConsensus__Harness; let oracle: ValidatorsExitBus__Harness; let admin: HardhatEthersSigner; + let mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module4: StakingModule__MockForKeyVerification; + module5: 
StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }; let oracleVersion: bigint; let exitRequests: ExitRequest[]; @@ -46,6 +59,7 @@ describe("ValidatorsExitBusOracle.sol:happyPath", () => { moduleId: number; nodeOpId: number; valIndex: number; + keyIndex: number; valPubkey: string; } @@ -65,10 +79,16 @@ describe("ValidatorsExitBusOracle.sol:happyPath", () => { return reportDataHash; }; - const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey, keyIndex }: ExitRequest) => { const pubkeyHex = de0x(valPubkey); expect(pubkeyHex.length).to.equal(48 * 2); - return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex, 8) + + pubkeyHex + ); }; const encodeExitRequestsDataList = (requests: ExitRequest[]) => { @@ -81,6 +101,7 @@ describe("ValidatorsExitBusOracle.sol:happyPath", () => { const deployed = await deployVEBO(admin.address); oracle = deployed.oracle; consensus = deployed.consensus; + mockModules = deployed.mockModules; await initVEBO({ admin: admin.address, @@ -132,18 +153,19 @@ describe("ValidatorsExitBusOracle.sol:happyPath", () => { const { refSlot } = await consensus.getCurrentFrame(); exitRequests = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, keyIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, keyIndex: 1, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 1, keyIndex: 2, valPubkey: PUBKEYS[2] }, ]; reportFields = { consensusVersion: VEBO_CONSENSUS_VERSION, refSlot: refSlot, requestsCount: exitRequests.length, - 
dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: encodeExitRequestsDataList(exitRequests), }; + await seedMockModuleSigningKeys(mockModules, exitRequests); reportHash = calcValidatorsExitBusReportDataHash(reportFields); @@ -229,7 +251,7 @@ describe("ValidatorsExitBusOracle.sol:happyPath", () => { expect(procState.dataHash).to.equal(reportHash); expect(procState.processingDeadlineTime).to.equal(computeTimestampAtSlot(frame.reportProcessingDeadlineSlot)); expect(procState.dataSubmitted).to.equal(true); - expect(procState.dataFormat).to.equal(DATA_FORMAT_LIST); + expect(procState.dataFormat).to.equal(DATA_FORMAT_LIST_WITH_KEY_INDEX); expect(procState.requestsCount).to.equal(exitRequests.length); expect(procState.requestsSubmitted).to.equal(exitRequests.length); }); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.submitExitRequestsData.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.submitExitRequestsData.test.ts index 7955a51c41..6504d2f600 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.submitExitRequestsData.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.submitExitRequestsData.test.ts @@ -52,9 +52,33 @@ const hashExitRequest = (request: { dataFormat: number; data: string }) => { ); }; +// Helper to extract timestamp from ValidatorExitRequest event +// More memory-efficient than keeping full receipt in scope +const getTimestampFromTx = async ( + tx: Awaited>, + oracleInterface: ValidatorsExitBus__Harness["interface"], +): Promise => { + const receipt = await tx.wait(); + if (!receipt) { + throw new Error("Transaction receipt is null"); + } + for (const log of receipt.logs) { + try { + const parsed = oracleInterface.parseLog({ topics: [...log.topics], data: log.data }); + if (parsed?.name === "ValidatorExitRequest") { + return parsed.args[4]; // Return timestamp immediately + } + } catch { + // Skip logs from other contracts + } + } + throw new Error("ValidatorExitRequest event not found"); +}; + 
describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { let consensus: HashConsensus__Harness; let oracle: ValidatorsExitBus__Harness; + let admin: HardhatEthersSigner; let exitRequests = [ @@ -172,7 +196,7 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { it("Should revert if wrong DATA_FORMAT", async () => { const exitRequestWrongDataFormat: ExitRequestData = { - dataFormat: 2, + dataFormat: 3, data: encodeExitRequestsDataList(exitRequests), }; const hash = hashExitRequest(exitRequestWrongDataFormat); @@ -182,7 +206,7 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { await expect(oracle.submitExitRequestsData(exitRequestWrongDataFormat)) .to.be.revertedWithCustomError(oracle, "UnsupportedRequestsDataFormat") - .withArgs(2); + .withArgs(3); }); it("Should revert if contains duplicates", async () => { @@ -298,20 +322,21 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { await oracle.grantRole(submitRole, authorizedEntity); }); - // ----------------------------------------------------------------------------- - // Shared test data - // ----------------------------------------------------------------------------- - const MAX_EXIT_REQUESTS_LIMIT = 5; - const EXITS_PER_FRAME = 1; + // Limit configuration (in ETH, as used by the contract) + // The limit allows up to 5 validators worth of balance: + // 2 legacy (64 ETH) + 3 MaxEB slots (6144 ETH) = 6208 ETH total + const MAX_EXIT_BALANCE_ETH = 6_208n; // Total balance of all 5 validators in ETH + const BALANCE_PER_FRAME_ETH = 2_048n; // 1 MaxEB validator per frame (2048 ETH) const FRAME_DURATION = 48; // Data for case when limit is not enough to process entire request + // Total: 2×32 ETH (module 1) + 2×2048 ETH (module 2) + 1×2048 ETH (module 3) = 6208 ETH const VALIDATORS: ExitRequest[] = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { 
moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, - { moduleId: 2, nodeOpId: 0, valIndex: 3, valPubkey: PUBKEYS[3] }, - { moduleId: 3, nodeOpId: 0, valIndex: 3, valPubkey: PUBKEYS[4] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, // 32 ETH + { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, // 32 ETH + { moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, // 2048 ETH + { moduleId: 2, nodeOpId: 0, valIndex: 3, valPubkey: PUBKEYS[3] }, // 2048 ETH + { moduleId: 3, nodeOpId: 0, valIndex: 3, valPubkey: PUBKEYS[4] }, // 2048 ETH ]; const REQUEST = { @@ -325,28 +350,29 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { const reportLimitRole = await oracle.EXIT_REQUEST_LIMIT_MANAGER_ROLE(); await expect( - oracle.connect(stranger).setExitRequestLimit(MAX_EXIT_REQUESTS_LIMIT, EXITS_PER_FRAME, FRAME_DURATION), + oracle.connect(stranger).setExitRequestLimit(MAX_EXIT_BALANCE_ETH, BALANCE_PER_FRAME_ETH, FRAME_DURATION), ).to.be.revertedWithOZAccessControlError(await stranger.getAddress(), reportLimitRole); }); it("Should not allow to set exits per frame bigger than max limit", async () => { await expect( - oracle.connect(authorizedEntity).setExitRequestLimit(10, 12, FRAME_DURATION), - ).to.be.revertedWithCustomError(oracle, "TooLargeExitsPerFrame"); + oracle.connect(authorizedEntity).setExitRequestLimit(10n * 2048n, 12n * 2048n, FRAME_DURATION), + ).to.be.revertedWithCustomError(oracle, "TooLargeItemsPerFrame"); }); it("Should deliver request as it is below limit", async () => { const exitLimitTx = await oracle .connect(authorizedEntity) - .setExitRequestLimit(MAX_EXIT_REQUESTS_LIMIT, EXITS_PER_FRAME, FRAME_DURATION); + .setExitRequestLimit(MAX_EXIT_BALANCE_ETH, BALANCE_PER_FRAME_ETH, FRAME_DURATION); await expect(exitLimitTx) - .to.emit(oracle, "ExitRequestsLimitSet") - .withArgs(MAX_EXIT_REQUESTS_LIMIT, EXITS_PER_FRAME, FRAME_DURATION); + .to.emit(oracle, "ExitBalanceLimitSet") + 
.withArgs(MAX_EXIT_BALANCE_ETH, BALANCE_PER_FRAME_ETH, FRAME_DURATION); exitRequests = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, // 32 ETH + { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, // 32 ETH ]; + // Total: 64 ETH = 64,000,000,000 Gwei (well below limit) exitRequest = { dataFormat: DATA_FORMAT_LIST, @@ -369,36 +395,51 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { }); it("Should not allow to deliver if limit doesnt cover full request", async () => { + // Previous test consumed 64 ETH (2 legacy validators × 32 ETH) + // The limit starts at MAX_EXIT_BALANCE_ETH (6208 ETH), not BALANCE_PER_FRAME_ETH + // Remaining: 6208 - 64 = 6144 ETH + const consumedEth = 64n; // 2 legacy validators × 32 ETH + const remainingEth = MAX_EXIT_BALANCE_ETH - consumedEth; // 6144 ETH + const requestTotalEth = 6208n; // 2×32 + 3×2048 ETH + await oracle.connect(authorizedEntity).submitExitRequestsHash(HASH_REQUEST); await expect(oracle.submitExitRequestsData(REQUEST)) .to.be.revertedWithCustomError(oracle, "ExitRequestsLimitExceeded") - .withArgs(5, 3); + .withArgs(requestTotalEth, remainingEth); }); - it("Current limit should be equal to 0", async () => { + it("Current limit should reflect consumed balance", async () => { const data = await oracle.getExitRequestLimitFullInfo(); - expect(data.maxExitRequestsLimit).to.equal(MAX_EXIT_REQUESTS_LIMIT); - expect(data.exitsPerFrame).to.equal(EXITS_PER_FRAME); + const consumedEth = 64n; // 64 ETH from previous test + const remainingEth = MAX_EXIT_BALANCE_ETH - consumedEth; // 6144 ETH + + expect(data.maxExitBalanceEth).to.equal(MAX_EXIT_BALANCE_ETH); + expect(data.balancePerFrameEth).to.equal(BALANCE_PER_FRAME_ETH); expect(data.frameDurationInSec).to.equal(FRAME_DURATION); - expect(data.prevExitRequestsLimit).to.equal(3); - 
expect(data.currentExitRequestsLimit).to.equal(3); + expect(data.prevExitBalanceEth).to.equal(remainingEth); + expect(data.currentExitBalanceEth).to.equal(remainingEth); }); - it("Should current limit should be increased on 2 if 2*48 seconds passed", async () => { + it("Should current limit should be increased if 2*48 seconds passed", async () => { await consensus.advanceTimeBy(2 * 4 * 12); const data = await oracle.getExitRequestLimitFullInfo(); - expect(data.maxExitRequestsLimit).to.equal(MAX_EXIT_REQUESTS_LIMIT); - expect(data.exitsPerFrame).to.equal(EXITS_PER_FRAME); + const consumedEth = 64n; // 64 ETH from first test + const remainingEth = MAX_EXIT_BALANCE_ETH - consumedEth; // 6144 ETH + + expect(data.maxExitBalanceEth).to.equal(MAX_EXIT_BALANCE_ETH); + expect(data.balancePerFrameEth).to.equal(BALANCE_PER_FRAME_ETH); expect(data.frameDurationInSec).to.equal(FRAME_DURATION); - expect(data.prevExitRequestsLimit).to.equal(3); - expect(data.currentExitRequestsLimit).to.equal(5); + expect(data.prevExitBalanceEth).to.equal(remainingEth); + // After 2 frames (2×48 seconds), we get 2 more frames worth of balance: 6144 + 4096 = 10240 ETH + // But capped at MAX_EXIT_BALANCE_ETH (6208 ETH) + expect(data.currentExitBalanceEth).to.equal(MAX_EXIT_BALANCE_ETH); }); it("Should process requests after 2 frames passes", async () => { const emitTx = await oracle.submitExitRequestsData(REQUEST); - const timestamp = await oracle.getTime(); + const timestamp = await getTimestampFromTx(emitTx, oracle.interface); for (let i = 0; i < 5; i++) { const request = VALIDATORS[i]; @@ -438,9 +479,10 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { }); it("Should not allow to process request larger than MAX_VALIDATORS_PER_REPORT", async () => { - await consensus.advanceTimeBy(MAX_EXIT_REQUESTS_LIMIT * 4 * 12); + // Advance time to ensure we have enough balance limit + await consensus.advanceTimeBy(MAX_EXIT_BALANCE_ETH * 4n * 12n); const data = await 
oracle.getExitRequestLimitFullInfo(); - expect(data.currentExitRequestsLimit).to.equal(MAX_EXIT_REQUESTS_LIMIT); + expect(data.currentExitBalanceEth).to.equal(MAX_EXIT_BALANCE_ETH); const maxRequestsPerReport = 4; @@ -448,6 +490,7 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { await expect(tx).to.emit(oracle, "SetMaxValidatorsPerReport").withArgs(maxRequestsPerReport); expect(await oracle.connect(authorizedEntity).getMaxValidatorsPerReport()).to.equal(maxRequestsPerReport); + // Create a request with 5 validators (exceeds maxRequestsPerReport of 4) const exitRequestsRandom = [ { moduleId: 100, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, { moduleId: 101, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, @@ -465,26 +508,27 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { await oracle.connect(authorizedEntity).submitExitRequestsHash(exitRequestHashRandom); + // Should fail because 5 validators > maxRequestsPerReport (4) await expect(oracle.submitExitRequestsData(exitRequestRandom)) .to.be.revertedWithCustomError(oracle, "TooManyExitRequestsInReport") .withArgs(5, 4); }); - it("Should set maxExitRequestsLimit equal to 0 and return as currentExitRequestsLimit type(uint256).max", async () => { - // can't set just maxExitRequestsLimit to 0, as it will be less than exitsPerFrame + it("Should set maxExitBalanceEth equal to 0 and return as currentExitBalanceEth type(uint256).max", async () => { + // can't set just maxExitBalanceEth to 0, as it will be less than balancePerFrameEth const exitLimitTx = await oracle.connect(authorizedEntity).setExitRequestLimit(0, 0, FRAME_DURATION); - await expect(exitLimitTx).to.emit(oracle, "ExitRequestsLimitSet").withArgs(0, 0, FRAME_DURATION); + await expect(exitLimitTx).to.emit(oracle, "ExitBalanceLimitSet").withArgs(0, 0, FRAME_DURATION); const data = await oracle.getExitRequestLimitFullInfo(); - expect(data.maxExitRequestsLimit).to.equal(0); - 
expect(data.exitsPerFrame).to.equal(0); + expect(data.maxExitBalanceEth).to.equal(0); + expect(data.balancePerFrameEth).to.equal(0); expect(data.frameDurationInSec).to.equal(FRAME_DURATION); - expect(data.prevExitRequestsLimit).to.equal(0); - expect(data.currentExitRequestsLimit).to.equal(2n ** 256n - 1n); + expect(data.prevExitBalanceEth).to.equal(0); + expect(data.currentExitBalanceEth).to.equal(2n ** 256n - 1n); }); - it("Should not check limit, if maxLimitRequests equal to 0 (means limit was not set)", async () => { + it("Should not check limit, if maxExitBalanceEth equal to 0 (means limit was not set)", async () => { const exitRequestsRandom = [ { moduleId: 100, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, { moduleId: 101, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, @@ -500,25 +544,23 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { await oracle.connect(authorizedEntity).submitExitRequestsHash(exitRequestRandomHash); const emitTx = await oracle.submitExitRequestsData(exitRequestRandom); - const timestamp = await oracle.getTime(); + const timestamp = await getTimestampFromTx(emitTx, oracle.interface); - for (let i = 0; i < 2; i++) { - const request = exitRequestsRandom[i]; - await expect(emitTx) - .to.emit(oracle, "ValidatorExitRequest") - .withArgs(request.moduleId, request.nodeOpId, request.valIndex, request.valPubkey, timestamp); - } + // Check each event individually + await expect(emitTx).to.emit(oracle, "ValidatorExitRequest").withArgs(100, 0, 0, PUBKEYS[0], timestamp); + + await expect(emitTx).to.emit(oracle, "ValidatorExitRequest").withArgs(101, 0, 2, PUBKEYS[1], timestamp); await expect(emitTx).to.emit(oracle, "ExitDataProcessing").withArgs(exitRequestRandomHash); const data = await oracle.getExitRequestLimitFullInfo(); - expect(data.maxExitRequestsLimit).to.equal(0); - expect(data.exitsPerFrame).to.equal(0); + expect(data.maxExitBalanceEth).to.equal(0); + expect(data.balancePerFrameEth).to.equal(0); 
expect(data.frameDurationInSec).to.equal(FRAME_DURATION); - expect(data.prevExitRequestsLimit).to.equal(0); + expect(data.prevExitBalanceEth).to.equal(0); // as time is mocked and we didnt change it since last consume, currentExitRequestsLimit was not increased - expect(data.currentExitRequestsLimit).to.equal(2n ** 256n - 1n); + expect(data.currentExitBalanceEth).to.equal(2n ** 256n - 1n); }); }); @@ -546,7 +588,7 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { it("Check version", async () => { // set in initialize in deployVEBO - expect(await oracle.getContractVersion()).to.equal(2); + expect(await oracle.getContractVersion()).to.equal(3); }); it("Store exit hash", async () => { @@ -554,14 +596,14 @@ describe("ValidatorsExitBusOracle.sol:submitExitRequestsData", () => { }); it("set new version", async () => { - await oracle.setContractVersion(3); - expect(await oracle.getContractVersion()).to.equal(3); + await oracle.setContractVersion(4); + expect(await oracle.getContractVersion()).to.equal(4); }); it("Should revert if request has old contract version", async () => { await expect(oracle.submitExitRequestsData(REQUEST)) .to.be.revertedWithCustomError(oracle, "UnexpectedContractVersion") - .withArgs(3, 2); + .withArgs(4, 3); }); }); }); diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.submitReportData.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.submitReportData.test.ts index 016d36e19c..07d8cd7fdd 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.submitReportData.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.submitReportData.test.ts @@ -4,11 +4,22 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { HashConsensus__Harness, OracleReportSanityChecker, ValidatorsExitBus__Harness } from "typechain-types"; +import { + HashConsensus__Harness, + OracleReportSanityChecker, + StakingModule__MockForKeyVerification, + 
ValidatorsExitBus__Harness, +} from "typechain-types"; import { de0x, numberToHex, VEBO_CONSENSUS_VERSION } from "lib"; -import { computeTimestampAtSlot, DATA_FORMAT_LIST, deployVEBO, initVEBO } from "test/deploy"; +import { + computeTimestampAtSlot, + DATA_FORMAT_LIST_WITH_KEY_INDEX, + deployVEBO, + initVEBO, + seedMockModuleSigningKeys, +} from "test/deploy"; import { Snapshot } from "test/suite"; const PUBKEYS = [ @@ -25,6 +36,14 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { let oracle: ValidatorsExitBus__Harness; let admin: HardhatEthersSigner; let oracleReportSanityChecker: OracleReportSanityChecker; + let mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module4: StakingModule__MockForKeyVerification; + module5: StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }; let oracleVersion: bigint; @@ -40,6 +59,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { moduleId: number; nodeOpId: number; valIndex: number; + keyIndex: number; valPubkey: string; } @@ -59,10 +79,16 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { return reportDataHash; }; - const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { + const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey, keyIndex }: ExitRequest) => { const pubkeyHex = de0x(valPubkey); expect(pubkeyHex.length).to.equal(48 * 2); - return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex, 8) + + pubkeyHex + ); }; const encodeExitRequestsDataList = (requests: ExitRequest[]) => { @@ -77,13 +103,15 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { }; const prepareReportAndSubmitHash = async 
( - requests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2] }], + requests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2], keyIndex: 1 }], options = { reportFields: {} }, ) => { + await seedMockModuleSigningKeys(mockModules, requests); + const { refSlot } = await consensus.getCurrentFrame(); const reportData = { consensusVersion: VEBO_CONSENSUS_VERSION, - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, refSlot, requestsCount: requests.length, data: encodeExitRequestsDataList(requests), @@ -102,6 +130,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { oracle = deployed.oracle; consensus = deployed.consensus; oracleReportSanityChecker = deployed.oracleReportSanityChecker; + mockModules = deployed.mockModules; await initVEBO({ admin: admin.address, @@ -175,7 +204,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("dataFormat = 0 reverts", async () => { const dataFormatUnsupported = 0; const { reportData } = await prepareReportAndSubmitHash( - [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }], + [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }], { reportFields: { dataFormat: dataFormatUnsupported } }, ); @@ -184,10 +213,10 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { .withArgs(dataFormatUnsupported); }); - it("dataFormat = 2 reverts", async () => { - const dataFormatUnsupported = 2; + it("dataFormat = 3 reverts", async () => { + const dataFormatUnsupported = 3; const { reportData } = await prepareReportAndSubmitHash( - [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }], + [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }], { reportFields: { dataFormat: dataFormatUnsupported } }, ); @@ -198,7 +227,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("dataFormat = 1 pass", async () => { const { reportData } = await 
prepareReportAndSubmitHash([ - { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }, ]); await oracle.connect(member1).submitReportData(reportData, oracleVersion); }); @@ -207,7 +236,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { context("enforces data length", () => { it("reverts if there is more data than expected", async () => { const { refSlot } = await consensus.getCurrentFrame(); - const exitRequests = [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }]; + const exitRequests = [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }]; const { reportData } = await prepareReportAndSubmitHash(exitRequests, { reportFields: { refSlot, data: encodeExitRequestsDataList(exitRequests) + "aaaaaaaaaaaaaaaaaa" }, }); @@ -220,7 +249,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("reverts if there is less data than expected", async () => { const { refSlot } = await consensus.getCurrentFrame(); - const exitRequests = [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }]; + const exitRequests = [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }]; const data = encodeExitRequestsDataList(exitRequests); const { reportData } = await prepareReportAndSubmitHash(exitRequests, { @@ -238,7 +267,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("pass if there is exact amount of data", async () => { const { reportData } = await prepareReportAndSubmitHash([ - { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }, ]); await oracle.connect(member1).submitReportData(reportData, oracleVersion); }); @@ -247,27 +276,33 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { context("invokes sanity check", () => { before(async () => { await 
oracleReportSanityChecker.grantRole( - await oracleReportSanityChecker.MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE(), + await oracleReportSanityChecker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), admin.address, ); }); it("reverts if request limit is reached", async () => { - const exitRequestsLimit = 1; - await oracleReportSanityChecker.connect(admin).setMaxExitRequestsPerOracleReport(exitRequestsLimit); + // Module 5 (not curated) = 2048 ETH per validator + // Set limit to 1 validator worth + const exitRequestsLimit = 2_048n; // 2048 ETH + await oracleReportSanityChecker.connect(admin).setMaxBalanceExitRequestedPerReportInEth(exitRequestsLimit); const { reportData } = await prepareReportAndSubmitHash([ - { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2] }, - { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3] }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2], keyIndex: 1 }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3], keyIndex: 2 }, ]); + // 2 validators = 4096 ETH (actual balance that exceeds the limit) + const actualBalance = 2_048n * 2n; await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)) - .to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectNumberOfExitRequestsPerReport") - .withArgs(exitRequestsLimit); + .to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectSumOfExitBalancePerReport") + .withArgs(actualBalance); }); it("pass if requests amount equals to limit", async () => { - const exitRequestsLimit = 1; - await oracleReportSanityChecker.connect(admin).setMaxExitRequestsPerOracleReport(exitRequestsLimit); + // Module 5 (not curated) = 2048 ETH per validator + // Set limit to exactly 1 validator worth + const exitRequestsLimit = 2_048n; // 2048 ETH + await oracleReportSanityChecker.connect(admin).setMaxBalanceExitRequestedPerReportInEth(exitRequestsLimit); const { reportData } = await prepareReportAndSubmitHash([ - { moduleId: 5, 
nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2] }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2], keyIndex: 1 }, ]); await oracle.connect(member1).submitReportData(reportData, oracleVersion); }); @@ -276,7 +311,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { context("validates data.requestsCount field with given data", () => { it("reverts if requestsCount does not match with encoded data size", async () => { const { reportData } = await prepareReportAndSubmitHash( - [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }], + [{ moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }], { reportFields: { requestsCount: 2 } }, ); @@ -289,7 +324,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("reverts if moduleId equals zero", async () => { const { reportData } = await prepareReportAndSubmitHash([ - { moduleId: 0, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 0, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }, ]); await expect(oracle.connect(member1).submitReportData(reportData, oracleVersion)).to.be.revertedWithCustomError( @@ -300,8 +335,8 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("emits ValidatorExitRequest events", async () => { const requests = [ - { moduleId: 4, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[2] }, - { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3] }, + { moduleId: 4, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[2], keyIndex: 1 }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3], keyIndex: 2 }, ]; const { reportData } = await prepareReportAndSubmitHash(requests); const tx = await oracle.connect(member1).submitReportData(reportData, oracleVersion); @@ -333,8 +368,8 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { const { refSlot } = await consensus.getCurrentFrame(); const requests = [ - { moduleId: 4, nodeOpId: 3, valIndex: 2, valPubkey: 
PUBKEYS[2] }, - { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3] }, + { moduleId: 4, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[2], keyIndex: 1 }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3], keyIndex: 2 }, ]; const { reportData } = await prepareReportAndSubmitHash(requests); await oracle.connect(member1).submitReportData(reportData, oracleVersion); @@ -343,7 +378,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { expect(storageAfter.refSlot).to.equal(refSlot); expect(storageAfter.requestsCount).to.equal(requests.length); expect(storageAfter.requestsProcessed).to.equal(requests.length); - expect(storageAfter.dataFormat).to.equal(DATA_FORMAT_LIST); + expect(storageAfter.dataFormat).to.equal(DATA_FORMAT_LIST_WITH_KEY_INDEX); }); it("updates total requests processed count", async () => { @@ -352,7 +387,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { expect(countStep0).to.equal(currentCount); // Step 1 — process 1 item - const requestsStep1 = [{ moduleId: 3, nodeOpId: 1, valIndex: 2, valPubkey: PUBKEYS[1] }]; + const requestsStep1 = [{ moduleId: 3, nodeOpId: 1, valIndex: 2, valPubkey: PUBKEYS[1], keyIndex: 1 }]; const { reportData: reportStep1 } = await prepareReportAndSubmitHash(requestsStep1); await oracle.connect(member1).submitReportData(reportStep1, oracleVersion); const countStep1 = await oracle.getTotalRequestsProcessed(); @@ -362,8 +397,8 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { // Step 2 — process 2 items await consensus.advanceTimeToNextFrameStart(); const requestsStep2 = [ - { moduleId: 4, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[2] }, - { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3] }, + { moduleId: 4, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[2], keyIndex: 1 }, + { moduleId: 5, nodeOpId: 3, valIndex: 2, valPubkey: PUBKEYS[3], keyIndex: 2 }, ]; const { reportData: reportStep2 } = await prepareReportAndSubmitHash(requestsStep2); 
await oracle.connect(member1).submitReportData(reportStep2, oracleVersion); @@ -446,9 +481,9 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { }); it("reverts on hash mismatch", async () => { - const requests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2] }]; + const requests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2], keyIndex: 1 }]; const { reportHash: actualReportHash } = await prepareReportAndSubmitHash(requests); - const newRequests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[1] }]; + const newRequests = [{ moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[1], keyIndex: 1 }]; const { refSlot } = await consensus.getCurrentFrame(); // change pubkey @@ -456,7 +491,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { consensusVersion: VEBO_CONSENSUS_VERSION, refSlot, requestsCount: requests.length, - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: encodeExitRequestsDataList(newRequests), }; @@ -497,7 +532,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("should increase after report", async () => { const { reportData } = await prepareReportAndSubmitHash([ - { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 5, nodeOpId: 3, valIndex: 0, valPubkey: PUBKEYS[0], keyIndex: 1 }, ]); await oracle.connect(member1).submitReportData(reportData, oracleVersion, { from: member1 }); requestCount += 1; @@ -507,8 +542,8 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("should double increase for two exits", async () => { await consensus.advanceTimeToNextFrameStart(); const { reportData } = await prepareReportAndSubmitHash([ - { moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0] }, - { moduleId: 5, nodeOpId: 3, valIndex: 1, valPubkey: PUBKEYS[0] }, + { moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[0], keyIndex: 1 }, + { moduleId: 5, nodeOpId: 
3, valIndex: 1, valPubkey: PUBKEYS[0], keyIndex: 2 }, ]); await oracle.connect(member1).submitReportData(reportData, oracleVersion); requestCount += 2; @@ -549,8 +584,8 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("consensus report submitted", async () => { ({ reportData: report, reportHash: hash } = await prepareReportAndSubmitHash([ - { moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2] }, - { moduleId: 5, nodeOpId: 3, valIndex: 1, valPubkey: PUBKEYS[3] }, + { moduleId: 5, nodeOpId: 1, valIndex: 10, valPubkey: PUBKEYS[2], keyIndex: 1 }, + { moduleId: 5, nodeOpId: 3, valIndex: 1, valPubkey: PUBKEYS[3], keyIndex: 2 }, ])); const state = await oracle.getProcessingState(); @@ -573,7 +608,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { computeTimestampAtSlot((await consensus.getCurrentFrame()).reportProcessingDeadlineSlot), hash, true, - DATA_FORMAT_LIST, + DATA_FORMAT_LIST_WITH_KEY_INDEX, 2, 2, ]); @@ -609,20 +644,23 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("Set exit limit", async () => { const role = await oracle.EXIT_REQUEST_LIMIT_MANAGER_ROLE(); await oracle.grantRole(role, admin); - const exitLimitTx = await oracle.connect(admin).setExitRequestLimit(7, 1, 48); - await expect(exitLimitTx).to.emit(oracle, "ExitRequestsLimitSet").withArgs(7, 1, 48); + // Set limit to allow 4160 ETH (2 legacy + 2 MaxEB validators) + // Max: 7000 ETH, Per frame: 5000 ETH (enough to cover 4160 ETH) + const exitLimitTx = await oracle.connect(admin).setExitRequestLimit(7_000n, 5_000n, 48); + await expect(exitLimitTx).to.emit(oracle, "ExitBalanceLimitSet").withArgs(7_000n, 5_000n, 48); }); it("deliver report by actor different from oracle", async () => { const requests = [ - { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 2, valIndex: 3, valPubkey: PUBKEYS[2] }, - { moduleId: 2, nodeOpId: 
3, valIndex: 3, valPubkey: PUBKEYS[3] }, + { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0], keyIndex: 1 }, + { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1], keyIndex: 2 }, + { moduleId: 2, nodeOpId: 2, valIndex: 3, valPubkey: PUBKEYS[2], keyIndex: 3 }, + { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[3], keyIndex: 4 }, ]; + await seedMockModuleSigningKeys(mockModules, requests); const data = await encodeExitRequestsDataList(requests); const exitRequestHash = ethers.keccak256( - ethers.AbiCoder.defaultAbiCoder().encode(["bytes", "uint256"], [data, DATA_FORMAT_LIST]), + ethers.AbiCoder.defaultAbiCoder().encode(["bytes", "uint256"], [data, DATA_FORMAT_LIST_WITH_KEY_INDEX]), ); const role = await oracle.SUBMIT_REPORT_HASH_ROLE(); @@ -632,7 +670,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { await expect(submitTx).to.emit(oracle, "RequestsHashSubmitted").withArgs(exitRequestHash); const exitRequest = { - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data, }; @@ -656,10 +694,10 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("oracle does not consume common veb limits", async () => { const requests = [ - { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[2] }, - { moduleId: 2, nodeOpId: 3, valIndex: 4, valPubkey: PUBKEYS[4] }, + { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0], keyIndex: 1 }, + { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1], keyIndex: 2 }, + { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[2], keyIndex: 3 }, + { moduleId: 2, nodeOpId: 3, valIndex: 4, valPubkey: PUBKEYS[4], keyIndex: 4 }, ]; const { reportData } = await prepareReportAndSubmitHash(requests); @@ -693,10 +731,10 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { after(async 
() => await Snapshot.restore(originalState)); const validators = [ - { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 2, valIndex: 3, valPubkey: PUBKEYS[2] }, - { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[3] }, + { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0], keyIndex: 1 }, + { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1], keyIndex: 2 }, + { moduleId: 2, nodeOpId: 2, valIndex: 3, valPubkey: PUBKEYS[2], keyIndex: 3 }, + { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[3], keyIndex: 4 }, ]; let exitRequestHash: string; @@ -704,7 +742,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("create hash", async () => { const data = await encodeExitRequestsDataList(validators); exitRequestHash = ethers.keccak256( - ethers.AbiCoder.defaultAbiCoder().encode(["bytes", "uint256"], [data, DATA_FORMAT_LIST]), + ethers.AbiCoder.defaultAbiCoder().encode(["bytes", "uint256"], [data, DATA_FORMAT_LIST_WITH_KEY_INDEX]), ); }); @@ -784,24 +822,30 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { const role = await oracle.EXIT_REQUEST_LIMIT_MANAGER_ROLE(); await oracle.grantRole(role, admin); - await oracle.connect(admin).setExitRequestLimit(100, 1, 48); + // Set limit to allow 4160 ETH (2 legacy + 2 MaxEB validators) + // Max: 100000 ETH, Per frame: 5000 ETH + await oracle.connect(admin).setExitRequestLimit(100_000n, 5_000n, 48); }); after(async () => await Snapshot.restore(originalState)); const validators = [ - { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 2, valIndex: 3, valPubkey: PUBKEYS[2] }, - { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[3] }, + { moduleId: 1, nodeOpId: 2, valIndex: 2, valPubkey: PUBKEYS[0], keyIndex: 1 }, + { moduleId: 1, 
nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[1], keyIndex: 2 }, + { moduleId: 2, nodeOpId: 2, valIndex: 3, valPubkey: PUBKEYS[2], keyIndex: 3 }, + { moduleId: 2, nodeOpId: 3, valIndex: 3, valPubkey: PUBKEYS[3], keyIndex: 4 }, ]; let exitRequestHash: string; let exitRequests: string; it("create hash", async () => { + await seedMockModuleSigningKeys(mockModules, validators); exitRequests = await encodeExitRequestsDataList(validators); exitRequestHash = ethers.keccak256( - ethers.AbiCoder.defaultAbiCoder().encode(["bytes", "uint256"], [exitRequests, DATA_FORMAT_LIST]), + ethers.AbiCoder.defaultAbiCoder().encode( + ["bytes", "uint256"], + [exitRequests, DATA_FORMAT_LIST_WITH_KEY_INDEX], + ), ); }); @@ -818,7 +862,7 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("submit report by actor different from oracle", async () => { const exitRequest = { - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: exitRequests, }; @@ -928,7 +972,10 @@ describe("ValidatorsExitBusOracle.sol:submitReportData", () => { it("submit report pass", async () => { const encodedEmptyRequestList = encodeExitRequestsDataList([]); const exitHash = ethers.keccak256( - ethers.AbiCoder.defaultAbiCoder().encode(["bytes", "uint256"], [encodedEmptyRequestList, DATA_FORMAT_LIST]), + ethers.AbiCoder.defaultAbiCoder().encode( + ["bytes", "uint256"], + [encodedEmptyRequestList, DATA_FORMAT_LIST_WITH_KEY_INDEX], + ), ); await expect(oracle.connect(member1).getDeliveryTimestamp(exitHash)).to.be.revertedWithCustomError( diff --git a/test/0.8.9/oracle/validator-exit-bus-oracle.triggerExits.test.ts b/test/0.8.9/oracle/validator-exit-bus-oracle.triggerExits.test.ts index fef2fe9540..7256c960d0 100644 --- a/test/0.8.9/oracle/validator-exit-bus-oracle.triggerExits.test.ts +++ b/test/0.8.9/oracle/validator-exit-bus-oracle.triggerExits.test.ts @@ -5,13 +5,20 @@ import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { 
HashConsensus__Harness, + StakingModule__MockForKeyVerification, TriggerableWithdrawalsGateway__MockForVEB, ValidatorsExitBus__Harness, } from "typechain-types"; import { de0x, numberToHex, VEBO_CONSENSUS_VERSION } from "lib"; -import { DATA_FORMAT_LIST, deployVEBO, initVEBO, SECONDS_PER_FRAME } from "test/deploy"; +import { + DATA_FORMAT_LIST_WITH_KEY_INDEX, + deployVEBO, + initVEBO, + SECONDS_PER_FRAME, + seedMockModuleSigningKeys, +} from "test/deploy"; // ----------------------------------------------------------------------------- // Constants & helpers @@ -37,6 +44,7 @@ interface ExitRequest { moduleId: number; nodeOpId: number; valIndex: number; + keyIndex: number; valPubkey: string; } @@ -56,10 +64,16 @@ const calcValidatorsExitBusReportDataHash = (items: ReportFields) => { return reportDataHash; }; -const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey }: ExitRequest) => { +const encodeExitRequestHex = ({ moduleId, nodeOpId, valIndex, valPubkey, keyIndex }: ExitRequest) => { const pubkeyHex = de0x(valPubkey); expect(pubkeyHex.length).to.equal(48 * 2); - return numberToHex(moduleId, 3) + numberToHex(nodeOpId, 5) + numberToHex(valIndex, 8) + pubkeyHex; + return ( + numberToHex(moduleId, 3) + + numberToHex(nodeOpId, 5) + + numberToHex(valIndex, 8) + + numberToHex(keyIndex, 8) + + pubkeyHex + ); }; const encodeExitRequestsDataList = (requests: ExitRequest[]) => { @@ -85,6 +99,14 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { let oracle: ValidatorsExitBus__Harness; let admin: HardhatEthersSigner; let triggerableWithdrawalsGateway: TriggerableWithdrawalsGateway__MockForVEB; + let mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module4: StakingModule__MockForKeyVerification; + module5: StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }; let oracleVersion: bigint; @@ 
-99,6 +121,7 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { oracle = deployed.oracle; consensus = deployed.consensus; triggerableWithdrawalsGateway = deployed.triggerableWithdrawalsGateway; + mockModules = deployed.mockModules; await initVEBO({ admin: admin.address, @@ -124,10 +147,10 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { describe("Submit via oracle flow ", async () => { const exitRequests = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 0, valIndex: 1, valPubkey: PUBKEYS[2] }, - { moduleId: 2, nodeOpId: 0, valIndex: 3, valPubkey: PUBKEYS[3] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, keyIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, keyIndex: 1, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 1, keyIndex: 2, valPubkey: PUBKEYS[2] }, + { moduleId: 2, nodeOpId: 0, valIndex: 3, keyIndex: 3, valPubkey: PUBKEYS[3] }, ]; let reportFields: ReportFields; @@ -135,8 +158,8 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { before(async () => { [admin, member1, member2, member3, authorizedEntity, stranger] = await ethers.getSigners(); - await deploy(); + await seedMockModuleSigningKeys(mockModules, exitRequests); }); it("some time passes", async () => { @@ -150,7 +173,7 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { consensusVersion: VEBO_CONSENSUS_VERSION, refSlot: refSlot, requestsCount: exitRequests.length, - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: encodeExitRequestsDataList(exitRequests), }; @@ -286,13 +309,13 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { describe("Submit via trustfull method", () => { const exitRequests = [ - { moduleId: 1, nodeOpId: 0, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { moduleId: 2, 
nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: 1, nodeOpId: 0, valIndex: 0, keyIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, keyIndex: 1, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 2, keyIndex: 2, valPubkey: PUBKEYS[1] }, ]; const exitRequest = { - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: encodeExitRequestsDataList(exitRequests), }; @@ -300,8 +323,8 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { before(async () => { [admin, member1, member2, member3, authorizedEntity] = await ethers.getSigners(); - await deploy(); + await seedMockModuleSigningKeys(mockModules, exitRequests); }); it("should revert if request was not submitted", async () => { @@ -380,13 +403,14 @@ describe("ValidatorsExitBusOracle.sol:triggerExits", () => { it("should revert with error if module id is equal to 0", async () => { const requests = [ - { moduleId: 0, nodeOpId: 1, valIndex: 0, valPubkey: PUBKEYS[0] }, - { moduleId: 1, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, - { moduleId: 2, nodeOpId: 0, valIndex: 2, valPubkey: PUBKEYS[1] }, + { moduleId: 0, nodeOpId: 1, valIndex: 0, keyIndex: 0, valPubkey: PUBKEYS[0] }, + { moduleId: 1, nodeOpId: 0, valIndex: 2, keyIndex: 1, valPubkey: PUBKEYS[1] }, + { moduleId: 2, nodeOpId: 0, valIndex: 2, keyIndex: 2, valPubkey: PUBKEYS[1] }, ]; + await seedMockModuleSigningKeys(mockModules, requests); const request = { - dataFormat: DATA_FORMAT_LIST, + dataFormat: DATA_FORMAT_LIST_WITH_KEY_INDEX, data: encodeExitRequestsDataList(requests), }; diff --git a/test/0.8.9/sanityChecker/oracleReportSanityChecker.checkModuleAndCLBalancesChangeRates.test.ts b/test/0.8.9/sanityChecker/oracleReportSanityChecker.checkModuleAndCLBalancesChangeRates.test.ts new file mode 100644 index 0000000000..ae75a0268a --- /dev/null +++ b/test/0.8.9/sanityChecker/oracleReportSanityChecker.checkModuleAndCLBalancesChangeRates.test.ts @@ -0,0 +1,870 @@ 
+import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + Accounting__MockForSanityChecker, + AccountingOracle__MockForSanityChecker, + Burner__MockForSanityChecker, + LidoLocator__MockForSanityChecker, + OracleReportSanityCheckerWrapper, + StakingModule__MockForStakingRouter, + StakingRouter__Harness, + StakingRouter__MockForAccountingOracle, + WithdrawalQueue__MockForSanityChecker, +} from "typechain-types"; + +import { ether, impersonate, ONE_GWEI, randomWCType1, WithdrawalCredentialsType } from "lib"; + +import { deployStakingRouter } from "test/deploy"; +import { Snapshot } from "test/suite"; + +const ONE_DAY = 24n * 60n * 60n; + +describe("OracleReportSanityChecker.sol:checkModuleAndCLBalancesChangeRates", () => { + type ModuleBalance = { + id: bigint; + validatorsBalanceWei: bigint; + pendingWei?: bigint; + }; + + const limits = { + exitedEthAmountPerDayLimit: 100n, + appearedEthAmountPerDayLimit: 100n, + annualBalanceIncreaseBPLimit: 1_000n, + simulatedShareRateDeviationBPLimit: 250n, + maxBalanceExitRequestedPerReportInEth: 65_000n, + maxEffectiveBalanceWeightWCType01: 32n, + maxEffectiveBalanceWeightWCType02: 2_048n, + maxItemsPerExtraDataTransaction: 15n, + maxNodeOperatorsPerExtraDataItem: 16n, + requestTimestampMargin: 128n, + maxPositiveTokenRebase: 5_000_000n, + maxCLBalanceDecreaseBP: 360n, + clBalanceOraclesErrorUpperBPLimit: 50n, + consolidationEthAmountPerDayLimit: 10n, + exitedValidatorEthAmountLimit: 1n, + }; + + let checker: OracleReportSanityCheckerWrapper; + let locator: LidoLocator__MockForSanityChecker; + let burner: Burner__MockForSanityChecker; + let accounting: Accounting__MockForSanityChecker; + let withdrawalQueue: WithdrawalQueue__MockForSanityChecker; + let stakingRouter: StakingRouter__MockForAccountingOracle; + let accountingOracle: AccountingOracle__MockForSanityChecker; + + let deployer: HardhatEthersSigner; + let admin: 
HardhatEthersSigner; + let manager: HardhatEthersSigner; + let elRewardsVault: HardhatEthersSigner; + + let originalState: string; + + const toGwei = (weiAmount: bigint) => weiAmount / ONE_GWEI; + + const toModuleInput = (modules: ModuleBalance[]) => { + const ids = modules.map((m) => m.id); + const validatorBalancesGweiByStakingModule = modules.map((m) => toGwei(m.validatorsBalanceWei)); + + return { + ids, + validatorBalancesGweiByStakingModule, + }; + }; + + const seedPreviousBalances = async (modules: ModuleBalance[]) => { + const input = toModuleInput(modules); + for (const id of input.ids) { + await stakingRouter.mock__registerStakingModule(id); + } + // Router state seeds validators balance only; pending budget is passed to the checker explicitly. + await stakingRouter.reportValidatorBalancesByStakingModule(input.ids, input.validatorBalancesGweiByStakingModule); + }; + + const check = async ( + modules: ModuleBalance[], + { + preCLPendingBalanceWei = 0n, + // Module fixtures carry only post-report pending; router state itself no longer stores module pending. + postCLPendingBalanceWei = modules.reduce((sum, m) => sum + (m.pendingWei ?? 
0n), 0n), + depositsWei = 0n, + timeElapsed = ONE_DAY, + }: { + preCLPendingBalanceWei?: bigint; + postCLPendingBalanceWei?: bigint; + depositsWei?: bigint; + timeElapsed?: bigint; + } = {}, + ) => { + const ids = modules.map((m) => m.id); + const validatorBalancesWeiByStakingModule = modules.map((m) => m.validatorsBalanceWei); + const postCLValidatorsBalanceWei = validatorBalancesWeiByStakingModule.reduce((sum, val) => sum + val, 0n); + const previousModuleStates = await Promise.all(ids.map((id) => stakingRouter.getStakingModuleStateAccounting(id))); + const preCLValidatorsBalanceWei = previousModuleStates.reduce( + (sum, [validatorsBalanceGwei]) => sum + validatorsBalanceGwei * ONE_GWEI, + 0n, + ); + return checker.checkModuleAndCLBalancesChangeRates( + ids, + validatorBalancesWeiByStakingModule, + preCLValidatorsBalanceWei, + preCLPendingBalanceWei, + postCLValidatorsBalanceWei, + postCLPendingBalanceWei, + depositsWei, + timeElapsed, + ); + }; + + const deployCheckerWithRouterModules = async (modulesCount = 1, postMigrationFirstReportDone = true) => { + const routerHarness = (await deployStakingRouter({ deployer, admin }, {})) as { + stakingRouter: StakingRouter__Harness; + }; + const moduleIds: bigint[] = []; + + await routerHarness.stakingRouter.connect(admin).initialize(admin.address, randomWCType1()); + await routerHarness.stakingRouter + .connect(admin) + .grantRole(await routerHarness.stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin.address); + await routerHarness.stakingRouter + .connect(admin) + .grantRole(await routerHarness.stakingRouter.REPORT_EXITED_VALIDATORS_ROLE(), admin.address); + for (let i = 0; i < modulesCount; i++) { + const module = (await ethers.deployContract( + "StakingModule__MockForStakingRouter", + deployer, + )) as StakingModule__MockForStakingRouter; + + await routerHarness.stakingRouter + .connect(admin) + .addStakingModule(`new module ${i + 1}`, await module.getAddress(), { + stakeShareLimit: 10_000n, + 
priorityExitShareThreshold: 10_000n, + stakingModuleFee: 500n, + treasuryFee: 500n, + maxDepositsPerBlock: 150n, + minDepositBlockDistance: 25n, + withdrawalCredentialsType: WithdrawalCredentialsType.WC0x01, + }); + + moduleIds.push(BigInt(i + 1)); + } + + const locatorWithRouter = await ethers.deployContract("LidoLocator__MockForSanityChecker", [ + { + lido: deployer.address, + depositSecurityModule: deployer.address, + elRewardsVault: elRewardsVault.address, + accountingOracle: await accountingOracle.getAddress(), + oracleReportSanityChecker: deployer.address, + burner: await burner.getAddress(), + validatorsExitBusOracle: deployer.address, + stakingRouter: await routerHarness.stakingRouter.getAddress(), + treasury: deployer.address, + withdrawalQueue: await withdrawalQueue.getAddress(), + withdrawalVault: deployer.address, + postTokenRebaseReceiver: deployer.address, + oracleDaemonConfig: deployer.address, + validatorExitDelayVerifier: deployer.address, + triggerableWithdrawalsGateway: deployer.address, + consolidationGateway: deployer.address, + accounting: await accounting.getAddress(), + predepositGuarantee: deployer.address, + wstETH: deployer.address, + vaultHub: deployer.address, + vaultFactory: deployer.address, + lazyOracle: deployer.address, + operatorGrid: deployer.address, + topUpGateway: deployer.address, + }, + ]); + + const checkerWithRouter = await ethers.deployContract("OracleReportSanityCheckerWrapper", [ + await locatorWithRouter.getAddress(), + await accounting.getAddress(), + admin.address, + limits, + postMigrationFirstReportDone, + ]); + + return { + checkerWithRouter, + stakingRouterHarness: routerHarness.stakingRouter, + moduleIds, + }; + }; + + const checkGlobalReport = ( + sanityChecker: OracleReportSanityCheckerWrapper, + accountingSigner: HardhatEthersSigner, + { + timeElapsed = ONE_DAY, + preValidatorsWei = 0n, + prePendingWei = 0n, + postValidatorsWei = 0n, + postPendingWei = 0n, + withdrawalVaultBalanceWei = 0n, + 
elRewardsVaultBalanceWei = 0n, + sharesRequestedToBurn = 0n, + depositsWei = 0n, + withdrawalsVaultTransferWei = 0n, + }: { + timeElapsed?: bigint; + preValidatorsWei?: bigint; + prePendingWei?: bigint; + postValidatorsWei?: bigint; + postPendingWei?: bigint; + withdrawalVaultBalanceWei?: bigint; + elRewardsVaultBalanceWei?: bigint; + sharesRequestedToBurn?: bigint; + depositsWei?: bigint; + withdrawalsVaultTransferWei?: bigint; + }, + ) => + sanityChecker + .connect(accountingSigner) + .checkAccountingOracleReport( + timeElapsed, + preValidatorsWei, + prePendingWei, + postValidatorsWei, + postPendingWei, + withdrawalVaultBalanceWei, + elRewardsVaultBalanceWei, + sharesRequestedToBurn, + depositsWei, + withdrawalsVaultTransferWei, + ); + + before(async () => { + [deployer, admin, manager, elRewardsVault] = await ethers.getSigners(); + + withdrawalQueue = await ethers.deployContract("WithdrawalQueue__MockForSanityChecker"); + burner = await ethers.deployContract("Burner__MockForSanityChecker"); + accounting = await ethers.deployContract("Accounting__MockForSanityChecker"); + stakingRouter = await ethers.deployContract("StakingRouter__MockForAccountingOracle"); + + accountingOracle = await ethers.deployContract("AccountingOracle__MockForSanityChecker", [ + deployer.address, + 12, + 1_606_824_023, + ]); + + locator = await ethers.deployContract("LidoLocator__MockForSanityChecker", [ + { + lido: deployer.address, + depositSecurityModule: deployer.address, + elRewardsVault: elRewardsVault.address, + accountingOracle: await accountingOracle.getAddress(), + oracleReportSanityChecker: deployer.address, + burner: await burner.getAddress(), + validatorsExitBusOracle: deployer.address, + stakingRouter: await stakingRouter.getAddress(), + treasury: deployer.address, + withdrawalQueue: await withdrawalQueue.getAddress(), + withdrawalVault: deployer.address, + postTokenRebaseReceiver: deployer.address, + oracleDaemonConfig: deployer.address, + validatorExitDelayVerifier: 
deployer.address, + triggerableWithdrawalsGateway: deployer.address, + consolidationGateway: deployer.address, + accounting: await accounting.getAddress(), + predepositGuarantee: deployer.address, + wstETH: deployer.address, + vaultHub: deployer.address, + vaultFactory: deployer.address, + lazyOracle: deployer.address, + operatorGrid: deployer.address, + topUpGateway: deployer.address, + }, + ]); + + checker = await ethers.deployContract("OracleReportSanityCheckerWrapper", [ + await locator.getAddress(), + await accounting.getAddress(), + admin.address, + limits, + true, + ]); + }); + + beforeEach(async () => { + originalState = await Snapshot.take(); + }); + + afterEach(async () => { + await Snapshot.restore(originalState); + }); + + it("passes for empty module arrays and zero totals", async () => { + await expect(checker.checkModuleAndCLBalancesChangeRates([], [], 0n, 0n, 0n, 0n, 0n, ONE_DAY)).not.to.be.reverted; + }); + + it("skips module-specific checks for the first report of a newly added module", async () => { + const { checkerWithRouter, moduleIds } = await deployCheckerWithRouterModules(); + const [moduleId] = moduleIds; + const firstReportTotalBalanceWei = ether("120"); + + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleId], + [firstReportTotalBalanceWei], + firstReportTotalBalanceWei, + 0n, + firstReportTotalBalanceWei, + 0n, + 0n, + ONE_DAY, + ), + ).not.to.be.reverted; + }); + + it("skips the module validators balance increase check on the first post-migration report and applies it on the second", async () => { + const { checkerWithRouter, stakingRouterHarness, moduleIds } = await deployCheckerWithRouterModules(1, false); + const [moduleId] = moduleIds; + const accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + const previousValidatorsBalanceWei = ether("40150"); + const prePendingBalanceWei = ether("120"); + const excessiveValidatorsGrowthWei = ether("112"); + const 
postValidatorsBalanceWei = previousValidatorsBalanceWei + excessiveValidatorsGrowthWei; + const postPendingBalanceWei = ether("20"); + const activatedBalanceWei = prePendingBalanceWei - postPendingBalanceWei; + const expectedValidatorsGrowthLimitWei = + activatedBalanceWei + (previousValidatorsBalanceWei * limits.annualBalanceIncreaseBPLimit) / (365n * 10_000n); + + const problematicModuleReport = () => + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleId], + [postValidatorsBalanceWei], + previousValidatorsBalanceWei, + prePendingBalanceWei, + postValidatorsBalanceWei, + postPendingBalanceWei, + 0n, + ONE_DAY, + ); + + await stakingRouterHarness + .connect(admin) + .reportValidatorBalancesByStakingModule([moduleId], [previousValidatorsBalanceWei / ONE_GWEI]); + + await expect(problematicModuleReport()).not.to.be.reverted; + + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + preValidatorsWei: previousValidatorsBalanceWei, + prePendingWei: prePendingBalanceWei, + postValidatorsWei: previousValidatorsBalanceWei, + postPendingWei: prePendingBalanceWei, + }), + ).not.to.be.reverted; + + await expect(problematicModuleReport()) + .to.be.revertedWithCustomError(checkerWithRouter, "IncorrectTotalCLBalanceIncrease") + .withArgs(expectedValidatorsGrowthLimitWei, excessiveValidatorsGrowthWei); + }); + + it("supports cold-start onboarding across the global path and module bootstrap flow", async () => { + const { checkerWithRouter, stakingRouterHarness, moduleIds } = await deployCheckerWithRouterModules(); + const [moduleId] = moduleIds; + const accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + const depositedWei = ether("200"); + const activatedValidatorsWei = ether("100"); + const remainingPendingWei = depositedWei - activatedValidatorsWei; + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + postPendingWei: depositedWei, + depositsWei: depositedWei, + }), + ).not.to.be.reverted; 
+ + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleId], + [0n], + 0n, + 0n, + 0n, + depositedWei, + depositedWei, + ONE_DAY, + ), + ).not.to.be.reverted; + + await stakingRouterHarness.connect(admin).reportValidatorBalancesByStakingModule([moduleId], [0n]); + + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + prePendingWei: depositedWei, + postValidatorsWei: activatedValidatorsWei, + postPendingWei: remainingPendingWei, + }), + ).not.to.be.reverted; + + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleId], + [activatedValidatorsWei], + 0n, + depositedWei, + activatedValidatorsWei, + remainingPendingWei, + 0n, + ONE_DAY, + ), + ).not.to.be.reverted; + }); + + it("supports cold-start onboarding across multiple new modules", async () => { + const { checkerWithRouter, stakingRouterHarness, moduleIds } = await deployCheckerWithRouterModules(2); + const [moduleOneId, moduleTwoId] = moduleIds; + const accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + const moduleOneInitialPendingWei = ether("120"); + const moduleTwoInitialPendingWei = ether("80"); + const totalInitialPendingWei = moduleOneInitialPendingWei + moduleTwoInitialPendingWei; + const moduleOneActivatedValidatorsWei = ether("60"); + const moduleTwoActivatedValidatorsWei = ether("40"); + const moduleOneRemainingPendingWei = moduleOneInitialPendingWei - moduleOneActivatedValidatorsWei; + const moduleTwoRemainingPendingWei = moduleTwoInitialPendingWei - moduleTwoActivatedValidatorsWei; + const totalActivatedValidatorsWei = moduleOneActivatedValidatorsWei + moduleTwoActivatedValidatorsWei; + const totalRemainingPendingWei = moduleOneRemainingPendingWei + moduleTwoRemainingPendingWei; + + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + postPendingWei: totalInitialPendingWei, + depositsWei: totalInitialPendingWei, + }), + ).not.to.be.reverted; + + await expect( + 
checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleOneId, moduleTwoId], + [0n, 0n], + 0n, + 0n, + 0n, + totalInitialPendingWei, + totalInitialPendingWei, + ONE_DAY, + ), + ).not.to.be.reverted; + + await stakingRouterHarness + .connect(admin) + .reportValidatorBalancesByStakingModule([moduleOneId, moduleTwoId], [0n, 0n]); + + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + prePendingWei: totalInitialPendingWei, + postValidatorsWei: totalActivatedValidatorsWei, + postPendingWei: totalRemainingPendingWei, + }), + ).not.to.be.reverted; + + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleOneId, moduleTwoId], + [moduleOneActivatedValidatorsWei, moduleTwoActivatedValidatorsWei], + 0n, + totalInitialPendingWei, + totalActivatedValidatorsWei, + totalRemainingPendingWei, + 0n, + ONE_DAY, + ), + ).not.to.be.reverted; + }); + + it("supports cold-start onboarding with timeElapsed = 0 under allowance and rate-normalization fallbacks", async () => { + const { checkerWithRouter, stakingRouterHarness, moduleIds } = await deployCheckerWithRouterModules(); + const [moduleId] = moduleIds; + const accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + const zeroTimeElapsed = 0n; + const initialPendingWei = ether("10"); + const expectedModulePerDayLimitWei = + (limits.appearedEthAmountPerDayLimit + limits.consolidationEthAmountPerDayLimit) * ether("1"); + const maxModuleActivationGwei = expectedModulePerDayLimitWei / ONE_DAY / ONE_GWEI; + const maxModuleActivationWei = maxModuleActivationGwei * ONE_GWEI; + const remainingPendingWei = initialPendingWei - maxModuleActivationWei; + + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + timeElapsed: zeroTimeElapsed, + postPendingWei: initialPendingWei, + depositsWei: initialPendingWei, + }), + ).not.to.be.reverted; + + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleId], + [0n], + 0n, + 
0n, + 0n, + initialPendingWei, + initialPendingWei, + zeroTimeElapsed, + ), + ).not.to.be.reverted; + + await stakingRouterHarness.connect(admin).reportValidatorBalancesByStakingModule([moduleId], [0n]); + + await expect( + checkGlobalReport(checkerWithRouter, accountingSigner, { + timeElapsed: zeroTimeElapsed, + prePendingWei: initialPendingWei, + postValidatorsWei: maxModuleActivationWei, + postPendingWei: remainingPendingWei, + }), + ).not.to.be.reverted; + + await expect( + checkerWithRouter.checkModuleAndCLBalancesChangeRates( + [moduleId], + [maxModuleActivationWei], + 0n, + initialPendingWei, + maxModuleActivationWei, + remainingPendingWei, + 0n, + zeroTimeElapsed, + ), + ).not.to.be.reverted; + }); + + it("reverts with InvalidClBalancesData on array length mismatch", async () => { + await expect( + checker.checkModuleAndCLBalancesChangeRates([1n], [], 0n, 0n, 1n, 0n, 0n, ONE_DAY), + ).to.be.revertedWithCustomError(checker, "InvalidClBalancesData"); + }); + + it("reverts with InconsistentValidatorsBalanceByModule when validators balance sum mismatches", async () => { + await expect(checker.checkModuleAndCLBalancesChangeRates([1n, 2n], [10n, 20n], 0n, 0n, 40n, 3n, 0n, ONE_DAY)) + .to.be.revertedWithCustomError(checker, "InconsistentValidatorsBalanceByModule") + .withArgs(40n, 30n); + }); + + it("reverts with IncorrectTotalPendingBalance when reported pending exceeds funded protocol pending", async () => { + await expect(checker.checkModuleAndCLBalancesChangeRates([1n, 2n], [10n, 20n], 0n, 0n, 30n, 4n, 0n, ONE_DAY)) + .to.be.revertedWithCustomError(checker, "IncorrectTotalPendingBalance") + .withArgs(0n, 4n); + }); + + it("allows redistribution between modules when total CL balance is unchanged", async () => { + const redistributionWei = limits.consolidationEthAmountPerDayLimit * ether("1"); + await seedPreviousBalances([ + { id: 1n, validatorsBalanceWei: redistributionWei }, + { id: 2n, validatorsBalanceWei: redistributionWei }, + ]); + + await expect( + 
check([ + { id: 1n, validatorsBalanceWei: 0n }, + { id: 2n, validatorsBalanceWei: redistributionWei * 2n }, + ]), + ).not.to.be.reverted; + }); + + it("reverts with IncorrectTotalPendingBalance when a module reports more pending than the protocol funded", async () => { + const previousPendingWei = ether("10"); + const reportedPendingWei = previousPendingWei + ether("1"); + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: 0n }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: 0n, pendingWei: reportedPendingWei }], { + preCLPendingBalanceWei: previousPendingWei, + }), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalPendingBalance") + .withArgs(previousPendingWei, reportedPendingWei); + }); + + it("allows pending-to-validators activation within a module when module total is unchanged", async () => { + const previousPendingWei = ether("100"); + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: 0n }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: ether("100"), pendingWei: 0n }], { + preCLPendingBalanceWei: previousPendingWei, + }), + ).not.to.be.reverted; + }); + + it("reverts with IncorrectTotalCLBalanceIncrease when module increase exceeds the global activation budget", async () => { + const previousValidatorsWei = ether("219000"); + const currentIncreasePerDay = ether("121"); + const previousPendingWei = ether("60"); + const expectedValidatorsGrowthLimitWei = + previousPendingWei + (previousValidatorsWei * limits.annualBalanceIncreaseBPLimit) / (365n * 10_000n); + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: previousValidatorsWei }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: previousValidatorsWei + currentIncreasePerDay, pendingWei: 0n }], { + preCLPendingBalanceWei: previousPendingWei, + }), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(expectedValidatorsGrowthLimitWei, currentIncreasePerDay); + }); + + it("sums module 
increases across modules before checking appeared limit", async () => { + const previousModuleValidatorsWei = ether("109500"); + const previousPendingWei = ether("60"); + const totalPreviousValidatorsWei = previousModuleValidatorsWei * 2n; + const totalPositiveModuleIncreaseWei = ether("131"); + const expectedModuleIncreaseLimitWei = + previousPendingWei + + (totalPreviousValidatorsWei * limits.annualBalanceIncreaseBPLimit) / (365n * 10_000n) + + limits.consolidationEthAmountPerDayLimit * ether("1"); + + await seedPreviousBalances([ + { id: 1n, validatorsBalanceWei: previousModuleValidatorsWei }, + { id: 2n, validatorsBalanceWei: previousModuleValidatorsWei }, + ]); + + await expect( + check( + [ + { + id: 1n, + validatorsBalanceWei: previousModuleValidatorsWei + totalPositiveModuleIncreaseWei, + pendingWei: 0n, + }, + { id: 2n, validatorsBalanceWei: previousModuleValidatorsWei - ether("71"), pendingWei: 0n }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + }, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalModuleValidatorsBalanceIncrease") + .withArgs(expectedModuleIncreaseLimitWei, totalPositiveModuleIncreaseWei); + }); + + it("reverts with IncorrectTotalActivatedBalance when consumed pending exceeds the global appeared limit", async () => { + const appearedLimitPerPeriodWei = limits.appearedEthAmountPerDayLimit * ether("1"); + const totalConsumedPendingWei = ether("120"); + + await seedPreviousBalances([ + { id: 1n, validatorsBalanceWei: 0n }, + { id: 2n, validatorsBalanceWei: 0n }, + ]); + + await expect( + check( + [ + { id: 1n, validatorsBalanceWei: 0n, pendingWei: 0n }, + { id: 2n, validatorsBalanceWei: 0n, pendingWei: 0n }, + ], + { + preCLPendingBalanceWei: totalConsumedPendingWei, + }, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalActivatedBalance") + .withArgs(appearedLimitPerPeriodWei, totalConsumedPendingWei); + }); + + it("reverts with IncorrectTotalCLBalanceIncrease when reported validators balance 
growth exceeds consumed pending", async () => { + const consumedPendingWei = ether("20"); + const reportedValidatorsGrowthWei = ether("60"); + const expectedValidatorsGrowthLimitWei = consumedPendingWei; + + await seedPreviousBalances([ + { id: 1n, validatorsBalanceWei: 0n }, + { id: 2n, validatorsBalanceWei: 0n }, + ]); + + await expect( + check( + [ + { id: 1n, validatorsBalanceWei: ether("30"), pendingWei: ether("20") }, + { id: 2n, validatorsBalanceWei: ether("30"), pendingWei: ether("20") }, + ], + { + preCLPendingBalanceWei: ether("60"), + }, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(expectedValidatorsGrowthLimitWei, reportedValidatorsGrowthWei); + }); + + it("allows reported validators balance growth above consumed pending within safetyCap", async () => { + const previousValidatorsWei = ether("3650"); + const previousPendingWei = ether("10"); + const consumedPendingWei = ether("9"); + const safetyCapWei = (previousValidatorsWei * limits.annualBalanceIncreaseBPLimit) / (365n * 10_000n); + const maxAllowedValidatorsGrowthWei = consumedPendingWei + safetyCapWei; + const currentPendingWei = previousPendingWei - consumedPendingWei; + const requiredValidatorsIncreaseWei = maxAllowedValidatorsGrowthWei; + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: previousValidatorsWei }]); + + await expect( + check( + [ + { + id: 1n, + validatorsBalanceWei: previousValidatorsWei + requiredValidatorsIncreaseWei, + pendingWei: currentPendingWei, + }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + }, + ), + ).not.to.be.reverted; + }); + + it("reverts when reported validators balance growth exceeds consumed pending plus safetyCap by an explicit overflow", async () => { + const previousValidatorsWei = ether("3650"); + const previousPendingWei = ether("10"); + const consumedPendingWei = ether("9"); + const safetyCapWei = (previousValidatorsWei * limits.annualBalanceIncreaseBPLimit) / (365n * 10_000n); + 
const safetyCapOverflowWei = ether("1"); + const maxAllowedValidatorsGrowthWei = consumedPendingWei + safetyCapWei; + const reportedValidatorsGrowthWei = maxAllowedValidatorsGrowthWei + safetyCapOverflowWei; + const currentPendingWei = previousPendingWei - consumedPendingWei; + const requiredValidatorsIncreaseWei = reportedValidatorsGrowthWei; + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: previousValidatorsWei }]); + + await expect( + check( + [ + { + id: 1n, + validatorsBalanceWei: previousValidatorsWei + requiredValidatorsIncreaseWei, + pendingWei: currentPendingWei, + }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + }, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(maxAllowedValidatorsGrowthWei, reportedValidatorsGrowthWei); + }); + + it("allows an exact module increase at the appeared+consolidation limit", async () => { + const previousValidatorsWei = ether("36500"); + const previousPendingWei = ether("36500"); + const activatedWei = ether("100"); + const exactIncrease = (limits.appearedEthAmountPerDayLimit + limits.consolidationEthAmountPerDayLimit) * ether("1"); + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: previousValidatorsWei }]); + + await expect( + check( + [ + { + id: 1n, + validatorsBalanceWei: previousValidatorsWei + exactIncrease, + pendingWei: previousPendingWei - activatedWei, + }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + }, + ), + ).not.to.be.reverted; + }); + + it("allows validator growth funded by existing pending when total CL is unchanged", async () => { + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: ether("5") }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: ether("105"), pendingWei: 0n }], { + preCLPendingBalanceWei: ether("100"), + }), + ).not.to.be.reverted; + }); + + it("uses timeElapsed in per-day normalization (timeElapsed = 0 path)", async () => { + const activatedWei = ether("5"); + const 
appearedLimitForZeroElapsedWei = (limits.appearedEthAmountPerDayLimit * ether("1")) / 24n; + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: 0n }]); + + await expect( + check([{ id: 1n, validatorsBalanceWei: activatedWei, pendingWei: 0n }], { + preCLPendingBalanceWei: activatedWei, + timeElapsed: 0n, + }), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalActivatedBalance") + .withArgs(appearedLimitForZeroElapsedWei, activatedWei); + }); + + it("normalizes module increases by a non-zero elapsed time", async () => { + const previousValidatorsWei = ether("43800"); + const previousPendingWei = ether("36500"); + const halfDay = ONE_DAY / 2n; + const activatedWei = ether("50"); + const safetyCapWei = + (previousValidatorsWei * limits.annualBalanceIncreaseBPLimit * halfDay) / (365n * ONE_DAY * 10_000n); + const allowedValidatorsGrowthWei = activatedWei + safetyCapWei; + + await seedPreviousBalances([{ id: 1n, validatorsBalanceWei: previousValidatorsWei }]); + + await expect( + check( + [ + { + id: 1n, + validatorsBalanceWei: previousValidatorsWei + allowedValidatorsGrowthWei, + pendingWei: previousPendingWei - activatedWei, + }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + timeElapsed: halfDay, + }, + ), + ).not.to.be.reverted; + + const exceededValidatorsGrowthWei = allowedValidatorsGrowthWei + ether("1"); + await expect( + check( + [ + { + id: 1n, + validatorsBalanceWei: previousValidatorsWei + exceededValidatorsGrowthWei, + pendingWei: previousPendingWei - activatedWei, + }, + ], + { + preCLPendingBalanceWei: previousPendingWei, + timeElapsed: halfDay, + }, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(allowedValidatorsGrowthWei, exceededValidatorsGrowthWei); + }); + + it("allows redistribution between modules even when maxCLBalanceDecreaseBP is zero", async () => { + const redistributionWei = limits.consolidationEthAmountPerDayLimit * ether("1"); + await seedPreviousBalances([ + 
{ id: 1n, validatorsBalanceWei: redistributionWei }, + { id: 2n, validatorsBalanceWei: redistributionWei }, + ]); + + await checker.connect(admin).grantRole(await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setMaxCLBalanceDecreaseBP(0n); + + await expect( + check([ + { id: 1n, validatorsBalanceWei: 0n }, + { id: 2n, validatorsBalanceWei: redistributionWei * 2n }, + ]), + ).not.to.be.reverted; + }); +}); diff --git a/test/0.8.9/sanityChecker/oracleReportSanityChecker.negative-rebase.test.ts b/test/0.8.9/sanityChecker/oracleReportSanityChecker.negative-rebase.test.ts index 2939c92c08..963689e1aa 100644 --- a/test/0.8.9/sanityChecker/oracleReportSanityChecker.negative-rebase.test.ts +++ b/test/0.8.9/sanityChecker/oracleReportSanityChecker.negative-rebase.test.ts @@ -4,20 +4,25 @@ import { artifacts, ethers } from "hardhat"; import { anyValue } from "@nomicfoundation/hardhat-chai-matchers/withArgs"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; +import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; import { Accounting__MockForSanityChecker, AccountingOracle__MockForSanityChecker, + Lido__MockForSanityChecker, LidoLocator__MockForSanityChecker, OracleReportSanityChecker, StakingRouter__MockForSanityChecker, } from "typechain-types"; -import { ether, getCurrentBlockTimestamp, impersonate } from "lib"; +import { ether, impersonate } from "lib"; import { Snapshot } from "test/suite"; const SLOTS_PER_DAY = 7200n; +const REPORTS_WINDOW = 36; +const MAX_BASIS_POINTS = 10_000n; +const MAX_CL_BALANCE_DECREASE_BP = 360n; // 3.6% describe("OracleReportSanityChecker.sol:negative-rebase", () => { let locator: LidoLocator__MockForSanityChecker; @@ -25,40 +30,97 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { let accountingOracle: AccountingOracle__MockForSanityChecker; let accounting: Accounting__MockForSanityChecker; let stakingRouter: 
StakingRouter__MockForSanityChecker; + let lido: Lido__MockForSanityChecker; let deployer: HardhatEthersSigner; + let withdrawalVault: HardhatEthersSigner; let accountingSigner: HardhatEthersSigner; const defaultLimitsList = { - exitedValidatorsPerDayLimit: 50n, - appearedValidatorsPerDayLimit: 75n, + exitedEthAmountPerDayLimit: 50n, + appearedEthAmountPerDayLimit: 75n, annualBalanceIncreaseBPLimit: 10_00n, // 10% simulatedShareRateDeviationBPLimit: 2_00n, // 2% - maxValidatorExitRequestsPerReport: 2000n, + maxBalanceExitRequestedPerReportInEth: 64_000n, // Max ~65K ETH (close to uint16 max) + maxEffectiveBalanceWeightWCType01: 32n, + maxEffectiveBalanceWeightWCType02: 2_048n, maxItemsPerExtraDataTransaction: 15n, maxNodeOperatorsPerExtraDataItem: 16n, requestTimestampMargin: 128n, - maxPositiveTokenRebase: 5_000_000n, // 0.05% - initialSlashingAmountPWei: 1000n, // 1 ETH = 1000 PWei - inactivityPenaltiesAmountPWei: 101n, // 0.101 ETH = 101 PWei - clBalanceOraclesErrorUpperBPLimit: 50n, // 0.5% + maxPositiveTokenRebase: 5_000_000n, + maxCLBalanceDecreaseBP: MAX_CL_BALANCE_DECREASE_BP, + clBalanceOraclesErrorUpperBPLimit: 50n, + consolidationEthAmountPerDayLimit: 0n, + exitedValidatorEthAmountLimit: 1n, }; let originalState: string; + const callCheck = ( + preCLBalance: bigint, + postCLBalance: bigint, + withdrawalVaultBalance = 0n, + deposits = 0n, + withdrawalsVaultTransfer = 0n, + timeElapsed = 24n * 60n * 60n, + preCLPendingBalance = 0n, + postCLPendingBalance = 0n, + ) => + checker + .connect(accountingSigner) + .checkAccountingOracleReport( + timeElapsed, + preCLBalance - deposits - preCLPendingBalance, + preCLPendingBalance, + postCLBalance - postCLPendingBalance, + postCLPendingBalance, + withdrawalVaultBalance, + 0n, + 0n, + deposits, + withdrawalsVaultTransfer, + ); + + // Deposits remain in pending until they are activated on the validators side. 
+ const callCheckWithPendingDeposits = ( + preCLBalance: bigint, + postCLBalance: bigint, + deposits: bigint, + { + withdrawalVaultBalance = 0n, + withdrawalsVaultTransfer = 0n, + timeElapsed = 24n * 60n * 60n, + }: { + withdrawalVaultBalance?: bigint; + withdrawalsVaultTransfer?: bigint; + timeElapsed?: bigint; + } = {}, + ) => + callCheck( + preCLBalance, + postCLBalance, + withdrawalVaultBalance, + deposits, + withdrawalsVaultTransfer, + timeElapsed, + 0n, + deposits, + ); + + const maxDiffFor = (adjusted: bigint) => (adjusted * MAX_CL_BALANCE_DECREASE_BP) / MAX_BASIS_POINTS; + const deploySecondOpinionOracle = async () => { const secondOpinionOracle = await ethers.deployContract("SecondOpinionOracle__Mock"); const clOraclesRole = await checker.SECOND_OPINION_MANAGER_ROLE(); await checker.grantRole(clOraclesRole, deployer.address); - // 10000 BP - 100% - // 74 BP - 0.74% await checker.setSecondOpinionOracleAndCLBalanceUpperMargin(await secondOpinionOracle.getAddress(), 74n); return secondOpinionOracle; }; before(async () => { - [deployer] = await ethers.getSigners(); + [deployer, withdrawalVault] = await ethers.getSigners(); + await setBalance(withdrawalVault.address, ether("10000")); const sanityCheckerAddress = deployer.address; @@ -71,10 +133,11 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { 1606824023, ]); stakingRouter = await ethers.deployContract("StakingRouter__MockForSanityChecker"); + lido = await ethers.deployContract("Lido__MockForSanityChecker"); locator = await ethers.deployContract("LidoLocator__MockForSanityChecker", [ { - lido: deployer.address, + lido: await lido.getAddress(), depositSecurityModule: deployer.address, elRewardsVault: deployer.address, accountingOracle: await accountingOracle.getAddress(), @@ -84,11 +147,12 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { stakingRouter: await stakingRouter.getAddress(), treasury: deployer.address, withdrawalQueue: deployer.address, - withdrawalVault: 
deployer.address, + withdrawalVault: withdrawalVault.address, postTokenRebaseReceiver: deployer.address, oracleDaemonConfig: deployer.address, validatorExitDelayVerifier: deployer.address, triggerableWithdrawalsGateway: deployer.address, + consolidationGateway: deployer.address, accounting: await accounting.getAddress(), wstETH: deployer.address, vaultHub: deployer.address, @@ -96,18 +160,17 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { lazyOracle: deployer.address, predepositGuarantee: deployer.address, operatorGrid: deployer.address, + topUpGateway: deployer.address, }, ]); - const locatorAddress = await locator.getAddress(); - const accountingOracleAddress = await accountingOracle.getAddress(); - const accountingAddress = await accounting.getAddress(); - - checker = await ethers - .getContractFactory("OracleReportSanityChecker") - .then((f) => - f.deploy(locatorAddress, accountingOracleAddress, accountingAddress, deployer.address, defaultLimitsList), - ); + const factory = await ethers.getContractFactory("OracleReportSanityChecker"); + checker = await factory.deploy( + await locator.getAddress(), + await accounting.getAddress(), + deployer.address, + defaultLimitsList, + ); accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); }); @@ -118,13 +181,13 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { context("OracleReportSanityChecker checkAccountingOracleReport authorization", () => { it("should allow calling from Accounting address", async () => { - await checker.connect(accountingSigner).checkAccountingOracleReport(0, 110 * 1e9, 109.99 * 1e9, 0, 0, 0, 10, 10); + await callCheck(ether("100"), ether("100")); }); it("should not allow calling from non-Accounting address", async () => { const [, otherClient] = await ethers.getSigners(); await expect( - checker.connect(otherClient).checkAccountingOracleReport(0, 110 * 1e9, 110.01 * 1e9, 0, 0, 0, 10, 10), + 
checker.connect(otherClient).checkAccountingOracleReport(0, ether("100"), 0, ether("100"), 0, 0, 0, 0, 0, 0), ).to.be.revertedWithCustomError(checker, "CalledNotFromAccounting"); }); }); @@ -144,29 +207,41 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { it("has compact packed limits representation", async () => { const artifact = await artifacts.readArtifact("OracleReportSanityCheckerWrapper"); - const functionABI = artifact.abi.find( - (entry) => entry.type === "function" && entry.name === "exposePackedLimits", + const accountingCoreABI = artifact.abi.find( + (entry) => entry.type === "function" && entry.name === "exposeAccountingCorePackedLimits", + ); + const operationalABI = artifact.abi.find( + (entry) => entry.type === "function" && entry.name === "exposeOperationalPackedLimits", ); const sizeOfCalc = (x: string) => { switch (x) { case "uint256": return 256; + case "uint128": + return 128; case "uint64": return 64; case "uint32": return 32; case "uint16": return 16; + case "uint8": + return 8; default: expect.fail(`Unknown type ${x}`); } }; - const structSizeInBits = functionABI.outputs[0].components + const accountingCoreSizeInBits = accountingCoreABI.outputs[0].components + .map((x: { type: string }) => x.type) + .reduce((acc: number, x: string) => acc + sizeOfCalc(x), 0); + const operationalSizeInBits = operationalABI.outputs[0].components .map((x: { type: string }) => x.type) .reduce((acc: number, x: string) => acc + sizeOfCalc(x), 0); - expect(structSizeInBits).to.lessThanOrEqual(256); + + expect(accountingCoreSizeInBits).to.lessThanOrEqual(256); + expect(operationalSizeInBits).to.lessThanOrEqual(256); }); it("second opinion can be changed or removed", async () => { @@ -186,295 +261,862 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { }); }); - context("OracleReportSanityChecker rebase report data", () => { - async function newChecker() { - return await ethers.deployContract("OracleReportSanityCheckerWrapper", [ - 
await locator.getAddress(), - await accountingOracle.getAddress(), - await accounting.getAddress(), - deployer.address, - Object.values(defaultLimitsList), - ]); - } - - it("sums negative rebases for a few days", async () => { - const reportChecker = await newChecker(); - const timestamp = await getCurrentBlockTimestamp(); - expect(await reportChecker.sumNegativeRebasesNotOlderThan(timestamp - 18n * SLOTS_PER_DAY)).to.equal(0); - await reportChecker.addReportData(timestamp - 1n * SLOTS_PER_DAY, 10, 100); - await reportChecker.addReportData(timestamp - 2n * SLOTS_PER_DAY, 10, 150); - expect(await reportChecker.sumNegativeRebasesNotOlderThan(timestamp - 18n * SLOTS_PER_DAY)).to.equal(250); - }); - - it("sums negative rebases for 18 days", async () => { - const reportChecker = await newChecker(); - const timestamp = await getCurrentBlockTimestamp(); - - await reportChecker.addReportData(timestamp - 19n * SLOTS_PER_DAY, 0, 700); - await reportChecker.addReportData(timestamp - 18n * SLOTS_PER_DAY, 0, 13); - await reportChecker.addReportData(timestamp - 17n * SLOTS_PER_DAY, 0, 10); - await reportChecker.addReportData(timestamp - 5n * SLOTS_PER_DAY, 0, 5); - await reportChecker.addReportData(timestamp - 2n * SLOTS_PER_DAY, 0, 150); - await reportChecker.addReportData(timestamp - 1n * SLOTS_PER_DAY, 0, 100); - - const expectedSum = await reportChecker.sumNegativeRebasesNotOlderThan(timestamp - 18n * SLOTS_PER_DAY); - expect(expectedSum).to.equal(100 + 150 + 5 + 10); - }); - - it("returns exited validators count", async () => { - const reportChecker = await newChecker(); - const timestamp = await getCurrentBlockTimestamp(); - - await reportChecker.addReportData(timestamp - 19n * SLOTS_PER_DAY, 10, 100); - await reportChecker.addReportData(timestamp - 18n * SLOTS_PER_DAY, 11, 100); - await reportChecker.addReportData(timestamp - 17n * SLOTS_PER_DAY, 12, 100); - await reportChecker.addReportData(timestamp - 5n * SLOTS_PER_DAY, 13, 100); - await 
reportChecker.addReportData(timestamp - 2n * SLOTS_PER_DAY, 14, 100); - await reportChecker.addReportData(timestamp - 1n * SLOTS_PER_DAY, 15, 100); - - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 19n * SLOTS_PER_DAY)).to.equal(10); - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 18n * SLOTS_PER_DAY)).to.equal(11); - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 1n * SLOTS_PER_DAY)).to.equal(15); - }); - - it("returns exited validators count for missed or non-existent report", async () => { - const reportChecker = await newChecker(); - const timestamp = await getCurrentBlockTimestamp(); - await reportChecker.addReportData(timestamp - 19n * SLOTS_PER_DAY, 10, 100); - await reportChecker.addReportData(timestamp - 18n * SLOTS_PER_DAY, 11, 100); - await reportChecker.addReportData(timestamp - 15n * SLOTS_PER_DAY, 12, 100); - await reportChecker.addReportData(timestamp - 5n * SLOTS_PER_DAY, 13, 100); - await reportChecker.addReportData(timestamp - 2n * SLOTS_PER_DAY, 14, 100); - await reportChecker.addReportData(timestamp - 1n * SLOTS_PER_DAY, 15, 100); - - // Out of range: day -20 - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 20n * SLOTS_PER_DAY)).to.equal(0); - // Missed report: day -6 - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 6n * SLOTS_PER_DAY)).to.equal(12); - // Missed report: day -7 - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 7n * SLOTS_PER_DAY)).to.equal(12); - // Expected report: day 15 - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 15n * SLOTS_PER_DAY)).to.equal(12); - // Missed report: day -16 - expect(await reportChecker.exitedValidatorsAtTimestamp(timestamp - 16n * SLOTS_PER_DAY)).to.equal(11); + context("OracleReportSanityChecker balance-based CL decrease check", () => { + let genesisTime: bigint; + let baseRefSlot: bigint; + + before(async () => { + genesisTime = await 
accountingOracle.GENESIS_TIME(); + const timestamp = (await ethers.provider.getBlock("latest"))!.timestamp; + baseRefSlot = (BigInt(timestamp) - genesisTime) / 12n; + }); + + const setRefSlot = (slot: bigint) => accountingOracle.setLastProcessingRefSlot(slot); + + context("early exit predicate", () => { + it("passes when postCL >= preCL (no decrease)", async () => { + await expect( + callCheck(ether("101"), ether("101.001"), 0n, 0n, 0n, 4n * 24n * 60n * 60n, ether("1"), ether("0.999")), + ).not.to.be.reverted; + }); + + it("passes when postCL + withdrawals >= preCL", async () => { + await expect(callCheck(ether("105"), ether("100"), ether("5"))).not.to.be.reverted; + }); + + it("passes when postCL + withdrawals == preCL", async () => { + await expect(callCheck(ether("100"), ether("95"), ether("5"))).not.to.be.reverted; + }); + + it("passes when postCL == preCL", async () => { + await expect(callCheck(ether("100"), ether("100"))).not.to.be.reverted; + }); + + it("does not use cumulative withdrawal vault balance for early exit when no new CL withdrawals", async () => { + const baseline = ether("10000"); + const unchangedVaultBalance = ether("100"); + const postCL = ether("9550"); + const actualDiff = baseline - postCL; + const adjusted = baseline - unchangedVaultBalance; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // First report with non-zero vault balance sets _lastVaultBalanceAfterTransfer. + // Validators drop matches clWithdrawals (100 ETH) so no "appeared" balance. + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, ether("9900"), unchangedVaultBalance, 0n, 0n); + + // Same vault balance on the next report means clWithdrawals == 0 for this period. + // The check must not early-exit based on cumulative vault balance. 
+ await setRefSlot(baseRefSlot); + await expect(callCheck(ether("9900"), postCL, unchangedVaultBalance, 0n, 0n)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(actualDiff, expectedMaxDiff); + }); + }); + + context("first report (no history)", () => { + it("passes on first report even with large decrease", async () => { + await expect(callCheck(ether("100"), ether("50"))).not.to.be.reverted; + }); + }); + + context("single-period decrease", () => { + it("decrease within limit passes and emits NegativeCLRebaseAccepted", async () => { + const baseline = ether("10000"); + const postCL = ether("9700"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("decrease exactly at limit passes", async () => { + const baseline = ether("10000"); + const expectedMaxDiff = maxDiffFor(baseline); + const postCL = baseline - expectedMaxDiff; + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, expectedMaxDiff, expectedMaxDiff); + }); + + it("decrease exceeding limit reverts with IncorrectCLBalanceDecrease", async () => { + const baseline = ether("10000"); + const postCL = ether("9500"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + 
.withArgs(actualDiff, expectedMaxDiff); + }); + }); + + context("deposits and withdrawals adjustment", () => { + it("deposits increase adjusted balance and allowed decrease", async () => { + const baseline = ether("10000"); + const depositAmount = ether("500"); + const postCL = ether("9700"); + const principalCL = baseline + depositAmount; + const actualDiff = baseline - postCL; + const adjusted = baseline + depositAmount; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // adjusted includes depositAmount -> + // expectedMaxDiff is larger than without deposits -> actualDiff fits + await setRefSlot(baseRefSlot); + await expect(callCheckWithPendingDeposits(principalCL, postCL, depositAmount)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("withdrawals decrease adjusted balance and allowed decrease", async () => { + const baseline = ether("10000"); + const postCL = ether("9700"); + const wVault = ether("200"); + const actualDiff = baseline - postCL; + const adjusted = baseline - wVault; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // adjusted = baseline - wVault -> + // smaller expectedMaxDiff, but actualDiff still within limit + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL, wVault)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("large withdrawals trigger stricter limit and cause revert", async () => { + const baseline = ether("10000"); + const postCL = ether("9600"); + const wVault = ether("300"); + const actualDiff = baseline - postCL; + const adjusted = baseline - wVault; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, 
baseline); + + // adjusted = baseline - wVault -> + // expectedMaxDiff shrinks below actualDiff -> reverts + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL, wVault)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(actualDiff, expectedMaxDiff); + }); + + it("deposits and withdrawals combined over multiple reports", async () => { + const baseline = ether("10000"); + const report2Deposits = ether("200"); + const report2Withdrawals = ether("100"); + const report3Deposits = ether("300"); + const report3Withdrawals = ether("50"); + const postCL = ether("9700"); + + const actualDiff = baseline - postCL; + const totalDeposits = report2Deposits + report3Deposits; + const totalWithdrawals = report2Withdrawals + report3Withdrawals; + const adjusted = baseline + totalDeposits - totalWithdrawals; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheckWithPendingDeposits(ether("10200"), ether("9900"), report2Deposits, { + withdrawalVaultBalance: report2Withdrawals, + withdrawalsVaultTransfer: report2Withdrawals, + }); + + // adjusted = baseline + totalDeposits - totalWithdrawals + // actualDiff = baseline - postCL + await setRefSlot(baseRefSlot); + await expect( + callCheckWithPendingDeposits(ether("10150"), postCL, report3Deposits, { + withdrawalVaultBalance: report3Withdrawals, + withdrawalsVaultTransfer: report3Withdrawals, + }), + ) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("repeated withdrawalVaultBalance snapshots make the limit stricter", async () => { + const baseline = ether("10000"); + const repeatedWVaultSnapshot = ether("150"); + const postCL = ether("9650"); + const actualDiff = baseline - postCL; + const totalCLWithdrawals = repeatedWVaultSnapshot * 2n; + const adjusted 
= baseline - totalCLWithdrawals; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - 3n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // preCL <= postCL + wVault -> early exit, but CL withdrawals are still stored in reportData + // Validators drop matches clWithdrawals so no "appeared" balance. + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, ether("9850"), repeatedWVaultSnapshot, 0n, repeatedWVaultSnapshot); + + // same for next report; repeated CL withdrawals tighten adjustedBase + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(ether("9850"), ether("9700"), repeatedWVaultSnapshot, 0n, repeatedWVaultSnapshot); + + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("9800"), postCL)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(actualDiff, expectedMaxDiff); + }); + }); + + context("accumulation over multiple reports", () => { + it("gradual decrease over several reports accumulates", async () => { + const baseline = ether("10000"); + const finalPostCL = ether("9500"); + const cumulativeDiff = baseline - finalPostCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, ether("9800")); + + // cumulativeDiff = baseline - finalPostCL + // (summed over 2 decreases) > expectedMaxDiff + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("9800"), finalPostCL)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(cumulativeDiff, expectedMaxDiff); + }); + + it("balance recovery within window via deposits", async () => { + const baseline = ether("10000"); + + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, 
ether("9700")); + + // deposits raise adjusted balance, increasing the allowed decrease + await setRefSlot(baseRefSlot); + await expect(callCheckWithPendingDeposits(ether("9700"), ether("9700"), ether("300"))).not.to.be.reverted; + }); + + it("single large decrease exceeds limit", async () => { + const baseline = ether("10000"); + const postCL = ether("9300"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await expect(callCheck(baseline, postCL)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(actualDiff, expectedMaxDiff); + }); + }); + + context("window boundary behavior", () => { + it("window grows adaptively from 1 to 36", async () => { + const baseline = ether("10000"); + const postCL = ether("9700"); + // actualDiff measured from baseline (window start), not from previous report + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, ether("9800")); + + // window=2: cumulative actualDiff (300) < expectedMaxDiff (360) -> passes + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("9800"), postCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("window = 1 with only 2 reports", async () => { + const baseline = ether("10000"); + const postCL = ether("9700"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL)) + .to.emit(checker, 
"NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("uses X-36 report as baseline at full window", async () => { + const totalReports = REPORTS_WINDOW + 1; + const baseline = ether("10000"); + const stableBalance = ether("9600"); + const postCL = ether("9590"); + const wVaultReport1 = ether("400"); + const actualDiff = baseline - postCL; + const adjusted = baseline - wVaultReport1; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - BigInt(totalReports) * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - BigInt(totalReports - 1) * SLOTS_PER_DAY); + await callCheck(baseline, stableBalance, wVaultReport1, 0n, wVaultReport1); + + for (let i = 2; i < REPORTS_WINDOW; i++) { + await setRefSlot(baseRefSlot - BigInt(totalReports - i) * SLOTS_PER_DAY); + await callCheck(stableBalance, stableBalance); + } + + // At full window, baseline must still be report 0 (X-36), not report 1 (X-35). 
+ await setRefSlot(baseRefSlot); + await expect(callCheck(stableBalance, postCL)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(actualDiff, expectedMaxDiff); + }); + + it("uses a 36-day window by timestamps when reports are delayed", async () => { + const twoDaysInSeconds = 2n * 24n * 60n * 60n; + const baseline = ether("10000"); + const postCL = ether("9700"); + const oldWindowWithdrawal = ether("5"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await callCheck(baseline, baseline, 0n, 0n, 0n, twoDaysInSeconds); + await callCheck( + baseline + oldWindowWithdrawal, + baseline, + oldWindowWithdrawal, + oldWindowWithdrawal, + oldWindowWithdrawal, + twoDaysInSeconds, + ); + + for (let i = 0; i < 17; ++i) { + await callCheck(baseline, baseline, 0n, 0n, 0n, twoDaysInSeconds); + } + + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL, 0n, 0n, 0n, twoDaysInSeconds)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("excludes baseline report flows from adjusted balance", async () => { + const totalReports = REPORTS_WINDOW + 1; + const baseline = ether("10000"); + const baselineWithdrawals = ether("2"); + const postCL = ether("9700"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - BigInt(totalReports) * SLOTS_PER_DAY); + await callCheck( + baseline + baselineWithdrawals, + baseline, + baselineWithdrawals, + baselineWithdrawals, + baselineWithdrawals, + ); + + for (let i = 1; i < REPORTS_WINDOW; i++) { + await setRefSlot(baseRefSlot - BigInt(totalReports - i) * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + } + + // Baseline report flows should not affect adjusted balance. 
+ await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("old data is evicted after window is full", async () => { + const totalReports = REPORTS_WINDOW + 2; + const baseline = ether("10000"); + const stableBalance = ether("9600"); + const postCL = ether("9590"); + const actualDiff = stableBalance - postCL; + const expectedMaxDiff = maxDiffFor(stableBalance); + + await setRefSlot(baseRefSlot - BigInt(totalReports) * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // preCL <= postCL triggers early exit, storing stableBalance with zero deposits/withdrawals + await setRefSlot(baseRefSlot - BigInt(totalReports - 1) * SLOTS_PER_DAY); + await callCheck(stableBalance, stableBalance); + + for (let i = 2; i <= REPORTS_WINDOW; i++) { + await setRefSlot(baseRefSlot - BigInt(totalReports - i) * SLOTS_PER_DAY); + await callCheck(stableBalance, stableBalance); + } + + // report 0 (baseline=10000) evicted -> new baseline = stableBalance + // actualDiff = stableBalance - postCL (small) < expectedMaxDiff -> passes + await setRefSlot(baseRefSlot); + await expect(callCheck(stableBalance, postCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + + it("before eviction the old baseline is still in window", async () => { + const totalReports = REPORTS_WINDOW + 1; + const baseline = ether("10000"); + const stableBalance = ether("9600"); + const postCL = ether("9590"); + const wVaultReport1 = ether("400"); + const actualDiff = baseline - postCL; + const adjusted = baseline - wVaultReport1; + const expectedMaxDiff = maxDiffFor(adjusted); + + await setRefSlot(baseRefSlot - BigInt(totalReports) * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - BigInt(totalReports - 1) * SLOTS_PER_DAY); + await callCheck(baseline, stableBalance, 
wVaultReport1, 0n, wVaultReport1); + + for (let i = 2; i < REPORTS_WINDOW; i++) { + await setRefSlot(baseRefSlot - BigInt(totalReports - i) * SLOTS_PER_DAY); + await callCheck(stableBalance, stableBalance); + } + + // report 0 (baseline) still in window -> + // actualDiff = baseline - postCL (large) + // adjusted = baseline - wVaultReport1 -> + // expectedMaxDiff is small -> actualDiff > expectedMaxDiff -> reverts + await setRefSlot(baseRefSlot); + await expect(callCheck(stableBalance, postCL)) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(actualDiff, expectedMaxDiff); + }); + + it("eviction also removes old deposits from the window", async () => { + const totalReports = REPORTS_WINDOW + 3; + const baseline = ether("10000"); + const stableBalance = ether("9600"); + const postCL = ether("9590"); + const actualDiff = stableBalance - postCL; + const expectedMaxDiff = maxDiffFor(stableBalance); + + await setRefSlot(baseRefSlot - BigInt(totalReports) * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // deposits=1000 and wVault=500 stored with report 1; after eviction they leave the window + await setRefSlot(baseRefSlot - BigInt(totalReports - 1) * SLOTS_PER_DAY); + await callCheckWithPendingDeposits(stableBalance, ether("9100"), ether("1000"), { + withdrawalVaultBalance: ether("500"), + withdrawalsVaultTransfer: ether("500"), + }); + + // clean transition to stableBalance (becomes new baseline after eviction) + await setRefSlot(baseRefSlot - BigInt(totalReports - 2) * SLOTS_PER_DAY); + await callCheck(stableBalance, stableBalance); + + for (let i = 3; i <= REPORTS_WINDOW + 1; i++) { + await setRefSlot(baseRefSlot - BigInt(totalReports - i) * SLOTS_PER_DAY); + await callCheck(stableBalance, stableBalance); + } + + // reports 0 and 1 evicted (deposits=1000, wVault=500 gone) + // new baseline = report 2 with zero deposits/withdrawals + // adjusted = stableBalance -> expectedMaxDiff based on stableBalance only + await 
setRefSlot(baseRefSlot); + await expect(callCheck(stableBalance, postCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); }); }); - context("OracleReportSanityChecker additional balance decrease check", () => { - it("works for IncorrectCLBalanceDecrease", async () => { - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("320"), ether("300"), 0, 0, 0, 10, 10), - ) - .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") - .withArgs(20n * ether("1"), 10n * ether("1") + 10n * ether("0.101")); + context("OracleReportSanityChecker day-one attack", () => { + let genesisTime: bigint; + let baseRefSlot: bigint; + + before(async () => { + genesisTime = await accountingOracle.GENESIS_TIME(); + const timestamp = (await ethers.provider.getBlock("latest"))!.timestamp; + baseRefSlot = (BigInt(timestamp) - genesisTime) / 12n; }); - it("works as accumulation for IncorrectCLBalanceDecrease", async () => { - const genesisTime = await accountingOracle.GENESIS_TIME(); - const timestamp = await getCurrentBlockTimestamp(); - const refSlot = (timestamp - genesisTime) / 12n; - const prevRefSlot = refSlot - SLOTS_PER_DAY; + const setRefSlot = (slot: bigint) => accountingOracle.setLastProcessingRefSlot(slot); - await accountingOracle.setLastProcessingRefSlot(prevRefSlot); - await checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, ether("320"), ether("310"), 0, 0, 0, 10, 10); + it("3.6% on day 1 passes, repeated 3.6% on day 2 reverts", async () => { + const baseline = ether("10000"); + const day1PostCL = baseline - maxDiffFor(baseline); + const day2PostCL = day1PostCL - maxDiffFor(day1PostCL); - await accountingOracle.setLastProcessingRefSlot(refSlot); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("310"), ether("300"), 0, 0, 0, 10, 10), - ) + await setRefSlot(baseRefSlot - 2n * SLOTS_PER_DAY); + await 
callCheck(baseline, baseline); + + // day 1: exactly at limit, passes + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await expect(callCheck(baseline, day1PostCL)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot - SLOTS_PER_DAY, day1PostCL, maxDiffFor(baseline), maxDiffFor(baseline)); + + // day 2: cumulative baseline -> day2PostCL ≈ 7.2% > 3.6% limit + await setRefSlot(baseRefSlot); + await expect(callCheck(day1PostCL, day2PostCL)).to.be.revertedWithCustomError( + checker, + "IncorrectCLBalanceDecrease", + ); + }); + + it("small daily decreases accumulate and trigger revert", async () => { + const baseline = ether("10000"); + const dailyDecrease = ether("100"); + const numReports = 5; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - BigInt(numReports) * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // 3 reports of 1% decrease each: cumulative 3% < 3.6% limit + let currentBalance = baseline; + for (let i = 1; i <= 3; i++) { + const newBalance = currentBalance - dailyDecrease; + await setRefSlot(baseRefSlot - BigInt(numReports - i) * SLOTS_PER_DAY); + await callCheck(currentBalance, newBalance); + currentBalance = newBalance; + } + + // 4th decrease: cumulativeDiff = 4 × dailyDecrease (4%) + // > expectedMaxDiff (3.6%) + const cumulativeDiff = baseline - (currentBalance - dailyDecrease); + await setRefSlot(baseRefSlot); + await expect(callCheck(currentBalance, currentBalance - dailyDecrease)) .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") - .withArgs(20n * ether("1"), 10n * ether("1") + 10n * ether("0.101")); + .withArgs(cumulativeDiff, expectedMaxDiff); + }); + }); + + context("OracleReportSanityChecker edge cases", () => { + let genesisTime: bigint; + let baseRefSlot: bigint; + + before(async () => { + genesisTime = await accountingOracle.GENESIS_TIME(); + const timestamp = (await ethers.provider.getBlock("latest"))!.timestamp; + baseRefSlot = (BigInt(timestamp) - 
genesisTime) / 12n; + }); + + const setRefSlot = (slot: bigint) => accountingOracle.setLastProcessingRefSlot(slot); + + it("maxCLBalanceDecreaseBP = 0 forbids any decrease", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); + await checker.grantRole(role, deployer.address); + await checker.setMaxCLBalanceDecreaseBP(0); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(ether("10000"), ether("10000")); + + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("10000"), ether("10000") - 1n)).to.be.revertedWithCustomError( + checker, + "IncorrectCLBalanceDecrease", + ); + }); + + it("maxCLBalanceDecreaseBP = 10000 allows any decrease", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); + await checker.grantRole(role, deployer.address); + await checker.setMaxCLBalanceDecreaseBP(10000); + + const baseline = ether("10000"); + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, ether("1"))) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, ether("1"), baseline - ether("1"), baseline); + }); + + it("reverts with IncorrectCLBalanceDecreaseWindowData when stored withdrawals exceed adjusted balance", async () => { + const baseline = ether("100"); + const hugeWithdrawals = baseline + 1n; + + await setRefSlot(baseRefSlot - 3n * SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, 0n, baseline, 0n, baseline); + + // A tiny follow-up withdrawal pushes the cumulative window withdrawals above the baseline. 
+ await setRefSlot(baseRefSlot - 1n); + await callCheck(1n, 0n, 1n, 0n, 1n); + + // adjusted = baseline + 0 - hugeWithdrawals -> invalid window inputs for subtraction + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("80"), ether("50"))) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecreaseWindowData") + .withArgs(baseline, 0n, hugeWithdrawals); + }); + + it("reverts with IncorrectCLWithdrawalsVaultBalance when reported vault balance is below previous post-transfer state", async () => { + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + // Leave 200 ETH in the vault after the report so the next report cannot go below it. + await callCheck(ether("200"), 0n, ether("200"), 0n, 0n); + + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("100"), ether("100"), ether("199"), 0n, 0n)) + .to.be.revertedWithCustomError(checker, "IncorrectCLWithdrawalsVaultBalance") + .withArgs(ether("199"), ether("200")); + }); + + it("reverts with IncorrectWithdrawalsVaultTransfer when transfer exceeds reported vault balance", async () => { + await setRefSlot(baseRefSlot); + await expect(callCheck(ether("100"), ether("100"), ether("100"), 0n, ether("101"))) + .to.be.revertedWithCustomError(checker, "IncorrectWithdrawalsVaultTransfer") + .withArgs(ether("100"), ether("101")); + }); + + it("large balances (36M ETH) do not cause overflow", async () => { + const totalCLBalance = ether("36000000"); + const depositAmount = ether("1000000"); + const decrease = maxDiffFor(totalCLBalance); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheckWithPendingDeposits(totalCLBalance + depositAmount, totalCLBalance, depositAmount); + + const postCL = totalCLBalance - decrease; + await setRefSlot(baseRefSlot); + await expect(callCheckWithPendingDeposits(postCL + depositAmount, postCL, depositAmount)).not.to.be.reverted; + }); + + it("getReportDataCount returns correct count after reports", async () => { + expect(await 
checker.getReportDataCount()).to.equal(0); + + await callCheck(ether("10000"), ether("10000")); + expect(await checker.getReportDataCount()).to.equal(1); + + await callCheck(ether("10000"), ether("10000")); + expect(await checker.getReportDataCount()).to.equal(2); + + await callCheck(ether("10000"), ether("10000")); + expect(await checker.getReportDataCount()).to.equal(3); }); + it("second opinion oracle is not consulted when decrease is within limit", async () => { + await deploySecondOpinionOracle(); + + const baseline = ether("10000"); + const postCL = ether("9700"); + const actualDiff = baseline - postCL; + const expectedMaxDiff = maxDiffFor(baseline); + + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(baseline, baseline); + + // actualDiff < expectedMaxDiff -> within limit -> + // Accepted (not Confirmed via second opinion) + await setRefSlot(baseRefSlot); + const tx = callCheck(baseline, postCL); + await expect(tx) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + await expect(tx).not.to.emit(checker, "NegativeCLRebaseConfirmed"); + }); + }); + + context("OracleReportSanityChecker setMaxCLBalanceDecreaseBP validation", () => { + it("accepts 0", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); + await checker.grantRole(role, deployer.address); + await expect(checker.setMaxCLBalanceDecreaseBP(0)).not.to.be.reverted; + }); + + it("accepts 10000 (MAX_BASIS_POINTS)", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); + await checker.grantRole(role, deployer.address); + await expect(checker.setMaxCLBalanceDecreaseBP(10000)).not.to.be.reverted; + }); + + it("reverts for 10001 with IncorrectLimitValue", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); + await checker.grantRole(role, deployer.address); + await expect(checker.setMaxCLBalanceDecreaseBP(10001)) + 
.to.be.revertedWithCustomError(checker, "IncorrectLimitValue") + .withArgs(10001, 0, 10000); + }); + + it("emits MaxCLBalanceDecreaseBPSet event on change", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); + await checker.grantRole(role, deployer.address); + await expect(checker.setMaxCLBalanceDecreaseBP(500)).to.emit(checker, "MaxCLBalanceDecreaseBPSet").withArgs(500); + }); + }); + + context("OracleReportSanityChecker second opinion oracle", () => { + let genesisTime: bigint; + let baseRefSlot: bigint; + + before(async () => { + genesisTime = await accountingOracle.GENESIS_TIME(); + const timestamp = (await ethers.provider.getBlock("latest"))!.timestamp; + baseRefSlot = (BigInt(timestamp) - genesisTime) / 12n; + }); + + const setRefSlot = (slot: bigint) => accountingOracle.setLastProcessingRefSlot(slot); + it("works for happy path and report is not ready", async () => { - const genesisTime = await accountingOracle.GENESIS_TIME(); - const timestamp = await getCurrentBlockTimestamp(); - const refSlot = (timestamp - genesisTime) / 12n; + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(ether("10000"), ether("10000")); - await accountingOracle.setLastProcessingRefSlot(refSlot); + await setRefSlot(baseRefSlot); - // Expect to pass through - await checker.connect(accountingSigner).checkAccountingOracleReport(0, 96 * 1e9, 96 * 1e9, 0, 0, 0, 10, 10); + await callCheck(ether("10000"), ether("9700")); const secondOpinionOracle = await deploySecondOpinionOracle(); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("330"), ether("300"), 0, 0, 0, 10, 10), - ).to.be.revertedWithCustomError(checker, "NegativeRebaseFailedSecondOpinionReportIsNotReady"); + await expect(callCheck(ether("10000"), ether("9500"))).to.be.revertedWithCustomError( + checker, + "NegativeRebaseFailedSecondOpinionReportIsNotReady", + ); - await secondOpinionOracle.addReport(refSlot, { + await 
secondOpinionOracle.addReport(baseRefSlot, { success: true, - clBalanceGwei: parseUnits("300", "gwei"), + clBalanceGwei: parseUnits("9500", "gwei"), withdrawalVaultBalanceWei: 0, numValidators: 0, exitedValidators: 0, }); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("330"), ether("300"), 0, 0, 0, 10, 10), - ) + await expect(callCheck(ether("10000"), ether("9500"))) .to.emit(checker, "NegativeCLRebaseConfirmed") - .withArgs(refSlot, ether("300"), ether("0")); - }); - - it("works with staking router reports exited validators at day 18 and 54", async () => { - const genesisTime = await accountingOracle.GENESIS_TIME(); - const timestamp = await getCurrentBlockTimestamp(); - const refSlot = (timestamp - genesisTime) / 12n; - - const refSlot17 = refSlot - 17n * SLOTS_PER_DAY; - const refSlot18 = refSlot - 18n * SLOTS_PER_DAY; - const refSlot54 = refSlot - 54n * SLOTS_PER_DAY; - const refSlot55 = refSlot - 55n * SLOTS_PER_DAY; - - await stakingRouter.mock__addStakingModuleExitedValidators(1, 1); - await accountingOracle.setLastProcessingRefSlot(refSlot55); - await checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, ether("320"), ether("320"), 0, 0, 0, 10, 10); - - await stakingRouter.mock__removeStakingModule(1); - await stakingRouter.mock__addStakingModuleExitedValidators(1, 2); - await accountingOracle.setLastProcessingRefSlot(refSlot54); - await checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, ether("320"), ether("320"), 0, 0, 0, 10, 10); - - await stakingRouter.mock__removeStakingModule(1); - await stakingRouter.mock__addStakingModuleExitedValidators(1, 3); - await accountingOracle.setLastProcessingRefSlot(refSlot18); - await checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, ether("320"), ether("320"), 0, 0, 0, 10, 10); - - await accountingOracle.setLastProcessingRefSlot(refSlot17); - await checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, 
ether("320"), ether("315"), 0, 0, 0, 10, 10); - - await accountingOracle.setLastProcessingRefSlot(refSlot); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("315"), ether("300"), 0, 0, 0, 10, 10), - ) - .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") - .withArgs(20n * ether("1"), 7n * ether("1") + 8n * ether("0.101")); + .withArgs(baseRefSlot, ether("9500"), ether("0")); }); it("works for reports close together", async () => { - const genesisTime = await accountingOracle.GENESIS_TIME(); - const timestamp = await getCurrentBlockTimestamp(); - const refSlot = (timestamp - genesisTime) / 12n; + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(ether("10000"), ether("10000")); - await accountingOracle.setLastProcessingRefSlot(refSlot); + await setRefSlot(baseRefSlot); const secondOpinionOracle = await deploySecondOpinionOracle(); - // Second opinion balance is way bigger than general Oracle's (~1%) - await secondOpinionOracle.addReport(refSlot, { + // second opinion balance diverges too much (~1%) -> revert + await secondOpinionOracle.addReport(baseRefSlot, { success: true, - clBalanceGwei: parseUnits("302", "gwei"), + clBalanceGwei: parseUnits("9600", "gwei"), withdrawalVaultBalanceWei: 0, numValidators: 0, exitedValidators: 0, }); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("330"), ether("299"), 0, 0, 0, 10, 10), - ) + await expect(callCheck(ether("10000"), ether("9500"))) .to.be.revertedWithCustomError(checker, "NegativeRebaseFailedCLBalanceMismatch") - .withArgs(ether("299"), ether("302"), anyValue); + .withArgs(ether("9500"), ether("9600"), anyValue); - // Second opinion balance is almost equal general Oracle's (<0.74%) - should pass - await secondOpinionOracle.addReport(refSlot, { + // second opinion balance within margin (<0.74%) -> passes + await secondOpinionOracle.addReport(baseRefSlot, { success: true, - clBalanceGwei: parseUnits("301", 
"gwei"), + clBalanceGwei: parseUnits("9510", "gwei"), withdrawalVaultBalanceWei: 0, numValidators: 0, exitedValidators: 0, }); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, ether("330"), ether("299"), 0, 0, 0, 10, 10), - ) + await expect(callCheck(ether("10000"), ether("9500"))) .to.emit(checker, "NegativeCLRebaseConfirmed") - .withArgs(refSlot, ether("299"), ether("0")); + .withArgs(baseRefSlot, ether("9500"), ether("0")); - // Second opinion balance is slightly less than general Oracle's (0.01%) - should fail - await secondOpinionOracle.addReport(refSlot, { + // second opinion balance higher than reported -> revert + await secondOpinionOracle.addReport(baseRefSlot, { success: true, - clBalanceGwei: 100, + clBalanceGwei: parseUnits("9800", "gwei"), withdrawalVaultBalanceWei: 0, numValidators: 0, exitedValidators: 0, }); - await expect( - checker.connect(accountingSigner).checkAccountingOracleReport(0, 110 * 1e9, 100.01 * 1e9, 0, 0, 0, 10, 10), - ) + await expect(callCheck(ether("10000"), ether("9500"))) .to.be.revertedWithCustomError(checker, "NegativeRebaseFailedCLBalanceMismatch") - .withArgs(100.01 * 1e9, 100 * 1e9, anyValue); + .withArgs(ether("9500"), ether("9800"), anyValue); }); it("works for reports with incorrect withdrawal vault balance", async () => { - const genesisTime = await accountingOracle.GENESIS_TIME(); - const timestamp = await getCurrentBlockTimestamp(); - const refSlot = (timestamp - genesisTime) / 12n; + await setRefSlot(baseRefSlot - SLOTS_PER_DAY); + await callCheck(ether("10000"), ether("10000")); - await accountingOracle.setLastProcessingRefSlot(refSlot); + await setRefSlot(baseRefSlot); const secondOpinionOracle = await deploySecondOpinionOracle(); - // Second opinion balance is almost equal general Oracle's (<0.74%) and withdrawal value is the same - should pass - await secondOpinionOracle.addReport(refSlot, { + // withdrawal vault matches -> passes + await secondOpinionOracle.addReport(baseRefSlot, 
{ success: true, - clBalanceGwei: parseUnits("300", "gwei"), + clBalanceGwei: parseUnits("9500", "gwei"), withdrawalVaultBalanceWei: ether("1"), numValidators: 0, exitedValidators: 0, }); - await expect( - checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, ether("330"), ether("299"), ether("1"), 0, 0, 10, 10), - ) + await expect(callCheck(ether("10000"), ether("9500"), ether("1"))) .to.emit(checker, "NegativeCLRebaseConfirmed") - .withArgs(refSlot, ether("299"), ether("1")); + .withArgs(baseRefSlot, ether("9500"), ether("1")); - // Second opinion withdrawal vault balance is different - should fail - await secondOpinionOracle.addReport(refSlot, { + // withdrawal vault mismatch -> revert + await secondOpinionOracle.addReport(baseRefSlot, { success: true, - clBalanceGwei: parseUnits("300", "gwei"), + clBalanceGwei: parseUnits("9500", "gwei"), withdrawalVaultBalanceWei: 0, numValidators: 0, exitedValidators: 0, }); - await expect( - checker - .connect(accountingSigner) - .checkAccountingOracleReport(0, ether("330"), ether("299"), ether("1"), 0, 0, 10, 10), - ) + await expect(callCheck(ether("10000"), ether("9500"), ether("1"))) .to.be.revertedWithCustomError(checker, "NegativeRebaseFailedWithdrawalVaultBalanceMismatch") .withArgs(ether("1"), 0); }); }); context("OracleReportSanityChecker roles", () => { - it("CL Oracle related functions require INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE", async () => { - const role = await checker.INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE(); + it("setMaxCLBalanceDecreaseBP requires MAX_CL_BALANCE_DECREASE_MANAGER_ROLE", async () => { + const role = await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(); - await expect(checker.setInitialSlashingAndPenaltiesAmount(0, 0)).to.be.revertedWithOZAccessControlError( + await expect(checker.setMaxCLBalanceDecreaseBP(500)).to.be.revertedWithOZAccessControlError( deployer.address, role, ); await checker.grantRole(role, deployer.address); - await 
expect(checker.setInitialSlashingAndPenaltiesAmount(1000, 101)).to.not.be.reverted; + await expect(checker.setMaxCLBalanceDecreaseBP(500)).to.not.be.reverted; }); - it("CL Oracle related functions require SECOND_OPINION_MANAGER_ROLE", async () => { + it("SECOND_OPINION_MANAGER_ROLE works", async () => { const clOraclesRole = await checker.SECOND_OPINION_MANAGER_ROLE(); await expect( @@ -485,4 +1127,94 @@ describe("OracleReportSanityChecker.sol:negative-rebase", () => { await expect(checker.setSecondOpinionOracleAndCLBalanceUpperMargin(ZeroAddress, 74)).to.not.be.reverted; }); }); + + context("OracleReportSanityChecker migrateBaselineSnapshot", () => { + const CHURN_LIMIT = ether("57600"); + + let genesisTime: bigint; + let baseRefSlot: bigint; + + before(async () => { + genesisTime = await accountingOracle.GENESIS_TIME(); + const timestamp = (await ethers.provider.getBlock("latest"))!.timestamp; + baseRefSlot = (BigInt(timestamp) - genesisTime) / 12n; + }); + + const setRefSlot = (slot: bigint) => accountingOracle.setLastProcessingRefSlot(slot); + + it("is permissionless before migration completes", async () => { + await lido.mock__setContractVersion(4); + await expect(checker.migrateBaselineSnapshot()).not.to.be.reverted; + }); + + it("reverts with UnexpectedLidoVersion when version != 4", async () => { + await lido.mock__setContractVersion(3); + await expect(checker.migrateBaselineSnapshot()) + .to.be.revertedWithCustomError(checker, "UnexpectedLidoVersion") + .withArgs(3, 4); + }); + + it("seeds baseline and bootstrap entries in reportData and emits event", async () => { + const clActive = ether("10000000"); + const clPending = ether("500000"); + const deposits = ether("320000"); + const depositsCur = ether("320000"); + await lido.mock__setContractVersion(4); + await lido.mock__setBalanceStats(clActive, clPending, deposits, depositsCur); + + const expectedCLBalance = clActive + clPending; + + await expect(checker.migrateBaselineSnapshot()) + .to.emit(checker, 
"BaselineSnapshotMigrated") + .withArgs(expectedCLBalance, deposits, CHURN_LIMIT); + + expect(await checker.getReportDataCount()).to.equal(2); + + const baselineData = await checker.reportData(0); + expect(baselineData.timestamp).to.equal(0n); + expect(baselineData.clBalance).to.equal(expectedCLBalance); + expect(baselineData.deposits).to.equal(0); + expect(baselineData.clWithdrawals).to.equal(0); + + const bootstrapFlowData = await checker.reportData(1); + expect(bootstrapFlowData.timestamp).to.equal(0n); + expect(bootstrapFlowData.clBalance).to.equal(expectedCLBalance); + expect(bootstrapFlowData.deposits).to.equal(deposits); + expect(bootstrapFlowData.clWithdrawals).to.equal(CHURN_LIMIT); + }); + + it("reverts with MigrationAlreadyDone on second call", async () => { + await lido.mock__setContractVersion(4); + await lido.mock__setBalanceStats(ether("10000000"), ether("500000"), ether("320000"), ether("320000")); + + await checker.migrateBaselineSnapshot(); + await expect(checker.migrateBaselineSnapshot()).to.be.revertedWithCustomError(checker, "MigrationAlreadyDone"); + }); + + it("after migration, decrease within limit passes", async () => { + const clActive = ether("10000000"); + const clPending = ether("500000"); + const migrationDeposits = ether("320000"); + const migrationDepositsCur = ether("320000"); + await lido.mock__setContractVersion(4); + await lido.mock__setBalanceStats(clActive, clPending, migrationDeposits, migrationDepositsCur); + + await checker.migrateBaselineSnapshot(); + + // reportData[0] = baseline point with zero flows + // reportData[1] = bootstrap flow chunk with migration deposits/withdrawals + const baseline = clActive + clPending; + const postCL = ether("10200000"); + const actualDiff = baseline - postCL; + const adjusted = baseline + migrationDeposits - CHURN_LIMIT; + const expectedMaxDiff = maxDiffFor(adjusted); + + // Pass the actual vault balance as WVB since migration initialized _lastVaultBalanceAfterTransfer + const vaultBalance 
= await ethers.provider.getBalance(withdrawalVault.address); + await setRefSlot(baseRefSlot); + await expect(callCheck(baseline, postCL, vaultBalance)) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(baseRefSlot, postCL, actualDiff, expectedMaxDiff); + }); + }); }); diff --git a/test/0.8.9/sanityChecker/oracleReportSanityChecker.test.ts b/test/0.8.9/sanityChecker/oracleReportSanityChecker.test.ts index e26e066ab3..c88784f98c 100644 --- a/test/0.8.9/sanityChecker/oracleReportSanityChecker.test.ts +++ b/test/0.8.9/sanityChecker/oracleReportSanityChecker.test.ts @@ -11,18 +11,19 @@ import { Burner__MockForSanityChecker, LidoLocator__MockForSanityChecker, OracleReportSanityChecker, + OracleReportSanityCheckerWrapper, StakingRouter__MockForSanityChecker, WithdrawalQueue__MockForSanityChecker, } from "typechain-types"; -import { ether, getCurrentBlockTimestamp, impersonate, randomAddress } from "lib"; +import { ether, impersonate } from "lib"; import { TOTAL_BASIS_POINTS } from "lib/constants"; import { Snapshot } from "test/suite"; -const MAX_UINT16 = BigInt(2 ** 16); -const MAX_UINT32 = BigInt(2 ** 32); -const MAX_UINT64 = BigInt(2 ** 64); +const OVER_UINT16 = 1n << 16n; +const OVER_UINT32 = 1n << 32n; +const OVER_UINT64 = 1n << 64n; describe("OracleReportSanityChecker.sol", () => { let checker: OracleReportSanityChecker; @@ -35,41 +36,30 @@ describe("OracleReportSanityChecker.sol", () => { let accountingOracle: AccountingOracle__MockForSanityChecker; let withdrawalVault: HardhatEthersSigner; + let deployer: HardhatEthersSigner; + let admin: HardhatEthersSigner; + let elRewardsVault: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + let manager: HardhatEthersSigner; const defaultLimits = { - exitedValidatorsPerDayLimit: 55n, - appearedValidatorsPerDayLimit: 100n, - annualBalanceIncreaseBPLimit: 10_00n, // 10% - simulatedShareRateDeviationBPLimit: 2_50n, // 2.5% - maxValidatorExitRequestsPerReport: 2000n, + exitedEthAmountPerDayLimit: 55n, + 
appearedEthAmountPerDayLimit: 100n, + annualBalanceIncreaseBPLimit: 1_000n, + simulatedShareRateDeviationBPLimit: 250n, + maxBalanceExitRequestedPerReportInEth: 65_000n, + maxEffectiveBalanceWeightWCType01: 32n, + maxEffectiveBalanceWeightWCType02: 2_048n, maxItemsPerExtraDataTransaction: 15n, maxNodeOperatorsPerExtraDataItem: 16n, requestTimestampMargin: 128n, - maxPositiveTokenRebase: 5_000_000n, // 0.05% - initialSlashingAmountPWei: 1000n, - inactivityPenaltiesAmountPWei: 101n, - clBalanceOraclesErrorUpperBPLimit: 50n, // 0.5% - }; - - const correctOracleReport = { - timeElapsed: 24n * 60n * 60n, - preCLBalance: ether("100000"), - postCLBalance: ether("100001"), - withdrawalVaultBalance: 0n, - elRewardsVaultBalance: 0n, - sharesRequestedToBurn: 0n, - preCLValidators: 0n, - postCLValidators: 0n, - etherToLockForWithdrawals: 0n, + maxPositiveTokenRebase: 5_000_000n, + maxCLBalanceDecreaseBP: 360n, + clBalanceOraclesErrorUpperBPLimit: 50n, + consolidationEthAmountPerDayLimit: 10n, + exitedValidatorEthAmountLimit: 1n, }; - let deployer: HardhatEthersSigner; - let admin: HardhatEthersSigner; - let elRewardsVault: HardhatEthersSigner; - - let stranger: HardhatEthersSigner; - let manager: HardhatEthersSigner; - let originalState: string; before(async () => { @@ -83,548 +73,1805 @@ describe("OracleReportSanityChecker.sol", () => { accountingOracle = await ethers.deployContract("AccountingOracle__MockForSanityChecker", [ deployer.address, - 12, // seconds per slot - 1606824023, // genesis time + 12, + 1_606_824_023, ]); stakingRouter = await ethers.deployContract("StakingRouter__MockForSanityChecker"); locator = await ethers.deployContract("LidoLocator__MockForSanityChecker", [ { - lido: deployer, - depositSecurityModule: deployer, - elRewardsVault: elRewardsVault, - accountingOracle: accountingOracle, - oracleReportSanityChecker: deployer, - burner: burner, - validatorsExitBusOracle: deployer, - stakingRouter: stakingRouter, - treasury: deployer, - withdrawalQueue: 
withdrawalQueue, - withdrawalVault: withdrawalVault, - postTokenRebaseReceiver: deployer, - oracleDaemonConfig: deployer, - validatorExitDelayVerifier: deployer, - triggerableWithdrawalsGateway: deployer, - accounting: accounting, - predepositGuarantee: deployer, - wstETH: deployer, - vaultHub: deployer, - vaultFactory: deployer, - lazyOracle: deployer, - operatorGrid: deployer, + lido: deployer.address, + depositSecurityModule: deployer.address, + elRewardsVault: elRewardsVault.address, + accountingOracle: await accountingOracle.getAddress(), + oracleReportSanityChecker: deployer.address, + burner: await burner.getAddress(), + validatorsExitBusOracle: deployer.address, + stakingRouter: await stakingRouter.getAddress(), + treasury: deployer.address, + withdrawalQueue: await withdrawalQueue.getAddress(), + withdrawalVault: withdrawalVault.address, + postTokenRebaseReceiver: deployer.address, + oracleDaemonConfig: deployer.address, + validatorExitDelayVerifier: deployer.address, + triggerableWithdrawalsGateway: deployer.address, + consolidationGateway: deployer.address, + accounting: await accounting.getAddress(), + predepositGuarantee: deployer.address, + wstETH: deployer.address, + vaultHub: deployer.address, + vaultFactory: deployer.address, + lazyOracle: deployer.address, + operatorGrid: deployer.address, + topUpGateway: deployer.address, }, ]); checker = await ethers.deployContract("OracleReportSanityChecker", [ - locator, - accountingOracle, - accounting, - admin, + await locator.getAddress(), + await accounting.getAddress(), + admin.address, defaultLimits, ]); }); - beforeEach(async () => (originalState = await Snapshot.take())); + beforeEach(async () => { + originalState = await Snapshot.take(); + }); + + afterEach(async () => { + await Snapshot.restore(originalState); + }); + + const deployCheckerWithLidoStats = async ( + contractVersion: bigint, + balanceStats: { clActive: bigint; clPending: bigint; deposits: bigint; depositsCurrent: bigint } = { + 
clActive: ether("100"), + clPending: ether("7"), + deposits: ether("3"), + depositsCurrent: ether("3"), + }, + ) => { + const lido = await ethers.deployContract("Lido__MockForSanityChecker"); + await lido.mock__setContractVersion(contractVersion); + await lido.mock__setBalanceStats( + balanceStats.clActive, + balanceStats.clPending, + balanceStats.deposits, + balanceStats.depositsCurrent, + ); + + const migrationLocator = await ethers.deployContract("LidoLocator__MockForSanityChecker", [ + { + lido: await lido.getAddress(), + depositSecurityModule: deployer.address, + elRewardsVault: elRewardsVault.address, + accountingOracle: await accountingOracle.getAddress(), + oracleReportSanityChecker: deployer.address, + burner: await burner.getAddress(), + validatorsExitBusOracle: deployer.address, + stakingRouter: await stakingRouter.getAddress(), + treasury: deployer.address, + withdrawalQueue: await withdrawalQueue.getAddress(), + withdrawalVault: withdrawalVault.address, + postTokenRebaseReceiver: deployer.address, + oracleDaemonConfig: deployer.address, + validatorExitDelayVerifier: deployer.address, + triggerableWithdrawalsGateway: deployer.address, + consolidationGateway: deployer.address, + accounting: await accounting.getAddress(), + predepositGuarantee: deployer.address, + wstETH: deployer.address, + vaultHub: deployer.address, + vaultFactory: deployer.address, + lazyOracle: deployer.address, + operatorGrid: deployer.address, + topUpGateway: deployer.address, + }, + ]); + + const checkerWithLidoStats = await ethers.deployContract("OracleReportSanityChecker", [ + await migrationLocator.getAddress(), + await accounting.getAddress(), + admin.address, + defaultLimits, + ]); - afterEach(async () => await Snapshot.restore(originalState)); + return { checkerWithLidoStats, lido }; + }; - context("constructor", () => { - it("reverts if admin address is zero", async () => { + context("constructor and getters", () => { + it("reverts if admin is zero", async () => { await 
expect( ethers.deployContract("OracleReportSanityChecker", [ - locator, - accountingOracle, - accounting, + await locator.getAddress(), + await accounting.getAddress(), ZeroAddress, defaultLimits, ]), ).to.be.revertedWithCustomError(checker, "AdminCannotBeZero"); }); - }); - - context("getReportDataCount", () => { - it("retrieves correct report data count", async () => { - expect(await checker.getReportDataCount()).to.equal(0); - }); - }); - context("getLidoLocator", () => { - it("retrieves correct locator address", async () => { - expect(await checker.getLidoLocator()).to.equal(locator); - }); - }); + it("returns locator and initial limits", async () => { + expect(await checker.getLidoLocator()).to.equal(await locator.getAddress()); - context("getOracleReportLimits", () => { - it("retrieves correct oracle report limits", async () => { const limits = await checker.getOracleReportLimits(); - expect(limits.exitedValidatorsPerDayLimit).to.equal(defaultLimits.exitedValidatorsPerDayLimit); - expect(limits.appearedValidatorsPerDayLimit).to.equal(defaultLimits.appearedValidatorsPerDayLimit); + expect(limits.exitedEthAmountPerDayLimit).to.equal(defaultLimits.exitedEthAmountPerDayLimit); + expect(limits.appearedEthAmountPerDayLimit).to.equal(defaultLimits.appearedEthAmountPerDayLimit); expect(limits.annualBalanceIncreaseBPLimit).to.equal(defaultLimits.annualBalanceIncreaseBPLimit); - expect(limits.maxValidatorExitRequestsPerReport).to.equal(defaultLimits.maxValidatorExitRequestsPerReport); + expect(limits.simulatedShareRateDeviationBPLimit).to.equal(defaultLimits.simulatedShareRateDeviationBPLimit); + expect(limits.maxBalanceExitRequestedPerReportInEth).to.equal( + defaultLimits.maxBalanceExitRequestedPerReportInEth, + ); + expect(limits.maxEffectiveBalanceWeightWCType01).to.equal(defaultLimits.maxEffectiveBalanceWeightWCType01); + expect(limits.maxEffectiveBalanceWeightWCType02).to.equal(defaultLimits.maxEffectiveBalanceWeightWCType02); 
expect(limits.maxItemsPerExtraDataTransaction).to.equal(defaultLimits.maxItemsPerExtraDataTransaction); expect(limits.maxNodeOperatorsPerExtraDataItem).to.equal(defaultLimits.maxNodeOperatorsPerExtraDataItem); expect(limits.requestTimestampMargin).to.equal(defaultLimits.requestTimestampMargin); expect(limits.maxPositiveTokenRebase).to.equal(defaultLimits.maxPositiveTokenRebase); + expect(limits.maxCLBalanceDecreaseBP).to.equal(defaultLimits.maxCLBalanceDecreaseBP); expect(limits.clBalanceOraclesErrorUpperBPLimit).to.equal(defaultLimits.clBalanceOraclesErrorUpperBPLimit); - expect(limits.initialSlashingAmountPWei).to.equal(defaultLimits.initialSlashingAmountPWei); - expect(limits.inactivityPenaltiesAmountPWei).to.equal(defaultLimits.inactivityPenaltiesAmountPWei); + expect(limits.consolidationEthAmountPerDayLimit).to.equal(defaultLimits.consolidationEthAmountPerDayLimit); + expect(limits.exitedValidatorEthAmountLimit).to.equal(defaultLimits.exitedValidatorEthAmountLimit); }); - }); - context("getMaxPositiveTokenRebase", () => { - it("returns correct max positive token rebase", async () => { + it("returns max positive token rebase and max CL decrease BP", async () => { expect(await checker.getMaxPositiveTokenRebase()).to.equal(defaultLimits.maxPositiveTokenRebase); + expect(await checker.getMaxCLBalanceDecreaseBP()).to.equal(defaultLimits.maxCLBalanceDecreaseBP); + expect(await checker.getMaxEffectiveBalanceWeightWCType01()).to.equal( + defaultLimits.maxEffectiveBalanceWeightWCType01, + ); + expect(await checker.getMaxEffectiveBalanceWeightWCType02()).to.equal( + defaultLimits.maxEffectiveBalanceWeightWCType02, + ); }); }); - context("setOracleReportLimits", () => { - const newLimits = { - exitedValidatorsPerDayLimit: 50, - appearedValidatorsPerDayLimit: 75, - annualBalanceIncreaseBPLimit: 15_00, - simulatedShareRateDeviationBPLimit: 1_50, // 1.5% - maxValidatorExitRequestsPerReport: 3000, - maxItemsPerExtraDataTransaction: 15 + 1, - maxNodeOperatorsPerExtraDataItem: 
16 + 1, - requestTimestampMargin: 2048, - maxPositiveTokenRebase: 10_000_000, - initialSlashingAmountPWei: 2000, - inactivityPenaltiesAmountPWei: 303, - clBalanceOraclesErrorUpperBPLimit: 12, - }; - - before(async () => { - await checker.connect(admin).grantRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager); - }); + context("limits management", () => { + it("setOracleReportLimits: ACL and update", async () => { + const newLimits = { + ...defaultLimits, + exitedEthAmountPerDayLimit: 42n, + appearedEthAmountPerDayLimit: 88n, + consolidationEthAmountPerDayLimit: 7n, + exitedValidatorEthAmountLimit: 2n, + }; - after(async () => { - await checker.connect(admin).revokeRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager); - }); + await checker.connect(admin).grantRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager.address); - it("reverts if called by non-manager", async () => { await expect( checker.connect(stranger).setOracleReportLimits(newLimits, ZeroAddress), ).to.be.revertedWithOZAccessControlError(stranger.address, await checker.ALL_LIMITS_MANAGER_ROLE()); + + await expect(checker.connect(manager).setOracleReportLimits(newLimits, ZeroAddress)) + .to.emit(checker, "ExitedEthAmountPerDayLimitSet") + .withArgs(42n) + .to.emit(checker, "AppearedEthAmountPerDayLimitSet") + .withArgs(88n) + .to.emit(checker, "ConsolidationEthAmountPerDayLimitSet") + .withArgs(7n) + .to.emit(checker, "ExitedValidatorEthAmountLimitSet") + .withArgs(2n); + + const limits = await checker.getOracleReportLimits(); + expect(limits.exitedEthAmountPerDayLimit).to.equal(42n); + expect(limits.appearedEthAmountPerDayLimit).to.equal(88n); + expect(limits.consolidationEthAmountPerDayLimit).to.equal(7n); + expect(limits.exitedValidatorEthAmountLimit).to.equal(2n); }); - it("sets limits correctly", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.exitedValidatorsPerDayLimit).to.not.equal(newLimits.exitedValidatorsPerDayLimit); - 
expect(before.appearedValidatorsPerDayLimit).to.not.equal(newLimits.appearedValidatorsPerDayLimit); - expect(before.annualBalanceIncreaseBPLimit).to.not.equal(newLimits.annualBalanceIncreaseBPLimit); - expect(before.maxValidatorExitRequestsPerReport).to.not.equal(newLimits.maxValidatorExitRequestsPerReport); - expect(before.maxItemsPerExtraDataTransaction).to.not.equal(newLimits.maxItemsPerExtraDataTransaction); - expect(before.maxNodeOperatorsPerExtraDataItem).to.not.equal(newLimits.maxNodeOperatorsPerExtraDataItem); - expect(before.requestTimestampMargin).to.not.equal(newLimits.requestTimestampMargin); - expect(before.maxPositiveTokenRebase).to.not.equal(newLimits.maxPositiveTokenRebase); - expect(before.clBalanceOraclesErrorUpperBPLimit).to.not.equal(newLimits.clBalanceOraclesErrorUpperBPLimit); - expect(before.initialSlashingAmountPWei).to.not.equal(newLimits.initialSlashingAmountPWei); - expect(before.inactivityPenaltiesAmountPWei).to.not.equal(newLimits.inactivityPenaltiesAmountPWei); - - await checker.connect(manager).setOracleReportLimits(newLimits, ZeroAddress); - - const after = await checker.getOracleReportLimits(); - expect(after.exitedValidatorsPerDayLimit).to.equal(newLimits.exitedValidatorsPerDayLimit); - expect(after.appearedValidatorsPerDayLimit).to.equal(newLimits.appearedValidatorsPerDayLimit); - expect(after.annualBalanceIncreaseBPLimit).to.equal(newLimits.annualBalanceIncreaseBPLimit); - expect(after.maxValidatorExitRequestsPerReport).to.equal(newLimits.maxValidatorExitRequestsPerReport); - expect(after.maxItemsPerExtraDataTransaction).to.equal(newLimits.maxItemsPerExtraDataTransaction); - expect(after.maxNodeOperatorsPerExtraDataItem).to.equal(newLimits.maxNodeOperatorsPerExtraDataItem); - expect(after.requestTimestampMargin).to.equal(newLimits.requestTimestampMargin); - expect(after.maxPositiveTokenRebase).to.equal(newLimits.maxPositiveTokenRebase); - 
expect(after.clBalanceOraclesErrorUpperBPLimit).to.equal(newLimits.clBalanceOraclesErrorUpperBPLimit); - expect(after.initialSlashingAmountPWei).to.equal(newLimits.initialSlashingAmountPWei); - expect(after.inactivityPenaltiesAmountPWei).to.equal(newLimits.inactivityPenaltiesAmountPWei); - expect(after.clBalanceOraclesErrorUpperBPLimit).to.equal(newLimits.clBalanceOraclesErrorUpperBPLimit); - }); - - it("sets second opinion oracle", async () => { - const secondOpinionOracle = randomAddress(); - await expect(checker.connect(manager).setOracleReportLimits(newLimits, secondOpinionOracle)) - .to.emit(checker, "SecondOpinionOracleChanged") - .withArgs(secondOpinionOracle); + it("setExitedEthAmountPerDayLimit: validates bounds", async () => { + await checker + .connect(admin) + .grantRole(await checker.EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + + await expect(checker.connect(manager).setExitedEthAmountPerDayLimit(OVER_UINT32)).to.be.revertedWithCustomError( + checker, + "IncorrectLimitValue", + ); + + await expect(checker.connect(manager).setExitedEthAmountPerDayLimit(60n)) + .to.emit(checker, "ExitedEthAmountPerDayLimitSet") + .withArgs(60n); - expect(await checker.secondOpinionOracle()).to.equal(secondOpinionOracle); + expect((await checker.getOracleReportLimits()).exitedEthAmountPerDayLimit).to.equal(60n); }); - }); - context("setExitedValidatorsPerDayLimit", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), manager); + it("setExitedEthAmountPerDayLimit: ACL", async () => { + await expect(checker.connect(stranger).setExitedEthAmountPerDayLimit(60n)).to.be.revertedWithOZAccessControlError( + stranger.address, + await checker.EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), + ); + }); + + it("sets exited/appeared ETH limits via dedicated setters", async () => { + await checker + .connect(admin) + .grantRole(await 
checker.EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + await checker + .connect(admin) + .grantRole(await checker.APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + + await checker.connect(manager).setExitedEthAmountPerDayLimit(61n); + await checker.connect(manager).setAppearedEthAmountPerDayLimit(101n); + + const limits = await checker.getOracleReportLimits(); + expect(limits.exitedEthAmountPerDayLimit).to.equal(61n); + expect(limits.appearedEthAmountPerDayLimit).to.equal(101n); + }); + + it("dedicated exited/appeared ETH setters emit events", async () => { + await checker + .connect(admin) + .grantRole(await checker.EXITED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + await checker + .connect(admin) + .grantRole(await checker.APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + + await expect(checker.connect(manager).setExitedEthAmountPerDayLimit(62n)) + .to.emit(checker, "ExitedEthAmountPerDayLimitSet") + .withArgs(62n); + await expect(checker.connect(manager).setAppearedEthAmountPerDayLimit(102n)) + .to.emit(checker, "AppearedEthAmountPerDayLimitSet") + .withArgs(102n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), manager); + it("setExitedValidatorEthAmountLimit: validates min and updates", async () => { + await checker + .connect(admin) + .grantRole(await checker.EXITED_VALIDATOR_ETH_AMOUNT_LIMIT_MANAGER_ROLE(), manager.address); + + await expect(checker.connect(manager).setExitedValidatorEthAmountLimit(0n)).to.be.revertedWithCustomError( + checker, + "IncorrectLimitValue", + ); + + await expect(checker.connect(manager).setExitedValidatorEthAmountLimit(3n)) + .to.emit(checker, "ExitedValidatorEthAmountLimitSet") + .withArgs(3n); + + expect((await checker.getOracleReportLimits()).exitedValidatorEthAmountLimit).to.equal(3n); }); - it("reverts if called by non-manager", async () => { + 
it("setExitedValidatorEthAmountLimit: ACL", async () => { await expect( - checker.connect(stranger).setExitedValidatorsPerDayLimit(100n), + checker.connect(stranger).setExitedValidatorEthAmountLimit(2n), ).to.be.revertedWithOZAccessControlError( stranger.address, - await checker.EXITED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), + await checker.EXITED_VALIDATOR_ETH_AMOUNT_LIMIT_MANAGER_ROLE(), ); }); - it("reverts if limit is greater than max", async () => { - await expect(checker.connect(manager).setExitedValidatorsPerDayLimit(MAX_UINT16)).to.be.revertedWithCustomError( + it("setExitedValidatorEthAmountLimit: validates uint16 upper bound", async () => { + await checker + .connect(admin) + .grantRole(await checker.EXITED_VALIDATOR_ETH_AMOUNT_LIMIT_MANAGER_ROLE(), manager.address); + + await expect( + checker.connect(manager).setExitedValidatorEthAmountLimit(OVER_UINT16), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + }); + + it("setRequestTimestampMargin validates uint32 bound", async () => { + await checker.connect(admin).grantRole(await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), manager.address); + + await expect(checker.connect(manager).setRequestTimestampMargin(OVER_UINT32)).to.be.revertedWithCustomError( checker, "IncorrectLimitValue", ); }); - it("sets limit correctly and emits `ExitedValidatorsPerDayLimitSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.exitedValidatorsPerDayLimit).to.not.equal(100n); + it("setSecondOpinionOracleAndCLBalanceUpperMargin updates oracle and limit", async () => { + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); - await expect(checker.connect(manager).setExitedValidatorsPerDayLimit(100n)) - .to.emit(checker, "ExitedValidatorsPerDayLimitSet") - .withArgs(100n); - - const after = await checker.getOracleReportLimits(); - expect(after.exitedValidatorsPerDayLimit).to.equal(100n); - }); - }); + const 
secondOpinion = deployer.address; + await expect(checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(secondOpinion, 99n)) + .to.emit(checker, "SecondOpinionOracleChanged") + .withArgs(secondOpinion) + .to.emit(checker, "CLBalanceOraclesErrorUpperBPLimitSet") + .withArgs(99n); - context("setAppearedValidatorsPerDayLimit", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), manager); + expect(await checker.secondOpinionOracle()).to.equal(secondOpinion); + expect((await checker.getOracleReportLimits()).clBalanceOraclesErrorUpperBPLimit).to.equal(99n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), manager); - }); + it("setAppearedEthAmountPerDayLimit: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); - it("reverts if called by non-manager", async () => { await expect( - checker.connect(stranger).setAppearedValidatorsPerDayLimit(101n), + checker.connect(stranger).setAppearedEthAmountPerDayLimit(120n), ).to.be.revertedWithOZAccessControlError( stranger.address, - await checker.APPEARED_VALIDATORS_PER_DAY_LIMIT_MANAGER_ROLE(), + await checker.APPEARED_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { - await expect(checker.connect(manager).setAppearedValidatorsPerDayLimit(MAX_UINT16)).to.be.revertedWithCustomError( + await expect(checker.connect(manager).setAppearedEthAmountPerDayLimit(OVER_UINT32)).to.be.revertedWithCustomError( checker, "IncorrectLimitValue", ); + + await expect(checker.connect(manager).setAppearedEthAmountPerDayLimit(120n)) + .to.emit(checker, "AppearedEthAmountPerDayLimitSet") + .withArgs(120n); + + expect((await checker.getOracleReportLimits()).appearedEthAmountPerDayLimit).to.equal(120n); 
}); - it("sets limit correctly and emits `AppearedValidatorsPerDayLimitSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.appearedValidatorsPerDayLimit).to.not.equal(101n); + it("setConsolidationEthAmountPerDayLimit: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.CONSOLIDATION_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), manager.address); + + await expect( + checker.connect(stranger).setConsolidationEthAmountPerDayLimit(11n), + ).to.be.revertedWithOZAccessControlError( + stranger.address, + await checker.CONSOLIDATION_ETH_AMOUNT_PER_DAY_LIMIT_MANAGER_ROLE(), + ); - await expect(checker.connect(manager).setAppearedValidatorsPerDayLimit(101n)) - .to.emit(checker, "AppearedValidatorsPerDayLimitSet") - .withArgs(101n); + await expect( + checker.connect(manager).setConsolidationEthAmountPerDayLimit(OVER_UINT32), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - const after = await checker.getOracleReportLimits(); - expect(after.appearedValidatorsPerDayLimit).to.equal(101n); - }); - }); + await expect(checker.connect(manager).setConsolidationEthAmountPerDayLimit(11n)) + .to.emit(checker, "ConsolidationEthAmountPerDayLimitSet") + .withArgs(11n); - context("setAnnualBalanceIncreaseBPLimit", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE(), manager); + expect((await checker.getOracleReportLimits()).consolidationEthAmountPerDayLimit).to.equal(11n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE(), manager); - }); + it("setAnnualBalanceIncreaseBPLimit: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE(), manager.address); - it("reverts if called by non-manager", async () => { await expect( - 
checker.connect(stranger).setAnnualBalanceIncreaseBPLimit(100n), + checker.connect(stranger).setAnnualBalanceIncreaseBPLimit(250n), ).to.be.revertedWithOZAccessControlError( stranger.address, await checker.ANNUAL_BALANCE_INCREASE_LIMIT_MANAGER_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { await expect( checker.connect(manager).setAnnualBalanceIncreaseBPLimit(TOTAL_BASIS_POINTS + 1n), ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - }); - - it("sets limit correctly and emits `AnnualBalanceIncreaseBPLimitSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.annualBalanceIncreaseBPLimit).to.not.equal(100n); - await expect(checker.connect(manager).setAnnualBalanceIncreaseBPLimit(100n)) + await expect(checker.connect(manager).setAnnualBalanceIncreaseBPLimit(250n)) .to.emit(checker, "AnnualBalanceIncreaseBPLimitSet") - .withArgs(100n); - - const after = await checker.getOracleReportLimits(); - expect(after.annualBalanceIncreaseBPLimit).to.equal(100n); - }); - }); + .withArgs(250n); - context("setMaxExitRequestsPerOracleReport", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE(), manager); + expect((await checker.getOracleReportLimits()).annualBalanceIncreaseBPLimit).to.equal(250n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE(), manager); - }); + it("setSimulatedShareRateDeviationBPLimit: ACL, bounds and update", async () => { + await checker.connect(admin).grantRole(await checker.SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE(), manager.address); - it("reverts if called by non-manager", async () => { await expect( - checker.connect(stranger).setMaxExitRequestsPerOracleReport(100n), + checker.connect(stranger).setSimulatedShareRateDeviationBPLimit(300n), ).to.be.revertedWithOZAccessControlError( stranger.address, - await 
checker.MAX_VALIDATOR_EXIT_REQUESTS_PER_REPORT_ROLE(), + await checker.SHARE_RATE_DEVIATION_LIMIT_MANAGER_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { await expect( - checker.connect(manager).setMaxExitRequestsPerOracleReport(MAX_UINT16), + checker.connect(manager).setSimulatedShareRateDeviationBPLimit(TOTAL_BASIS_POINTS + 1n), ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + + await expect(checker.connect(manager).setSimulatedShareRateDeviationBPLimit(300n)) + .to.emit(checker, "SimulatedShareRateDeviationBPLimitSet") + .withArgs(300n); + + expect((await checker.getOracleReportLimits()).simulatedShareRateDeviationBPLimit).to.equal(300n); }); - it("sets limit correctly and emits `MaxValidatorExitRequestsPerReportSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.maxValidatorExitRequestsPerReport).to.not.equal(100n); + it("setMaxBalanceExitRequestedPerReportInEth: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), manager.address); - await expect(checker.connect(manager).setMaxExitRequestsPerOracleReport(100n)) - .to.emit(checker, "MaxValidatorExitRequestsPerReportSet") - .withArgs(100n); + await expect( + checker.connect(stranger).setMaxBalanceExitRequestedPerReportInEth(60_000n), + ).to.be.revertedWithOZAccessControlError( + stranger.address, + await checker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), + ); - const after = await checker.getOracleReportLimits(); - expect(after.maxValidatorExitRequestsPerReport).to.equal(100n); - }); - }); + await expect( + checker.connect(manager).setMaxBalanceExitRequestedPerReportInEth(OVER_UINT16), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - context("setRequestTimestampMargin", () => { - before(async () => { - await checker.connect(admin).grantRole(await 
checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), manager); + await expect(checker.connect(manager).setMaxBalanceExitRequestedPerReportInEth(60_000n)) + .to.emit(checker, "MaxBalanceExitRequestedPerReportInEthSet") + .withArgs(60_000n); + + expect((await checker.getOracleReportLimits()).maxBalanceExitRequestedPerReportInEth).to.equal(60_000n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), manager); + it("setMaxBalanceExitRequestedPerReportInEth accepts zero", async () => { + await checker + .connect(admin) + .grantRole(await checker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), manager.address); + + await expect(checker.connect(manager).setMaxBalanceExitRequestedPerReportInEth(0n)) + .to.emit(checker, "MaxBalanceExitRequestedPerReportInEthSet") + .withArgs(0n); + + expect((await checker.getOracleReportLimits()).maxBalanceExitRequestedPerReportInEth).to.equal(0n); }); - it("reverts if called by non-manager", async () => { - await expect(checker.connect(stranger).setRequestTimestampMargin(100n)).to.be.revertedWithOZAccessControlError( + it("setMaxEffectiveBalanceWeightWCType01: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), manager.address); + + await expect( + checker.connect(stranger).setMaxEffectiveBalanceWeightWCType01(64n), + ).to.be.revertedWithOZAccessControlError( stranger.address, - await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), + await checker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { - await expect(checker.connect(manager).setRequestTimestampMargin(MAX_UINT32)).to.be.revertedWithCustomError( + await expect(checker.connect(manager).setMaxEffectiveBalanceWeightWCType01(0n)).to.be.revertedWithCustomError( checker, "IncorrectLimitValue", ); - }); - it("sets limit correctly and emits 
`RequestTimestampMarginSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.requestTimestampMargin).to.not.equal(100n); - - await expect(checker.connect(manager).setRequestTimestampMargin(100n)) - .to.emit(checker, "RequestTimestampMarginSet") - .withArgs(100n); + await expect( + checker.connect(manager).setMaxEffectiveBalanceWeightWCType01(OVER_UINT16), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - const after = await checker.getOracleReportLimits(); - expect(after.requestTimestampMargin).to.equal(100n); - }); - }); + await expect(checker.connect(manager).setMaxEffectiveBalanceWeightWCType01(64n)) + .to.emit(checker, "MaxEffectiveBalanceWeightWCType01Set") + .withArgs(64n); - context("setMaxPositiveTokenRebase", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager); + expect((await checker.getOracleReportLimits()).maxEffectiveBalanceWeightWCType01).to.equal(64n); + expect(await checker.getMaxEffectiveBalanceWeightWCType01()).to.equal(64n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager); - }); + it("setMaxEffectiveBalanceWeightWCType02: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), manager.address); - it("reverts if called by non-manager", async () => { - await expect(checker.connect(stranger).setMaxPositiveTokenRebase(100n)).to.be.revertedWithOZAccessControlError( + await expect( + checker.connect(stranger).setMaxEffectiveBalanceWeightWCType02(4_096n), + ).to.be.revertedWithOZAccessControlError( stranger.address, - await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), + await checker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { - await 
expect(checker.connect(manager).setMaxPositiveTokenRebase(MAX_UINT64 + 1n)).to.be.revertedWithCustomError( + await expect(checker.connect(manager).setMaxEffectiveBalanceWeightWCType02(0n)).to.be.revertedWithCustomError( checker, "IncorrectLimitValue", ); - }); - it("reverts if limit is less than min", async () => { - await expect(checker.connect(manager).setMaxPositiveTokenRebase(0n)).to.be.revertedWithCustomError( - checker, - "IncorrectLimitValue", - ); + await expect( + checker.connect(manager).setMaxEffectiveBalanceWeightWCType02(OVER_UINT16), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + + await expect(checker.connect(manager).setMaxEffectiveBalanceWeightWCType02(4_096n)) + .to.emit(checker, "MaxEffectiveBalanceWeightWCType02Set") + .withArgs(4_096n); + + expect((await checker.getOracleReportLimits()).maxEffectiveBalanceWeightWCType02).to.equal(4_096n); + expect(await checker.getMaxEffectiveBalanceWeightWCType02()).to.equal(4_096n); }); - it("sets limit correctly and emits `MaxPositiveTokenRebaseSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.maxPositiveTokenRebase).to.not.equal(100n); + it("limit setters do not emit events when the value does not change", async () => { + await checker.connect(admin).grantRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager.address); + await checker + .connect(admin) + .grantRole(await checker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), manager.address); + await checker + .connect(admin) + .grantRole(await checker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), manager.address); - await expect(checker.connect(manager).setMaxPositiveTokenRebase(100n)) - .to.emit(checker, "MaxPositiveTokenRebaseSet") - .withArgs(100n); + await checker.connect(manager).setMaxPositiveTokenRebase(600_000n); + await expect(checker.connect(manager).setMaxPositiveTokenRebase(600_000n)).to.not.emit( + checker, + "MaxPositiveTokenRebaseSet", + ); - const 
after = await checker.getOracleReportLimits(); - expect(after.maxPositiveTokenRebase).to.equal(100n); - }); - }); + await checker.connect(manager).setMaxBalanceExitRequestedPerReportInEth(60_000n); + await expect(checker.connect(manager).setMaxBalanceExitRequestedPerReportInEth(60_000n)).to.not.emit( + checker, + "MaxBalanceExitRequestedPerReportInEthSet", + ); - context("setMaxItemsPerExtraDataTransaction", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE(), manager); + await checker.connect(manager).setMaxEffectiveBalanceWeightWCType01(64n); + await expect(checker.connect(manager).setMaxEffectiveBalanceWeightWCType01(64n)).to.not.emit( + checker, + "MaxEffectiveBalanceWeightWCType01Set", + ); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE(), manager); - }); + it("setMaxItemsPerExtraDataTransaction: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE(), manager.address); - it("reverts if called by non-manager", async () => { await expect( checker.connect(stranger).setMaxItemsPerExtraDataTransaction(100n), ).to.be.revertedWithOZAccessControlError( stranger.address, await checker.MAX_ITEMS_PER_EXTRA_DATA_TRANSACTION_ROLE(), ); - }); - it("reverts if limit is greater than max", async () => { await expect( - checker.connect(manager).setMaxItemsPerExtraDataTransaction(MAX_UINT16), + checker.connect(manager).setMaxItemsPerExtraDataTransaction(OVER_UINT16), ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - }); - it("sets limit correctly and emits `MaxItemsPerExtraDataTransactionSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.maxItemsPerExtraDataTransaction).to.not.equal(100n); - - await expect(checker.connect(manager).setMaxItemsPerExtraDataTransaction(100n)) + 
await expect(checker.connect(manager).setMaxItemsPerExtraDataTransaction(20n)) .to.emit(checker, "MaxItemsPerExtraDataTransactionSet") - .withArgs(100n); + .withArgs(20n); - const after = await checker.getOracleReportLimits(); - expect(after.maxItemsPerExtraDataTransaction).to.equal(100n); + expect((await checker.getOracleReportLimits()).maxItemsPerExtraDataTransaction).to.equal(20n); }); - }); - context("setMaxNodeOperatorsPerExtraDataItem", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE(), manager); + it("setMaxNodeOperatorsPerExtraDataItem: ACL, bounds and update", async () => { + await checker + .connect(admin) + .grantRole(await checker.MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE(), manager.address); + + await expect( + checker.connect(stranger).setMaxNodeOperatorsPerExtraDataItem(100n), + ).to.be.revertedWithOZAccessControlError( + stranger.address, + await checker.MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE(), + ); + + await expect( + checker.connect(manager).setMaxNodeOperatorsPerExtraDataItem(OVER_UINT16), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + + await expect(checker.connect(manager).setMaxNodeOperatorsPerExtraDataItem(20n)) + .to.emit(checker, "MaxNodeOperatorsPerExtraDataItemSet") + .withArgs(20n); + + expect((await checker.getOracleReportLimits()).maxNodeOperatorsPerExtraDataItem).to.equal(20n); }); - after(async () => { - await checker.connect(admin).revokeRole(await checker.MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE(), manager); + it("setRequestTimestampMargin updates value and emits event", async () => { + await checker.connect(admin).grantRole(await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), manager.address); + + await expect(checker.connect(stranger).setRequestTimestampMargin(512n)).to.be.revertedWithOZAccessControlError( + stranger.address, + await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), + ); + + await 
expect(checker.connect(manager).setRequestTimestampMargin(512n)) + .to.emit(checker, "RequestTimestampMarginSet") + .withArgs(512n); + + expect((await checker.getOracleReportLimits()).requestTimestampMargin).to.equal(512n); }); - it("reverts if called by non-manager", async () => { + it("setMaxPositiveTokenRebase: ACL, min/max and update", async () => { + await checker.connect(admin).grantRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager.address); + await expect( - checker.connect(stranger).setMaxNodeOperatorsPerExtraDataItem(100n), + checker.connect(stranger).setMaxPositiveTokenRebase(600_000n), ).to.be.revertedWithOZAccessControlError( stranger.address, - await checker.MAX_NODE_OPERATORS_PER_EXTRA_DATA_ITEM_ROLE(), + await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), + ); + + await expect(checker.connect(manager).setMaxPositiveTokenRebase(0n)).to.be.revertedWithCustomError( + checker, + "IncorrectLimitValue", + ); + await expect(checker.connect(manager).setMaxPositiveTokenRebase(OVER_UINT64)).to.be.revertedWithCustomError( + checker, + "IncorrectLimitValue", ); + + await expect(checker.connect(manager).setMaxPositiveTokenRebase(600_000n)) + .to.emit(checker, "MaxPositiveTokenRebaseSet") + .withArgs(600_000n); + + expect((await checker.getOracleReportLimits()).maxPositiveTokenRebase).to.equal(600_000n); }); - it("reverts if limit is greater than max", async () => { + it("setMaxCLBalanceDecreaseBP: ACL, bounds and update", async () => { + await checker.connect(admin).grantRole(await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(), manager.address); + + await expect(checker.connect(stranger).setMaxCLBalanceDecreaseBP(200n)).to.be.revertedWithOZAccessControlError( + stranger.address, + await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(), + ); + await expect( - checker.connect(manager).setMaxNodeOperatorsPerExtraDataItem(MAX_UINT16), + checker.connect(manager).setMaxCLBalanceDecreaseBP(TOTAL_BASIS_POINTS + 1n), 
).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - }); - it("sets limit correctly and emits `MaxNodeOperatorsPerExtraDataItemSet` event", async () => { - const before = await checker.getOracleReportLimits(); - expect(before.maxNodeOperatorsPerExtraDataItem).to.not.equal(100n); + await expect(checker.connect(manager).setMaxCLBalanceDecreaseBP(200n)) + .to.emit(checker, "MaxCLBalanceDecreaseBPSet") + .withArgs(200n); - await expect(checker.connect(manager).setMaxNodeOperatorsPerExtraDataItem(100n)) - .to.emit(checker, "MaxNodeOperatorsPerExtraDataItemSet") - .withArgs(100n); + expect((await checker.getOracleReportLimits()).maxCLBalanceDecreaseBP).to.equal(200n); + }); + + it("setSecondOpinionOracleAndCLBalanceUpperMargin validates basis points bound", async () => { + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); - const after = await checker.getOracleReportLimits(); - expect(after.maxNodeOperatorsPerExtraDataItem).to.equal(100n); + await expect( + checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(ZeroAddress, TOTAL_BASIS_POINTS + 1n), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); }); - }); - context("setSecondOpinionOracleAndCLBalanceUpperMargin", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager); + it("setSecondOpinionOracleAndCLBalanceUpperMargin does not emit oracle change for same address", async () => { + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + + const secondOpinion = deployer.address; + await checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(secondOpinion, 50n); + await expect( + checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(secondOpinion, 51n), + ).to.not.emit(checker, "SecondOpinionOracleChanged"); }); - after(async () => { - await 
checker.connect(admin).revokeRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager); + it("setOracleReportLimits rejects invalid exitedValidatorEthAmountLimit", async () => { + await checker.connect(admin).grantRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager.address); + + await expect( + checker + .connect(manager) + .setOracleReportLimits({ ...defaultLimits, exitedValidatorEthAmountLimit: 0n }, ZeroAddress), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + + await expect( + checker + .connect(manager) + .setOracleReportLimits({ ...defaultLimits, exitedValidatorEthAmountLimit: OVER_UINT16 }, ZeroAddress), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); }); - it("reverts if called by non-manager", async () => { + it("setOracleReportLimits rejects invalid annualBalanceIncreaseBPLimit", async () => { + await checker.connect(admin).grantRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager.address); + await expect( - checker.connect(stranger).setSecondOpinionOracleAndCLBalanceUpperMargin(ZeroAddress, 100n), - ).to.be.revertedWithOZAccessControlError(stranger.address, await checker.SECOND_OPINION_MANAGER_ROLE()); + checker + .connect(manager) + .setOracleReportLimits( + { ...defaultLimits, annualBalanceIncreaseBPLimit: TOTAL_BASIS_POINTS + 1n }, + ZeroAddress, + ), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); }); - it("reverts if limit is greater than max", async () => { + it("setOracleReportLimits rejects invalid maxEffectiveBalanceWeight values", async () => { + await checker.connect(admin).grantRole(await checker.ALL_LIMITS_MANAGER_ROLE(), manager.address); + await expect( - checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(ZeroAddress, TOTAL_BASIS_POINTS + 1n), + checker + .connect(manager) + .setOracleReportLimits({ ...defaultLimits, maxEffectiveBalanceWeightWCType01: 0n }, ZeroAddress), + ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + + await expect( + 
checker + .connect(manager) + .setOracleReportLimits({ ...defaultLimits, maxEffectiveBalanceWeightWCType02: OVER_UINT16 }, ZeroAddress), ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); }); - it("sets limit correctly and emits `CLBalanceOraclesErrorUpperBPLimitSet` event", async () => { - await expect(checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(ZeroAddress, 100n)) - .to.emit(checker, "CLBalanceOraclesErrorUpperBPLimitSet") - .withArgs(100n); + it("roundtrips limits at packed type boundaries", async () => { + const wrapper = (await ethers.deployContract("OracleReportSanityCheckerWrapper", [ + await locator.getAddress(), + await accounting.getAddress(), + admin.address, + defaultLimits, + true, + ])) as OracleReportSanityCheckerWrapper; + + const maxPackedLimits = { + exitedEthAmountPerDayLimit: OVER_UINT32 - 1n, + appearedEthAmountPerDayLimit: OVER_UINT32 - 1n, + annualBalanceIncreaseBPLimit: TOTAL_BASIS_POINTS, + simulatedShareRateDeviationBPLimit: TOTAL_BASIS_POINTS, + maxBalanceExitRequestedPerReportInEth: OVER_UINT16 - 1n, + maxEffectiveBalanceWeightWCType01: OVER_UINT16 - 1n, + maxEffectiveBalanceWeightWCType02: OVER_UINT16 - 1n, + maxItemsPerExtraDataTransaction: OVER_UINT16 - 1n, + maxNodeOperatorsPerExtraDataItem: OVER_UINT16 - 1n, + requestTimestampMargin: OVER_UINT32 - 1n, + maxPositiveTokenRebase: OVER_UINT64 - 1n, + maxCLBalanceDecreaseBP: TOTAL_BASIS_POINTS, + clBalanceOraclesErrorUpperBPLimit: TOTAL_BASIS_POINTS, + consolidationEthAmountPerDayLimit: OVER_UINT32 - 1n, + exitedValidatorEthAmountLimit: OVER_UINT16 - 1n, + }; + + const roundtrip = await wrapper.roundtripRawLimits(maxPackedLimits); + + expect(roundtrip.exitedEthAmountPerDayLimit).to.equal(maxPackedLimits.exitedEthAmountPerDayLimit); + expect(roundtrip.appearedEthAmountPerDayLimit).to.equal(maxPackedLimits.appearedEthAmountPerDayLimit); + expect(roundtrip.annualBalanceIncreaseBPLimit).to.equal(maxPackedLimits.annualBalanceIncreaseBPLimit); + 
expect(roundtrip.simulatedShareRateDeviationBPLimit).to.equal(maxPackedLimits.simulatedShareRateDeviationBPLimit); + expect(roundtrip.maxBalanceExitRequestedPerReportInEth).to.equal( + maxPackedLimits.maxBalanceExitRequestedPerReportInEth, + ); + expect(roundtrip.maxEffectiveBalanceWeightWCType01).to.equal(maxPackedLimits.maxEffectiveBalanceWeightWCType01); + expect(roundtrip.maxEffectiveBalanceWeightWCType02).to.equal(maxPackedLimits.maxEffectiveBalanceWeightWCType02); + expect(roundtrip.maxItemsPerExtraDataTransaction).to.equal(maxPackedLimits.maxItemsPerExtraDataTransaction); + expect(roundtrip.maxNodeOperatorsPerExtraDataItem).to.equal(maxPackedLimits.maxNodeOperatorsPerExtraDataItem); + expect(roundtrip.requestTimestampMargin).to.equal(maxPackedLimits.requestTimestampMargin); + expect(roundtrip.maxPositiveTokenRebase).to.equal(maxPackedLimits.maxPositiveTokenRebase); + expect(roundtrip.maxCLBalanceDecreaseBP).to.equal(maxPackedLimits.maxCLBalanceDecreaseBP); + expect(roundtrip.clBalanceOraclesErrorUpperBPLimit).to.equal(maxPackedLimits.clBalanceOraclesErrorUpperBPLimit); + expect(roundtrip.consolidationEthAmountPerDayLimit).to.equal(maxPackedLimits.consolidationEthAmountPerDayLimit); + expect(roundtrip.exitedValidatorEthAmountLimit).to.equal(maxPackedLimits.exitedValidatorEthAmountLimit); + }); + + it("packAndStore caches packed limits in wrapper storage", async () => { + const wrapper = (await ethers.deployContract("OracleReportSanityCheckerWrapper", [ + await locator.getAddress(), + await accounting.getAddress(), + admin.address, + defaultLimits, + true, + ])) as OracleReportSanityCheckerWrapper; + + await wrapper.packAndStore(); + + const accountingPacked = await wrapper.exposeAccountingCorePackedLimits(); + expect(accountingPacked.exitedEthAmountPerDayLimit).to.equal(defaultLimits.exitedEthAmountPerDayLimit); + expect(accountingPacked.appearedEthAmountPerDayLimit).to.equal(defaultLimits.appearedEthAmountPerDayLimit); + 
expect(accountingPacked.consolidationEthAmountPerDayLimit).to.equal( + defaultLimits.consolidationEthAmountPerDayLimit, + ); + expect(accountingPacked.exitedValidatorEthAmountLimit).to.equal(defaultLimits.exitedValidatorEthAmountLimit); + expect(accountingPacked.annualBalanceIncreaseBPLimit).to.equal(defaultLimits.annualBalanceIncreaseBPLimit); + expect(accountingPacked.simulatedShareRateDeviationBPLimit).to.equal( + defaultLimits.simulatedShareRateDeviationBPLimit, + ); + expect(accountingPacked.maxPositiveTokenRebase).to.equal(defaultLimits.maxPositiveTokenRebase); + expect(accountingPacked.maxCLBalanceDecreaseBP).to.equal(defaultLimits.maxCLBalanceDecreaseBP); + expect(accountingPacked.clBalanceOraclesErrorUpperBPLimit).to.equal( + defaultLimits.clBalanceOraclesErrorUpperBPLimit, + ); + + const operationalPacked = await wrapper.exposeOperationalPackedLimits(); + expect(operationalPacked.maxBalanceExitRequestedPerReportInEth).to.equal( + defaultLimits.maxBalanceExitRequestedPerReportInEth, + ); + expect(operationalPacked.maxEffectiveBalanceWeightWCType01).to.equal( + defaultLimits.maxEffectiveBalanceWeightWCType01, + ); + expect(operationalPacked.maxEffectiveBalanceWeightWCType02).to.equal( + defaultLimits.maxEffectiveBalanceWeightWCType02, + ); + expect(operationalPacked.maxItemsPerExtraDataTransaction).to.equal(defaultLimits.maxItemsPerExtraDataTransaction); + expect(operationalPacked.maxNodeOperatorsPerExtraDataItem).to.equal( + defaultLimits.maxNodeOperatorsPerExtraDataItem, + ); + expect(operationalPacked.requestTimestampMargin).to.equal(defaultLimits.requestTimestampMargin); + }); + + it("slot-local setters do not affect the other packed storage block", async () => { + await checker.connect(admin).grantRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager.address); + await checker + .connect(admin) + .grantRole(await checker.MAX_BALANCE_EXIT_REQUESTED_PER_REPORT_IN_ETH_ROLE(), manager.address); + await checker + .connect(admin) + 
.grantRole(await checker.MAX_EFFECTIVE_BALANCE_WEIGHTS_MANAGER_ROLE(), manager.address); + + const initialLimits = await checker.getOracleReportLimits(); + + await checker.connect(manager).setMaxEffectiveBalanceWeightWCType01(64n); + const afterOperationalUpdate = await checker.getOracleReportLimits(); + expect(afterOperationalUpdate.maxEffectiveBalanceWeightWCType01).to.equal(64n); + expect(afterOperationalUpdate.maxPositiveTokenRebase).to.equal(initialLimits.maxPositiveTokenRebase); + expect(afterOperationalUpdate.exitedEthAmountPerDayLimit).to.equal(initialLimits.exitedEthAmountPerDayLimit); + expect(afterOperationalUpdate.consolidationEthAmountPerDayLimit).to.equal( + initialLimits.consolidationEthAmountPerDayLimit, + ); + + await checker.connect(manager).setMaxPositiveTokenRebase(600_000n); + const afterAccountingUpdate = await checker.getOracleReportLimits(); + expect(afterAccountingUpdate.maxPositiveTokenRebase).to.equal(600_000n); + expect(afterAccountingUpdate.maxEffectiveBalanceWeightWCType01).to.equal( + afterOperationalUpdate.maxEffectiveBalanceWeightWCType01, + ); + expect(afterAccountingUpdate.requestTimestampMargin).to.equal(afterOperationalUpdate.requestTimestampMargin); + expect(afterAccountingUpdate.maxItemsPerExtraDataTransaction).to.equal( + afterOperationalUpdate.maxItemsPerExtraDataTransaction, + ); }); - it("changes the second opinion oracle if it is new", async () => { - const secondOpinionOracle = randomAddress(); - await expect(checker.connect(manager).setSecondOpinionOracleAndCLBalanceUpperMargin(secondOpinionOracle, 100n)) - .to.emit(checker, "SecondOpinionOracleChanged") - .withArgs(secondOpinionOracle); + it("packed limits helpers revert with BasisPointsOverflow on raw pack over MAX_BASIS_POINTS", async () => { + const wrapper = (await ethers.deployContract("OracleReportSanityCheckerWrapper", [ + await locator.getAddress(), + await accounting.getAddress(), + admin.address, + defaultLimits, + true, + ])) as 
OracleReportSanityCheckerWrapper; - expect(await checker.secondOpinionOracle()).to.equal(secondOpinionOracle); + const malformedLimits = { + ...defaultLimits, + annualBalanceIncreaseBPLimit: TOTAL_BASIS_POINTS + 1n, + }; + + await expect(wrapper.packRawLimits(malformedLimits)) + .to.be.revertedWithCustomError(wrapper, "BasisPointsOverflow") + .withArgs(TOTAL_BASIS_POINTS + 1n, TOTAL_BASIS_POINTS); }); }); - context("setInitialSlashingAndPenaltiesAmount", () => { - before(async () => { - await checker.connect(admin).grantRole(await checker.INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE(), manager); - }); + context("standalone sanity checks", () => { + it("checkExitBusOracleReport", async () => { + const limit = (await checker.getOracleReportLimits()).maxBalanceExitRequestedPerReportInEth; + + await expect(checker.checkExitBusOracleReport(limit)).not.to.be.reverted; + await expect(checker.checkExitBusOracleReport(limit + 1n)) + .to.be.revertedWithCustomError(checker, "IncorrectSumOfExitBalancePerReport") + .withArgs(limit + 1n); + }); + + it("checkExitBusOracleReport allows zero and below-limit values", async () => { + const limit = (await checker.getOracleReportLimits()).maxBalanceExitRequestedPerReportInEth; + await expect(checker.checkExitBusOracleReport(0n)).not.to.be.reverted; + await expect(checker.checkExitBusOracleReport(limit - 1n)).not.to.be.reverted; + }); + + it("checkExitedEthAmountPerDay uses timeElapsed (seconds)", async () => { + const limits = await checker.getOracleReportLimits(); + const limitWithConsolidationInWei = + (limits.exitedEthAmountPerDayLimit + limits.consolidationEthAmountPerDayLimit) * ether("1"); + const oneDay = 24n * 60n * 60n; + const exitedValidatorEthAmountLimit = limits.exitedValidatorEthAmountLimit; + const exitedValidatorEthAmountLimitInWei = exitedValidatorEthAmountLimit * ether("1"); + + await expect(checker.checkExitedEthAmountPerDay(0n, oneDay)).not.to.be.reverted; + + const exitedValidatorsCountForDailyExceededRevert = + 
limitWithConsolidationInWei / exitedValidatorEthAmountLimitInWei + 1n; + const exitedPerDayForDailyExceededRevert = + exitedValidatorsCountForDailyExceededRevert * exitedValidatorEthAmountLimitInWei; + + await expect(checker.checkExitedEthAmountPerDay(exitedValidatorsCountForDailyExceededRevert, oneDay)) + .to.be.revertedWithCustomError(checker, "ExitedEthAmountPerDayLimitExceeded") + .withArgs(limitWithConsolidationInWei, exitedPerDayForDailyExceededRevert); + + const exitedPerDayForOneValidatorAndZeroTime = exitedValidatorEthAmountLimitInWei * 86_400n; + const exitedValidatorsCountForGuaranteedRevert = + limitWithConsolidationInWei / exitedPerDayForOneValidatorAndZeroTime + 1n; + const exitedPerDayForGuaranteedRevert = + exitedValidatorsCountForGuaranteedRevert * exitedPerDayForOneValidatorAndZeroTime; + + await expect(checker.checkExitedEthAmountPerDay(exitedValidatorsCountForGuaranteedRevert, 0n)) + .to.be.revertedWithCustomError(checker, "ExitedEthAmountPerDayLimitExceeded") + .withArgs(limitWithConsolidationInWei, exitedPerDayForGuaranteedRevert); + }); + + it("checkAppearedEthAmountPerDay includes consolidation limit", async () => { + const limits = await checker.getOracleReportLimits(); + const limitWithConsolidationInWei = + (limits.appearedEthAmountPerDayLimit + limits.consolidationEthAmountPerDayLimit) * ether("1"); + + await expect(checker.checkAppearedEthAmountPerDay(0n)).not.to.be.reverted; + + const guaranteedExceededAppearedPerDayValue = limitWithConsolidationInWei + 1n; + + await expect(checker.checkAppearedEthAmountPerDay(guaranteedExceededAppearedPerDayValue)) + .to.be.revertedWithCustomError(checker, "AppearedEthAmountPerDayLimitExceeded") + .withArgs(limitWithConsolidationInWei, guaranteedExceededAppearedPerDayValue); + }); + + it("checkAppearedEthAmountPerDay allows exact configured limit", async () => { + const limits = await checker.getOracleReportLimits(); + const limitWithConsolidationInWei = + (limits.appearedEthAmountPerDayLimit + 
limits.consolidationEthAmountPerDayLimit) * ether("1"); + + await expect(checker.checkAppearedEthAmountPerDay(limitWithConsolidationInWei)).not.to.be.reverted; + }); + + it("checkNodeOperatorsPerExtraDataItemCount", async () => { + const limit = (await checker.getOracleReportLimits()).maxNodeOperatorsPerExtraDataItem; + await expect(checker.checkNodeOperatorsPerExtraDataItemCount(12n, limit)).not.to.be.reverted; + + await expect(checker.checkNodeOperatorsPerExtraDataItemCount(12n, limit + 1n)) + .to.be.revertedWithCustomError(checker, "TooManyNodeOpsPerExtraDataItem") + .withArgs(12n, limit + 1n); + }); + + it("checkExtraDataItemsCountPerTransaction", async () => { + const limit = (await checker.getOracleReportLimits()).maxItemsPerExtraDataTransaction; + await expect(checker.checkExtraDataItemsCountPerTransaction(limit)).not.to.be.reverted; + + await expect(checker.checkExtraDataItemsCountPerTransaction(limit + 1n)) + .to.be.revertedWithCustomError(checker, "TooManyItemsPerExtraDataTransaction") + .withArgs(limit, limit + 1n); + }); + + it("checkWithdrawalQueueOracleReport", async () => { + const now = 1_700_000_000n; + const margin = (await checker.getOracleReportLimits()).requestTimestampMargin; + + const oldRequestId = 1n; + const newRequestId = 2n; + + const oldTs = now - margin; + const newTs = now - margin / 2n; + + await withdrawalQueue.setRequestTimestamp(oldRequestId, oldTs); + await withdrawalQueue.setRequestTimestamp(newRequestId, newTs); + + await expect(checker.checkWithdrawalQueueOracleReport(oldRequestId, now)).not.to.be.reverted; + + await expect(checker.checkWithdrawalQueueOracleReport(newRequestId, now)) + .to.be.revertedWithCustomError(checker, "IncorrectRequestFinalization") + .withArgs(newTs); + }); + + context("checkCLPendingBalanceIncrease cold start", () => { + const oneDay = 24n * 60n * 60n; + const noDeposits = 0n; + const unexpectedPendingWei = 1n; + const coldStartDepositsWei = ether("200"); + const largeColdStartDepositsWei = 
ether("1000000"); + const firstDayAppearedLimitWei = defaultLimits.appearedEthAmountPerDayLimit * ether("1"); + const pendingAfterExactFirstDayActivationWei = coldStartDepositsWei - firstDayAppearedLimitWei; + const validatorsBeyondFirstDayLimitWei = firstDayAppearedLimitWei + 1n; + const pendingAfterExceededFirstDayActivationWei = pendingAfterExactFirstDayActivationWei - 1n; + + it("allows a zero-balance first report without deposits", async () => { + await expect(checker.checkCLPendingBalanceIncrease(oneDay, 0n, 0n, 0n, 0n, 0n, noDeposits)).not.to.be.reverted; + }); + + it("rejects a positive first report without deposits", async () => { + await expect(checker.checkCLPendingBalanceIncrease(oneDay, 0n, 0n, 0n, unexpectedPendingWei, 0n, noDeposits)) + .to.be.revertedWithCustomError(checker, "IncorrectTotalPendingBalance") + .withArgs(0n, unexpectedPendingWei); + }); + + it("allows the first-report total CL increase up to deposits", async () => { + await expect( + checker.checkCLPendingBalanceIncrease(oneDay, 0n, 0n, 0n, coldStartDepositsWei, 0n, coldStartDepositsWei), + ).not.to.be.reverted; + }); + + it("does not cap first-report deposits by annual growth allowance when they remain pending", async () => { + await expect( + checker.checkCLPendingBalanceIncrease( + oneDay, + 0n, + 0n, + 0n, + largeColdStartDepositsWei, + 0n, + largeColdStartDepositsWei, + ), + ).not.to.be.reverted; + }); + + it("limits first-report validator activation by appeared ETH amount per day", async () => { + await expect( + checker.checkCLPendingBalanceIncrease( + oneDay, + 0n, + 0n, + firstDayAppearedLimitWei, + pendingAfterExactFirstDayActivationWei, + 0n, + coldStartDepositsWei, + ), + ).not.to.be.reverted; + + await expect( + checker.checkCLPendingBalanceIncrease( + oneDay, + 0n, + 0n, + validatorsBeyondFirstDayLimitWei, + pendingAfterExceededFirstDayActivationWei, + 0n, + coldStartDepositsWei, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalActivatedBalance") + 
.withArgs(firstDayAppearedLimitWei, firstDayAppearedLimitWei + 1n); + }); + }); + + context("checkCLPendingBalanceIncrease with existing state", () => { + const oneDay = 24n * 60n * 60n; + const previousValidatorsWei = ether("3650"); + const previousPendingWei = ether("2"); + const allowedActivationWei = ether("1"); + const excessiveActivationWei = ether("2"); + + it("allows a non-cold-start report within the pending corridor", async () => { + await expect( + checker.checkCLPendingBalanceIncrease( + oneDay, + previousValidatorsWei, + previousPendingWei, + previousValidatorsWei + allowedActivationWei, + previousPendingWei - allowedActivationWei, + 0n, + 0n, + ), + ).not.to.be.reverted; + }); + + it("reverts with IncorrectTotalCLBalanceIncrease when validators growth exceeds the activated budget", async () => { + await expect( + checker.checkCLPendingBalanceIncrease( + oneDay, + previousValidatorsWei, + 0n, + previousValidatorsWei + excessiveActivationWei, + 0n, + 0n, + 0n, + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(ether("1"), excessiveActivationWei); + }); + + it("reverts with InvalidClBalancesData when CL withdrawals exceed previous validators balance", async () => { + await expect( + checker.checkCLPendingBalanceIncrease(oneDay, ether("10"), 0n, 0n, 0n, ether("11"), 0n), + ).to.be.revertedWithCustomError(checker, "InvalidClBalancesData"); + }); + }); + }); + + context("checkCLBalancesConsistency", () => { + it("reverts on array length mismatch", async () => { + await expect(checker.checkCLBalancesConsistency([1n], [], 10n)).to.be.revertedWithCustomError( + checker, + "InvalidClBalancesData", + ); + }); + + it("reverts when module sums are inconsistent", async () => { + await expect(checker.checkCLBalancesConsistency([1n, 2n], [10n, 20n], 40n)) + .to.be.revertedWithCustomError(checker, "InconsistentValidatorsBalanceByModule") + .withArgs(40n, 30n); + }); + + it("passes with consistent data", async () => { + 
await expect(checker.checkCLBalancesConsistency([1n, 2n], [10n, 20n], 30n)).not.to.be.reverted; + }); + + it("passes for empty arrays and zero totals", async () => { + await expect(checker.checkCLBalancesConsistency([], [], 0n)).not.to.be.reverted; + }); + }); + + context("checkAccountingOracleReport", () => { + const baseReport = { + timeElapsed: 24n * 60n * 60n, + preCLBalance: ether("100000"), + postCLBalance: ether("100001"), + preCLPendingBalance: 0n, + postCLPendingBalance: 0n, + withdrawalVaultBalance: 0n, + elRewardsVaultBalance: 0n, + sharesRequestedToBurn: 0n, + deposits: 0n, + withdrawalsVaultTransfer: 0n, + }; + + const report = ( + overrides: Partial = {}, + ): [bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint] => { + const r = { ...baseReport, ...overrides }; + return [ + r.timeElapsed, + r.preCLBalance - r.preCLPendingBalance - r.deposits, + r.preCLPendingBalance, + r.postCLBalance - r.postCLPendingBalance, + r.postCLPendingBalance, + r.withdrawalVaultBalance, + r.elRewardsVaultBalance, + r.sharesRequestedToBurn, + r.deposits, + r.withdrawalsVaultTransfer, + ]; + }; + + let accountingSigner: HardhatEthersSigner; + + before(async () => { + accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + }); + + it("reverts when not called by accounting", async () => { + await expect(checker.connect(stranger).checkAccountingOracleReport(...report())).to.be.revertedWithCustomError( + checker, + "CalledNotFromAccounting", + ); + }); + + it("reverts when withdrawal vault balance is overstated", async () => { + const actual = await ethers.provider.getBalance(withdrawalVault.address); + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ withdrawalVaultBalance: actual + 1n })), + ) + .to.be.revertedWithCustomError(checker, "IncorrectWithdrawalsVaultBalance") + .withArgs(actual); + }); + + it("reverts when EL rewards vault balance is overstated", async () => { + const 
actual = await ethers.provider.getBalance(elRewardsVault.address); + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ elRewardsVaultBalance: actual + 1n })), + ) + .to.be.revertedWithCustomError(checker, "IncorrectELRewardsVaultBalance") + .withArgs(actual); + }); + + it("reverts when withdrawals vault transfer exceeds reported vault balance", async () => { + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ withdrawalVaultBalance: 10n, withdrawalsVaultTransfer: 11n })), + ) + .to.be.revertedWithCustomError(checker, "IncorrectWithdrawalsVaultTransfer") + .withArgs(10n, 11n); + }); + + it("reverts when shares requested to burn are overstated", async () => { + await burner.setSharesRequestedToBurn(10n, 21n); + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport(...report({ sharesRequestedToBurn: 32n })), + ) + .to.be.revertedWithCustomError(checker, "IncorrectSharesRequestedToBurn") + .withArgs(31n); + }); + + it("reverts when positive CL increase exceeds the pending-backed one-day allowance", async () => { + const preCLBalance = 3_650_000n; + const preCLPendingBalance = 1_000n; + const postCLPendingBalance = 0n; + const allowedIncrease = preCLPendingBalance + preCLBalance / 3650n; + const clIncrease = allowedIncrease + 1n; + const postCLBalance = preCLBalance + clIncrease; + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: preCLBalance + preCLPendingBalance, + preCLPendingBalance, + postCLBalance, + postCLPendingBalance, + }), + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(allowedIncrease, clIncrease); + }); + + it("reverts when a one-day positive CL increase exceeds the pending-backed allowance", async () => { + const preCLBalance = ether("1000000"); + const preCLPendingBalance = ether("100"); + const postCLPendingBalance = 0n; + const 
allowedIncrease = preCLPendingBalance + preCLBalance / 3650n; + const clIncrease = allowedIncrease + 1n; + const postCLBalance = preCLBalance + clIncrease; + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: preCLBalance + preCLPendingBalance, + preCLPendingBalance, + postCLBalance, + postCLPendingBalance, + timeElapsed: 24n * 60n * 60n, + }), + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalCLBalanceIncrease") + .withArgs(allowedIncrease, clIncrease); + }); + + it("passes with valid report", async () => { + // This scenario uses 1 wei of pending explicitly, though positive growth can also be covered by the validators-based safety cap. + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: baseReport.preCLBalance + 1n, + preCLPendingBalance: 1n, + postCLBalance: baseReport.preCLBalance + 1n, + postCLPendingBalance: 0n, + }), + ), + ).not.to.be.reverted; + }); + + it("allows cold-start onboarding from deposits into pending and then into validators", async () => { + const deposits = ether("200"); + const activated = ether("100"); + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: deposits, + postCLBalance: deposits, + preCLPendingBalance: 0n, + postCLPendingBalance: deposits, + deposits, + }), + ), + ).not.to.be.reverted; + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: deposits, + postCLBalance: deposits, + preCLPendingBalance: deposits, + postCLPendingBalance: deposits - activated, + deposits: 0n, + }), + ), + ).not.to.be.reverted; + }); + + it("does not skip cold-start pending sanity on the first report", async () => { + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: 0n, + postCLBalance: 1n, + preCLPendingBalance: 0n, + postCLPendingBalance: 1n, + 
deposits: 0n, + }), + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalPendingBalance") + .withArgs(0n, 1n); + }); + + it("reverts when validator decrease is hidden by pending increase", async () => { + const preCLBalance = ether("10000"); + const postCLBalance = preCLBalance; + const postCLPendingBalance = ether("1000"); + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance, + postCLBalance, + postCLPendingBalance, + }), + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectTotalPendingBalance") + .withArgs(0n, postCLPendingBalance); + }); + + it("handles CL balance increase exactly at appeared ETH amount limit", async () => { + const preCLBalance = ether("1000000"); + const preCLPendingBalance = ether("100"); + const postCLBalance = preCLBalance + preCLPendingBalance; + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: preCLBalance + preCLPendingBalance, + preCLPendingBalance, + postCLBalance, + postCLPendingBalance: 0n, + timeElapsed: 24n * 60n * 60n, + }), + ), + ).not.to.be.reverted; + }); + + it("handles zero time elapsed path for annual increase", async () => { + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: ether("100000") + 1n, + preCLPendingBalance: 1n, + postCLBalance: ether("100000") + 1n, + postCLPendingBalance: 0n, + timeElapsed: 0n, + }), + ), + ).not.to.be.reverted; + }); + + it("handles zero time elapsed path for CL balance increase normalization", async () => { + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: ether("1000000") + 1n, + preCLPendingBalance: 1n, + postCLBalance: ether("1000000") + 1n, + postCLPendingBalance: 0n, + timeElapsed: 0n, + }), + ), + ).not.to.be.reverted; + }); + + it("handles zero pre CL balance for annual increase", async () => { + await expect( + 
checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: 1n, + postCLBalance: 1n, + }), + ), + ).not.to.be.reverted; + }); + + it("stores post-cl balance snapshots in reportData", async () => { + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("100") })), + ).not.to.be.reverted; + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ preCLBalance: ether("100"), postCLBalance: ether("100"), deposits: 2n }), + ), + ).not.to.be.reverted; + + expect(await checker.getReportDataCount()).to.equal(2n); + + const first = await checker.reportData(0n); + const second = await checker.reportData(1n); + expect(first.timestamp).to.equal(24n * 60n * 60n); + expect(first.clBalance).to.equal(ether("100")); + expect(first.deposits).to.equal(0n); + expect(first.clWithdrawals).to.equal(0n); + expect(second.timestamp).to.equal(2n * 24n * 60n * 60n); + expect(second.clBalance).to.equal(ether("100")); + expect(second.deposits).to.equal(2n); + expect(second.clWithdrawals).to.equal(0n); + }); + }); + + context("checkAccountingOracleReport: CL decrease window and second opinion", () => { + const baseWindowReport = { + timeElapsed: 24n * 60n * 60n, + preCLBalance: ether("100"), + postCLBalance: ether("100"), + preCLPendingBalance: 0n, + postCLPendingBalance: 0n, + withdrawalVaultBalance: 0n, + elRewardsVaultBalance: 0n, + sharesRequestedToBurn: 0n, + deposits: 0n, + withdrawalsVaultTransfer: 0n, + }; + + const report = ( + overrides: Partial = {}, + ): [bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint, bigint] => { + const r = { ...baseWindowReport, ...overrides }; + return [ + r.timeElapsed, + r.preCLBalance - r.preCLPendingBalance - r.deposits, + r.preCLPendingBalance, + r.postCLBalance - r.postCLPendingBalance, + r.postCLPendingBalance, + r.withdrawalVaultBalance, + r.elRewardsVaultBalance, + 
r.sharesRequestedToBurn, + r.deposits, + r.withdrawalsVaultTransfer, + ]; + }; + + let accountingSigner: HardhatEthersSigner; + + before(async () => { + accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + }); + + it("emits NegativeCLRebaseAccepted when decrease is within limit", async () => { + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + + await accountingOracle.setLastProcessingRefSlot(42n); + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("97") })), + ) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(42n, ether("97"), ether("3"), ether("3.6")); + }); + + it("uses 36-day timestamp window (not report count) and keeps left boundary report in range", async () => { + const ONE_DAY = 24n * 60n * 60n; + + // Report timestamps become: day 1, day 10, day 46. + // For the third report, windowStart = 46 - 36 = day 10. + // So baseline must be day 10 report (left boundary is included), not day 1. 
+ await checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ timeElapsed: ONE_DAY, preCLBalance: ether("50"), postCLBalance: ether("50") }), + ); + await checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ timeElapsed: 9n * ONE_DAY, preCLBalance: ether("100"), postCLBalance: ether("100") }), + ); + + await accountingOracle.setLastProcessingRefSlot(314n); + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ timeElapsed: 36n * ONE_DAY, preCLBalance: ether("100"), postCLBalance: ether("97") }), + ), + ) + .to.emit(checker, "NegativeCLRebaseAccepted") + .withArgs(314n, ether("97"), ether("3"), ether("3.6")); + + expect(await checker.getReportDataCount()).to.equal(3n); + const first = await checker.reportData(0n); + const second = await checker.reportData(1n); + const third = await checker.reportData(2n); + expect(first.timestamp).to.equal(ONE_DAY); + expect(second.timestamp).to.equal(10n * ONE_DAY); + expect(third.timestamp).to.equal(46n * ONE_DAY); + }); + + it("excludes all outdated snapshots from the window after a long gap", async () => { + const ONE_DAY = 24n * 60n * 60n; + + await checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ timeElapsed: ONE_DAY, preCLBalance: ether("100"), postCLBalance: ether("100") }), + ); + await checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ timeElapsed: ONE_DAY, preCLBalance: ether("100"), postCLBalance: ether("100") }), + ); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport( + ...report({ timeElapsed: 48n * ONE_DAY, preCLBalance: ether("100"), postCLBalance: ether("90") }), + ), + ).not.to.be.reverted; + + expect(await checker.getReportDataCount()).to.equal(3n); + const third = await checker.reportData(2n); + expect(third.timestamp).to.equal(50n * ONE_DAY); + expect(third.clBalance).to.equal(ether("90")); + }); + + 
it("uses absolute window diff between baseline and current balances", async () => { + await checker.connect(admin).grantRole(await checker.MAX_CL_BALANCE_DECREASE_MANAGER_ROLE(), manager.address); + await checker.connect(manager).setMaxCLBalanceDecreaseBP(1n); + + await checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100000"), postCLBalance: ether("100000") })); + // This intermediate increase is meant to exercise the decrease window, so it needs a matching + // pending-funded activation budget and must not fail earlier in the global CL growth check. + await checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: ether("100020"), + preCLPendingBalance: ether("20"), + postCLBalance: ether("100020"), + postCLPendingBalance: 0n, + }), + ); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100020"), postCLBalance: ether("100015") })), + ) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(ether("15"), ether("10")); + }); + + it("reverts with IncorrectCLBalanceDecrease when decrease exceeds limit and no second opinion", async () => { + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("90") })), + ) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecrease") + .withArgs(ether("10"), ether("3.6")); + }); + + it("reverts with IncorrectCLBalanceDecreaseWindowData on baseline/flows underflow", async () => { + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + + await expect( + checker.connect(accountingSigner).checkAccountingOracleReport( + ...report({ + preCLBalance: ether("300"), + postCLBalance: ether("90"), + withdrawalVaultBalance: ether("200"), + deposits: 
0n, + }), + ), + ) + .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceDecreaseWindowData") + .withArgs(ether("100"), 0n, ether("200")); + }); + + it("reverts with NegativeRebaseFailedSecondOpinionReportIsNotReady when second opinion report is absent", async () => { + const secondOpinion = await ethers.deployContract("SecondOpinionOracle__Mock"); + + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + await checker + .connect(manager) + .setSecondOpinionOracleAndCLBalanceUpperMargin(await secondOpinion.getAddress(), 50n); + + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + await accountingOracle.setLastProcessingRefSlot(77n); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("90") })), + ).to.be.revertedWithCustomError(checker, "NegativeRebaseFailedSecondOpinionReportIsNotReady"); + }); + + it("reverts with NegativeRebaseFailedCLBalanceMismatch when second opinion CL balance is lower", async () => { + const secondOpinion = await ethers.deployContract("SecondOpinionOracle__Mock"); + + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + await checker + .connect(manager) + .setSecondOpinionOracleAndCLBalanceUpperMargin(await secondOpinion.getAddress(), 50n); + + await secondOpinion.addPlainReport(77n, ether("89") / 1_000_000_000n, 0n); + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + await accountingOracle.setLastProcessingRefSlot(77n); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("90") })), + ) + .to.be.revertedWithCustomError(checker, "NegativeRebaseFailedCLBalanceMismatch") + .withArgs(ether("90"), ether("89"), 50n); + }); + + it("reverts with NegativeRebaseFailedCLBalanceMismatch 
when second opinion deviation exceeds upper BP limit", async () => { + const secondOpinion = await ethers.deployContract("SecondOpinionOracle__Mock"); + + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + await checker + .connect(manager) + .setSecondOpinionOracleAndCLBalanceUpperMargin(await secondOpinion.getAddress(), 50n); + + await secondOpinion.addPlainReport(77n, ether("100") / 1_000_000_000n, 0n); + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + await accountingOracle.setLastProcessingRefSlot(77n); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("90") })), + ) + .to.be.revertedWithCustomError(checker, "NegativeRebaseFailedCLBalanceMismatch") + .withArgs(ether("90"), ether("100"), 50n); + }); + + it("reverts with NegativeRebaseFailedWithdrawalVaultBalanceMismatch when second opinion withdrawal balance differs", async () => { + const secondOpinion = await ethers.deployContract("SecondOpinionOracle__Mock"); + + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + await checker + .connect(manager) + .setSecondOpinionOracleAndCLBalanceUpperMargin(await secondOpinion.getAddress(), 50n); + + await secondOpinion.addPlainReport(77n, ether("90.4") / 1_000_000_000n, 1n); + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + await accountingOracle.setLastProcessingRefSlot(77n); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("90") })), + ) + .to.be.revertedWithCustomError(checker, "NegativeRebaseFailedWithdrawalVaultBalanceMismatch") + .withArgs(0n, 1n); + }); + + it("emits NegativeCLRebaseConfirmed when second opinion validates report", async () => { + const secondOpinion = await 
ethers.deployContract("SecondOpinionOracle__Mock"); + + await checker.connect(admin).grantRole(await checker.SECOND_OPINION_MANAGER_ROLE(), manager.address); + await checker + .connect(manager) + .setSecondOpinionOracleAndCLBalanceUpperMargin(await secondOpinion.getAddress(), 50n); + + await secondOpinion.addPlainReport(77n, ether("90.4") / 1_000_000_000n, 0n); + await checker.connect(accountingSigner).checkAccountingOracleReport(...report()); + await accountingOracle.setLastProcessingRefSlot(77n); + + await expect( + checker + .connect(accountingSigner) + .checkAccountingOracleReport(...report({ preCLBalance: ether("100"), postCLBalance: ether("90") })), + ) + .to.emit(checker, "NegativeCLRebaseConfirmed") + .withArgs(77n, ether("90"), 0n); + }); + }); + + context("checkSimulatedShareRate", () => { + const SHARE_RATE_PRECISION_E27 = 10n ** 27n; + + const actualShareRate = ( + postInternalEther: bigint, + postInternalShares: bigint, + etherToFinalizeWQ: bigint, + sharesToBurnForWithdrawals: bigint, + ) => + ((postInternalEther + etherToFinalizeWQ) * SHARE_RATE_PRECISION_E27) / + (postInternalShares + sharesToBurnForWithdrawals); + + it("passes when simulated rate equals actual rate", async () => { + const postInternalEther = ether("100"); + const postInternalShares = ether("100"); + const simulated = actualShareRate(postInternalEther, postInternalShares, 0n, 0n); + + await expect(checker.checkSimulatedShareRate(postInternalEther, postInternalShares, 0n, 0n, simulated)).not.to.be + .reverted; + }); + + it("passes when deviation is below configured limit", async () => { + const postInternalEther = ether("100"); + const postInternalShares = ether("100"); + const actual = actualShareRate(postInternalEther, postInternalShares, 0n, 0n); + const simulated = actual + (actual * 200n) / TOTAL_BASIS_POINTS; + + await expect(checker.checkSimulatedShareRate(postInternalEther, postInternalShares, 0n, 0n, simulated)).not.to.be + .reverted; + }); + + it("reverts when deviation is 
above configured limit", async () => { + const postInternalEther = ether("100"); + const postInternalShares = ether("100"); + const actual = actualShareRate(postInternalEther, postInternalShares, 0n, 0n); + const simulated = actual + (actual * 251n) / TOTAL_BASIS_POINTS; + + await expect(checker.checkSimulatedShareRate(postInternalEther, postInternalShares, 0n, 0n, simulated)) + .to.be.revertedWithCustomError(checker, "IncorrectSimulatedShareRate") + .withArgs(simulated, actual); + }); + + it("accounts for withdrawal finalization offsets in actual rate", async () => { + const postInternalEther = ether("90"); + const postInternalShares = ether("90"); + const etherToFinalizeWQ = ether("10"); + const sharesToBurnForWithdrawals = ether("10"); + const simulated = actualShareRate( + postInternalEther, + postInternalShares, + etherToFinalizeWQ, + sharesToBurnForWithdrawals, + ); + + await expect( + checker.checkSimulatedShareRate( + postInternalEther, + postInternalShares, + etherToFinalizeWQ, + sharesToBurnForWithdrawals, + simulated, + ), + ).not.to.be.reverted; + }); + }); + + context("migrateBaselineSnapshot", () => { + const MIGRATION_WITHDRAWALS = ether("57600"); + + it("is permissionless before migration completes", async () => { + const { checkerWithLidoStats: migrationChecker } = await deployCheckerWithLidoStats(4n); + + await expect(migrationChecker.connect(stranger).migrateBaselineSnapshot()).not.to.be.reverted; + }); + + it("reverts on unexpected Lido version", async () => { + const { checkerWithLidoStats: migrationChecker } = await deployCheckerWithLidoStats(3n); + + await expect(migrationChecker.connect(manager).migrateBaselineSnapshot()) + .to.be.revertedWithCustomError(migrationChecker, "UnexpectedLidoVersion") + .withArgs(3n, 4n); + }); + + it("seeds baseline and bootstrap report snapshots", async () => { + const { checkerWithLidoStats: migrationChecker } = await deployCheckerWithLidoStats(4n); + + await 
expect(migrationChecker.connect(manager).migrateBaselineSnapshot()) + .to.emit(migrationChecker, "BaselineSnapshotMigrated") + .withArgs(ether("107"), ether("3"), MIGRATION_WITHDRAWALS); + + expect(await migrationChecker.getReportDataCount()).to.equal(2n); + + const baselineReport = await migrationChecker.reportData(0n); + const bootstrapFlowReport = await migrationChecker.reportData(1n); + + expect(baselineReport.timestamp).to.equal(0n); + expect(baselineReport.clBalance).to.equal(ether("107")); + expect(baselineReport.deposits).to.equal(0n); + expect(baselineReport.clWithdrawals).to.equal(0n); + + expect(bootstrapFlowReport.timestamp).to.equal(0n); + expect(bootstrapFlowReport.clBalance).to.equal(ether("107")); + expect(bootstrapFlowReport.deposits).to.equal(ether("3")); + expect(bootstrapFlowReport.clWithdrawals).to.equal(MIGRATION_WITHDRAWALS); + }); + + it("uses migrated bootstrap flows in first CL decrease window check", async () => { + const migratedCLBalance = ether("107000"); + const migrationDeposits = ether("3"); + const migrationDepositsCur = ether("3"); + const reportDecrease = ether("2500"); + + const { checkerWithLidoStats: migrationChecker } = await deployCheckerWithLidoStats(4n, { + clActive: ether("100000"), + clPending: ether("7000"), + deposits: migrationDeposits, + depositsCurrent: migrationDepositsCur, + }); + + await migrationChecker.connect(manager).migrateBaselineSnapshot(); - after(async () => { - await checker.connect(admin).revokeRole(await checker.INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE(), manager); - }); + const accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + const withdrawalVaultBalance = await ethers.provider.getBalance(withdrawalVault.address); - it("reverts if called by non-manager", async () => { - await expect( - checker.connect(stranger).setInitialSlashingAndPenaltiesAmount(100n, 100n), - ).to.be.revertedWithOZAccessControlError( - stranger.address, - await 
checker.INITIAL_SLASHING_AND_PENALTIES_MANAGER_ROLE(), - ); - }); + const maxAllowedCLBalanceDecrease = + ((migratedCLBalance + migrationDeposits - MIGRATION_WITHDRAWALS) * defaultLimits.maxCLBalanceDecreaseBP) / + TOTAL_BASIS_POINTS; - it("reverts if initial slashing amount is greater than max", async () => { await expect( - checker.connect(manager).setInitialSlashingAndPenaltiesAmount(MAX_UINT16, 100n), - ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); + migrationChecker + .connect(accountingSigner) + .checkAccountingOracleReport( + 24n * 60n * 60n, + migratedCLBalance, + 0n, + migratedCLBalance - reportDecrease, + 0n, + withdrawalVaultBalance, + 0n, + 0n, + 0n, + 0n, + ), + ) + .to.be.revertedWithCustomError(migrationChecker, "IncorrectCLBalanceDecrease") + .withArgs(reportDecrease, maxAllowedCLBalanceDecrease); }); - it("reverts if penalties amount is greater than max", async () => { - await expect( - checker.connect(manager).setInitialSlashingAndPenaltiesAmount(100n, MAX_UINT16), - ).to.be.revertedWithCustomError(checker, "IncorrectLimitValue"); - }); + it("reverts when migration is called more than once", async () => { + const { checkerWithLidoStats: migrationChecker } = await deployCheckerWithLidoStats(4n); - it("sets limit correctly and emits `InitialSlashingAmountSet` and `InactivityPenaltiesAmountSet` events", async () => { - await expect(checker.connect(manager).setInitialSlashingAndPenaltiesAmount(100n, 100n)) - .to.emit(checker, "InitialSlashingAmountSet") - .withArgs(100n) - .to.emit(checker, "InactivityPenaltiesAmountSet") - .withArgs(100n); + await migrationChecker.connect(manager).migrateBaselineSnapshot(); + await expect(migrationChecker.connect(manager).migrateBaselineSnapshot()).to.be.revertedWithCustomError( + migrationChecker, + "MigrationAlreadyDone", + ); }); }); @@ -661,11 +1908,7 @@ describe("OracleReportSanityChecker.sol", () => { }; before(async () => { - await checker.connect(admin).grantRole(await 
checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager); - }); - - after(async () => { - await checker.connect(admin).revokeRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager); + await checker.connect(admin).grantRole(await checker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), manager.address); }); it("works with zero data", async () => { @@ -673,181 +1916,148 @@ describe("OracleReportSanityChecker.sol", () => { ...report(), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); context("trivial post CL < pre CL", () => { before(async () => { - const newRebaseLimit = 100_000; // 0.01% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(100_000n); }); it("smoothens with no rewards and no withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - }), + ...report({ postCLBalance: ether("99") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with el rewards", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - elRewardsVaultBalance: ether("0.1"), - }), + ...report({ postCLBalance: ether("99"), elRewardsVaultBalance: ether("0.1") }), ); - expect(withdrawals).to.equal(0); + expect(withdrawals).to.equal(0n); 
expect(elRewards).to.equal(ether("0.1")); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - withdrawalVaultBalance: ether("0.1"), - }), + ...report({ postCLBalance: ether("99"), withdrawalVaultBalance: ether("0.1") }), ); expect(withdrawals).to.equal(ether("0.1")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with shares requested to burn", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - sharesRequestedToBurn: ether("0.1"), - }), + ...report({ postCLBalance: ether("99"), sharesRequestedToBurn: ether("0.1") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); expect(sharesToBurn).to.equal(ether("0.1")); }); }); context("trivial post CL > pre CL", () => { before(async () => { - const newRebaseLimit = 100_000_000; // 10% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(100_000_000n); }); it("smoothens with no rewards and no withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("100.01"), - }), + ...report({ postCLBalance: ether("100.01") }), ); - expect(withdrawals).to.equal(0); - 
expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with el rewards", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("100.01"), - elRewardsVaultBalance: ether("0.1"), - }), + ...report({ postCLBalance: ether("100.01"), elRewardsVaultBalance: ether("0.1") }), ); - expect(withdrawals).to.equal(0); + expect(withdrawals).to.equal(0n); expect(elRewards).to.equal(ether("0.1")); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("100.01"), - withdrawalVaultBalance: ether("0.1"), - }), + ...report({ postCLBalance: ether("100.01"), withdrawalVaultBalance: ether("0.1") }), ); expect(withdrawals).to.equal(ether("0.1")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with shares requested to burn", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("100.01"), - sharesRequestedToBurn: ether("0.1"), - }), + ...report({ postCLBalance: ether("100.01"), sharesRequestedToBurn: ether("0.1") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + 
expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); expect(sharesToBurn).to.equal(ether("0.1")); }); }); - context("non-trivial post CL < pre CL ", () => { + context("non-trivial post CL < pre CL", () => { before(async () => { - const newRebaseLimit = 10_000_000; // 1% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(10_000_000n); }); it("smoothens with no rewards and no withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - }), + ...report({ postCLBalance: ether("99") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with el rewards", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - elRewardsVaultBalance: ether("5"), - }), + ...report({ postCLBalance: ether("99"), elRewardsVaultBalance: ether("5") }), ); - expect(withdrawals).to.equal(0); + expect(withdrawals).to.equal(0n); expect(elRewards).to.equal(ether("2")); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - withdrawalVaultBalance: ether("5"), - }), + ...report({ postCLBalance: ether("99"), withdrawalVaultBalance: ether("5") }), ); expect(withdrawals).to.equal(ether("2")); - 
expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with withdrawals and el rewards", async () => { @@ -860,71 +2070,59 @@ describe("OracleReportSanityChecker.sol", () => { ); expect(withdrawals).to.equal(ether("2")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with shares requested to burn", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("99"), - sharesRequestedToBurn: ether("5"), - }), + ...report({ postCLBalance: ether("99"), sharesRequestedToBurn: ether("5") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(1980198019801980198n); // ether(100. - (99. 
/ 1.01)) + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(1980198019801980198n); }); }); context("non-trivial post CL > pre CL", () => { before(async () => { - const newRebaseLimit = 20_000_000; // 2% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(20_000_000n); }); it("smoothens with no rewards and no withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("101"), - }), + ...report({ postCLBalance: ether("101") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with el rewards", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("101"), - elRewardsVaultBalance: ether("5"), - }), + ...report({ postCLBalance: ether("101"), elRewardsVaultBalance: ether("5") }), ); - expect(withdrawals).to.equal(0); + expect(withdrawals).to.equal(0n); expect(elRewards).to.equal(ether("1")); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with withdrawals", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("101"), - withdrawalVaultBalance: ether("5"), - }), + ...report({ postCLBalance: ether("101"), withdrawalVaultBalance: ether("5") }), ); expect(withdrawals).to.equal(ether("1")); - 
expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with withdrawals and el rewards", async () => { @@ -937,23 +2135,20 @@ describe("OracleReportSanityChecker.sol", () => { ); expect(withdrawals).to.equal(ether("1")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(0); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(0n); }); it("smoothens with shares requested to burn", async () => { const { withdrawals, elRewards, sharesFromWQToBurn, sharesToBurn } = await checker.smoothenTokenRebase( - ...report({ - postCLBalance: ether("101"), - sharesRequestedToBurn: ether("5"), - }), + ...report({ postCLBalance: ether("101"), sharesRequestedToBurn: ether("5") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(0); - expect(sharesToBurn).to.equal(980392156862745098n); // ether(100. - (101. 
/ 1.02)) + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(0n); + expect(sharesToBurn).to.equal(980392156862745098n); }); }); @@ -966,8 +2161,7 @@ describe("OracleReportSanityChecker.sol", () => { }; before(async () => { - const newRebaseLimit = 5_000_000; // 0.5% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(5_000_000n); }); it("smoothens with no rewards and no withdrawals", async () => { @@ -975,8 +2169,8 @@ describe("OracleReportSanityChecker.sol", () => { ...report(defaultRebaseParams), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); expect(sharesFromWQToBurn).to.equal(ether("10")); expect(sharesToBurn).to.equal(ether("10")); }); @@ -986,9 +2180,9 @@ describe("OracleReportSanityChecker.sol", () => { ...report({ ...defaultRebaseParams, elRewardsVaultBalance: ether("5") }), ); - expect(withdrawals).to.equal(0); + expect(withdrawals).to.equal(0n); expect(elRewards).to.equal(ether("1.5")); - expect(sharesFromWQToBurn).to.equal(9950248756218905472n); // 100. - 90.5 / 1.005 + expect(sharesFromWQToBurn).to.equal(9950248756218905472n); expect(sharesToBurn).to.equal(9950248756218905472n); }); @@ -998,8 +2192,8 @@ describe("OracleReportSanityChecker.sol", () => { ); expect(withdrawals).to.equal(ether("1.5")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(9950248756218905472n); // 100. - 90.5 / 1.005 + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(9950248756218905472n); expect(sharesToBurn).to.equal(9950248756218905472n); }); @@ -1009,8 +2203,8 @@ describe("OracleReportSanityChecker.sol", () => { ); expect(withdrawals).to.equal(ether("1.5")); - expect(elRewards).to.equal(0); - expect(sharesFromWQToBurn).to.equal(9950248756218905472n); // 100. 
- 90.5 / 1.005 + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(9950248756218905472n); expect(sharesToBurn).to.equal(9950248756218905472n); }); @@ -1019,11 +2213,10 @@ describe("OracleReportSanityChecker.sol", () => { ...report({ ...defaultRebaseParams, sharesRequestedToBurn: ether("5") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); - - expect(sharesFromWQToBurn).to.equal(9950248756218905473n); // ether("100. - (90.5 / 1.005)") - expect(sharesToBurn).to.equal(11442786069651741293n); // ether("100. - (89. / 1.005)") + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); + expect(sharesFromWQToBurn).to.equal(9950248756218905473n); + expect(sharesToBurn).to.equal(11442786069651741293n); }); }); @@ -1036,8 +2229,7 @@ describe("OracleReportSanityChecker.sol", () => { }; before(async () => { - const newRebaseLimit = 40_000_000; // 4% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(40_000_000n); }); it("smoothens with no rewards and no withdrawals", async () => { @@ -1045,8 +2237,8 @@ describe("OracleReportSanityChecker.sol", () => { ...report(defaultRebaseParams), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); expect(sharesFromWQToBurn).to.equal(ether("10")); expect(sharesToBurn).to.equal(ether("10")); }); @@ -1056,10 +2248,10 @@ describe("OracleReportSanityChecker.sol", () => { ...report({ ...defaultRebaseParams, elRewardsVaultBalance: ether("5") }), ); - expect(withdrawals).to.equal(0); + expect(withdrawals).to.equal(0n); expect(elRewards).to.equal(ether("2")); expect(sharesFromWQToBurn).to.equal(9615384615384615384n); - expect(sharesToBurn).to.equal(9615384615384615384n); // 100. - 94. 
/ 1.04 + expect(sharesToBurn).to.equal(9615384615384615384n); }); it("smoothens with withdrawals", async () => { @@ -1068,9 +2260,9 @@ describe("OracleReportSanityChecker.sol", () => { ); expect(withdrawals).to.equal(ether("2")); - expect(elRewards).to.equal(0); + expect(elRewards).to.equal(0n); expect(sharesFromWQToBurn).to.equal(9615384615384615384n); - expect(sharesToBurn).to.equal(9615384615384615384n); // 100. - 94. / 1.04 + expect(sharesToBurn).to.equal(9615384615384615384n); }); it("smoothens with withdrawals and el rewards", async () => { @@ -1079,9 +2271,9 @@ describe("OracleReportSanityChecker.sol", () => { ); expect(withdrawals).to.equal(ether("2")); - expect(elRewards).to.equal(0); + expect(elRewards).to.equal(0n); expect(sharesFromWQToBurn).to.equal(9615384615384615384n); - expect(sharesToBurn).to.equal(9615384615384615384n); // 100. - 94. / 1.04 + expect(sharesToBurn).to.equal(9615384615384615384n); }); it("smoothens with shares requested to burn", async () => { @@ -1089,10 +2281,10 @@ describe("OracleReportSanityChecker.sol", () => { ...report({ ...defaultRebaseParams, sharesRequestedToBurn: ether("5") }), ); - expect(withdrawals).to.equal(0); - expect(elRewards).to.equal(0); + expect(withdrawals).to.equal(0n); + expect(elRewards).to.equal(0n); expect(sharesFromWQToBurn).to.equal(9615384615384615385n); - expect(sharesToBurn).to.equal(11538461538461538461n); // 100. - (92. 
/ 1.04) + expect(sharesToBurn).to.equal(11538461538461538461n); }); }); @@ -1104,14 +2296,13 @@ describe("OracleReportSanityChecker.sol", () => { postCLBalance: ether("1000000"), withdrawalVaultBalance: ether("500"), elRewardsVaultBalance: ether("500"), - sharesRequestedToBurn: ether("0"), + sharesRequestedToBurn: 0n, etherToLockForWithdrawals: ether("40000"), newSharesToBurnForWithdrawals: ether("40000"), }; before(async () => { - const newRebaseLimit = 1_000_000; // 0.1% - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(1_000_000n); }); it("smoothens the rebase", async () => { @@ -1121,12 +2312,12 @@ describe("OracleReportSanityChecker.sol", () => { expect(withdrawals).to.equal(ether("500")); expect(elRewards).to.equal(ether("500")); - expect(sharesFromWQToBurn).to.equal(39960039960039960039960n); // ether(1000000 - 961000. / 1.001) + expect(sharesFromWQToBurn).to.equal(39960039960039960039960n); expect(sharesToBurn).to.equal(39960039960039960039960n); }); }); - context("rounding case from Görli", () => { + context("rounding case from Goerli", () => { const rebaseParams = { preTotalPooledEther: 125262263468962792235936n, preTotalShares: 120111767594397261197918n, @@ -1140,8 +2331,7 @@ describe("OracleReportSanityChecker.sol", () => { }; before(async () => { - const newRebaseLimit = 750_000; // 0.075% or 7.5 basis points - await checker.connect(manager).setMaxPositiveTokenRebase(newRebaseLimit); + await checker.connect(manager).setMaxPositiveTokenRebase(750_000n); }); it("smoothens the rebase", async () => { @@ -1156,281 +2346,4 @@ describe("OracleReportSanityChecker.sol", () => { }); }); }); - - // NB: negative rebase is handled in `oracleReportSanityChecker.negative-rebase.test.ts` - context("checkAccountingOracleReport", () => { - const report = ( - overrides: Partial<{ - [key in keyof typeof correctOracleReport]: bigint; - }> = {}, - ): [bigint, bigint, bigint, bigint, bigint, 
bigint, bigint, bigint] => { - const reportData = { ...correctOracleReport, ...overrides }; - return [ - reportData.timeElapsed, - reportData.preCLBalance, - reportData.postCLBalance, - reportData.withdrawalVaultBalance, - reportData.elRewardsVaultBalance, - reportData.sharesRequestedToBurn, - reportData.preCLValidators, - reportData.postCLValidators, - ]; - }; - - let accountingSigher: HardhatEthersSigner; - before(async () => { - accountingSigher = await impersonate(await locator.accounting(), ether("1")); - }); - - it("reverts when not called by accounting", async () => { - await expect(checker.connect(stranger).checkAccountingOracleReport(...report())).to.be.revertedWithCustomError( - checker, - "CalledNotFromAccounting", - ); - }); - - it("reverts when actual withdrawal vault balance is less than passed", async () => { - const currentWithdrawalVaultBalance = await ethers.provider.getBalance(withdrawalVault); - - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - withdrawalVaultBalance: currentWithdrawalVaultBalance + 1n, - }), - ), - ) - .to.be.revertedWithCustomError(checker, "IncorrectWithdrawalsVaultBalance") - .withArgs(currentWithdrawalVaultBalance); - }); - - it("reverts when actual el rewards vault balance is less than passed", async () => { - const currentELRewardsVaultBalance = await ethers.provider.getBalance(elRewardsVault); - - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - elRewardsVaultBalance: currentELRewardsVaultBalance + 1n, - }), - ), - ) - .to.be.revertedWithCustomError(checker, "IncorrectELRewardsVaultBalance") - .withArgs(currentELRewardsVaultBalance); - }); - - it("reverts when actual shares to burn is less than passed", async () => { - await burner.setSharesRequestedToBurn(10, 21); - - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - sharesRequestedToBurn: 32n, - }), - ), - ) - 
.to.be.revertedWithCustomError(checker, "IncorrectSharesRequestedToBurn") - .withArgs(31n); - }); - - it("reverts when reported values overcome annual CL balance limit", async () => { - const maxBasisPoints = 10_000n; - const secondsInOneYear = 365n * 24n * 60n * 60n; - const postCLBalance = ether("150000"); - - // This formula calculates the annualized balance increase in basis points (BP) - // 1. Calculate the absolute balance increase: (postCLBalance - preCLBalance) - // 2. Convert to a relative increase by dividing by preCLBalance - // 3. Annualize by multiplying by (secondsInOneYear / timeElapsed) - // 4. Convert to basis points by multiplying by maxBasisPoints (100_00n) - // The result represents how much the balance would increase over a year at the current rate - const annualBalanceIncrease = - (secondsInOneYear * maxBasisPoints * (postCLBalance - correctOracleReport.preCLBalance)) / - correctOracleReport.preCLBalance / - correctOracleReport.timeElapsed; - - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport(...report({ postCLBalance: postCLBalance })), - ) - .to.be.revertedWithCustomError(checker, "IncorrectCLBalanceIncrease") - .withArgs(annualBalanceIncrease); - }); - - it("reverts when amount of appeared validators is greater than possible", async () => { - const insaneValidators = 100000n; - await expect( - checker - .connect(accountingSigher) - .checkAccountingOracleReport( - ...report({ postCLValidators: correctOracleReport.preCLValidators + insaneValidators }), - ), - ) - .to.be.revertedWithCustomError(checker, "IncorrectAppearedValidators") - .withArgs(correctOracleReport.preCLValidators + insaneValidators); - }); - - it("passes all checks with correct oracle report data", async () => { - await expect(checker.connect(accountingSigher).checkAccountingOracleReport(...report())).not.to.be.reverted; - }); - - it("handles zero time passed for annual balance increase", async () => { - await expect( - 
checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - postCLBalance: correctOracleReport.preCLBalance + 1000n, - timeElapsed: 0n, - }), - ), - ).not.to.be.reverted; - }); - - it("handles zero pre CL balance estimating balance increase", async () => { - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - preCLBalance: 0n, - postCLBalance: 1000n, - }), - ), - ).not.to.be.reverted; - }); - - it("handles appeared validators", async () => { - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - preCLValidators: correctOracleReport.preCLValidators, - postCLValidators: correctOracleReport.preCLValidators + 2n, - }), - ), - ).not.to.be.reverted; - }); - - it("handles zero time passed for appeared validators", async () => { - await expect( - checker.connect(accountingSigher).checkAccountingOracleReport( - ...report({ - preCLValidators: correctOracleReport.preCLValidators, - postCLValidators: correctOracleReport.preCLValidators + 2n, - timeElapsed: 0n, - }), - ), - ).not.to.be.reverted; - }); - }); - - context("checkExitBusOracleReport", () => { - let maxExitRequests: bigint; - - before(async () => { - maxExitRequests = (await checker.getOracleReportLimits()).maxValidatorExitRequestsPerReport; - }); - - it("reverts on too many exit requests", async () => { - await expect(checker.checkExitBusOracleReport(maxExitRequests + 1n)) - .to.be.revertedWithCustomError(checker, "IncorrectNumberOfExitRequestsPerReport") - .withArgs(maxExitRequests); - }); - - it("works with correct validators count", async () => { - await expect(checker.checkExitBusOracleReport(maxExitRequests)).not.to.be.reverted; - }); - }); - - context("checkExitedValidatorsRatePerDay", () => { - let maxExitedValidators: bigint; - - before(async () => { - maxExitedValidators = (await checker.getOracleReportLimits()).exitedValidatorsPerDayLimit; - }); - - it("reverts on too many exited validators", async () => 
{ - await expect(checker.checkExitedValidatorsRatePerDay(maxExitedValidators + 1n)) - .to.be.revertedWithCustomError(checker, "ExitedValidatorsLimitExceeded") - .withArgs(maxExitedValidators, maxExitedValidators + 1n); - }); - - it("works with correct exited validators count", async () => { - await expect(checker.checkExitedValidatorsRatePerDay(maxExitedValidators)).not.to.be.reverted; - }); - }); - - context("checkNodeOperatorsPerExtraDataItemCount", () => { - let maxCount: bigint; - - before(async () => { - maxCount = (await checker.getOracleReportLimits()).maxNodeOperatorsPerExtraDataItem; - }); - - it("reverts on too many node operators", async () => { - await expect(checker.checkNodeOperatorsPerExtraDataItemCount(12, maxCount + 1n)) - .to.be.revertedWithCustomError(checker, "TooManyNodeOpsPerExtraDataItem") - .withArgs(12, maxCount + 1n); - }); - - it("works with correct count", async () => { - await expect(checker.checkNodeOperatorsPerExtraDataItemCount(12, maxCount)).not.to.be.reverted; - }); - }); - - context("checkExtraDataItemsCountPerTransaction", () => { - let maxCount: bigint; - - before(async () => { - maxCount = (await checker.getOracleReportLimits()).maxItemsPerExtraDataTransaction; - }); - - it("reverts on too many items", async () => { - await expect(checker.checkExtraDataItemsCountPerTransaction(maxCount + 1n)) - .to.be.revertedWithCustomError(checker, "TooManyItemsPerExtraDataTransaction") - .withArgs(maxCount, maxCount + 1n); - }); - - it("works with correct count", async () => { - await expect(checker.checkExtraDataItemsCountPerTransaction(maxCount)).not.to.be.reverted; - }); - }); - - context("checkWithdrawalQueueOracleReport", () => { - const oldRequestId = 1n; - const newRequestId = 2n; - let oldRequestCreationTimestamp; - let newRequestCreationTimestamp: bigint; - - const correctWithdrawalQueueOracleReport = { - lastFinalizableRequestId: oldRequestId, - refReportTimestamp: -1n, - }; - - before(async () => { - const currentBlockTimestamp = 
await getCurrentBlockTimestamp(); - correctWithdrawalQueueOracleReport.refReportTimestamp = currentBlockTimestamp; - oldRequestCreationTimestamp = currentBlockTimestamp - defaultLimits.requestTimestampMargin; - - correctWithdrawalQueueOracleReport.lastFinalizableRequestId = oldRequestCreationTimestamp; - newRequestCreationTimestamp = currentBlockTimestamp - defaultLimits.requestTimestampMargin / 2n; - - await withdrawalQueue.setRequestTimestamp(oldRequestId, oldRequestCreationTimestamp); - await withdrawalQueue.setRequestTimestamp(newRequestId, newRequestCreationTimestamp); - - await checker.connect(admin).grantRole(await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), manager); - }); - - after(async () => { - await checker.connect(admin).revokeRole(await checker.REQUEST_TIMESTAMP_MARGIN_MANAGER_ROLE(), manager); - }); - - it("reverts when the creation timestamp of requestIdToFinalizeUpTo is too close to report timestamp", async () => { - await expect( - checker.checkWithdrawalQueueOracleReport(newRequestId, correctWithdrawalQueueOracleReport.refReportTimestamp), - ) - .to.be.revertedWithCustomError(checker, "IncorrectRequestFinalization") - .withArgs(newRequestCreationTimestamp); - }); - - it("passes all checks with correct withdrawal queue report data", async () => { - await checker.checkWithdrawalQueueOracleReport( - correctWithdrawalQueueOracleReport.lastFinalizableRequestId, - correctWithdrawalQueueOracleReport.refReportTimestamp, - ); - }); - }); }); diff --git a/test/0.8.9/stakingRouter/stakingRouter.misc.test.ts b/test/0.8.9/stakingRouter/stakingRouter.misc.test.ts deleted file mode 100644 index 528acdd8af..0000000000 --- a/test/0.8.9/stakingRouter/stakingRouter.misc.test.ts +++ /dev/null @@ -1,160 +0,0 @@ -import { expect } from "chai"; -import { hexlify, randomBytes, ZeroAddress } from "ethers"; -import { ethers } from "hardhat"; - -import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; - -import { 
DepositContract__MockForBeaconChainDepositor, StakingRouter__Harness } from "typechain-types"; - -import { certainAddress, ether, MAX_UINT256, proxify, randomString } from "lib"; - -import { Snapshot } from "test/suite"; - -describe("StakingRouter.sol:misc", () => { - let deployer: HardhatEthersSigner; - let proxyAdmin: HardhatEthersSigner; - let stakingRouterAdmin: HardhatEthersSigner; - let user: HardhatEthersSigner; - - let depositContract: DepositContract__MockForBeaconChainDepositor; - let stakingRouter: StakingRouter__Harness; - let impl: StakingRouter__Harness; - - let originalState: string; - - const lido = certainAddress("test:staking-router:lido"); - const withdrawalCredentials = hexlify(randomBytes(32)); - - before(async () => { - [deployer, proxyAdmin, stakingRouterAdmin, user] = await ethers.getSigners(); - - depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor", deployer); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter__Harness", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, - }); - - impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); - - [stakingRouter] = await proxify({ impl, admin: proxyAdmin, caller: user }); - }); - - beforeEach(async () => (originalState = await Snapshot.take())); - - afterEach(async () => await Snapshot.restore(originalState)); - - context("initialize", () => { - it("Reverts if admin is zero address", async () => { - await expect(stakingRouter.initialize(ZeroAddress, lido, withdrawalCredentials)).to.be.revertedWithCustomError( - stakingRouter, - "ZeroAddressAdmin", - ); - }); - - it("Reverts if lido is zero address", async () => { - await expect( - stakingRouter.initialize(stakingRouterAdmin.address, ZeroAddress, withdrawalCredentials), - 
).to.be.revertedWithCustomError(stakingRouter, "ZeroAddressLido"); - }); - - it("Initializes the contract version, sets up roles and variables", async () => { - await expect(stakingRouter.initialize(stakingRouterAdmin.address, lido, withdrawalCredentials)) - .to.emit(stakingRouter, "ContractVersionSet") - .withArgs(3) - .and.to.emit(stakingRouter, "RoleGranted") - .withArgs(await stakingRouter.DEFAULT_ADMIN_ROLE(), stakingRouterAdmin.address, user.address) - .and.to.emit(stakingRouter, "WithdrawalCredentialsSet") - .withArgs(withdrawalCredentials, user.address); - - expect(await stakingRouter.getContractVersion()).to.equal(3); - expect(await stakingRouter.getLido()).to.equal(lido); - expect(await stakingRouter.getWithdrawalCredentials()).to.equal(withdrawalCredentials); - }); - }); - - context("finalizeUpgrade_v3()", () => { - const STAKE_SHARE_LIMIT = 1_00n; - const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; - const MODULE_FEE = 5_00n; - const TREASURY_FEE = 5_00n; - const MAX_DEPOSITS_PER_BLOCK = 150n; - const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; - - const modulesCount = 3; - - beforeEach(async () => { - // initialize staking router - await stakingRouter.initialize(stakingRouterAdmin.address, lido, withdrawalCredentials); - // grant roles - await stakingRouter - .connect(stakingRouterAdmin) - .grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), stakingRouterAdmin); - - for (let i = 0; i < modulesCount; i++) { - await stakingRouter - .connect(stakingRouterAdmin) - .addStakingModule( - randomString(8), - certainAddress(`test:staking-router:staking-module-${i}`), - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ); - } - expect(await stakingRouter.getStakingModulesCount()).to.equal(modulesCount); - }); - - it("fails with UnexpectedContractVersion error when called on implementation", async () => { - await expect(impl.finalizeUpgrade_v3()) - 
.to.be.revertedWithCustomError(impl, "UnexpectedContractVersion") - .withArgs(MAX_UINT256, 2); - }); - - it("fails with UnexpectedContractVersion error when called on deployed from scratch SRv2", async () => { - await expect(stakingRouter.finalizeUpgrade_v3()) - .to.be.revertedWithCustomError(impl, "UnexpectedContractVersion") - .withArgs(3, 2); - }); - - context("simulate upgrade from v2", () => { - beforeEach(async () => { - // reset contract version - await stakingRouter.testing_setBaseVersion(2); - }); - - it("sets correct contract version", async () => { - expect(await stakingRouter.getContractVersion()).to.equal(2); - await stakingRouter.finalizeUpgrade_v3(); - expect(await stakingRouter.getContractVersion()).to.be.equal(3); - }); - }); - }); - - context("receive", () => { - it("Reverts", async () => { - await expect( - user.sendTransaction({ - to: stakingRouter, - value: ether("1.0"), - }), - ).to.be.revertedWithCustomError(stakingRouter, "DirectETHTransfer"); - }); - }); - - context("getLido", () => { - it("Returns zero address before initialization", async () => { - expect(await stakingRouter.getLido()).to.equal(ZeroAddress); - }); - - it("Returns lido address after initialization", async () => { - await stakingRouter.initialize(stakingRouterAdmin.address, lido, withdrawalCredentials); - - expect(await stakingRouter.getLido()).to.equal(lido); - }); - }); -}); diff --git a/test/0.8.9/stakingRouter/stakingRouter.module-management.test.ts b/test/0.8.9/stakingRouter/stakingRouter.module-management.test.ts deleted file mode 100644 index 06c5579b41..0000000000 --- a/test/0.8.9/stakingRouter/stakingRouter.module-management.test.ts +++ /dev/null @@ -1,483 +0,0 @@ -import { expect } from "chai"; -import { hexlify, randomBytes, ZeroAddress } from "ethers"; -import { ethers } from "hardhat"; - -import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; - -import { StakingRouter } from "typechain-types"; - -import { certainAddress, getNextBlock, 
proxify, randomString } from "lib"; - -const UINT64_MAX = 2n ** 64n - 1n; - -describe("StakingRouter.sol:module-management", () => { - let deployer: HardhatEthersSigner; - let admin: HardhatEthersSigner; - let user: HardhatEthersSigner; - - let stakingRouter: StakingRouter; - - beforeEach(async () => { - [deployer, admin, user] = await ethers.getSigners(); - - const depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor", deployer); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, - }); - - const impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); - - [stakingRouter] = await proxify({ impl, admin }); - - // initialize staking router - await stakingRouter.initialize( - admin, - certainAddress("test:staking-router-modules:lido"), // mock lido address - hexlify(randomBytes(32)), // mock withdrawal credentials - ); - - // grant roles - await stakingRouter.grantRole(await stakingRouter.STAKING_MODULE_MANAGE_ROLE(), admin); - }); - - context("addStakingModule", () => { - const NAME = "StakingModule"; - const ADDRESS = certainAddress("test:staking-router:staking-module"); - const STAKE_SHARE_LIMIT = 1_00n; - const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; - const MODULE_FEE = 5_00n; - const TREASURY_FEE = 5_00n; - const MAX_DEPOSITS_PER_BLOCK = 150n; - const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; - - it("Reverts if the caller does not have the role", async () => { - await expect( - stakingRouter - .connect(user) - .addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithOZAccessControlError(user.address, await 
stakingRouter.STAKING_MODULE_MANAGE_ROLE()); - }); - - it("Reverts if the target share is greater than 100%", async () => { - const STAKE_SHARE_LIMIT_OVER_100 = 100_01; - - await expect( - stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT_OVER_100, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidStakeShareLimit"); - }); - - it("Reverts if the sum of module and treasury fees is greater than 100%", async () => { - const MODULE_FEE_INVALID = 100_01n - TREASURY_FEE; - - await expect( - stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE_INVALID, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); - - const TREASURY_FEE_INVALID = 100_01n - MODULE_FEE; - - await expect( - stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE_INVALID, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); - }); - - it("Reverts if the staking module address is zero address", async () => { - await expect( - stakingRouter.addStakingModule( - NAME, - ZeroAddress, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "ZeroAddressStakingModule"); - }); - - it("Reverts if the staking module name is empty string", async () => { - const NAME_EMPTY_STRING = ""; - - await expect( - stakingRouter.addStakingModule( - NAME_EMPTY_STRING, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - 
).to.be.revertedWithCustomError(stakingRouter, "StakingModuleWrongName"); - }); - - it("Reverts if the staking module name is too long", async () => { - const MAX_STAKING_MODULE_NAME_LENGTH = await stakingRouter.MAX_STAKING_MODULE_NAME_LENGTH(); - const NAME_TOO_LONG = randomString(Number(MAX_STAKING_MODULE_NAME_LENGTH + 1n)); - - await expect( - stakingRouter.addStakingModule( - NAME_TOO_LONG, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "StakingModuleWrongName"); - }); - - it("Reverts if the max number of staking modules is reached", async () => { - const MAX_STAKING_MODULES_COUNT = await stakingRouter.MAX_STAKING_MODULES_COUNT(); - - for (let i = 0; i < MAX_STAKING_MODULES_COUNT; i++) { - await stakingRouter.addStakingModule( - randomString(8), - certainAddress(`test:staking-router:staking-module-${i}`), - 1_00, - 1_00, - 1_00, - 1_00, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ); - } - - expect(await stakingRouter.getStakingModulesCount()).to.equal(MAX_STAKING_MODULES_COUNT); - - await expect( - stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "StakingModulesLimitExceeded"); - }); - - it("Reverts if adding a module with the same address", async () => { - await stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ); - - await expect( - stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - 
).to.be.revertedWithCustomError(stakingRouter, "StakingModuleAddressExists"); - }); - - it("Adds the module to stakingRouter and emits events", async () => { - const stakingModuleId = (await stakingRouter.getStakingModulesCount()) + 1n; - const moduleAddedBlock = await getNextBlock(); - - await expect( - stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ) - .to.be.emit(stakingRouter, "StakingRouterETHDeposited") - .withArgs(stakingModuleId, 0) - .and.to.be.emit(stakingRouter, "StakingModuleAdded") - .withArgs(stakingModuleId, ADDRESS, NAME, admin.address) - .and.to.be.emit(stakingRouter, "StakingModuleShareLimitSet") - .withArgs(stakingModuleId, STAKE_SHARE_LIMIT, PRIORITY_EXIT_SHARE_THRESHOLD, admin.address) - .and.to.be.emit(stakingRouter, "StakingModuleFeesSet") - .withArgs(stakingModuleId, MODULE_FEE, TREASURY_FEE, admin.address); - - expect(await stakingRouter.getStakingModule(stakingModuleId)).to.deep.equal([ - stakingModuleId, - ADDRESS, - MODULE_FEE, - TREASURY_FEE, - STAKE_SHARE_LIMIT, - 0n, // status active - NAME, - moduleAddedBlock.timestamp, - moduleAddedBlock.number, - 0n, // exited validators, - PRIORITY_EXIT_SHARE_THRESHOLD, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ]); - }); - }); - - context("updateStakingModule", () => { - const NAME = "StakingModule"; - const ADDRESS = certainAddress("test:staking-router-modules:staking-module"); - const STAKE_SHARE_LIMIT = 1_00n; - const PRIORITY_EXIT_SHARE_THRESHOLD = STAKE_SHARE_LIMIT; - const MODULE_FEE = 5_00n; - const TREASURY_FEE = 5_00n; - const MAX_DEPOSITS_PER_BLOCK = 150n; - const MIN_DEPOSIT_BLOCK_DISTANCE = 25n; - - let ID: bigint; - - const NEW_STAKE_SHARE_LIMIT = 2_00n; - const NEW_PRIORITY_EXIT_SHARE_THRESHOLD = NEW_STAKE_SHARE_LIMIT; - - const NEW_MODULE_FEE = 6_00n; - const NEW_TREASURY_FEE = 4_00n; - - const 
NEW_MAX_DEPOSITS_PER_BLOCK = 100n; - const NEW_MIN_DEPOSIT_BLOCK_DISTANCE = 20n; - - beforeEach(async () => { - await stakingRouter.addStakingModule( - NAME, - ADDRESS, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ); - ID = await stakingRouter.getStakingModulesCount(); - }); - - it("Reverts if the caller does not have the role", async () => { - stakingRouter = stakingRouter.connect(user); - - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithOZAccessControlError(user.address, await stakingRouter.STAKING_MODULE_MANAGE_ROLE()); - }); - - it("Reverts if the new target share is greater than 100%", async () => { - const NEW_STAKE_SHARE_LIMIT_OVER_100 = 100_01; - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT_OVER_100, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidStakeShareLimit"); - }); - - it("Reverts if the new priority exit share is greater than 100%", async () => { - const NEW_PRIORITY_EXIT_SHARE_THRESHOLD_OVER_100 = 100_01; - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD_OVER_100, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidPriorityExitShareThreshold"); - }); - - it("Reverts if the new priority exit share is less than stake share limit", async () => { - const UPGRADED_STAKE_SHARE_LIMIT = 55_00n; - const UPGRADED_PRIORITY_EXIT_SHARE_THRESHOLD = 50_00n; - await expect( - 
stakingRouter.updateStakingModule( - ID, - UPGRADED_STAKE_SHARE_LIMIT, - UPGRADED_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidPriorityExitShareThreshold"); - }); - - it("Reverts if the new deposit block distance is zero", async () => { - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - 0n, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidMinDepositBlockDistance"); - }); - - it("Reverts if the new deposit block distance is great then uint64 max", async () => { - await stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - UINT64_MAX, - ); - - expect((await stakingRouter.getStakingModule(ID)).minDepositBlockDistance).to.be.equal(UINT64_MAX); - - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - UINT64_MAX + 1n, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidMinDepositBlockDistance"); - }); - - it("Reverts if the new max deposits per block is great then uint64 max", async () => { - await stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - UINT64_MAX, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ); - - expect((await stakingRouter.getStakingModule(ID)).maxDepositsPerBlock).to.be.equal(UINT64_MAX); - - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - UINT64_MAX + 1n, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ), - 
).to.be.revertedWithCustomError(stakingRouter, "InvalidMaxDepositPerBlockValue"); - }); - - it("Reverts if the sum of the new module and treasury fees is greater than 100%", async () => { - const NEW_MODULE_FEE_INVALID = 100_01n - TREASURY_FEE; - - await expect( - stakingRouter.updateStakingModule( - ID, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE_INVALID, - TREASURY_FEE, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); - - const NEW_TREASURY_FEE_INVALID = 100_01n - MODULE_FEE; - await expect( - stakingRouter.updateStakingModule( - ID, - STAKE_SHARE_LIMIT, - PRIORITY_EXIT_SHARE_THRESHOLD, - MODULE_FEE, - NEW_TREASURY_FEE_INVALID, - MAX_DEPOSITS_PER_BLOCK, - MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ).to.be.revertedWithCustomError(stakingRouter, "InvalidFeeSum"); - }); - - it("Update target share, module and treasury fees and emits events", async () => { - await expect( - stakingRouter.updateStakingModule( - ID, - NEW_STAKE_SHARE_LIMIT, - NEW_PRIORITY_EXIT_SHARE_THRESHOLD, - NEW_MODULE_FEE, - NEW_TREASURY_FEE, - NEW_MAX_DEPOSITS_PER_BLOCK, - NEW_MIN_DEPOSIT_BLOCK_DISTANCE, - ), - ) - .to.be.emit(stakingRouter, "StakingModuleShareLimitSet") - .withArgs(ID, NEW_STAKE_SHARE_LIMIT, NEW_PRIORITY_EXIT_SHARE_THRESHOLD, admin.address) - .and.to.be.emit(stakingRouter, "StakingModuleFeesSet") - .withArgs(ID, NEW_MODULE_FEE, NEW_TREASURY_FEE, admin.address); - }); - }); -}); diff --git a/test/0.8.9/stakingRouter/stakingRouter.versioned.test.ts b/test/0.8.9/stakingRouter/stakingRouter.versioned.test.ts deleted file mode 100644 index 059ee1148c..0000000000 --- a/test/0.8.9/stakingRouter/stakingRouter.versioned.test.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { expect } from "chai"; -import { randomBytes } from "ethers"; -import { ethers } from "hardhat"; - -import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; - -import { StakingRouter } from 
"typechain-types"; - -import { MAX_UINT256, proxify, randomAddress } from "lib"; - -describe("StakingRouter.sol:Versioned", () => { - let deployer: HardhatEthersSigner; - let admin: HardhatEthersSigner; - - let impl: StakingRouter; - let versioned: StakingRouter; - - const petrifiedVersion = MAX_UINT256; - - before(async () => { - [deployer, admin] = await ethers.getSigners(); - - // deploy staking router - const depositContract = randomAddress(); - const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); - const stakingRouterFactory = await ethers.getContractFactory("StakingRouter", { - libraries: { - ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), - }, - }); - - impl = await stakingRouterFactory.connect(deployer).deploy(depositContract); - - [versioned] = await proxify({ impl, admin }); - }); - - context("constructor", () => { - it("Petrifies the implementation", async () => { - expect(await impl.getContractVersion()).to.equal(petrifiedVersion); - }); - }); - - context("getContractVersion", () => { - it("Returns 0 as the initial contract version", async () => { - expect(await versioned.getContractVersion()).to.equal(0n); - }); - }); - - context("initialize", () => { - it("Increments version", async () => { - await versioned.initialize(randomAddress(), randomAddress(), randomBytes(32)); - - expect(await versioned.getContractVersion()).to.equal(3n); - }); - }); -}); diff --git a/test/0.8.9/withdrawalVault/eip7251Mock.ts b/test/0.8.9/withdrawalVault/eip7251Mock.ts new file mode 100644 index 0000000000..1501a0dc54 --- /dev/null +++ b/test/0.8.9/withdrawalVault/eip7251Mock.ts @@ -0,0 +1,55 @@ +import { expect } from "chai"; +import { ContractTransactionReceipt, ContractTransactionResponse } from "ethers"; +import { ethers } from "hardhat"; + +import { EIP7251ConsolidationRequest__Mock } from "typechain-types"; + +import { EIP7251_ADDRESS, findEventsWithInterfaces } from 
"lib"; + +const eventName = "ConsolidationRequestAdded__Mock"; +const eip7251MockEventABI = [`event ${eventName}(bytes request, uint256 fee)`]; +const eip7251MockInterface = new ethers.Interface(eip7251MockEventABI); + +export const deployEIP7251ConsolidationRequestContractMock = async ( + fee: bigint, +): Promise => { + const eip7251Mock = await ethers.deployContract("EIP7251ConsolidationRequest__Mock"); + const eip7251MockAddress = await eip7251Mock.getAddress(); + + await ethers.provider.send("hardhat_setCode", [EIP7251_ADDRESS, await ethers.provider.getCode(eip7251MockAddress)]); + + const contract = await ethers.getContractAt("EIP7251ConsolidationRequest__Mock", EIP7251_ADDRESS); + await contract.mock__setFee(fee); + + return contract; +}; + +export const encodeEIP7251Payload = (sourcePubkey: string, targetPubkey: string): string => { + const sourcePubkeyHex = sourcePubkey.startsWith("0x") ? sourcePubkey.slice(2) : sourcePubkey; + const targetPubkeyHex = targetPubkey.startsWith("0x") ? 
targetPubkey.slice(2) : targetPubkey; + return `0x${sourcePubkeyHex}${targetPubkeyHex}`; +}; + +export function findEIP7251MockEvents(receipt: ContractTransactionReceipt) { + return findEventsWithInterfaces(receipt!, eventName, [eip7251MockInterface]); +} + +export const testEIP7251Mock = async ( + addConsolidationRequests: () => Promise, + sourcePubkeys: string[], + targetPubkeys: string[], + expectedFee: bigint, +): Promise<{ tx: ContractTransactionResponse; receipt: ContractTransactionReceipt }> => { + const tx = await addConsolidationRequests(); + const receipt = (await tx.wait()) as ContractTransactionReceipt; + + const events = findEIP7251MockEvents(receipt); + expect(events.length).to.equal(sourcePubkeys.length); + + for (let i = 0; i < sourcePubkeys.length; i++) { + expect(events[i].args[0]).to.equal(encodeEIP7251Payload(sourcePubkeys[i], targetPubkeys[i])); + expect(events[i].args[1]).to.equal(expectedFee); + } + + return { tx, receipt }; +}; diff --git a/test/0.8.9/withdrawalVault/utils.ts b/test/0.8.9/withdrawalVault/utils.ts index 968e73df9b..72560c9573 100644 --- a/test/0.8.9/withdrawalVault/utils.ts +++ b/test/0.8.9/withdrawalVault/utils.ts @@ -35,3 +35,20 @@ export function generateWithdrawalRequestPayload(numberOfRequests: number) { mixedWithdrawalAmounts, }; } + +export function generateConsolidationRequestPayload(numberOfRequests: number) { + const sourcePubkeys: string[] = []; + const targetPubkeys: string[] = []; + + for (let i = 1; i <= numberOfRequests; i++) { + sourcePubkeys.push(toValidatorPubKey(i)); + targetPubkeys.push(toValidatorPubKey(i + numberOfRequests)); // Ensure unique target pubkeys + } + + return { + sourcePubkeysHexArray: sourcePubkeys.map((pk) => `0x${pk}`), + targetPubkeysHexArray: targetPubkeys.map((pk) => `0x${pk}`), + sourcePubkeys, + targetPubkeys, + }; +} diff --git a/test/0.8.9/withdrawalVault/withdrawalVault.test.ts b/test/0.8.9/withdrawalVault/withdrawalVault.test.ts index d6260ae9cb..98b12ab945 100644 --- 
a/test/0.8.9/withdrawalVault/withdrawalVault.test.ts +++ b/test/0.8.9/withdrawalVault/withdrawalVault.test.ts @@ -7,13 +7,21 @@ import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; import { EIP7002WithdrawalRequest__Mock, + EIP7251ConsolidationRequest__Mock, ERC20__Harness, ERC721__Harness, Lido__MockForWithdrawalVault, WithdrawalVault__Harness, } from "typechain-types"; -import { EIP7002_ADDRESS, EIP7002_MIN_WITHDRAWAL_REQUEST_FEE, MAX_UINT256, proxify } from "lib"; +import { + EIP7002_ADDRESS, + EIP7002_MIN_WITHDRAWAL_REQUEST_FEE, + EIP7251_ADDRESS, + EIP7251_MIN_CONSOLIDATION_FEE, + MAX_UINT256, + proxify, +} from "lib"; import { Snapshot } from "test/suite"; @@ -23,7 +31,13 @@ import { findEIP7002MockEvents, testEIP7002Mock, } from "./eip7002Mock"; -import { generateWithdrawalRequestPayload } from "./utils"; +import { + deployEIP7251ConsolidationRequestContractMock, + encodeEIP7251Payload, + findEIP7251MockEvents, + testEIP7251Mock, +} from "./eip7251Mock"; +import { generateConsolidationRequestPayload, generateWithdrawalRequestPayload } from "./utils"; const PETRIFIED_VERSION = MAX_UINT256; @@ -32,11 +46,13 @@ describe("WithdrawalVault.sol", () => { let user: HardhatEthersSigner; let treasury: HardhatEthersSigner; let triggerableWithdrawalsGateway: HardhatEthersSigner; + let consolidationGateway: HardhatEthersSigner; let stranger: HardhatEthersSigner; let originalState: string; let withdrawalsPredeployed: EIP7002WithdrawalRequest__Mock; + let consolidationPredeployed: EIP7251ConsolidationRequest__Mock; let lido: Lido__MockForWithdrawalVault; let lidoAddress: string; @@ -47,18 +63,29 @@ describe("WithdrawalVault.sol", () => { before(async () => { [owner, user, treasury] = await ethers.getSigners(); // TODO - [owner, treasury, triggerableWithdrawalsGateway, stranger] = await ethers.getSigners(); + [owner, treasury, triggerableWithdrawalsGateway, consolidationGateway, stranger] = await ethers.getSigners(); withdrawalsPredeployed = await 
deployEIP7002WithdrawalRequestContractMock(EIP7002_MIN_WITHDRAWAL_REQUEST_FEE); expect(await withdrawalsPredeployed.getAddress()).to.equal(EIP7002_ADDRESS); + consolidationPredeployed = await deployEIP7251ConsolidationRequestContractMock(EIP7251_MIN_CONSOLIDATION_FEE); + + expect(await consolidationPredeployed.getAddress()).to.equal(EIP7251_ADDRESS); + lido = await ethers.deployContract("Lido__MockForWithdrawalVault"); lidoAddress = await lido.getAddress(); impl = await ethers.deployContract( "WithdrawalVault__Harness", - [lidoAddress, treasury.address, triggerableWithdrawalsGateway.address], + [ + lidoAddress, + treasury.address, + triggerableWithdrawalsGateway.address, + consolidationGateway.address, + EIP7002_ADDRESS, + EIP7251_ADDRESS, + ], owner, ); @@ -78,25 +105,91 @@ describe("WithdrawalVault.sol", () => { ZeroAddress, treasury.address, triggerableWithdrawalsGateway.address, + consolidationGateway.address, + EIP7002_ADDRESS, + EIP7251_ADDRESS, ]), ).to.be.revertedWithCustomError(vault, "ZeroAddress"); }); it("Reverts if the treasury address is zero", async () => { await expect( - ethers.deployContract("WithdrawalVault", [lidoAddress, ZeroAddress, triggerableWithdrawalsGateway.address]), + ethers.deployContract("WithdrawalVault", [ + lidoAddress, + ZeroAddress, + triggerableWithdrawalsGateway.address, + consolidationGateway.address, + EIP7002_ADDRESS, + EIP7251_ADDRESS, + ]), ).to.be.revertedWithCustomError(vault, "ZeroAddress"); }); it("Reverts if the triggerable withdrawal gateway address is zero", async () => { await expect( - ethers.deployContract("WithdrawalVault", [lidoAddress, treasury.address, ZeroAddress]), + ethers.deployContract("WithdrawalVault", [ + lidoAddress, + treasury.address, + ZeroAddress, + consolidationGateway.address, + EIP7002_ADDRESS, + EIP7251_ADDRESS, + ]), + ).to.be.revertedWithCustomError(vault, "ZeroAddress"); + }); + + it("Reverts if the consolidation gateway address is zero", async () => { + await expect( + 
ethers.deployContract("WithdrawalVault", [ + lidoAddress, + treasury.address, + triggerableWithdrawalsGateway.address, + ZeroAddress, + EIP7002_ADDRESS, + EIP7251_ADDRESS, + ]), + ).to.be.revertedWithCustomError(vault, "ZeroAddress"); + }); + + it("Reverts if the withdrawal request address is zero", async () => { + await expect( + ethers.deployContract("WithdrawalVault", [ + lidoAddress, + treasury.address, + triggerableWithdrawalsGateway.address, + consolidationGateway.address, + ZeroAddress, + EIP7251_ADDRESS, + ]), + ).to.be.revertedWithCustomError(vault, "ZeroAddress"); + }); + + it("Reverts if the consolidation request address is zero", async () => { + await expect( + ethers.deployContract("WithdrawalVault", [ + lidoAddress, + treasury.address, + triggerableWithdrawalsGateway.address, + consolidationGateway.address, + EIP7002_ADDRESS, + ZeroAddress, + ]), ).to.be.revertedWithCustomError(vault, "ZeroAddress"); }); it("Sets initial properties", async () => { expect(await vault.LIDO()).to.equal(lidoAddress, "Lido address"); expect(await vault.TREASURY()).to.equal(treasury.address, "Treasury address"); + expect(await vault.TRIGGERABLE_WITHDRAWALS_GATEWAY()).to.equal( + triggerableWithdrawalsGateway.address, + "Triggerable Withdrawals Gateway address", + ); + expect(await vault.CONSOLIDATION_GATEWAY()).to.equal( + consolidationGateway.address, + "Consolidation Gateway address", + ); + expect(await vault.WITHDRAWAL_REQUEST()).to.equal(EIP7002_ADDRESS, "Withdrawal Request address"); + expect(await vault.CONSOLIDATION_REQUEST()).to.equal(EIP7251_ADDRESS, "Consolidation Request address"); }); it("Petrifies the implementation", async () => { @@ -112,38 +205,38 @@ describe("WithdrawalVault.sol", () => { it("Should revert if the contract is already initialized", async () => { await vault.initialize(); - await expect(vault.initialize()).to.be.revertedWithCustomError(vault, "UnexpectedContractVersion").withArgs(2, 0); + await 
expect(vault.initialize()).to.be.revertedWithCustomError(vault, "UnexpectedContractVersion").withArgs(3, 0); }); it("Initializes the contract", async () => { - await expect(vault.initialize()).to.emit(vault, "ContractVersionSet").withArgs(2); + await expect(vault.initialize()).to.emit(vault, "ContractVersionSet").withArgs(3); }); }); - context("finalizeUpgrade_v2()", () => { + context("finalizeUpgrade_v3()", () => { it("Should revert with UnexpectedContractVersion error when called on implementation", async () => { - await expect(impl.finalizeUpgrade_v2()) + await expect(impl.finalizeUpgrade_v3()) .to.be.revertedWithCustomError(impl, "UnexpectedContractVersion") - .withArgs(MAX_UINT256, 1); + .withArgs(MAX_UINT256, 2); }); - it("Should revert with UnexpectedContractVersion error when called on deployed from scratch WithdrawalVaultV2", async () => { + it("Should revert with UnexpectedContractVersion error when called on deployed from scratch WithdrawalVaultV3", async () => { await vault.initialize(); - await expect(vault.finalizeUpgrade_v2()) + await expect(vault.finalizeUpgrade_v3()) .to.be.revertedWithCustomError(impl, "UnexpectedContractVersion") - .withArgs(2, 1); + .withArgs(3, 2); }); - context("Simulate upgrade from v1", () => { + context("Simulate upgrade from v2", () => { beforeEach(async () => { - await vault.harness__initializeContractVersionTo(1); + await vault.harness__initializeContractVersionTo(2); }); it("Should set correct contract version", async () => { - expect(await vault.getContractVersion()).to.equal(1); - await vault.finalizeUpgrade_v2(); - expect(await vault.getContractVersion()).to.be.equal(2); + expect(await vault.getContractVersion()).to.equal(2); + await vault.finalizeUpgrade_v3(); + expect(await vault.getContractVersion()).to.be.equal(3); }); }); }); @@ -346,20 +439,24 @@ describe("WithdrawalVault.sol", () => { vault .connect(triggerableWithdrawalsGateway) .addWithdrawalRequests(invalidPubkeyHexString, [1n], { value: fee }), - 
).to.be.revertedWithPanic(1); // assertion + ) + .to.be.revertedWithCustomError(vault, "InvalidPublicKeyLength") + .withArgs(invalidPubkeyHexString[0]); }); it("Should revert if last pubkey not 48 bytes", async function () { const validPubkey = - "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f"; - const invalidPubkey = "1234"; - const pubkeysHexArray = [`0x${validPubkey}`, `0x${invalidPubkey}`]; + "0x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f"; + const invalidPubkey = `0x${"12345".repeat(10)}`; // 50 characters, i.e. 25 bytes + const pubkeysHexArray = [validPubkey, invalidPubkey]; const fee = (await getFee()) * 2n; // 2 requests await expect( vault.connect(triggerableWithdrawalsGateway).addWithdrawalRequests(pubkeysHexArray, [1n, 2n], { value: fee }), - ).to.be.revertedWithPanic(1); // assertion + ) + .to.be.revertedWithCustomError(vault, "InvalidPublicKeyLength") + .withArgs(invalidPubkey); }); it("Should revert if addition fails at the withdrawal request contract", async function () { @@ -582,4 +679,349 @@ describe("WithdrawalVault.sol", () => { }); }); }); + + context("get consolidation request fee", () => { + it("Should get fee from the EIP-7251 contract", async function () { + await consolidationPredeployed.mock__setFee(333n); + expect( + (await vault.getConsolidationRequestFee()) == 333n, + "consolidation request should use fee from the EIP-7251 contract", + ); + }); + + it("Should revert if fee read fails", async function () { + await consolidationPredeployed.mock__setFailOnGetFee(true); + await expect(vault.getConsolidationRequestFee()).to.be.revertedWithCustomError(vault, "FeeReadFailed"); + }); + + ["0x", "0x01", "0x" + "0".repeat(61) + "1", "0x" + "0".repeat(65) + "1"].forEach((unexpectedFee) => { + it(`Should revert if unexpected fee value ${unexpectedFee} is returned`, async function () { + await 
consolidationPredeployed.mock__setFeeRaw(unexpectedFee); + await expect(vault.getConsolidationRequestFee()).to.be.revertedWithCustomError(vault, "FeeInvalidData"); + }); + }); + }); + + async function getConsolidationFee(): Promise { + const fee = await vault.getConsolidationRequestFee(); + + return ethers.parseUnits(fee.toString(), "wei"); + } + + async function getConsolidationPredeployedContractBalance(): Promise { + const contractAddress = await consolidationPredeployed.getAddress(); + return await ethers.provider.getBalance(contractAddress); + } + + context("add consolidation requests", () => { + beforeEach(async () => { + await vault.initialize(); + }); + + it("Should revert if the caller is not Consolidation Gateway", async () => { + await expect( + vault.connect(stranger).addConsolidationRequests(["0x1234"], ["0x5678"]), + ).to.be.revertedWithCustomError(vault, "NotConsolidationGateway"); + }); + + it("Should revert if empty arrays are provided", async function () { + await expect(vault.connect(consolidationGateway).addConsolidationRequests([], [], { value: 1n })) + .to.be.revertedWithCustomError(vault, "ZeroArgument") + .withArgs("sourcePubkeys"); + }); + + it("Should revert if array lengths do not match", async function () { + const requestCount = 2; + const { sourcePubkeysHexArray } = generateConsolidationRequestPayload(requestCount); + const { targetPubkeysHexArray } = generateConsolidationRequestPayload(1); // Only one target pubkey + + const totalConsolidationFee = (await getConsolidationFee()) * BigInt(requestCount); + + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: totalConsolidationFee }), + ) + .to.be.revertedWithCustomError(vault, "ArraysLengthMismatch") + .withArgs(requestCount, targetPubkeysHexArray.length); + + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, [], { value: totalConsolidationFee }), 
+ ) + .to.be.revertedWithCustomError(vault, "ArraysLengthMismatch") + .withArgs(requestCount, 0); + }); + + it("Should revert if not enough fee is sent", async function () { + const { sourcePubkeysHexArray, targetPubkeysHexArray } = generateConsolidationRequestPayload(1); + + await consolidationPredeployed.mock__setFee(3n); // Set fee to 3 gwei + + // 1. Should revert if no fee is sent + await expect( + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray), + ) + .to.be.revertedWithCustomError(vault, "IncorrectFee") + .withArgs(3n, 0); + + // 2. Should revert if fee is less than required + const insufficientFee = 2n; + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: insufficientFee }), + ) + .to.be.revertedWithCustomError(vault, "IncorrectFee") + .withArgs(3n, 2n); + }); + + it("Should revert if source pubkey is not 48 bytes", async function () { + // Invalid source pubkey (only 2 bytes) + const invalidSourcePubkey = "0x1234"; + const validTargetPubkey = "0x" + "5".repeat(96); // 48 bytes + + const fee = await getConsolidationFee(); + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests([invalidSourcePubkey], [validTargetPubkey], { value: fee }), + ) + .to.be.revertedWithCustomError(vault, "InvalidPublicKeyLength") + .withArgs(invalidSourcePubkey); + }); + + it("Should revert if target pubkey is not 48 bytes", async function () { + const validSourcePubkey = "0x" + "1".repeat(96); // 48 bytes + // Invalid target pubkey (only 2 bytes) + const invalidTargetPubkey = "0x5678"; + + const fee = await getConsolidationFee(); + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests([validSourcePubkey], [invalidTargetPubkey], { value: fee }), + ) + .to.be.revertedWithCustomError(vault, "InvalidPublicKeyLength") + .withArgs(invalidTargetPubkey); + }); + + it("Should revert if 
addition fails at the consolidation request contract", async function () { + const { sourcePubkeysHexArray, targetPubkeysHexArray } = generateConsolidationRequestPayload(1); + const fee = await getConsolidationFee(); + + // Set mock to fail on add + await consolidationPredeployed.mock__setFailOnAddRequest(true); + + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: fee }), + ).to.be.revertedWithCustomError(vault, "RequestAdditionFailed"); + }); + + it("Should revert when fee read fails", async function () { + await consolidationPredeployed.mock__setFailOnGetFee(true); + + const { sourcePubkeysHexArray, targetPubkeysHexArray } = generateConsolidationRequestPayload(2); + const fee = 10n; + + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: fee }), + ).to.be.revertedWithCustomError(vault, "FeeReadFailed"); + }); + + it("Should revert when the provided fee exceeds the required amount", async function () { + const requestCount = 3; + const { sourcePubkeysHexArray, targetPubkeysHexArray } = generateConsolidationRequestPayload(requestCount); + + const fee = 3n; + await consolidationPredeployed.mock__setFee(fee); + const consolidationFee = 9n + 1n; // 3 request * 3 gwei (fee) + 1 gwei (extra fee)= 10 gwei + + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: consolidationFee }), + ) + .to.be.revertedWithCustomError(vault, "IncorrectFee") + .withArgs(9n, 10n); + }); + + ["0x", "0x01", "0x" + "0".repeat(61) + "1", "0x" + "0".repeat(65) + "1"].forEach((unexpectedFee) => { + it(`Should revert if unexpected fee value ${unexpectedFee} is returned`, async function () { + await consolidationPredeployed.mock__setFeeRaw(unexpectedFee); + + const { sourcePubkeysHexArray, targetPubkeysHexArray } = 
generateConsolidationRequestPayload(1); + const fee = 10n; + + await expect( + vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: fee }), + ).to.be.revertedWithCustomError(vault, "FeeInvalidData"); + }); + }); + + it("Should accept consolidation requests when the provided fee matches the exact required amount", async function () { + const requestCount = 3; + const { sourcePubkeysHexArray, sourcePubkeys, targetPubkeysHexArray, targetPubkeys } = + generateConsolidationRequestPayload(requestCount); + + const fee = 3n; + await consolidationPredeployed.mock__setFee(3n); + const expectedTotalConsolidationFee = 9n; + + await testEIP7251Mock( + () => + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { + value: expectedTotalConsolidationFee, + }), + sourcePubkeys, + targetPubkeys, + fee, + ); + + // Check extremely high fee + const highFee = ethers.parseEther("10"); + await consolidationPredeployed.mock__setFee(highFee); + const expectedLargeTotalConsolidationFee = ethers.parseEther("30"); + + await testEIP7251Mock( + () => + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { + value: expectedLargeTotalConsolidationFee, + }), + sourcePubkeys, + targetPubkeys, + highFee, + ); + }); + + it("Should emit consolidation event", async function () { + const requestCount = 3; + const { sourcePubkeysHexArray, sourcePubkeys, targetPubkeysHexArray, targetPubkeys } = + generateConsolidationRequestPayload(requestCount); + + const fee = 3n; + await consolidationPredeployed.mock__setFee(fee); + const expectedTotalConsolidationFee = 9n; // 3 requests * 3 gwei (fee) = 9 gwei + + await expect( + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { + value: expectedTotalConsolidationFee, + }), + ) + .to.emit(vault, "ConsolidationRequestAdded") + 
.withArgs(encodeEIP7251Payload(sourcePubkeys[0], targetPubkeys[0])) + .and.to.emit(vault, "ConsolidationRequestAdded") + .withArgs(encodeEIP7251Payload(sourcePubkeys[1], targetPubkeys[1])) + .and.to.emit(vault, "ConsolidationRequestAdded") + .withArgs(encodeEIP7251Payload(sourcePubkeys[2], targetPubkeys[2])); + }); + + it("Should not affect contract balance", async function () { + const requestCount = 3; + const { sourcePubkeysHexArray, sourcePubkeys, targetPubkeysHexArray, targetPubkeys } = + generateConsolidationRequestPayload(requestCount); + + const fee = 3n; + await consolidationPredeployed.mock__setFee(fee); + const expectedTotalConsolidationFee = 9n; // 3 requests * 3 gwei (fee) = 9 gwei + + const initialBalance = await getWithdrawalCredentialsContractBalance(); + + await testEIP7251Mock( + () => + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { + value: expectedTotalConsolidationFee, + }), + sourcePubkeys, + targetPubkeys, + fee, + ); + expect(await getWithdrawalCredentialsContractBalance()).to.equal(initialBalance); + }); + + it("Should transfer the total calculated fee to the EIP-7251 consolidation contract", async function () { + const requestCount = 3; + const { sourcePubkeysHexArray, sourcePubkeys, targetPubkeysHexArray, targetPubkeys } = + generateConsolidationRequestPayload(requestCount); + + const fee = 3n; + await consolidationPredeployed.mock__setFee(3n); + const expectedTotalConsolidationFee = 9n; + + const initialBalance = await getConsolidationPredeployedContractBalance(); + await testEIP7251Mock( + () => + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { + value: expectedTotalConsolidationFee, + }), + sourcePubkeys, + targetPubkeys, + fee, + ); + + expect(await getConsolidationPredeployedContractBalance()).to.equal( + initialBalance + expectedTotalConsolidationFee, + ); + }); + + it("Should ensure consolidation requests are 
encoded as expected with a 96-byte pubkeys ", async function () { + const requestCount = 16; + const { sourcePubkeysHexArray, sourcePubkeys, targetPubkeysHexArray, targetPubkeys } = + generateConsolidationRequestPayload(requestCount); + + const tx = await vault + .connect(consolidationGateway) + .addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { value: 16n }); + + const receipt = await tx.wait(); + + const events = findEIP7251MockEvents(receipt!); + expect(events.length).to.equal(requestCount); + + for (let i = 0; i < requestCount; i++) { + const encodedRequest = events[i].args[0]; + // 0x (2 characters) + 48-byte pubkey (96 characters) + 48-byte pubkey (96 characters) = 194 characters + expect(encodedRequest.length).to.equal(194); + + expect(encodedRequest.slice(0, 2)).to.equal("0x"); + expect(encodedRequest.slice(2, 98)).to.equal(sourcePubkeys[i]); + expect(encodedRequest.slice(98, 194)).to.equal(targetPubkeys[i]); + } + }); + + const testCasesForConsolidationRequests = [ + { requestCount: 1 }, + { requestCount: 3 }, + { requestCount: 7 }, + { requestCount: 10 }, + { requestCount: 100 }, + ]; + + testCasesForConsolidationRequests.forEach(({ requestCount }) => { + it(`Should process ${requestCount} consolidation request(s) successfully`, async function () { + const { sourcePubkeysHexArray, sourcePubkeys, targetPubkeysHexArray, targetPubkeys } = + generateConsolidationRequestPayload(requestCount); + + const fee = 1n; + const expectedTotalConsolidationFee = BigInt(requestCount) * fee; + + await testEIP7251Mock( + () => + vault.connect(consolidationGateway).addConsolidationRequests(sourcePubkeysHexArray, targetPubkeysHexArray, { + value: expectedTotalConsolidationFee, + }), + sourcePubkeys, + targetPubkeys, + fee, + ); + }); + }); + }); }); diff --git a/test/common/contracts/RateLimit__Harness.sol b/test/common/contracts/RateLimit__Harness.sol new file mode 100644 index 0000000000..3c45dc089a --- /dev/null +++ 
b/test/common/contracts/RateLimit__Harness.sol @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: UNLICENSED +// for testing purposes only +pragma solidity 0.8.25; + +import {LimitData, RateLimitStorage, RateLimit} from "contracts/common/lib/RateLimit.sol"; + +contract RateLimitStorage__Harness { + using RateLimitStorage for bytes32; + + bytes32 public constant TEST_POSITION = keccak256("rate.limit.test.position"); + + function getStorageLimit() external view returns (LimitData memory data) { + return TEST_POSITION.getStorageLimit(); + } + + function setStorageLimit(LimitData memory _data) external { + TEST_POSITION.setStorageLimit(_data); + } +} + +contract RateLimit__Harness { + using RateLimit for LimitData; + + LimitData public state; + + function harness_setState( + uint32 maxLimit, + uint32 prevLimit, + uint32 itemsPerFrame, + uint32 frameDurationInSec, + uint32 timestamp + ) external { + state.maxLimit = maxLimit; + state.itemsPerFrame = itemsPerFrame; + state.frameDurationInSec = frameDurationInSec; + state.prevLimit = prevLimit; + state.prevTimestamp = timestamp; + } + + function harness_getState() external view returns (LimitData memory) { + return + LimitData( + state.maxLimit, + state.prevLimit, + state.prevTimestamp, + state.frameDurationInSec, + state.itemsPerFrame + ); + } + + function calculateCurrentLimit(uint256 currentTimestamp) external view returns (uint256) { + return state.calculateCurrentLimit(currentTimestamp); + } + + function updatePrevLimit(uint256 newLimit, uint256 timestamp) external view returns (LimitData memory) { + return state.updatePrevLimit(newLimit, timestamp); + } + + function setLimits( + uint256 maxLimit, + uint256 itemsPerFrame, + uint256 frameDurationInSec, + uint256 timestamp + ) external view returns (LimitData memory) { + return state.setLimits(maxLimit, itemsPerFrame, frameDurationInSec, timestamp); + } + + function isLimitSet() external view returns (bool) { + return state.isLimitSet(); + } +} diff --git 
a/test/common/lib/rateLimit.test.ts b/test/common/lib/rateLimit.test.ts new file mode 100644 index 0000000000..b94eb39f5c --- /dev/null +++ b/test/common/lib/rateLimit.test.ts @@ -0,0 +1,398 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +interface LimitData { + maxLimit: bigint; + prevLimit: bigint; + prevTimestamp: bigint; + frameDurationInSec: bigint; + itemsPerFrame: bigint; +} + +describe("RateLimit.sol", () => { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + let rateLimitStorage: any; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + let rateLimit: any; + + before(async () => { + rateLimitStorage = await ethers.deployContract("RateLimitStorage__Harness"); + rateLimit = await ethers.deployContract("RateLimit__Harness"); + }); + + context("RateLimitStorage", () => { + let data: LimitData; + + it("Min possible values", async () => { + data = { + maxLimit: 0n, + prevLimit: 0n, + prevTimestamp: 0n, + frameDurationInSec: 0n, + itemsPerFrame: 0n, + }; + + await rateLimitStorage.setStorageLimit(data); + + const result = await rateLimitStorage.getStorageLimit(); + expect(result.maxLimit).to.equal(0n); + expect(result.prevLimit).to.equal(0n); + expect(result.prevTimestamp).to.equal(0n); + expect(result.frameDurationInSec).to.equal(0n); + expect(result.itemsPerFrame).to.equal(0n); + }); + + it("Max possible values", async () => { + const MAX_UINT32 = 2n ** 32n - 1n; + + data = { + maxLimit: MAX_UINT32, + prevLimit: MAX_UINT32, + prevTimestamp: MAX_UINT32, + frameDurationInSec: MAX_UINT32, + itemsPerFrame: MAX_UINT32, + }; + + await rateLimitStorage.setStorageLimit(data); + + const result = await rateLimitStorage.getStorageLimit(); + expect(result.maxLimit).to.equal(MAX_UINT32); + expect(result.prevLimit).to.equal(MAX_UINT32); + expect(result.prevTimestamp).to.equal(MAX_UINT32); + expect(result.frameDurationInSec).to.equal(MAX_UINT32); + expect(result.itemsPerFrame).to.equal(MAX_UINT32); + }); + + it("Some 
random values", async () => { + const maxLimit = 100n; + const prevLimit = 9n; + const prevTimestamp = 90n; + const frameDurationInSec = 10n; + const itemsPerFrame = 1n; + + data = { + maxLimit, + prevLimit, + prevTimestamp, + frameDurationInSec, + itemsPerFrame, + }; + + await rateLimitStorage.setStorageLimit(data); + + const result = await rateLimitStorage.getStorageLimit(); + expect(result.maxLimit).to.equal(maxLimit); + expect(result.prevLimit).to.equal(prevLimit); + expect(result.prevTimestamp).to.equal(prevTimestamp); + expect(result.frameDurationInSec).to.equal(frameDurationInSec); + expect(result.itemsPerFrame).to.equal(itemsPerFrame); + }); + }); + + context("RateLimit", () => { + context("calculateCurrentLimit", () => { + beforeEach(async () => { + await rateLimit.harness_setState(0, 0, 0, 0, 0); + }); + + it("should return prevLimit value (nothing restored), if no time passed", async () => { + const timestamp = 1000; + const maxLimit = 10; + const prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, timestamp); + + const result = await rateLimit.calculateCurrentLimit(timestamp); + expect(result).to.equal(prevLimit); + }); + + it("should return prevLimit value (nothing restored), if less than one frame passed", async () => { + const prevTimestamp = 1000; + const maxLimit = 10; + const prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const result = await rateLimit.calculateCurrentLimit(prevTimestamp + 9); + expect(result).to.equal(prevLimit); + }); + + it("Should return prevLimit + 1 (restored one item), if exactly one frame passed", async () => { + const prevTimestamp = 1000; + const maxLimit = 10; + const prevLimit = 5; // remaining 
limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const result = await rateLimit.calculateCurrentLimit(prevTimestamp + frameDurationInSec); + expect(result).to.equal(prevLimit + 1); + }); + + it("Should return prevLimit + restored value, if multiple full frames passed, restored value does not exceed maxLimit", async () => { + const prevTimestamp = 1000; + const maxLimit = 20; + const prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + const result = await rateLimit.calculateCurrentLimit(prevTimestamp + 40); + expect(result).to.equal(prevLimit + 4); + }); + + it("Should return maxLimit, if restored limit exceeds max", async () => { + const prevTimestamp = 1000; + const maxLimit = 100; + const prevLimit = 90; // remaining limit from prev usage + const itemsPerFrame = 3; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const result = await rateLimit.calculateCurrentLimit(prevTimestamp + 100); // 10 frames * 3 = 30 + expect(result).to.equal(maxLimit); + }); + + it("Should return prevLimit, if itemsPerFrame = 0", async () => { + const prevTimestamp = 1000; + const maxLimit = 100; + const prevLimit = 7; // remaining limit from prev usage + const itemsPerFrame = 0; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const result = await rateLimit.calculateCurrentLimit(prevTimestamp + 100); + expect(result).to.equal(7); + }); + + it("non-multiple frame passed (should truncate fractional frame)", async () => { + const prevTimestamp = 1000; + const maxLimit = 20; + const 
prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const result = await rateLimit.calculateCurrentLimit(prevTimestamp + 25); + expect(result).to.equal(7); // 5 + 2 + }); + }); + + context("updatePrevLimit", () => { + beforeEach(async () => { + await rateLimit.harness_setState(0, 0, 0, 0, 0); + }); + + it("should revert with LimitExceeded, if newLimit exceeded maxLimit", async () => { + const prevTimestamp = 1000; + + const maxLimit = 10; + const prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + await expect(rateLimit.updatePrevLimit(11, prevTimestamp + 10)).to.be.revertedWithCustomError( + rateLimit, + "LimitExceeded", + ); + }); + + it("should increase prevTimestamp on frame duration if one frame passed", async () => { + const prevTimestamp = 1000; + + const maxLimit = 10; + const prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const updated = await rateLimit.updatePrevLimit(4, prevTimestamp + 10); + expect(updated.prevLimit).to.equal(4); + expect(updated.prevTimestamp).to.equal(prevTimestamp + 10); + }); + + it("should not change prevTimestamp, as less than frame passed", async () => { + const prevTimestamp = 1000; + const maxLimit = 10; + const prevLimit = 5; // remaining limit from prev usage + const itemsPerFrame = 1; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const updated = await rateLimit.updatePrevLimit(3, prevTimestamp + 9); + 
expect(updated.prevLimit).to.equal(3); + expect(updated.prevTimestamp).to.equal(prevTimestamp); + }); + + it("should increase prevTimestamp on multiple frames value, if multiple frames passed", async () => { + const prevTimestamp = 1000; + const maxLimit = 100; + const prevLimit = 90; // remaining limit from prev usage + const itemsPerFrame = 5; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const updated = await rateLimit.updatePrevLimit(85, prevTimestamp + 45); + expect(updated.prevLimit).to.equal(85); + expect(updated.prevTimestamp).to.equal(prevTimestamp + 40); + }); + + it("should not change prevTimestamp, if no time passed", async () => { + const prevTimestamp = 1000; + const maxLimit = 50; + const prevLimit = 25; // remaining limit from prev usage + const itemsPerFrame = 2; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(maxLimit, prevLimit, itemsPerFrame, frameDurationInSec, prevTimestamp); + + const updated = await rateLimit.updatePrevLimit(20, prevTimestamp); + expect(updated.prevLimit).to.equal(20); + expect(updated.prevTimestamp).to.equal(prevTimestamp); + }); + }); + + context("setLimits", () => { + beforeEach(async () => { + await rateLimit.harness_setState(0, 0, 0, 0, 0); + }); + + it("should initialize limits", async () => { + const timestamp = 1000; + const maxLimit = 100; + const itemsPerFrame = 2; + const frameDurationInSec = 10; + + const result = await rateLimit.setLimits(maxLimit, itemsPerFrame, frameDurationInSec, timestamp); + + expect(result.maxLimit).to.equal(maxLimit); + expect(result.itemsPerFrame).to.equal(itemsPerFrame); + expect(result.frameDurationInSec).to.equal(frameDurationInSec); + expect(result.prevLimit).to.equal(maxLimit); + expect(result.prevTimestamp).to.equal(timestamp); + }); + + it("should set prevLimit to new maxLimit, if new maxLimit is lower than prevLimit", async () => { + const timestamp = 900; + 
const oldMaxLimit = 100; + const prevLimit = 80; + const itemsPerFrame = 2; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(oldMaxLimit, prevLimit, itemsPerFrame, frameDurationInSec, timestamp); + + const newMaxLimit = 50; + const result = await rateLimit.setLimits(newMaxLimit, itemsPerFrame, frameDurationInSec, timestamp); + + expect(result.maxLimit).to.equal(newMaxLimit); + expect(result.prevLimit).to.equal(newMaxLimit); + expect(result.prevTimestamp).to.equal(timestamp); + }); + + it("should not update prevLimit, if new maxLimit is higher", async () => { + const timestamp = 900; + const oldMaxLimit = 100; + const prevLimit = 80; + const itemsPerFrame = 2; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(oldMaxLimit, prevLimit, itemsPerFrame, frameDurationInSec, timestamp); + + const newMaxLimit = 150; + const result = await rateLimit.setLimits(newMaxLimit, itemsPerFrame, frameDurationInSec, timestamp); + + expect(result.maxLimit).to.equal(newMaxLimit); + expect(result.prevLimit).to.equal(prevLimit); + expect(result.prevTimestamp).to.equal(timestamp); + }); + + it("should reset prevLimit if old max was zero", async () => { + const timestamp = 900; + const oldMaxLimit = 0; + const prevLimit = 80; + const itemsPerFrame = 2; + const frameDurationInSec = 10; + + await rateLimit.harness_setState(oldMaxLimit, prevLimit, itemsPerFrame, frameDurationInSec, timestamp); + + const newMaxLimit = 150; + const result = await rateLimit.setLimits(newMaxLimit, itemsPerFrame, frameDurationInSec, timestamp); + + expect(result.maxLimit).to.equal(newMaxLimit); + expect(result.prevLimit).to.equal(newMaxLimit); + expect(result.prevTimestamp).to.equal(timestamp); + }); + + it("should revert if maxLimit is too large", async () => { + const timestamp = 1000; + const maxLimit = 2n ** 32n; // exceeds uint32 max + const itemsPerFrame = 2; + const frameDurationInSec = 10; + + await expect( + rateLimit.setLimits(maxLimit, itemsPerFrame, 
frameDurationInSec, timestamp), + ).to.be.revertedWithCustomError(rateLimit, "TooLargeMaxLimit"); + }); + + it("should revert if itemsPerFrame bigger than maxLimit", async () => { + const timestamp = 1000; + const maxLimit = 10; + const itemsPerFrame = 15; + const frameDurationInSec = 10; + + await expect( + rateLimit.setLimits(maxLimit, itemsPerFrame, frameDurationInSec, timestamp), + ).to.be.revertedWithCustomError(rateLimit, "TooLargeItemsPerFrame"); + }); + + it("should revert if frameDurationInSec is too large", async () => { + const timestamp = 1000; + const maxLimit = 100; + const itemsPerFrame = 2; + const frameDurationInSec = 2n ** 32n; // exceeds uint32 max + + await expect( + rateLimit.setLimits(maxLimit, itemsPerFrame, frameDurationInSec, timestamp), + ).to.be.revertedWithCustomError(rateLimit, "TooLargeFrameDuration"); + }); + + it("should revert if frameDurationInSec is zero", async () => { + const timestamp = 1000; + const maxLimit = 100; + const itemsPerFrame = 2; + const frameDurationInSec = 0; + + await expect( + rateLimit.setLimits(maxLimit, itemsPerFrame, frameDurationInSec, timestamp), + ).to.be.revertedWithCustomError(rateLimit, "ZeroFrameDuration"); + }); + }); + + context("isLimitSet", () => { + it("returns false when maxLimit is 0", async () => { + await rateLimit.harness_setState(0, 10, 1, 10, 1000); + const result = await rateLimit.isLimitSet(); + expect(result).to.be.false; + }); + + it("returns true when maxLimit is non-zero", async () => { + await rateLimit.harness_setState(100, 50, 1, 10, 1000); + const result = await rateLimit.isLimitSet(); + expect(result).to.be.true; + }); + }); + }); +}); diff --git a/test/common/minFirstAllocationStrategy.t.sol b/test/common/minFirstAllocationStrategy.t.sol index 8e46dc729c..05d6044f17 100644 --- a/test/common/minFirstAllocationStrategy.t.sol +++ b/test/common/minFirstAllocationStrategy.t.sol @@ -15,12 +15,18 @@ contract MinFirstAllocationStrategyInvariants is Test { uint256 private constant 
MAX_CAPACITY_VALUE = 8192; uint256 private constant MAX_ALLOCATION_SIZE = 1024; - MinFirstAllocationStrategyBase internal handler; + MinFirstAllocationStrategyAllocateHandler internal handler; MinFirstAllocationStrategy__Harness internal harness; function setUp() external { handler = new MinFirstAllocationStrategyAllocateHandler(); harness = new MinFirstAllocationStrategy__Harness(); + + targetContract(address(handler)); + + bytes4[] memory selectors = new bytes4[](1); + selectors[0] = MinFirstAllocationStrategyAllocateHandler.allocate.selector; + targetSelector(FuzzSelector({addr: address(handler), selectors: selectors})); } function test_allocateToBestCandidate_ReturnsZeroWhenAllocationSizeIsZero() public view { diff --git a/test/deploy/accountingOracle.ts b/test/deploy/accountingOracle.ts index 926c7f5b27..32ef716af6 100644 --- a/test/deploy/accountingOracle.ts +++ b/test/deploy/accountingOracle.ts @@ -17,6 +17,10 @@ import { import { deployHashConsensus } from "./hashConsensus"; import { deployLidoLocator, updateLidoLocatorImplementation } from "./locator"; +import { + MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_01, + MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_02, +} from "./validatorExitBusOracle"; export const ORACLE_LAST_COMPLETED_EPOCH = 2n * EPOCHS_PER_FRAME; export const ORACLE_LAST_REPORT_SLOT = ORACLE_LAST_COMPLETED_EPOCH * SLOTS_PER_EPOCH; @@ -24,8 +28,18 @@ export const ORACLE_LAST_REPORT_SLOT = ORACLE_LAST_COMPLETED_EPOCH * SLOTS_PER_E async function deployMockAccountingAndStakingRouter() { const stakingRouter = await ethers.deployContract("StakingRouter__MockForAccountingOracle"); const withdrawalQueue = await ethers.deployContract("WithdrawalQueue__MockForAccountingOracle"); + const lido = await ethers.deployContract("Lido__MockForAccounting"); const accounting = await ethers.deployContract("Accounting__MockForAccountingOracle"); - return { accounting, stakingRouter, withdrawalQueue }; + + // Initialize Lido mock with reasonable defaults for balance-based 
accounting + await lido.mock__setClValidatorsBalance(300n * 10n ** 18n); // 300 ETH active + await lido.mock__setClPendingBalance(20n * 10n ** 18n); // 20 ETH pending + await lido.mock__setDepositedValidators(10); + // Router mock stores validators balance only; pending balance is seeded on the Lido mock. + await stakingRouter.mock__registerStakingModule(1); + await stakingRouter.reportValidatorBalancesByStakingModule([1], [300n * 10n ** 9n]); + + return { accounting, stakingRouter, withdrawalQueue, lido }; } async function deployMockLazyOracle() { @@ -46,7 +60,7 @@ export async function deployAccountingOracleSetup( ) { const locator = await deployLidoLocator(); const locatorAddr = await locator.getAddress(); - const { accounting, stakingRouter, withdrawalQueue } = await getLidoAndStakingRouter(); + const { accounting, stakingRouter, withdrawalQueue, lido } = await getLidoAndStakingRouter(); const oracle = await ethers.deployContract("AccountingOracle__Harness", [ lidoLocatorAddr || locatorAddr, @@ -71,13 +85,13 @@ export async function deployAccountingOracleSetup( withdrawalQueue: await withdrawalQueue.getAddress(), accountingOracle: accountingOracleAddress, accounting: accountingAddress, + lido: await lido.getAddress(), }); const lazyOracle = await deployMockLazyOracle(); const oracleReportSanityChecker = await deployOracleReportSanityCheckerForAccounting( locatorAddr, - accountingOracleAddress, accountingAddress, admin, ); @@ -94,6 +108,7 @@ export async function deployAccountingOracleSetup( accounting, stakingRouter, withdrawalQueue, + lido, locatorAddr, oracle, consensus, @@ -135,28 +150,26 @@ export async function initAccountingOracle({ return initTx; } -async function deployOracleReportSanityCheckerForAccounting( - lidoLocator: string, - accountingOracle: string, - accounting: string, - admin: string, -) { - const exitedValidatorsPerDayLimit = 55; - const appearedValidatorsPerDayLimit = 100; +async function 
deployOracleReportSanityCheckerForAccounting(lidoLocator: string, accounting: string, admin: string) { + const exitedEthAmountPerDayLimit = 65_535n; + const appearedEthAmountPerDayLimit = 65_535n; return await ethers.getContractFactory("OracleReportSanityChecker").then((f) => - f.deploy(lidoLocator, accountingOracle, accounting, admin, { - exitedValidatorsPerDayLimit, - appearedValidatorsPerDayLimit, + f.deploy(lidoLocator, accounting, admin, { + exitedEthAmountPerDayLimit, + appearedEthAmountPerDayLimit, annualBalanceIncreaseBPLimit: 0n, simulatedShareRateDeviationBPLimit: 0n, - maxValidatorExitRequestsPerReport: 32n * 12n, + maxBalanceExitRequestedPerReportInEth: 65_535n, // Max uint16 (65,535 ETH) + maxEffectiveBalanceWeightWCType01: MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_01, + maxEffectiveBalanceWeightWCType02: MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_02, maxItemsPerExtraDataTransaction: 15n, maxNodeOperatorsPerExtraDataItem: 16n, requestTimestampMargin: 0n, - maxPositiveTokenRebase: 0n, - initialSlashingAmountPWei: 0n, - inactivityPenaltiesAmountPWei: 0n, + maxPositiveTokenRebase: 1n, + maxCLBalanceDecreaseBP: 360n, clBalanceOraclesErrorUpperBPLimit: 0n, + consolidationEthAmountPerDayLimit: 0n, + exitedValidatorEthAmountLimit: 1n, }), ); } diff --git a/test/deploy/dao.ts b/test/deploy/dao.ts index 70e18dc012..c5cfebfe8c 100644 --- a/test/deploy/dao.ts +++ b/test/deploy/dao.ts @@ -62,7 +62,9 @@ export async function addAragonApp({ dao, name, impl, rootAccount }: CreateAddAp export async function deployLidoDao({ rootAccount, initialized, locatorConfig = {} }: DeployLidoDaoArgs) { const { dao, acl } = await createAragonDao(rootAccount); - const impl = await ethers.deployContract("Lido", rootAccount); + const impl = await ethers.deployContract("Lido", { + signer: rootAccount, + }); const lidoProxyAddress = await addAragonApp({ dao, @@ -85,7 +87,9 @@ export async function deployLidoDao({ rootAccount, initialized, locatorConfig = export async function 
deployLidoDaoForNor({ rootAccount, initialized, locatorConfig = {} }: DeployLidoDaoArgs) { const { dao, acl } = await createAragonDao(rootAccount); - const impl = await ethers.deployContract("Lido__HarnessForDistributeReward", rootAccount); + const impl = await ethers.deployContract("Lido__HarnessForDistributeReward", { + signer: rootAccount, + }); const lidoProxyAddress = await addAragonApp({ dao, diff --git a/test/deploy/index.ts b/test/deploy/index.ts index 5b35dfceb8..85ab73b69e 100644 --- a/test/deploy/index.ts +++ b/test/deploy/index.ts @@ -6,3 +6,4 @@ export * from "./hashConsensus"; export * from "./withdrawalQueue"; export * from "./validatorExitBusOracle"; export * from "./vaults"; +export * from "./stakingRouter"; diff --git a/test/deploy/locator.ts b/test/deploy/locator.ts index d5fcadd5ee..9b152fb8f9 100644 --- a/test/deploy/locator.ts +++ b/test/deploy/locator.ts @@ -29,6 +29,7 @@ async function deployDummyLocator(config?: Partial, de oracleDaemonConfig: certainAddress("dummy-locator:oracleDaemonConfig"), validatorExitDelayVerifier: certainAddress("dummy-locator:validatorExitDelayVerifier"), triggerableWithdrawalsGateway: certainAddress("dummy-locator:triggerableWithdrawalsGateway"), + consolidationGateway: certainAddress("dummy-locator:consolidationGateway"), accounting: certainAddress("dummy-locator:accounting"), predepositGuarantee: certainAddress("dummy-locator:predepositGuarantee"), wstETH: certainAddress("dummy-locator:wstETH"), @@ -36,6 +37,7 @@ async function deployDummyLocator(config?: Partial, de vaultFactory: certainAddress("dummy-locator:vaultFactory"), operatorGrid: certainAddress("dummy-locator:operatorGrid"), lazyOracle: certainAddress("dummy-locator:lazyOracle"), + topUpGateway: certainAddress("dummy-locator:topUpGateway"), ...config, }); @@ -111,6 +113,7 @@ async function getLocatorConfig(locatorAddress: string): Promise[]; const configPromises = addresses.map((name) => locator[name]()); diff --git a/test/deploy/stakingRouter.ts 
b/test/deploy/stakingRouter.ts new file mode 100644 index 0000000000..0f241b2754 --- /dev/null +++ b/test/deploy/stakingRouter.ts @@ -0,0 +1,80 @@ +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { + BeaconChainDepositor, + DepositContract__MockForBeaconChainDepositor, + Lido__MockForStakingRouter, + LidoLocator, + StakingRouter__Harness, +} from "typechain-types"; + +import { MAX_EFFECTIVE_BALANCE_WC_TYPE_01, MAX_EFFECTIVE_BALANCE_WC_TYPE_02, proxify } from "lib"; + +import { deployLidoLocator } from "test/deploy"; + +export interface DeployStakingRouterSigners { + deployer: HardhatEthersSigner; + admin: HardhatEthersSigner; + user?: HardhatEthersSigner; +} + +export interface DeployStakingRouterParams { + depositContract?: DepositContract__MockForBeaconChainDepositor; + lido?: Lido__MockForStakingRouter; + lidoLocator?: LidoLocator; + maxEBType1?: bigint; + maxEBType2?: bigint; +} + +export async function deployStakingRouter( + { deployer, admin, user }: DeployStakingRouterSigners, + { + depositContract, + lido, + lidoLocator, + maxEBType1 = MAX_EFFECTIVE_BALANCE_WC_TYPE_01, + maxEBType2 = MAX_EFFECTIVE_BALANCE_WC_TYPE_02, + }: DeployStakingRouterParams = {}, +): Promise<{ + depositContract: DepositContract__MockForBeaconChainDepositor; + stakingRouter: StakingRouter__Harness; + impl: StakingRouter__Harness; + beaconChainDepositor: BeaconChainDepositor; +}> { + if (!depositContract) { + depositContract = await ethers.deployContract("DepositContract__MockForBeaconChainDepositor"); + } + + if (!lido) { + lido = await ethers.deployContract("Lido__MockForStakingRouter", deployer); + } + + if (!lidoLocator) { + lidoLocator = await deployLidoLocator({ lido }); + } + + const beaconChainDepositor = await ethers.deployContract("BeaconChainDepositor", deployer); + const allocLib = await ethers.deployContract("MinFirstAllocationStrategy", deployer); + const srLib = await 
ethers.deployContract("SRLib", { + signer: deployer, + libraries: { + ["contracts/common/lib/MinFirstAllocationStrategy.sol:MinFirstAllocationStrategy"]: await allocLib.getAddress(), + }, + }); + const stakingRouterFactory = await ethers.getContractFactory("StakingRouter__Harness", { + signer: deployer, + libraries: { + ["contracts/0.8.25/lib/BeaconChainDepositor.sol:BeaconChainDepositor"]: await beaconChainDepositor.getAddress(), + ["contracts/0.8.25/sr/SRLib.sol:SRLib"]: await srLib.getAddress(), + }, + }); + + const impl = await stakingRouterFactory + .connect(deployer) + .deploy(depositContract, lido, lidoLocator, maxEBType1, maxEBType2); + const [stakingRouter] = await proxify({ impl, admin, caller: user }); + + return { stakingRouter, depositContract, impl, beaconChainDepositor }; +} diff --git a/test/deploy/validatorExitBusOracle.ts b/test/deploy/validatorExitBusOracle.ts index dda2368bbb..39bb6c9a75 100644 --- a/test/deploy/validatorExitBusOracle.ts +++ b/test/deploy/validatorExitBusOracle.ts @@ -1,7 +1,12 @@ import { expect } from "chai"; import { ethers } from "hardhat"; -import { HashConsensus__Harness, ReportProcessor__Mock, ValidatorsExitBusOracle } from "typechain-types"; +import { + HashConsensus__Harness, + ReportProcessor__Mock, + StakingModule__MockForKeyVerification, + ValidatorsExitBusOracle, +} from "typechain-types"; import { EPOCHS_PER_FRAME, @@ -16,6 +21,11 @@ import { deployHashConsensus } from "./hashConsensus"; import { deployLidoLocator, updateLidoLocatorImplementation } from "./locator"; export const DATA_FORMAT_LIST = 1; +export const DATA_FORMAT_LIST_WITH_KEY_INDEX = 2; + +// MaxEB weights (in ETH) +export const MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_01 = 32n; // 32 ETH for WC 0x01 validators +export const MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_02 = 2048n; // 2048 ETH for 0x02 validators async function deployMockAccountingOracle(secondsPerSlot = SECONDS_PER_SLOT, genesisTime = GENESIS_TIME) { const lido = await 
ethers.deployContract("Accounting__MockForAccountingOracle"); @@ -27,26 +37,24 @@ async function deployMockAccountingOracle(secondsPerSlot = SECONDS_PER_SLOT, gen return { ao, lido }; } -async function deployOracleReportSanityCheckerForExitBus( - lidoLocator: string, - accountingOracle: string, - accounting: string, - admin: string, -) { +async function deployOracleReportSanityCheckerForExitBus(lidoLocator: string, accounting: string, admin: string) { return await ethers.getContractFactory("OracleReportSanityChecker").then((f) => - f.deploy(lidoLocator, accountingOracle, accounting, admin, { - exitedValidatorsPerDayLimit: 0n, - appearedValidatorsPerDayLimit: 0n, + f.deploy(lidoLocator, accounting, admin, { + exitedEthAmountPerDayLimit: 0n, + appearedEthAmountPerDayLimit: 0n, annualBalanceIncreaseBPLimit: 0n, simulatedShareRateDeviationBPLimit: 0n, - maxValidatorExitRequestsPerReport: 2000, + maxBalanceExitRequestedPerReportInEth: 65_535n, // Max uint16 (65,535 ETH) + maxEffectiveBalanceWeightWCType01: MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_01, + maxEffectiveBalanceWeightWCType02: MAX_EFFECTIVE_BALANCE_WEIGHT_WC_TYPE_02, maxItemsPerExtraDataTransaction: 0n, maxNodeOperatorsPerExtraDataItem: 0n, requestTimestampMargin: 0n, - maxPositiveTokenRebase: 0n, - initialSlashingAmountPWei: 0n, - inactivityPenaltiesAmountPWei: 0n, + maxPositiveTokenRebase: 1n, + maxCLBalanceDecreaseBP: 360n, clBalanceOraclesErrorUpperBPLimit: 0n, + consolidationEthAmountPerDayLimit: 0n, + exitedValidatorEthAmountLimit: 1n, }), ); } @@ -68,6 +76,48 @@ export async function deployVEBO( const locator = await deployLidoLocator(); const locatorAddr = await locator.getAddress(); + // Deploy mock StakingRouter with default module configurations + const stakingRouter = await ethers.deployContract("StakingRouter__MockForValidatorsExitBus"); + const stakingRouterAddr = await stakingRouter.getAddress(); + + // Configure default modules: + // Module 1: Legacy (0x01) - 32 ETH validators + await 
stakingRouter.setStakingModuleWithdrawalCredentialsType(1, 0x01); + // Modules 2, 3, 4, 5, 7: MaxEB (0x02) - 2048 ETH validators + await stakingRouter.setStakingModuleWithdrawalCredentialsType(2, 0x02); + await stakingRouter.setStakingModuleWithdrawalCredentialsType(3, 0x02); + await stakingRouter.setStakingModuleWithdrawalCredentialsType(4, 0x02); + await stakingRouter.setStakingModuleWithdrawalCredentialsType(5, 0x02); + await stakingRouter.setStakingModuleWithdrawalCredentialsType(7, 0x02); + // Modules 100, 101: Used in tests - configure as Legacy (0x01) + await stakingRouter.setStakingModuleWithdrawalCredentialsType(100, 0x01); + await stakingRouter.setStakingModuleWithdrawalCredentialsType(101, 0x01); + + // Deploy universal mock modules for key verification (Format 2 testing) + // These mocks return requested keys and work for both legacy and new interfaces + const mockModule1 = await ethers.deployContract("StakingModule__MockForKeyVerification"); + const mockModule2 = await ethers.deployContract("StakingModule__MockForKeyVerification"); + const mockModule3 = await ethers.deployContract("StakingModule__MockForKeyVerification"); + const mockModule4 = await ethers.deployContract("StakingModule__MockForKeyVerification"); + const mockModule5 = await ethers.deployContract("StakingModule__MockForKeyVerification"); + const mockModule7 = await ethers.deployContract("StakingModule__MockForKeyVerification"); + + await stakingRouter.setStakingModuleAddress(1, await mockModule1.getAddress()); + await stakingRouter.setStakingModuleAddress(2, await mockModule2.getAddress()); + await stakingRouter.setStakingModuleAddress(3, await mockModule3.getAddress()); + await stakingRouter.setStakingModuleAddress(4, await mockModule4.getAddress()); + await stakingRouter.setStakingModuleAddress(5, await mockModule5.getAddress()); + await stakingRouter.setStakingModuleAddress(7, await mockModule7.getAddress()); + + await updateLidoLocatorImplementation(locatorAddr, { + stakingRouter: 
stakingRouterAddr, + }); + + // Deploy mock NodeOperatorsRegistry + // In permissive mode (default), it returns empty keys which causes ValidatorsExitBus + // to skip validation. Tests can explicitly configure keys if needed. + const nodeOperatorsRegistry = await ethers.deployContract("NodeOperatorsRegistry__Mock"); + const oracle = await ethers.deployContract("ValidatorsExitBus__Harness", [secondsPerSlot, genesisTime, locatorAddr]); const { consensus } = await deployHashConsensus(admin, { @@ -91,7 +141,6 @@ export async function deployVEBO( const oracleReportSanityChecker = await deployOracleReportSanityCheckerForExitBus( locatorAddr, - accountingOracleAddress, accountingAddress, admin, ); @@ -111,9 +160,61 @@ export async function deployVEBO( consensus, oracleReportSanityChecker, triggerableWithdrawalsGateway, + nodeOperatorsRegistry, + stakingRouter, + mockModules: { + module1: mockModule1, + module2: mockModule2, + module3: mockModule3, + module4: mockModule4, + module5: mockModule5, + module7: mockModule7, + }, }; } +// Derive the same 48-byte pubkey as StakingModule__MockForKeyVerification fallback: +// pubkey = keccak(nodeOpId, keyIndex) || first 16 bytes of keccak(nodeOpId, keyIndex, 1) +export function makeMockPubkey(nodeOpId: number | bigint, keyIndex: number | bigint): string { + const hash1 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["uint256", "uint256"], [nodeOpId, keyIndex]), + ); + const hash2 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode(["uint256", "uint256", "uint256"], [nodeOpId, keyIndex, 1]), + ); + return ("0x" + hash1.slice(2) + hash2.slice(2)).slice(0, 2 + 96); +} + +// Seed StakingModule__MockForKeyVerification instances with signing keys matching requests +export async function seedMockModuleSigningKeys( + mockModules: { + module1: StakingModule__MockForKeyVerification; + module2: StakingModule__MockForKeyVerification; + module3: StakingModule__MockForKeyVerification; + module4: 
StakingModule__MockForKeyVerification; + module5: StakingModule__MockForKeyVerification; + module7: StakingModule__MockForKeyVerification; + }, + requests: { moduleId: number; nodeOpId: number; keyIndex?: number; valIndex: number; valPubkey?: string }[], +) { + const modMap: Record = { + 1: mockModules.module1, + 2: mockModules.module2, + 3: mockModules.module3, + 4: mockModules.module4, + 5: mockModules.module5, + 7: mockModules.module7, + }; + + for (const r of requests) { + const mod = modMap[r.moduleId]; + if (!mod || !mod.setSigningKey) continue; + const keyIdx = r.keyIndex ?? r.valIndex; + const pubkey = r.valPubkey ?? makeMockPubkey(r.nodeOpId, keyIdx); + await mod.setSigningKey(r.nodeOpId, keyIdx, pubkey); + } +} + interface VEBOConfig { admin: string; oracle: ValidatorsExitBusOracle; @@ -123,8 +224,8 @@ interface VEBOConfig { lastProcessingRefSlot?: number; resumeAfterDeploy?: boolean; maxRequestsPerBatch?: number; - maxExitRequestsLimit?: number; - exitsPerFrame?: number; + maxExitBalanceEth?: bigint; + balancePerFrameEth?: bigint; frameDurationInSec?: number; } @@ -137,8 +238,8 @@ export async function initVEBO({ lastProcessingRefSlot = 0, resumeAfterDeploy = false, maxRequestsPerBatch = 600, - maxExitRequestsLimit = 13000, - exitsPerFrame = 1, + maxExitBalanceEth = 13_000n, // 13,000 ETH + balancePerFrameEth = 32n, // 32 ETH (1 legacy validator per frame) frameDurationInSec = 48, }: VEBOConfig) { const initTx = await oracle.initialize( @@ -147,8 +248,8 @@ export async function initVEBO({ consensusVersion, lastProcessingRefSlot, maxRequestsPerBatch, - maxExitRequestsLimit, - exitsPerFrame, + maxExitBalanceEth, + balancePerFrameEth, frameDurationInSec, ); diff --git a/test/integration/consolidation/consolidation-gas.integration.ts b/test/integration/consolidation/consolidation-gas.integration.ts new file mode 100644 index 0000000000..cf9c947e31 --- /dev/null +++ b/test/integration/consolidation/consolidation-gas.integration.ts @@ -0,0 +1,278 @@ +import { 
expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { ConsolidationBus, ConsolidationGateway, ConsolidationMigrator, NodeOperatorsRegistry } from "typechain-types"; + +import { addressToWC, certainAddress } from "lib"; +import { LocalMerkleTree, prepareLocalMerkleTree } from "lib/pdg"; +import { getProtocolContext, ProtocolContext } from "lib/protocol"; +import { + depositAndReportValidators, + norSdvtAddNodeOperator, + norSdvtAddOperatorKeys, + norSdvtSetOperatorStakingLimit, +} from "lib/protocol/helpers"; +import { NOR_MODULE_ID } from "lib/protocol/helpers/staking-module"; +import { LoadedContract } from "lib/protocol/types"; + +import { Snapshot } from "test/suite"; + +/** + * Gas measurement integration test for consolidation (full stack, no mocks). + * Uses: ConsolidationMigrator → ConsolidationBus → ConsolidationGateway → WithdrawalVault + * + * Results for batch of 5 x 63 requests: + * ┌──────────────────────────┬─────────────┐ + * │ Operation │ Gas │ + * ├──────────────────────────┼─────────────┤ + * │ submitConsolidationBatch │ 7,941,893 │ + * │ executeConsolidation │ 6,463,147 │ + * │ Total │ 14,405,040 │ + * │ Per request │ 45,730 │ + * └──────────────────────────┴─────────────┘ + */ +describe("Integration: Consolidation gas measurement (full stack via Migrator)", () => { + let ctx: ProtocolContext; + let nor: LoadedContract; + let consolidationBus: ConsolidationBus; + let consolidationGateway: ConsolidationGateway; + let consolidationMigrator: ConsolidationMigrator; + + let submitter: HardhatEthersSigner; + let executor: HardhatEthersSigner; + + const MAX_BLOCK_GAS = 16_000_000n; + const NUM_GROUPS = 5; + const REQUESTS_PER_GROUP = 63; + const TOTAL_REQUESTS = NUM_GROUPS * REQUESTS_PER_GROUP; // 315 + const TOTAL_SOURCE_KEYS = BigInt(TOTAL_REQUESTS); // 315 + const TOTAL_TARGET_KEYS = BigInt(NUM_GROUPS); // 5 + + const FAR_FUTURE_EPOCH = 2n ** 64n - 1n; + 
+ let sourceOperatorId: bigint; + let targetOperatorId: bigint; + + // Source pubkeys grouped: 5 groups × 63 pubkeys + let sourcePubkeysGroups: string[][]; + // Target pubkeys: 5 + let targetPubkeys: string[]; + + // Key index groups for submitConsolidationBatch + let consolidationIndexGroups: { sourceKeyIndices: bigint[]; targetKeyIndex: bigint }[]; + + let originalState: string; + + before(async () => { + ctx = await getProtocolContext(); + + // Take snapshot before any modifications to restore clean state for other tests + originalState = await Snapshot.take(); + + [, submitter, executor] = await ethers.getSigners(); + + nor = ctx.contracts.nor; + consolidationBus = ctx.contracts.consolidationBus; + consolidationGateway = ctx.contracts.consolidationGateway; + consolidationMigrator = ctx.contracts.consolidationMigrator; + + const agentSigner = await ctx.getSigner("agent"); + + // ========================================= + // Deposit all existing depositable validators first to clear them + // ========================================= + const { stakingRouter } = ctx.contracts; + const existingDepositable = await stakingRouter.getStakingModuleMaxDepositsCount( + NOR_MODULE_ID, + await ctx.contracts.lido.getDepositableEther(), + ); + if (existingDepositable > 0n) { + const DEPOSIT_BATCH = 50n; + for (let deposited = 0n; deposited < existingDepositable; deposited += DEPOSIT_BATCH) { + const batch = deposited + DEPOSIT_BATCH > existingDepositable ? 
existingDepositable - deposited : DEPOSIT_BATCH; + await depositAndReportValidators(ctx, NOR_MODULE_ID, batch); + } + } + + // ========================================= + // Setup source operator with deposited keys + // ========================================= + sourceOperatorId = await norSdvtAddNodeOperator(ctx, nor, { + name: "gas_test_source_operator", + rewardAddress: certainAddress("gas:source:reward"), + }); + + // Add keys in batches to avoid exceeding block gas limit + const KEYS_BATCH = 100n; + for (let added = 0n; added < TOTAL_SOURCE_KEYS; added += KEYS_BATCH) { + const batch = added + KEYS_BATCH > TOTAL_SOURCE_KEYS ? TOTAL_SOURCE_KEYS - added : KEYS_BATCH; + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: sourceOperatorId, + keysToAdd: batch, + }); + } + + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: sourceOperatorId, + limit: TOTAL_SOURCE_KEYS, + }); + + // Deposit source keys in batches + const DEPOSIT_BATCH = 50n; + for (let deposited = 0n; deposited < TOTAL_SOURCE_KEYS; deposited += DEPOSIT_BATCH) { + const batch = deposited + DEPOSIT_BATCH > TOTAL_SOURCE_KEYS ? 
TOTAL_SOURCE_KEYS - deposited : DEPOSIT_BATCH; + await depositAndReportValidators(ctx, NOR_MODULE_ID, batch); + } + + // ========================================= + // Setup target operator with deposited keys + // ========================================= + targetOperatorId = await norSdvtAddNodeOperator(ctx, nor, { + name: "gas_test_target_operator", + rewardAddress: certainAddress("gas:target:reward"), + }); + + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: targetOperatorId, + keysToAdd: TOTAL_TARGET_KEYS, + }); + + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: targetOperatorId, + limit: TOTAL_TARGET_KEYS, + }); + + await depositAndReportValidators(ctx, NOR_MODULE_ID, TOTAL_TARGET_KEYS); + + // ========================================= + // Retrieve pubkeys from NOR + // ========================================= + sourcePubkeysGroups = []; + consolidationIndexGroups = []; + for (let g = 0; g < NUM_GROUPS; g++) { + const group: string[] = []; + const indices: bigint[] = []; + for (let r = 0; r < REQUESTS_PER_GROUP; r++) { + const keyIndex = g * REQUESTS_PER_GROUP + r; + const key = await nor.getSigningKey(sourceOperatorId, keyIndex); + expect(key.used).to.be.true; + group.push(key.key); + indices.push(BigInt(keyIndex)); + } + sourcePubkeysGroups.push(group); + consolidationIndexGroups.push({ sourceKeyIndices: indices, targetKeyIndex: BigInt(g) }); + } + + targetPubkeys = []; + for (let t = 0; t < NUM_GROUPS; t++) { + const key = await nor.getSigningKey(targetOperatorId, t); + expect(key.used).to.be.true; + targetPubkeys.push(key.key); + } + + // ========================================= + // Setup roles and limits + // ========================================= + + // Allow pair in ConsolidationMigrator + const ALLOW_PAIR_ROLE = await consolidationMigrator.ALLOW_PAIR_ROLE(); + const DISALLOW_PAIR_ROLE = await consolidationMigrator.DISALLOW_PAIR_ROLE(); + await consolidationMigrator.connect(agentSigner).grantRole(ALLOW_PAIR_ROLE, 
agentSigner.address); + await consolidationMigrator.connect(agentSigner).grantRole(DISALLOW_PAIR_ROLE, agentSigner.address); + await consolidationMigrator.connect(agentSigner).allowPair(sourceOperatorId, targetOperatorId, submitter.address); + + // Increase ConsolidationBus batch size to accommodate 315 requests in 5 groups + const MANAGE_ROLE = await consolidationBus.MANAGE_ROLE(); + await consolidationBus.connect(agentSigner).grantRole(MANAGE_ROLE, agentSigner.address); + await consolidationBus.connect(agentSigner).setBatchSize(TOTAL_REQUESTS); + + // Set rate limit high enough for all requests + const EXIT_LIMIT_MANAGER_ROLE = await consolidationGateway.EXIT_LIMIT_MANAGER_ROLE(); + await ( + await consolidationGateway.connect(agentSigner).grantRole(EXIT_LIMIT_MANAGER_ROLE, agentSigner.address) + ).wait(); + await ( + await consolidationGateway.connect(agentSigner).setConsolidationRequestLimit(TOTAL_REQUESTS, TOTAL_REQUESTS, 1) + ).wait(); + + // Advance time by 1 second so the rate limit replenishes to maxLimit + await ethers.provider.send("evm_increaseTime", [1]); + await ethers.provider.send("evm_mine", []); + }); + + after(async () => await Snapshot.restore(originalState)); + + it(`should execute batch of ${NUM_GROUPS} x ${REQUESTS_PER_GROUP} (${TOTAL_REQUESTS}) requests within gas limit`, async () => { + // Build merkle tree witnesses for target pubkeys + const merkleTree: LocalMerkleTree = await prepareLocalMerkleTree(); + + const validatorIndices: number[] = []; + const withdrawalCredentials = addressToWC(await ctx.contracts.withdrawalVault.getAddress(), 2); + for (const pubkey of targetPubkeys) { + const { validatorIndex } = await merkleTree.addValidator({ + pubkey, + withdrawalCredentials, + effectiveBalance: 32_000_000_000n, + slashed: false, + activationEligibilityEpoch: 0, + activationEpoch: 0, + exitEpoch: FAR_FUTURE_EPOCH, + withdrawableEpoch: FAR_FUTURE_EPOCH, + }); + validatorIndices.push(validatorIndex); + } + + const { childBlockTimestamp, 
beaconBlockHeader } = await merkleTree.commitChangesToBeaconRoot(); + + const targetWitnesses = await Promise.all( + targetPubkeys.map(async (pubkey, i) => ({ + proof: await merkleTree.buildProof(validatorIndices[i], beaconBlockHeader), + pubkey, + validatorIndex: validatorIndices[i], + childBlockTimestamp, + slot: beaconBlockHeader.slot, + proposerIndex: beaconBlockHeader.proposerIndex, + })), + ); + + // Submit batch via ConsolidationMigrator → ConsolidationBus + const submitTx = await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, consolidationIndexGroups); + const submitReceipt = await submitTx.wait(); + + // Get fee from real WithdrawalVault + const { withdrawalVault } = ctx.contracts; + const fee = await withdrawalVault.getConsolidationRequestFee(); + const totalFee = fee * BigInt(TOTAL_REQUESTS); + + // Execute batch through full stack - build ConsolidationWitnessGroup array + const consolidationWitnessGroups = sourcePubkeysGroups.map((sourcePubkeys, i) => ({ + sourcePubkeys, + targetWitness: targetWitnesses[i], + })); + + const executeTx = await consolidationBus.connect(executor).executeConsolidation(consolidationWitnessGroups, { + value: totalFee, + }); + const executeReceipt = await executeTx.wait(); + + // Gas assertions + expect(submitReceipt!.gasUsed).to.be.lessThan(MAX_BLOCK_GAS); + expect(executeReceipt!.gasUsed).to.be.lessThan(MAX_BLOCK_GAS); + + // Log gas usage + const submitGas = submitReceipt!.gasUsed; + const execGas = executeReceipt!.gasUsed; + const totalGas = submitGas + execGas; + const perRequest = totalGas / BigInt(TOTAL_REQUESTS); + + console.log(`\n Gas usage for ${NUM_GROUPS} x ${REQUESTS_PER_GROUP} (${TOTAL_REQUESTS}) requests:`); + console.log(` submitConsolidationBatch: ${Number(submitGas).toLocaleString()}`); + console.log(` executeConsolidation: ${Number(execGas).toLocaleString()}`); + console.log(` Total: ${Number(totalGas).toLocaleString()}`); + console.log(` Per 
request: ${Number(perRequest).toLocaleString()}`); + }); +}); diff --git a/test/integration/consolidation/consolidation-migration.integration.ts b/test/integration/consolidation/consolidation-migration.integration.ts new file mode 100644 index 0000000000..46f77d5e50 --- /dev/null +++ b/test/integration/consolidation/consolidation-migration.integration.ts @@ -0,0 +1,931 @@ +import { expect } from "chai"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { ConsolidationBus, ConsolidationGateway, ConsolidationMigrator, NodeOperatorsRegistry } from "typechain-types"; + +import { addressToWC, certainAddress, findEventsWithInterfaces } from "lib"; +import { LocalMerkleTree, prepareLocalMerkleTree } from "lib/pdg"; +import { getProtocolContext, ProtocolContext } from "lib/protocol"; +import { + depositAndReportValidators, + norSdvtAddNodeOperator, + norSdvtAddOperatorKeys, + norSdvtSetOperatorStakingLimit, +} from "lib/protocol/helpers"; +import { NOR_MODULE_ID } from "lib/protocol/helpers/staking-module"; +import { LoadedContract } from "lib/protocol/types"; + +import { Snapshot } from "test/suite"; + +const fakeWitnessForTarget = (pubkey: string) => ({ + proof: [], + pubkey, + validatorIndex: 0, + childBlockTimestamp: 0, + slot: 0, + proposerIndex: 0, +}); + +/** + * Integration test for the full consolidation migration flow using real NOR modules. + * + * The flow tested: + * 1. ConsolidationMigrator validates source/target keys and submits to ConsolidationBus + * 2. ConsolidationBus stores the batch for later execution + * 3. Executor calls executeConsolidation on ConsolidationBus + * 4. ConsolidationBus forwards to ConsolidationGateway + * 5. ConsolidationGateway forwards to WithdrawalVault + * 6. 
WithdrawalVault processes EIP-7251 consolidation requests + */ +describe("Integration: Consolidation Migration Flow (Real NOR)", () => { + let ctx: ProtocolContext; + let nor: LoadedContract; + let consolidationGateway: ConsolidationGateway; + let consolidationBus: ConsolidationBus; + let consolidationMigrator: ConsolidationMigrator; + + let executor: HardhatEthersSigner; + let submitter: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + // Operator IDs will be assigned during setup + let sourceOperatorId: bigint; + let targetOperatorId: bigint; + + // Pubkeys will be retrieved from real NOR + let SOURCE_PUBKEY_1: string; + let SOURCE_PUBKEY_2: string; + let TARGET_PUBKEY_1: string; + let TARGET_PUBKEY_2: string; + + let merkleTree: LocalMerkleTree; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + let targetWitness1: any; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + let targetWitness2: any; + + let globalSnapshot: string; + let testSnapshot: string; + + before(async () => { + ctx = await getProtocolContext(); + [, executor, submitter, stranger] = await ethers.getSigners(); + + // Get real contracts from protocol context + nor = ctx.contracts.nor; + consolidationGateway = ctx.contracts.consolidationGateway; + consolidationBus = ctx.contracts.consolidationBus; + consolidationMigrator = ctx.contracts.consolidationMigrator; + + const agentSigner = await ctx.getSigner("agent"); + + // ========================================= + // Setup source operator with deposited keys + // ========================================= + + // Create source operator + sourceOperatorId = await norSdvtAddNodeOperator(ctx, nor, { + name: "consolidation_source_operator", + rewardAddress: certainAddress("consolidation:source:reward"), + }); + + // Add signing keys to source operator + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: sourceOperatorId, + keysToAdd: 5n, + }); + + // Set staking limit to vet the keys + await 
norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: sourceOperatorId, + limit: 5n, + }); + + // Deposit validators to make keys "used" + await depositAndReportValidators(ctx, NOR_MODULE_ID, 2n); + + // ========================================= + // Setup target operator with deposited keys (active validators) + // Per EIP-7251, consolidation can only happen TO active validators + // ========================================= + + // Create target operator + targetOperatorId = await norSdvtAddNodeOperator(ctx, nor, { + name: "consolidation_target_operator", + rewardAddress: certainAddress("consolidation:target:reward"), + }); + + // Add signing keys to target operator + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: targetOperatorId, + keysToAdd: 5n, + }); + + // Set staking limit to vet the keys + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: targetOperatorId, + limit: 5n, + }); + + // Deposit validators to make target keys "used" (active validators) + await depositAndReportValidators(ctx, NOR_MODULE_ID, 2n); + + // ========================================= + // Retrieve pubkeys from real NOR + // ========================================= + + // Get source pubkeys (these are deposited/used) + const sourceKey1 = await nor.getSigningKey(sourceOperatorId, 0); + const sourceKey2 = await nor.getSigningKey(sourceOperatorId, 1); + SOURCE_PUBKEY_1 = sourceKey1.key; + SOURCE_PUBKEY_2 = sourceKey2.key; + + // Verify source keys are used (deposited) + expect(sourceKey1.used).to.be.true; + expect(sourceKey2.used).to.be.true; + + // Get target pubkeys (these are deposited - active validators) + const targetKey1 = await nor.getSigningKey(targetOperatorId, 0); + const targetKey2 = await nor.getSigningKey(targetOperatorId, 1); + TARGET_PUBKEY_1 = targetKey1.key; + TARGET_PUBKEY_2 = targetKey2.key; + + // Verify target keys ARE used (deposited - active validators) + expect(targetKey1.used).to.be.true; + expect(targetKey2.used).to.be.true; + + // 
========================================= + // Setup CL proof merkle tree for target witnesses + // ========================================= + merkleTree = await prepareLocalMerkleTree(); + + const FAR_FUTURE_EPOCH = 2n ** 64n - 1n; + const withdrawalVaultAddress = await ctx.contracts.withdrawalVault.getAddress(); + const withdrawalCredentials = addressToWC(withdrawalVaultAddress, 2); + const makeValidatorContainer = (pubkey: string) => ({ + pubkey, + withdrawalCredentials, + effectiveBalance: 32_000_000_000n, + slashed: false, + activationEligibilityEpoch: 0, + activationEpoch: 0, + exitEpoch: FAR_FUTURE_EPOCH, + withdrawableEpoch: FAR_FUTURE_EPOCH, + }); + + const { validatorIndex: vi1 } = await merkleTree.addValidator(makeValidatorContainer(TARGET_PUBKEY_1)); + const { validatorIndex: vi2 } = await merkleTree.addValidator(makeValidatorContainer(TARGET_PUBKEY_2)); + const { childBlockTimestamp, beaconBlockHeader } = await merkleTree.commitChangesToBeaconRoot(); + + const buildWitness = async (pubkey: string, validatorIndex: number) => ({ + proof: await merkleTree.buildProof(validatorIndex, beaconBlockHeader), + pubkey, + validatorIndex, + childBlockTimestamp, + slot: beaconBlockHeader.slot, + proposerIndex: beaconBlockHeader.proposerIndex, + }); + + targetWitness1 = await buildWitness(TARGET_PUBKEY_1, vi1); + targetWitness2 = await buildWitness(TARGET_PUBKEY_2, vi2); + + // ========================================= + // Setup roles + // ========================================= + + // Grant MANAGE_ROLE on ConsolidationBus to agent (for batch management tests) + const MANAGE_ROLE = await consolidationBus.MANAGE_ROLE(); + const REMOVE_ROLE = await consolidationBus.REMOVE_ROLE(); + await consolidationBus.connect(agentSigner).grantRole(MANAGE_ROLE, agentSigner.address); + await consolidationBus.connect(agentSigner).grantRole(REMOVE_ROLE, agentSigner.address); + + // Grant ALLOW_PAIR_ROLE and DISALLOW_PAIR_ROLE on ConsolidationMigrator to agent + const 
ALLOW_PAIR_ROLE = await consolidationMigrator.ALLOW_PAIR_ROLE(); + const DISALLOW_PAIR_ROLE = await consolidationMigrator.DISALLOW_PAIR_ROLE(); + await consolidationMigrator.connect(agentSigner).grantRole(ALLOW_PAIR_ROLE, agentSigner.address); + await consolidationMigrator.connect(agentSigner).grantRole(DISALLOW_PAIR_ROLE, agentSigner.address); + + // Allow the consolidation pair with submitter + await consolidationMigrator.connect(agentSigner).allowPair(sourceOperatorId, targetOperatorId, submitter.address); + + globalSnapshot = await Snapshot.take(); + }); + + after(async () => await Snapshot.restore(globalSnapshot)); + + beforeEach(async () => { + testSnapshot = await Snapshot.take(); + }); + + afterEach(async () => await Snapshot.restore(testSnapshot)); + + context("Full consolidation flow with real NOR", () => { + it("Should successfully complete the full consolidation flow with single validator", async () => { + const { withdrawalVault } = ctx.contracts; + + // Single validator consolidation + const groups = [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]; + + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, groups); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + const tx = await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: fee, + }); + + const receipt = await tx.wait(); + const consolidationEvents = findEventsWithInterfaces(receipt!, "ConsolidationRequestAdded", [ + withdrawalVault.interface, + ]); + expect(consolidationEvents?.length).to.equal(1); + }); + + it("Should successfully complete the full consolidation flow with multiple validators", async () => { + const { withdrawalVault } = ctx.contracts; + + // Step 1: Operator submits consolidation batch via ConsolidationMigrator + const groups = [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + { sourceKeyIndices: 
[1n], targetKeyIndex: 1n }, + ]; + + await expect( + consolidationMigrator.connect(submitter).submitConsolidationBatch(sourceOperatorId, targetOperatorId, groups), + ) + .to.emit(consolidationMigrator, "ConsolidationSubmitted") + .withArgs( + sourceOperatorId, + targetOperatorId, + groups.map((g) => [g.sourceKeyIndices, g.targetKeyIndex]), + ); + + // Step 2: Verify batch is stored in ConsolidationBus + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [ + [ + { sourcePubkeys: [SOURCE_PUBKEY_1], targetPubkey: TARGET_PUBKEY_1 }, + { sourcePubkeys: [SOURCE_PUBKEY_2], targetPubkey: TARGET_PUBKEY_2 }, + ], + ], + ), + ); + expect((await consolidationBus.getBatchInfo(batchHash)).publisher).to.equal( + await consolidationMigrator.getAddress(), + ); + + // Step 3: Executor calls executeConsolidation + const fee = await withdrawalVault.getConsolidationRequestFee(); + const totalFee = fee * BigInt(groups.length); + + const initialLimit = (await consolidationGateway.getConsolidationRequestLimitFullInfo()) + .currentConsolidationRequestsLimit; + + const tx = await consolidationBus.connect(executor).executeConsolidation( + [ + { sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }, + { sourcePubkeys: [SOURCE_PUBKEY_2], targetWitness: targetWitness2 }, + ], + { + value: totalFee, + }, + ); + + // Step 4: Verify batch is removed from storage after execution + expect((await consolidationBus.getBatchInfo(batchHash)).publisher).to.equal(ethers.ZeroAddress); + + // Step 5: Verify ConsolidationGateway rate limit was consumed + const finalLimit = (await consolidationGateway.getConsolidationRequestLimitFullInfo()) + .currentConsolidationRequestsLimit; + expect(finalLimit).to.equal(initialLimit - BigInt(groups.length)); + + // Step 6: Verify consolidation requests reached WithdrawalVault + const receipt = await tx.wait(); + expect(receipt).not.to.be.null; + + const consolidationEvents 
= findEventsWithInterfaces(receipt!, "ConsolidationRequestAdded", [ + withdrawalVault.interface, + ]); + expect(consolidationEvents?.length).to.equal(groups.length); + }); + + it("Should revert submitConsolidationBatch if caller is not the designated submitter", async () => { + await expect( + consolidationMigrator + .connect(stranger) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(stranger.address, sourceOperatorId, targetOperatorId); + }); + + it("Should revert submitConsolidationBatch if pair is not allowed (no submitter set)", async () => { + const unknownTargetOpId = 999n; + + // When pair is not allowed, there's no submitter set (address(0)) + // So caller will fail authorization check first + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, unknownTargetOpId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, sourceOperatorId, unknownTargetOpId); + }); + + it("Should revert executeConsolidation if batch not found", async () => { + const fakePubkey = "0x" + "ff".repeat(48); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [fakePubkey], targetWitness: fakeWitnessForTarget(fakePubkey) }], { + value: 1n, + }), + ).to.be.revertedWithCustomError(consolidationBus, "BatchNotFound"); + }); + + it("Should revert executeConsolidation if insufficient fee", async () => { + // Submit batch first + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + // Try to execute with insufficient fee (0) + await expect( + consolidationBus + .connect(executor) + .executeConsolidation( + [{ 
sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: fakeWitnessForTarget(TARGET_PUBKEY_1) }], + { + value: 0n, + }, + ), + ).to.be.reverted; // The actual error comes from WithdrawalVault + }); + + it("Should revert executeConsolidation if batch already executed", async () => { + const { withdrawalVault } = ctx.contracts; + + // Submit batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + // Execute first time + await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: fee, + }); + + // Try to execute again + await expect( + consolidationBus + .connect(executor) + .executeConsolidation( + [{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: fakeWitnessForTarget(TARGET_PUBKEY_1) }], + { + value: fee, + }, + ), + ).to.be.revertedWithCustomError(consolidationBus, "BatchNotFound"); + }); + }); + + context("Batch management", () => { + it("Should allow manager to remove a pending batch", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Submit batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [[{ sourcePubkeys: [SOURCE_PUBKEY_1], targetPubkey: TARGET_PUBKEY_1 }]], + ), + ); + + expect((await consolidationBus.getBatchInfo(batchHash)).publisher).to.not.equal(ethers.ZeroAddress); + + // Manager removes the batch + await consolidationBus.connect(agentSigner).removeBatches([batchHash]); + + expect((await consolidationBus.getBatchInfo(batchHash)).publisher).to.equal(ethers.ZeroAddress); + }); + }); + + 
context("Allowlist management", () => { + it("Should allow disallowing a pair after submission", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Submit a batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + // Disallow the pair + await consolidationMigrator.connect(agentSigner).disallowPair(sourceOperatorId, targetOperatorId); + + // Verify new submissions are blocked (submitter is cleared, so NotAuthorized is thrown) + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [1n], targetKeyIndex: 1n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "NotAuthorized") + .withArgs(submitter.address, sourceOperatorId, targetOperatorId); + + // But existing batch can still be executed + const { withdrawalVault } = ctx.contracts; + const fee = await withdrawalVault.getConsolidationRequestFee(); + + await expect( + consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: fee, + }), + ).to.not.be.reverted; + }); + + it("Should allow one source operator to consolidate to multiple targets", async () => { + const { withdrawalVault } = ctx.contracts; + const agentSigner = await ctx.getSigner("agent"); + + // Set up a second target operator with deposited validators + const targetOperatorId2 = await norSdvtAddNodeOperator(ctx, nor, { + name: "consolidation_target_operator_2", + rewardAddress: certainAddress("consolidation:target2:reward"), + }); + + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: targetOperatorId2, + keysToAdd: 2n, + }); + + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: targetOperatorId2, + limit: 2n, + }); + + // Deposit validators to make target2 keys active + await 
depositAndReportValidators(ctx, NOR_MODULE_ID, 1n); + + const targetKey3 = await nor.getSigningKey(targetOperatorId2, 0); + const TARGET_PUBKEY_3 = targetKey3.key; + + // Build valid CL proof witness for TARGET_PUBKEY_3 + const FAR_FUTURE_EPOCH = 2n ** 64n - 1n; + const { validatorIndex: vi3 } = await merkleTree.addValidator({ + pubkey: TARGET_PUBKEY_3, + withdrawalCredentials: addressToWC(await ctx.contracts.withdrawalVault.getAddress(), 2), + effectiveBalance: 32_000_000_000n, + slashed: false, + activationEligibilityEpoch: 0, + activationEpoch: 0, + exitEpoch: FAR_FUTURE_EPOCH, + withdrawableEpoch: FAR_FUTURE_EPOCH, + }); + const { childBlockTimestamp: cbt3, beaconBlockHeader: bbh3 } = await merkleTree.commitChangesToBeaconRoot(); + const targetWitness3 = { + proof: await merkleTree.buildProof(vi3, bbh3), + pubkey: TARGET_PUBKEY_3, + validatorIndex: vi3, + childBlockTimestamp: cbt3, + slot: bbh3.slot, + proposerIndex: bbh3.proposerIndex, + }; + + // Allow second pair with the same submitter + await consolidationMigrator + .connect(agentSigner) + .allowPair(sourceOperatorId, targetOperatorId2, submitter.address); + + // Submit batch to first target + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + // Submit batch to second target + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId2, [ + { sourceKeyIndices: [1n], targetKeyIndex: 0n }, + ]); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + // Execute both batches + await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: fee, + }); + + await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_2], targetWitness: targetWitness3 }], { + value: fee, + }); + }); + }); + + 
context("Key validation with real NOR", () => { + it("Should revert submitConsolidationBatch if source key is NOT used (not deposited)", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Create a new source operator with keys that are NOT deposited + const unusedSourceOperatorId = await norSdvtAddNodeOperator(ctx, nor, { + name: "consolidation_unused_source", + rewardAddress: certainAddress("consolidation:unused:reward"), + }); + + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: unusedSourceOperatorId, + keysToAdd: 2n, + }); + + // Set staking limit but DO NOT deposit - keys remain unused + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: unusedSourceOperatorId, + limit: 2n, + }); + + // Allow the pair + await consolidationMigrator + .connect(agentSigner) + .allowPair(unusedSourceOperatorId, targetOperatorId, submitter.address); + + // Try to consolidate from unused key - should fail + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(unusedSourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "KeyNotDeposited") + .withArgs(NOR_MODULE_ID, unusedSourceOperatorId, 0n); + }); + + it("Should revert submitConsolidationBatch if target key is NOT deposited (not active validator)", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Create a new target operator with keys that are NOT deposited + const undepositedTargetOperatorId = await norSdvtAddNodeOperator(ctx, nor, { + name: "consolidation_undeposited_target", + rewardAddress: certainAddress("consolidation:undeposited:reward"), + }); + + await norSdvtAddOperatorKeys(ctx, nor, { + operatorId: undepositedTargetOperatorId, + keysToAdd: 2n, + }); + + // Set staking limit but DO NOT deposit - keys remain undeposited (not active) + await norSdvtSetOperatorStakingLimit(ctx, nor, { + operatorId: undepositedTargetOperatorId, + 
limit: 2n, + }); + + // Verify target keys are NOT used (not deposited) + const targetKey = await nor.getSigningKey(undepositedTargetOperatorId, 0); + expect(targetKey.used).to.be.false; + + // Allow the pair + await consolidationMigrator + .connect(agentSigner) + .allowPair(sourceOperatorId, undepositedTargetOperatorId, submitter.address); + + // Try to consolidate to undeposited target key - should fail + // Per EIP-7251, consolidation can only happen TO active (deposited) validators + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, undepositedTargetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationMigrator, "KeyNotDeposited") + .withArgs(NOR_MODULE_ID, undepositedTargetOperatorId, 0n); + }); + }); + + context("ConsolidationGateway integration", () => { + it("Should revert executeConsolidation when ConsolidationGateway is paused", async () => { + const { withdrawalVault } = ctx.contracts; + + // Submit batch first + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + // Grant PAUSE_ROLE to agent and pause the gateway + const agentSigner = await ctx.getSigner("agent"); + const PAUSE_ROLE = await consolidationGateway.PAUSE_ROLE(); + await consolidationGateway.connect(agentSigner).grantRole(PAUSE_ROLE, agentSigner.address); + await consolidationGateway.connect(agentSigner).pauseFor(3600); // 1 hour + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + // Try to execute - should revert because gateway is paused + await expect( + consolidationBus + .connect(executor) + .executeConsolidation( + [{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: fakeWitnessForTarget(TARGET_PUBKEY_1) }], + { + value: fee, + }, + ), + ).to.be.revertedWithCustomError(consolidationGateway, "ResumedExpected"); + }); + + it("Should 
revert executeConsolidation when rate limit is exhausted", async () => { + const { withdrawalVault } = ctx.contracts; + + // Grant EXIT_LIMIT_MANAGER_ROLE to agent and set a small limit + const agentSigner = await ctx.getSigner("agent"); + const EXIT_LIMIT_MANAGER_ROLE = await consolidationGateway.EXIT_LIMIT_MANAGER_ROLE(); + await consolidationGateway.connect(agentSigner).grantRole(EXIT_LIMIT_MANAGER_ROLE, agentSigner.address); + await consolidationGateway.connect(agentSigner).setConsolidationRequestLimit(1, 1, 86400); + + // Submit first batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + // Execute first batch - this should consume the limit + await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: fee, + }); + + // Submit second batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [1n], targetKeyIndex: 1n }]); + + // Execute second batch - should fail due to rate limit + await expect( + consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_2], targetWitness: targetWitness2 }], { + value: fee, + }), + ).to.be.revertedWithCustomError(consolidationGateway, "ConsolidationRequestsLimitExceeded"); + }); + + it("Should refund excess ETH to executor", async () => { + const { withdrawalVault } = ctx.contracts; + + // Submit batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + const excessFee = fee * 10n; // Send 10x the required fee + + const executorBalanceBefore = await 
ethers.provider.getBalance(executor.address); + + const tx = await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: excessFee, + }); + + const receipt = await tx.wait(); + const gasUsed = receipt!.gasUsed * receipt!.gasPrice; + + const executorBalanceAfter = await ethers.provider.getBalance(executor.address); + + // Executor should only pay fee + gas, not excessFee + // Balance after = Balance before - fee - gas + const expectedBalance = executorBalanceBefore - fee - gasUsed; + expect(executorBalanceAfter).to.equal(expectedBalance); + }); + }); + + context("Batch management extended", () => { + it("Should execute multiple batches sequentially", async () => { + const { withdrawalVault } = ctx.contracts; + + // Submit first batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + // Submit second batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [1n], targetKeyIndex: 1n }]); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + // Execute first batch + await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: targetWitness1 }], { + value: fee, + }); + + // Verify first batch is executed + const batchHash1 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [[{ sourcePubkeys: [SOURCE_PUBKEY_1], targetPubkey: TARGET_PUBKEY_1 }]], + ), + ); + expect((await consolidationBus.getBatchInfo(batchHash1)).publisher).to.equal(ethers.ZeroAddress); + + // Execute second batch + await consolidationBus + .connect(executor) + .executeConsolidation([{ sourcePubkeys: [SOURCE_PUBKEY_2], targetWitness: targetWitness2 }], { + value: 
fee, + }); + + // Verify second batch is executed + const batchHash2 = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [[{ sourcePubkeys: [SOURCE_PUBKEY_2], targetPubkey: TARGET_PUBKEY_2 }]], + ), + ); + expect((await consolidationBus.getBatchInfo(batchHash2)).publisher).to.equal(ethers.ZeroAddress); + }); + + it("Should revert executeConsolidation if batch was removed", async () => { + const { withdrawalVault } = ctx.contracts; + const agentSigner = await ctx.getSigner("agent"); + + // Submit batch + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [[{ sourcePubkeys: [SOURCE_PUBKEY_1], targetPubkey: TARGET_PUBKEY_1 }]], + ), + ); + + // Remove batch + await consolidationBus.connect(agentSigner).removeBatches([batchHash]); + + const fee = await withdrawalVault.getConsolidationRequestFee(); + + // Try to execute removed batch + await expect( + consolidationBus + .connect(executor) + .executeConsolidation( + [{ sourcePubkeys: [SOURCE_PUBKEY_1], targetWitness: fakeWitnessForTarget(TARGET_PUBKEY_1) }], + { + value: fee, + }, + ), + ).to.be.revertedWithCustomError(consolidationBus, "BatchNotFound"); + }); + + it("Should revert addConsolidationRequests if too many groups", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Set maxGroupsInBatch to 1 + await consolidationBus.connect(agentSigner).setMaxGroupsInBatch(1); + + // Try to submit batch with 2 groups (exceeds maxGroupsInBatch of 1) + await expect( + consolidationMigrator.connect(submitter).submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + { sourceKeyIndices: [1n], targetKeyIndex: 1n }, + ]), + ) + 
.to.be.revertedWithCustomError(consolidationBus, "TooManyGroups") + .withArgs(2, 1); + }); + + it("Should revert addConsolidationRequests if batch size exceeds limit", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Set batchSize to 1 (single group with 2 sources will exceed it) + // Must reduce maxGroupsInBatch first, since batchSize must be >= maxGroupsInBatch + await consolidationBus.connect(agentSigner).setMaxGroupsInBatch(1); + await consolidationBus.connect(agentSigner).setBatchSize(1); + + // Try to submit 1 group with 2 source keys (total count 2 exceeds batchSize of 1) + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n, 1n], targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationBus, "BatchTooLarge") + .withArgs(2, 1); + }); + + it("Should revert addConsolidationRequests if batch already pending (duplicate submission)", async () => { + // Submit batch first time + await consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [{ sourceKeyIndices: [0n], targetKeyIndex: 0n }]); + + const batchHash = ethers.keccak256( + ethers.AbiCoder.defaultAbiCoder().encode( + ["tuple(bytes[] sourcePubkeys, bytes targetPubkey)[]"], + [[{ sourcePubkeys: [SOURCE_PUBKEY_1], targetPubkey: TARGET_PUBKEY_1 }]], + ), + ); + + // Try to submit the same batch again + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationBus, "BatchAlreadyPending") + .withArgs(batchHash); + }); + }); + + context("Input validation", () => { + it("Should revert submitConsolidationBatch with EmptyBatch if groups array is empty", async () => { + await expect( + 
consolidationMigrator.connect(submitter).submitConsolidationBatch(sourceOperatorId, targetOperatorId, []), + ).to.be.revertedWithCustomError(consolidationBus, "EmptyBatch"); + }); + + it("Should revert submitConsolidationBatch with EmptyGroup if a source group is empty", async () => { + // Second group has empty sourceKeyIndices — ConsolidationBus catches this after migrator passes it through + await expect( + consolidationMigrator.connect(submitter).submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + { sourceKeyIndices: [], targetKeyIndex: 1n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationBus, "EmptyGroup") + .withArgs(1); + }); + + it("Should revert submitConsolidationBatch with TooManyGroups if groups exceed maxGroupsInBatch", async () => { + const agentSigner = await ctx.getSigner("agent"); + + await consolidationBus.connect(agentSigner).setMaxGroupsInBatch(1); + + await expect( + consolidationMigrator.connect(submitter).submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n], targetKeyIndex: 0n }, + { sourceKeyIndices: [1n], targetKeyIndex: 1n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationBus, "TooManyGroups") + .withArgs(2, 1); + }); + + it("Should revert submitConsolidationBatch with BatchTooLarge if total keys exceed batchSize", async () => { + const agentSigner = await ctx.getSigner("agent"); + + // Reduce limits so a single group with 2 source keys exceeds the batch size + await consolidationBus.connect(agentSigner).setMaxGroupsInBatch(1); + await consolidationBus.connect(agentSigner).setBatchSize(1); + + await expect( + consolidationMigrator + .connect(submitter) + .submitConsolidationBatch(sourceOperatorId, targetOperatorId, [ + { sourceKeyIndices: [0n, 1n], targetKeyIndex: 0n }, + ]), + ) + .to.be.revertedWithCustomError(consolidationBus, "BatchTooLarge") + .withArgs(2, 1); + }); + }); +}); diff --git 
a/test/integration/core/accounting-oracle-extra-data-full-items.integration.ts b/test/integration/core/accounting-oracle-extra-data-full-items.integration.ts index f550e3d31b..ebc0b32ac8 100644 --- a/test/integration/core/accounting-oracle-extra-data-full-items.integration.ts +++ b/test/integration/core/accounting-oracle-extra-data-full-items.integration.ts @@ -18,7 +18,7 @@ import { RewardDistributionState, setAnnualBalanceIncreaseLimit, } from "lib"; -import { getProtocolContext, ProtocolContext, withCSM } from "lib/protocol"; +import { getProtocolContext, ProtocolContext, seedProtocolPendingBaseline, withCSM } from "lib/protocol"; import { reportWithoutExtraData } from "lib/protocol/helpers/accounting"; import { norSdvtEnsureOperators } from "lib/protocol/helpers/nor-sdvt"; import { removeStakingLimit, setModuleStakeShareLimit } from "lib/protocol/helpers/staking"; @@ -28,6 +28,7 @@ import { MAX_BASIS_POINTS, Snapshot } from "test/suite"; const MIN_KEYS_PER_OPERATOR = 5n; const MIN_OPERATORS_COUNT = 30n; +const MAIN_REPORT_EFFECTIVE_CL_REWARD = ether("1"); class ListKeyMapHelper { private map: Map = new Map(); @@ -237,13 +238,29 @@ describe("Integration: AccountingOracle extra data full items", () => { ); } + // This suite also relies on the reward-bearing main report to enter + // TransferredToModule before extra-data finalization. Snapshot protocol + // pending first so the original reward-bearing path remains reachable. + await seedProtocolPendingBaseline(ctx, NOR_MODULE_ID); + + // Keep the original 1 ETH reward-bearing main report, but give the pending-backed + // safety cap enough elapsed time after snapshotting the pending baseline. 
+ await advanceChainTime(15n * 24n * 60n * 60n); + const { submitter, extraDataChunks } = await reportWithoutExtraData( ctx, numExitedValidatorsByStakingModule, modulesWithExited, extraData, + { + // Snapshot protocol pending into the previous report first, then run the original + // reward-bearing main report so this suite still exercises + // TransferredToModule -> ReadyForDistribution -> Distributed. + effectiveClDiff: MAIN_REPORT_EFFECTIVE_CL_REWARD, + }, ); + // Make the main-report transition explicit before extra data finalization moves modules to ReadyForDistribution. await assertModulesRewardDistributionState(RewardDistributionState.TransferredToModule); for (let i = 0; i < extraDataChunks.length; i++) { diff --git a/test/integration/core/accounting-oracle-extra-data.integration.ts b/test/integration/core/accounting-oracle-extra-data.integration.ts index 23b1c721b4..fccf9f8b02 100644 --- a/test/integration/core/accounting-oracle-extra-data.integration.ts +++ b/test/integration/core/accounting-oracle-extra-data.integration.ts @@ -6,7 +6,13 @@ import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; import { advanceChainTime, ether, findEventsWithInterfaces, hexToBytes, RewardDistributionState } from "lib"; import { EXTRA_DATA_FORMAT_LIST, KeyType, prepareExtraData, setAnnualBalanceIncreaseLimit } from "lib/oracle"; -import { getProtocolContext, OracleReportParams, ProtocolContext, report } from "lib/protocol"; +import { + getProtocolContext, + OracleReportParams, + ProtocolContext, + report, + seedProtocolPendingBaseline, +} from "lib/protocol"; import { reportWithoutExtraData, waitNextAvailableReportTime } from "lib/protocol/helpers/accounting"; import { NOR_MODULE_ID } from "lib/protocol/helpers/staking-module"; @@ -14,6 +20,7 @@ import { MAX_BASIS_POINTS, Snapshot } from "test/suite"; const MODULE_ID = NOR_MODULE_ID; const NUM_NEWLY_EXITED_VALIDATORS = 1n; +const MAIN_REPORT_EFFECTIVE_CL_REWARD = ether("1"); const MAINNET_NOR_ADDRESS = 
"0x55032650b14df07b85bf18a3a3ec8e0af2e028d5".toLowerCase(); describe("Integration: AccountingOracle extra data", () => { @@ -112,7 +119,21 @@ describe("Integration: AccountingOracle extra data", () => { // Add total exited validators for both entries const totalNewExited = NUM_NEWLY_EXITED_VALIDATORS + 1n; // First operator has 1, second has 1 - return await reportWithoutExtraData(ctx, [totalExitedValidators + totalNewExited], [NOR_MODULE_ID], extraData); + // The main report in this suite must stay reward-bearing because it drives the + // TransferredToModule -> ReadyForDistribution state machine. Snapshot protocol + // pending first so the original 1 ETH main report still reaches that phase path. + await seedProtocolPendingBaseline(ctx, NOR_MODULE_ID); + + // Keep the original 1 ETH reward-bearing main report, but give the pending-backed + // safety cap enough elapsed time after snapshotting the pending baseline. + await advanceChainTime(15n * 24n * 60n * 60n); + + return await reportWithoutExtraData(ctx, [totalExitedValidators + totalNewExited], [NOR_MODULE_ID], extraData, { + // Snapshot protocol pending into the previous report first, then run the original + // reward-bearing main report so this suite still exercises + // TransferredToModule -> ReadyForDistribution. + effectiveClDiff: MAIN_REPORT_EFFECTIVE_CL_REWARD, + }); } it("should accept report with multiple keys per node operator (single chunk)", async () => { @@ -170,6 +191,8 @@ describe("Integration: AccountingOracle extra data", () => { const { accountingOracle } = ctx.contracts; const { submitter, extraDataChunks } = await submitMainReport(); + // Make the main-report transition explicit before extra data starts changing module state further. 
+ await assertModulesRewardDistributionState(RewardDistributionState.TransferredToModule); // Submit first chunk of extra data await accountingOracle.connect(submitter).submitReportExtraDataList(hexToBytes(extraDataChunks[0])); @@ -196,6 +219,8 @@ describe("Integration: AccountingOracle extra data", () => { const { accountingOracle } = ctx.contracts; const { submitter, extraDataChunks } = await submitMainReport(); + // Make the main-report transition explicit before extra data starts changing module state further. + await assertModulesRewardDistributionState(RewardDistributionState.TransferredToModule); // Submit first chunk of extra data await accountingOracle.connect(submitter).submitReportExtraDataList(hexToBytes(extraDataChunks[0])); diff --git a/test/integration/core/accounting-oracle-module-balances.integration.ts b/test/integration/core/accounting-oracle-module-balances.integration.ts new file mode 100644 index 0000000000..5d25697a84 --- /dev/null +++ b/test/integration/core/accounting-oracle-module-balances.integration.ts @@ -0,0 +1,339 @@ +import { expect } from "chai"; +import { getBigInt } from "ethers"; + +import { ether, ONE_GWEI } from "lib"; +import { + depositValidatorsWithoutReport, + getCurrentModuleAccountingReportParams, + getNextReportContext, + getProtocolContext, + getStakingModuleBalances, + ProtocolContext, + report, + submitReportDataWithConsensus, + submitReportDataWithConsensusAndEmptyExtraData, + updateOracleReportLimits, +} from "lib/protocol"; +import { NOR_MODULE_ID, SDVT_MODULE_ID } from "lib/protocol/helpers/staking-module"; + +import { Snapshot } from "test/suite"; + +const ONE_DAY = 24n * 60n * 60n; +const ONE_VALIDATOR_BALANCE_ETH = 32n; +const ONE_VALIDATOR_BALANCE = ether("32"); +const ONE_ETH = ether("1"); +const MAX_BASIS_POINTS = 10_000n; +const SECONDS_PER_YEAR = 365n * ONE_DAY; +const sumBigints = (values: bigint[]) => values.reduce((sum, value) => sum + value, 0n); + +describe("Integration: AccountingOracle module 
balances sanity", () => { + let ctx: ProtocolContext; + + let snapshot: string; + let originalState: string; + + before(async () => { + ctx = await getProtocolContext(); + snapshot = await Snapshot.take(); + + await submitModuleBalancesSanityBaseline(); + }); + + beforeEach(async () => (originalState = await Snapshot.take())); + + afterEach(async () => await Snapshot.restore(originalState)); + + after(async () => await Snapshot.restore(snapshot)); + + const getCurrentModuleReportState = async () => { + const { stakingModuleIdsWithUpdatedBalance, validatorBalancesGweiByStakingModule } = + await getCurrentModuleAccountingReportParams(ctx); + const moduleIndexById = new Map( + stakingModuleIdsWithUpdatedBalance.map((moduleId, index) => [moduleId, index] as const), + ); + + return { stakingModuleIdsWithUpdatedBalance, validatorBalancesGweiByStakingModule, moduleIndexById }; + }; + + const withUpdatedModuleBalances = ( + currentValidatorBalancesGweiByStakingModule: bigint[], + moduleIndexById: Map, + overrides: Array<[bigint, bigint]>, + ) => { + const updatedValidatorBalancesGweiByStakingModule = [...currentValidatorBalancesGweiByStakingModule]; + + for (const [moduleId, updatedValidatorsBalanceGwei] of overrides) { + const index = moduleIndexById.get(moduleId); + if (index === undefined) { + throw new Error(`Missing staking module ${moduleId} in router order`); + } + + updatedValidatorBalancesGweiByStakingModule[index] = updatedValidatorsBalanceGwei; + } + + return updatedValidatorBalancesGweiByStakingModule; + }; + + const buildReportData = async ({ + clDiff, + stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule, + clPendingBalanceGwei, + }: { + clDiff: bigint; + stakingModuleIdsWithUpdatedBalance: bigint[]; + validatorBalancesGweiByStakingModule: bigint[]; + clPendingBalanceGwei: bigint; + }) => { + const { data } = await report(ctx, { + clDiff, + clPendingBalanceGwei, + dryRun: true, + excludeVaultsBalances: true, + skipWithdrawals: true, + 
stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule, + waitNextReportTime: true, + }); + + const totalClBalanceGwei = getBigInt(data.clValidatorsBalanceGwei) + getBigInt(data.clPendingBalanceGwei); + return { + ...data, + clValidatorsBalanceGwei: totalClBalanceGwei - clPendingBalanceGwei, + clPendingBalanceGwei, + }; + }; + + const submitModuleBalancesSanityBaseline = async () => { + const { data } = await report(ctx, { + dryRun: true, + excludeVaultsBalances: true, + skipWithdrawals: true, + }); + + await submitReportDataWithConsensusAndEmptyExtraData(ctx, data); + }; + + it("should accept a report that moves one module's pending balance into validators", async () => { + const { lido } = ctx.contracts; + + await depositValidatorsWithoutReport(ctx, NOR_MODULE_ID, 1n); + + const balanceStatsBeforeReport = await lido.getBalanceStats(); + const moduleReportState = await getCurrentModuleReportState(); + const norBefore = await getStakingModuleBalances(ctx, NOR_MODULE_ID); + const norPendingBalanceBeforeGwei = balanceStatsBeforeReport.depositedSinceLastReport / ONE_GWEI; + const totalPendingBalanceBeforeGwei = norPendingBalanceBeforeGwei; + const totalValidatorsBalanceBeforeGwei = sumBigints(moduleReportState.validatorBalancesGweiByStakingModule); + + expect(balanceStatsBeforeReport.depositedSinceLastReport).to.equal(ONE_VALIDATOR_BALANCE); + + const pendingConsumedGwei = norPendingBalanceBeforeGwei; + expect(pendingConsumedGwei).to.be.gt(0n); + + const reportedValidatorsBalancesGwei = withUpdatedModuleBalances( + moduleReportState.validatorBalancesGweiByStakingModule, + moduleReportState.moduleIndexById, + [[NOR_MODULE_ID, norBefore.validatorsBalanceGwei + pendingConsumedGwei]], + ); + const validatorsBalanceAfterGwei = sumBigints(reportedValidatorsBalancesGwei); + const pendingBalanceAfterGwei = 0n; + + expect(validatorsBalanceAfterGwei).to.equal(totalValidatorsBalanceBeforeGwei + pendingConsumedGwei); + 
expect(pendingBalanceAfterGwei).to.equal(totalPendingBalanceBeforeGwei - pendingConsumedGwei); + + const data = await buildReportData({ + clDiff: balanceStatsBeforeReport.depositedSinceLastReport, + stakingModuleIdsWithUpdatedBalance: moduleReportState.stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule: reportedValidatorsBalancesGwei, + clPendingBalanceGwei: 0n, + }); + + await expect(submitReportDataWithConsensus(ctx, data)).to.not.be.reverted; + }); + + it("should reject a report whose module validators balances do not add up to the reported CL validators total", async () => { + const { lido, oracleReportSanityChecker } = ctx.contracts; + + await depositValidatorsWithoutReport(ctx, NOR_MODULE_ID, 1n); + + const balanceStatsBeforeReport = await lido.getBalanceStats(); + const moduleReportState = await getCurrentModuleReportState(); + const norPendingBalanceBeforeGwei = balanceStatsBeforeReport.depositedSinceLastReport / ONE_GWEI; + + expect(balanceStatsBeforeReport.depositedSinceLastReport).to.equal(ONE_VALIDATOR_BALANCE); + + const data = await buildReportData({ + clDiff: balanceStatsBeforeReport.depositedSinceLastReport, + stakingModuleIdsWithUpdatedBalance: moduleReportState.stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule: moduleReportState.validatorBalancesGweiByStakingModule, + clPendingBalanceGwei: norPendingBalanceBeforeGwei, + }); + const inconsistentData = { + ...data, + clValidatorsBalanceGwei: getBigInt(data.clValidatorsBalanceGwei) + 1n, + }; + + await expect(submitReportDataWithConsensus(ctx, inconsistentData)).to.be.revertedWithCustomError( + oracleReportSanityChecker, + "InconsistentValidatorsBalanceByModule", + ); + }); + + it("should reject a report that consumes more pending across modules than the global appeared limit allows", async () => { + const { oracleReportSanityChecker } = ctx.contracts; + const { reportTimeElapsed } = await getNextReportContext(ctx); + const 
perModuleAppearedLimitEthPerDay = + (ONE_VALIDATOR_BALANCE_ETH * ONE_DAY + reportTimeElapsed - 1n) / reportTimeElapsed; + + await updateOracleReportLimits(ctx, { + appearedEthAmountPerDayLimit: perModuleAppearedLimitEthPerDay, + consolidationEthAmountPerDayLimit: 0n, + }); + + await depositValidatorsWithoutReport(ctx, NOR_MODULE_ID, 1n); + await depositValidatorsWithoutReport(ctx, SDVT_MODULE_ID, 1n); + + const balanceStatsBeforeReport = await ctx.contracts.lido.getBalanceStats(); + const moduleReportState = await getCurrentModuleReportState(); + const norBefore = await getStakingModuleBalances(ctx, NOR_MODULE_ID); + const sdvtBefore = await getStakingModuleBalances(ctx, SDVT_MODULE_ID); + const pendingConsumedPerModuleGwei = ONE_VALIDATOR_BALANCE / ONE_GWEI; + + const data = await buildReportData({ + clDiff: balanceStatsBeforeReport.depositedSinceLastReport, + stakingModuleIdsWithUpdatedBalance: moduleReportState.stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule: withUpdatedModuleBalances( + moduleReportState.validatorBalancesGweiByStakingModule, + moduleReportState.moduleIndexById, + [ + [NOR_MODULE_ID, norBefore.validatorsBalanceGwei + pendingConsumedPerModuleGwei], + [SDVT_MODULE_ID, sdvtBefore.validatorsBalanceGwei + pendingConsumedPerModuleGwei], + ], + ), + clPendingBalanceGwei: 0n, + }); + + await expect(submitReportDataWithConsensus(ctx, data)).to.be.revertedWithCustomError( + oracleReportSanityChecker, + "IncorrectTotalActivatedBalance", + ); + }); + + it("should reject a report when positive module validators growth exceeds the module increase limit", async () => { + const { lido, oracleReportSanityChecker } = ctx.contracts; + const moduleGrowthExcessGwei = ONE_ETH / ONE_GWEI; + + await depositValidatorsWithoutReport(ctx, NOR_MODULE_ID, 1n); + await depositValidatorsWithoutReport(ctx, SDVT_MODULE_ID, 1n); + + const balanceStatsBeforeReport = await lido.getBalanceStats(); + const { reportTimeElapsed } = await 
getNextReportContext(ctx); + const totalPendingBalanceBeforeWei = balanceStatsBeforeReport.depositedSinceLastReport; + const totalPendingBalanceBeforeGwei = totalPendingBalanceBeforeWei / ONE_GWEI; + const appearedLimitEthPerDay = + ((totalPendingBalanceBeforeWei / ONE_ETH) * ONE_DAY + reportTimeElapsed - 1n) / reportTimeElapsed; + + await updateOracleReportLimits(ctx, { + annualBalanceIncreaseBPLimit: 0n, + appearedEthAmountPerDayLimit: appearedLimitEthPerDay, + consolidationEthAmountPerDayLimit: 0n, + }); + + const moduleReportState = await getCurrentModuleReportState(); + const norBefore = await getStakingModuleBalances(ctx, NOR_MODULE_ID); + const sdvtBefore = await getStakingModuleBalances(ctx, SDVT_MODULE_ID); + const totalValidatorsBalanceBeforeGwei = sumBigints(moduleReportState.validatorBalancesGweiByStakingModule); + + expect(totalPendingBalanceBeforeWei).to.equal(2n * ONE_VALIDATOR_BALANCE); + expect(sdvtBefore.validatorsBalanceGwei).to.be.gt( + moduleGrowthExcessGwei, + "test precondition failed: SDVT must have enough previous balance to offset the crafted excess", + ); + + const reportedValidatorsBalancesGwei = withUpdatedModuleBalances( + moduleReportState.validatorBalancesGweiByStakingModule, + moduleReportState.moduleIndexById, + [ + [NOR_MODULE_ID, norBefore.validatorsBalanceGwei + totalPendingBalanceBeforeGwei + moduleGrowthExcessGwei], + [SDVT_MODULE_ID, sdvtBefore.validatorsBalanceGwei - moduleGrowthExcessGwei], + ], + ); + const validatorsBalanceAfterGwei = sumBigints(reportedValidatorsBalancesGwei); + + expect(validatorsBalanceAfterGwei).to.equal(totalValidatorsBalanceBeforeGwei + totalPendingBalanceBeforeGwei); + + const data = await buildReportData({ + clDiff: totalPendingBalanceBeforeWei, + stakingModuleIdsWithUpdatedBalance: moduleReportState.stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule: reportedValidatorsBalancesGwei, + clPendingBalanceGwei: 0n, + }); + + await expect(submitReportDataWithConsensus(ctx, data)) 
+ .to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectTotalModuleValidatorsBalanceIncrease") + .withArgs(totalPendingBalanceBeforeWei, totalPendingBalanceBeforeWei + ONE_ETH); + }); + + it("should reject a report that grows module validators without consuming matching pending balance", async () => { + const { lido, oracleReportSanityChecker } = ctx.contracts; + + await updateOracleReportLimits(ctx, { annualBalanceIncreaseBPLimit: 1n }); + + await depositValidatorsWithoutReport(ctx, NOR_MODULE_ID, 1n); + + const balanceStatsBeforeReport = await lido.getBalanceStats(); + const moduleReportState = await getCurrentModuleReportState(); + const norBefore = await getStakingModuleBalances(ctx, NOR_MODULE_ID); + const totalPendingBalanceBeforeGwei = balanceStatsBeforeReport.depositedSinceLastReport / ONE_GWEI; + const totalValidatorsBalanceBeforeGwei = sumBigints(moduleReportState.validatorBalancesGweiByStakingModule); + + expect(balanceStatsBeforeReport.clValidatorsBalanceAtLastReport).to.be.gt(0n); + expect(balanceStatsBeforeReport.depositedSinceLastReport).to.equal(ONE_VALIDATOR_BALANCE); + expect(totalPendingBalanceBeforeGwei).to.be.gt(0n); + + const { reportTimeElapsed } = await getNextReportContext(ctx); + const { annualBalanceIncreaseBPLimit } = await oracleReportSanityChecker.getOracleReportLimits(); + const allowedValidatorsGrowthGwei = + (totalValidatorsBalanceBeforeGwei * annualBalanceIncreaseBPLimit * reportTimeElapsed) / + (SECONDS_PER_YEAR * MAX_BASIS_POINTS); + const excessiveValidatorsGrowthGwei = allowedValidatorsGrowthGwei + 1n; + const excessiveValidatorsGrowthWei = excessiveValidatorsGrowthGwei * ONE_GWEI; + + const reportedValidatorsBalancesGwei = withUpdatedModuleBalances( + moduleReportState.validatorBalancesGweiByStakingModule, + moduleReportState.moduleIndexById, + [[NOR_MODULE_ID, norBefore.validatorsBalanceGwei + excessiveValidatorsGrowthGwei]], + ); + const validatorsBalanceAfterGwei = sumBigints(reportedValidatorsBalancesGwei); + 
const pendingBalanceAfterGwei = totalPendingBalanceBeforeGwei; + + expect(pendingBalanceAfterGwei).to.equal(totalPendingBalanceBeforeGwei); + expect(validatorsBalanceAfterGwei).to.equal(totalValidatorsBalanceBeforeGwei + excessiveValidatorsGrowthGwei); + + const data = await buildReportData({ + clDiff: balanceStatsBeforeReport.depositedSinceLastReport + excessiveValidatorsGrowthWei, + stakingModuleIdsWithUpdatedBalance: moduleReportState.stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule: reportedValidatorsBalancesGwei, + clPendingBalanceGwei: totalPendingBalanceBeforeGwei, + }); + + const totalCLBalanceBeforeWei = + balanceStatsBeforeReport.clValidatorsBalanceAtLastReport + + balanceStatsBeforeReport.clPendingBalanceAtLastReport + + balanceStatsBeforeReport.depositedSinceLastReport; + const totalCLGrowthCapWei = + (totalCLBalanceBeforeWei * annualBalanceIncreaseBPLimit * reportTimeElapsed) / + (SECONDS_PER_YEAR * MAX_BASIS_POINTS); + + expect(totalCLGrowthCapWei).to.be.gte( + excessiveValidatorsGrowthWei, + "test precondition failed: total CL annual cap must stay above the crafted validator-only growth", + ); + + await expect(submitReportDataWithConsensus(ctx, data)).to.be.revertedWithCustomError( + oracleReportSanityChecker, + "IncorrectTotalCLBalanceIncrease", + ); + }); +}); diff --git a/test/integration/core/accounting.integration.ts b/test/integration/core/accounting.integration.ts index 1e54cb230c..5d28178a6c 100644 --- a/test/integration/core/accounting.integration.ts +++ b/test/integration/core/accounting.integration.ts @@ -4,9 +4,17 @@ import { ethers } from "hardhat"; import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; -import { ether, impersonate, ONE_GWEI, updateBalance } from "lib"; +import { advanceChainTime, ether, impersonate, ONE_GWEI, updateBalance } from "lib"; import { LIMITER_PRECISION_BASE } from "lib/constants"; -import { getProtocolContext, getReportTimeElapsed, ProtocolContext, removeStakingLimit, 
report } from "lib/protocol"; +import { + getProtocolContext, + getReportTimeElapsed, + ProtocolContext, + removeStakingLimit, + report, + seedProtocolPendingBaseline, +} from "lib/protocol"; +import { NOR_MODULE_ID } from "lib/protocol/helpers/staking-module"; import { Snapshot } from "test/suite"; import { MAX_BASIS_POINTS, ONE_DAY, SHARE_RATE_PRECISION } from "test/suite/constants"; @@ -98,7 +106,7 @@ describe("Integration: Accounting", () => { } async function readState() { - const { lido, accountingOracle, elRewardsVault, withdrawalVault, burner } = ctx.contracts; + const { lido, accountingOracle, elRewardsVault, withdrawalVault, burner, withdrawalQueue } = ctx.contracts; const lastProcessingRefSlot = await accountingOracle.getLastProcessingRefSlot(); const totalELRewardsCollected = await lido.getTotalELRewardsCollected(); @@ -108,6 +116,12 @@ describe("Integration: Accounting", () => { const elRewardsVaultBalance = await ethers.provider.getBalance(elRewardsVault); const withdrawalVaultBalance = await ethers.provider.getBalance(withdrawalVault); const burnerShares = await lido.sharesOf(burner); + const bufferedEther = await lido.getBufferedEther(); + const depositsReserveTarget = await lido.getDepositsReserveTarget(); + const depositsReserve = await lido.getDepositsReserve(); + const withdrawalsReserve = await lido.getWithdrawalsReserve(); + const depositableEther = await lido.getDepositableEther(); + const unfinalizedStETH = await withdrawalQueue.unfinalizedStETH(); return { lastProcessingRefSlot, @@ -118,6 +132,12 @@ describe("Integration: Accounting", () => { elRewardsVaultBalance, withdrawalVaultBalance, burnerShares, + bufferedEther, + depositsReserveTarget, + depositsReserve, + withdrawalsReserve, + depositableEther, + unfinalizedStETH, }; } @@ -134,6 +154,12 @@ describe("Integration: Accounting", () => { elRewardsVaultBalance, withdrawalVaultBalance, burnerShares, + bufferedEther, + depositsReserveTarget, + depositsReserve, + withdrawalsReserve, + 
depositableEther, + unfinalizedStETH, } = await readState(); expect(lastProcessingRefSlot).to.be.greaterThan( @@ -166,6 +192,24 @@ describe("Integration: Accounting", () => { beforeState.internalShares + (expectedDelta.internalShares ?? 0n), "Internal shares mismatch", ); + + expect(depositsReserveTarget).to.equal( + beforeState.depositsReserveTarget, + "Deposits reserve target should not change during report processing", + ); + const expectedDepositsReserve = bufferedEther < depositsReserveTarget ? bufferedEther : depositsReserveTarget; + expect(depositsReserve).to.equal( + expectedDepositsReserve, + "Deposits reserve should be synced to min(buffered ether, deposits reserve target)", + ); + expect(depositsReserve).to.be.lte(depositsReserveTarget, "Deposits reserve should not exceed target"); + expect(depositsReserve).to.be.lte(bufferedEther, "Deposits reserve should not exceed buffered ether"); + expect(depositableEther).to.equal( + bufferedEther - withdrawalsReserve, + "Depositable should equal buffered minus withdrawals reserve", + ); + expect(withdrawalsReserve).to.be.lte(unfinalizedStETH, "Withdrawals reserve should not exceed demand"); + expect(withdrawalsReserve).to.be.lte(bufferedEther, "Withdrawals reserve should not exceed buffered ether"); } async function expectTransferFeesEvents( @@ -227,7 +271,7 @@ describe("Integration: Accounting", () => { reportBurner: false, skipWithdrawals: true, }), - ).to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectCLBalanceIncrease(uint256)"); + ).to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectTotalCLBalanceIncrease"); }); it("Should account correctly with no CL rebase", async () => { @@ -251,6 +295,86 @@ describe("Integration: Accounting", () => { expect(sharesRateBefore).to.be.lessThanOrEqual(sharesRateAfter); }); + it("Should account correctly with non-zero deposits and withdrawals reserves", async () => { + const { lido, withdrawalQueue } = ctx.contracts; + const agent = await 
ctx.getSigner("agent"); + + await lido.connect(agent).setDepositsReserveTarget(ether("10")); + await lido.connect(agent).submit(ZeroAddress, { value: ether("90") }); + await lido.connect(agent).approve(withdrawalQueue, ether("5")); + await withdrawalQueue.connect(agent).requestWithdrawals([ether("5")], agent.address); + await report(ctx, { + clDiff: 0n, + excludeVaultsBalances: true, + reportBurner: false, + skipWithdrawals: true, + dryRun: false, + }); + + const beforeState = await readState(); + expect(beforeState.depositsReserveTarget).to.equal(ether("10")); + expect(beforeState.depositsReserve).to.equal(ether("10")); + expect(beforeState.withdrawalsReserve).to.be.gt(0n); + const expectedWithdrawalsReserve = + beforeState.unfinalizedStETH < beforeState.bufferedEther - beforeState.depositsReserve + ? beforeState.unfinalizedStETH + : beforeState.bufferedEther - beforeState.depositsReserve; + expect(beforeState.withdrawalsReserve).to.equal(expectedWithdrawalsReserve); + expect(beforeState.depositableEther).to.equal(beforeState.bufferedEther - beforeState.withdrawalsReserve); + + // Deferred target increase must not change effective reserves before report processing. 
+ const increasedTarget = beforeState.bufferedEther + ether("1000"); + await lido.connect(agent).setDepositsReserveTarget(increasedTarget); + expect(await lido.getDepositsReserve()).to.equal(beforeState.depositsReserve); + expect(await lido.getWithdrawalsReserve()).to.equal(beforeState.withdrawalsReserve); + const beforeStateAfterTargetUpdate = await readState(); + expect(beforeStateAfterTargetUpdate.depositsReserveTarget).to.equal(increasedTarget); + + const requestTimestampMargin = (await ctx.contracts.oracleReportSanityChecker.getOracleReportLimits()) + .requestTimestampMargin; + await advanceChainTime(requestTimestampMargin + 1n); + + const refSlot = (await ctx.contracts.hashConsensus.getCurrentFrame()).refSlot; + const dryRunParams = { + refSlot, + waitNextReportTime: false, + dryRun: true, + clDiff: 0n, + reportElVault: false, + reportWithdrawalsVault: false, + reportBurner: false, + excludeVaultsBalances: true, + } as const; + + const dryRunBefore = await report(ctx, dryRunParams); + expect(dryRunBefore.data.withdrawalFinalizationBatches.length).to.be.gt( + 0, + "Expected non-empty withdrawal finalization batches in dry-run report", + ); + const [lockBefore] = await withdrawalQueue.prefinalize( + dryRunBefore.data.withdrawalFinalizationBatches, + dryRunBefore.data.simulatedShareRate, + ); + expect(lockBefore).to.be.lte(beforeStateAfterTargetUpdate.withdrawalsReserve); + + const { reportTx } = await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false }); + const reportTxReceipt = (await reportTx!.wait())!; + const { amountOfETHLocked, sharesBurntAmount } = getWithdrawalParamsFromEvent(reportTxReceipt); + + await expectStateChanges(beforeStateAfterTargetUpdate, { + totalELRewardsCollected: 0n, + internalEther: amountOfETHLocked * -1n, + internalShares: sharesBurntAmount * -1n, + lidoBalance: amountOfETHLocked * -1n, + }); + + const afterState = await readState(); + expect(afterState.depositsReserveTarget).to.equal(increasedTarget); + 
expect(afterState.depositsReserve).to.equal(afterState.bufferedEther); + expect(afterState.withdrawalsReserve).to.equal(0n); + expect(afterState.depositableEther).to.equal(afterState.bufferedEther); + }); + it("Should account correctly with negative CL rebase", async () => { const CL_REBASE_AMOUNT = ether("-100"); @@ -282,27 +406,26 @@ describe("Integration: Accounting", () => { it("Should account correctly with positive CL rebase close to the limits", async () => { const { lido, oracleReportSanityChecker } = ctx.contracts; + await seedProtocolPendingBaseline(ctx, NOR_MODULE_ID); + const { annualBalanceIncreaseBPLimit } = await oracleReportSanityChecker.getOracleReportLimits(); - const { beaconBalance } = await lido.getBeaconStat(); + const { clValidatorsBalanceAtLastReport } = await lido.getBalanceStats(); const { timeElapsed } = await getReportTimeElapsed(ctx); - // To calculate the rebase amount close to the annual increase limit - // we use (ONE_DAY + 1n) to slightly underperform for the daily limit - // This ensures we're testing a scenario very close to, but not exceeding, the annual limit - const time = timeElapsed + 1n; - let rebaseAmount = (beaconBalance * annualBalanceIncreaseBPLimit * time) / (365n * ONE_DAY) / MAX_BASIS_POINTS; + // `report()` submits the raw post-vs-pre CL delta. In this seeded scenario the + // pending baseline is activated inside the same report, so the raw boundary is + // the validators-based safety-cap component rather than `pending + safetyCap`. 
+ let rebaseAmount = + (clValidatorsBalanceAtLastReport * annualBalanceIncreaseBPLimit * timeElapsed) / + (365n * ONE_DAY) / + MAX_BASIS_POINTS; rebaseAmount = roundToGwei(rebaseAmount); - // At this point, rebaseAmount represents a positive CL rebase that is - // just slightly below the maximum allowed daily increase, testing the system's - // behavior near its operational limits const beforeState = await readState(); // Report - const params = { clDiff: rebaseAmount, excludeVaultsBalances: true }; - - const { reportTx } = (await report(ctx, params)) as { + const { reportTx } = (await report(ctx, { clDiff: rebaseAmount, excludeVaultsBalances: true })) as { reportTx: TransactionResponse; extraDataTx: TransactionResponse; }; diff --git a/test/integration/core/burn-shares.integration.ts b/test/integration/core/burn-shares.integration.ts index 1960e547c6..8714131d76 100644 --- a/test/integration/core/burn-shares.integration.ts +++ b/test/integration/core/burn-shares.integration.ts @@ -66,11 +66,11 @@ describe("Scenario: Burn Shares", () => { const accountingSigner = await impersonate(accounting.address, ether("1")); await burner.connect(accountingSigner).requestBurnSharesForCover(stranger, sharesToBurn); - const { beaconValidators, beaconBalance } = await lido.getBeaconStat(); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = await lido.getBalanceStats(); + const clBalance = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport; await handleOracleReport(ctx, { - beaconValidators, - clBalance: beaconBalance, + clBalance, sharesRequestedToBurn: sharesToBurn, withdrawalVaultBalance: 0n, elRewardsVaultBalance: 0n, diff --git a/test/integration/core/deposits-reserve.integration.ts b/test/integration/core/deposits-reserve.integration.ts new file mode 100644 index 0000000000..bdcfedf010 --- /dev/null +++ b/test/integration/core/deposits-reserve.integration.ts @@ -0,0 +1,395 @@ +import { expect } from "chai"; +import { ZeroAddress } from 
"ethers"; +import { ethers } from "hardhat"; + +import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; + +import { advanceChainTime, ether, impersonate, updateBalance } from "lib"; +import { getProtocolContext, ProtocolContext, report } from "lib/protocol"; + +import { Snapshot, ZERO_HASH } from "test/suite"; + +describe("Integration: Deposits reserve", () => { + let ctx: ProtocolContext; + let snapshot: string; + let testSnapshot: string; + + let reserveManager: HardhatEthersSigner; + let holder: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + before(async () => { + ctx = await getProtocolContext(); + snapshot = await Snapshot.take(); + + [holder, stranger] = await ethers.getSigners(); + reserveManager = holder; + + const { acl, lido } = ctx.contracts; + const agent = await ctx.getSigner("agent"); + const role = await lido.BUFFER_RESERVE_MANAGER_ROLE(); + const hasRole = await acl["hasPermission(address,address,bytes32)"](reserveManager.address, lido.address, role); + if (!hasRole) { + // Grant reserve management permission once for the non-agent actor used in ACL tests. 
+ await acl.connect(agent).grantPermission(reserveManager.address, lido.address, role); + } + }); + + beforeEach(async () => { + testSnapshot = await Snapshot.take(); + }); + + afterEach(async () => { + await Snapshot.restore(testSnapshot); + }); + + after(async () => { + await Snapshot.restore(snapshot); + }); + + it("Authorizes reserve target updates via BUFFER_RESERVE_MANAGER_ROLE only", async () => { + const { lido } = ctx.contracts; + + await expect(lido.connect(stranger).setDepositsReserveTarget(ether("1"))).to.be.revertedWith("APP_AUTH_FAILED"); + await expect(lido.connect(reserveManager).setDepositsReserveTarget(ether("1"))) + .to.emit(lido, "DepositsReserveTargetSet") + .withArgs(ether("1")); + }); + + it("Applies target decrease immediately and defers target increase until report sync", async () => { + const { lido } = ctx.contracts; + + const targetBefore = await lido.getDepositsReserveTarget(); + const reserveBeforeIncrease = await lido.getDepositsReserve(); + await lido.connect(holder).submit(ZeroAddress, { value: ether("100") }); + + const increasedTarget = targetBefore + ether("2"); + // Increase is stored in target immediately but reserve value is synchronized on report. 
+ await lido.connect(reserveManager).setDepositsReserveTarget(increasedTarget); + expect(await lido.getDepositsReserveTarget()).to.equal(increasedTarget); + + expect(await lido.getDepositsReserve()).to.equal(reserveBeforeIncrease); + await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); + expect(await lido.getDepositsReserve()).to.equal(increasedTarget); + + const increasedAgain = increasedTarget + ether("10"); + await lido.connect(reserveManager).setDepositsReserveTarget(increasedAgain); + expect(await lido.getDepositsReserveTarget()).to.equal(increasedAgain); + expect(await lido.getDepositsReserve()).to.equal(increasedTarget); + + const decreasedTarget = increasedTarget - ether("1"); + // Decrease is applied immediately to avoid reducing withdrawals budget unexpectedly. + await lido.connect(reserveManager).setDepositsReserveTarget(decreasedTarget); + expect(await lido.getDepositsReserve()).to.equal(decreasedTarget); + }); + + it("Releases deposits reserve when target is set to zero and preserves reserve/depositable invariants", async () => { + const { lido, withdrawalQueue } = ctx.contracts; + + const requestAmount = ether("5"); + await lido.connect(holder).submit(ZeroAddress, { value: ether("100") }); + await lido.connect(holder).approve(withdrawalQueue, requestAmount); + await withdrawalQueue.connect(holder).requestWithdrawals([requestAmount], holder.address); + + await lido.connect(reserveManager).setDepositsReserveTarget(ether("40")); + // First set a non-zero effective deposits reserve, then verify explicit reset to zero. 
+ await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); + expect(await lido.getDepositsReserve()).to.equal(ether("40")); + + await lido.connect(reserveManager).setDepositsReserveTarget(0n); + expect(await lido.getDepositsReserve()).to.equal(0n); + + const buffered = await lido.getBufferedEther(); + const withdrawalsReserve = await lido.getWithdrawalsReserve(); + const unfinalized = await withdrawalQueue.unfinalizedStETH(); + + const expectedWithdrawalsReserve = buffered < unfinalized ? buffered : unfinalized; + // With deposits reserve released, withdrawals reserve is bounded only by buffered and unfinalized demand. + expect(withdrawalsReserve).to.equal(expectedWithdrawalsReserve); + expect(await lido.getDepositableEther()).to.equal(buffered - expectedWithdrawalsReserve); + }); + + it("Reaches increased target on the next report after deferred increase", async () => { + const { lido } = ctx.contracts; + + await lido.connect(holder).submit(ZeroAddress, { value: ether("100") }); + await lido.connect(reserveManager).setDepositsReserveTarget(ether("40")); + // First report materializes initial target in effective reserve. + await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); + expect(await lido.getDepositsReserve()).to.equal(ether("40")); + + await lido.connect(reserveManager).setDepositsReserveTarget(ether("20")); + expect(await lido.getDepositsReserve()).to.equal(ether("20")); + + await lido.connect(reserveManager).setDepositsReserveTarget(ether("40")); + expect(await lido.getDepositsReserve()).to.equal(ether("20")); + + // Second report applies deferred increase back to the new target. 
+ await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); + + expect(await lido.getDepositsReserveTarget()).to.equal(ether("40")); + expect(await lido.getDepositsReserve()).to.equal(ether("40")); + }); + + it("Computes finalization budget from withdrawal-available buffer, excluding deposits reserve", async () => { + const { lido, withdrawalQueue, locator } = ctx.contracts; + + const requestAmount = ether("1"); + await lido.connect(holder).submit(ZeroAddress, { value: ether("200") }); + await lido.connect(holder).approve(withdrawalQueue, requestAmount); + await withdrawalQueue.connect(holder).requestWithdrawals([requestAmount], holder.address); + const withdrawalsReserveBeforeProtection = await lido.getWithdrawalsReserve(); + expect(withdrawalsReserveBeforeProtection).to.be.gt(0n); + + const requestTimestampMargin = (await ctx.contracts.oracleReportSanityChecker.getOracleReportLimits()) + .requestTimestampMargin; + await advanceChainTime(requestTimestampMargin + 1n); + + const buffered = await lido.getBufferedEther(); + // Set target above buffered ether so synced deposits reserve consumes the full buffer first. + await lido.connect(reserveManager).setDepositsReserveTarget(buffered + ether("1000")); + await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); + expect(await lido.getWithdrawalsReserve()).to.equal(0n); + + const elRewardsVaultAddress = await locator.elRewardsVault(); + const extraEthBudget = ether("5"); + await updateBalance(elRewardsVaultAddress, extraEthBudget); + + // The report is built for fixed refSlot, so deposits after refSlot must not increase its finalization budget. 
+ await lido.connect(holder).submit(ZeroAddress, { value: ether("3") }); + expect(await lido.getWithdrawalsReserve()).to.equal(0n); + + const refSlot = (await ctx.contracts.hashConsensus.getCurrentFrame()).refSlot; + // Freeze report inputs at refSlot and evaluate finalization budget from dry-run output. + const { data } = await report(ctx, { + refSlot, + waitNextReportTime: false, + dryRun: true, + clDiff: 0n, + reportElVault: true, + reportWithdrawalsVault: false, + reportBurner: false, + excludeVaultsBalances: false, + }); + + expect(data.withdrawalFinalizationBatches.length).to.be.gt( + 0, + "Expected non-empty withdrawal finalization batches for tooling budget check", + ); + const [ethToLock] = await withdrawalQueue.prefinalize(data.withdrawalFinalizationBatches, data.simulatedShareRate); + + expect(ethToLock).to.be.lte(extraEthBudget); + }); + + it("Keeps fixed-refSlot finalization batches stable after late deposits", async () => { + const { lido, withdrawalQueue, locator } = ctx.contracts; + + const requestAmount = ether("1"); + await lido.connect(holder).submit(ZeroAddress, { value: ether("200") }); + await lido.connect(holder).approve(withdrawalQueue, requestAmount); + await withdrawalQueue.connect(holder).requestWithdrawals([requestAmount], holder.address); + + const requestTimestampMargin = (await ctx.contracts.oracleReportSanityChecker.getOracleReportLimits()) + .requestTimestampMargin; + await advanceChainTime(requestTimestampMargin + 1n); + + const depositsReserveBefore = await lido.getDepositsReserve(); + const depositsTargetBefore = await lido.getDepositsReserveTarget(); + + const elRewardsVaultAddress = await locator.elRewardsVault(); + await updateBalance(elRewardsVaultAddress, ether("3")); + + const refSlot = (await ctx.contracts.hashConsensus.getCurrentFrame()).refSlot; + // Build dry-run report with explicit refSlot to make batches deterministic. 
+ const dryRunParams = { + refSlot, + waitNextReportTime: false, + dryRun: true, + clDiff: 0n, + reportElVault: true, + reportWithdrawalsVault: false, + reportBurner: false, + excludeVaultsBalances: false, + } as const; + + const before = await report(ctx, dryRunParams); + expect(before.data.withdrawalFinalizationBatches.length).to.be.gt( + 0, + "Expected non-empty withdrawal finalization batches before late deposit", + ); + const [beforeLock] = await withdrawalQueue.prefinalize( + before.data.withdrawalFinalizationBatches, + before.data.simulatedShareRate, + ); + + // Late deposit after refSlot should not affect withdrawals finalization result. + await lido.connect(holder).submit(ZeroAddress, { value: ether("7") }); + expect(await lido.getDepositsReserveTarget()).to.equal(depositsTargetBefore); + expect(await lido.getDepositsReserve()).to.be.gte(depositsReserveBefore); + + const after = await report(ctx, dryRunParams); + expect(after.data.withdrawalFinalizationBatches.length).to.be.gt( + 0, + "Expected non-empty withdrawal finalization batches after late deposit", + ); + const [afterLock] = await withdrawalQueue.prefinalize( + after.data.withdrawalFinalizationBatches, + after.data.simulatedShareRate, + ); + + expect(afterLock).to.equal(beforeLock); + // Batches and ETH lock must stay unchanged: post-refSlot deposits must not affect finalization inputs. 
+ expect(after.data.withdrawalFinalizationBatches).to.deep.equal(before.data.withdrawalFinalizationBatches); + }); + + it("Keeps withdrawals finalization budget stable after reserve target increase post-refSlot", async () => { + const { lido, withdrawalQueue } = ctx.contracts; + + const requestAmount = ether("20"); + await lido.connect(holder).submit(ZeroAddress, { value: ether("200") }); + await lido.connect(holder).approve(withdrawalQueue, requestAmount); + await withdrawalQueue.connect(holder).requestWithdrawals([requestAmount], holder.address); + + const requestTimestampMargin = (await ctx.contracts.oracleReportSanityChecker.getOracleReportLimits()) + .requestTimestampMargin; + await advanceChainTime(requestTimestampMargin + 1n); + + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + const depositsReserveBefore = await lido.getDepositsReserve(); + expect(withdrawalsReserveBefore).to.be.gt(0n); + + const refSlot = (await ctx.contracts.hashConsensus.getCurrentFrame()).refSlot; + // Build dry-run data at fixed refSlot, then change target and re-run with the same refSlot. + const dryRunParams = { + refSlot, + waitNextReportTime: false, + dryRun: true, + clDiff: 0n, + reportElVault: false, + reportWithdrawalsVault: false, + reportBurner: false, + excludeVaultsBalances: true, + } as const; + + const before = await report(ctx, dryRunParams); + expect(before.data.withdrawalFinalizationBatches.length).to.be.gt( + 0, + "Expected non-empty withdrawal finalization batches before reserve target increase", + ); + const [beforeLock] = await withdrawalQueue.prefinalize( + before.data.withdrawalFinalizationBatches, + before.data.simulatedShareRate, + ); + + // Target increase after refSlot is deferred and must not affect current withdrawals finalization budget. 
+ await lido.connect(reserveManager).setDepositsReserveTarget(ether("120")); + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + expect(await lido.getDepositsReserve()).to.equal(depositsReserveBefore); + + const after = await report(ctx, dryRunParams); + expect(after.data.withdrawalFinalizationBatches.length).to.be.gt( + 0, + "Expected non-empty withdrawal finalization batches after reserve target increase", + ); + const [afterLock] = await withdrawalQueue.prefinalize( + after.data.withdrawalFinalizationBatches, + after.data.simulatedShareRate, + ); + + expect(await lido.getWithdrawalsReserve()).to.equal(withdrawalsReserveBefore); + expect(afterLock).to.be.lte(withdrawalsReserveBefore); + expect(beforeLock).to.be.lte(withdrawalsReserveBefore); + }); + + it("Does not reduce withdrawals reserve when CL deposits consume depositable ether", async () => { + const { lido, withdrawalQueue, stakingRouter, depositSecurityModule } = ctx.contracts; + + const requestAmount = ether("10"); + await lido.connect(reserveManager).submit(ZeroAddress, { value: ether("3200") }); + await lido.connect(reserveManager).approve(withdrawalQueue, requestAmount); + await withdrawalQueue.connect(reserveManager).requestWithdrawals([requestAmount], reserveManager.address); + + const bufferedBefore = await lido.getBufferedEther(); + const depositsReserveBefore = await lido.getDepositsReserve(); + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + const depositableBefore = await lido.getDepositableEther(); + expect(withdrawalsReserveBefore).to.be.gt(0n); + expect(depositableBefore).to.equal(bufferedBefore - withdrawalsReserveBefore); + + const dsmSigner = await impersonate(depositSecurityModule.address, ether("100")); + // Spend depositable ether through CL deposit path. 
+ const depositTx = await stakingRouter.connect(dsmSigner).deposit(1n, ZERO_HASH); + await depositTx.wait(); + + const bufferedAfter = await lido.getBufferedEther(); + const depositsReserveAfter = await lido.getDepositsReserve(); + const withdrawalsReserveAfter = await lido.getWithdrawalsReserve(); + const depositableAfter = await lido.getDepositableEther(); + const consumed = bufferedBefore - bufferedAfter; + + expect(consumed).to.be.gt(0n, "Expected non-zero buffered ether consumption during CL deposit"); + // CL deposit consumes only depositable ether; withdrawals reserve must remain unchanged. + expect(depositsReserveAfter).to.be.lte(depositsReserveBefore); + expect(withdrawalsReserveAfter).to.equal(withdrawalsReserveBefore); + expect(depositableAfter).to.equal(depositableBefore - consumed); + expect(depositableAfter).to.equal(bufferedAfter - withdrawalsReserveAfter); + }); + + it("Keeps fixed-refSlot finalization budget bounded after spending depositable ether post-refSlot", async () => { + const { lido, withdrawalQueue, stakingRouter, depositSecurityModule } = ctx.contracts; + + const requestAmount = ether("20"); + await lido.connect(holder).submit(ZeroAddress, { value: ether("200") }); + await lido.connect(holder).approve(withdrawalQueue, requestAmount); + await withdrawalQueue.connect(holder).requestWithdrawals([requestAmount], holder.address); + + const requestTimestampMargin = (await ctx.contracts.oracleReportSanityChecker.getOracleReportLimits()) + .requestTimestampMargin; + await advanceChainTime(requestTimestampMargin + 1n); + + const depositsReserveBefore = await lido.getDepositsReserve(); + const withdrawalsReserveBefore = await lido.getWithdrawalsReserve(); + expect(withdrawalsReserveBefore).to.be.gt(0n); + + const refSlot = (await ctx.contracts.hashConsensus.getCurrentFrame()).refSlot; + // Fix refSlot first, then spend depositable ether to emulate post-refSlot CL deposits. 
+ const reportParams = { + refSlot, + waitNextReportTime: false, + clDiff: 0n, + reportElVault: false, + reportWithdrawalsVault: false, + reportBurner: false, + excludeVaultsBalances: true, + } as const; + + const before = await report(ctx, { ...reportParams, dryRun: true }); + expect(before.data.withdrawalFinalizationBatches.length).to.be.gt(0); + const [lockBefore] = await withdrawalQueue.prefinalize( + before.data.withdrawalFinalizationBatches, + before.data.simulatedShareRate, + ); + + const dsmSigner = await impersonate(depositSecurityModule.address, ether("100")); + const bufferedBeforeSpend = await lido.getBufferedEther(); + await stakingRouter.connect(dsmSigner).deposit(1n, ZERO_HASH); + const bufferedAfterSpend = await lido.getBufferedEther(); + expect(bufferedAfterSpend).to.be.lt(bufferedBeforeSpend); + + const depositsReserveAfterSpend = await lido.getDepositsReserve(); + const withdrawalsReserveAfterSpend = await lido.getWithdrawalsReserve(); + expect(depositsReserveAfterSpend).to.be.lte(depositsReserveBefore); + expect(withdrawalsReserveAfterSpend).to.equal(withdrawalsReserveBefore); + + const after = await report(ctx, { ...reportParams, dryRun: true }); + expect(after.data.withdrawalFinalizationBatches.length).to.be.gt(0); + const [lockAfter] = await withdrawalQueue.prefinalize( + after.data.withdrawalFinalizationBatches, + after.data.simulatedShareRate, + ); + expect(lockAfter).to.be.gt(0n); + // Finalization lock remains bounded by precomputed withdrawals reserve from fixed refSlot. 
+ expect(lockAfter).to.be.lte(withdrawalsReserveBefore); + expect(lockBefore).to.be.lte(withdrawalsReserveBefore); + }); +}); diff --git a/test/integration/core/happy-path.integration.ts b/test/integration/core/happy-path.integration.ts index 436edc679f..42de5a0961 100644 --- a/test/integration/core/happy-path.integration.ts +++ b/test/integration/core/happy-path.integration.ts @@ -4,9 +4,10 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; -import { advanceChainTime, batch, ether, impersonate, log, updateBalance } from "lib"; +import { advanceChainTime, batch, ether, impersonate, log, ONE_GWEI, updateBalance } from "lib"; import { finalizeWQViaElVault, + getCurrentModuleAccountingReportParams, getProtocolContext, norSdvtEnsureOperators, OracleReportParams, @@ -14,9 +15,10 @@ import { removeStakingLimit, report, setStakingLimit, + submitReportDataWithConsensusAndEmptyExtraData, } from "lib/protocol"; -import { bailOnFailure, MAX_DEPOSIT, Snapshot, ZERO_HASH } from "test/suite"; +import { bailOnFailure, Snapshot, ZERO_HASH } from "test/suite"; import { LogDescriptionExtended } from "../../../lib/protocol/types"; @@ -32,6 +34,8 @@ describe("Scenario: Protocol Happy Path", () => { let uncountedStETHShares: bigint; let amountWithRewards: bigint; let depositCount: bigint; + let finalizedWithdrawalAmount: bigint; + let norPendingDepositsGwei: bigint; before(async () => { ctx = await getProtocolContext(); @@ -201,30 +205,41 @@ describe("Scenario: Protocol Happy Path", () => { it("Should deposit to staking modules", async () => { const { lido, withdrawalQueue, stakingRouter, depositSecurityModule } = ctx.contracts; + const agent = await ctx.getSigner("agent"); await lido.connect(stEthHolder).submit(ZeroAddress, { value: ether("3200") }); + await lido.connect(agent).setDepositsReserveTarget(ether("128")); + await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true 
}); const withdrawalsUnfinalizedStETH = await withdrawalQueue.unfinalizedStETH(); + const depositsReserveTarget = await lido.getDepositsReserveTarget(); + const depositsReserve = await lido.getDepositsReserve(); + const withdrawalsReserve = await lido.getWithdrawalsReserve(); const depositableEther = await lido.getDepositableEther(); const bufferedEtherBeforeDeposit = await lido.getBufferedEther(); - const expectedDepositableEther = bufferedEtherBeforeDeposit - withdrawalsUnfinalizedStETH; + const expectedDepositableEther = bufferedEtherBeforeDeposit - withdrawalsReserve; + expect(depositsReserveTarget).to.equal(ether("128"), "Deposits reserve target"); + expect(depositsReserve).to.equal(ether("128"), "Deposits reserve"); expect(depositableEther).to.equal(expectedDepositableEther, "Depositable ether"); + expect(withdrawalsReserve).to.be.lte(withdrawalsUnfinalizedStETH, "Withdrawals reserve should not exceed demand"); log.debug("Depositable ether", { "Buffered ether": ethers.formatEther(bufferedEtherBeforeDeposit), "Withdrawals unfinalized stETH": ethers.formatEther(withdrawalsUnfinalizedStETH), + "Withdrawals reserve": ethers.formatEther(withdrawalsReserve), "Depositable ether": ethers.formatEther(depositableEther), }); const dsmSigner = await impersonate(depositSecurityModule.address, ether("100")); const stakingModules = (await stakingRouter.getStakingModules()).filter((m) => m.id === 1n); depositCount = 0n; + norPendingDepositsGwei = 0n; let expectedBufferedEtherAfterDeposit = bufferedEtherBeforeDeposit; for (const module of stakingModules) { - const depositTx = await lido.connect(dsmSigner).deposit(MAX_DEPOSIT, module.id, ZERO_HASH); + const depositTx = await stakingRouter.connect(dsmSigner).deposit(module.id, ZERO_HASH); const depositReceipt = (await depositTx.wait()) as ContractTransactionReceipt; const unbufferedEvent = ctx.getEvents(depositReceipt, "Unbuffered")[0]; const unbufferedAmount = unbufferedEvent?.args[0] || 0n; @@ -237,6 +252,7 @@ 
describe("Scenario: Protocol Happy Path", () => { }); depositCount += deposits; + norPendingDepositsGwei += unbufferedAmount / ONE_GWEI; expectedBufferedEtherAfterDeposit -= unbufferedAmount; } @@ -295,9 +311,27 @@ describe("Scenario: Protocol Happy Path", () => { const treasuryBalanceBeforeRebase = await lido.sharesOf(treasuryAddress); - // 0.001 – to simulate rewards + const { depositedSinceLastReport } = await lido.getBalanceStats(); + + // Deposit() moved ETH into protocol pending, but the new sanity path takes its + // baseline from the previous Lido report snapshot rather than router-only state. + // Submit a neutral report first so the next reward-bearing report stays on the + // original "deposits activated + tiny positive CL reward" happy path. + const { data: pendingBaselineData } = await report(ctx, { + clDiff: depositedSinceLastReport, + dryRun: true, + excludeVaultsBalances: true, + skipWithdrawals: true, + ...(await getCurrentModuleAccountingReportParams(ctx)), + }); + await submitReportDataWithConsensusAndEmptyExtraData(ctx, { + ...pendingBaselineData, + clValidatorsBalanceGwei: BigInt(pendingBaselineData.clValidatorsBalanceGwei) - norPendingDepositsGwei, + clPendingBalanceGwei: norPendingDepositsGwei, + }); + const reportData: Partial = { - clDiff: ether("32") * depositCount + ether("0.001"), + clDiff: ether("0.001"), clAppearedValidators: depositCount, }; @@ -549,7 +583,7 @@ describe("Scenario: Protocol Happy Path", () => { const lockedEtherAmountBeforeFinalization = await withdrawalQueue.getLockedEtherAmount(); - const reportParams = { clDiff: ether("0.0005") }; // simulate some rewards + const reportParams = { clDiff: 0n }; const { reportTx } = (await report(ctx, reportParams)) as { reportTx: TransactionResponse }; const reportTxReceipt = (await reportTx.wait()) as ContractTransactionReceipt; @@ -557,24 +591,27 @@ describe("Scenario: Protocol Happy Path", () => { const requestId = await withdrawalQueue.getLastRequestId(); const 
lockedEtherAmountAfterFinalization = await withdrawalQueue.getLockedEtherAmount(); - const expectedLockedEtherAmountAfterFinalization = lockedEtherAmountAfterFinalization - amountWithRewards; + const withdrawalFinalizedEvent = ctx.getEvents(reportTxReceipt, "WithdrawalsFinalized")[0]; + finalizedWithdrawalAmount = withdrawalFinalizedEvent.args.amountOfETHLocked; log.debug("Locked ether amount", { "Before finalization": ethers.formatEther(lockedEtherAmountBeforeFinalization), "After finalization": ethers.formatEther(lockedEtherAmountAfterFinalization), - "Amount with rewards": ethers.formatEther(amountWithRewards), + "Finalized amount": ethers.formatEther(finalizedWithdrawalAmount), }); expect(lockedEtherAmountBeforeFinalization).to.equal( - expectedLockedEtherAmountAfterFinalization, + lockedEtherAmountAfterFinalization - finalizedWithdrawalAmount, "Locked ether amount after finalization", ); - - const withdrawalFinalizedEvent = ctx.getEvents(reportTxReceipt, "WithdrawalsFinalized")[0]; + expect(amountWithRewards - finalizedWithdrawalAmount).to.be.lte( + 2n, + "Finalized amount should differ from requested amount by at most the documented dust", + ); expect(withdrawalFinalizedEvent?.args.toObject()).to.deep.include( { - amountOfETHLocked: amountWithRewards, + amountOfETHLocked: finalizedWithdrawalAmount, from: requestId, to: requestId, }, @@ -608,7 +645,7 @@ describe("Scenario: Protocol Happy Path", () => { const balanceBeforeClaim = await getBalances(stranger); expect(status.isFinalized).to.be.true; - expect(claimableEtherBeforeClaim).to.equal(amountWithRewards, "Claimable ether before claim"); + expect(claimableEtherBeforeClaim).to.equal(finalizedWithdrawalAmount, "Claimable ether before claim"); const claimTx = await withdrawalQueue.connect(stranger).claimWithdrawals([requestId], hints); const claimTxReceipt = (await claimTx.wait()) as ContractTransactionReceipt; @@ -621,7 +658,7 @@ describe("Scenario: Protocol Happy Path", () => { requestId, owner: 
stranger.address, receiver: stranger.address, - amountOfETH: amountWithRewards, + amountOfETH: finalizedWithdrawalAmount, }, "WithdrawalClaimed event", ); @@ -640,7 +677,7 @@ describe("Scenario: Protocol Happy Path", () => { const balanceAfterClaim = await getBalances(stranger); expect(balanceAfterClaim.ETH).to.equal( - balanceBeforeClaim.ETH + amountWithRewards - spentGas, + balanceBeforeClaim.ETH + finalizedWithdrawalAmount - spentGas, "ETH balance after claim", ); @@ -649,11 +686,11 @@ describe("Scenario: Protocol Happy Path", () => { log.debug("Locked ether amount", { "Before withdrawal": ethers.formatEther(lockedEtherAmountBeforeWithdrawal), "After claim": ethers.formatEther(lockedEtherAmountAfterClaim), - "Amount with rewards": ethers.formatEther(amountWithRewards), + "Finalized amount": ethers.formatEther(finalizedWithdrawalAmount), }); expect(lockedEtherAmountAfterClaim).to.equal( - lockedEtherAmountBeforeWithdrawal - amountWithRewards, + lockedEtherAmountBeforeWithdrawal - finalizedWithdrawalAmount, "Locked ether amount after claim", ); diff --git a/test/integration/core/lido-storage.integration.ts b/test/integration/core/lido-storage.integration.ts index 9d58e556d1..2275f8d1b6 100644 --- a/test/integration/core/lido-storage.integration.ts +++ b/test/integration/core/lido-storage.integration.ts @@ -45,3 +45,37 @@ describe("Integration: Lido storage slots after V3", () => { } }); }); + +describe("Integration: Lido storage slots after V4 (SRv3)", () => { + let ctx: ProtocolContext; + let snapshot: string; + + let stEthHolder: HardhatEthersSigner; + let stranger: HardhatEthersSigner; + + before(async () => { + ctx = await getProtocolContext(); + + [stEthHolder, stranger] = await ethers.getSigners(); + await updateBalance(stranger.address, ether("100000000")); + await updateBalance(stEthHolder.address, ether("100000000")); + + snapshot = await Snapshot.take(); + }); + + after(async () => await Snapshot.restore(snapshot)); + + it("Should have old storage slots 
zeroed in V4", async () => { + const lido = ctx.contracts.lido; + + const oldStorageSlots = { + CL_BALANCE_AND_CL_VALIDATORS_POSITION: streccak("lido.Lido.clBalanceAndClValidators"), + BUFFERED_ETHER_AND_DEPOSITED_VALIDATORS_POSITION: streccak("lido.Lido.bufferedEtherAndDepositedValidators"), + }; + + for (const [key, value] of Object.entries(oldStorageSlots)) { + const storageValue = await ethers.provider.getStorage(lido, value); + expect(storageValue).to.equal(0n, `${key} storage slot at ${value} is not empty`); + } + }); +}); diff --git a/test/integration/core/negative-rebase.integration.ts b/test/integration/core/negative-rebase.integration.ts index ccd991d072..335fcbfeba 100644 --- a/test/integration/core/negative-rebase.integration.ts +++ b/test/integration/core/negative-rebase.integration.ts @@ -4,8 +4,15 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; -import { ether } from "lib"; -import { getProtocolContext, ProtocolContext, report } from "lib/protocol"; +import { ether, impersonate } from "lib"; +import { + getDepositedSinceLastReport, + getProtocolContext, + ProtocolContext, + report, + reportWithEffectiveClDiff, + resetCLBalanceDecreaseWindow, +} from "lib/protocol"; import { Snapshot } from "test/suite"; @@ -55,22 +62,25 @@ describe("Integration: Negative rebase", () => { return exited; }; + const ensureAtLeastOneStoredReport = async () => { + const reportDataCount = await ctx.contracts.oracleReportSanityChecker.getReportDataCount(); + if (reportDataCount === 0n) { + await reportWithEffectiveClDiff(ctx, 0n, { + skipWithdrawals: true, + excludeVaultsBalances: true, + }); + } + }; + it("Should store correctly exited validators count", async () => { const { locator, oracleReportSanityChecker } = ctx.contracts; - expect((await locator.oracleReportSanityChecker()) == oracleReportSanityChecker.address); + expect(await 
locator.oracleReportSanityChecker()).to.equal(oracleReportSanityChecker.address); const currentExited = await exitedValidatorsCount(); const reportExitedValidators = currentExited.get(1n) ?? 0n; - - // On upgrade OracleReportSanityChecker is new and not provisioned thus has no reports - if ((await oracleReportSanityChecker.getReportDataCount()) === 0n) { - await report(ctx, { - clDiff: ether("0"), - skipWithdrawals: true, - clAppearedValidators: 0n, - }); - } + await ensureAtLeastOneStoredReport(); + const reportDataCountBefore = await oracleReportSanityChecker.getReportDataCount(); await report(ctx, { clDiff: ether("0"), @@ -80,40 +90,103 @@ describe("Integration: Negative rebase", () => { numExitedValidatorsByStakingModule: [reportExitedValidators + 2n], }); - const count = await oracleReportSanityChecker.getReportDataCount(); - expect(count).to.be.greaterThanOrEqual(2); + const reportDataCountAfter = await oracleReportSanityChecker.getReportDataCount(); + expect(reportDataCountAfter).to.equal(reportDataCountBefore + 1n); - const lastReportData = await oracleReportSanityChecker.reportData(count - 1n); - const beforeLastReportData = await oracleReportSanityChecker.reportData(count - 2n); + const updatedExited = await exitedValidatorsCount(); + const updatedExitedForModule = updatedExited.get(1n) ?? 
0n; + const totalExitedBefore = Array.from(currentExited.values()).reduce((acc, val) => acc + val, 0n); + const totalExitedAfter = Array.from(updatedExited.values()).reduce((acc, val) => acc + val, 0n); - const lastExitedTotal = Array.from(currentExited.values()).reduce((acc, val) => acc + val, 0n); - - expect(lastReportData.totalExitedValidators).to.be.equal(lastExitedTotal + 2n); - expect(beforeLastReportData.totalExitedValidators).to.be.equal(lastExitedTotal); + expect(updatedExitedForModule).to.be.equal(reportExitedValidators + 2n); + expect(totalExitedAfter).to.be.equal(totalExitedBefore + 2n); }); - // 56 weeks of negative rebases is too much for the test and it breaks with the SocketError: other side closed - it.skip("Should store correctly many negative rebases", async () => { + it("Should store correctly many negative rebases", async () => { const { locator, oracleReportSanityChecker } = ctx.contracts; - expect((await locator.oracleReportSanityChecker()) == oracleReportSanityChecker.address); + expect(await locator.oracleReportSanityChecker()).to.equal(oracleReportSanityChecker.address); + + await resetCLBalanceDecreaseWindow(ctx); + await ensureAtLeastOneStoredReport(); + + const REPORTS_REPEATED = 10; + const CL_DIFF_PER_REPORT = -1000000000n; // effective -1 gwei per report relative to principal CL balance + let reportDataCount = await oracleReportSanityChecker.getReportDataCount(); + expect(reportDataCount).to.be.gt(0n); + let previousCLBalance = (await oracleReportSanityChecker.reportData(reportDataCount - 1n)).clBalance; - const REPORTS_REPEATED = 56; - const SINGLE_REPORT_DECREASE = -1000000000n; for (let i = 0; i < REPORTS_REPEATED; i++) { - await report(ctx, { - clDiff: SINGLE_REPORT_DECREASE * BigInt(i + 1), + const depositedSinceLastReport = await getDepositedSinceLastReport(ctx); + + await reportWithEffectiveClDiff(ctx, CL_DIFF_PER_REPORT, { skipWithdrawals: true, reportWithdrawalsVault: false, reportElVault: false, }); + + reportDataCount += 
1n; + const reportCountAfter = await oracleReportSanityChecker.getReportDataCount(); + expect(reportCountAfter).to.equal(reportDataCount); + + const lastReportData = await oracleReportSanityChecker.reportData(reportDataCount - 1n); + const expectedCurrentCLBalance = previousCLBalance + depositedSinceLastReport + CL_DIFF_PER_REPORT; + + expect(lastReportData.clBalance).to.equal(expectedCurrentCLBalance); + expect(lastReportData.clBalance).to.be.lt(previousCLBalance + depositedSinceLastReport); + previousCLBalance = lastReportData.clBalance; } - const count = await oracleReportSanityChecker.getReportDataCount(); - expect(count).to.be.greaterThanOrEqual(REPORTS_REPEATED + 1); + }); - for (let i = count - 1n, j = REPORTS_REPEATED - 1; i >= 0 && j >= 0; --i, --j) { - const reportData = await oracleReportSanityChecker.reportData(i); - expect(reportData.negativeCLRebaseWei).to.be.equal(-1n * SINGLE_REPORT_DECREASE * BigInt(j + 1)); + // Tests the sliding window CL decrease check by calling checkAccountingOracleReport + // directly with zero deposits/withdrawals (so adjustedBase == raw baseline balance). + it("Should revert with IncorrectCLBalanceDecrease on gradual negative rebases", async () => { + const { oracleReportSanityChecker, accounting } = ctx.contracts; + + const accountingSigner = await impersonate(await accounting.getAddress(), ether("1")); + + const reportDataCount = await oracleReportSanityChecker.getReportDataCount(); + let currentBalance = + reportDataCount === 0n + ? ether("1000000") + : (await oracleReportSanityChecker.reportData(reportDataCount - 1n)).clBalance; + + const reportFromAccounting = (preBalance: bigint, postBalance: bigint) => + oracleReportSanityChecker + .connect(accountingSigner) + .checkAccountingOracleReport(24n * 60n * 60n, preBalance, 0n, postBalance, 0n, 0n, 0n, 0n, 0n, 0n); + + // REPORTS_WINDOW in contract is 36 (private constant, no getter). + // Fill window + 1 neutral data points to fully control the baseline. 
+ const REPORTS_WINDOW = 36; + for (let i = 0; i < REPORTS_WINDOW + 1; ++i) { + await reportFromAccounting(currentBalance, currentBalance); } + + // Derive the number of 1% decreases that fit under the limit from the actual config. + const limits = await oracleReportSanityChecker.getOracleReportLimits(); + const maxDecreaseBP = limits.maxCLBalanceDecreaseBP; + const DECREASE_PER_REPORT_BP = 100n; // 1% + + let passingReports = 0; + let cumulativeBalanceBP = 10_000n; + while (true) { + const next = cumulativeBalanceBP - (cumulativeBalanceBP * DECREASE_PER_REPORT_BP) / 10_000n; + if (10_000n - next > maxDecreaseBP) break; + cumulativeBalanceBP = next; + passingReports++; + } + + for (let i = 0; i < passingReports; ++i) { + const decreasedBalance = currentBalance - (currentBalance * DECREASE_PER_REPORT_BP) / 10_000n; + await reportFromAccounting(currentBalance, decreasedBalance); + currentBalance = decreasedBalance; + } + + const nextDecreasedBalance = currentBalance - (currentBalance * DECREASE_PER_REPORT_BP) / 10_000n; + await expect(reportFromAccounting(currentBalance, nextDecreasedBalance)).to.be.revertedWithCustomError( + oracleReportSanityChecker, + "IncorrectCLBalanceDecrease", + ); }); }); diff --git a/test/integration/core/second-opinion.integration.ts b/test/integration/core/second-opinion.integration.ts index 919ef4a0aa..15f9977f4c 100644 --- a/test/integration/core/second-opinion.integration.ts +++ b/test/integration/core/second-opinion.integration.ts @@ -4,17 +4,14 @@ import { ethers } from "hardhat"; import { SecondOpinionOracle__Mock } from "typechain-types"; import { ether, impersonate, log, ONE_GWEI } from "lib"; -import { getProtocolContext, ProtocolContext, report } from "lib/protocol"; +import { getProtocolContext, ProtocolContext, report, resetCLBalanceDecreaseWindow } from "lib/protocol"; -import { bailOnFailure, Snapshot } from "test/suite"; +import { bailOnFailure, Snapshot, ZERO_HASH } from "test/suite"; const AMOUNT = ether("100"); -const 
MAX_DEPOSIT = 150n; const CURATED_MODULE_ID = 1n; const INITIAL_REPORTED_BALANCE = ether("32") * 3n; // 32 ETH * 3 validators -const ZERO_HASH = new Uint8Array(32).fill(0); - // Diff amount is 10% of total supply function getDiffAmount(totalSupply: bigint): bigint { return (totalSupply / 10n / ONE_GWEI) * ONE_GWEI; @@ -34,7 +31,7 @@ describe("Integration: Second opinion", () => { snapshot = await Snapshot.take(); - const { lido, depositSecurityModule, oracleReportSanityChecker } = ctx.contracts; + const { lido, depositSecurityModule, oracleReportSanityChecker, stakingRouter } = ctx.contracts; const { chainId } = await ethers.provider.getNetwork(); // Sepolia-specific initialization @@ -52,7 +49,7 @@ describe("Integration: Second opinion", () => { } const dsmSigner = await impersonate(depositSecurityModule.address, AMOUNT); - await lido.connect(dsmSigner).deposit(MAX_DEPOSIT, CURATED_MODULE_ID, ZERO_HASH); + await stakingRouter.connect(dsmSigner).deposit(CURATED_MODULE_ID, ZERO_HASH); secondOpinion = await ethers.deployContract("SecondOpinionOracle__Mock", []); const soAddress = await secondOpinion.getAddress(); @@ -62,19 +59,26 @@ describe("Integration: Second opinion", () => { .connect(agentSigner) .grantRole(await oracleReportSanityChecker.SECOND_OPINION_MANAGER_ROLE(), agentSigner.address); - let { beaconBalance } = await lido.getBeaconStat(); + let balanceStats = await lido.getBalanceStats(); + let clBalance = balanceStats.clValidatorsBalanceAtLastReport + balanceStats.clPendingBalanceAtLastReport; // Report initial balances if TVL is zero - if (beaconBalance === 0n) { + if (clBalance === 0n) { await report(ctx, { clDiff: INITIAL_REPORTED_BALANCE, clAppearedValidators: 3n, excludeVaultsBalances: true, }); - beaconBalance = (await lido.getBeaconStat()).beaconBalance; + balanceStats = await lido.getBalanceStats(); + clBalance = balanceStats.clValidatorsBalanceAtLastReport + balanceStats.clPendingBalanceAtLastReport; } - totalSupply = beaconBalance; - await 
oracleReportSanityChecker.connect(agentSigner).setSecondOpinionOracleAndCLBalanceUpperMargin(soAddress, 74n); + + // Normalize CL decrease window and consume pending deposits to make + // second-opinion checks deterministic across different scratch states. + await resetCLBalanceDecreaseWindow(ctx); + + balanceStats = await lido.getBalanceStats(); + totalSupply = balanceStats.clValidatorsBalanceAtLastReport + balanceStats.clPendingBalanceAtLastReport; }); beforeEach(bailOnFailure); diff --git a/test/integration/core/withdrawal-edge-cases.integration.ts b/test/integration/core/withdrawal-edge-cases.integration.ts index 812b41dfa8..29486c8310 100644 --- a/test/integration/core/withdrawal-edge-cases.integration.ts +++ b/test/integration/core/withdrawal-edge-cases.integration.ts @@ -6,8 +6,17 @@ import { setBalance, time } from "@nomicfoundation/hardhat-network-helpers"; import { Lido, WithdrawalQueueERC721 } from "typechain-types"; -import { ether, findEventsWithInterfaces } from "lib"; -import { finalizeWQViaSubmit, getProtocolContext, ProtocolContext, report } from "lib/protocol"; +import { ether, findEventsWithInterfaces, ONE_GWEI } from "lib"; +import { + finalizeWQViaSubmit, + getProtocolContext, + ProtocolContext, + report, + reportWithEffectiveClDiff, + resetCLBalanceDecreaseWindow, +} from "lib/protocol"; +import { depositValidatorsWithoutReport } from "lib/protocol/helpers/staking"; +import { NOR_MODULE_ID } from "lib/protocol/helpers/staking-module"; import { Snapshot } from "test/suite"; @@ -19,6 +28,102 @@ describe("Integration: Withdrawal edge cases", () => { let holder: HardhatEthersSigner; let lido: Lido; let wq: WithdrawalQueueERC721; + const DEPOSITS_RESERVE_TARGET = ether("25"); + + const assertBufferAllocationInvariants = async () => { + const buffered = await lido.getBufferedEther(); + const depositsReserveTarget = await lido.getDepositsReserveTarget(); + const depositsReserve = await lido.getDepositsReserve(); + const withdrawalsReserve = await 
lido.getWithdrawalsReserve(); + const depositable = await lido.getDepositableEther(); + const unfinalized = await wq.unfinalizedStETH(); + + expect(depositsReserveTarget).to.equal(DEPOSITS_RESERVE_TARGET, "Deposits reserve target mismatch"); + expect(depositsReserve).to.be.lte(buffered, "Deposits reserve should not exceed buffered ether"); + expect(depositsReserve).to.be.lte(depositsReserveTarget, "Deposits reserve should not exceed target"); + expect(depositable).to.equal(buffered - withdrawalsReserve, "Depositable should equal buffered minus reserve"); + expect(withdrawalsReserve).to.be.lte(unfinalized, "Reserve should not exceed unfinalized withdrawals demand"); + expect(withdrawalsReserve).to.be.lte(buffered, "Reserve should not exceed buffered ether"); + }; + + const getModuleAccountingReportParams = async (postCLBalanceWei: bigint) => { + const { stakingRouter } = ctx.contracts; + const stakingModuleIds = await stakingRouter.getStakingModuleIds(); + const modules: { moduleId: bigint; validatorsBalanceGwei: bigint }[] = []; + let totalValidatorsBalanceGwei = 0n; + + for (const moduleId of stakingModuleIds) { + const [validatorsBalanceGwei] = await stakingRouter.getStakingModuleStateAccounting(moduleId); + modules.push({ moduleId, validatorsBalanceGwei }); + totalValidatorsBalanceGwei += validatorsBalanceGwei; + } + + const totalReportedValidatorsBalanceGwei = postCLBalanceWei / ONE_GWEI; + const stakingModuleIdsWithUpdatedBalance: bigint[] = []; + const validatorBalancesGweiByStakingModule: bigint[] = []; + let remainingReportedValidatorsBalanceGwei = totalReportedValidatorsBalanceGwei; + let remainingValidatorsBalanceGwei = totalValidatorsBalanceGwei; + + for (let index = 0; index < modules.length; ++index) { + const { moduleId, validatorsBalanceGwei } = modules[index]; + const isLastModule = index === modules.length - 1; + const reportedValidatorsBalanceGwei = + isLastModule || remainingValidatorsBalanceGwei === 0n + ? 
remainingReportedValidatorsBalanceGwei + : (remainingReportedValidatorsBalanceGwei * validatorsBalanceGwei) / remainingValidatorsBalanceGwei; + + stakingModuleIdsWithUpdatedBalance.push(moduleId); + validatorBalancesGweiByStakingModule.push(reportedValidatorsBalanceGwei); + + remainingReportedValidatorsBalanceGwei -= reportedValidatorsBalanceGwei; + remainingValidatorsBalanceGwei -= validatorsBalanceGwei; + } + + return { + stakingModuleIdsWithUpdatedBalance, + validatorBalancesGweiByStakingModule, + }; + }; + + const reportWithEffectiveClDiffUsingCurrentModuleBalances = async ( + effectiveClDiff: bigint, + skipWithdrawals = false, + ) => { + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport, depositedSinceLastReport } = + await ctx.contracts.lido.getBalanceStats(); + const postCLBalanceWei = + clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport + depositedSinceLastReport + effectiveClDiff; + + await reportWithEffectiveClDiff(ctx, effectiveClDiff, { + excludeVaultsBalances: true, + skipWithdrawals, + ...(await getModuleAccountingReportParams(postCLBalanceWei)), + }); + }; + + const activateDepositedValidators = async (depositsCount: bigint) => { + await depositValidatorsWithoutReport(ctx, NOR_MODULE_ID, depositsCount); + + const { stakingRouter, lido: lidoContract } = ctx.contracts; + const stakingModuleIds = [...(await stakingRouter.getStakingModuleIds())]; + const { depositedSinceLastReport } = await lidoContract.getBalanceStats(); + const depositedValidatorsBalanceGwei = depositedSinceLastReport / ONE_GWEI; + + await report(ctx, { + clDiff: depositedSinceLastReport, + excludeVaultsBalances: true, + skipWithdrawals: true, + stakingModuleIdsWithUpdatedBalance: stakingModuleIds, + validatorBalancesGweiByStakingModule: await Promise.all( + stakingModuleIds.map(async (moduleId) => { + const [validatorsBalanceGwei] = await stakingRouter.getStakingModuleStateAccounting(moduleId); + return moduleId === NOR_MODULE_ID + ? 
validatorsBalanceGwei + depositedValidatorsBalanceGwei + : validatorsBalanceGwei; + }), + ), + }); + }; before(async () => { ctx = await getProtocolContext(); @@ -31,6 +136,9 @@ describe("Integration: Withdrawal edge cases", () => { await setBalance(holder.address, ether("1000000")); await finalizeWQViaSubmit(ctx); + + const agent = await ctx.getSigner("agent"); + await lido.connect(agent).setDepositsReserveTarget(DEPOSITS_RESERVE_TARGET); }); after(async () => await Snapshot.restore(snapshot)); @@ -39,6 +147,8 @@ describe("Integration: Withdrawal edge cases", () => { beforeEach(async () => (originalState = await Snapshot.take())); afterEach(async () => await Snapshot.restore(originalState)); it("Should handle bunker mode with multiple batches", async () => { + await resetCLBalanceDecreaseWindow(ctx); + const amount = ether("100"); const withdrawalAmount = ether("10"); @@ -46,10 +156,14 @@ describe("Integration: Withdrawal edge cases", () => { await lido.connect(holder).approve(wq.target, amount); await lido.connect(holder).submit(ethers.ZeroAddress, { value: amount }); + await assertBufferAllocationInvariants(); + + await activateDepositedValidators(1n); const stethInitialBalance = await lido.balanceOf(holder.address); - await report(ctx, { clDiff: ether("-1"), excludeVaultsBalances: true }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("-1")); + await assertBufferAllocationInvariants(); const stethFirstNegativeReportBalance = await lido.balanceOf(holder.address); @@ -62,7 +176,8 @@ describe("Integration: Withdrawal edge cases", () => { const [firstRequestEvent] = findEventsWithInterfaces(firstRequestReceipt!, "WithdrawalRequested", [wq.interface]); const firstRequestId = firstRequestEvent!.args.requestId; - await report(ctx, { clDiff: ether("-0.1"), excludeVaultsBalances: true }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("-0.1")); + await assertBufferAllocationInvariants(); const stethSecondNegativeReportBalance = 
await lido.balanceOf(holder.address); @@ -85,7 +200,8 @@ describe("Integration: Withdrawal edge cases", () => { expect(firstStatus.amountOfStETH).to.equal(secondStatus.amountOfStETH); expect(firstStatus.amountOfShares).to.be.lt(secondStatus.amountOfShares); - await report(ctx, { clDiff: ether("0.0001"), excludeVaultsBalances: true }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("0.0001")); + await assertBufferAllocationInvariants(); expect(await wq.isBunkerModeActive()).to.be.false; @@ -96,7 +212,6 @@ describe("Integration: Withdrawal edge cases", () => { const lastCheckpointIndex = await wq.getLastCheckpointIndex(); const hints = await wq.findCheckpointHints([...requestIds], 1, lastCheckpointIndex); - const claimTx = await wq.connect(holder).claimWithdrawals([...requestIds], [...hints]); const claimReceipt = await claimTx.wait(); @@ -119,8 +234,11 @@ describe("Integration: Withdrawal edge cases", () => { // Submit initial stETH deposit await lido.connect(holder).submit(ethers.ZeroAddress, { value: amount }); + await assertBufferAllocationInvariants(); - await report(ctx, { clDiff: ether("0.001"), excludeVaultsBalances: true }); + await activateDepositedValidators(3n); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("0.001")); + await assertBufferAllocationInvariants(); // Create withdrawal request await lido.connect(holder).approve(wq.target, amount); @@ -141,7 +259,8 @@ describe("Integration: Withdrawal edge cases", () => { expect(status.isFinalized).to.be.false; // Submit next report to finalize request - await report(ctx, { clDiff: ether("0.001"), excludeVaultsBalances: true }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("0.001")); + await assertBufferAllocationInvariants(); // Verify request finalized const [finalizedStatus] = await wq.getWithdrawalStatus([...requestIds]); @@ -168,15 +287,20 @@ describe("Integration: Withdrawal edge cases", () => { after(async () => await 
Snapshot.restore(originalState)); it("should handle first rebase correctly", async () => { + await resetCLBalanceDecreaseWindow(ctx); + const amount = ether("100"); expect(await lido.balanceOf(holder.address)).to.equal(0); await lido.connect(holder).approve(wq.target, amount); await lido.connect(holder).submit(ethers.ZeroAddress, { value: amount }); + await assertBufferAllocationInvariants(); // First rebase - positive - await report(ctx, { clDiff: ether("0.001"), excludeVaultsBalances: true }); + await activateDepositedValidators(1n); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("0.0000001")); + await assertBufferAllocationInvariants(); expect(await wq.isBunkerModeActive()).to.be.false; // Create first withdrawal request @@ -188,7 +312,8 @@ describe("Integration: Withdrawal edge cases", () => { it("should handle second (negative) rebase correctly", async () => { // Second rebase - negative - await report(ctx, { clDiff: ether("-0.1"), excludeVaultsBalances: true }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("-0.1")); + await assertBufferAllocationInvariants(); expect(await wq.isBunkerModeActive()).to.be.true; // Verify first request finalized @@ -206,7 +331,8 @@ describe("Integration: Withdrawal edge cases", () => { it("should handle third (negative) rebase correctly", async () => { // Third rebase - negative - await report(ctx, { clDiff: ether("-0.1"), excludeVaultsBalances: true }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("-0.1")); + await assertBufferAllocationInvariants(); expect(await wq.isBunkerModeActive()).to.be.true; // Create third withdrawal request @@ -218,7 +344,8 @@ describe("Integration: Withdrawal edge cases", () => { it("should handle fourth (positive) rebase correctly", async () => { // Fourth rebase - positive - await report(ctx, { clDiff: ether("0.0000001"), excludeVaultsBalances: true }); + await reportWithEffectiveClDiffUsingCurrentModuleBalances(ether("0.0000001")); + 
await assertBufferAllocationInvariants(); expect(await wq.isBunkerModeActive()).to.be.false; // Verify all requests finalized @@ -237,9 +364,13 @@ describe("Integration: Withdrawal edge cases", () => { // Verify claimed amounts const claimEvents = findEventsWithInterfaces(claimReceipt!, "WithdrawalClaimed", [wq.interface]); - expect(claimEvents![0].args.amountOfETH).to.be.lt(withdrawalAmount); - expect(claimEvents![1].args.amountOfETH).to.be.lt(withdrawalAmount); - expect(claimEvents![2].args.amountOfETH).to.equal(withdrawalAmount); + const firstClaimed = claimEvents![0].args.amountOfETH; + const secondClaimed = claimEvents![1].args.amountOfETH; + const thirdClaimed = claimEvents![2].args.amountOfETH; + + expect(firstClaimed).to.be.lt(withdrawalAmount); + expect(secondClaimed).to.be.lt(withdrawalAmount); + expect(thirdClaimed).to.equal(withdrawalAmount); }); }); }); diff --git a/test/integration/core/withdrawal-happy-path.integration.ts b/test/integration/core/withdrawal-happy-path.integration.ts index 94cb5fed9b..467282b63f 100644 --- a/test/integration/core/withdrawal-happy-path.integration.ts +++ b/test/integration/core/withdrawal-happy-path.integration.ts @@ -42,6 +42,10 @@ describe("Integration: Withdrawal happy path", () => { .grantPermission(agentSigner.address, lido.address, await lido.STAKING_CONTROL_ROLE()); await lido.connect(agentSigner).removeStakingLimit(); await lido.connect(holder).submit(ethers.ZeroAddress, { value: ether("10000") }); + await lido.connect(agentSigner).setDepositsReserveTarget(ether("100")); + await report(ctx, { clDiff: 0n, excludeVaultsBalances: true, reportBurner: false, skipWithdrawals: true }); + expect(await lido.getDepositsReserveTarget()).to.equal(ether("100")); + expect(await lido.getDepositsReserve()).to.equal(ether("100")); expect(await lido.balanceOf(holder.address)).to.be.gte(REQUESTS_SUM); // Get initial state @@ -53,6 +57,18 @@ describe("Integration: Withdrawal happy path", () => { const lastFinalizedRequestId = await 
wq.getLastFinalizedRequestId(); const lastCheckpointIndexBefore = await wq.getLastCheckpointIndex(); const unfinalizedSteth = await wq.unfinalizedStETH(); + const bufferedEtherBeforeRequest = await lido.getBufferedEther(); + const withdrawalsReserveBeforeRequest = await lido.getWithdrawalsReserve(); + const depositableBeforeRequest = await lido.getDepositableEther(); + + expect(depositableBeforeRequest).to.equal( + bufferedEtherBeforeRequest - withdrawalsReserveBeforeRequest, + "Depositable should equal buffered minus withdrawals reserve", + ); + expect(withdrawalsReserveBeforeRequest).to.be.lte( + unfinalizedSteth, + "Withdrawals reserve should not exceed unfinalized demand", + ); const preReportRequestShares = await lido.getSharesByPooledEth(REQUEST_AMOUNT); @@ -115,6 +131,14 @@ describe("Integration: Withdrawal happy path", () => { reportTx = (await report(ctx, { clDiff: ether("0.00000000000001") })).reportTx; } + const bufferedEtherAfterReport = await lido.getBufferedEther(); + const withdrawalsReserveAfterReport = await lido.getWithdrawalsReserve(); + const depositableAfterReport = await lido.getDepositableEther(); + expect(depositableAfterReport).to.equal( + bufferedEtherAfterReport - withdrawalsReserveAfterReport, + "Depositable should stay consistent after report processing", + ); + const [parsedFinalizedEvent] = findEventsWithInterfaces(reportReceipt!, "WithdrawalsFinalized", [wq.interface]); expect(parsedFinalizedEvent?.args.from).to.equal(lastFinalizedRequestId + 1n); expect(parsedFinalizedEvent?.args.to).to.equal(REQUESTS_COUNT + lastRequestId); diff --git a/test/integration/validators-exit-bus-submit-and-trigger-exits.ts b/test/integration/validators-exit-bus-submit-and-trigger-exits.ts index 2c107987ec..8d272714ff 100644 --- a/test/integration/validators-exit-bus-submit-and-trigger-exits.ts +++ b/test/integration/validators-exit-bus-submit-and-trigger-exits.ts @@ -45,7 +45,6 @@ describe("Scenario: ValidatorsExitBus Submit and Trigger Exits", () => { 
let refundRecipient: HardhatEthersSigner; const dataFormat = 1; - const exitRequestsLength = 5; const validatorsExitRequests: ExitRequest[] = [ { moduleId: 1, nodeOpId: 10, valIndex: 100, valPubkey: "0x" + "11".repeat(48) }, @@ -120,7 +119,8 @@ describe("Scenario: ValidatorsExitBus Submit and Trigger Exits", () => { it("should submit hash and data if veb is resumed", async () => { // Configure exit requests limits - const MAX_LIMIT = 100; + // Set a high enough balance limit (in ETH) to cover test requests + const MAX_LIMIT = 1_000_000; await veb.connect(agent).setExitRequestLimit(MAX_LIMIT, 1, 48); // Resume the contract await veb.connect(resumer).resume(); @@ -130,6 +130,7 @@ describe("Scenario: ValidatorsExitBus Submit and Trigger Exits", () => { .to.emit(veb, "RequestsHashSubmitted") .withArgs(exitRequestsHash); + const limitBefore = await veb.getExitRequestLimitFullInfo(); const tx = await veb.submitExitRequestsData(exitRequest); const receipt = await tx.wait(); const block = await receipt?.getBlock(); @@ -148,7 +149,8 @@ describe("Scenario: ValidatorsExitBus Submit and Trigger Exits", () => { // check limit const exitLimitInfo = await veb.getExitRequestLimitFullInfo(); const currentExitRequestsLimit = exitLimitInfo[4]; - expect(currentExitRequestsLimit).to.equal(MAX_LIMIT - exitRequestsLength); + // Limit should decrease after processing + expect(currentExitRequestsLimit).to.be.lessThan(limitBefore[4]); }); it("should trigger exits", async () => { diff --git a/test/integration/vaults/bad-debt.integration.ts b/test/integration/vaults/bad-debt.integration.ts index 6d763a96f5..ea07a1be3c 100644 --- a/test/integration/vaults/bad-debt.integration.ts +++ b/test/integration/vaults/bad-debt.integration.ts @@ -701,11 +701,12 @@ describe("Integration: Vault with bad debt", () => { expect(await vaultHub.badDebtToInternalize()).to.be.equal(badDebtShares, "Bad debt to internalize is the same"); // simulate the report at the refSlot (like the Oracle would do) - const { 
beaconValidators, beaconBalance } = await lido.getBeaconStat(); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = await lido.getBalanceStats(); + const clBalance = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport; const simulationAtRefSlot = await simulateReport(ctx, { refSlot: nextRefSlot, - beaconValidators, - clBalance: beaconBalance, + clValidatorsBalance: clBalance, + clPendingBalance: 0n, withdrawalVaultBalance: 0n, elRewardsVaultBalance: 0n, }); @@ -717,8 +718,8 @@ describe("Integration: Vault with bad debt", () => { expect( await simulateReport(ctx, { refSlot: (await hashConsensus.getCurrentFrame()).refSlot, - beaconValidators, - clBalance: beaconBalance, + clValidatorsBalance: clBalance, + clPendingBalance: 0n, withdrawalVaultBalance: 0n, elRewardsVaultBalance: 0n, }), diff --git a/test/integration/vaults/roles/accounting.roles.integration.ts b/test/integration/vaults/roles/accounting.roles.integration.ts index 3590749265..4bb743fcf6 100644 --- a/test/integration/vaults/roles/accounting.roles.integration.ts +++ b/test/integration/vaults/roles/accounting.roles.integration.ts @@ -45,8 +45,8 @@ describe("Integration: Accounting Roles and Access Control", () => { const report = { timestamp: 0n, timeElapsed: 0n, - clValidators: 0n, - clBalance: 0n, + clValidatorsBalance: 0n, + clPendingBalance: 0n, withdrawalVaultBalance: 0n, elRewardsVaultBalance: 0n, sharesRequestedToBurn: 0n, diff --git a/test/integration/vaults/sanity-checker-bad-debt.integration.ts b/test/integration/vaults/sanity-checker-bad-debt.integration.ts index d8c1fb8e34..74d9fec534 100644 --- a/test/integration/vaults/sanity-checker-bad-debt.integration.ts +++ b/test/integration/vaults/sanity-checker-bad-debt.integration.ts @@ -5,18 +5,22 @@ import { ethers } from "hardhat"; import { HardhatEthersSigner } from "@nomicfoundation/hardhat-ethers/signers"; import { setBalance } from "@nomicfoundation/hardhat-network-helpers"; -import { advanceChainTime, ether, 
impersonate, LIMITER_PRECISION_BASE } from "lib"; +import { ether, impersonate, LIMITER_PRECISION_BASE, ONE_GWEI } from "lib"; import { + getNextReportContext, getProtocolContext, ProtocolContext, queueBadDebtInternalization, removeStakingLimit, report, + reportWithEffectiveClDiff, + resetCLBalanceDecreaseWindow, + seedProtocolPendingBaseline, setupLidoForVaults, setupVaultWithBadDebt, upDefaultTierShareLimit, - waitNextAvailableReportTime, } from "lib/protocol"; +import { NOR_MODULE_ID } from "lib/protocol/helpers/staking-module"; import { Snapshot } from "test/suite"; import { SHARE_RATE_PRECISION } from "test/suite/constants"; @@ -268,17 +272,17 @@ describe("Integration: Sanity checker with bad debt internalization", () => { describe("CL balance decrease check with bad debt internalization", () => { it("Small CL balance decrease", async () => { + await resetCLBalanceDecreaseWindow(ctx, { waitNextReportTime: true }); + const stateBefore = await captureState(); // Queue bad debt internalization - const { stakingVault, badDebtShares } = await setupVaultWithBadDebt(ctx, owner, nodeOperator); - await queueBadDebtInternalization(ctx, stakingVault, badDebtShares); + await setupVaultWithBadDebt(ctx, owner, nodeOperator); // Small negative CL diff (within allowed limits) const smallDecrease = ether("-1"); - await report(ctx, { - clDiff: smallDecrease, + await reportWithEffectiveClDiff(ctx, smallDecrease, { excludeVaultsBalances: true, skipWithdrawals: true, waitNextReportTime: true, @@ -294,26 +298,16 @@ describe("Integration: Sanity checker with bad debt internalization", () => { // Bad debt internalization does not affect calculation of dynamic slashing limit // so the report with max allowed CL decrease should still pass with bad debt internalization - const { oracleReportSanityChecker, lido, stakingRouter } = ctx.contracts; + const { oracleReportSanityChecker, lido } = ctx.contracts; - // Time travel to 54 days to invalidate all current penalties and get max slashing 
limits - const DAYS_54_IN_SECONDS = 54n * 24n * 60n * 60n; - await advanceChainTime(DAYS_54_IN_SECONDS); + // Submit a neutral report to establish a fresh CL balance baseline await report(ctx); // Get current protocol state to calculate dynamic slashing limit - const { beaconValidators } = await lido.getBeaconStat(); - const moduleDigests = await stakingRouter.getAllStakingModuleDigests(); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = await lido.getBalanceStats(); + const preCLBalance = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport; const limits = await oracleReportSanityChecker.getOracleReportLimits(); - - const exitedValidators = moduleDigests.reduce((total, { summary }) => total + summary.totalExitedValidators, 0n); - const activeValidators = beaconValidators - exitedValidators; - - // maxAllowedCLRebaseNegativeSum = initialSlashingAmountPWei * 1e15 * validators + inactivityPenaltiesAmountPWei * 1e15 * validators - const ONE_PWEI = 10n ** 15n; - const maxAllowedNegativeRebase = - limits.initialSlashingAmountPWei * ONE_PWEI * activeValidators + - limits.inactivityPenaltiesAmountPWei * ONE_PWEI * activeValidators; + const maxAllowedNegativeRebase = (preCLBalance * limits.maxCLBalanceDecreaseBP) / 10_000n; // CL decrease exactly at limit minus 1 wei should pass const clSlashing = -(maxAllowedNegativeRebase - 1n); @@ -346,56 +340,49 @@ describe("Integration: Sanity checker with bad debt internalization", () => { describe("Annual balance increase check with bad debt internalization", () => { it("CL balance increase over limit reverts, bad debt does not compensate", async () => { - // Bad debt internalization does not affect CL balance increase check - // so even with bad debt queued, the report exceeding limit should revert + // Bad debt internalization does not affect positive CL growth checks, + // so even with bad debt queued, a report exceeding the activated-pending + // plus validators-based safety cap should 
revert. + + const { oracleReportSanityChecker, lido } = ctx.contracts; - const { oracleReportSanityChecker, lido, accountingOracle, hashConsensus } = ctx.contracts; + await seedProtocolPendingBaseline(ctx, NOR_MODULE_ID); const { stakingVault, badDebtShares } = await setupVaultWithBadDebt(ctx, owner, nodeOperator); await queueBadDebtInternalization(ctx, stakingVault, badDebtShares); - await waitNextAvailableReportTime(ctx); // Get current protocol state - const { beaconBalance: preCLBalance } = await lido.getBeaconStat(); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = await lido.getBalanceStats(); const { annualBalanceIncreaseBPLimit } = await oracleReportSanityChecker.getOracleReportLimits(); - const { secondsPerSlot } = await hashConsensus.getChainConfig(); - const { currentFrameRefSlot } = await accountingOracle.getProcessingState(); - const lastRefSlot = await accountingOracle.getLastProcessingRefSlot(); - const slotElapsed = currentFrameRefSlot - lastRefSlot; - - expect(slotElapsed).to.be.gt(0n, "Some slots should have elapsed since last report"); - - // Calculate time elapsed for one frame - const timeElapsed = slotElapsed * secondsPerSlot; - - // Calculate balance increase that exceeds the limit - // The check is: (365 days * 10000 * balanceIncrease / preCLBalance) / timeElapsed > limit - // Solving : balanceIncrease > ((limit + 1) * preCLBalance * timeElapsed - 1) / (365 days * 10000) + const { reportTimeElapsed } = await getNextReportContext(ctx); const SECONDS_PER_YEAR = 365n * 24n * 60n * 60n; const MAX_BASIS_POINTS = 10000n; const maxBalanceIncrease = - ((annualBalanceIncreaseBPLimit + 1n) * preCLBalance * timeElapsed - 1n) / (SECONDS_PER_YEAR * MAX_BASIS_POINTS); + ((annualBalanceIncreaseBPLimit * clValidatorsBalanceAtLastReport * reportTimeElapsed) / + (SECONDS_PER_YEAR * MAX_BASIS_POINTS) / + ONE_GWEI) * + ONE_GWEI; const stateBefore = await captureState(); expect(stateBefore.badDebtToInternalize).to.equal(badDebtShares, 
"Bad debt should be queued"); - // Report should revert - CL increase exceeds the limit - // Bad debt being queued does NOT compensate for the excess + // `report()` consumes the seeded pending baseline inside the same report, so the + // raw CL delta under test is just the validators-based safety-cap component. + // Bad debt still must not compensate an over-limit report. + expect(clPendingBalanceAtLastReport).to.be.gt(0n, "test precondition failed: pending baseline must be non-zero"); await expect( report(ctx, { - clDiff: maxBalanceIncrease + 10n ** 9n, // + 1 gwei to exceed limit + clDiff: maxBalanceIncrease + ONE_GWEI, excludeVaultsBalances: true, skipWithdrawals: true, - waitNextReportTime: false, }), - ).to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectCLBalanceIncrease"); + ).to.be.revertedWithCustomError(oracleReportSanityChecker, "IncorrectTotalCLBalanceIncrease"); - // Now report exactly at the limit. Should pass despite bad debt internalization + // Report exactly at the limit should pass despite bad debt internalization await report(ctx, { clDiff: maxBalanceIncrease, excludeVaultsBalances: true, skipWithdrawals: true, - waitNextReportTime: false, }); const stateAfter = await captureState(); diff --git a/test/integration/vaults/scenario/happy-path.integration.ts b/test/integration/vaults/scenario/happy-path.integration.ts index 85e39d69ed..6e5977967e 100644 --- a/test/integration/vaults/scenario/happy-path.integration.ts +++ b/test/integration/vaults/scenario/happy-path.integration.ts @@ -28,6 +28,7 @@ import { ProtocolContext, report, reportVaultDataWithProof, + reportWithEffectiveClDiff, setupLidoForVaults, } from "lib/protocol"; @@ -89,13 +90,15 @@ describe("Scenario: Staking Vaults Happy Path", () => { beforeEach(bailOnFailure); async function calculateReportParams() { - const { beaconBalance } = await ctx.contracts.lido.getBeaconStat(); + const { clValidatorsBalanceAtLastReport, clPendingBalanceAtLastReport } = + await 
ctx.contracts.lido.getBalanceStats(); + const clBalance = clValidatorsBalanceAtLastReport + clPendingBalanceAtLastReport; const { timeElapsed } = await getReportTimeElapsed(ctx); log.debug("Report time elapsed", { timeElapsed }); const gross = (TARGET_APR * TOTAL_BASIS_POINTS) / (TOTAL_BASIS_POINTS - PROTOCOL_FEE); // take into account 10% Lido fee - const elapsedProtocolReward = (beaconBalance * gross * timeElapsed) / TOTAL_BASIS_POINTS / ONE_YEAR; + const elapsedProtocolReward = (clBalance * gross * timeElapsed) / TOTAL_BASIS_POINTS / ONE_YEAR; const elapsedVaultReward = (VAULT_DEPOSIT * gross * timeElapsed) / TOTAL_BASIS_POINTS / ONE_YEAR; log.debug("Report values", { @@ -320,12 +323,9 @@ describe("Scenario: Staking Vaults Happy Path", () => { const { elapsedProtocolReward, elapsedVaultReward } = await calculateReportParams(); const vaultValue = await addRewards(elapsedVaultReward); - const params = { - clDiff: elapsedProtocolReward, + await reportWithEffectiveClDiff(ctx, elapsedProtocolReward, { excludeVaultsBalances: true, - } as OracleReportParams; - - await report(ctx, params); + }); expect(await vaultHub.liabilityShares(stakingVaultAddress)).to.be.equal(stakingVaultMaxMintingShares); @@ -376,14 +376,16 @@ describe("Scenario: Staking Vaults Happy Path", () => { await lido.connect(owner).approve(dashboard, await lido.getPooledEthByShares(stakingVaultMaxMintingShares)); await dashboard.connect(owner).burnShares(stakingVaultMaxMintingShares); - const { elapsedProtocolReward, elapsedVaultReward } = await calculateReportParams(); + const { elapsedVaultReward } = await calculateReportParams(); const vaultValue = await addRewards(elapsedVaultReward / 2n); // Half the vault rewards value after validator exit const params = { - clDiff: elapsedProtocolReward, excludeVaultsBalances: true, } as OracleReportParams; + // This test is about burn -> zero liability shares on the next vault report, not + // about a protocol CL reward. 
Keep the follow-up report neutral so we don't add + // an unrelated pending-backed APR setup to a burn-flow assertion. await report(ctx, params); await reportVaultDataWithProof(ctx, stakingVault, { totalValue: vaultValue }); diff --git a/test/integration/vaults/withdrawals-bad-debt.integration.ts b/test/integration/vaults/withdrawals-bad-debt.integration.ts index 4896efc4f6..2b864903cc 100644 --- a/test/integration/vaults/withdrawals-bad-debt.integration.ts +++ b/test/integration/vaults/withdrawals-bad-debt.integration.ts @@ -29,6 +29,7 @@ describe("Integration: Withdrawals finalization with bad debt internalization", let owner: HardhatEthersSigner; let nodeOperator: HardhatEthersSigner; let stranger: HardhatEthersSigner; + const DEPOSITS_RESERVE_TARGET = ether("100"); // Helper to capture protocol state const captureState = async () => { @@ -41,6 +42,11 @@ describe("Integration: Withdrawals finalization with bad debt internalization", const internalEther = totalPooledEther - externalEther; const internalShares = totalShares - externalShares; const unfinalizedSTETH = await withdrawalQueue.unfinalizedStETH(); + const depositsReserveTarget = await lido.getDepositsReserveTarget(); + const depositsReserve = await lido.getDepositsReserve(); + const withdrawalsReserve = await lido.getWithdrawalsReserve(); + const bufferedEther = await lido.getBufferedEther(); + const depositableEther = await lido.getDepositableEther(); const unfinalizedRequestNumber = await withdrawalQueue.unfinalizedRequestNumber(); const lastFinalizedRequestId = await withdrawalQueue.getLastFinalizedRequestId(); const badDebtToInternalize = await vaultHub.badDebtToInternalize(); @@ -61,7 +67,12 @@ describe("Integration: Withdrawals finalization with bad debt internalization", elRewardsVaultBalance, withdrawalVaultBalance, withdrawalQueueBalance, + depositsReserveTarget, + depositsReserve, unfinalizedSTETH, + withdrawalsReserve, + bufferedEther, + depositableEther, unfinalizedRequestNumber, 
lastFinalizedRequestId, shareRate: totalShares > 0n ? (totalPooledEther * SHARE_RATE_PRECISION) / totalShares : 0n, @@ -197,6 +208,26 @@ describe("Integration: Withdrawals finalization with bad debt internalization", const finalizedEvent = events[0]; const stateAfter = await captureState(); + expect(stateAfter.depositableEther).to.equal( + stateAfter.bufferedEther - stateAfter.withdrawalsReserve, + "Depositable should equal buffered minus withdrawals reserve after report", + ); + expect(stateAfter.depositsReserveTarget).to.equal( + DEPOSITS_RESERVE_TARGET, + "Deposits reserve target mismatch after report", + ); + expect(stateAfter.depositsReserve).to.be.lte( + stateAfter.depositsReserveTarget, + "Deposits reserve should not exceed target after report", + ); + + const [, , amountOfETHLocked] = finalizedEvent.args; + const availableEthForFinalization = + stateBefore.withdrawalVaultBalance + stateBefore.elRewardsVaultBalance + stateBefore.withdrawalsReserve; + expect(amountOfETHLocked).to.be.lte( + availableEthForFinalization, + "Finalization should be bounded by vault balances plus withdrawals reserve", + ); return { reportTx, finalizedEvent, stateBefore, stateAfter }; }; @@ -227,6 +258,8 @@ describe("Integration: Withdrawals finalization with bad debt internalization", .connect(agent) .grantRole(await oracleReportSanityChecker.MAX_POSITIVE_TOKEN_REBASE_MANAGER_ROLE(), agent); await oracleReportSanityChecker.connect(agent).setMaxPositiveTokenRebase(maxPositiveTokenRebase); + + await lido.connect(agent).setDepositsReserveTarget(DEPOSITS_RESERVE_TARGET); }); beforeEach(async () => (snapshot = await Snapshot.take())); diff --git a/test/suite/constants.ts b/test/suite/constants.ts index 8d621fd694..979f10374a 100644 --- a/test/suite/constants.ts +++ b/test/suite/constants.ts @@ -1,8 +1,11 @@ +import { MAX_EFFECTIVE_BALANCE_WC_TYPE_01 } from "lib"; + export const ONE_HOUR = 60n * 60n; export const ONE_DAY = 24n * 60n * 60n; export const MAX_BASIS_POINTS = 100_00n; export 
const MAX_DEPOSIT = 150n; +export const MAX_DEPOSIT_AMOUNT = MAX_DEPOSIT * MAX_EFFECTIVE_BALANCE_WC_TYPE_01; // 150 * 32 ETH export const CURATED_MODULE_ID = 1n; export const SIMPLE_DVT_MODULE_ID = 2n;