diff --git a/.github/workflows/regression-test.yml b/.github/workflows/regression-test.yml new file mode 100644 index 00000000..473ea4c3 --- /dev/null +++ b/.github/workflows/regression-test.yml @@ -0,0 +1,79 @@ +name: Regression Test CI +run-name: CI triggered from @${{ github.actor }} of ${{ github.head_ref }} + +on: + workflow_dispatch: + +# Cancel in-progress jobs except for integration branch +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: ${{ !contains(github.ref, 'integration/')}} + +jobs: + regression_test: + runs-on: ubuntu-24.04 + + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + submodules: recursive + + # Install wscat for finality node smoke test to listen to sequencer feed + - name: Set up Node.js + uses: actions/setup-node@v3 + with: + node-version: "16" + + - name: Install wscat + run: npm install -g wscat + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + with: + version: nightly + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver-opts: network=host + + # Explicitly building the docker images because it's not clear how we can + # use the github action (gha) cache if we let docker compose build the images. + - uses: docker/build-push-action@v5 + with: + context: ./rollupcreator + cache-from: type=gha + cache-to: type=gha,mode=max + # Use default branch from test-node.bash + build-args: | + NITRO_CONTRACTS_BRANCH=develop + + - uses: docker/build-push-action@v5 + with: + context: ./scripts + cache-from: type=gha + cache-to: type=gha,mode=max + + - uses: docker/build-push-action@v5 + with: + context: ./mock-sequencer + cache-from: type=gha + cache-to: type=gha,mode=max + + - uses: docker/build-push-action@v5 + with: + context: ./tokenbridge + cache-from: type=gha + cache-to: type=gha,mode=max + # Use default branch from test-node.bash + build-args: | + TOKEN_BRIDGE_BRANCH=v1.2.2 + + - name: Regression Test with Latest Espresso Image + run: ./regression-test.bash + + - name: Dump docker logs on failure + if: failure() + run: | + docker compose logs diff --git a/docker-compose.yaml b/docker-compose.yaml index 1e141ea1..6a98db09 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -244,6 +244,19 @@ services: retries: 5 start_period: 40s + mock-sequencer: + pid: host + build: mock-sequencer/ + environment: + - REMOTE_WS_URL=ws://sequencer:9642 + - HTTP_PORT=10000 + ports: + - "127.0.0.1:10000:10000" + - "127.0.0.1:9643:9642" + depends_on: + - sequencer + - espresso-dev-node + sequencer-on-espresso: pid: host # allow debugging image: espresso-integration-testnode @@ -498,7 +511,7 @@ services: - /var/run/docker.sock:/var/run/docker.sock espresso-dev-node: - image: ghcr.io/espressosystems/espresso-sequencer/espresso-dev-node:20250428-dev-node-decaf-pos + image: ghcr.io/espressosystems/espresso-sequencer/espresso-dev-node:20250710 ports: - "$ESPRESSO_SEQUENCER_API_PORT:$ESPRESSO_SEQUENCER_API_PORT" - "$ESPRESSO_BUILDER_PORT:$ESPRESSO_BUILDER_PORT" diff --git a/espresso-tests/.env b/espresso-tests/.env index 713de903..132f8d22 100644 --- a/espresso-tests/.env +++ b/espresso-tests/.env @@ -7,14 +7,8 @@ export PARENT_CHAIN_RPC_URL="http://localhost:8545" export CHILD_CHAIN_RPC_URL="http://localhost:8547" # Environment variables for new OSP deployment # These are essential for the upgrade -export PARENT_CHAIN_UPGRADE_EXECUTOR="0x513D9F96d4D0563DEbae8a0DC307ea0E46b10ed7" -export PARENT_UPGRADE_EXECUTOR_ADDRESS="0x513D9F96d4D0563DEbae8a0DC307ea0E46b10ed7" -export 
CHILD_CHAIN_UPGRADE_EXUCTOR_ADDRESS="0xD59870177729b1Fa7CCdA1d2E245C57C6ad5F9F6"
-# Environment variables for osp migration action contract
-export ROLLUP_ADDRESS="0x1b836843Ef0B1731fea7C69d7d3847327DD137c2"
-export PROXY_ADMIN_ADDRESS="0x2A1f38c9097e7883570e0b02BFBE6869Cc25d8a3"
-# Environment variables for ArbOS upgrade action.
-export UPGRADE_TIMESTAMP="1723664126"
+CHILD_CHAIN_UPGRADE_EXUCTOR_ADDRESS="0xD59870177729b1Fa7CCdA1d2E245C57C6ad5F9F6"
+UPGRADE_TIMESTAMP="1723664126"

# The reader addr is only important if the parent chain is not an arbitrum chain; this is important for the batch poster.
export READER_ADDRESS="0x7DD3F2a3fAeF3B9F2364c335163244D3388Feb83"
@@ -25,6 +19,4 @@ export OLD_BATCH_POSTER_ADDRESS="0xe2148eE53c0755215Df69b2616E552154EdC584f"
export NEW_BATCH_POSTER_ADDRESS="0xe2148eE53c0755215Df69b2616E552154EdC584f"
export BATCH_POSTER_MANAGER_ADDRESS="0xe2148eE53c0755215Df69b2616E552154EdC584f"
export PARENT_CHAIN_IS_ARBITRUM="false"
-export ESPRESSO_TEE_VERIFIER_ADDRESS="0x165155D6aBB370Cb10ad1bF835e723F662d51C86"
-export INBOX_ADDRESS="0x9f8c1c641336A371031499e3c362e40d58d0f254"
-export TARGET_WASM_MODULE_ROOT="0xe81f986823a85105c5fd91bb53b4493d38c0c26652d23f76a7405ac889908287"
+DELAY_BUFFERABLE=false
diff --git a/espresso-tests/create-espresso-integrated-nitro-node.bash b/espresso-tests/create-espresso-integrated-nitro-node.bash
index c8a73bc9..aa3b99ac 100755
--- a/espresso-tests/create-espresso-integrated-nitro-node.bash
+++ b/espresso-tests/create-espresso-integrated-nitro-node.bash
@@ -8,7 +8,7 @@ espresso=true
simpleWithValidator=false

# docker pull and tag the espresso integration nitro node.
-docker pull $ESPRESSO_VERSION
+docker pull $ESPRESSO_VERSION --platform linux/amd64

docker tag $ESPRESSO_VERSION espresso-integration-testnode

diff --git a/espresso-tests/migration-test.bash b/espresso-tests/migration-test.bash
index a390e47a..bca8729a 100755
--- a/espresso-tests/migration-test.bash
+++ b/espresso-tests/migration-test.bash
@@ -10,28 +10,87 @@ set -x # print each command before executing it, for debugging

+function forge3 {
+    forge "$@" --root espresso-migration-3.1.0
+}
+
+
# Find directory of this script, the project, and the orbit-actions submodule
TEST_DIR="$(dirname $(readlink -f $0))"
+TEST_SCRIPT_DIR="v2.1.3-migration"
+TESTNODE_LOG_FILE=$(mktemp -t nitro-test-node-logs-XXXXXXXX)
+ESPRESSO_DEVNODE_LOG_FILE=$(mktemp -t espresso-dev-node-logs-XXXXXXXX)
TESTNODE_DIR="$(dirname "$TEST_DIR")"
ORBIT_ACTIONS_DIR="$TESTNODE_DIR/orbit-actions"
+ORBIT_MIGRATION_ACTION_DIR="contracts/parent-chain/espresso-migration/"
+ENV_FILE="$TEST_DIR/.env"
+# Hide docker compose warnings about orphaned containers.
+export COMPOSE_IGNORE_ORPHANS=true
+
+v3=false
+forge=forge
+ESPRESSO_NITRO_CONTRACTS_BRANCH=v2.1.3-8e58a9a
+# This commit matches the v2.1.0 release of nitro-contracts, with additional support for setting the arb owner through the upgrade executor
+NITRO_CONTRACTS_BRANCH="99c07a7db2fcce75b751c5a2bd4936e898cda065"
+BROADCAST_DIR="broadcast"
+PROXY_ADMIN_ADDRESS="0x2A1f38c9097e7883570e0b02BFBE6869Cc25d8a3"
+if [[ ${VERSION:-2} == "3" ]]; then
+    info "Using v3 migration scripts"
+    v3=true
+    TEST_SCRIPT_DIR="v3.1.0-migration"
+    ESPRESSO_NITRO_CONTRACTS_BRANCH=develop
+    ORBIT_MIGRATION_ACTION_DIR="espresso-migration-3.1.0/"
+    BROADCAST_DIR="espresso-migration-3.1.0/broadcast"
+    NITRO_CONTRACTS_BRANCH="v3.1.0"
+    PROXY_ADMIN_ADDRESS="0x275FC51309e5928Cb085b463ADEF5cbD45c76b62"
+
+    forge="forge3"
+else
+    info "Using v2 migration scripts"
+fi
+
+info Ensuring docker compose project is stopped
+run docker compose down -v --remove-orphans

# Change to orbit actions directory, update the submodule, and install any dependencies for the purposes of the test.
cd "$ORBIT_ACTIONS_DIR"
-git submodule update --init
-forge update
-yarn
+info "Ensuring submodules are checked out"
+run git submodule update --init --recursive
+
+info "Ensuring nodejs dependencies are installed"
+run yarn
+
+info "Ensuring we can compile the migration smart contracts"
+run $forge build

# Change to the top level directory for the purposes of the test.
cd "$TESTNODE_DIR"

-# Initialize a standard network not compatible with espresso to simulate a pre-upgrade orbit network e.g. not needed for the real migration
-./test-node.bash --simple --init-force --tokenbridge --detach --no-build-utils
+# NOTE: the test-node.bash script (or potentially docker compose) does not work
+# well with the `fmt` utility function and hangs at the end. I don't know why.
+# Furthermore, the long warning lines don't work with the `fmt` function, and I
+# can't work out a way to filter those lines (e.g. grep -v WARN) and
+# still have the output show up.
+
+info Deploying a vanilla Nitro stack locally, to be migrated to Espresso later.
+emph ./test-node.bash --simple --init-force --tokenbridge --detach
+if [ "$DEBUG" = "true" ]; then
+    ./test-node.bash --simple --init-force --tokenbridge --detach
+else
+    info "This command starts up an entire Nitro stack. It takes a long time."
+    info "Run \`tail -f $TESTNODE_LOG_FILE\` to see logs, if necessary."
+    echo
+    ./test-node.bash --simple --init-force --tokenbridge --detach > "$TESTNODE_LOG_FILE" 2>&1
+fi

# Start espresso sequencer node for the purposes of the test e.g. not needed for the real migration.
docker compose up espresso-dev-node --detach

-# Export environment variables in .env file
+info "Waiting for espresso dev node to start"
+sleep 200
+
+info "Load environment variables in $ENV_FILE"
+# A similar env file should be supplied for whatever chain is being migrated.
. "$TEST_DIR/.env"


@@ -41,8 +100,15 @@ ROLLUP_ADDRESS=$(docker compose run --entrypoint cat scripts /config/deployed_ch
# A convoluted way to get the address of the child chain upgrade executor, maybe there's a better way?
# These steps below are just for the purposes of the test. In a real deployment operators will likely already know their child-chain's upgrade executor address, and it should be included in a .env file for the migration run.
-INBOX_ADDRESS=$(docker compose run --entrypoint cat scripts /config/deployed_chain_info.json | jq -r '.[0].rollup.inbox' | tail -n 1 | tr -d '\r\n')
-L1_TOKEN_BRIDGE_CREATOR_ADDRESS=$(docker compose run --entrypoint cat scripts /tokenbridge-data/network.json | jq -r '.l1TokenBridgeCreator' | tail -n 1 | tr -d '\r\n')
+INBOX_ADDRESS=$(get-addr /config/deployed_chain_info.json '.[0].rollup.inbox')
+declare -p INBOX_ADDRESS
+
+PARENT_CHAIN_UPGRADE_EXECUTOR=$(get-addr /config/deployed_chain_info.json '.[0].rollup["upgrade-executor"]')
+declare -p PARENT_CHAIN_UPGRADE_EXECUTOR
+
+L1_TOKEN_BRIDGE_CREATOR_ADDRESS=$(get-addr /tokenbridge-data/network.json '.l1TokenBridgeCreator')
+declare -p L1_TOKEN_BRIDGE_CREATOR_ADDRESS
+
CHILD_CHAIN_UPGRADE_EXECUTOR_ADDRESS=$(cast call $L1_TOKEN_BRIDGE_CREATOR_ADDRESS 'inboxToL2Deployment(address)(address,address,address,address,address,address,address,address,address)' $INBOX_ADDRESS | tail -n 2 | head -n 1 | tr -d '\r\n')

# Export l2 owner private key and address
@@ -51,23 +117,26 @@ CHILD_CHAIN_UPGRADE_EXECUTOR_ADDRESS=$(cast call $L1_TOKEN_BRIDGE_CREATOR_ADDRES
PRIVATE_KEY="$(docker compose run scripts print-private-key --account l2owner | tail -n 1 | tr -d '\r\n')"
OWNER_ADDRESS="$(docker compose run scripts print-address --account l2owner | tail -n 1 | tr -d '\r\n')"

+info "Disabling validator whitelist"
+run cast send $PARENT_CHAIN_UPGRADE_EXECUTOR "executeCall(address,bytes)" $ROLLUP_ADDRESS "$(cast calldata 'setValidatorWhitelistDisabled(bool)' true)" --rpc-url $PARENT_CHAIN_RPC_URL --private-key $PRIVATE_KEY
+
cd $ORBIT_ACTIONS_DIR

-forge update
-echo "Deploying mock espresso tee verifier"
-forge script --chain $PARENT_CHAIN_CHAIN_ID ../espresso-tests/DeployMockVerifier.s.sol:DeployMockVerifier --rpc-url $PARENT_CHAIN_RPC_URL --broadcast -vvvv
+info "Deploying mock espresso TEE verifier"
+run $forge script --chain $PARENT_CHAIN_CHAIN_ID ../espresso-tests/$TEST_SCRIPT_DIR/DeployMockVerifier.s.sol:DeployMockVerifier --rpc-url $PARENT_CHAIN_RPC_URL --broadcast -vvvv

-ESPRESSO_TEE_VERIFIER_ADDRESS=$(cat broadcast/DeployMockVerifier.s.sol/1337/run-latest.json | jq -r '.transactions[0].contractAddress' | cast to-checksum)
-echo "Mock TEE Address:"
-echo $ESPRESSO_TEE_VERIFIER_ADDRESS
+ESPRESSO_TEE_VERIFIER_ADDRESS=$(cat $BROADCAST_DIR/DeployMockVerifier.s.sol/1337/run-latest.json | jq -r '.transactions[0].contractAddress' | cast to-checksum)
+declare -p ESPRESSO_TEE_VERIFIER_ADDRESS

# Echo for debug
echo "Deploying and initializing Espresso SequencerInbox"

# ** Essential migration step ** Forge script to deploy the new SequencerInbox. We do this to later point the rollup's challenge manager to the espresso integrated OSP.
-forge script --chain $PARENT_CHAIN_CHAIN_ID contracts/parent-chain/espresso-migration/DeployAndInitEspressoSequencerInbox.s.sol:DeployAndInitEspressoSequencerInbox --rpc-url $PARENT_CHAIN_RPC_URL --broadcast -vvvv
+run $forge script --chain $PARENT_CHAIN_CHAIN_ID ../espresso-tests/$TEST_SCRIPT_DIR/DeployAndInitEspressoSequencerInboxForTest.s.sol:DeployAndInitEspressoSequencerInbox --rpc-url $PARENT_CHAIN_RPC_URL --broadcast -vvvv --skip-simulation --private-key $PRIVATE_KEY

# Extract new_osp_entry address from run-latest.json
# * Essential migration sub step * These addresses are likely known addresses to operators in the event of a real migration after they have deployed the new OSP contracts; however, if operators create a script for the migration, this command is useful.
-NEW_SEQUENCER_INBOX_IMPL_ADDRESS=$(cat broadcast/DeployAndInitEspressoSequencerInbox.s.sol/1337/run-latest.json | jq -r '.transactions[0].contractAddress'| cast to-checksum)
+NEW_SEQUENCER_INBOX_IMPL_ADDRESS=$(cat $BROADCAST_DIR/DeployAndInitEspressoSequencerInboxForTest.s.sol/1337/run-latest.json | jq -r '.receipts[0].contractAddress'| cast to-checksum)
+declare -p NEW_SEQUENCER_INBOX_IMPL_ADDRESS
+
# Echo for debugging.
echo "Deployed new SequencerInbox at $NEW_SEQUENCER_INBOX_IMPL_ADDRESS"

@@ -75,11 +144,12 @@ echo "Deployed new SequencerInbox at $NEW_SEQUENCER_INBOX_IMPL_ADDRESS"
echo "Deploying Espresso SequencerInbox migration action"

# ** Essential migration step ** Forge script to deploy Espresso OSP migration action
-forge script --chain $PARENT_CHAIN_CHAIN_ID contracts/parent-chain/espresso-migration/DeployEspressoSequencerMigrationAction.s.sol:DeployEspressoSequencerMigrationAction --rpc-url $PARENT_CHAIN_RPC_URL --broadcast -vvvv
+run $forge script --chain $PARENT_CHAIN_CHAIN_ID $ORBIT_MIGRATION_ACTION_DIR/DeployEspressoSequencerMigrationAction.s.sol:DeployEspressoSequencerMigrationAction --rpc-url $PARENT_CHAIN_RPC_URL --broadcast -vvvv

# Capture new OSP address
# * Essential migration sub step * operators will be able to manually determine this address while running the upgrade, but this can be useful if they wish to make a script.
-SEQUENCER_MIGRATION_ACTION=$(cat broadcast/DeployEspressoSequencerMigrationAction.s.sol/1337/run-latest.json | jq -r '.transactions[0].contractAddress' | cast to-checksum)
+SEQUENCER_MIGRATION_ACTION=$(cat $BROADCAST_DIR/DeployEspressoSequencerMigrationAction.s.sol/1337/run-latest.json | jq -r '.transactions[0].contractAddress' | cast to-checksum)
+declare -p SEQUENCER_MIGRATION_ACTION

echo "Deployed new EspressoSequencerMigrationAction at $SEQUENCER_MIGRATION_ACTION"

@@ -106,8 +176,27 @@ cast send $PARENT_CHAIN_UPGRADE_EXECUTOR "execute(address, bytes)" $SEQUENCER_MI
echo "Executed SequencerMigrationAction via UpgradeExecutor"

+function get_latest_confirmed_v2() {
+    result=$(cast call --rpc-url $PARENT_CHAIN_RPC_URL $ROLLUP_ADDRESS 'latestConfirmed()(uint256)')
+    echo $result
+}
+
+function get_latest_confirmed_v3() {
+    result=$(cast call --rpc-url $PARENT_CHAIN_RPC_URL $ROLLUP_ADDRESS 'latestConfirmed()(bytes32)')
+    echo $result
+}
+
+function get_latest_confirmed() {
+    if $v3; then
+        get_latest_confirmed_v3
+    else
+        get_latest_confirmed_v2
+    fi
+}
+
# Get the number of confirmed nodes before the upgrade to ensure the staker is still working.
-NUM_CONFIRMED_NODES_BEFORE_UPGRADE=$(cast call --rpc-url $PARENT_CHAIN_RPC_URL $ROLLUP_ADDRESS 'latestConfirmed()(uint256)')
+NUM_CONFIRMED_NODES_BEFORE_UPGRADE=$(get_latest_confirmed)
+info "Before upgrade: $NUM_CONFIRMED_NODES_BEFORE_UPGRADE"

# Wait for CHILD_CHAIN_RPC_URL to be available
@@ -172,10 +261,14 @@ fi
# Echo successful balance update
echo "Balance of $RECIPIENT_ADDRESS changed from $BALANCE_ORIG to $BALANCE_NEW"

-# Check that the staker is making progress after the upgrade
-while [ "$NUM_CONFIRMED_NODES_BEFORE_UPGRADE" == "$(cast call --rpc-url $PARENT_CHAIN_RPC_URL $ROLLUP_ADDRESS 'latestConfirmed()(uint256)')" ]; do
-    echo "Waiting for confirmed nodes ..."
+info Check that the staker is making progress after the upgrade
+echo
+
+START=$SECONDS
+echo "Waiting for confirmed nodes."
+while [ "$NUM_CONFIRMED_NODES_BEFORE_UPGRADE" == "$(get_latest_confirmed)" ]; do
    sleep 5
+    echo "Waited $(( SECONDS - START )) seconds for confirmed nodes (before upgrade: $NUM_CONFIRMED_NODES_BEFORE_UPGRADE)"
done

# Echo to confirm that stakers are behaving normally.
echo "Confirmed nodes have progressed"
diff --git a/espresso-tests/upgrade-test/custom b/espresso-tests/upgrade-test/custom
new file mode 100644
index 00000000..7cbfce29
--- /dev/null
+++ b/espresso-tests/upgrade-test/custom
@@ -0,0 +1,60 @@
+import { parseEther } from 'ethers/lib/utils'
+import { Config } from '../../boldUpgradeCommon'
+import { hoursToBlocks } from './utils'
+
+export const custom: Config = {
+  contracts: {
+    bridge: '0x5eCF728ffC5C5E802091875f96281B5aeECf6C49',
+    inbox: '0x9f8c1c641336A371031499e3c362e40d58d0f254',
+    outbox: '0x50143333b44Ea46255BEb67255C9Afd35551072F',
+    rollup: '0x4d16C7c301d4233414Efa3fc822F329B53F52b68',
+    sequencerInbox: '0x18d19C5d3E685f5be5b9C86E097f0E439285D216',
+    excessStakeReceiver: '0x0000000000000000000000000000000000000001', // receives losers' stake
+    rollupEventInbox: '0x0e73Faf857E1ca53E700856fCf19F31F920a1e3c',
+    upgradeExecutor: '0x513D9F96d4D0563DEbae8a0DC307ea0E46b10ed7',
+  },
+  proxyAdmins: {
+    outbox: '0x2a1f38c9097e7883570e0b02bfbe6869cc25d8a3', // e.g. the address of the proxy admin for the outbox
+    inbox: '0x2a1f38c9097e7883570e0b02bfbe6869cc25d8a3',
+    bridge: '0x2a1f38c9097e7883570e0b02bfbe6869cc25d8a3',
+    rei: '0x2a1f38c9097e7883570e0b02bfbe6869cc25d8a3',
+    seqInbox: '0x2a1f38c9097e7883570e0b02bfbe6869cc25d8a3',
+  },
+  settings: {
+    challengeGracePeriodBlocks: hoursToBlocks(48), // 2 days for the chain owner to intervene in case of challenge
+    confirmPeriodBlocks: 1,
+    challengePeriodBlocks: 50400, // challenge period in blocks
+    stakeToken: '0x165155D6aBB370Cb10ad1bF835e723F662d51C86', // rollup stake token
+    stakeAmt: parseEther('1'), // assertion stake amount
+    miniStakeAmounts: [parseEther('0'), parseEther('1'), parseEther('1')], // subchallenge stake amounts (0 first level recommended)
+    chainId: 412346, // child chain id
+    minimumAssertionPeriod: 75, // minimum number of blocks between assertions
+    validatorAfkBlocks: 201600, // number of blocks before validator whitelist is dropped due to inactivity
+    disableValidatorWhitelist: false, // keep or disable validator whitelist
+    blockLeafSize: 2 ** 26, // do not change unless you know what you're doing
+    bigStepLeafSize: 2 ** 19, // do not change unless you know what you're doing
+    smallStepLeafSize: 2 ** 23, // do not change unless you know what you're doing
+    numBigStepLevel: 1, // do not change unless you know what you're doing
+    maxDataSize: 117964, // 117964 for an L2; if you're an L3, this should be set to 104857
+    isDelayBufferable: false, // keep this as false for Espresso Integration
+    bufferConfig: {
+      max: 2 ** 32 - 1, // maximum buffer size, set artificially high to disable
+      threshold: 2 ** 32 - 1, // keep above typical posting frequency.
set artificially high to disable + replenishRateInBasis: 500, // 5% replenishment rate + }, + espressoTEEVerifier: '0xb562622f2D76F355D673560CB88c1dF6088702f1', // address of the TEE verifier + }, + // validators to be whitelisted on the new rollup + validators: [ + '0x8a5F1455b5E51a59deb763342ECf0D82eAaD4d01', + '0xBA97D2B0d0a8E0894eAa9f2c4A879fD68C0DeE4F', + '0x139A0b6B1Dd1e7F912361B32A09cAD89e82F29db', + '0x02FB3B113D8e8504CD54E3ABd6D74348DE5ac0C9', + '0x6B62905346bd8ae209d580edEB6473B8C423d06b', + '0xD0555Ec8817D718f22b114D9dbe7803995c24341', + '0x47d2c39425AbCD48168C8BcDf261e6CC7B25d028', + '0xDC3B771CF8dC2b1278eE0b593DB2E1237B687060', + '0x6C76a18821C41E725E83F890eCBa8e5B11787998', + '0x96ECE75e306D3717f9F03765A6fA702Da581864c' + ], +} diff --git a/espresso-tests/upgrade-test/templatesV3.1 b/espresso-tests/upgrade-test/templatesV3.1 new file mode 100644 index 00000000..6425536d --- /dev/null +++ b/espresso-tests/upgrade-test/templatesV3.1 @@ -0,0 +1,77 @@ +import { JsonRpcProvider } from '@ethersproject/providers' + +export type CreatorTemplates = { + eth: { + bridge: string + sequencerInbox: string + delayBufferableSequencerInbox: string + inbox: string + rollupEventInbox: string + outbox: string + } + erc20: { + bridge: string + sequencerInbox: string + delayBufferableSequencerInbox: string + inbox: string + rollupEventInbox: string + outbox: string + } + rollupUserLogic: string + rollupAdminLogic: string + challengeManagerTemplate: string + osp: string + rollupCreator: string +} + +export const templates: { + [key: number]: CreatorTemplates +} = { + // Nitro testnode + 1337: { + eth: { + bridge: '0x43202b1afae6c0c2B2a04dA9030434D73579A0FF', + sequencerInbox: '0x773D62Ce1794b11788907b32F793e647A4f9A1F7', + delayBufferableSequencerInbox: + '0x571fa696c2c66A85D0801C703E0ce4666B66D2af', + inbox: '0x47821a1c6eF6f804753109057b89ea4Ec5d60516', + rollupEventInbox: '0x7e8C3fcDF91a606b71b77c6502b8E4A58559D20A', + outbox: '0x56001107D68A55A0Dc321821b22C8d657A6ce74b', + }, + erc20: { + bridge: '0xb3Dc60073a2A4F74b1575CeCa6c368D1d906c43E', + sequencerInbox: '0x14820E92B664D771bc6c787481B7c088a9dA7760', + delayBufferableSequencerInbox: + '0x2766e96f90f9F027835E0c00c04C8119c635Ce02', + inbox: '0x037B11bB930dBB7c875ce459eeFf69FC2E9FD40d', + rollupEventInbox: '0xb034A5B82f12023017285b36a3d831698caA064f', + outbox: '0x56001107D68A55A0Dc321821b22C8d657A6ce74b', + }, + rollupUserLogic: '0xE927E260Eb017552b047786837bab40ff515FfD8', + rollupAdminLogic: '0x479d0Fb5fd2902d6f9a4Fba18311854f8c4C6044', + challengeManagerTemplate: '0xFf579b0AF1B69382BbF3e656bAE39A067AeE700a', + osp: '0x07F072b449A68C3eeb9274880594Ee6ae8D58db4', + rollupCreator: '0xfB83e25003b4193060bA988bA0277122B6D8337C', + }, +} + +export async function verifyCreatorTemplates( + l1Rpc: JsonRpcProvider, + templates: CreatorTemplates +) { + const checkAddress = async (name: string, address: string) => { + if ((await l1Rpc.getCode(address)).length <= 2) { + throw new Error(`No code found for template ${name} at ${address}`) + } + } + + for (const [key, value] of Object.entries(templates)) { + if (typeof value === 'string') { + await checkAddress(key, value) + } else { + for (const [subkey, subvalue] of Object.entries(value)) { + await checkAddress(`${key}.${subkey}`, subvalue) + } + } + } +} diff --git a/espresso-tests/upgrade-test/upgrade-test.bash b/espresso-tests/upgrade-test/upgrade-test.bash new file mode 100755 index 00000000..ffe18c86 --- /dev/null +++ b/espresso-tests/upgrade-test/upgrade-test.bash @@ -0,0 +1,89 @@ +#!/usr/bin/env 
bash
+set -euo pipefail
+
+cd "$(dirname "$0")"
+
+export COMPOSE_IGNORE_ORPHANS=1
+
+export ESPRESSO_NITRO_CONTRACTS_BRANCH=v2.1.3-8e58a9a
+
+# Load env vars for rollupcreator runs without editing docker-compose.yaml
+# set -a exports all variables defined in the env file into the shell
+set -a
+. ./upgrade-test.env
+set +a
+
+echo "running an espresso node with branch $ESPRESSO_NITRO_CONTRACTS_BRANCH"
+../../test-node.bash --init-force --latest-espresso-image --no-simple --detach --espresso
+
+L1_PRIV_KEY=`docker compose run scripts print-private-key --account l2owner | tail -n 1 | tr -d '\r\n'`
+echo "L1_PRIV_KEY: $L1_PRIV_KEY"
+
+sleep 100
+
+echo "starting tx spammer"
+docker compose run --detach scripts send-l2 --ethamount 10 --to user_l2user --times 2000 --delay 200 --wait
+
+ROLLUP_ADDRESS=0x4d16C7c301d4233414Efa3fc822F329B53F52b68
+EXCUTOR=0x513D9F96d4D0563DEbae8a0DC307ea0E46b10ed7
+echo "ROLLUP_ADDRESS: $ROLLUP_ADDRESS"
+echo "EXCUTOR: $EXCUTOR"
+
+echo "Disabling validator whitelist to make sure the staker is making nodes"
+cast send $EXCUTOR "executeCall(address,bytes)" $ROLLUP_ADDRESS "$(cast calldata 'setValidatorWhitelistDisabled(bool)' true)" --rpc-url http://localhost:8545 --private-key $L1_PRIV_KEY
+
+sleep 120
+
+export NITRO_CONTRACTS_BRANCH=develop
+export NITRO_CONTRACTS_REPO=https://github.com/EspressoSystems/nitro-contracts.git
+
+echo "deploying v3.1.0 contracts"
+# The template addresses deployed here should be recorded in `templatesV3.1.ts`
+docker compose run --build \
+  -e PARENT_CHAIN_RPC="http://geth:8545" \
+  -e DEPLOYER_PRIVKEY=$L1_PRIV_KEY \
+  -e PARENT_CHAIN_ID=$L1CHAINID \
+  -e CHILD_CHAIN_NAME="arb-dev-test" \
+  -e MAX_DATA_SIZE=117964 \
+  -e OWNER_ADDRESS=$L2OWNER_ADDRESS \
+  -e WASM_MODULE_ROOT=$WASMROOT \
+  -e SEQUENCER_ADDRESS=$SEQUENCER_ADDRESS \
+  -e AUTHORIZE_VALIDATORS=10 \
+  -e CHILD_CHAIN_CONFIG_PATH="/config/l2_chain_config.json" \
+  -e CHAIN_DEPLOYMENT_INFO="/config/deployment.json" \
+  -e CHILD_CHAIN_INFO="/config/deployed_chain_info.json" \
+  -e LIGHT_CLIENT_ADDR=$LIGHT_CLIENT_ADDR \
+  -e STAKE_TOKEN_ADDRESS="" \
+  rollupcreator create-rollup-testnode
+
+echo "running upgrade"
+# Use -v to override the settings
+docker compose run --build \
+  -v "$(pwd)/custom:/workspace/scripts/files/configs/custom.ts:ro" \
+  -v "$(pwd)/templatesV3.1:/workspace/scripts/files/templatesV3.1.ts:ro" \
+  -e CONFIG_NETWORK_NAME=$CONFIG_NETWORK_NAME \
+  -e DEPLOYED_CONTRACTS_DIR=$DEPLOYED_CONTRACTS_DIR \
+  -e DISABLE_VERIFICATION=$DISABLE_VERIFICATION \
+  -e CUSTOM_RPC_URL=$CUSTOM_RPC_URL \
+  -e CUSTOM_CHAINID=$CUSTOM_CHAINID \
+  -e L1_PRIV_KEY=$L1_PRIV_KEY \
+  --entrypoint sh \
+  rollupcreator -lc '
+    export PATH="/root/.foundry/bin:$PATH";
+    forge --version &&
+    yarn script:bold-prepare --network custom &&
+    yarn script:bold-populate-lookup --network custom &&
+    yarn script:bold-local-execute --network custom'
+
+sequencer_inbox=$(docker compose run --entrypoint sh poster -c "jq -r '.[0].rollup.\"sequencer-inbox\"' /config/deployed_chain_info.json | tail -n 1 | tr -d '\r\n'")
+echo "sequencer_inbox: $sequencer_inbox"
+
+parent_chain_upgrade_executor=$(docker compose run --entrypoint sh poster -c "jq -r '.[0].rollup.\"upgrade-executor\"' /config/deployed_chain_info.json | tail -n 1 | tr -d '\r\n'")
+echo "parent_chain_upgrade_executor: $parent_chain_upgrade_executor"
+
+tee_verifier=$(cast call $sequencer_inbox "espressoTEEVerifier()(address)" --rpc-url http://localhost:8545)
+echo "tee_verifier: $tee_verifier"
+
+cast send $parent_chain_upgrade_executor $(cast calldata "executeCall(address, bytes)" $sequencer_inbox $(cast calldata "setEspressoTEEVerifier(address)" $tee_verifier)) --rpc-url http://localhost:8545 --private-key $L1_PRIV_KEY
+
+docker compose down --remove-orphans
diff --git a/espresso-tests/upgrade-test/upgrade-test.env b/espresso-tests/upgrade-test/upgrade-test.env
new file mode 100644
index 00000000..d12ef6b0
--- /dev/null
+++ b/espresso-tests/upgrade-test/upgrade-test.env
@@ -0,0 +1,13 @@
+CONFIG_NETWORK_NAME="custom"
+DEPLOYED_CONTRACTS_DIR="./scripts/files/"
+DISABLE_VERIFICATION=true
+
+CUSTOM_RPC_URL="http://geth:8545"
+CUSTOM_CHAINID=412346
+L1_PRIV_KEY=dc04c5399f82306ec4b4d654a342f40e2e0620fe39950d967e1e574b32d4dd36
+
+L2OWNER_ADDRESS=0x5E1497dD1f08C87b2d8FE23e9AAB6c1De833D927
+WASMROOT=0xdb698a2576298f25448bc092e52cf13b1e24141c997135d70f217d674bbeb69a
+SEQUENCER_ADDRESS=0xe2148eE53c0755215Df69b2616E552154EdC584f
+L1CHAINID=1337
+LIGHT_CLIENT_ADDR=0xb7fc0e52ec06f125f3afeba199248c79f71c2e3a
\ No newline at end of file
diff --git a/espresso-tests/v2.1.3-migration/DeployAndInitEspressoSequencerInboxForTest.s.sol b/espresso-tests/v2.1.3-migration/DeployAndInitEspressoSequencerInboxForTest.s.sol
new file mode 100644
index 00000000..f52c050d
--- /dev/null
+++ b/espresso-tests/v2.1.3-migration/DeployAndInitEspressoSequencerInboxForTest.s.sol
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.9;
+
+import "forge-std/Script.sol";
+import "nitro-contracts/bridge/SequencerInbox.sol";
+import "nitro-contracts/bridge/ISequencerInbox.sol";
+
+/// @notice This contract deploys and initializes a SequencerInbox contract that orbit chains can migrate to, enabling compatibility
+/// with the espresso confirmation layer.
+/// @dev BATCH_POSTER_ADDRS should be a comma-delimited list of addresses. This list will give batch posting affordances to those addresses.
+/// For chains using the Espresso TEE integration, this will be the address of your new batch poster, if you decide to change it.
+contract DeployAndInitEspressoSequencerInbox is Script {
+    function run() external {
+        bool isMigrationTest = vm.envBool("IS_MIGRATION_TEST");
+        // Grab addresses from env
+        address reader4844Addr = vm.envAddress("READER_ADDRESS");
+
+        // Grab any uints we need to initialize the contract from env
+        uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY");
+        uint256 maxDataSize = vm.envUint("MAX_DATA_SIZE");
+        // Grab booleans we need from env
+        bool isUsingFeeToken = vm.envBool("IS_USING_FEE_TOKEN");
+        // Trick the Vm into seeing that this opcode exists if this isn't the migration test
+        if (!isMigrationTest){
+            bytes memory code = vm.getDeployedCode("ArbSysMock.sol:ArbSysMock");
+            vm.etch(0x0000000000000000000000000000000000000064, code);
+        }
+        // initialize interfaces needed
+        IReader4844 reader = IReader4844(reader4844Addr);
+        // Start broadcast to deploy the SequencerInbox
+        vm.startBroadcast(deployerPrivateKey);
+        SequencerInbox sequencerInbox = new SequencerInbox(maxDataSize, reader, isUsingFeeToken);
+
+        // Setting batch posters and batch poster manager
+        vm.stopBroadcast();
+    }
+}
diff --git a/espresso-tests/DeployMockVerifier.s.sol b/espresso-tests/v2.1.3-migration/DeployMockVerifier.s.sol
similarity index 100%
rename from espresso-tests/DeployMockVerifier.s.sol
rename to espresso-tests/v2.1.3-migration/DeployMockVerifier.s.sol
diff --git a/espresso-tests/v3.1.0-migration/DeployAndInitEspressoSequencerInboxForTest.s.sol b/espresso-tests/v3.1.0-migration/DeployAndInitEspressoSequencerInboxForTest.s.sol
new file mode 100644
index 00000000..df74cc43
--- /dev/null
+++ b/espresso-tests/v3.1.0-migration/DeployAndInitEspressoSequencerInboxForTest.s.sol
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.9;
+
+import "forge-std/Script.sol";
+import "nitro-contracts-v3/bridge/SequencerInbox.sol";
+import "nitro-contracts-v3/bridge/ISequencerInbox.sol";
+
+/// @notice This contract deploys and initializes a SequencerInbox contract that orbit chains can migrate to, enabling compatibility
+/// with the espresso confirmation layer.
+/// @dev BATCH_POSTER_ADDRS should be a comma-delimited list of addresses. This list will give batch posting affordances to those addresses.
+/// For chains using the Espresso TEE integration, this will be the address of your new batch poster, if you decide to change it.
+contract DeployAndInitEspressoSequencerInbox is Script {
+    function run() external {
+        bool isMigrationTest = vm.envBool("IS_MIGRATION_TEST");
+        // Grab addresses from env
+        address reader4844Addr = vm.envAddress("READER_ADDRESS");
+
+        // Grab any uints we need to initialize the contract from env
+        uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY");
+        uint256 maxDataSize = vm.envUint("MAX_DATA_SIZE");
+        // Grab booleans we need from env
+        bool isUsingFeeToken = vm.envBool("IS_USING_FEE_TOKEN");
+        // Trick the Vm into seeing that this opcode exists if this isn't the migration test
+        if (!isMigrationTest){
+            bytes memory code = vm.getDeployedCode("ArbSysMock.sol:ArbSysMock");
+            vm.etch(0x0000000000000000000000000000000000000064, code);
+        }
+        bool delayBufferable = vm.envBool("DELAY_BUFFERABLE");
+        // initialize interfaces needed
+        IReader4844 reader = IReader4844(reader4844Addr);
+        // Start broadcast to deploy the SequencerInbox
+        vm.startBroadcast(deployerPrivateKey);
+        SequencerInbox sequencerInbox = new SequencerInbox(maxDataSize, reader, isUsingFeeToken, delayBufferable);
+
+        // Setting batch posters and batch poster manager
+        vm.stopBroadcast();
+    }
+}
diff --git a/espresso-tests/v3.1.0-migration/DeployMockVerifier.s.sol b/espresso-tests/v3.1.0-migration/DeployMockVerifier.s.sol
new file mode 100644
index 00000000..3b2ae39c
--- /dev/null
+++ b/espresso-tests/v3.1.0-migration/DeployMockVerifier.s.sol
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.9;
+
+import "forge-std/Script.sol";
+import "nitro-contracts-v3/mocks/EspressoTEEVerifier.sol";
+
+contract DeployMockVerifier is Script {
+    function run() external {
+        uint256 deployerPrivateKey = vm.envUint("PRIVATE_KEY");
+        vm.startBroadcast(deployerPrivateKey);
+        EspressoTEEVerifierMock mockVerifier = new EspressoTEEVerifierMock();
+        vm.stopBroadcast();
+    }
+}
diff --git a/flake.lock b/flake.lock
index b6c52425..e6144138 100644
--- a/flake.lock
+++ b/flake.lock
@@ -2,12 +2,12 @@
  "nodes": {
    "flake-compat": {
      "locked": {
-        "lastModified": 1696426674,
-        "narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
-        "rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
-        "revCount": 57,
+        "lastModified": 1733328505,
+        "narHash": "sha256-NeCCThCEP3eCl2l/+27kNNK7QrwZB1IJCrXfrbv5oqU=",
+        "rev": "ff81ac966bb2cae68946d5ed5fc4994f96d0ffec",
+        "revCount": 69,
        "type": "tarball",
-        "url": "https://api.flakehub.com/f/pinned/edolstra/flake-compat/1.0.1/018afb31-abd1-7bff-a5e4-cff7e18efb7a/source.tar.gz"
+        "url": "https://api.flakehub.com/f/pinned/edolstra/flake-compat/1.1.0/01948eb7-9cba-704f-bbf3-3fa956735b52/source.tar.gz"
      },
      "original": {
        "type": "tarball",
@@ -35,16 +35,16 @@
        "nixpkgs": "nixpkgs"
      },
      "locked": {
-        "lastModified": 1722676286,
-        "narHash": "sha256-wEDJdvwRZF2ErQ33nQ0Lqn/48XrPbaadv56/bM2MSZU=",
+        "lastModified": 1758100230,
+        "narHash": "sha256-sARl8NpG4ifzhd7j5D04A5keJIf0zkP1XYIuDEkzXb4=",
        "owner": "shazow",
        "repo": "foundry.nix",
-        "rev": "d84c83b1c1722c8742b3d2d84c9386814d75384e",
+        "rev": "e632b06dc759e381ef04f15ff9541f889eda6013",
        "type": "github"
      },
      "original": {
        "owner": "shazow",
-        "ref": "monthly",
+        "ref": "stable",
        "repo": "foundry.nix",
        "type": "github"
      }
@@ -65,12 +65,12 @@
    },
    "nixpkgs_2": {
      "locked": {
-        "lastModified": 1721379653,
-        "narHash": "sha256-8MUgifkJ7lkZs3u99UDZMB4kbOxvMEXQZ31FO3SopZ0=",
-        "rev": "1d9c2c9b3e71b9ee663d11c5d298727dace8d374",
-        "revCount": 655136,
+        "lastModified": 1757745802,
+        "narHash": "sha256-hLEO2TPj55KcUFUU1vgtHE9UEIOjRcH/4QbmfHNF820=",
+        "rev":
"c23193b943c6c689d70ee98ce3128239ed9e32d1",
+        "revCount": 861038,
        "type": "tarball",
-        "url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.1.655136%2Brev-1d9c2c9b3e71b9ee663d11c5d298727dace8d374/0190cd4f-c0eb-72cb-834b-ac854aa282dc/source.tar.gz"
+        "url": "https://api.flakehub.com/f/pinned/NixOS/nixpkgs/0.1.861038%2Brev-c23193b943c6c689d70ee98ce3128239ed9e32d1/01994596-722e-716c-b0eb-e6b07d4de75b/source.tar.gz"
      },
      "original": {
        "type": "tarball",
diff --git a/flake.nix b/flake.nix
index 1a367bb5..26ea67c6 100644
--- a/flake.nix
+++ b/flake.nix
@@ -2,7 +2,7 @@
  description = "A Nix-flake-based Node.js development environment";

  inputs.nixpkgs.url = "https://flakehub.com/f/NixOS/nixpkgs/0.1.*.tar.gz";
-  inputs.foundry.url = "github:shazow/foundry.nix/monthly"; # Use monthly branch for permanent releases
+  inputs.foundry.url = "github:shazow/foundry.nix/stable";
  inputs.flake-compat.url = "https://flakehub.com/f/edolstra/flake-compat/1.tar.gz"; # for shell.nix compatibility

diff --git a/mock-sequencer/.gitignore b/mock-sequencer/.gitignore
new file mode 100644
index 00000000..9c97bbd4
--- /dev/null
+++ b/mock-sequencer/.gitignore
@@ -0,0 +1,3 @@
+node_modules
+dist
+.env
diff --git a/mock-sequencer/Dockerfile b/mock-sequencer/Dockerfile
new file mode 100644
index 00000000..ad1d0f8a
--- /dev/null
+++ b/mock-sequencer/Dockerfile
@@ -0,0 +1,9 @@
+FROM node:18-bullseye-slim
+WORKDIR /workspace
+
+COPY ./package.json ./
+RUN yarn
+COPY ./*.ts ./tsconfig.json ./
+RUN yarn build
+
+CMD ["node", "dist/index.js"]
diff --git a/mock-sequencer/README.md b/mock-sequencer/README.md
new file mode 100644
index 00000000..fdc53768
--- /dev/null
+++ b/mock-sequencer/README.md
@@ -0,0 +1,13 @@
+# mock-sequencer
+
+A standalone mock sequencer WebSocket server for local development and testing.
+
+## Features
+
+- **WebSocket Proxy:** Forwards messages from a remote sequencer or test node to local clients.
+- **Message Manipulation:** Supports skipping, reordering, and oversizing messages to simulate malicious or faulty sequencer behavior.
+- **HTTP Control API:** Dynamically control sequencer behavior via simple REST endpoints.
+
+## Usage
+
+See `regression-tests/batcher-with-malicious-sequencer.bash` for usage.
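+
+As a quick sketch (assuming the compose service is up and the default `HTTP_PORT` of 10000 from `consts.ts`, which is the port mapped in `docker-compose.yaml`), the control endpoints defined in `index.ts` can be driven with `curl`:
+
+```bash
+curl http://127.0.0.1:10000/block-number              # current block count seen by the proxy
+curl -X POST http://127.0.0.1:10000/skip-next         # drop the next sequencer message
+curl -X POST http://127.0.0.1:10000/send-oversized    # oversize the next message's l2Msg
+curl -X POST http://127.0.0.1:10000/reset             # restore normal proxy behavior
+```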
diff --git a/mock-sequencer/consts.ts b/mock-sequencer/consts.ts
new file mode 100644
index 00000000..5200bc7c
--- /dev/null
+++ b/mock-sequencer/consts.ts
@@ -0,0 +1,3 @@
+export const REMOTE_WS_URL = process.env.REMOTE_WS_URL || 'ws://localhost:8081';
+export const LOCAL_WS_PORT = parseInt(process.env.LOCAL_WS_PORT || '9642', 10);
+export const HTTP_PORT = parseInt(process.env.HTTP_PORT || '10000', 10);
diff --git a/mock-sequencer/index.ts b/mock-sequencer/index.ts
new file mode 100644
index 00000000..96fccfd3
--- /dev/null
+++ b/mock-sequencer/index.ts
@@ -0,0 +1,52 @@
+import { MockSequencer } from './wss'
+import express from 'express'
+import { HTTP_PORT } from './consts'
+
+const sequencer = new MockSequencer()
+
+const app = express()
+app.use(express.json())
+
+app.post('/skip-next', (_req, res) => {
+  const count = sequencer.getCurrentCount()
+  sequencer.setSkipNext()
+  res.json(count)
+})
+
+app.post('/send-in-random', (_req, res) => {
+  const count = sequencer.getCurrentCount()
+  sequencer.setSendInRandom()
+  res.json(count)
+})
+
+app.post('/send-oversized', (_req, res) => {
+  const count = sequencer.getCurrentCount()
+  sequencer.setSendOversized()
+  res.json(count)
+})
+
+app.get('/block-number', (_req, res) => {
+  const count = sequencer.getCurrentCount()
+  res.json(count)
+})
+
+app.post('/reset', (_req, res) => {
+  sequencer.reset()
+  res.json(sequencer.getCurrentCount())
+})
+
+app.post('/send-invalid-delayed-messages', (_req, res) => {
+  const count = sequencer.getCurrentCount()
+  sequencer.setSendInvalidDelayedMessages()
+  res.json(count)
+})
+
+app.post('/send-messages-at-same-block', (_req, res) => {
+  const count = sequencer.getCurrentCount()
+  sequencer.setSendMessageAtSameBlock()
+  res.json(count)
+})
+
+app.listen(HTTP_PORT, () => {
+  console.log(`MockSequencer HTTP server listening on http://localhost:${HTTP_PORT}`)
+})
diff --git a/mock-sequencer/package.json b/mock-sequencer/package.json
new file mode 100644
index 00000000..afb75f21
--- /dev/null
+++ b/mock-sequencer/package.json
@@ -0,0 +1,24 @@
+{
+  "name": "mock-sequencer",
+  "version": "1.0.0",
+  "description": "A mock sequencer WebSocket server for local development and testing.",
+  "main": "dist/index.js",
+  "types": "dist/index.d.ts",
+  "scripts": {
+    "build": "tsc",
+    "start": "node dist/index.js",
+    "dev": "ts-node index.ts"
+  },
+  "author": "",
+  "license": "MIT",
+  "dependencies": {
+    "express": "^4.18.2",
+    "ws": "^8.13.0"
+  },
+  "devDependencies": {
+    "typescript": "^5.4.0",
+    "ts-node": "^10.9.2",
+    "@types/express": "^4.17.21",
+    "@types/ws": "^8.5.10"
+  }
+}
diff --git a/mock-sequencer/tsconfig.json b/mock-sequencer/tsconfig.json
new file mode 100644
index 00000000..8b857a7c
--- /dev/null
+++ b/mock-sequencer/tsconfig.json
@@ -0,0 +1,12 @@
+{
+  "compilerOptions": {
+    "target": "ES2015",
+    "module": "CommonJS",
+    "declaration": true,
+    "outDir": "dist",
+    "strict": true,
+    "esModuleInterop": true
+  },
+  "include": ["*.ts"],
+  "exclude": ["node_modules", "dist"]
+}
diff --git a/mock-sequencer/types.ts b/mock-sequencer/types.ts
new file mode 100644
index 00000000..451de10e
--- /dev/null
+++ b/mock-sequencer/types.ts
@@ -0,0 +1,30 @@
+export interface BroadcastMessage {
+  version: number
+  messages?: SequencerMessage[]
+}
+
+export interface SequencerMessage {
+  sequenceNumber: number
+  message: SequencerMessageDetail
+  blockHash: string
+  signature: string | null
+}
+
+export interface SequencerMessageDetail {
+  message: L2Message
+  delayedMessagesRead: number
+}
+
+export interface L2Message {
+  header: L1IncomingMessageHeader
+  l2Msg: string
+}
+
+export interface L1IncomingMessageHeader {
+  kind: number
+  sender: string
+  blockNumber: number
+  timestamp: number
+  requestId: string | null
+  baseFeeL1: string | null
+}
diff --git a/mock-sequencer/wss.ts b/mock-sequencer/wss.ts
new file mode 100644
index 00000000..1c9d07bc
--- /dev/null
+++ b/mock-sequencer/wss.ts
@@ -0,0 +1,184 @@
+import { LOCAL_WS_PORT, REMOTE_WS_URL } from './consts'
+import { BroadcastMessage, SequencerMessage } from './types'
+import WebSocket, { Server as WebSocketServer } from 'ws'
+
+export class MockSequencer {
+  constructor() {
+    this.wss = new WebSocketServer({ port: LOCAL_WS_PORT });
+    this.wss.on('connection', (ws) => {
+      this.clients.add(ws)
+      this.connectRemoteWs()
+      console.log('Client connected', this.clients.size)
+      // Forward messages from local clients to the remote sequencer.
+      // 'message' must be handled on each client socket: a WebSocketServer
+      // itself does not emit 'message' events.
+      ws.on('message', (data) => {
+        console.log('receive message:', data)
+        if (this.remoteWs) {
+          this.remoteWs.send(data)
+        }
+      })
+      ws.on('close', () => {
+        this.clients.delete(ws)
+      })
+    })
+  }
+
+  public getCurrentCount() {
+    return this.blockNumber
+  }
+
+  public setSkipNext() {
+    console.log('setting next block to be skipped', this.getCurrentCount() + 1)
+    this.skipNext = this.getCurrentCount() + 1
+  }
+
+  public setSendInRandom() {
+    this.sendInRandom = true
+  }
+
+  public setSendOversized() {
+    console.log('setting next block to be oversized', this.getCurrentCount() + 1)
+    this.sendOversized = this.getCurrentCount() + 1
+  }
+
+  public setSendInvalidDelayedMessages() {
+    console.log('setting next block to be invalid delayed messages', this.getCurrentCount() + 1)
+    this.sendInvalidDelayedMessages = true
+  }
+
+  public setSendMessageAtSameBlock() {
+    console.log('setting sequencer to tamper with message positions', this.getCurrentCount() + 1)
+    this.sendMessageAtSameBlock = this.getCurrentCount() + 1
+  }
+
+  public reset() {
+    this.skipNext = null
+    this.sendInRandom = false
+    this.sendOversized = null
+    this.sendInvalidDelayedMessages = false
+    this.sendMessageAtSameBlock = null
+  }
+
+  private processAndBroadcast(data: WebSocket.Data) {
+    const str = typeof data === 'string' ? data : data.toString();
+    const messages = JSON.parse(str) as BroadcastMessage;
+    if (!messages.messages) {
+      return this.broadcastToClients(data)
+    }
+    const newMessages: SequencerMessage[] = []
+    let intercept = false
+    messages.messages.forEach((message) => {
+      const blockNumber = message.sequenceNumber
+      const delayedCount = message.message.delayedMessagesRead
+      this.blockNumberToDelayedCount.set(blockNumber, delayedCount)
+
+      if (blockNumber == this.skipNext) {
+        console.log('Skipping block', blockNumber)
+        intercept = true
+        return
+      }
+      if (blockNumber == this.sendOversized) {
+        intercept = true
+        const overSized = '0'.repeat(10000000)
+        const l2MsgBytes = new TextEncoder().encode(overSized)
+        console.log("oversized message length:", l2MsgBytes.length)
+        message.message.message.l2Msg = overSized
+        return
+      }
+
+      if (this.sendInvalidDelayedMessages) {
+        const previousDelayedCount = this.blockNumberToDelayedCount.get(blockNumber - 1)
+        if (previousDelayedCount === undefined) {
+          // we don't know if this is a delayed message, will skip it first
+          return
+        }
+        if (previousDelayedCount + 1 === delayedCount) {
+          console.log('tampering delayed messages', delayedCount)
+          message.message.message.l2Msg = ''
+          intercept = true
+        }
+        return
+      }
+
+      if (this.sendMessageAtSameBlock && blockNumber > this.sendMessageAtSameBlock) {
+        intercept = true
+        message.sequenceNumber = this.sendMessageAtSameBlock
+        return
+      }
+
+      if (blockNumber > this.blockNumber) {
+        this.blockNumber = blockNumber
+      }
+      newMessages.push(message)
+    })
+
+    // Pass data through untouched when no tampering mode is active
+    if (!this.skipNext &&
+      !this.sendOversized &&
+      !this.sendInvalidDelayedMessages &&
+      !this.sendMessageAtSameBlock) {
+      return this.broadcastToClients(data)
+    }
+    if (intercept) {
+      console.log('Intercepting block', newMessages, this.skipNext)
+      data = JSON.stringify({ version: 1, messages: newMessages })
+    } else if (this.sendInRandom) {
+      if (this.buffer.length < this.bufferSize) {
+        this.buffer.push(data)
+      } else {
+        this.buffer.sort((_a, _b) => {
+          return Math.random() - 0.5
+        })
+        this.buffer.forEach((d) => {
+          this.broadcastToClients(d)
+        })
+        this.buffer = []
+      }
+      this.sendInRandom = false
+      return
+    }
+    this.broadcastToClients(data)
+  }
+
+  private connectRemoteWs() {
+    this.remoteWs = new WebSocket(REMOTE_WS_URL)
+
+    this.remoteWs.on('open', () => {
+      console.log('Connected to remote WebSocket server:', REMOTE_WS_URL)
+    })
+
+    this.remoteWs.on('message', (data) => {
+      this.processAndBroadcast(data)
+    })
+
+    this.remoteWs.on('close', () => {
+      console.log('Remote WebSocket closed, reconnecting in 1s...')
+      // Arrow function keeps `this` bound to the MockSequencer instance
+      setTimeout(() => this.connectRemoteWs(), 1000)
+    })
+
+    this.remoteWs.on('error', (err) => {
+      console.error('Remote WebSocket error:', err)
+    })
+
+  }
+
+  private broadcastToClients(data: WebSocket.Data) {
+    for (const client of this.clients) {
+      if (client.readyState === WebSocket.OPEN) {
+        client.send(data)
+      }
+    }
+  }
+
+  // Messages are buffered; only after bufferSize messages have been received are they broadcast
+  private buffer: WebSocket.Data[] = []
+  private bufferSize = 20
+
+  private clients: Set<WebSocket> = new Set()
+  private wss: WebSocketServer
+  private remoteWs!: WebSocket
+  private blockNumber = 0
+  private blockNumberToDelayedCount = new Map([[0, 1]])
+
+  private skipNext: number | null = null
+  private sendInRandom = false
+  private sendOversized: number | null = null
+  private sendInvalidDelayedMessages = false
+  private sendMessageAtSameBlock: number | null = null
+}
diff --git a/orbit-actions b/orbit-actions
index e7535cde..3ca7fde4 160000
--- a/orbit-actions
+++ b/orbit-actions
@@ -1 +1 @@
-Subproject commit e7535cde6891a911dd28adcbb16e4f4bd3a57927
+Subproject commit 3ca7fde417126a1a962bd04206915efdf2449402
diff --git a/regression-test.bash b/regression-test.bash
new file mode 100755
index 00000000..1418fc54
--- /dev/null
+++ b/regression-test.bash
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+echo "Running regression tests..."
+cd "$(dirname "$0")"
+
+cd regression-tests
+scripts=$(find . -maxdepth 1 -name '*.bash' ! -name 'common.bash' | sort)
+
+for script in $scripts; do
+    echo "Running $(basename "$script")"
+    attempt=1
+    max_attempts=3
+    while ! "$script"; do
+        if (( attempt >= max_attempts )); then
+            echo "Failed $(basename "$script") after $attempt attempts, aborting."
+            exit 1
+        fi
+        attempt=$((attempt+1))
+        echo "Retrying $(basename "$script") (attempt $attempt/$max_attempts)..."
+    done
+    echo "Completed $(basename "$script")"
+
+done
+
+cd ..
+echo "All regression tests completed successfully!"
+docker compose down --remove-orphans
diff --git a/regression-tests/.env b/regression-tests/.env
new file mode 100644
index 00000000..713de903
--- /dev/null
+++ b/regression-tests/.env
@@ -0,0 +1,30 @@
+# Environment variables for chain name and rpc_url
+# These are essential for the upgrade
+export PARENT_CHAIN_CHAIN_ID="1337"
+export CHAIN_ID="1337"
+export CHILD_CHAIN_CHAIN_NAME="412346"
+export PARENT_CHAIN_RPC_URL="http://localhost:8545"
+export CHILD_CHAIN_RPC_URL="http://localhost:8547"
+# Environment variables for new OSP deployment
+# These are essential for the upgrade
+export PARENT_CHAIN_UPGRADE_EXECUTOR="0x513D9F96d4D0563DEbae8a0DC307ea0E46b10ed7"
+export PARENT_UPGRADE_EXECUTOR_ADDRESS="0x513D9F96d4D0563DEbae8a0DC307ea0E46b10ed7"
+export CHILD_CHAIN_UPGRADE_EXUCTOR_ADDRESS="0xD59870177729b1Fa7CCdA1d2E245C57C6ad5F9F6"
+# Environment variables for osp migration action contract
+export ROLLUP_ADDRESS="0x1b836843Ef0B1731fea7C69d7d3847327DD137c2"
+export PROXY_ADMIN_ADDRESS="0x2A1f38c9097e7883570e0b02BFBE6869Cc25d8a3"
+# Environment variables for ArbOS upgrade action.
+export UPGRADE_TIMESTAMP="1723664126"
+
+# The reader addr is only important if the parent chain is not an arbitrum chain; this is important for the batch poster.
+export READER_ADDRESS="0x7DD3F2a3fAeF3B9F2364c335163244D3388Feb83"
+export IS_USING_FEE_TOKEN="false"
+
+export MAX_DATA_SIZE="117964"
+export OLD_BATCH_POSTER_ADDRESS="0xe2148eE53c0755215Df69b2616E552154EdC584f"
+export NEW_BATCH_POSTER_ADDRESS="0xe2148eE53c0755215Df69b2616E552154EdC584f"
+export BATCH_POSTER_MANAGER_ADDRESS="0xe2148eE53c0755215Df69b2616E552154EdC584f"
+export PARENT_CHAIN_IS_ARBITRUM="false"
+export ESPRESSO_TEE_VERIFIER_ADDRESS="0x165155D6aBB370Cb10ad1bF835e723F662d51C86"
+export INBOX_ADDRESS="0x9f8c1c641336A371031499e3c362e40d58d0f254"
+export TARGET_WASM_MODULE_ROOT="0xe81f986823a85105c5fd91bb53b4493d38c0c26652d23f76a7405ac889908287"
diff --git a/regression-tests/batcher-e2e.bash b/regression-tests/batcher-e2e.bash
new file mode 100755
index 00000000..0b7b0a52
--- /dev/null
+++ b/regression-tests/batcher-e2e.bash
@@ -0,0 +1,88 @@
+#!/usr/bin/env bash
+
+# In this test we send a lot of transactions to the L2 and delayed messages to L1.
+# We expect the validator will eventually catch up with the sequencer.
+# This test will take around 20 minutes to finish.
+
+set -euo pipefail
+
+cd "$(dirname "$0")"
+
+source ./common.bash
+
+# Ignore orphaned container output
+# Orphaned containers will be removed at the end of the test
+export COMPOSE_IGNORE_ORPHANS=1
+
+echo "starting nodes"
+../test-node.bash --init-force --espresso $(get_espresso_image_flag) --validate --detach
+
+echo "starting tx spammer"
+docker compose run --detach scripts send-l2 --ethamount 10 --to user_l2user --times 2000 --delay 200 --wait
+
+echo "sending delayed tx"
+docker compose run --detach scripts send-l2-delayed --ethamount 10 --to user_delayed_user --from espresso-sequencer --times 100 --delay 2000 --wait
+
+check_validator_root_matches_sequencer() {
+    local block=$1
+    validator_root=$(cast block --rpc-url http://localhost:8247 --json $block | jq -r .stateRoot)
+    sequencer_root=$(cast block --rpc-url http://localhost:8547 --json $block | jq -r .stateRoot)
+    if [[ "$validator_root" != "$sequencer_root" ]]; then
+        echo "Error: validator root ($validator_root) does not match sequencer root ($sequencer_root)"
+        exit 1
+    fi
+    echo "validator block number: $block, validator root: $validator_root, sequencer root: $sequencer_root"
+}
+
+sleep 20
+validator_block=0
+i=0
+while [[ $i -lt 5 ]]; do
+    sleep 60
+    new_validator_block=$(cast block-number --rpc-url http://localhost:8247)
+    echo "validator block number: $new_validator_block"
+    if [[ "$new_validator_block" -eq "$validator_block" ]]; then
+        # validator has caught up
+        i=$((i+1))
+        continue
+    fi
+
+    validator_block=$new_validator_block
+    check_validator_root_matches_sequencer $validator_block
+done
+
+echo "validator has stopped creating blocks for the last 60 seconds. validator block number: $validator_block"
+sequencer_block=$(cast block-number --rpc-url http://localhost:8547)
+sequencer_inbox=$(docker compose run --entrypoint sh poster -c "jq -r '.[0].rollup.\"sequencer-inbox\"' /config/deployed_chain_info.json | tail -n 1 | tr -d '\r\n'")
+echo "sequencer inbox: $sequencer_inbox"
+batch_count=$(cast call $sequencer_inbox "batchCount()(uint256)" --rpc-url http://localhost:8545)
+echo "batch count: $batch_count"
+
+if (( sequencer_block > validator_block + batch_count + 1 )); then
+    echo "Error: sequencer block number ($sequencer_block) is greater than validator block number ($validator_block) + batch count ($batch_count)"
+    exit 1
+fi
+echo "validator has caught up. validator block number: $validator_block"
+
+validator_block=$(cast block-number --rpc-url http://localhost:8247)
+check_validator_root_matches_sequencer $validator_block
+
+docker compose run scripts update-config-value \
+    --path /config/poster_config.json \
+    --property node.batch-poster.max-empty-batch-delay \
+    --value "30s"
+
+docker compose restart poster
+
+# Batcher should have created 1 empty batch
+sleep 40
+
+validator_block2=$(cast block-number --rpc-url http://localhost:8247)
+if (( validator_block2 == validator_block )); then
+    echo "Error: validator block number ($validator_block2) has not advanced past the previous validator block number ($validator_block)"
+    exit 1
+fi
+
+check_validator_root_matches_sequencer $validator_block2
+
+docker compose down --remove-orphans
diff --git a/regression-tests/batcher-resubmission.bash b/regression-tests/batcher-resubmission.bash
new file mode 100755
index 00000000..82583549
--- /dev/null
+++ b/regression-tests/batcher-resubmission.bash
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+cd "$(dirname "$0")"
+
+source ./common.bash
+# Ignore orphaned container output
+# Orphaned containers will be removed at the end of the test
+export COMPOSE_IGNORE_ORPHANS=1
+
+echo "starting nodes"
+../test-node.bash --init-force --espresso $(get_espresso_image_flag) --validate --detach
+
+sequencer_inbox=$(docker compose run --entrypoint sh poster -c "jq -r '.[0].rollup.\"sequencer-inbox\"' /config/deployed_chain_info.json | tail -n 1 | tr -d '\r\n'")
+echo "sequencer inbox: $sequencer_inbox"
+batch_count=$(cast call $sequencer_inbox "batchCount()(uint256)" --rpc-url http://localhost:8545)
+echo "batch count: $batch_count"
+
+docker compose run scripts send-l2 --ethamount 10 --to user_l2user --times 10 --delay 200
+
+sleep 10
+
+echo "pausing espresso-dev-node"
+docker compose pause espresso-dev-node
+
+# Transactions are sent while the espresso-dev-node is paused
+docker compose run scripts send-l2 --ethamount 10 --to user_l2user --times 1500 --delay 200
+
+# Keep the espresso dev node paused for 2 minutes
+sleep 120
+
+docker compose unpause espresso-dev-node
+
+block=0
+new_batches=0
+# Wait for the validator to catch up
+while true; do
+    sleep 30
+    sequencer_block=$(cast block-number --rpc-url http://localhost:8547)
+    validator_block=$(cast block-number --rpc-url http://localhost:8247)
+    new_batch_count=$(cast call $sequencer_inbox "batchCount()(uint256)" --rpc-url http://localhost:8545)
+    new_batches=$(($new_batch_count - $batch_count))
+    echo "new batches: $new_batches"
+
+    if [[ "$validator_block" -gt "$sequencer_block" ]]; then
+        echo "Validator block number ($validator_block) is greater than sequencer block number ($sequencer_block)"
+        exit 1
+    fi
+    echo "validator block number: $validator_block"
+    echo "sequencer block number: $sequencer_block"
+    # this condition is not strictly necessary, but it guards against the case where the
+    # sequencer has not created any blocks at all and is still stopped at the genesis block
+    if [[ "$validator_block" -lt 100 ]]; then
+        continue
+    fi
+
+    # The batch poster will post batches once HotShot comes back alive.
+    # It is possible that all batch posting reports are in the pending state.
+    # In this case, the validator will not catch up with the sequencer.
+ if [[ $(($sequencer_block - $validator_block)) -le $new_batches ]]; then + echo "block number: $validator_block" + block=$validator_block + break + fi +done + +sequencer_root=$(cast block --rpc-url http://localhost:8547 --json $block | jq -r .stateRoot) +validator_root=$(cast block --rpc-url http://localhost:8247 --json $block | jq -r .stateRoot) + +echo "sequencer root: $sequencer_root" +echo "validator root: $validator_root" + +if [[ "$validator_root" != "$sequencer_root" ]]; then + echo "Error: validator root ($validator_root) does not match sequencer root ($sequencer_root)" + exit 1 +fi + +# Typically, 3 or 4 new batches are sufficient for this test. +# We set the threshold to 10 to avoid flakiness due to timing or network issues. +# If there are more than 10 new batches, it may indicate excessive or unexpected batch posting, which could be costly. +if [[ $new_batches -gt 10 ]]; then + echo "Error: too many new batches ($new_batches)" + exit 1 +fi + +docker compose down --remove-orphans diff --git a/regression-tests/batcher-shutdown.bash b/regression-tests/batcher-shutdown.bash new file mode 100755 index 00000000..aeda1936 --- /dev/null +++ b/regression-tests/batcher-shutdown.bash @@ -0,0 +1,107 @@ +#!/usr/bin/env bash + +set -euo pipefail + +cd "$(dirname "$0")" + +source ./common.bash + +echo "starting nodes" +../test-node.bash --init-force --espresso $(get_espresso_image_flag) --validate --detach + +sleep 30 + +# Ignore orphaned container output +# Orphaned containers will be removed at the end of the test +export COMPOSE_IGNORE_ORPHANS=1 +export http_proxy="" +export https_proxy="" +export all_proxy="" + + +sequencer_inbox=$(docker compose run --entrypoint sh poster -c "jq -r '.[0].rollup.\"sequencer-inbox\"' /config/deployed_chain_info.json | tail -n 1 | tr -d '\r\n'") +echo "sequencer inbox: $sequencer_inbox" +batch_count=$(cast call $sequencer_inbox "batchCount()(uint256)" --rpc-url http://localhost:8545) +echo "batch count: $batch_count" + +block=0 +last_validator_block=0 +check_validator_catchup_and_compare_roots() { + while true; do + sleep 30 + sequencer_block=$(cast block-number --rpc-url http://localhost:8547) + validator_block=$(cast block-number --rpc-url http://localhost:8247) + new_batch_count=$(cast call $sequencer_inbox "batchCount()(uint256)" --rpc-url http://localhost:8545) + new_batches=$(($new_batch_count - $batch_count)) + echo "new batches: $new_batches" + + if [[ "$validator_block" -gt "$sequencer_block" ]]; then + echo "Validator block number ($validator_block) is greater than sequencer block number ($sequencer_block)" + exit 1 + fi + echo "validator block number: $validator_block" + echo "sequencer block number: $sequencer_block" + + # The batch poster will post batches after the Hotshot is live + # It is possible that all batch posting reports are in the pending state. + # In this case, the validator will not catch up with the sequencer. + if [[ "$validator_block" -eq "$last_validator_block" && $(($sequencer_block - $validator_block - 1)) -le $new_batches ]]; then + echo "Validator block ($validator_block) is no longer increasing AND validator has caught up. Breaking out." 
+ block=$validator_block + break + fi + last_validator_block=$validator_block + done + + sequencer_root=$(cast block --rpc-url http://localhost:8547 --json $block | jq -r .stateRoot) + validator_root=$(cast block --rpc-url http://localhost:8247 --json $block | jq -r .stateRoot) + + echo "sequencer root: $sequencer_root" + echo "validator root: $validator_root" + + if [[ "$validator_root" != "$sequencer_root" ]]; then + echo "Error: validator root ($validator_root) does not match sequencer root ($sequencer_root)" + exit 1 + fi +} + +# create a background process to send transactions. +# It will take around 3.5 minutes to finish +docker compose run --detach scripts send-l2 --ethamount 10 --to user_l2user --times 360 --delay 600 + +echo "stopping batcher gracefully" +docker compose down poster + +sleep 60 + +status=$(docker ps -a --filter "name=poster" --format '{{.Status}}') +if [[ "$status" == Exited* ]]; then + echo "poster container has shut down." +else + echo "poster container is still running or restarting." +fi + +docker compose up -d poster + +echo "waiting for validator to catch up" +check_validator_catchup_and_compare_roots + +docker compose run --detach scripts send-l2 --ethamount 10 --to user_l2user --times 360 --delay 600 +echo "force kill batch poster" +docker compose kill poster + +sleep 20 + +status=$(docker ps -a --filter "name=poster" --format '{{.Status}}') +if [[ "$status" == Exited* ]]; then + echo "poster container has shut down." +else + echo "poster container is still running or restarting." +fi + +docker compose up -d poster + +echo "waiting for validator to catch up" +check_validator_catchup_and_compare_roots + +docker compose down --remove-orphans diff --git a/regression-tests/batcher-with-malicious-sequencer.bash b/regression-tests/batcher-with-malicious-sequencer.bash new file mode 100755 index 00000000..c11c5478 --- /dev/null +++ b/regression-tests/batcher-with-malicious-sequencer.bash @@ -0,0 +1,101 @@ +#!/usr/bin/env bash +# This script tests the batcher's behavior when interacting with a malicious sequencer. +# If the sequencer acts maliciously, the batcher will lose liveness. +# Once the sequencer resumes correct behavior, the batcher should automatically recover liveness. 
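+#
+# The mock sequencer (the mock-sequencer service in docker-compose.yaml) relays
+# the real sequencer feed and exposes an HTTP control API on port 10000. The
+# endpoints used below return the current message count, and each POST injects
+# the misbehavior its name suggests, e.g.
+#
+#   curl -X POST http://127.0.0.1:10000/skip-next
+#
+# (The precise fault semantics are assumptions based on the endpoint names.)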
+ +wait_for_block_number() { + local validator_rpc="$1" + local target_block_number="$2" + while true; do + # Get current message count + count=$(curl --fail --silent http://127.0.0.1:10000/block-number) + echo "Current mock sequencer block number: $count" + block_number=$(cast block-number --rpc-url $validator_rpc) + echo "Current validated block number: $block_number" + if [[ $block_number -gt $target_block_number ]]; then + break + fi + sleep 5 + done +} + +check_and_recover_liveness() { + local validator_rpc="$1" + local current_count="$2" + local validated_count=0 + + local i=0 + while [[ $i -lt 15 ]]; do + local block_number + block_number=$(cast block-number --rpc-url "$validator_rpc") + echo "Current validated number: $block_number, target: $current_count" + if [[ $block_number -le $current_count ]]; then + validated_count=$block_number + fi + i=$((i+1)) + sleep 15 + done + + echo "sequencer is going to work properly" + curl -X POST --fail --silent http://127.0.0.1:10000/reset + echo "restarting batch poster" + docker compose restart poster + + wait_for_block_number $validator_rpc $validated_count + + echo "$validated_count" +} + +set -euo pipefail + +cd "$(dirname "$0")" + +echo "starting with mock sequencer" + +source ./common.bash +../test-node.bash --init-force --espresso $(get_espresso_image_flag) --validate --mock-sequencer --detach + +while true; do + curl -sfL http://localhost:41000/v0/status/block-height && break || sleep 5 + echo "waiting for nodes" +done + +validator_rpc="http://localhost:8247" + +echo "starting tx spammer" +docker compose run --detach scripts send-l2 --ethamount 10 --to user_l2user --times 500000 --delay 8000 --wait + +wait_for_block_number $validator_rpc 20 + +# This should not break liveness +current_count=$(curl -X POST --fail --silent http://127.0.0.1:10000/send-in-random) +wait_for_block_number $validator_rpc $(($current_count+10)) + +sleep 10 + +current_count2=$(curl -X POST --fail --silent http://127.0.0.1:10000/skip-next) +check_and_recover_liveness $validator_rpc $current_count2 + +sleep 10 + +current_count3=$(curl -X POST --fail --silent http://127.0.0.1:10000/send-oversized) +check_and_recover_liveness $validator_rpc $current_count3 + +current_count4=$(curl -X POST --fail --silent http://127.0.0.1:10000/send-messages-at-same-block) +check_and_recover_liveness $validator_rpc $current_count4 + +sleep 20 + +block_number=$(cast block-number --rpc-url $validator_rpc) +echo "block number: $block_number" + +validator_state=$(cast block $block_number --rpc-url $validator_rpc --json | jq -r .stateRoot) +sequencer_state=$(cast block $block_number --rpc-url http://localhost:8547 --json | jq -r .stateRoot) +echo "validator_state: $validator_state" +echo "sequencer_state: $sequencer_state" +if [ "$validator_state" != "$sequencer_state" ]; then + echo "Error: validator_state ($validator_state) does not match sequencer_state ($sequencer_state)" + exit 1 +fi + +docker compose down --remove-orphans diff --git a/regression-tests/batcher-with-multiple-node-client b/regression-tests/batcher-with-multiple-node-client new file mode 100755 index 00000000..477349d2 --- /dev/null +++ b/regression-tests/batcher-with-multiple-node-client @@ -0,0 +1,121 @@ +#!/usr/bin/env bash + +set -euo pipefail + +cd "$(dirname "$0")" + +source ./common.bash + +echo "starting nodes" +../test-node.bash --init-force --espresso $(get_espresso_image_flag) --validate --detach +sleep 60 + +# Ignore orphaned container output +# Orphaned containers will be removed at the end of the test 
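+# Scenario overview for the hotshot-urls failover checks below (expected
+# behavior as asserted by this test):
+#   1 good + 1 bad URL  -> the poster runs but posts no batches
+#   1 good URL only     -> the poster fails to start
+#   1 good + 2 bad URLs -> the poster starts but still posts no batches
+#   2 good URLs         -> the poster posts batches again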
+export COMPOSE_IGNORE_ORPHANS=1 + +good_url=http://espresso-dev-node:41000 +bad_url=http://bad-url:41000 + +docker compose run scripts update-config-value \ + --path /config/poster_config.json \ + --property node.batch-poster.max-empty-batch-delay \ + --value "10h" \ + +# 1 good url and 1 bad url +# The batcher should not post any batches +echo "modifying poster Hotshot url config to have 1 good url and 1 bad url" +docker compose run scripts update-config-value \ + --path /config/poster_config.json \ + --property node.batch-poster.hotshot-urls \ + --value "$good_url,$bad_url" \ + --isArray true + +# update batcher's config +docker compose restart poster + +# create L2 traffic +docker compose run scripts send-l2 --ethamount 10 --to user_l2user --times 10 --delay 200 + +sequencer_inbox=$(docker compose run --entrypoint sh poster -c "jq -r '.[0].rollup.\"sequencer-inbox\"' /config/deployed_chain_info.json | tail -n 1 | tr -d '\r\n'") +echo "sequencer inbox: $sequencer_inbox" +batch_count=$(cast call $sequencer_inbox "batchCount()(uint256)" --rpc-url http://localhost:8545) +echo "batch count: $batch_count" + +echo "waiting for 60 seconds for the batcher" +sleep 60 + +now_batch_count=$(cast call $sequencer_inbox "batchCount()(uint256)" --rpc-url http://localhost:8545) +if [[ $now_batch_count -ne $batch_count ]]; then + echo "batch count is not the same, the batcher is posting batches" + exit 1 +fi + +echo "batcher is not posting batches, which is expected" + +# Only 1 good url +# The batcher should fail to start +echo "modifying poster Hotshot url config to only have 1 good url" +docker compose run scripts update-config-value \ + --path /config/poster_config.json \ + --property node.batch-poster.hotshot-urls \ + --value "$good_url" \ + --isArray true + +docker compose restart poster + +sleep 5 + +status=$(docker compose ps --status=running --services | grep -w poster || true) +if [[ -z "$status" ]]; then + echo "Success: poster failed to start as expected." +else + echo "Error: poster is running but it should have failed to start." + exit 1 +fi + +echo "modifying poster Hotshot url config to have 1 good url and 2 bad urls" +docker compose run scripts update-config-value \ + --path /config/poster_config.json \ + --property node.batch-poster.hotshot-urls \ + --value "$good_url,$bad_url,$bad_url" \ + --isArray true + +docker compose restart poster + +sleep 10 + +status=$(docker compose ps --status=running --services | grep -w poster || true) +if [[ -z "$status" ]]; then + echo "Error: poster is not running but it should have started." + exit 1 +fi + +sleep 30 +now_batch_count=$(cast call $sequencer_inbox "batchCount()(uint256)" --rpc-url http://localhost:8545) +if [[ $now_batch_count -ne $batch_count ]]; then + echo "batch count is not the same, the batcher is posting batches" + exit 1 +fi + + +echo "modifying poster Hotshot url config to have 2 good urls" +docker compose run scripts update-config-value \ + --path /config/poster_config.json \ + --property node.batch-poster.hotshot-urls \ + --value "$good_url,$good_url" \ + --isArray true + +docker compose restart poster + +while true; do + sleep 10 + now_batch_count=$(cast call $sequencer_inbox "batchCount()(uint256)" --rpc-url http://localhost:8545) + if [[ $now_batch_count -gt $batch_count ]]; then + echo "Success: poster is posting batches." 
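+    # A new batch has landed on L1: with two reachable HotShot URLs the
+    # batcher recovers, which is the behavior this final scenario asserts.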
+    break
+  fi
+  echo "waiting for a new batch, batch count: $now_batch_count"
+done
+
+docker compose down --remove-orphans
diff --git a/regression-tests/batcher-with-spam-in-hotshot.bash b/regression-tests/batcher-with-spam-in-hotshot.bash
new file mode 100755
index 00000000..445d2af1
--- /dev/null
+++ b/regression-tests/batcher-with-spam-in-hotshot.bash
@@ -0,0 +1,121 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+cd "$(dirname "$0")"
+
+source ./common.bash
+
+echo "starting nodes"
+../test-node.bash --init-force --espresso $(get_espresso_image_flag) --validate --detach
+
+# Set `check-batch-correctness` to false: the message constructed by the
+# `send-l2-to-hotshot` command is not perfectly well-formed, so the batcher would
+# reject it, even though the messages are valid and the validator executes them successfully.
+docker compose run scripts update-config-value \
+  --path /config/poster_config.json \
+  --property node.batch-poster.check-batch-correctness \
+  --value false \
+  --isBool true
+
+docker compose restart poster
+
+attacker=user_attacker
+user=user_l2user
+user2=user_l2user2
+
+echo "funding"
+docker compose run scripts send-l2 --ethamount 100 --to sequencer --wait
+docker compose run scripts send-l2 --ethamount 100 --to $attacker --wait
+
+# Ignore orphaned container output
+# Orphaned containers will be removed at the end of the test
+export COMPOSE_IGNORE_ORPHANS=1
+
+nowBlock=$(cast block-number --rpc-url http://127.0.0.1:8547)
+messagePosition=$(($nowBlock + 1))
+nowDelayed=$(cast block --rpc-url http://127.0.0.1:8547 --json | jq -r .nonce | xargs printf "%d")
+
+get_nonce_from_sequencer() {
+  local account=$1
+  local addr=$(docker compose run scripts print-address --account $account)
+  local nonce=$(cast nonce $addr --rpc-url http://127.0.0.1:8547)
+  echo "$nonce"
+}
+
+send_valid_tx_message_to_hotshot() {
+  local account=$1
+  local to=$2
+  local nonceOffset=$3
+  local nonce=$(get_nonce_from_sequencer $account)
+  docker compose run scripts send-l2-to-hotshot --to $to --position $messagePosition --signer $account --delayed $nowDelayed --nonce $((nonce+nonceOffset))
+}
+
+send_invalid_nonce_message_to_hotshot() {
+  local account=$1
+  local to=$2
+  docker compose run scripts send-l2-to-hotshot --to $to --position $messagePosition --signer $account --delayed $nowDelayed --nonce 999999
+}
+
+send_valid_tx_message_to_hotshot sequencer $user 0
+messagePosition=$(($messagePosition+1))
+
+while true; do
+  balance=$(cast balance $(docker compose run scripts print-address --account $user) --rpc-url http://127.0.0.1:8247)
+  echo "$user balance: $balance"
+  if [ "$balance" -gt 0 ]; then
+    break
+  fi
+  sleep 1
+done
+
+oldBlockNumber=$(cast block-number --rpc-url http://127.0.0.1:8247)
+echo "oldBlockNumber: $oldBlockNumber"
+
+for i in {1..10}; do
+  ## This is a valid message but signed by an invalid signer.
+  ## The batcher should ignore this message.
+  send_valid_tx_message_to_hotshot $attacker user_invalid_user $i
+  sleep 1
+
+  ## This is an invalid message signed by a valid signer.
+  ## The batcher should accept it, but the validator will create an empty block for it.
+  send_invalid_nonce_message_to_hotshot sequencer $user2 $((i+1))
+  sleep 1
+
+  ## This is a valid transaction with an invalid position, signed by a valid signer.
+  ## The batcher should ignore this message since the message position is already taken.
+  send_valid_tx_message_to_hotshot sequencer $user2 $((i+1))
+  sleep 1
+
+  messagePosition=$(($messagePosition+1))
+done
+
+echo "sleeping for 60 seconds. Waiting for the validator to create blocks"
+sleep 60
+
+newBlockNumber=$(cast block-number --rpc-url http://127.0.0.1:8247)
+echo "newBlockNumber: $newBlockNumber"
+
+if [ "$newBlockNumber" -eq "$oldBlockNumber" ]; then
+  echo "Smoke test failed. No blocks created."
+  exit 1
+fi
+
+userAddress=$(docker compose run scripts print-address --account $user2)
+invalidUserAddress=$(docker compose run scripts print-address --account user_invalid_user)
+
+balance1=$(cast balance $userAddress --rpc-url http://127.0.0.1:8247)
+echo "$user2 balance: $balance1"
+
+balance2=$(cast balance $invalidUserAddress --rpc-url http://127.0.0.1:8247)
+echo "user_invalid_user balance: $balance2"
+
+if [ "$balance1" -gt 0 ] || [ "$balance2" -gt 0 ]; then
+  echo "Smoke test failed."
+  exit 1
+else
+  echo "Smoke test succeeded."
+fi
+
+docker compose down --remove-orphans
diff --git a/regression-tests/caff-node-batcher-addr-monitor.bash b/regression-tests/caff-node-batcher-addr-monitor.bash
new file mode 100755
index 00000000..ebe7e7ef
--- /dev/null
+++ b/regression-tests/caff-node-batcher-addr-monitor.bash
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+cd "$(dirname "$0")"
+
+source ./common.bash
+
+echo "starting nodes"
+../test-node.bash --init-force --espresso $(get_espresso_image_flag) --no-simple --caff-node --detach
+
+docker compose run --detach scripts send-l2 --ethamount 10 --to user_l2user --times 10000 --delay 200 --wait
+
+seqInboxAddr=0x06EBC64fDE465bB5569844B81DcfF12ECd9fb419
+newBatcherAddr=0x0000000000000000000000000000000000000000
+
+docker compose run scripts set-is-batch-poster --batchPoster $newBatcherAddr --isBatchPoster true --seqInboxAddr $seqInboxAddr --wait
+
+while true; do
+  if docker compose logs caff-node | grep "adding event" | grep "Addr:$newBatcherAddr IsBatcher:true"; then
+    break
+  fi
+  sleep 1
+done
+
+# Should be the same as the one in `config.ts`
+batcherAddr=0xe2148eE53c0755215Df69b2616E552154EdC584f
+docker compose run scripts set-is-batch-poster --batchPoster $batcherAddr --isBatchPoster false --seqInboxAddr $seqInboxAddr --wait
+
+while true; do
+  if docker compose logs caff-node | grep "adding event" | grep "Addr:$batcherAddr IsBatcher:false"; then
+    break
+  fi
+  sleep 1
+done
+
+echo "wait for consumption of all messages"
+sleep 120
+
+# From this point on, the caff node should not create any blocks.
+nowBlock=$(cast block-number --rpc-url http://localhost:8550)
+echo "nowBlock: $nowBlock"
+count=0
+while true; do
+  if [ $(cast block-number --rpc-url http://localhost:8550) -gt $nowBlock ]; then
+    echo "Error: caff node created a block"
+    exit 1
+  fi
+  count=$((count + 1))
+  if [ $count -gt 10 ]; then
+    break
+  fi
+  sleep 5
+done
+
+docker compose down --remove-orphans
diff --git a/regression-tests/caff-node-force-inclusion-test.bash b/regression-tests/caff-node-force-inclusion-test.bash
new file mode 100755
index 00000000..e3a0e624
--- /dev/null
+++ b/regression-tests/caff-node-force-inclusion-test.bash
@@ -0,0 +1,244 @@
+#!/usr/bin/env bash
+# This is a utility function for creating assertions at the end of this test.
+
+fail() {
+  echo "$*" 1>&2; exit 1;
+}
+
+set -euo pipefail
+set -a # automatically export all variables
+set -x # print each command before executing it, for debugging
+
+# CI is "true" when running in CI
+CI="${CI:-false}"
+
+# Output debug information on CI
+DEBUG="${DEBUG:-false}"
+if [ "$CI" = "true" ]; then
+  set -x
+  DEBUG=true
+fi
+
+# Show the command we are running, then run it. Due to piping this spawns a
+# subshell, so it does not work for commands like `cd` or `source`.
+function run {
+  echo -e "\033[34m>>> $*\033[0m"
+  "$@" 2>&1 | fmt
+}
+
+function cd {
+  emph "cd $*"
+  builtin cd "$@"
+}
+
+function emph {
+  echo -e "\033[34m>>> $*\033[0m\n"
+}
+
+# Display only the last line of piped input, continuously updating
+function fmt {
+  # Leave output unchanged in DEBUG mode
+  if [ "$DEBUG" = "true" ]; then
+    cat
+    return
+  fi
+  # rewrite the last line to avoid noisy output
+  while read -r line; do
+    tput cr
+    tput el
+    echo "$line" | cut -c -"$(tput cols)" | tr -d '\r\n'
+  done
+  echo
+}
+
+# Show something with a comment in front, to distinguish it from console output.
+function info {
+  echo "# $@"
+}
+
+# Remove log files on exit
+trap "exit" INT TERM
+trap cleanup EXIT
+function cleanup {
+  exit_code=$?
+  if [ $exit_code -ne 0 ]; then
+    echo
+    echo "An error occurred."
+    if [ -s "$ESPRESSO_DEVNODE_LOG_FILE" ]; then
+      echo "Espresso dev node logs:"
+      cat "$ESPRESSO_DEVNODE_LOG_FILE"
+      exit $exit_code
+    elif [ -s "$TESTNODE_LOG_FILE" ]; then
+      echo "Nitro testnode logs:"
+      cat "$TESTNODE_LOG_FILE"
+      exit $exit_code
+    fi
+  else
+    rm -vf "$TESTNODE_LOG_FILE"
+    rm -vf "$ESPRESSO_DEVNODE_LOG_FILE"
+  fi
+}
+
+# Find the directory of this script and of the project
+TEST_DIR="$(dirname $(readlink -f $0))"
+TESTNODE_LOG_FILE=$(mktemp -t nitro-test-node-logs-XXXXXXXX)
+ESPRESSO_DEVNODE_LOG_FILE=$(mktemp -t espresso-dev-node-logs-XXXXXXXX)
+TESTNODE_DIR="$(dirname "$TEST_DIR")"
+ENV_FILE="$TEST_DIR/.env"
+# Hide docker compose warnings about orphaned containers.
+export COMPOSE_IGNORE_ORPHANS=true
+
+info Ensuring docker compose project is stopped
+run docker compose down -v --remove-orphans
+
+source ./common.bash
+info Deploying an Espresso Nitro stack with the caff node also enabled
+emph ./test-node.bash --espresso $(get_espresso_image_flag) --caff-node --validate --tokenbridge --init-force --detach
+if [ "$DEBUG" = "true" ]; then
+  ./test-node.bash --espresso $(get_espresso_image_flag) --caff-node --validate --tokenbridge --init-force --detach
+else
+  info "This command starts up an entire Nitro stack. It takes a long time."
+  info "Run \`tail -f $TESTNODE_LOG_FILE\` to see logs, if necessary."
+  echo
+  ./test-node.bash --espresso $(get_espresso_image_flag) --validate --tokenbridge --caff-node --init-force --detach > "$TESTNODE_LOG_FILE" 2>&1
+fi
+
+# Start an Espresso sequencer node for the purposes of the test; it is not needed for a real deployment.
+info "Starting a local Espresso confirmation layer development node"
+emph docker compose up espresso-dev-node --detach
+if [ "$DEBUG" = "true" ]; then
+  docker compose up espresso-dev-node --detach
+else
+  info "Run \`tail -f $ESPRESSO_DEVNODE_LOG_FILE\` to see logs, if necessary."
+  echo
+  docker compose up espresso-dev-node --detach > "$ESPRESSO_DEVNODE_LOG_FILE" 2>&1
+fi
+
+
+info "Load environment variables in $ENV_FILE"
+# A similar env file should be supplied for whatever deployment is being tested.
+emph . "$TEST_DIR/.env"
+. "$TEST_DIR/.env"
"$TEST_DIR/.env" +echo +info "Loaded env vars:" +echo +cat "$TEST_DIR/.env" | sed 's/^/ /' +echo + +function trim-last { + tail -n 1 | tr -d '\r\n' + +} +function get-addr { + local file="$1" + local path="$2" + docker compose run --entrypoint cat scripts $file | jq -r "$path" | trim-last +} + + +PARENT_CHAIN_UPGRADE_EXECUTOR=$(get-addr /config/deployed_chain_info.json '.[0].rollup."upgrade-executor"') +declare -p PARENT_CHAIN_UPGRADE_EXECUTOR + +SEQUENCER_INBOX=$(get-addr /config/deployed_chain_info.json '.[0].rollup."sequencer-inbox"') +declare -p SEQUENCER_INBOX + +INBOX_ADDRESS=$(get-addr /config/deployed_chain_info.json '.[0].rollup.inbox') +declare -p INBOX_ADDRESS + +echo "UPGRADE_EXECUTOR: $PARENT_CHAIN_UPGRADE_EXECUTOR" +echo "SEQUENCER_INBOX: $SEQUENCER_INBOX" +echo "Inbox: $INBOX_ADDRESS" + +while true; do + # Before setting the max delay verify that Caff node is running + CAFF_NODE_RESPONSE=$(cast balance 0x3f1Eae7D46d88F08fc2F8ed27FCb2AB183EB2d0E --rpc-url http://127.0.0.1:8550) + + if [[ $CAFF_NODE_RESPONSE == "0" ]]; then + echo "Caff node is catching up, wait" + sleep 10 + else + break + fi +done + +PRIVATE_KEY="$(docker compose run scripts print-private-key --account l2owner 2>/dev/null | trim-last)" +# This is a private key used for testing, save to print +declare -p PRIVATE_KEY + +# Set the max delay blocks to 10 blocks, future blocks to 120 blocks, delay seconds to 150 seconds, future seconds to 3600 seconds +cast send $PARENT_CHAIN_UPGRADE_EXECUTOR $(cast calldata "executeCall(address, bytes)" $SEQUENCER_INBOX $(cast calldata "setMaxTimeVariation((uint256,uint256,uint256,uint256))" "(10,120,150,3600)")) --rpc-url $PARENT_CHAIN_RPC_URL --private-key $PRIVATE_KEY + +# First call the maxTimeVariation function to get the max time variation +{ + read DELAY_BLOCKS + read FUTURE_BLOCKS + read DELAY_SECONDS + read FUTURE_SECONDS +} < <( + cast call --rpc-url "$PARENT_CHAIN_RPC_URL" "$SEQUENCER_INBOX" \ + 'maxTimeVariation()(uint256,uint256,uint256,uint256)' | + awk '{print $1}' # Extract only the first field (in case of extra text like "[8.64e4]") +) + +echo "DELAY_BLOCKS: $DELAY_BLOCKS" +echo "FUTURE_BLOCKS: $FUTURE_BLOCKS" +echo "DELAY_SECONDS: $DELAY_SECONDS" +echo "FUTURE_SECONDS: $FUTURE_SECONDS" + +# Now we stop the sequencer +run docker stop nitro-testnode-sequencer-1 + +USER_L1_PRIVATE_KEY="$(docker compose run scripts print-private-key --account funnel 2>/dev/null | trim-last)" +# This is a private key used for testing, save to print +declare -p USER_L1_PRIVATE_KEY + +# Add retry logic to send delayed message because sometimes it fails on the first attempt +MAX_RETRIES=5 +RETRY_DELAY=5 # seconds between retries + +# Your original command +CMD="cast send $INBOX_ADDRESS \$(cast calldata \"sendL2MessageFromOrigin(bytes)\" \"0x123456\") --rpc-url $PARENT_CHAIN_RPC_URL --private-key $USER_L1_PRIVATE_KEY --gas-limit 20000000" + +# Retry logic +retry_count=0 +while [ $retry_count -lt $MAX_RETRIES ]; do + echo "Attempt $((retry_count + 1)) of $MAX_RETRIES..." + + if eval "$CMD"; then + echo "Command succeeded!" + break + else + echo "Command failed. Retrying in $RETRY_DELAY seconds..." + sleep $RETRY_DELAY + ((retry_count++)) + fi +done + +if [ $retry_count -ge $MAX_RETRIES ]; then + echo "All retries failed. Exiting." 
+  exit 1
+fi
+
+sleep 120
+
+
+has_force_inclusion_log() {
+  local container_name="$1"
+  local search_string="$2"
+  if docker logs "$container_name" 2>&1 | grep -q "$search_string"; then
+    return 0
+  else
+    return 1
+  fi
+}
+
+
+if has_force_inclusion_log "caff-node-1" "force inclusion is going to happen"; then
+  echo "It printed the force inclusion is going to happen log"
+  docker compose down --remove-orphans
+  exit 0
+else
+  echo "Caff node did not print the force inclusion log"
+  exit 1
+fi
diff --git a/regression-tests/caff-node-restart.bash b/regression-tests/caff-node-restart.bash
new file mode 100755
index 00000000..5b0f96c5
--- /dev/null
+++ b/regression-tests/caff-node-restart.bash
@@ -0,0 +1,135 @@
+#!/usr/bin/env bash
+
+l3_arg=""
+
+if [[ $1 == "--l3" ]]; then
+  l3_arg="--l3node"
+fi
+
+wait_for_block_number() {
+  local rpcUrl="$1"
+  local targetBlockNumber="$2"
+  while true; do
+    blockNumber=$(cast block-number --rpc-url $rpcUrl)
+    echo "Current caff node block number: $blockNumber"
+    if [[ $blockNumber -gt $targetBlockNumber ]]; then
+      break
+    fi
+    sleep 5
+  done
+}
+
+read_last_log() {
+  local container_name="$1"
+  local search_string="$2"
+  last_log=$(docker compose logs "$container_name" 2>&1 | grep "$search_string" | tail -1)
+  echo "$last_log"
+}
+
+parse_block_number() {
+  local log="$1"
+  result=$(echo "$log" | awk -F'blockNumber=' '{print $2}' | awk '{print $1}')
+  if [ -n "$result" ]; then
+    echo "$result"
+    return
+  fi
+  result=$(echo "$log" | awk -F'\"block number\"=' '{print $2}' | awk '{print $1}')
+  if [ -n "$result" ]; then
+    echo "$result"
+    return
+  fi
+  echo "$log" | awk -F'nextHotshotBlock=' '{print $2}' | awk '{print $1}'
+}
+
+get_log_count() {
+  local container_name="$1"
+  echo $(docker compose logs "$container_name" 2>&1 | wc -l)
+}
+
+set -euo pipefail
+
+cd "$(dirname "$0")"
+
+source ./common.bash
+
+echo "starting nodes"
+../test-node.bash --init-force --espresso $(get_espresso_image_flag) --no-simple --caff-node $l3_arg --detach
+
+container_name="caff-node"
+
+while true; do
+  curl -sfL http://localhost:41000/v0/status/block-height && break || sleep 5
+  echo "waiting for nodes"
+done
+
+echo "starting tx spammer"
+docker compose run --detach scripts send-l2 --ethamount 10 --to user_l2user --times 500000 --delay 20000 --wait
+
+if [ "$l3_arg" != "" ]; then
+  docker compose run --detach scripts send-l3 --ethamount 10 --to user_l3user --times 500000 --delay 20000 --wait
+fi
+
+caff_rpc="http://localhost:8550"
+
+wait_for_block_number $caff_rpc 20
+
+echo "shutting down caff node"
+docker compose stop $container_name 2>&1
+docker compose stop $container_name 2>&1
+
+sleep 10
+
+count=$(get_log_count $container_name)
+last_produced_block_log=$(read_last_log $container_name "Produced block")
+last_produced_block_num=$(parse_block_number "$last_produced_block_log")
+echo "last log: $last_produced_block_log"
+echo "last block number: $last_produced_block_num"
+
+last_processing_hotshot_block_log=$(read_last_log $container_name "processing hotshot block")
+last_processing_hotshot_block_num=$(parse_block_number "$last_processing_hotshot_block_log")
+echo "last processing hotshot block number: $last_processing_hotshot_block_num"
+
+restart_ts="$(date -u +%Y-%m-%dT%H:%M:%SZ)"
+docker compose start $container_name
+
+sleep 20
+
+next_produced_block_log=$(docker compose logs $container_name 2>&1 | tail -n +$count | grep "Produced block" | head -1)
+echo "next log: $next_produced_block_log"
+next_produced_block_num=$(parse_block_number "$next_produced_block_log")
+
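+# Only consider logs emitted after the restart: restart_ts was captured just
+# before `docker compose start`, so --since filters out the pre-restart
+# "Starting streamer" lines that would otherwise match first.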
+next_processing_hotshot_block_log=$(
+  docker compose logs --since "$restart_ts" "$container_name" 2>&1 \
+    | grep "Starting streamer" \
+    | head -1
+)
+next_processing_hotshot_block_num=$(parse_block_number "$next_processing_hotshot_block_log")
+
+echo "last block number: $last_produced_block_num"
+echo "next block number: $next_produced_block_num"
+
+echo "last_hotshot_log: $last_processing_hotshot_block_log"
+echo "next_hotshot_log: $next_processing_hotshot_block_log"
+echo "last processing hotshot block number: $last_processing_hotshot_block_num"
+echo "next processing hotshot block number: $next_processing_hotshot_block_num"
+
+if [[ $next_produced_block_num -eq $((last_produced_block_num + 1)) ]]; then
+  echo "caff node next produced block check succeeded"
+else
+  echo "caff node next produced block check failed"
+  exit 1
+fi
+
+if [[ $next_processing_hotshot_block_num -le $((last_processing_hotshot_block_num)) ]]; then
+  # The caff node is allowed to restart from a slightly earlier hotshot block
+  # because it persists the earliest hotshot block number of its buffer
+  if [[ $next_processing_hotshot_block_num -ge $((last_processing_hotshot_block_num - 10)) ]]; then
+    echo "caff node next processing hotshot block check succeeded"
+    docker compose down
+    exit 0
+  fi
+fi
+echo "caff node next processing hotshot block check failed"
+exit 1
diff --git a/regression-tests/caff-node-state-check.bash b/regression-tests/caff-node-state-check.bash
new file mode 100755
index 00000000..659180aa
--- /dev/null
+++ b/regression-tests/caff-node-state-check.bash
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+cd "$(dirname "$0")"
+
+source ./common.bash
+
+echo "starting nodes"
+../test-node.bash --init-force --espresso $(get_espresso_image_flag) --validate --caff-node --detach
+
+echo "starting tx spammer"
+docker compose run --detach scripts send-l2 --ethamount 10 --to user_l2user --times 50 --delay 200 --wait
+
+for i in {1..20}; do
+  echo "sending delayed tx"
+  docker compose run scripts send-l2-delayed --ethamount 10 --to user_delayed_user --wait
+  sleep 5
+done
+
+echo "waiting for all transactions to be processed by the caff node"
+sleep 60
+
+user_l2user_address=$(docker compose run scripts print-address --account user_l2user | tail -n 1 | tr -d '\r\n')
+balance1=$(cast balance $user_l2user_address --rpc-url http://127.0.0.1:8550)
+actualBalance1=$(cast balance $user_l2user_address --rpc-url http://127.0.0.1:8247)
+if [ "$balance1" != "$actualBalance1" ]; then
+  echo "Error: balance1 ($balance1) does not match actualBalance1 ($actualBalance1)"
+  exit 1
+fi
+
+user_delayed_user_address=$(docker compose run scripts print-address --account user_delayed_user | tail -n 1 | tr -d '\r\n')
+balance2=$(cast balance $user_delayed_user_address --rpc-url http://127.0.0.1:8550)
+actualBalance2=$(cast balance $user_delayed_user_address --rpc-url http://127.0.0.1:8247)
+if [ "$balance2" != "$actualBalance2" ]; then
+  echo "Error: balance2 ($balance2) does not match actualBalance2 ($actualBalance2)"
+  exit 1
+fi
+
+blockNumber=$(cast block-number --rpc-url http://localhost:8247)
+echo "blockNumber: $blockNumber"
+trustedState=$(cast block $blockNumber --rpc-url http://localhost:8247 --json | jq -r .stateRoot)
+caffState=$(cast block $blockNumber --rpc-url http://localhost:8550 --json | jq -r .stateRoot)
+if [ "$trustedState" != "$caffState" ]; then
+  echo "Error: trustedState ($trustedState) does not match caffState ($caffState)"
+  exit 1
+fi
+
+echo "use the geth node as the trusted node"
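+# Negative check for the state checker: point the caff node's trusted-node-url
+# at the geth (parent chain) endpoint, whose state roots can never match the
+# L2 chain. After the restart the state checker should make the caff node shut
+# down, which the loop below detects via `docker compose ps -a | grep Exited`.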
+docker compose run scripts update-config-value --path /config/caff_sequencer_config.json --property node.espresso-caff-node.state-checker.trusted-node-url --value http://geth:8545 + +echo "restart caff node" +docker compose restart caff-node + +sleep 20 + +while true; do + if docker compose ps -a caff-node | grep Exited; then + break + fi + sleep 10 +done + +docker compose down --remove-orphans diff --git a/regression-tests/common.bash b/regression-tests/common.bash new file mode 100755 index 00000000..edc5ad9b --- /dev/null +++ b/regression-tests/common.bash @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +get_espresso_image_flag() { + if [[ -n "${ESPRESSO_DEV:-}" ]]; then + echo "--dev nitro" + else + echo "--latest-espresso-image" + fi +} diff --git a/rollupcreator/Dockerfile b/rollupcreator/Dockerfile index f98daf71..fd915a25 100644 --- a/rollupcreator/Dockerfile +++ b/rollupcreator/Dockerfile @@ -1,4 +1,4 @@ -FROM node:18-bookworm-slim +FROM node:20-bookworm-slim ARG NITRO_CONTRACTS_BRANCH=celestia-integration ARG NITRO_CONTRACTS_REPO=https://github.com/EspressoSystems/nitro-contracts.git RUN apt-get update && \ @@ -6,12 +6,11 @@ RUN apt-get update && \ WORKDIR /workspace RUN git clone --recurse-submodules --no-checkout ${NITRO_CONTRACTS_REPO} ./ RUN git checkout ${NITRO_CONTRACTS_BRANCH} +RUN git submodule update --init --recursive RUN yarn install && yarn cache clean RUN curl -L https://foundry.paradigm.xyz | bash ENV PATH="${PATH}:/root/.foundry/bin" -RUN foundryup -RUN forge update lib/forge-std -RUN git submodule update --init --recursive +RUN foundryup --install 1.0.0 RUN touch scripts/config.ts RUN yarn build:all ENTRYPOINT ["yarn"] diff --git a/scripts/Dockerfile b/scripts/Dockerfile index dfba9806..367de50b 100644 --- a/scripts/Dockerfile +++ b/scripts/Dockerfile @@ -1,4 +1,4 @@ -FROM node:18-bullseye-slim +FROM node:20-bookworm-slim WORKDIR /workspace COPY ./package.json ./yarn.lock ./ RUN yarn diff --git a/scripts/config.ts b/scripts/config.ts index 68eb523b..09a38819 100644 --- a/scripts/config.ts +++ b/scripts/config.ts @@ -183,6 +183,7 @@ function getChainInfo(): ChainInfo { return chainInfo; } + function writeConfigs(argv: any) { const valJwtSecret = path.join(consts.configpath, "val_jwt.hex"); const chainInfoFile = path.join(consts.configpath, "l2_chain_info.json"); @@ -298,12 +299,14 @@ function writeConfigs(argv: any) { vhosts: "*", corsdomain: "*", }, + "log-level": "DEBUG" }; if (argv.espresso) { let config = baseConfig as any; config.node["batch-poster"]["hotshot-url"] = ""; config.node["batch-poster"]["light-client-address"] = ""; + config.node["batch-poster"]["max-empty-batch-delay"] = "1h"; } baseConfig.node["data-availability"]["sequencer-inbox-address"] = @@ -314,7 +317,7 @@ function writeConfigs(argv: any) { if (argv.simple) { let simpleConfig = JSON.parse(baseConfJSON); simpleConfig.node.staker.enable = true; - simpleConfig.node.staker["use-smart-contract-wallet"] = true; + simpleConfig.node.staker["use-smart-contract-wallet"] = false; simpleConfig.node.staker.dangerous["without-block-validator"] = true; simpleConfig.node.sequencer = true; simpleConfig.node.dangerous["no-sequencer-coordinator"] = true; @@ -328,7 +331,7 @@ function writeConfigs(argv: any) { } if (argv.espresso) { simpleConfig.node.feed.output.enable = true; - simpleConfig.node["batch-poster"]["hotshot-url"] = argv.espressoUrl; + simpleConfig.node["batch-poster"]["hotshot-urls"] = [argv.espressoUrl, argv.espressoUrl]; simpleConfig.node["batch-poster"]["light-client-address"] = 
argv.lightClientAddress; simpleConfig.node["block-validator"]["dangerous"][ @@ -409,21 +412,49 @@ function writeConfigs(argv: any) { sequencerConfig.node["seq-coordinator"].enable = true; } - if (argv.espresso && argv.enableCaffNode) { + if (argv.enableCaffNode) { sequencerConfig.node.sequencer = false; + sequencerConfig.node["seq-coordinator"].enable = false; sequencerConfig.execution["sequencer"].enable = false; sequencerConfig.node["delayed-sequencer"].enable = false; - sequencerConfig.node["parent-chain-reader"].enable = false; + sequencerConfig.node["parent-chain-reader"].enable = true; sequencerConfig.node["espresso-caff-node"] = { - enable: true, - "hotshot-urls": [argv.espressoUrl], - "fallback-urls": [argv.espressoUrl], + "enable": true, + "hotshot-urls": [argv.espressoUrl, argv.espressoUrl], "next-hotshot-block": 1, namespace: 412346, "hotshot-polling-interval": "250ms", "retry-time": "2s", - "espresso-tee-verifier-addr": "0xb562622f2D76F355D673560CB88c1dF6088702f1", + "espresso-sgx-verifier-addr": + "0xb562622f2D76F355D673560CB88c1dF6088702f1", + "batch-poster-addr": "0xe2148eE53c0755215Df69b2616E552154EdC584f", + "wait-for-finalization": true, + "from-block": 1, + "wait-for-confirmations": false, + "blocks-to-read": 1, + "force-inclusion-checker": { + "block-threshold-tolerance": 100000, + "second-threshold-tolerance": 100000, + "polling-interval": "1h", + }, + "state-checker": { + "trusted-node-url": "http://bad-url:8550", + "error-tolerance-duration": "1h" + } }; + if (argv.l3Espresso) { + sequencerConfig.node["espresso-caff-node"]["namespace"] = 333333; + sequencerConfig.chain.id = 333333; + sequencerConfig["parent-chain"].connection.url = argv.l2url; + const l3ChainInfoFile = path.join(consts.configpath, "l3_chain_info.json"); + sequencerConfig.chain["info-files"] = [l3ChainInfoFile]; + sequencerConfig.node["espresso-caff-node"]["batch-poster-addr"] = "0x3E6134aAD4C4d422FF2A4391Dc315c4DDf98D1a5"; + } else if (argv.validate) { + sequencerConfig.node["espresso-caff-node"]["state-checker"] = { + "trusted-node-url": "http://validator:8547", + "error-tolerance-duration": "1m", + } + } sequencerConfig.execution["forwarding-target"] = "ws://sequencer:8548"; fs.writeFileSync( @@ -439,10 +470,16 @@ function writeConfigs(argv: any) { let posterConfig = JSON.parse(baseConfJSON); if (argv.espresso) { - posterConfig.node.feed.input.url.push("ws://sequencer:9642"); - posterConfig.node["batch-poster"]["hotshot-url"] = argv.espressoUrl; + if (argv.mockSequencer) { + posterConfig.node.feed.input.url.push("ws://mock-sequencer:9642"); + posterConfig.node["batch-poster"]["max-empty-batch-delay"] = "30s" + } else { + posterConfig.node.feed.input.url.push("ws://sequencer:9642"); + } + posterConfig.node["batch-poster"]["hotshot-urls"] = [argv.espressoUrl, argv.espressoUrl]; posterConfig.node["batch-poster"]["light-client-address"] = argv.lightClientAddress; + posterConfig.node["batch-poster"]["espresso-tee-type"] = "SGX"; } else { posterConfig.node["seq-coordinator"].enable = true; } @@ -474,13 +511,14 @@ function writeConfigs(argv: any) { l3Config.node["delayed-sequencer"]["use-merge-finality"] = false; l3Config.node["batch-poster"].enable = true; l3Config.node["batch-poster"]["redis-url"] = ""; - if (argv.espresso) { + if (argv.l3Espresso) { l3Config.node.feed.output.enable = true; l3Config.node.dangerous["no-sequencer-coordinator"] = true; - l3Config.node.feed.input.url.push("ws://sequencer:9642"); - l3Config.node["batch-poster"]["hotshot-url"] = argv.espressoUrl; + 
l3Config.node.feed.input.url.push("ws://l3node:3348"); + l3Config.node["batch-poster"]["hotshot-urls"] = [argv.espressoUrl, argv.espressoUrl]; l3Config.node["batch-poster"]["light-client-address"] = argv.lightClientAddress; + l3Config.node["batch-poster"]["espresso-tee-type"] = "SGX"; } fs.writeFileSync( path.join(consts.configpath, "l3node_config.json"), @@ -725,6 +763,11 @@ export const writeConfigCommand = { describe: "DAS committee member B BLS pub key", default: "", }, + validate: { + boolean: true, + describe: "enable the caff node to use the validator as the trusted node for its state checker", + default: false, + } }, handler: (argv: any) => { writeConfigs(argv); diff --git a/scripts/ethcommands.ts b/scripts/ethcommands.ts index 06285e79..81cb56f8 100644 --- a/scripts/ethcommands.ts +++ b/scripts/ethcommands.ts @@ -6,6 +6,7 @@ import * as L1GatewayRouter from "@arbitrum/token-bridge-contracts/build/contrac import * as L1AtomicTokenBridgeCreator from "@arbitrum/token-bridge-contracts/build/contracts/contracts/tokenbridge/ethereum/L1AtomicTokenBridgeCreator.sol/L1AtomicTokenBridgeCreator.json"; import * as ERC20 from "@openzeppelin/contracts/build/contracts/ERC20.json"; import * as fs from "fs"; +import * as rlp from "rlp"; import { ARB_OWNER } from "./consts"; const path = require("path"); @@ -31,6 +32,43 @@ async function sendTransaction(argv: any, threadId: number) { } } +function updateConfigValue(argv: any) { + const filePath = argv.path + const propertyPath = argv.property + const value = argv.value + let v: any + if (argv.isArray) { + v = value.split(",").map((v: string) => { + const s = v.trim() + if (argv.isBool) { + return s === "true" + } + if (argv.isNumber) { + return Number(s) + } + return s + }) + } else if (argv.isBool) { + v = value === "true" + } else if (argv.isNumber) { + v = Number(value) + } else { + v = value + } + const fileContents = fs.readFileSync(filePath).toString(); + const config = JSON.parse(fileContents); + const property = propertyPath.split("."); + let current = config; + for (let i = 0; i < property.length - 1; i++) { + if (!current[property[i]]) { + throw new Error(`Property ${property[i]} not found`); + } + current = current[property[i]]; + } + current[property[property.length - 1]] = v; + fs.writeFileSync(filePath, JSON.stringify(config, null, 2)); +} + async function bridgeFunds(argv: any, parentChainUrl: string, chainUrl: string, inboxAddr: string) { argv.provider = new ethers.providers.WebSocketProvider(parentChainUrl); @@ -55,6 +93,186 @@ async function bridgeFunds(argv: any, parentChainUrl: string, chainUrl: string, } } +async function setIsBatchPoster(argv: any) { + const parentChainUrl = argv.l1url; + const seqInboxAddr = argv.seqInboxAddr; + const batchPoster = argv.batchPoster; + const isBatchPoster = argv.isBatchPoster; + + const provider = new ethers.providers.WebSocketProvider(parentChainUrl); + const account = namedAccount("l2owner", argv.threadId).connect(provider) + const iface = new ethers.utils.Interface([ + "function setIsBatchPoster(address, bool)" + ]); + const data = iface.encodeFunctionData("setIsBatchPoster", [batchPoster, isBatchPoster]) + const response = await account.sendTransaction({ + to: seqInboxAddr, + value: 0, + data: data, + nonce: await account.getTransactionCount("pending"), + }) + if (argv.wait) { + const receipt = await response.wait() + console.log(receipt) + } + provider.destroy() +} + +async function sendL2DelayedTransaction(argv: any, parentChainUrl: string, chainUrl: string, inboxAddr: string) { + const 
l2provider = new ethers.providers.WebSocketProvider(chainUrl); + const account = namedAccount(argv.from, argv.threadId).connect(l2provider) + const startNonce = await account.getTransactionCount("pending") + argv.data = undefined + const tx = await account.populateTransaction({ + to: namedAddress(argv.to, argv.threadId), + value: ethers.utils.parseEther(argv.ethamount), + data: argv.data, + nonce: startNonce, + }) + const signedTx = await account.signTransaction(tx) + // signed transaction type is 4 + const txBytes = [4, ...ethers.utils.arrayify(signedTx)] + const dataStr = ethers.utils.hexlify(txBytes) + const iface = new ethers.utils.Interface([ + "function sendL2Message(bytes messageData)" + ]); + const data = iface.encodeFunctionData("sendL2Message", [dataStr]); + + argv.provider = new ethers.providers.WebSocketProvider(parentChainUrl); + argv.data = data + const l1provider = new ethers.providers.WebSocketProvider(parentChainUrl); + const l1Account = namedAccount("funnel", argv.threadId).connect(l1provider) + const nonce = await l1Account.getTransactionCount("pending") + for (let index = 0; index < argv.times; index++) { + const response = await l1Account.sendTransaction({ + to: inboxAddr, + value: 0, + data: argv.data, + nonce: nonce + index, + }) + if (argv.wait) { + const receipt = await response.wait() + console.log(receipt) + } + if (argv.delay > 0) { + await new Promise(f => setTimeout(f, argv.delay)); + } + } + l1provider.destroy() +} + +async function sendL2TransactionToHotShot(argv: any) { + const to = namedAddress(argv.to) + const provider = new ethers.providers.WebSocketProvider(argv.l2url) + const l1provider = new ethers.providers.WebSocketProvider(argv.l1url) + const account = namedAccount(argv.signer).connect(provider) + const nonce = argv.nonce + + const network = await provider.getNetwork() + const chainId = network.chainId + + const tx = await account.populateTransaction({ + to, + value: ethers.utils.parseEther(argv.ethamount), + nonce: nonce, + }) + const signedTx = await account.signTransaction(tx) + // signed transaction type is 4 + const uint8ArrayTx = ethers.utils.arrayify("0x04" + signedTx.slice(2)) + const l1Block = await l1provider.getBlock("latest") + + const header = [ + 3, // kind + ethers.utils.arrayify(namedAddress("sequencer", argv.threadId)), // poster + ethers.utils.arrayify(ethers.utils.hexlify(l1Block.number)), // blockNumber + ethers.utils.arrayify(ethers.utils.hexlify(l1Block.timestamp)), // timestamp + [], // requestId (nilList) + null, // l1BaseFee (nil) + ] + + const message = [ + header, + uint8ArrayTx, // l2msg + null, + ] + + const messageWithMeta = [ + message, + argv.delayed, // delayedMessageRead + ] + + const encodedPayload = rlp.encode(messageWithMeta) + + const positionBuf = new Uint8Array(8) + const sizeBuf = new Uint8Array(8) + + new DataView(positionBuf.buffer).setBigUint64(0, BigInt(argv.position)) + new DataView(sizeBuf.buffer).setBigUint64(0, BigInt(encodedPayload.length)) + + const payload = new Uint8Array(positionBuf.length + sizeBuf.length + encodedPayload.length) + payload.set(positionBuf) + payload.set(sizeBuf, positionBuf.length) + payload.set(encodedPayload, positionBuf.length + sizeBuf.length) + + const signer = namedAccount(argv.signer) + const privateKey = signer.privateKey + const payloadHash = ethers.utils.keccak256(payload) + const signatureObj = new ethers.utils.SigningKey(privateKey).signDigest(payloadHash) + const signature = ethers.utils.joinSignature(signatureObj) + const uint8ArraySignature = 
ethers.utils.arrayify(signature) + + + // The Go code expects a secp256k1 format signature where v should be 0 or 1 + // Ethereum signature has v as 27 or 28, we need to convert it to 0 or 1 + if (uint8ArraySignature[64] === 27 || uint8ArraySignature[64] === 28) { + uint8ArraySignature[64] = uint8ArraySignature[64] - 27 + } + + const signatureLengthBuf = new Uint8Array(8) + new DataView(signatureLengthBuf.buffer).setBigUint64(0, BigInt(uint8ArraySignature.length)) + + const combined = new Uint8Array(signatureLengthBuf.length + uint8ArraySignature.length + payload.length) + combined.set(signatureLengthBuf) + combined.set(uint8ArraySignature, signatureLengthBuf.length) + combined.set(payload, signatureLengthBuf.length + uint8ArraySignature.length) + + const hotshotTx = { + namespace: chainId, + payload: arrayBufferToBase64(combined) + } + + const url = `${argv.espressoUrl}/submit/submit` + const body = JSON.stringify(hotshotTx) + + const response = await fetch(url, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'Accept': 'application/json' + }, + body: body + }) + + const responseText = await response.text() + console.log('Response:', responseText) + + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`) + } + + return +} + +function arrayBufferToBase64(buffer: ArrayBuffer) { + let binary = ''; + const bytes = new Uint8Array(buffer); + const len = bytes.byteLength; + for(let i = 0; i < len; i++) { + binary += String.fromCharCode(bytes[i]); + } + return btoa(binary); +} + async function bridgeNativeToken(argv: any, parentChainUrl: string, chainUrl: string, inboxAddr: string, token: string) { argv.provider = new ethers.providers.WebSocketProvider(parentChainUrl); @@ -425,6 +643,76 @@ export const transferERC20Command = { }, }; +export const updateConfigValueCommand = { + command: "update-config-value", + describe: "updates a config value", + builder: { + path: { + string: true, + describe: "path to config file", + default: "l2_chain_info.json", + }, + property: { + string: true, + describe: "property to update", + }, + value: { + string: true, + describe: "value to set", + }, + isBool: { + boolean: true, + describe: "value is boolean", + default: false, + }, + isNumber: { + boolean: true, + describe: "value is number", + default: false, + }, + isArray: { + boolean: true, + describe: "value is an array, separated by commas", + default: false, + } + }, + handler: async (argv: any) => { + updateConfigValue(argv) + }, +} + +export const setIsBatchPosterCommand = { + command: "set-is-batch-poster", + describe: "sets the isBatchPoster flag for a batch poster", + builder: { + parentChainUrl: { + string: true, + describe: "parent chain url", + }, + seqInboxAddr: { + string: true, + describe: "sequencer inbox address", + }, + batchPoster: { + string: true, + describe: "batch poster address", + }, + isBatchPoster: { + boolean: true, + describe: "is batch poster", + default: false, + }, + wait: { + boolean: true, + describe: "wait for transaction to complete", + default: false, + }, + }, + handler: async (argv: any) => { + await setIsBatchPoster(argv) + }, +} + export const sendL1Command = { command: "send-l1", describe: "sends funds between l1 accounts", @@ -495,6 +783,84 @@ export const sendL2Command = { }, }; +export const sendL2DelayedCommand = { + command: "send-l2-delayed", + describe: "sends funds between l2 accounts using delayed inbox", + builder: { + ethamount: { + string: true, + describe: "amount to transfer (in eth)", + default: "10", + }, 
+    from: {
+      string: true,
+      describe: "account (see general help)",
+      default: "funnel",
+    },
+    to: {
+      string: true,
+      describe: "address (see general help)",
+      default: "funnel",
+    },
+    wait: {
+      boolean: true,
+      describe: "wait for transaction to complete",
+      default: false,
+    },
+    data: { string: true, describe: "data" },
+  },
+  handler: async (argv: any) => {
+    const deploydata = JSON.parse(
+      fs
+        .readFileSync(path.join(consts.configpath, "deployment.json"))
+        .toString()
+    );
+    const inboxAddr = ethers.utils.hexlify(deploydata.inbox);
+    await sendL2DelayedTransaction(argv, argv.l1url, argv.l2url, inboxAddr);
+  },
+};
+
+export const sendL2ToHotShotCommand = {
+  command: "send-l2-to-hotshot",
+  describe: "send a transaction to HotShot directly, not through sequencer or batch poster",
+  builder: {
+    ethamount: {
+      string: true,
+      describe: "amount to transfer (in eth)",
+      default: "1",
+    },
+    to: {
+      string: true,
+      describe: "address (see general help)",
+      default: "funnel",
+    },
+    position: {
+      number: true,
+      describe: "position of the message",
+      default: 0,
+    },
+    signer: {
+      string: true,
+      describe: "account name of the signer (see general help)",
+      default: "",
+    },
+    delayed: {
+      number: true,
+      describe: "delayed message read count",
+      default: 1,
+    },
+    nonce: {
+      number: true,
+      describe: "nonce of the transaction",
+      default: 0,
+    },
+  },
+  handler: async (argv: any) => {
+    await sendL2TransactionToHotShot(argv)
+  },
+
+}
+
 export const sendL3Command = {
   command: "send-l3",
   describe: "sends funds between l3 accounts",
diff --git a/scripts/index.ts b/scripts/index.ts
index f4279c1e..91a64e85 100644
--- a/scripts/index.ts
+++ b/scripts/index.ts
@@ -22,6 +22,10 @@
   setValidKeysetCommand,
   waitForSyncCommand,
   transferL3ChainOwnershipCommand,
+  sendL2DelayedCommand,
+  sendL2ToHotShotCommand,
+  updateConfigValueCommand,
+  setIsBatchPosterCommand,
 } from "./ethcommands";
 
 async function main() {
@@ -38,10 +42,12 @@ async function main() {
     .options(stressOptions)
     .options({
       espresso: { boolean: true, decription: 'use Espresso Sequencer for sequencing and DA', default: false },
+      l3Espresso: { boolean: true, description: 'use the Espresso Sequencer for sequencing and DA on the L3 chain', default: false },
       espressoUrl: { string: true, description: 'Espresso Sequencer url', default: 'http://espresso-dev-node:41000' },
       lightClientAddress: { string: true, description: 'address of the light client contract', default: ''},
       enableCaffNode: {boolean: true, description: 'enable caff node', default: false},
       simpleWithValidator: {boolean: true, description: 'start a simple node that validates', default: false},
+      mockSequencer: {boolean: true, description: 'start a mock sequencer', default: false},
     })
     .command(bridgeFundsCommand)
     .command(bridgeToL3Command)
@@ -50,10 +56,14 @@
     .command(transferERC20Command)
     .command(sendL1Command)
     .command(sendL2Command)
+    .command(sendL2DelayedCommand)
+    .command(sendL2ToHotShotCommand)
     .command(sendL3Command)
     .command(sendRPCCommand)
     .command(setValidKeysetCommand)
     .command(transferL3ChainOwnershipCommand)
+    .command(updateConfigValueCommand)
+    .command(setIsBatchPosterCommand)
     .command(writeConfigCommand)
     .command(writeGethGenesisCommand)
     .command(writeL2ChainConfigCommand)
diff --git a/scripts/package.json b/scripts/package.json
index d1de3706..2bf2b4b0 100644
--- a/scripts/package.json
+++ b/scripts/package.json
@@ -13,6 +13,7 @@
     "@types/yargs": "^17.0.10",
     "ethers": "^5.6.1",
     "path": "^0.12.7",
+    "rlp": "^3.0.0",
     "typescript": "^4.6.2",
     "yargs": "^17.4.0"
   },
diff
--git a/scripts/yarn.lock b/scripts/yarn.lock index 26f86656..2bb8b34f 100644 --- a/scripts/yarn.lock +++ b/scripts/yarn.lock @@ -2400,6 +2400,11 @@ rlp@^2.2.4: dependencies: bn.js "^5.2.0" +rlp@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/rlp/-/rlp-3.0.0.tgz#5a60725ca4314a3a165feecca1836e4f2c1e2343" + integrity sha512-PD6U2PGk6Vq2spfgiWZdomLvRGDreBLxi5jv5M8EpRo3pU6VEm31KO+HFxE18Q3vgqfDrQ9pZA3FP95rkijNKw== + safe-array-concat@^1.0.1: version "1.1.0" resolved "https://registry.yarnpkg.com/safe-array-concat/-/safe-array-concat-1.1.0.tgz#8d0cae9cb806d6d1c06e08ab13d847293ebe0692" diff --git a/smoke-test-altlayer.bash b/smoke-test-altlayer.bash index 2652cfb4..e9d9ed1c 100755 --- a/smoke-test-altlayer.bash +++ b/smoke-test-altlayer.bash @@ -1,9 +1,10 @@ #!/usr/bin/env bash set -euo pipefail +source ./regression-tests/common.bash # Run altlayer config with batch poster, sequencer, full node and validator -./test-node.bash --init-force --validate --batchposters 1 --latest-espresso-image --detach +./test-node.bash --init-force --validate --batchposters 1 $(get_espresso_image_flag) --detach docker compose up -d full-node --detach # Sending L2 transaction through the full-node's api diff --git a/smoke-test-caff-node.bash b/smoke-test-caff-node.bash index aac54216..5a32a01a 100755 --- a/smoke-test-caff-node.bash +++ b/smoke-test-caff-node.bash @@ -17,7 +17,9 @@ user=user_l2user funnel=funnel caff_url="ws://caff-node:8548" -./test-node.bash --espresso --latest-espresso-image --validate --tokenbridge --init-force --detach --caff-node +source ./regression-tests/common.bash + +./test-node.bash --espresso $(get_espresso_image_flag) --validate --tokenbridge --init-force --detach --caff-node # Start the caff node docker compose up -d caff-node --wait --detach diff --git a/smoke-test-caldera.bash b/smoke-test-caldera.bash index 04f4c1db..4066ea11 100755 --- a/smoke-test-caldera.bash +++ b/smoke-test-caldera.bash @@ -13,9 +13,10 @@ listen_to_sequencer_feed() { done < <(wscat -c ws://127.0.0.1:9652) } +source ./regression-tests/common.bash # Run caldera with batch poster, sequencer, full node, validator and an anytrust chain which runs the dasserver -./test-node.bash --init-force --validate --batchposters 1 --latest-espresso-image --detach --l2-anytrust +./test-node.bash --init-force --validate --batchposters 1 $(get_espresso_image_flag) --detach --l2-anytrust docker compose up -d full-node --detach # Sending L2 transaction through the full-node's api diff --git a/smoke-test-l3.bash b/smoke-test-l3.bash index b992cc7c..6a5eb64e 100755 --- a/smoke-test-l3.bash +++ b/smoke-test-l3.bash @@ -1,7 +1,9 @@ #!/usr/bin/env bash set -euo pipefail -./test-node.bash --init-force --espresso --latest-espresso-image --l3node --l3-token-bridge --l3-fee-token --detach +source ./regression-tests/common.bash + +./test-node.bash --init-force --espresso $(get_espresso_image_flag) --l3node --l3-token-bridge --l3-fee-token --detach echo "Sending L3 transaction" user=user_l3 diff --git a/smoke-test.bash b/smoke-test.bash index 89b36720..2e20db60 100755 --- a/smoke-test.bash +++ b/smoke-test.bash @@ -1,7 +1,9 @@ #!/usr/bin/env bash set -euo pipefail -./test-node.bash --espresso --latest-espresso-image --validate --tokenbridge --init-force --detach +source ./regression-tests/common.bash + +./test-node.bash --espresso $(get_espresso_image_flag) --validate --tokenbridge --init-force --detach # Sending L2 transaction ./test-node.bash script send-l2 --ethamount 100 --to user_l2user --wait diff --git a/test-node.bash 
b/test-node.bash
index 7af8dcb0..e1d611dc 100755
--- a/test-node.bash
+++ b/test-node.bash
@@ -11,6 +11,7 @@ DEFAULT_NITRO_CONTRACTS_VERSION="99c07a7db2fcce75b751c5a2bd4936e898cda065"
 DEFAULT_TOKEN_BRIDGE_VERSION="v1.2.2"
 
-ESPRESSO_VERSION=ghcr.io/espressosystems/nitro-espresso-integration/nitro-node-dev:celestia-integration
+ESPRESSO_DEFAULT_VERSION=ghcr.io/espressosystems/nitro-espresso-integration/nitro-node-dev:celestia-integration
+: ${ESPRESSO_VERSION:=$ESPRESSO_DEFAULT_VERSION}
 
 # Set default versions if not overriden by provided env vars
 : ${NITRO_CONTRACTS_REPO:=$DEFAULT_NITRO_CONTRACTS_REPO}
@@ -52,7 +53,9 @@ lightClientAddr=0xb7fc0e52ec06f125f3afeba199248c79f71c2e3a
 lightClientAddrForL3=0x5e36aa9caaf5f708fca5c04d2d4c776a62b2b258
 enableCaffNode=false
 espresso=false
+espresso_mock_sequencer=false
 l2_espresso=false
+l3_espresso=false
 latest_espresso_image=false
 l3_custom_fee_token=false
 l3_token_bridge=false
@@ -126,6 +129,10 @@ while [[ $# -gt 0 ]]; do
       l2_espresso=true
       shift
       ;;
+    --mock-sequencer)
+      espresso_mock_sequencer=true
+      shift
+      ;;
     --caff-node)
       enableCaffNode=true
       shift
@@ -322,7 +329,9 @@ done
 
 if $espresso; then
   NITRO_CONTRACTS_REPO=https://github.com/EspressoSystems/nitro-contracts.git
-  NITRO_CONTRACTS_BRANCH=v2.1.3-celestia-1b04973
+  DEFAULT_ESPRESSO_NITRO_CONTRACTS_BRANCH=develop
+  : ${ESPRESSO_NITRO_CONTRACTS_BRANCH:=$DEFAULT_ESPRESSO_NITRO_CONTRACTS_BRANCH}
+  NITRO_CONTRACTS_BRANCH=$ESPRESSO_NITRO_CONTRACTS_BRANCH
   export NITRO_CONTRACTS_REPO
   export NITRO_CONTRACTS_BRANCH
   echo "Running espresso mode"
@@ -346,6 +355,9 @@ fi
 if [ $redundantsequencers -gt 2 ]; then
   NODES="$NODES sequencer_d"
 fi
+if $espresso_mock_sequencer; then
+  NODES="$NODES mock-sequencer"
+fi
 
 if [ $batchposters -gt 0 ] && ! $simple; then
   NODES="$NODES poster"
@@ -383,10 +395,15 @@ if $espresso; then
     # If we run the `l3node` with enabling espresso mode, then the
     # l2 node will run without `espresso` mode.
     l2_espresso=false
+    l3_espresso=true
   fi
   NODES="$NODES espresso-dev-node"
 fi
 
+if $enableCaffNode; then
+  NODES="$NODES caff-node"
+fi
+
 if $dev_nitro && $build_dev_nitro; then
   echo == Building Nitro
   if !
[ -n "${NITRO_SRC+set}" ]; then @@ -422,11 +439,13 @@ if $dev_nitro; then docker tag nitro-node-dev:latest nitro-node-dev-testnode else if $latest_espresso_image; then - docker pull $ESPRESSO_VERSION + echo "Using Espresso image: $ESPRESSO_VERSION" + docker pull $ESPRESSO_VERSION --platform linux/amd64 docker tag $ESPRESSO_VERSION nitro-node-dev-testnode - else - docker pull $NITRO_NODE_VERSION - docker tag $NITRO_NODE_VERSION nitro-node-dev-testnode + else + echo "Using Nitro image: $NITRO_NODE_VERSION" + docker pull $NITRO_NODE_VERSION + docker tag $NITRO_NODE_VERSION nitro-node-dev-testnode fi fi @@ -525,10 +544,11 @@ if $force_init; then if $l2anytrust; then echo "== Writing l2 chain config (anytrust enabled)" - docker compose run scripts --l2owner $l2ownerAddress write-l2-chain-config --anytrust --espresso $l2_espresso + docker compose run scripts --l2owner $l2ownerAddress write-l2-chain-config --anytrust --espresso $l2_espresso --mockSequencer $espresso_mock_sequencer else echo == Writing l2 chain config - docker compose run scripts --l2owner $l2ownerAddress write-l2-chain-config --espresso $l2_espresso + echo "espresso: $l2_espresso, mockSequencer: $espresso_mock_sequencer" + docker compose run scripts --l2owner $l2ownerAddress write-l2-chain-config --espresso $l2_espresso --mockSequencer $espresso_mock_sequencer fi sequenceraddress=`docker compose run scripts print-address --account sequencer | tail -n 1 | tr -d '\r\n'` @@ -579,10 +599,10 @@ if $force_init; then else echo == Writing configs - docker compose run scripts write-config $anytrustNodeConfigLine --espresso $l2_espresso --lightClientAddress $lightClientAddr + docker compose run scripts write-config $anytrustNodeConfigLine --espresso $l2_espresso --l3Espresso $l3_espresso --lightClientAddress $lightClientAddr --mockSequencer $espresso_mock_sequencer if $enableCaffNode; then echo == Writing configs for finality node - docker compose run scripts write-config $anytrustNodeConfigLine --espresso $l2_espresso --enableCaffNode --lightClientAddress $lightClientAddr + docker compose run scripts write-config $anytrustNodeConfigLine --espresso $l2_espresso --l3Espresso $l3_espresso --enableCaffNode --validate $validate --lightClientAddress $lightClientAddr --mockSequencer $espresso_mock_sequencer fi echo == Initializing redis docker compose up --wait redis @@ -632,7 +652,7 @@ if $force_init; then echo == Writing l3 chain config l3owneraddress=`docker compose run scripts print-address --account l3owner | tail -n 1 | tr -d '\r\n'` echo l3owneraddress $l3owneraddress - docker compose run scripts --l2owner $l3owneraddress write-l3-chain-config --espresso $espresso + docker compose run scripts --l2owner $l3owneraddress write-l3-chain-config --espresso $l3_espresso EXTRA_L3_DEPLOY_FLAG="" if $l3_custom_fee_token; then diff --git a/tokenbridge/Dockerfile b/tokenbridge/Dockerfile index 8f3ae386..2d208a7e 100644 --- a/tokenbridge/Dockerfile +++ b/tokenbridge/Dockerfile @@ -1,4 +1,4 @@ -FROM node:18-bullseye-slim +FROM node:20-bookworm-slim ARG TOKEN_BRIDGE_BRANCH=main RUN apt-get update && \ apt-get install -y git docker.io python3 build-essential