forked from OffchainLabs/nitro-testnode
-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathmigration-test.bash
More file actions
executable file
·279 lines (217 loc) · 14.1 KB
/
migration-test.bash
File metadata and controls
executable file
·279 lines (217 loc) · 14.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
#!/usr/bin/env bash
# This is a utility function for creating assertions at the end of this test.
# Prints the given message to stderr and aborts the script with status 1.
# printf is used instead of echo so messages that begin with '-' or contain
# backslash sequences are printed verbatim.
fail(){
printf '%s\n' "$*" 1>&2; exit 1;
}
set -euo pipefail # exit on any error, on use of an unset variable, and on failure of any pipeline stage
set -a # automatically export all variables
set -x # print each command before executing it, for debugging
# Wrapper around forge that targets the v3.1.0 migration contracts tree.
# "$@" (quoted) forwards each argument as-is; the original unquoted $@
# word-split arguments containing spaces (SC2068).
function forge3 {
forge "$@" --root espresso-migration-3.1.0
}
# Find directory of this script, the project, and the orbit-actions submodule.
# "$0" and the readlink output are quoted so paths containing spaces resolve
# correctly (SC2086).
TEST_DIR="$(dirname "$(readlink -f "$0")")"
TEST_SCRIPT_DIR="v2.1.3-migration"
# Log files for the detached node processes started later in the test.
TESTNODE_LOG_FILE=$(mktemp -t nitro-test-node-logs-XXXXXXXX)
ESPRESSO_DEVNODE_LOG_FILE=$(mktemp -t espresso-dev-node-logs-XXXXXXXX)
TESTNODE_DIR="$(dirname "$TEST_DIR")"
ORBIT_ACTIONS_DIR="$TESTNODE_DIR/orbit-actions"
ORBIT_MIGRATION_ACTION_DIR="contracts/parent-chain/espresso-migration/"
ENV_FILE="$TEST_DIR/.env"
# Hide docker compose warnings about orphaned containers.
export COMPOSE_IGNORE_ORPHANS=true
# Defaults below select the v2 migration path; overridden when VERSION=3.
v3=false
forge=forge
ESPRESSO_NITRO_CONTRACTS_BRANCH=v2.1.3-8e58a9a
# This commit matches v2.1.0 release of nitro-contracts, with additional support to set arb owner through upgrade executor
NITRO_CONTRACTS_BRANCH="99c07a7db2fcce75b751c5a2bd4936e898cda065"
BROADCAST_DIR="broadcast"
PROXY_ADMIN_ADDRESS="0x2A1f38c9097e7883570e0b02BFBE6869Cc25d8a3"
# ${VERSION:-2} defaults to 2 when VERSION is unset, keeping `set -u` happy.
# NOTE(review): `info` is not defined in this file — presumably provided by a
# sourced helper library from the surrounding test environment; confirm.
if [[ ${VERSION:-2} == "3" ]]; then
info "Using v3 migration scripts"
v3=true
TEST_SCRIPT_DIR="v3.1.0-migration"
ESPRESSO_NITRO_CONTRACTS_BRANCH=develop
ORBIT_MIGRATION_ACTION_DIR="espresso-migration-3.1.0/"
BROADCAST_DIR="espresso-migration-3.1.0/broadcast"
NITRO_CONTRACTS_BRANCH="v3.1.0"
PROXY_ADMIN_ADDRESS="0x275FC51309e5928Cb085b463ADEF5cbD45c76b62"
forge="forge3"
else
info "Using v2 migration scripts"
fi
# Stop any docker compose project left over from a previous run.
# NOTE(review): `info` and `run` are not defined in this file — presumably
# provided by a sourced helper library; verify before running standalone.
info Ensuring docker compose project is stopped
run docker compose down -v --remove-orphans
# Change to orbit actions directory, update the submodule, and install any dependencies for the purposes of the test.
cd "$ORBIT_ACTIONS_DIR"
info "Ensuring submodules are checked out"
run git submodule update --init --recursive
info "Ensuring nodejs dependencies are installed"
run yarn
info "Ensuring we can compile the migration smart contracts"
# $forge is either `forge` or the `forge3` wrapper, depending on VERSION.
run $forge build
# Change to the top level directory for the purposes of the test.
cd "$TESTNODE_DIR"
# NOTE: the test-node.bash script (or potentially docker compose) does not work
# well with the `fmt` utility function and hangs at the end. I don't know why.
# Furthermore the long warning lines don't work with the `fmt` function but I
# can't work out a way to be able to filter the lines (e. g. grep -v WARN) and
# still have the output show up.
info Deploying a vanilla Nitro stack locally, to be migrated to Espresso later.
emph ./test-node.bash --simple --init-force --tokenbridge --detach
# ${DEBUG:-} keeps `set -u` from aborting the script when DEBUG is not exported
# (the original bare "$DEBUG" was an unbound-variable error under set -u).
if [ "${DEBUG:-}" = "true" ]; then
./test-node.bash --simple --init-force --tokenbridge --detach
else
info "This command starts up an entire Nitro stack. It takes a long time."
info "Run \`tail -f $TESTNODE_LOG_FILE\` to see logs, if necessary."
echo
./test-node.bash --simple --init-force --tokenbridge --detach > "$TESTNODE_LOG_FILE" 2>&1
fi
# Start espresso sequencer node for the purposes of the test e.g. not needed for the real migration.
docker compose up espresso-dev-node --detach
info "Waiting for espresso dev node to start"
sleep 200
info "Load environment variables in $ENV_FILE"
# A similar env file should be supplied for whatever chain is being migrated.
. "$TEST_DIR/.env"
# Overwrite the ROLLUP_ADDRESS for this test, it might not be the same as the one in the .env file
#* Essential migration sub step * This address (the rollup proxy address) is likely a known address to operators.
ROLLUP_ADDRESS=$(docker compose run --entrypoint cat scripts /config/deployed_chain_info.json | jq -r '.[0].rollup.rollup' | tail -n 1 | tr -d '\r\n')
# A convoluted way to get the address of the child chain upgrade executor, maybe there's a better way?
# These steps below are just for the purposes of the test. In a real deployment operators will likely already know their child-chain's upgrade executor address, and it should be included in a .env file for the migration run.
# NOTE(review): `get-addr` is not defined in this file — presumably sourced
# from the test environment / .env tooling; confirm.
INBOX_ADDRESS=$(get-addr /config/deployed_chain_info.json '.[0].rollup.inbox')
declare -p INBOX_ADDRESS
PARENT_CHAIN_UPGRADE_EXECUTOR=$(get-addr /config/deployed_chain_info.json '.[0].rollup["upgrade-executor"]')
declare -p PARENT_CHAIN_UPGRADE_EXECUTOR
L1_TOKEN_BRIDGE_CREATOR_ADDRESS=$(get-addr /tokenbridge-data/network.json '.l1TokenBridgeCreator')
declare -p L1_TOKEN_BRIDGE_CREATOR_ADDRESS
# All expansions below are quoted so addresses/URLs survive word-splitting (SC2086).
CHILD_CHAIN_UPGRADE_EXECUTOR_ADDRESS=$(cast call "$L1_TOKEN_BRIDGE_CREATOR_ADDRESS" 'inboxToL2Deployment(address)(address,address,address,address,address,address,address,address,address)' "$INBOX_ADDRESS" | tail -n 2 | head -n 1 | tr -d '\r\n')
# Export l2 owner private key and address
# These commands are exclusive to the test.
# * Essential migration sub step * These addresses are likely known addresses to operators in the event of a real migration
PRIVATE_KEY="$(docker compose run scripts print-private-key --account l2owner | tail -n 1 | tr -d '\r\n')"
OWNER_ADDRESS="$(docker compose run scripts print-address --account l2owner | tail -n 1 | tr -d '\r\n')"
info "Disabling validator whitelist"
run cast send "$PARENT_CHAIN_UPGRADE_EXECUTOR" "executeCall(address,bytes)" "$ROLLUP_ADDRESS" "$(cast calldata 'setValidatorWhitelistDisabled(bool)' true)" --rpc-url "$PARENT_CHAIN_RPC_URL" --private-key "$PRIVATE_KEY"
cd "$ORBIT_ACTIONS_DIR"
info "Deploying mock espresso TEE verifier"
# $forge is deliberately unquoted: it holds a command name (forge or forge3).
run $forge script --chain "$PARENT_CHAIN_CHAIN_ID" "../espresso-tests/$TEST_SCRIPT_DIR/DeployMockVerifier.s.sol:DeployMockVerifier" --rpc-url "$PARENT_CHAIN_RPC_URL" --broadcast -vvvv
# jq reads the broadcast file directly (no useless cat).
ESPRESSO_TEE_VERIFIER_ADDRESS=$(jq -r '.transactions[0].contractAddress' "$BROADCAST_DIR/DeployMockVerifier.s.sol/1337/run-latest.json" | cast to-checksum)
declare -p ESPRESSO_TEE_VERIFIER_ADDRESS
# Echo for debug
echo "Deploying and initializing Espresso SequencerInbox"
# ** Essential migration step ** Forge script to deploy the new SequencerInbox. We do this to later point the rollups challenge manager to the espresso integrated OSP.
run $forge script --chain "$PARENT_CHAIN_CHAIN_ID" "../espresso-tests/$TEST_SCRIPT_DIR/DeployAndInitEspressoSequencerInboxForTest.s.sol:DeployAndInitEspressoSequencerInbox" --rpc-url "$PARENT_CHAIN_RPC_URL" --broadcast -vvvv --skip-simulation --private-key "$PRIVATE_KEY"
# Extract new_osp_entry address from run-latest.json
# * Essential migration sub step * These addresses are likely known addresses to operators in the event of a real migration after they have deployed the new OSP contracts, however, if operators create a script for the migration, this command is useful.
NEW_SEQUENCER_INBOX_IMPL_ADDRESS=$(jq -r '.receipts[0].contractAddress' "$BROADCAST_DIR/DeployAndInitEspressoSequencerInboxForTest.s.sol/1337/run-latest.json" | cast to-checksum)
declare -p NEW_SEQUENCER_INBOX_IMPL_ADDRESS
# Echo for debugging.
echo "Deployed new SequencerInbox at $NEW_SEQUENCER_INBOX_IMPL_ADDRESS"
# Echo for debug
echo "Deploying Espresso SequencerInbox migration action"
# ** Essential migration step ** Forge script to deploy Espresso OSP migration action
run $forge script --chain "$PARENT_CHAIN_CHAIN_ID" "$ORBIT_MIGRATION_ACTION_DIR/DeployEspressoSequencerMigrationAction.s.sol:DeployEspressoSequencerMigrationAction" --rpc-url "$PARENT_CHAIN_RPC_URL" --broadcast -vvvv
# Capture new OSP address
# * Essential migration sub step * operators will be able to manually determine this address while running the upgrade, but this can be useful if they wish to make a script.
SEQUENCER_MIGRATION_ACTION=$(jq -r '.transactions[0].contractAddress' "$BROADCAST_DIR/DeployEspressoSequencerMigrationAction.s.sol/1337/run-latest.json" | cast to-checksum)
declare -p SEQUENCER_MIGRATION_ACTION
echo "Deployed new EspressoSequencerMigrationAction at $SEQUENCER_MIGRATION_ACTION"
echo "Deploying ArbOS Upgrade action"
# Forge script to deploy the Espresso ArbOS upgrade action.
# ** Essential migration step ** the ArbOS upgrade signifies that the chain is now espresso compatible.
# NOTE(review): this uses plain `forge` (and the plain `broadcast/` dir below)
# rather than $forge/$BROADCAST_DIR — presumably intentional because the
# child-chain action lives under contracts/ for both versions; confirm for v3.
forge script --chain "$CHILD_CHAIN_CHAIN_NAME" contracts/child-chain/espresso-migration/DeployArbOSUpgradeAction.s.sol:DeployArbOSUpgradeAction --rpc-url "$CHILD_CHAIN_RPC_URL" --broadcast -vvvv
# Get the address of the newly deployed upgrade action.
ARBOS_UPGRADE_ACTION=$(jq -r '.transactions[0].contractAddress' broadcast/DeployArbOSUpgradeAction.s.sol/412346/run-latest.json)
# Echo information for debugging.
echo "Deployed ArbOSUpgradeAction at $ARBOS_UPGRADE_ACTION"
# Change directories to start nitro node in new docker container with espresso image
cd "$TESTNODE_DIR"
docker stop nitro-testnode-sequencer-1
docker wait nitro-testnode-sequencer-1
# Start nitro node in new docker container with espresso image
./espresso-tests/create-espresso-integrated-nitro-node.bash
# Use cast to call the upgradeExecutor and execute the L1 upgrade actions.This will point the challenge manager at the new OSP entry, as well as update the wasmModuleRoot for the rollup. ** Essential migration step **
cast send "$PARENT_CHAIN_UPGRADE_EXECUTOR" "execute(address, bytes)" "$SEQUENCER_MIGRATION_ACTION" "$(cast calldata "perform()")" --rpc-url "$PARENT_CHAIN_RPC_URL" --private-key "$PRIVATE_KEY"
echo "Executed SequencerMigrationAction via UpgradeExecutor"
# Query the rollup's latest confirmed node index (v2 rollups return a uint256).
# The expansions are quoted so the command stays intact if the URL or address
# ever contains shell metacharacters (SC2086).
function get_latest_confirmed_v2() {
result=$(cast call --rpc-url "$PARENT_CHAIN_RPC_URL" "$ROLLUP_ADDRESS" 'latestConfirmed()(uint256)')
echo "$result"
}
# Query the rollup's latest confirmed assertion hash (v3 rollups return a bytes32).
# The expansions are quoted so the command stays intact if the URL or address
# ever contains shell metacharacters (SC2086).
function get_latest_confirmed_v3() {
result=$(cast call --rpc-url "$PARENT_CHAIN_RPC_URL" "$ROLLUP_ADDRESS" 'latestConfirmed()(bytes32)')
echo "$result"
}
# Dispatch to the version-appropriate latestConfirmed query, based on the
# $v3 flag ("true" when VERSION=3 was selected at startup).
function get_latest_confirmed() {
case "$v3" in
true) get_latest_confirmed_v3 ;;
*) get_latest_confirmed_v2 ;;
esac
}
# Get the number of confirmed nodes before the upgrade to ensure the staker is still working.
NUM_CONFIRMED_NODES_BEFORE_UPGRADE=$(get_latest_confirmed)
info "Before upgrade: $NUM_CONFIRMED_NODES_BEFORE_UPGRADE"
# Wait for CHILD_CHAIN_RPC_URL to be available
# * Essential migration sub step * This is technically essential to the migration, but doesn't usually take long and shouldn't need to be programatically determined during a live migration.
while ! curl -s "$CHILD_CHAIN_RPC_URL" > /dev/null; do
echo "Waiting for $CHILD_CHAIN_RPC_URL to be available..."
sleep 5
done
# Echo for debugging
echo "Adding child chain upgrade executor as an L2 chain owner"
# This step is done for the purposes of the test, as there should already be an upgrade executor on the child chain that is a chain owner
cast send 0x0000000000000000000000000000000000000070 'addChainOwner(address)' "$CHILD_CHAIN_UPGRADE_EXECUTOR_ADDRESS" --rpc-url "$CHILD_CHAIN_RPC_URL" --private-key "$PRIVATE_KEY"
cd "$ORBIT_ACTIONS_DIR"
# Grab the pre-upgrade ArbOS version for testing.
ARBOS_VERSION_BEFORE_UPGRADE=$(cast call "0x0000000000000000000000000000000000000064" "arbOSVersion()(uint64)" --rpc-url "$CHILD_CHAIN_RPC_URL")
# Use the Upgrade executor on the child chain to execute the ArbOS upgrade to signify that the node is now operating in espresso mode. This is essential for the migration.
# ** Essential migration step ** This step can technically be done before all of the others as it is just scheduling the ArbOS upgrade. The unix timestamp at which the upgrade occurs can be determined by operators, but for the purposes of the test we use 0 to upgrade immediately.
cast send "$CHILD_CHAIN_UPGRADE_EXECUTOR_ADDRESS" "execute(address, bytes)" "$ARBOS_UPGRADE_ACTION" "$(cast calldata "perform()")" --rpc-url "$CHILD_CHAIN_RPC_URL" --private-key "$PRIVATE_KEY"
cd "$TEST_DIR"
# write tee verifier address into chain config
# Pass the address as a proper jq variable (quoted, --arg before the
# redirection); plain `=` assignment replaces `|=` with a constant RHS,
# which is equivalent but misleading.
jq --arg tee_addr "$ESPRESSO_TEE_VERIFIER_ADDRESS" '.arbitrum.EspressoTEEVerifierAddress = $tee_addr' test-chain-config.json > sent-chain-config.json
CHAIN_CONFIG=$(cat sent-chain-config.json)
cast send "$CHILD_CHAIN_UPGRADE_EXECUTOR_ADDRESS" "$(cast calldata "executeCall(address, bytes)" "0x0000000000000000000000000000000000000070" "$(cast calldata "setChainConfig(string)" "$CHAIN_CONFIG")")" --rpc-url "$CHILD_CHAIN_RPC_URL" --private-key "$PRIVATE_KEY"
# Set the chain config
# Check the upgrade happened
# Grab the post upgrade ArbOS version.
ARBOS_VERSION_AFTER_UPGRADE=$(cast call "0x0000000000000000000000000000000000000064" "arbOSVersion()(uint64)" --rpc-url "$CHILD_CHAIN_RPC_URL")
# Wait to observe the ArbOS version update, failing after a 10-minute cap
# instead of looping forever (the original loop had no timeout).
UPGRADE_WAIT_START=$SECONDS
while [ "$ARBOS_VERSION_BEFORE_UPGRADE" == "$ARBOS_VERSION_AFTER_UPGRADE" ]
do
if (( SECONDS - UPGRADE_WAIT_START > 600 )); then
fail "Timed out waiting for ArbOS version to change from $ARBOS_VERSION_BEFORE_UPGRADE"
fi
sleep 5
ARBOS_VERSION_AFTER_UPGRADE=$(cast call "0x0000000000000000000000000000000000000064" "arbOSVersion()(uint64)" --rpc-url "$CHILD_CHAIN_RPC_URL")
done
# We are upgrading the ArbOS version to 35 so we expect the return value to be 55 + 35 = 90
if [ "$ARBOS_VERSION_AFTER_UPGRADE" != "90" ]; then
fail "ArbOS version not updated: Expected 90, Actual $ARBOS_VERSION_AFTER_UPGRADE"
fi
# Check for balance before transfer.
# The following sequence is to check that transactions are still successfully being sequenced on the L2
ORIGINAL_OWNER_BALANCE=$(cast balance "$OWNER_ADDRESS" -e --rpc-url "$CHILD_CHAIN_RPC_URL")
# Send 1 eth as the owner
RECIPIENT_ADDRESS=0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
BALANCE_ORIG=$(cast balance "$RECIPIENT_ADDRESS" -e --rpc-url "$CHILD_CHAIN_RPC_URL")
cast send "$RECIPIENT_ADDRESS" --value 1ether --rpc-url "$CHILD_CHAIN_RPC_URL" --private-key "$PRIVATE_KEY"
# Get the new balance after the transfer.
BALANCE_NEW=$(cast balance "$RECIPIENT_ADDRESS" -e --rpc-url "$CHILD_CHAIN_RPC_URL")
# Assertion that balance should have changed.
if [ "$BALANCE_NEW" == "$BALANCE_ORIG" ]; then
fail "Balance of $RECIPIENT_ADDRESS should have changed but remained: $BALANCE_ORIG"
fi
# Echo successful balance update
echo "Balance of $RECIPIENT_ADDRESS changed from $BALANCE_ORIG to $BALANCE_NEW"
info Check that the staker is making progress after the upgrade
echo
START=$SECONDS
echo "Waiting for confirmed nodes."
# Poll until the latest confirmed node/assertion differs from the pre-upgrade
# value, proving the staker is still confirming after the migration.
# NOTE(review): this loop has no timeout — the test hangs forever if the
# staker stalls; consider adding a cap like the ArbOS wait above.
while [ "$NUM_CONFIRMED_NODES_BEFORE_UPGRADE" == "$(get_latest_confirmed)" ]; do
sleep 5
echo "Waited $(( SECONDS - START )) seconds for confirmed nodes. $NUM_CONFIRMED_NODES_BEFORE_UPGRADE"
done
# Echo to confirm that stakers are behaving normally.
echo "Confirmed nodes have progressed"
# Echo to signal that test has been successful
echo "Migration successfully completed!"
# Tear down the docker compose project now that the test has passed.
docker compose down