From 3be1bd2904f42b0efbb7b802f9eee41141c1af61 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Mon, 25 Aug 2025 12:25:48 +0000 Subject: [PATCH 01/10] Test batcher restart in enclave --- espresso/devnet-tests/batcher_restart_test.go | 13 +++++++++++++ espresso/devnet-tests/devnet_tools.go | 10 ++++++++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/espresso/devnet-tests/batcher_restart_test.go b/espresso/devnet-tests/batcher_restart_test.go index 2088f317e880a..729d129752c1e 100644 --- a/espresso/devnet-tests/batcher_restart_test.go +++ b/espresso/devnet-tests/batcher_restart_test.go @@ -6,9 +6,20 @@ import ( "github.com/ethereum/go-ethereum" "github.com/stretchr/testify/require" + + env "github.com/ethereum-optimism/optimism/espresso/environment" ) func TestBatcherRestart(t *testing.T) { + testRestart(t, false) +} + +func TestEnclaveRestart(t *testing.T) { + env.RunOnlyWithEnclave(t) + testRestart(t, true) +} + +func testRestart(t *testing.T, tee bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -18,6 +29,8 @@ func TestBatcherRestart(t *testing.T) { require.NoError(t, d.Down()) }() + d.tee = tee + // Send a transaction just to check that everything has started up ok. require.NoError(t, d.RunSimpleL2Burn()) diff --git a/espresso/devnet-tests/devnet_tools.go b/espresso/devnet-tests/devnet_tools.go index c83aab3c9775f..32f7a4c9df858 100644 --- a/espresso/devnet-tests/devnet_tools.go +++ b/espresso/devnet-tests/devnet_tools.go @@ -34,6 +34,7 @@ import ( type Devnet struct { ctx context.Context + tee bool secrets secrets.Secrets outageTime time.Duration successTime time.Duration @@ -106,10 +107,15 @@ func (d *Devnet) Up() (err error) { // up any existing state. 
return fmt.Errorf("devnet is already running, this should be a clean state; please shut it down first") } - + var profile string + if d.tee { + profile = "tee" + } else { + profile = "default" + } cmd := exec.CommandContext( d.ctx, - "docker", "compose", "up", "-d", + "docker", "compose", "--profile", profile, "up", "-d", ) cmd.Env = append( cmd.Env, From 61766cc57aa12b4a3496517bb093375a959d888d Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Mon, 29 Sep 2025 17:57:12 +0200 Subject: [PATCH 02/10] Support differing container names in profiles --- espresso/devnet-tests/batcher_restart_test.go | 4 +- espresso/devnet-tests/devnet_tools.go | 141 +++- espresso/devnet-tests/devnet_tools.go.orig | 764 ++++++++++++++++++ espresso/devnet-tests/key_rotation_test.go | 4 +- 4 files changed, 887 insertions(+), 26 deletions(-) create mode 100644 espresso/devnet-tests/devnet_tools.go.orig diff --git a/espresso/devnet-tests/batcher_restart_test.go b/espresso/devnet-tests/batcher_restart_test.go index 729d129752c1e..6193c8935e4bf 100644 --- a/espresso/devnet-tests/batcher_restart_test.go +++ b/espresso/devnet-tests/batcher_restart_test.go @@ -35,7 +35,7 @@ func testRestart(t *testing.T, tee bool) { require.NoError(t, d.RunSimpleL2Burn()) // Shut down the batcher and have another transaction submitted while it is down. - require.NoError(t, d.ServiceDown("op-batcher")) + require.NoError(t, d.ServiceDown(ServiceBatcher)) d.SleepOutageDuration() receipt, err := d.SubmitSimpleL2Burn() @@ -48,7 +48,7 @@ func testRestart(t *testing.T, tee bool) { // Bring the batcher back up and check that it processes the transaction which was submitted // while it was down. - require.NoError(t, d.ServiceUp("op-batcher")) + require.NoError(t, d.ServiceUp(ServiceBatcher)) require.NoError(t, d.VerifySimpleL2Burn(receipt)) // Submit another transaction at the end just to check that things stay working. 
diff --git a/espresso/devnet-tests/devnet_tools.go b/espresso/devnet-tests/devnet_tools.go index 32f7a4c9df858..016a36288c519 100644 --- a/espresso/devnet-tests/devnet_tools.go +++ b/espresso/devnet-tests/devnet_tools.go @@ -32,6 +32,82 @@ import ( "github.com/ethereum-optimism/optimism/op-e2e/config/secrets" ) +type Service uint64 + +const ( + ServiceBatcher Service = iota + ServiceCaffNode + ServiceCaffNodeGeth + ServiceChallenger + ServiceDevNode + ServiceHTTPProxy + ServiceL1Beacon + ServiceL1DataInit + ServiceL1Genesis + ServiceL1Geth + ServiceL1Validator + ServiceL2Genesis + ServiceL2Rollup + ServiceL2Seq + ServiceL2SeqGeth + ServiceL2Verif + ServiceL2VerifGeth + ServiceProposer +) + +const ( + ProfileDefault = "default" + ProfileTee = "tee" +) + +type Profile struct { + BatcherService string + ProposerService string +} + +// modifyDefaultProfile creates a new profile based on the default profile with specified modifications +func modifyDefaultProfile(modifications map[Service]string) map[Service]string { + result := make(map[Service]string) + // Copy all services from default profile + for service, container := range defaultProfile { + result[service] = container + } + // Apply modifications + for service, container := range modifications { + result[service] = container + } + return result +} + +var defaultProfile = map[Service]string{ + ServiceBatcher: "op-batcher", + ServiceCaffNode: "caff-node", + ServiceCaffNodeGeth: "op-geth-caff-node", + ServiceChallenger: "op-challenger", + ServiceDevNode: "espresso-dev-node", + ServiceHTTPProxy: "http-proxy", + ServiceL1Beacon: "l1-beacon", + ServiceL1DataInit: "l1-data-init", + ServiceL1Genesis: "l1-genesis", + ServiceL1Geth: "l1-geth", + ServiceL1Validator: "l1-validator", + ServiceL2Genesis: "l2-genesis", + ServiceL2Rollup: "l2-rollup", + ServiceL2Seq: "op-node-sequencer", + ServiceL2SeqGeth: "op-geth-sequencer", + ServiceL2Verif: "op-node-verifier", + ServiceL2VerifGeth: "op-geth-verifier", + ServiceProposer: 
"op-proposer", +} + +var Profiles = map[string]map[Service]string{ + ProfileDefault: defaultProfile, + ProfileTee: modifyDefaultProfile(map[Service]string{ + ServiceBatcher: "op-batcher-tee", + ServiceProposer: "op-proposer-tee", + }), +} + type Devnet struct { ctx context.Context tee bool @@ -98,6 +174,27 @@ func (d *Devnet) isRunning() bool { return len(out) > 0 } +// getProfile returns the current profile name based on devnet configuration +func (d *Devnet) getProfile() string { + if d.tee { + return ProfileTee + } + return ProfileDefault +} + +// getServiceName returns the container name for a given service in the current profile +func (d *Devnet) getServiceName(service Service) string { + profile := d.getProfile() + if container, ok := Profiles[profile][service]; ok { + return container + } + // Fall back to default profile if service not found + if container, ok := Profiles[ProfileDefault][service]; ok { + return container + } + return "" +} + func (d *Devnet) Up() (err error) { if d.isRunning() { if err := d.Down(); err != nil { @@ -107,12 +204,9 @@ func (d *Devnet) Up() (err error) { // up any existing state. return fmt.Errorf("devnet is already running, this should be a clean state; please shut it down first") } - var profile string - if d.tee { - profile = "tee" - } else { - profile = "default" - } + + profile := d.getProfile() + cmd := exec.CommandContext( d.ctx, "docker", "compose", "--profile", profile, "up", "-d", @@ -150,7 +244,7 @@ func (d *Devnet) Up() (err error) { // Stream logs to stdout while the test runs. This goroutine will automatically exit when // the context is cancelled. go func() { - cmd = exec.CommandContext(d.ctx, "docker", "compose", "logs", "-f") + cmd = exec.CommandContext(d.ctx, "docker", "compose", "--profile", profile, "logs", "-f") cmd.Stdout = os.Stdout // We don't care about the error return of this command, since it's always going to be // killed by the context cancellation. 
@@ -159,24 +253,24 @@ func (d *Devnet) Up() (err error) { } // Open RPC clients for the different nodes. - d.L2Seq, err = d.serviceClient("op-geth-sequencer", 8546) + d.L2Seq, err = d.serviceClient(d.getServiceName(ServiceL2SeqGeth), 8546) if err != nil { return err } - d.L2SeqRollup, err = d.rollupClient("op-node-sequencer", 9545) + d.L2SeqRollup, err = d.rollupClient(d.getServiceName(ServiceL2Seq), 9545) if err != nil { return err } - d.L2Verif, err = d.serviceClient("op-geth-verifier", 8546) + d.L2Verif, err = d.serviceClient(d.getServiceName(ServiceL2VerifGeth), 8546) if err != nil { return err } - d.L2VerifRollup, err = d.rollupClient("op-node-verifier", 9546) + d.L2VerifRollup, err = d.rollupClient(d.getServiceName(ServiceL2Verif), 9546) if err != nil { return err } - d.L1, err = d.serviceClient("l1-geth", 8545) + d.L1, err = d.serviceClient(d.getServiceName(ServiceL1Geth), 8545) if err != nil { return err } @@ -184,25 +278,27 @@ func (d *Devnet) Up() (err error) { return nil } -func (d *Devnet) ServiceUp(service string) error { - log.Info("bringing up service", "service", service) +func (d *Devnet) ServiceUp(service Service) error { + serviceName := d.getServiceName(service) + log.Info("bringing up service", "service", serviceName) cmd := exec.CommandContext( d.ctx, - "docker", "compose", "up", "-d", service, + "docker", "compose", "--profile", d.getProfile(), "up", "-d", serviceName, ) return cmd.Run() } -func (d *Devnet) ServiceDown(service string) error { - log.Info("shutting down service", "service", service) +func (d *Devnet) ServiceDown(service Service) error { + serviceName := d.getServiceName(service) + log.Info("shutting down service", "service", serviceName) cmd := exec.CommandContext( d.ctx, - "docker", "compose", "down", service, + "docker", "compose", "--profile", d.getProfile(), "down", serviceName, ) return cmd.Run() } -func (d *Devnet) ServiceRestart(service string) error { +func (d *Devnet) ServiceRestart(service Service) error { if err := 
d.ServiceDown(service); err != nil { return err } @@ -428,7 +524,7 @@ func (d *Devnet) Down() error { // Use timeout flag for faster Docker shutdown cmd := exec.CommandContext( d.ctx, - "docker", "compose", "down", "-v", "--remove-orphans", "--timeout", "10", + "docker", "compose", "--profile", d.getProfile(), "down", "-v", "--remove-orphans", "--timeout", "10", ) return cmd.Run() } @@ -574,7 +670,8 @@ func (d *Devnet) OpChallengerOutput(opts ...string) (string, error) { } func (d *Devnet) opChallengerCmd(opts ...string) *exec.Cmd { - opts = append([]string{"compose", "exec", "op-challenger", "entrypoint.sh", "op-challenger"}, opts...) + serviceName := d.getServiceName(ServiceChallenger) + opts = append([]string{"compose", "--profile", d.getProfile(), "exec", serviceName, "entrypoint.sh", "op-challenger"}, opts...) cmd := exec.CommandContext( d.ctx, "docker", @@ -594,7 +691,7 @@ func (d *Devnet) hostPort(service string, privatePort uint16) (uint16, error) { errBuf := new(bytes.Buffer) cmd := exec.CommandContext( d.ctx, - "docker", "compose", "port", service, fmt.Sprint(privatePort), + "docker", "compose", "--profile", d.getProfile(), "port", service, fmt.Sprint(privatePort), ) cmd.Stdout = buf cmd.Stderr = errBuf diff --git a/espresso/devnet-tests/devnet_tools.go.orig b/espresso/devnet-tests/devnet_tools.go.orig new file mode 100644 index 0000000000000..d52972c602d19 --- /dev/null +++ b/espresso/devnet-tests/devnet_tools.go.orig @@ -0,0 +1,764 @@ +package devnet_tests + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "io" + "math/big" + "os" + "os/exec" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-e2e/bindings" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" + "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" + "github.com/ethereum-optimism/optimism/op-node/rollup" + opclient "github.com/ethereum-optimism/optimism/op-service/client" + 
"github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + + env "github.com/ethereum-optimism/optimism/espresso/environment" + "github.com/ethereum-optimism/optimism/op-e2e/config/secrets" +) + +type Service uint64 + +const ( + ServiceBatcher Service = iota + ServiceCaffNode + ServiceCaffNodeGeth + ServiceChallenger + ServiceDevNode + ServiceHTTPProxy + ServiceL1Beacon + ServiceL1DataInit + ServiceL1Genesis + ServiceL1Geth + ServiceL1Validator + ServiceL2Genesis + ServiceL2Rollup + ServiceL2Seq + ServiceL2SeqGeth + ServiceL2Verif + ServiceL2VerifGeth + ServiceProposer +) + +const ( + ProfileDefault = "default" + ProfileTee = "tee" +) + +type Profile struct { + BatcherService string + ProposerService string +} + +// modifyDefaultProfile creates a new profile based on the default profile with specified modifications +func modifyDefaultProfile(modifications map[Service]string) map[Service]string { + result := make(map[Service]string) + // Copy all services from default profile + for service, container := range defaultProfile { + result[service] = container + } + // Apply modifications + for service, container := range modifications { + result[service] = container + } + return result +} + +var defaultProfile = map[Service]string{ + ServiceBatcher: "op-batcher", + ServiceCaffNode: "caff-node", + ServiceCaffNodeGeth: "op-geth-caff-node", + ServiceChallenger: "op-challenger", + ServiceDevNode: "espresso-dev-node", + ServiceHTTPProxy: "http-proxy", + ServiceL1Beacon: "l1-beacon", + ServiceL1DataInit: "l1-data-init", + ServiceL1Genesis: "l1-genesis", + ServiceL1Geth: "l1-geth", + ServiceL1Validator: "l1-validator", + ServiceL2Genesis: "l2-genesis", + ServiceL2Rollup: "l2-rollup", + ServiceL2Seq: 
"op-node-sequencer", + ServiceL2SeqGeth: "op-geth-sequencer", + ServiceL2Verif: "op-node-verifier", + ServiceL2VerifGeth: "op-geth-verifier", + ServiceProposer: "op-proposer", +} + +var Profiles = map[string]map[Service]string{ + ProfileDefault: defaultProfile, + ProfileTee: modifyDefaultProfile(map[Service]string{ + ServiceBatcher: "op-batcher-tee", + ServiceProposer: "op-proposer-tee", + }), +} + +type Devnet struct { + ctx context.Context + tee bool + secrets secrets.Secrets + outageTime time.Duration + successTime time.Duration + L1 *ethclient.Client + L2Seq *ethclient.Client + L2SeqRollup *sources.RollupClient + L2Verif *ethclient.Client + L2VerifRollup *sources.RollupClient +} + +func NewDevnet(ctx context.Context, t *testing.T) *Devnet { + + if testing.Short() { + t.Skip("skipping devnet test in short mode") + } + + d := new(Devnet) + d.ctx = ctx + + mnemonics := *secrets.DefaultMnemonicConfig + mnemonics.Batcher = "m/44'/60'/0'/0/0" + secrets, err := mnemonics.Secrets() + if err != nil { + panic(fmt.Sprintf("failed to create default secrets: %e", err)) + } + d.secrets = *secrets + + if outageTime, ok := os.LookupEnv("ESPRESSO_DEVNET_TESTS_OUTAGE_PERIOD"); ok { + d.outageTime, err = time.ParseDuration(outageTime) + if err != nil { + panic(fmt.Sprintf("invalid value for ESPRESSO_DEVNET_TESTS_OUTAGE_PERIOD: %e", err)) + } + } else { + d.outageTime = 10 * time.Second + } + if successTime, ok := os.LookupEnv("ESPRESSO_DEVNET_TESTS_LIVENESS_PERIOD"); ok { + d.successTime, err = time.ParseDuration(successTime) + if err != nil { + panic(fmt.Sprintf("invalid value for ESPRESSO_DEVNET_TESTS_LIVENESS_PERIOD: %e", err)) + } + } else { + d.successTime = 10 * time.Second + } + + return d + +} + +func (d *Devnet) isRunning() bool { + cmd := exec.CommandContext( + d.ctx, + "docker", "compose", "ps", "-q", + ) + buf := new(bytes.Buffer) + cmd.Stdout = buf + if err := cmd.Run(); err != nil { + log.Error("failed to check if devnet is running", "error", err) + return false + } 
+ out := strings.TrimSpace(buf.String()) + return len(out) > 0 +} + +<<<<<<< HEAD +func (d *Devnet) Up() (err error) { + if d.isRunning() { + if err := d.Down(); err != nil { + return err + } + // Let's shutdown the devnet before returning an error, just to clean + // up any existing state. + return fmt.Errorf("devnet is already running, this should be a clean state; please shut it down first") + } + var profile string +||||||| parent of 3a01ee2cf (Support differing container names in profiles) +func (d *Devnet) Up() (err error) { + var profile string +======= +// getProfile returns the current profile name based on devnet configuration +func (d *Devnet) getProfile() string { +>>>>>>> 3a01ee2cf (Support differing container names in profiles) + if d.tee { + return ProfileTee + } +<<<<<<< HEAD +||||||| parent of 3a01ee2cf (Support differing container names in profiles) + +======= + return ProfileDefault +} + +// getServiceName returns the container name for a given service in the current profile +func (d *Devnet) getServiceName(service Service) string { + profile := d.getProfile() + if container, ok := Profiles[profile][service]; ok { + return container + } + // Fall back to default profile if service not found + if container, ok := Profiles[ProfileDefault][service]; ok { + return container + } + return "" +} + +func (d *Devnet) Up() (err error) { + profile := d.getProfile() + +>>>>>>> 3a01ee2cf (Support differing container names in profiles) + cmd := exec.CommandContext( + d.ctx, + "docker", "compose", "--profile", profile, "up", "-d", + ) + cmd.Env = append( + cmd.Env, + fmt.Sprintf("OP_BATCHER_PRIVATE_KEY=%s", hex.EncodeToString(crypto.FromECDSA(d.secrets.Batcher))), + ) + buf := new(bytes.Buffer) + cmd.Stderr = buf + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to start docker compose (%w): %s", err, buf.String()) + } + + // Shut down the now-running devnet if we exit this function with an error (in which case the + // caller expects the devnet 
not to be running and will not be responsible for shutting it down + // themselves). + defer func() { + if err != nil { + if downErr := d.Down(); downErr != nil { + log.Error("error shutting down devnet after encountering another error", "error", downErr) + } + } + }() + + // Shut down the devnet automatically when the lifetime of the context ends. + go func() { + <-d.ctx.Done() + if err := d.Down(); err != nil { + log.Error("error shutting down devnet asynchronously", "error", err) + } + }() + + if testing.Verbose() { + // Stream logs to stdout while the test runs. This goroutine will automatically exit when + // the context is cancelled. + go func() { + cmd = exec.CommandContext(d.ctx, "docker", "compose", "--profile", profile, "logs", "-f") + cmd.Stdout = os.Stdout + // We don't care about the error return of this command, since it's always going to be + // killed by the context cancellation. + _ = cmd.Run() + }() + } + + // Open RPC clients for the different nodes. + d.L2Seq, err = d.serviceClient(d.getServiceName(ServiceL2SeqGeth), 8546) + if err != nil { + return err + } + d.L2SeqRollup, err = d.rollupClient(d.getServiceName(ServiceL2Seq), 9545) + if err != nil { + return err + } + d.L2Verif, err = d.serviceClient(d.getServiceName(ServiceL2VerifGeth), 8546) + if err != nil { + return err + } + d.L2VerifRollup, err = d.rollupClient(d.getServiceName(ServiceL2Verif), 9546) + if err != nil { + return err + } +<<<<<<< HEAD + + d.L1, err = d.serviceClient("l1-geth", 8545) +||||||| parent of 3a01ee2cf (Support differing container names in profiles) + d.L1, err = d.serviceClient("l1-geth", 8545) +======= + d.L1, err = d.serviceClient(d.getServiceName(ServiceL1Geth), 8545) +>>>>>>> 3a01ee2cf (Support differing container names in profiles) + if err != nil { + return err + } + + return nil +} + +func (d *Devnet) ServiceUp(service Service) error { + serviceName := d.getServiceName(service) + log.Info("bringing up service", "service", serviceName) + cmd := 
exec.CommandContext( + d.ctx, + "docker", "compose", "--profile", d.getProfile(), "up", "-d", serviceName, + ) + return cmd.Run() +} + +func (d *Devnet) ServiceDown(service Service) error { + serviceName := d.getServiceName(service) + log.Info("shutting down service", "service", serviceName) + cmd := exec.CommandContext( + d.ctx, + "docker", "compose", "--profile", d.getProfile(), "down", serviceName, + ) + return cmd.Run() +} + +func (d *Devnet) ServiceRestart(service Service) error { + if err := d.ServiceDown(service); err != nil { + return err + } + if err := d.ServiceUp(service); err != nil { + return err + } + return nil +} + +func (d *Devnet) RollupConfig(ctx context.Context) (*rollup.Config, error) { + return d.L2SeqRollup.RollupConfig(ctx) +} + +func (d *Devnet) SystemConfig(ctx context.Context) (*bindings.SystemConfig, *bind.TransactOpts, error) { + config, err := d.RollupConfig(ctx) + if err != nil { + return nil, nil, err + } + contract, err := bindings.NewSystemConfig(config.L1SystemConfigAddress, d.L1) + if err != nil { + return nil, nil, err + } + + owner, err := bind.NewKeyedTransactorWithChainID(d.secrets.Deployer, config.L1ChainID) + if err != nil { + return nil, nil, err + } + + return contract, owner, nil +} + +// Submits a transaction and waits until it is confirmed by the sequencer (but not necessarily the verifier). 
+func (d *Devnet) SubmitL2Tx(applyTxOpts helpers.TxOptsFn) (*types.Receipt, error) { + ctx, cancel := context.WithTimeout(d.ctx, 3*time.Minute) + defer cancel() + + chainID, err := d.L2Seq.ChainID(ctx) + if err != nil { + return nil, err + } + + privKey := d.secrets.Alice + address := crypto.PubkeyToAddress(privKey.PublicKey) + balance, err := d.L2Seq.BalanceAt(ctx, address, nil) + if err != nil { + return nil, fmt.Errorf("getting initial sender balance: %w", err) + } + if balance.Cmp(big.NewInt(0)) <= 0 { + return nil, fmt.Errorf("sender account empty") + } + nonce, err := d.L2Seq.NonceAt(ctx, address, nil) + if err != nil { + return nil, fmt.Errorf("error getting nonce: %w", err) + } + log.Debug("sender wallet", "private key", privKey, "address", address, "balance", balance, "nonce", nonce) + + opts := &helpers.TxOpts{ + ToAddr: nil, + Nonce: nonce, + Value: common.Big0, + GasTipCap: big.NewInt(10), + GasFeeCap: big.NewInt(1000000000), + Gas: 21_000, + Data: nil, + ExpectedStatus: types.ReceiptStatusSuccessful, + } + applyTxOpts(opts) + + tx := types.MustSignNewTx(privKey, types.LatestSignerForChainID(chainID), &types.DynamicFeeTx{ + ChainID: chainID, + Nonce: opts.Nonce, + To: opts.ToAddr, + Value: opts.Value, + GasTipCap: opts.GasTipCap, + GasFeeCap: opts.GasFeeCap, + Gas: opts.Gas, + Data: opts.Data, + }) + log.Info("send transaction", "from", address, "hash", tx.Hash()) + if err := d.L2Seq.SendTransaction(ctx, tx); err != nil { + return nil, fmt.Errorf("sending L2 tx: %w", err) + } + + receipt, err := wait.ForReceiptOK(ctx, d.L2Seq, tx.Hash()) + if err != nil { + return nil, fmt.Errorf("waiting for L2 tx: %w", err) + } + if opts.ExpectedStatus != receipt.Status { + return nil, fmt.Errorf("wrong status: have %d, want %d", receipt.Status, opts.ExpectedStatus) + } + + log.Info("submitted transaction to sequencer", "hash", tx.Hash(), "receipt", receipt) + + return receipt, nil +} + +// Waits for a previously submitted transaction to be confirmed by the verifier. 
+func (d *Devnet) VerifyL2Tx(receipt *types.Receipt) error { + ctx, cancel := context.WithTimeout(d.ctx, 2*time.Minute) + defer cancel() + + log.Info("waiting for transaction verification", "hash", receipt.TxHash) + verified, err := wait.ForReceiptOK(ctx, d.L2Verif, receipt.TxHash) + if err != nil { + return fmt.Errorf("waiting for L2 tx on verification client: %w", err) + } + if !reflect.DeepEqual(receipt, verified) { + return fmt.Errorf("verification client returned incorrect receipt\nSeq: %v\nVerif: %v", receipt, verified) + } + return nil +} + +// Submits a transaction and waits for it to be verified. +func (d *Devnet) RunL2Tx(applyTxOpts helpers.TxOptsFn) error { + receipt, err := d.SubmitL2Tx(applyTxOpts) + if err != nil { + return err + } + return d.VerifyL2Tx(receipt) +} + +func (d *Devnet) SendL1Tx(ctx context.Context, tx *types.Transaction) (*types.Receipt, error) { + err := d.L1.SendTransaction(ctx, tx) + if err != nil { + return nil, err + } + + return wait.ForReceiptOK(ctx, d.L1, tx.Hash()) +} + +type BurnReceipt struct { + InitialBurnBalance *big.Int + BurnAmount *big.Int + BurnAddress common.Address + Receipt *types.Receipt +} + +// Submits a burn transaction and waits until it is confirmed by the sequencer (but not necessarily the verifier). 
+func (d *Devnet) SubmitSimpleL2Burn() (*BurnReceipt, error) { + var err error + + receipt := new(BurnReceipt) + receipt.BurnAddress = common.Address{0xff, 0xff} + receipt.BurnAmount = big.NewInt(1) + + receipt.InitialBurnBalance, err = d.L2Verif.BalanceAt(d.ctx, receipt.BurnAddress, nil) + if err != nil { + return nil, fmt.Errorf("getting initial burn address balance: %w", err) + } + + tx := env.L2TxWithOptions( + env.L2TxWithAmount(receipt.BurnAmount), + env.L2TxWithToAddress(&receipt.BurnAddress), + env.L2TxWithVerifyOnClients(d.L2Verif), + ) + if receipt.Receipt, err = d.SubmitL2Tx(tx); err != nil { + return nil, err + } + return receipt, nil +} + +// Waits for a previously submitted burn transaction to be confirmed by the verifier. +func (d *Devnet) VerifySimpleL2Burn(receipt *BurnReceipt) error { + ctx, cancel := context.WithTimeout(d.ctx, 2*time.Minute) + defer cancel() + + if err := d.VerifyL2Tx(receipt.Receipt); err != nil { + return err + } + + // Check the balance of the burn address using the L2 Verifier + final, err := wait.ForBalanceChange(ctx, d.L2Verif, receipt.BurnAddress, receipt.InitialBurnBalance) + if err != nil { + return fmt.Errorf("waiting for balance change for burn address %s: %w", receipt.BurnAddress, err) + } + balanceBurned := new(big.Int).Sub(final, receipt.InitialBurnBalance) + if balanceBurned.Cmp(receipt.BurnAmount) != 0 { + return fmt.Errorf("incorrect amount burned (have %s, want %s)", balanceBurned, receipt.BurnAmount) + } + + return nil +} + +// RunSimpleL2Burn runs a simple L2 burn transaction and verifies it on the L2 Verifier. +func (d *Devnet) RunSimpleL2Burn() error { + receipt, err := d.SubmitSimpleL2Burn() + if err != nil { + return err + } + return d.VerifySimpleL2Burn(receipt) +} + +// Wait for a configurable amount of time while simulating an outage. 
+func (d *Devnet) SleepOutageDuration() { + log.Info("sleeping during simulated outage", "duration", d.outageTime) + time.Sleep(d.outageTime) +} + +// Wait for a configurable amount of time before considering a run a success. +func (d *Devnet) SleepRecoveryDuration() { + log.Info("sleeping to check that things stay working", "duration", d.successTime) + time.Sleep(d.successTime) +} + +func (d *Devnet) Down() error { + + if d.L1 != nil { + d.L1.Close() + } + if d.L2Seq != nil { + d.L2Seq.Close() + } + if d.L2SeqRollup != nil { + d.L2SeqRollup.Close() + } + if d.L2Verif != nil { + d.L2Verif.Close() + } + if d.L2VerifRollup != nil { + d.L2VerifRollup.Close() + } + + // Use timeout flag for faster Docker shutdown + cmd := exec.CommandContext( + d.ctx, +<<<<<<< HEAD + "docker", "compose", "down", "-v", "--remove-orphans", "--timeout", "10", +||||||| parent of 3a01ee2cf (Support differing container names in profiles) + "docker", "compose", "down", "-v", "--remove-orphans", +======= + "docker", "compose", "--profile", d.getProfile(), "down", "-v", "--remove-orphans", +>>>>>>> 3a01ee2cf (Support differing container names in profiles) + ) + return cmd.Run() +} + +type TaggedWriter struct { + inner io.Writer + tag string + newline bool +} + +func NewTaggedWriter(tag string, inner io.Writer) *TaggedWriter { + return &TaggedWriter{ + inner: inner, + tag: tag, + newline: true, + } +} + +// Implementation of io.Write interface for TaggedWriter. +// Allows to prepend a tag to each line of output. +// The `p` parameter is the tag to add at the beginning of each line. +func (w *TaggedWriter) Write(p []byte) (int, error) { + if w.newline { + if _, err := fmt.Fprintf(w.inner, "%s | ", w.tag); err != nil { + return 0, err + } + w.newline = false + } + + written := 0 + for i := range len(p) { + // Buffer bytes until we hit a newline. + if p[i] == '\n' { + // Print everything we've buffered up to and including the newline. 
+ line := p[written : i+1] + n, err := w.inner.Write(line) + written += n + if err != nil || n < len(line) { + return written, err + } + + // If that's the end of the output, return, but make a note that the buffer ended with a + // newline and we need to print the tag before the next message. + if written == len(p) { + w.newline = true + return written, nil + } + + // Otherwise print the tag now before proceeding with the next line in `p`. + if _, err := fmt.Fprintf(w.inner, "%s | ", w.tag); err != nil { + return written, err + } + } + } + + // Print anything that was buffered after the final newline. + if written < len(p) { + line := p[written:] + n, err := w.inner.Write(line) + written += n + if err != nil || n < len(line) { + return written, err + } + } + + return written, nil +} + +func (d *Devnet) OpChallenger(opts ...string) error { + return d.opChallengerCmd(opts...).Run() +} + +type ChallengeGame struct { + Index uint64 + Address common.Address + OutputRoot []byte + Claims uint64 +} + +func ParseChallengeGame(s string) (ChallengeGame, error) { + fields := strings.Fields(s) + if len(fields) < 8 { + return ChallengeGame{}, fmt.Errorf("challenge game is missing fields; expected at least 8 but got only %v", len(fields)) + } + + index, err := strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return ChallengeGame{}, fmt.Errorf("index invalid: %w", err) + } + + address := common.HexToAddress(fields[1]) + + outputRoot := common.Hex2Bytes(fields[6]) + + claims, err := strconv.ParseUint(fields[7], 10, 64) + if err != nil { + return ChallengeGame{}, fmt.Errorf("claims count invalid: %w", err) + } + + return ChallengeGame{ + Index: index, + Address: address, + OutputRoot: outputRoot, + Claims: claims, + }, nil +} + +func (d *Devnet) ListChallengeGames() ([]ChallengeGame, error) { + output, err := d.OpChallengerOutput("list-games") + if err != nil { + return nil, err + } + + var games []ChallengeGame + for i, line := range strings.Split(output, "\n") { + if i == 
0 { + // Ignore header. + continue + } + line = strings.TrimSpace(line) + if len(line) == 0 { + // Ignore empty lines (e.g. trailing newline) + continue + } + + game, err := ParseChallengeGame(line) + if err != nil { + return nil, fmt.Errorf("game %v is invalid: %w", i, err) + } + games = append(games, game) + } + return games, nil +} + +func (d *Devnet) OpChallengerOutput(opts ...string) (string, error) { + cmd := d.opChallengerCmd(opts...) + buf := new(bytes.Buffer) + cmd.Stdout = buf + if err := cmd.Run(); err != nil { + return "", err + } + return buf.String(), nil +} + +func (d *Devnet) opChallengerCmd(opts ...string) *exec.Cmd { + serviceName := d.getServiceName(ServiceChallenger) + opts = append([]string{"compose", "--profile", d.getProfile(), "exec", serviceName, "entrypoint.sh", "op-challenger"}, opts...) + cmd := exec.CommandContext( + d.ctx, + "docker", + opts..., + ) + if testing.Verbose() { + cmd.Stdout = NewTaggedWriter("op-challenger-cmd", os.Stdout) + cmd.Stderr = NewTaggedWriter("op-challenger-cmd", os.Stderr) + } + log.Info("invoking op-challenger", "cmd", cmd) + return cmd +} + +// Get the host port mapped to `privatePort` for the given Docker service. 
+func (d *Devnet) hostPort(service string, privatePort uint16) (uint16, error) { + buf := new(bytes.Buffer) + errBuf := new(bytes.Buffer) + cmd := exec.CommandContext( + d.ctx, + "docker", "compose", "--profile", d.getProfile(), "port", service, fmt.Sprint(privatePort), + ) + cmd.Stdout = buf + cmd.Stderr = errBuf + + if err := cmd.Run(); err != nil { + return 0, fmt.Errorf("command failed (%w)\nStdout: %s\nStderr: %s", err, buf.String(), errBuf.String()) + } + out := strings.TrimSpace(buf.String()) + _, portStr, found := strings.Cut(out, ":") + if !found { + return 0, fmt.Errorf("invalid output from docker port: %s (missing : separator)", out) + } + + port, err := strconv.ParseInt(portStr, 10, 32) + if err != nil { + return 0, fmt.Errorf("invalid output from docker port: %s (%w)", out, err) + } + return uint16(port), nil +} + +// Open an Ethereum RPC client for a Docker service running an RPC server on the given port. +func (d *Devnet) serviceClient(service string, port uint16) (*ethclient.Client, error) { + port, err := d.hostPort(service, port) + if err != nil { + return nil, fmt.Errorf("could not get %s port: %w", service, err) + } + client, err := ethclient.DialContext(d.ctx, fmt.Sprintf("http://127.0.0.1:%d", port)) + if err != nil { + return nil, fmt.Errorf("could not open %s RPC client: %w", service, err) + } + return client, nil +} + +func (d *Devnet) rollupClient(service string, port uint16) (*sources.RollupClient, error) { + port, err := d.hostPort(service, port) + if err != nil { + return nil, fmt.Errorf("could not get %s port: %w", service, err) + } + rpc, err := opclient.NewRPC(d.ctx, log.Root(), fmt.Sprintf("http://127.0.0.1:%d", port), opclient.WithDialAttempts(10)) + if err != nil { + return nil, fmt.Errorf("could not open %s RPC client: %w", service, err) + } + + client := sources.NewRollupClient(rpc) + return client, nil +} diff --git a/espresso/devnet-tests/key_rotation_test.go b/espresso/devnet-tests/key_rotation_test.go index 
e9d3084b751c0..281f0a4212d5f 100644 --- a/espresso/devnet-tests/key_rotation_test.go +++ b/espresso/devnet-tests/key_rotation_test.go @@ -28,7 +28,7 @@ func TestRotateBatcherKey(t *testing.T) { require.NoError(t, d.RunSimpleL2Burn()) // Shut down the batcher - require.NoError(t, d.ServiceDown("op-batcher")) + require.NoError(t, d.ServiceDown(ServiceBatcher)) d.SleepOutageDuration() // Change the batch sender key to Bob @@ -44,7 +44,7 @@ func TestRotateBatcherKey(t *testing.T) { d.secrets.Batcher = d.secrets.Bob // Restart the batcher - require.NoError(t, d.ServiceUp("op-batcher")) + require.NoError(t, d.ServiceUp(ServiceBatcher)) d.SleepOutageDuration() // Send a transaction to check the L2 still runs From ab312aeb015d93c268f0d2bd8e38f61f2f52fea0 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 15 Oct 2025 18:50:08 +0200 Subject: [PATCH 03/10] Run enclave devnet tests in CI --- espresso/scripts/run-tests-github-actions.sh | 9 ++++++++- justfile | 3 +++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/espresso/scripts/run-tests-github-actions.sh b/espresso/scripts/run-tests-github-actions.sh index fbbdb03646b82..2e4dcabca08c0 100644 --- a/espresso/scripts/run-tests-github-actions.sh +++ b/espresso/scripts/run-tests-github-actions.sh @@ -15,6 +15,13 @@ git submodule update --init --recursive # Poblate cachix cahe nix flake archive --json | jq -r '.path,(.inputs|to_entries[].value.path)' | cachix push espresso-systems-private +echo "[*] Downloading Docker Compose..." +DOCKER_PLUGINS=/usr/local/lib/docker/cli-plugins/ +sudo mkdir -p $DOCKER_PLUGINS +sudo curl -SL https://github.com/docker/compose/releases/download/v2.40.0/docker-compose-linux-x86_64 \ + -o $DOCKER_PLUGINS/docker-compose +sudo chmod +x $DOCKER_PLUGINS/docker-compose + echo "[*] Starting Docker..." 
sudo systemctl enable --now docker sudo usermod -a -G docker ec2-user @@ -28,4 +35,4 @@ sudo systemctl start nitro-enclaves-allocator.service echo "[*] Running tests in nix develop shell..." -nix develop --command bash -c "just compile-contracts-fast && just build-batcher-enclave-image && just espresso-enclave-tests" +nix develop --command bash -c "just build-devnet && just build-batcher-enclave-image && just espresso-enclave-tests && just devnet-enclave-tests" diff --git a/justfile b/justfile index 53f20fc02eb01..e74b1266a7732 100644 --- a/justfile +++ b/justfile @@ -43,6 +43,9 @@ espresso-tests timeout=espresso_tests_timeout: compile-contracts espresso-enclave-tests: ESPRESSO_RUN_ENCLAVE_TESTS=true go test -timeout={{espresso_tests_timeout}} -p=1 -count=1 ./espresso/enclave-tests/... +devnet-enclave-tests: + ESPRESSO_RUN_ENCLAVE_TESTS=true U_ID={{uid}} GID={{gid}} go test -timeout 30m -p 1 -count 1 -v -run 'TestEnclaveRestart' ./espresso/devnet-tests/... + IMAGE_NAME := "ghcr.io/espressosystems/espresso-sequencer/espresso-dev-node:release-fix-cors" remove-espresso-containers: From 1b9e42e1c249b5a08f555b9b41c779629998c6d5 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 15 Oct 2025 20:40:01 +0200 Subject: [PATCH 04/10] Don't run enclave tests in CI --- espresso/scripts/run-tests-github-actions.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/espresso/scripts/run-tests-github-actions.sh b/espresso/scripts/run-tests-github-actions.sh index 2e4dcabca08c0..7b1d276d4c294 100644 --- a/espresso/scripts/run-tests-github-actions.sh +++ b/espresso/scripts/run-tests-github-actions.sh @@ -35,4 +35,4 @@ sudo systemctl start nitro-enclaves-allocator.service echo "[*] Running tests in nix develop shell..." 
-nix develop --command bash -c "just build-devnet && just build-batcher-enclave-image && just espresso-enclave-tests && just devnet-enclave-tests" +nix develop --command bash -c "just compile-contracts-fast && just build-batcher-enclave-image && just espresso-enclave-tests" From 31b627104e0234c8242f43536ec684929eae851a Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 15 Oct 2025 21:10:24 +0200 Subject: [PATCH 05/10] Add build-devnet dependency to devnet-enclave-tests --- justfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/justfile b/justfile index e74b1266a7732..bcd25f9a03b4c 100644 --- a/justfile +++ b/justfile @@ -43,7 +43,7 @@ espresso-tests timeout=espresso_tests_timeout: compile-contracts espresso-enclave-tests: ESPRESSO_RUN_ENCLAVE_TESTS=true go test -timeout={{espresso_tests_timeout}} -p=1 -count=1 ./espresso/enclave-tests/... -devnet-enclave-tests: +devnet-enclave-tests: build-devnet ESPRESSO_RUN_ENCLAVE_TESTS=true U_ID={{uid}} GID={{gid}} go test -timeout 30m -p 1 -count 1 -v -run 'TestEnclaveRestart' ./espresso/devnet-tests/... 
From 523723d86d418b34bb65e438fd90ea60b913a526 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Wed, 15 Oct 2025 23:53:51 +0200 Subject: [PATCH 06/10] Remove mergiraf artifact --- espresso/devnet-tests/devnet_tools.go.orig | 764 --------------------- 1 file changed, 764 deletions(-) delete mode 100644 espresso/devnet-tests/devnet_tools.go.orig diff --git a/espresso/devnet-tests/devnet_tools.go.orig b/espresso/devnet-tests/devnet_tools.go.orig deleted file mode 100644 index d52972c602d19..0000000000000 --- a/espresso/devnet-tests/devnet_tools.go.orig +++ /dev/null @@ -1,764 +0,0 @@ -package devnet_tests - -import ( - "bytes" - "context" - "encoding/hex" - "fmt" - "io" - "math/big" - "os" - "os/exec" - "reflect" - "strconv" - "strings" - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-e2e/bindings" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" - "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" - "github.com/ethereum-optimism/optimism/op-node/rollup" - opclient "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/log" - - env "github.com/ethereum-optimism/optimism/espresso/environment" - "github.com/ethereum-optimism/optimism/op-e2e/config/secrets" -) - -type Service uint64 - -const ( - ServiceBatcher Service = iota - ServiceCaffNode - ServiceCaffNodeGeth - ServiceChallenger - ServiceDevNode - ServiceHTTPProxy - ServiceL1Beacon - ServiceL1DataInit - ServiceL1Genesis - ServiceL1Geth - ServiceL1Validator - ServiceL2Genesis - ServiceL2Rollup - ServiceL2Seq - ServiceL2SeqGeth - ServiceL2Verif - ServiceL2VerifGeth - ServiceProposer -) - -const ( - ProfileDefault = "default" - ProfileTee = 
"tee" -) - -type Profile struct { - BatcherService string - ProposerService string -} - -// modifyDefaultProfile creates a new profile based on the default profile with specified modifications -func modifyDefaultProfile(modifications map[Service]string) map[Service]string { - result := make(map[Service]string) - // Copy all services from default profile - for service, container := range defaultProfile { - result[service] = container - } - // Apply modifications - for service, container := range modifications { - result[service] = container - } - return result -} - -var defaultProfile = map[Service]string{ - ServiceBatcher: "op-batcher", - ServiceCaffNode: "caff-node", - ServiceCaffNodeGeth: "op-geth-caff-node", - ServiceChallenger: "op-challenger", - ServiceDevNode: "espresso-dev-node", - ServiceHTTPProxy: "http-proxy", - ServiceL1Beacon: "l1-beacon", - ServiceL1DataInit: "l1-data-init", - ServiceL1Genesis: "l1-genesis", - ServiceL1Geth: "l1-geth", - ServiceL1Validator: "l1-validator", - ServiceL2Genesis: "l2-genesis", - ServiceL2Rollup: "l2-rollup", - ServiceL2Seq: "op-node-sequencer", - ServiceL2SeqGeth: "op-geth-sequencer", - ServiceL2Verif: "op-node-verifier", - ServiceL2VerifGeth: "op-geth-verifier", - ServiceProposer: "op-proposer", -} - -var Profiles = map[string]map[Service]string{ - ProfileDefault: defaultProfile, - ProfileTee: modifyDefaultProfile(map[Service]string{ - ServiceBatcher: "op-batcher-tee", - ServiceProposer: "op-proposer-tee", - }), -} - -type Devnet struct { - ctx context.Context - tee bool - secrets secrets.Secrets - outageTime time.Duration - successTime time.Duration - L1 *ethclient.Client - L2Seq *ethclient.Client - L2SeqRollup *sources.RollupClient - L2Verif *ethclient.Client - L2VerifRollup *sources.RollupClient -} - -func NewDevnet(ctx context.Context, t *testing.T) *Devnet { - - if testing.Short() { - t.Skip("skipping devnet test in short mode") - } - - d := new(Devnet) - d.ctx = ctx - - mnemonics := *secrets.DefaultMnemonicConfig - 
mnemonics.Batcher = "m/44'/60'/0'/0/0" - secrets, err := mnemonics.Secrets() - if err != nil { - panic(fmt.Sprintf("failed to create default secrets: %e", err)) - } - d.secrets = *secrets - - if outageTime, ok := os.LookupEnv("ESPRESSO_DEVNET_TESTS_OUTAGE_PERIOD"); ok { - d.outageTime, err = time.ParseDuration(outageTime) - if err != nil { - panic(fmt.Sprintf("invalid value for ESPRESSO_DEVNET_TESTS_OUTAGE_PERIOD: %e", err)) - } - } else { - d.outageTime = 10 * time.Second - } - if successTime, ok := os.LookupEnv("ESPRESSO_DEVNET_TESTS_LIVENESS_PERIOD"); ok { - d.successTime, err = time.ParseDuration(successTime) - if err != nil { - panic(fmt.Sprintf("invalid value for ESPRESSO_DEVNET_TESTS_LIVENESS_PERIOD: %e", err)) - } - } else { - d.successTime = 10 * time.Second - } - - return d - -} - -func (d *Devnet) isRunning() bool { - cmd := exec.CommandContext( - d.ctx, - "docker", "compose", "ps", "-q", - ) - buf := new(bytes.Buffer) - cmd.Stdout = buf - if err := cmd.Run(); err != nil { - log.Error("failed to check if devnet is running", "error", err) - return false - } - out := strings.TrimSpace(buf.String()) - return len(out) > 0 -} - -<<<<<<< HEAD -func (d *Devnet) Up() (err error) { - if d.isRunning() { - if err := d.Down(); err != nil { - return err - } - // Let's shutdown the devnet before returning an error, just to clean - // up any existing state. 
- return fmt.Errorf("devnet is already running, this should be a clean state; please shut it down first") - } - var profile string -||||||| parent of 3a01ee2cf (Support differing container names in profiles) -func (d *Devnet) Up() (err error) { - var profile string -======= -// getProfile returns the current profile name based on devnet configuration -func (d *Devnet) getProfile() string { ->>>>>>> 3a01ee2cf (Support differing container names in profiles) - if d.tee { - return ProfileTee - } -<<<<<<< HEAD -||||||| parent of 3a01ee2cf (Support differing container names in profiles) - -======= - return ProfileDefault -} - -// getServiceName returns the container name for a given service in the current profile -func (d *Devnet) getServiceName(service Service) string { - profile := d.getProfile() - if container, ok := Profiles[profile][service]; ok { - return container - } - // Fall back to default profile if service not found - if container, ok := Profiles[ProfileDefault][service]; ok { - return container - } - return "" -} - -func (d *Devnet) Up() (err error) { - profile := d.getProfile() - ->>>>>>> 3a01ee2cf (Support differing container names in profiles) - cmd := exec.CommandContext( - d.ctx, - "docker", "compose", "--profile", profile, "up", "-d", - ) - cmd.Env = append( - cmd.Env, - fmt.Sprintf("OP_BATCHER_PRIVATE_KEY=%s", hex.EncodeToString(crypto.FromECDSA(d.secrets.Batcher))), - ) - buf := new(bytes.Buffer) - cmd.Stderr = buf - if err := cmd.Run(); err != nil { - return fmt.Errorf("failed to start docker compose (%w): %s", err, buf.String()) - } - - // Shut down the now-running devnet if we exit this function with an error (in which case the - // caller expects the devnet not to be running and will not be responsible for shutting it down - // themselves). 
- defer func() { - if err != nil { - if downErr := d.Down(); downErr != nil { - log.Error("error shutting down devnet after encountering another error", "error", downErr) - } - } - }() - - // Shut down the devnet automatically when the lifetime of the context ends. - go func() { - <-d.ctx.Done() - if err := d.Down(); err != nil { - log.Error("error shutting down devnet asynchronously", "error", err) - } - }() - - if testing.Verbose() { - // Stream logs to stdout while the test runs. This goroutine will automatically exit when - // the context is cancelled. - go func() { - cmd = exec.CommandContext(d.ctx, "docker", "compose", "--profile", profile, "logs", "-f") - cmd.Stdout = os.Stdout - // We don't care about the error return of this command, since it's always going to be - // killed by the context cancellation. - _ = cmd.Run() - }() - } - - // Open RPC clients for the different nodes. - d.L2Seq, err = d.serviceClient(d.getServiceName(ServiceL2SeqGeth), 8546) - if err != nil { - return err - } - d.L2SeqRollup, err = d.rollupClient(d.getServiceName(ServiceL2Seq), 9545) - if err != nil { - return err - } - d.L2Verif, err = d.serviceClient(d.getServiceName(ServiceL2VerifGeth), 8546) - if err != nil { - return err - } - d.L2VerifRollup, err = d.rollupClient(d.getServiceName(ServiceL2Verif), 9546) - if err != nil { - return err - } -<<<<<<< HEAD - - d.L1, err = d.serviceClient("l1-geth", 8545) -||||||| parent of 3a01ee2cf (Support differing container names in profiles) - d.L1, err = d.serviceClient("l1-geth", 8545) -======= - d.L1, err = d.serviceClient(d.getServiceName(ServiceL1Geth), 8545) ->>>>>>> 3a01ee2cf (Support differing container names in profiles) - if err != nil { - return err - } - - return nil -} - -func (d *Devnet) ServiceUp(service Service) error { - serviceName := d.getServiceName(service) - log.Info("bringing up service", "service", serviceName) - cmd := exec.CommandContext( - d.ctx, - "docker", "compose", "--profile", d.getProfile(), "up", "-d", 
serviceName, - ) - return cmd.Run() -} - -func (d *Devnet) ServiceDown(service Service) error { - serviceName := d.getServiceName(service) - log.Info("shutting down service", "service", serviceName) - cmd := exec.CommandContext( - d.ctx, - "docker", "compose", "--profile", d.getProfile(), "down", serviceName, - ) - return cmd.Run() -} - -func (d *Devnet) ServiceRestart(service Service) error { - if err := d.ServiceDown(service); err != nil { - return err - } - if err := d.ServiceUp(service); err != nil { - return err - } - return nil -} - -func (d *Devnet) RollupConfig(ctx context.Context) (*rollup.Config, error) { - return d.L2SeqRollup.RollupConfig(ctx) -} - -func (d *Devnet) SystemConfig(ctx context.Context) (*bindings.SystemConfig, *bind.TransactOpts, error) { - config, err := d.RollupConfig(ctx) - if err != nil { - return nil, nil, err - } - contract, err := bindings.NewSystemConfig(config.L1SystemConfigAddress, d.L1) - if err != nil { - return nil, nil, err - } - - owner, err := bind.NewKeyedTransactorWithChainID(d.secrets.Deployer, config.L1ChainID) - if err != nil { - return nil, nil, err - } - - return contract, owner, nil -} - -// Submits a transaction and waits until it is confirmed by the sequencer (but not necessarily the verifier). 
-func (d *Devnet) SubmitL2Tx(applyTxOpts helpers.TxOptsFn) (*types.Receipt, error) { - ctx, cancel := context.WithTimeout(d.ctx, 3*time.Minute) - defer cancel() - - chainID, err := d.L2Seq.ChainID(ctx) - if err != nil { - return nil, err - } - - privKey := d.secrets.Alice - address := crypto.PubkeyToAddress(privKey.PublicKey) - balance, err := d.L2Seq.BalanceAt(ctx, address, nil) - if err != nil { - return nil, fmt.Errorf("getting initial sender balance: %w", err) - } - if balance.Cmp(big.NewInt(0)) <= 0 { - return nil, fmt.Errorf("sender account empty") - } - nonce, err := d.L2Seq.NonceAt(ctx, address, nil) - if err != nil { - return nil, fmt.Errorf("error getting nonce: %w", err) - } - log.Debug("sender wallet", "private key", privKey, "address", address, "balance", balance, "nonce", nonce) - - opts := &helpers.TxOpts{ - ToAddr: nil, - Nonce: nonce, - Value: common.Big0, - GasTipCap: big.NewInt(10), - GasFeeCap: big.NewInt(1000000000), - Gas: 21_000, - Data: nil, - ExpectedStatus: types.ReceiptStatusSuccessful, - } - applyTxOpts(opts) - - tx := types.MustSignNewTx(privKey, types.LatestSignerForChainID(chainID), &types.DynamicFeeTx{ - ChainID: chainID, - Nonce: opts.Nonce, - To: opts.ToAddr, - Value: opts.Value, - GasTipCap: opts.GasTipCap, - GasFeeCap: opts.GasFeeCap, - Gas: opts.Gas, - Data: opts.Data, - }) - log.Info("send transaction", "from", address, "hash", tx.Hash()) - if err := d.L2Seq.SendTransaction(ctx, tx); err != nil { - return nil, fmt.Errorf("sending L2 tx: %w", err) - } - - receipt, err := wait.ForReceiptOK(ctx, d.L2Seq, tx.Hash()) - if err != nil { - return nil, fmt.Errorf("waiting for L2 tx: %w", err) - } - if opts.ExpectedStatus != receipt.Status { - return nil, fmt.Errorf("wrong status: have %d, want %d", receipt.Status, opts.ExpectedStatus) - } - - log.Info("submitted transaction to sequencer", "hash", tx.Hash(), "receipt", receipt) - - return receipt, nil -} - -// Waits for a previously submitted transaction to be confirmed by the verifier. 
-func (d *Devnet) VerifyL2Tx(receipt *types.Receipt) error { - ctx, cancel := context.WithTimeout(d.ctx, 2*time.Minute) - defer cancel() - - log.Info("waiting for transaction verification", "hash", receipt.TxHash) - verified, err := wait.ForReceiptOK(ctx, d.L2Verif, receipt.TxHash) - if err != nil { - return fmt.Errorf("waiting for L2 tx on verification client: %w", err) - } - if !reflect.DeepEqual(receipt, verified) { - return fmt.Errorf("verification client returned incorrect receipt\nSeq: %v\nVerif: %v", receipt, verified) - } - return nil -} - -// Submits a transaction and waits for it to be verified. -func (d *Devnet) RunL2Tx(applyTxOpts helpers.TxOptsFn) error { - receipt, err := d.SubmitL2Tx(applyTxOpts) - if err != nil { - return err - } - return d.VerifyL2Tx(receipt) -} - -func (d *Devnet) SendL1Tx(ctx context.Context, tx *types.Transaction) (*types.Receipt, error) { - err := d.L1.SendTransaction(ctx, tx) - if err != nil { - return nil, err - } - - return wait.ForReceiptOK(ctx, d.L1, tx.Hash()) -} - -type BurnReceipt struct { - InitialBurnBalance *big.Int - BurnAmount *big.Int - BurnAddress common.Address - Receipt *types.Receipt -} - -// Submits a burn transaction and waits until it is confirmed by the sequencer (but not necessarily the verifier). 
-func (d *Devnet) SubmitSimpleL2Burn() (*BurnReceipt, error) { - var err error - - receipt := new(BurnReceipt) - receipt.BurnAddress = common.Address{0xff, 0xff} - receipt.BurnAmount = big.NewInt(1) - - receipt.InitialBurnBalance, err = d.L2Verif.BalanceAt(d.ctx, receipt.BurnAddress, nil) - if err != nil { - return nil, fmt.Errorf("getting initial burn address balance: %w", err) - } - - tx := env.L2TxWithOptions( - env.L2TxWithAmount(receipt.BurnAmount), - env.L2TxWithToAddress(&receipt.BurnAddress), - env.L2TxWithVerifyOnClients(d.L2Verif), - ) - if receipt.Receipt, err = d.SubmitL2Tx(tx); err != nil { - return nil, err - } - return receipt, nil -} - -// Waits for a previously submitted burn transaction to be confirmed by the verifier. -func (d *Devnet) VerifySimpleL2Burn(receipt *BurnReceipt) error { - ctx, cancel := context.WithTimeout(d.ctx, 2*time.Minute) - defer cancel() - - if err := d.VerifyL2Tx(receipt.Receipt); err != nil { - return err - } - - // Check the balance of the burn address using the L2 Verifier - final, err := wait.ForBalanceChange(ctx, d.L2Verif, receipt.BurnAddress, receipt.InitialBurnBalance) - if err != nil { - return fmt.Errorf("waiting for balance change for burn address %s: %w", receipt.BurnAddress, err) - } - balanceBurned := new(big.Int).Sub(final, receipt.InitialBurnBalance) - if balanceBurned.Cmp(receipt.BurnAmount) != 0 { - return fmt.Errorf("incorrect amount burned (have %s, want %s)", balanceBurned, receipt.BurnAmount) - } - - return nil -} - -// RunSimpleL2Burn runs a simple L2 burn transaction and verifies it on the L2 Verifier. -func (d *Devnet) RunSimpleL2Burn() error { - receipt, err := d.SubmitSimpleL2Burn() - if err != nil { - return err - } - return d.VerifySimpleL2Burn(receipt) -} - -// Wait for a configurable amount of time while simulating an outage. 
-func (d *Devnet) SleepOutageDuration() { - log.Info("sleeping during simulated outage", "duration", d.outageTime) - time.Sleep(d.outageTime) -} - -// Wait for a configurable amount of time before considering a run a success. -func (d *Devnet) SleepRecoveryDuration() { - log.Info("sleeping to check that things stay working", "duration", d.successTime) - time.Sleep(d.successTime) -} - -func (d *Devnet) Down() error { - - if d.L1 != nil { - d.L1.Close() - } - if d.L2Seq != nil { - d.L2Seq.Close() - } - if d.L2SeqRollup != nil { - d.L2SeqRollup.Close() - } - if d.L2Verif != nil { - d.L2Verif.Close() - } - if d.L2VerifRollup != nil { - d.L2VerifRollup.Close() - } - - // Use timeout flag for faster Docker shutdown - cmd := exec.CommandContext( - d.ctx, -<<<<<<< HEAD - "docker", "compose", "down", "-v", "--remove-orphans", "--timeout", "10", -||||||| parent of 3a01ee2cf (Support differing container names in profiles) - "docker", "compose", "down", "-v", "--remove-orphans", -======= - "docker", "compose", "--profile", d.getProfile(), "down", "-v", "--remove-orphans", ->>>>>>> 3a01ee2cf (Support differing container names in profiles) - ) - return cmd.Run() -} - -type TaggedWriter struct { - inner io.Writer - tag string - newline bool -} - -func NewTaggedWriter(tag string, inner io.Writer) *TaggedWriter { - return &TaggedWriter{ - inner: inner, - tag: tag, - newline: true, - } -} - -// Implementation of io.Write interface for TaggedWriter. -// Allows to prepend a tag to each line of output. -// The `p` parameter is the tag to add at the beginning of each line. -func (w *TaggedWriter) Write(p []byte) (int, error) { - if w.newline { - if _, err := fmt.Fprintf(w.inner, "%s | ", w.tag); err != nil { - return 0, err - } - w.newline = false - } - - written := 0 - for i := range len(p) { - // Buffer bytes until we hit a newline. - if p[i] == '\n' { - // Print everything we've buffered up to and including the newline. 
- line := p[written : i+1] - n, err := w.inner.Write(line) - written += n - if err != nil || n < len(line) { - return written, err - } - - // If that's the end of the output, return, but make a note that the buffer ended with a - // newline and we need to print the tag before the next message. - if written == len(p) { - w.newline = true - return written, nil - } - - // Otherwise print the tag now before proceeding with the next line in `p`. - if _, err := fmt.Fprintf(w.inner, "%s | ", w.tag); err != nil { - return written, err - } - } - } - - // Print anything that was buffered after the final newline. - if written < len(p) { - line := p[written:] - n, err := w.inner.Write(line) - written += n - if err != nil || n < len(line) { - return written, err - } - } - - return written, nil -} - -func (d *Devnet) OpChallenger(opts ...string) error { - return d.opChallengerCmd(opts...).Run() -} - -type ChallengeGame struct { - Index uint64 - Address common.Address - OutputRoot []byte - Claims uint64 -} - -func ParseChallengeGame(s string) (ChallengeGame, error) { - fields := strings.Fields(s) - if len(fields) < 8 { - return ChallengeGame{}, fmt.Errorf("challenge game is missing fields; expected at least 8 but got only %v", len(fields)) - } - - index, err := strconv.ParseUint(fields[0], 10, 64) - if err != nil { - return ChallengeGame{}, fmt.Errorf("index invalid: %w", err) - } - - address := common.HexToAddress(fields[1]) - - outputRoot := common.Hex2Bytes(fields[6]) - - claims, err := strconv.ParseUint(fields[7], 10, 64) - if err != nil { - return ChallengeGame{}, fmt.Errorf("claims count invalid: %w", err) - } - - return ChallengeGame{ - Index: index, - Address: address, - OutputRoot: outputRoot, - Claims: claims, - }, nil -} - -func (d *Devnet) ListChallengeGames() ([]ChallengeGame, error) { - output, err := d.OpChallengerOutput("list-games") - if err != nil { - return nil, err - } - - var games []ChallengeGame - for i, line := range strings.Split(output, "\n") { - if i == 
0 { - // Ignore header. - continue - } - line = strings.TrimSpace(line) - if len(line) == 0 { - // Ignore empty lines (e.g. trailing newline) - continue - } - - game, err := ParseChallengeGame(line) - if err != nil { - return nil, fmt.Errorf("game %v is invalid: %w", i, err) - } - games = append(games, game) - } - return games, nil -} - -func (d *Devnet) OpChallengerOutput(opts ...string) (string, error) { - cmd := d.opChallengerCmd(opts...) - buf := new(bytes.Buffer) - cmd.Stdout = buf - if err := cmd.Run(); err != nil { - return "", err - } - return buf.String(), nil -} - -func (d *Devnet) opChallengerCmd(opts ...string) *exec.Cmd { - serviceName := d.getServiceName(ServiceChallenger) - opts = append([]string{"compose", "--profile", d.getProfile(), "exec", serviceName, "entrypoint.sh", "op-challenger"}, opts...) - cmd := exec.CommandContext( - d.ctx, - "docker", - opts..., - ) - if testing.Verbose() { - cmd.Stdout = NewTaggedWriter("op-challenger-cmd", os.Stdout) - cmd.Stderr = NewTaggedWriter("op-challenger-cmd", os.Stderr) - } - log.Info("invoking op-challenger", "cmd", cmd) - return cmd -} - -// Get the host port mapped to `privatePort` for the given Docker service. 
-func (d *Devnet) hostPort(service string, privatePort uint16) (uint16, error) { - buf := new(bytes.Buffer) - errBuf := new(bytes.Buffer) - cmd := exec.CommandContext( - d.ctx, - "docker", "compose", "--profile", d.getProfile(), "port", service, fmt.Sprint(privatePort), - ) - cmd.Stdout = buf - cmd.Stderr = errBuf - - if err := cmd.Run(); err != nil { - return 0, fmt.Errorf("command failed (%w)\nStdout: %s\nStderr: %s", err, buf.String(), errBuf.String()) - } - out := strings.TrimSpace(buf.String()) - _, portStr, found := strings.Cut(out, ":") - if !found { - return 0, fmt.Errorf("invalid output from docker port: %s (missing : separator)", out) - } - - port, err := strconv.ParseInt(portStr, 10, 32) - if err != nil { - return 0, fmt.Errorf("invalid output from docker port: %s (%w)", out, err) - } - return uint16(port), nil -} - -// Open an Ethereum RPC client for a Docker service running an RPC server on the given port. -func (d *Devnet) serviceClient(service string, port uint16) (*ethclient.Client, error) { - port, err := d.hostPort(service, port) - if err != nil { - return nil, fmt.Errorf("could not get %s port: %w", service, err) - } - client, err := ethclient.DialContext(d.ctx, fmt.Sprintf("http://127.0.0.1:%d", port)) - if err != nil { - return nil, fmt.Errorf("could not open %s RPC client: %w", service, err) - } - return client, nil -} - -func (d *Devnet) rollupClient(service string, port uint16) (*sources.RollupClient, error) { - port, err := d.hostPort(service, port) - if err != nil { - return nil, fmt.Errorf("could not get %s port: %w", service, err) - } - rpc, err := opclient.NewRPC(d.ctx, log.Root(), fmt.Sprintf("http://127.0.0.1:%d", port), opclient.WithDialAttempts(10)) - if err != nil { - return nil, fmt.Errorf("could not open %s RPC client: %w", service, err) - } - - client := sources.NewRollupClient(rpc) - return client, nil -} From 081d9140775ef0cf27b88e46ebfe17e546ce2a07 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 16 Oct 2025 
00:00:10 +0200 Subject: [PATCH 07/10] Shutdown tee on exit --- espresso/docker/op-batcher-tee/run-enclave.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/espresso/docker/op-batcher-tee/run-enclave.sh b/espresso/docker/op-batcher-tee/run-enclave.sh index f89e0efd041ac..f49b6506a8220 100755 --- a/espresso/docker/op-batcher-tee/run-enclave.sh +++ b/espresso/docker/op-batcher-tee/run-enclave.sh @@ -197,6 +197,9 @@ CONTAINER_ID=$(docker ps --filter "name=$CONTAINER_NAME" --format "{{.ID}}" | he CONTAINER_IMAGE=$(docker inspect "$CONTAINER_NAME" --format '{{.Config.Image}}' 2>/dev/null) STARTED_AT=$(docker inspect "$CONTAINER_NAME" --format '{{.State.StartedAt}}' 2>/dev/null) +# Shutdown the container when we exit for any reason +trap "docker rm -f $CONTAINER_ID" EXIT + echo "Container Details:" echo " ID: $CONTAINER_ID" echo " Image: $CONTAINER_IMAGE" From 076c8b6c4e0c4e68035c76f4d7883bd6f2fcece2 Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 16 Oct 2025 00:09:35 +0200 Subject: [PATCH 08/10] Use profile to query devnet status --- espresso/devnet-tests/devnet_tools.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/espresso/devnet-tests/devnet_tools.go b/espresso/devnet-tests/devnet_tools.go index 016a36288c519..f87c2ab4af77d 100644 --- a/espresso/devnet-tests/devnet_tools.go +++ b/espresso/devnet-tests/devnet_tools.go @@ -162,7 +162,7 @@ func NewDevnet(ctx context.Context, t *testing.T) *Devnet { func (d *Devnet) isRunning() bool { cmd := exec.CommandContext( d.ctx, - "docker", "compose", "ps", "-q", + "docker", "compose", "--profile", d.getProfile(), "ps", "-q", ) buf := new(bytes.Buffer) cmd.Stdout = buf From 88186fdbd988f0c2660e23c2525b67c96b222f0d Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 16 Oct 2025 00:22:36 +0200 Subject: [PATCH 09/10] Fix order of operations setting tee flag --- espresso/devnet-tests/batcher_restart_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/espresso/devnet-tests/batcher_restart_test.go b/espresso/devnet-tests/batcher_restart_test.go index 6193c8935e4bf..28e1e1d572b28 100644 --- a/espresso/devnet-tests/batcher_restart_test.go +++ b/espresso/devnet-tests/batcher_restart_test.go @@ -24,13 +24,13 @@ func testRestart(t *testing.T, tee bool) { defer cancel() d := NewDevnet(ctx, t) + d.tee = tee + require.NoError(t, d.Up()) defer func() { require.NoError(t, d.Down()) }() - d.tee = tee - // Send a transaction just to check that everything has started up ok. require.NoError(t, d.RunSimpleL2Burn()) From f961472ff9daaeb7a28ea36cb26f61745e53b78a Mon Sep 17 00:00:00 2001 From: Artemii Gerasimovich Date: Thu, 16 Oct 2025 01:55:11 +0200 Subject: [PATCH 10/10] Wait for devnet to start up --- espresso/devnet-tests/batcher_restart_test.go | 2 ++ espresso/devnet-tests/devnet_tools.go | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/espresso/devnet-tests/batcher_restart_test.go b/espresso/devnet-tests/batcher_restart_test.go index 28e1e1d572b28..87af6822dd052 100644 --- a/espresso/devnet-tests/batcher_restart_test.go +++ b/espresso/devnet-tests/batcher_restart_test.go @@ -31,6 +31,8 @@ func testRestart(t *testing.T, tee bool) { require.NoError(t, d.Down()) }() + require.NoError(t, d.WaitForL2Operational()) + // Send a transaction just to check that everything has started up ok.
require.NoError(t, d.RunSimpleL2Burn()) diff --git a/espresso/devnet-tests/devnet_tools.go b/espresso/devnet-tests/devnet_tools.go index f87c2ab4af77d..a61046b2677b3 100644 --- a/espresso/devnet-tests/devnet_tools.go +++ b/espresso/devnet-tests/devnet_tools.go @@ -16,6 +16,7 @@ import ( "time" "github.com/ethereum-optimism/optimism/op-e2e/bindings" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" "github.com/ethereum-optimism/optimism/op-node/rollup" @@ -278,6 +279,19 @@ func (d *Devnet) Up() (err error) { return nil } +func (d *Devnet) WaitForL2Operational() error { + + timeout := time.Minute * 5 + + // Batcher needs more time to startup in tee + if d.getProfile() == "tee" { + timeout = time.Minute * 10 + } + + _, err := geth.WaitForBlockToBeSafe(big.NewInt(1), d.L2Verif, timeout) + return err +} + func (d *Devnet) ServiceUp(service Service) error { serviceName := d.getServiceName(service) log.Info("bringing up service", "service", serviceName)