
Commit fd3cd5c

QuentinI authored and shenkeyao committed
Reduce cumulative diff with Celo (#192)
1 parent fad4d1b commit fd3cd5c

File tree: 15 files changed, +176 -200 lines changed


.envrc

Lines changed: 0 additions & 5 deletions
This file was deleted.

.github/workflows/docker-build-scan.yaml

Lines changed: 3 additions & 3 deletions
@@ -21,7 +21,7 @@ jobs:
         id: detect-files-changed
         uses: step-security/changed-files@3dbe17c78367e7d60f00d78ae6781a35be47b4a1
         with:
-          separator: ","
+          separator: ','
 
   # Build op-node op-batcher op-proposer using docker-bake
   build-op-stack:
@@ -55,8 +55,8 @@ jobs:
       - name: Login at GCP Artifact Registry
         uses: celo-org/reusable-workflows/.github/actions/[email protected]
         with:
-          workload-id-provider: "projects/1094498259535/locations/global/workloadIdentityPools/gh-optimism/providers/github-by-repos"
-          service-account: "[email protected]"
+          workload-id-provider: 'projects/1094498259535/locations/global/workloadIdentityPools/gh-optimism/providers/github-by-repos'
+          service-account: '[email protected]'
           docker-gcp-registries: us-west1-docker.pkg.dev
       # We need a custom steps as it's using docker bake
       - name: Set up Docker Buildx
File renamed without changes.

.gitignore

Lines changed: 1 addition & 0 deletions
@@ -34,6 +34,7 @@ packages/contracts-bedrock/deployments/anvil
 
 .secrets
 .env
+.envrc
 !espresso/.env
 !.env.example
 !.envrc.example

op-alt-da/cmd/daserver/entrypoint.go

Lines changed: 0 additions & 3 deletions
@@ -39,9 +39,6 @@ func StartDAServer(cliCtx *cli.Context) error {
 			return fmt.Errorf("failed to create S3 store: %w", err)
 		}
 		store = s3
-	} else if cfg.EspressoEnabled() {
-		l.Info("Using Espresso DA", "url", cfg.EspressoBaseUrl)
-		store = NewEspressoStore(cfg.EspressoBaseUrl, l)
 	}
 
 	server := altda.NewDAServer(cliCtx.String(ListenAddrFlagName), cliCtx.Int(PortFlagName), store, l, cfg.UseGenericComm)

op-alt-da/cmd/daserver/flags.go

Lines changed: 6 additions & 30 deletions
@@ -13,7 +13,6 @@ import (
 const (
 	ListenAddrFlagName        = "addr"
 	PortFlagName              = "port"
-	EspressoBaseUrlFlagName   = "espresso.url"
 	S3BucketFlagName          = "s3.bucket"
 	S3EndpointFlagName        = "s3.endpoint"
 	S3AccessKeyIDFlagName     = "s3.access-key-id"
@@ -75,12 +74,6 @@
 		Value:   "",
 		EnvVars: prefixEnvVars("S3_ACCESS_KEY_SECRET"),
 	}
-	EspressoBaseUrlFlag = &cli.StringFlag{
-		Name:    EspressoBaseUrlFlagName,
-		Usage:   "espresso network base url",
-		Value:   "",
-		EnvVars: prefixEnvVars("ESPRESSO_BASE_URL"),
-	}
 )
 
 var requiredFlags = []cli.Flag{
@@ -94,7 +87,6 @@ var optionalFlags = []cli.Flag{
 	S3EndpointFlag,
 	S3AccessKeyIDFlag,
 	S3AccessKeySecretFlag,
-	EspressoBaseUrlFlag,
 	GenericCommFlag,
 }
 
@@ -112,7 +104,6 @@ type CLIConfig struct {
 	S3Endpoint        string
 	S3AccessKeyID     string
 	S3AccessKeySecret string
-	EspressoBaseUrl   string
 	UseGenericComm    bool
 }
 
@@ -123,38 +114,23 @@ func ReadCLIConfig(ctx *cli.Context) CLIConfig {
 		S3Endpoint:        ctx.String(S3EndpointFlagName),
 		S3AccessKeyID:     ctx.String(S3AccessKeyIDFlagName),
 		S3AccessKeySecret: ctx.String(S3AccessKeySecretFlagName),
-		EspressoBaseUrl:   ctx.String(EspressoBaseUrlFlagName),
 		UseGenericComm:    ctx.Bool(GenericCommFlagName),
 	}
 }
 
 func (c CLIConfig) Check() error {
-	enabledCount := 0
-	if c.S3Enabled() {
-		enabledCount++
-		if c.S3Bucket == "" || c.S3Endpoint == "" || c.S3AccessKeyID == "" || c.S3AccessKeySecret == "" {
-			return errors.New("all S3 flags must be set")
-		}
-	}
-	if c.FileStoreEnabled() {
-		enabledCount++
-	}
-	if c.EspressoEnabled() {
-		enabledCount++
-	}
-	if enabledCount == 0 {
+	if !c.S3Enabled() && !c.FileStoreEnabled() {
 		return errors.New("at least one storage backend must be enabled")
 	}
-	if enabledCount > 1 {
-		return errors.New("only one storage backend must be enabled")
+	if c.S3Enabled() && c.FileStoreEnabled() {
+		return errors.New("only one storage backend can be enabled")
+	}
+	if c.S3Enabled() && (c.S3Bucket == "" || c.S3Endpoint == "" || c.S3AccessKeyID == "" || c.S3AccessKeySecret == "") {
+		return errors.New("all S3 flags must be set")
 	}
 	return nil
 }
 
-func (c CLIConfig) EspressoEnabled() bool {
-	return c.EspressoBaseUrl != ""
-}
-
 func (c CLIConfig) S3Enabled() bool {
 	return !(c.S3Bucket == "" && c.S3Endpoint == "" && c.S3AccessKeyID == "" && c.S3AccessKeySecret == "")
 }
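
The rewritten Check() above drops the Espresso-aware enabledCount bookkeeping in favour of two boolean conditions over the remaining backends. Below is a minimal, self-contained sketch of the resulting validation rules, using a trimmed stand-in for the daserver config; the FileStorePath field and the FileStoreEnabled body are assumptions for illustration, while S3Enabled and the Check logic come from this diff.

package main

import (
	"errors"
	"fmt"
)

// Trimmed stand-in for the daserver CLIConfig after this commit.
// FileStorePath is a hypothetical field representing the file-store flag,
// which is not shown in this diff.
type cfg struct {
	FileStorePath     string
	S3Bucket          string
	S3Endpoint        string
	S3AccessKeyID     string
	S3AccessKeySecret string
}

// S3Enabled mirrors the existing helper: any S3 flag set counts as enabled.
func (c cfg) S3Enabled() bool {
	return !(c.S3Bucket == "" && c.S3Endpoint == "" && c.S3AccessKeyID == "" && c.S3AccessKeySecret == "")
}

// FileStoreEnabled is assumed to key off the file-store path flag.
func (c cfg) FileStoreEnabled() bool { return c.FileStorePath != "" }

// Check mirrors the post-commit validation: exactly one backend must be
// enabled, and a partially configured S3 backend is rejected.
func (c cfg) Check() error {
	if !c.S3Enabled() && !c.FileStoreEnabled() {
		return errors.New("at least one storage backend must be enabled")
	}
	if c.S3Enabled() && c.FileStoreEnabled() {
		return errors.New("only one storage backend can be enabled")
	}
	if c.S3Enabled() && (c.S3Bucket == "" || c.S3Endpoint == "" || c.S3AccessKeyID == "" || c.S3AccessKeySecret == "") {
		return errors.New("all S3 flags must be set")
	}
	return nil
}

func main() {
	fmt.Println(cfg{}.Check())                                      // no backend: error
	fmt.Println(cfg{FileStorePath: "/data", S3Bucket: "b"}.Check()) // both backends: error
	fmt.Println(cfg{S3Bucket: "b"}.Check())                         // partial S3 config: error
	fmt.Println(cfg{FileStorePath: "/data"}.Check())                // file store only: <nil>
}

Running the sketch prints the three error cases followed by <nil> for a valid single-backend config.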

op-batcher/batcher/config.go

Lines changed: 33 additions & 33 deletions
@@ -53,8 +53,6 @@ type CLIConfig struct {
 	// and creating a new batch.
 	PollInterval time.Duration
 
-	EspressoPollInterval time.Duration
-
 	// MaxPendingTransactions is the maximum number of concurrent pending
 	// transactions sent to the transaction manager (0 == no limit).
 	MaxPendingTransactions uint64
@@ -126,6 +124,7 @@ type CLIConfig struct {
 	RPC   oprpc.CLIConfig
 	AltDA altda.CLIConfig
 
+	EspressoPollInterval             time.Duration
 	EspressoUrls                     []string
 	EspressoLightClientAddr          string
 	TestingEspressoBatcherPrivateKey string
@@ -193,41 +192,42 @@ func (c *CLIConfig) Check() error {
 func NewConfig(ctx *cli.Context) *CLIConfig {
 	return &CLIConfig{
 		/* Required Flags */
-		L1EthRpc:             ctx.String(flags.L1EthRpcFlag.Name),
-		L2EthRpc:             ctx.String(flags.L2EthRpcFlag.Name),
-		RollupRpc:            ctx.String(flags.RollupRpcFlag.Name),
-		SubSafetyMargin:      ctx.Uint64(flags.SubSafetyMarginFlag.Name),
-		PollInterval:         ctx.Duration(flags.PollIntervalFlag.Name),
-		EspressoPollInterval: ctx.Duration(flags.EspressoPollIntervalFlag.Name),
+		L1EthRpc:        ctx.String(flags.L1EthRpcFlag.Name),
+		L2EthRpc:        ctx.String(flags.L2EthRpcFlag.Name),
+		RollupRpc:       ctx.String(flags.RollupRpcFlag.Name),
+		SubSafetyMargin: ctx.Uint64(flags.SubSafetyMarginFlag.Name),
+		PollInterval:    ctx.Duration(flags.PollIntervalFlag.Name),
 
 		/* Optional Flags */
-		MaxPendingTransactions:           ctx.Uint64(flags.MaxPendingTransactionsFlag.Name),
-		MaxChannelDuration:               ctx.Uint64(flags.MaxChannelDurationFlag.Name),
-		MaxL1TxSize:                      ctx.Uint64(flags.MaxL1TxSizeBytesFlag.Name),
-		MaxBlocksPerSpanBatch:            ctx.Int(flags.MaxBlocksPerSpanBatch.Name),
-		TargetNumFrames:                  ctx.Int(flags.TargetNumFramesFlag.Name),
-		ApproxComprRatio:                 ctx.Float64(flags.ApproxComprRatioFlag.Name),
-		Compressor:                       ctx.String(flags.CompressorFlag.Name),
-		CompressionAlgo:                  derive.CompressionAlgo(ctx.String(flags.CompressionAlgoFlag.Name)),
-		Stopped:                          ctx.Bool(flags.StoppedFlag.Name),
-		WaitNodeSync:                     ctx.Bool(flags.WaitNodeSyncFlag.Name),
-		CheckRecentTxsDepth:              ctx.Int(flags.CheckRecentTxsDepthFlag.Name),
-		BatchType:                        ctx.Uint(flags.BatchTypeFlag.Name),
-		DataAvailabilityType:             flags.DataAvailabilityType(ctx.String(flags.DataAvailabilityTypeFlag.Name)),
-		ActiveSequencerCheckDuration:     ctx.Duration(flags.ActiveSequencerCheckDurationFlag.Name),
-		TxMgrConfig:                      txmgr.ReadCLIConfig(ctx),
-		LogConfig:                        oplog.ReadCLIConfig(ctx),
-		MetricsConfig:                    opmetrics.ReadCLIConfig(ctx),
-		PprofConfig:                      oppprof.ReadCLIConfig(ctx),
-		RPC:                              oprpc.ReadCLIConfig(ctx),
-		AltDA:                            altda.ReadCLIConfig(ctx),
-		ThrottleThreshold:                ctx.Uint64(flags.ThrottleThresholdFlag.Name),
-		ThrottleTxSize:                   ctx.Uint64(flags.ThrottleTxSizeFlag.Name),
-		ThrottleBlockSize:                ctx.Uint64(flags.ThrottleBlockSizeFlag.Name),
-		ThrottleAlwaysBlockSize:          ctx.Uint64(flags.ThrottleAlwaysBlockSizeFlag.Name),
-		PreferLocalSafeL2:                ctx.Bool(flags.PreferLocalSafeL2Flag.Name),
+		MaxPendingTransactions:       ctx.Uint64(flags.MaxPendingTransactionsFlag.Name),
+		MaxChannelDuration:           ctx.Uint64(flags.MaxChannelDurationFlag.Name),
+		MaxL1TxSize:                  ctx.Uint64(flags.MaxL1TxSizeBytesFlag.Name),
+		MaxBlocksPerSpanBatch:        ctx.Int(flags.MaxBlocksPerSpanBatch.Name),
+		TargetNumFrames:              ctx.Int(flags.TargetNumFramesFlag.Name),
+		ApproxComprRatio:             ctx.Float64(flags.ApproxComprRatioFlag.Name),
+		Compressor:                   ctx.String(flags.CompressorFlag.Name),
+		CompressionAlgo:              derive.CompressionAlgo(ctx.String(flags.CompressionAlgoFlag.Name)),
+		Stopped:                      ctx.Bool(flags.StoppedFlag.Name),
+		WaitNodeSync:                 ctx.Bool(flags.WaitNodeSyncFlag.Name),
+		CheckRecentTxsDepth:          ctx.Int(flags.CheckRecentTxsDepthFlag.Name),
+		BatchType:                    ctx.Uint(flags.BatchTypeFlag.Name),
+		DataAvailabilityType:         flags.DataAvailabilityType(ctx.String(flags.DataAvailabilityTypeFlag.Name)),
+		ActiveSequencerCheckDuration: ctx.Duration(flags.ActiveSequencerCheckDurationFlag.Name),
+		TxMgrConfig:                  txmgr.ReadCLIConfig(ctx),
+		LogConfig:                    oplog.ReadCLIConfig(ctx),
+		MetricsConfig:                opmetrics.ReadCLIConfig(ctx),
+		PprofConfig:                  oppprof.ReadCLIConfig(ctx),
+		RPC:                          oprpc.ReadCLIConfig(ctx),
+		AltDA:                        altda.ReadCLIConfig(ctx),
+		ThrottleThreshold:            ctx.Uint64(flags.ThrottleThresholdFlag.Name),
+		ThrottleTxSize:               ctx.Uint64(flags.ThrottleTxSizeFlag.Name),
+		ThrottleBlockSize:            ctx.Uint64(flags.ThrottleBlockSizeFlag.Name),
+		ThrottleAlwaysBlockSize:      ctx.Uint64(flags.ThrottleAlwaysBlockSizeFlag.Name),
+		PreferLocalSafeL2:            ctx.Bool(flags.PreferLocalSafeL2Flag.Name),
+
 		EspressoUrls:                     ctx.StringSlice(flags.EspressoUrlsFlag.Name),
 		EspressoLightClientAddr:          ctx.String(flags.EspressoLCAddrFlag.Name),
 		TestingEspressoBatcherPrivateKey: ctx.String(flags.TestingEspressoBatcherPrivateKeyFlag.Name),
+		EspressoPollInterval:             ctx.Duration(flags.EspressoPollIntervalFlag.Name),
 	}
 }

op-batcher/batcher/driver.go

Lines changed: 29 additions & 31 deletions
@@ -28,7 +28,7 @@ import (
 	altda "github.com/ethereum-optimism/optimism/op-alt-da"
 	"github.com/ethereum-optimism/optimism/op-batcher/metrics"
 	"github.com/ethereum-optimism/optimism/op-node/rollup"
-	derive "github.com/ethereum-optimism/optimism/op-node/rollup/derive"
+	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
 	opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto"
 	"github.com/ethereum-optimism/optimism/op-service/dial"
 	"github.com/ethereum-optimism/optimism/op-service/eth"
@@ -94,17 +94,18 @@ type AltDAClient interface {
 
 // DriverSetup is the collection of input/output interfaces and configuration that the driver operates on.
 type DriverSetup struct {
-	Log                 log.Logger
-	Metr                metrics.Metricer
-	RollupConfig        *rollup.Config
-	Config              BatcherConfig
-	Txmgr               txmgr.TxManager
-	L1Client            L1Client
-	EndpointProvider    dial.L2EndpointProvider
-	ChannelConfig       ChannelConfigProvider
-	AltDA               AltDAClient
-	ChannelOutFactory   ChannelOutFactory
-	ActiveSeqChanged    chan struct{} // optional
+	Log               log.Logger
+	Metr              metrics.Metricer
+	RollupConfig      *rollup.Config
+	Config            BatcherConfig
+	Txmgr             txmgr.TxManager
+	L1Client          L1Client
+	EndpointProvider  dial.L2EndpointProvider
+	ChannelConfig     ChannelConfigProvider
+	AltDA             AltDAClient
+	ChannelOutFactory ChannelOutFactory
+	ActiveSeqChanged  chan struct{} // optional
+
 	Espresso            *espressoClient.MultipleNodesClient
 	EspressoLightClient *espressoLightClient.LightclientCaller
 	ChainSigner         opcrypto.ChainSigner
@@ -124,20 +125,16 @@ type BatchSubmitter struct {
 	mutex   sync.Mutex
 	running bool
 
-	submitter         *espressoTransactionSubmitter
-	streamer          espresso.EspressoStreamer[derive.EspressoBatch]
 	txpoolMutex       sync.Mutex // guards txpoolState and txpoolBlockedBlob
 	txpoolState       TxPoolState
 	txpoolBlockedBlob bool
 
 	channelMgrMutex sync.Mutex // guards channelMgr and prevCurrentL1
 	channelMgr      *channelManager
 	prevCurrentL1   eth.L1BlockRef // cached CurrentL1 from the last syncStatus
-}
 
-// EspressoStreamer returns the batch submitter's Espresso streamer instance
-func (l *BatchSubmitter) EspressoStreamer() *espresso.EspressoStreamer[derive.EspressoBatch] {
-	return &l.streamer
+	espressoSubmitter *espressoTransactionSubmitter
+	espressoStreamer  espresso.EspressoStreamer[derive.EspressoBatch]
 }
 
 // NewBatchSubmitter initializes the BatchSubmitter driver from a preconfigured DriverSetup
@@ -152,7 +149,7 @@ func NewBatchSubmitter(setup DriverSetup) *BatchSubmitter {
 		channelMgr: state,
 	}
 
-	batchSubmitter.streamer = espresso.NewEspressoStreamer(
+	batchSubmitter.espressoStreamer = espresso.NewEspressoStreamer(
 		batchSubmitter.RollupConfig.L2ChainID.Uint64(),
 		NewAdaptL1BlockRefClient(batchSubmitter.L1Client),
 		batchSubmitter.Espresso,
@@ -164,7 +161,7 @@ func NewBatchSubmitter(setup DriverSetup) *BatchSubmitter {
 		2*time.Second,
 	)
 
-	log.Info("Streamer started", "streamer", batchSubmitter.streamer)
+	log.Info("Streamer started", "streamer", batchSubmitter.espressoStreamer)
 
 	return batchSubmitter
 }
@@ -219,26 +216,29 @@ func (l *BatchSubmitter) StartBatchSubmitting() error {
 			return fmt.Errorf("could not register with batch inbox contract: %w", err)
 		}
 
-		l.submitter = NewEspressoTransactionSubmitter(
+		l.espressoSubmitter = NewEspressoTransactionSubmitter(
 			WithContext(l.shutdownCtx),
 			WithWaitGroup(l.wg),
 			WithEspressoClient(l.Espresso),
 		)
-		l.submitter.SpawnWorkers(4, 4)
-		l.submitter.Start()
+		l.espressoSubmitter.SpawnWorkers(4, 4)
+		l.espressoSubmitter.Start()
 
 		l.wg.Add(4)
 		go l.receiptsLoop(l.wg, receiptsCh) // ranges over receiptsCh channel
 		go l.espressoBatchQueueingLoop(l.shutdownCtx, l.wg)
 		go l.espressoBatchLoadingLoop(l.shutdownCtx, l.wg, publishSignal)
 		go l.publishingLoop(l.killCtx, l.wg, receiptsCh, publishSignal) // ranges over publishSignal, spawns routines which send on receiptsCh. Closes receiptsCh when done.
-	} else {
-		l.wg.Add(3)
-		go l.receiptsLoop(l.wg, receiptsCh) // ranges over receiptsCh channel
-		go l.publishingLoop(l.killCtx, l.wg, receiptsCh, publishSignal) // ranges over publishSignal, spawns routines which send on receiptsCh. Closes receiptsCh when done.
-		go l.blockLoadingLoop(l.shutdownCtx, l.wg, pendingBytesUpdated, publishSignal) // sends on pendingBytesUpdated (if throttling enabled), and publishSignal. Closes them both when done
+
+		l.Log.Info("Batch Submitter started in Espresso mode")
+		return nil
 	}
 
+	l.wg.Add(3)
+	go l.receiptsLoop(l.wg, receiptsCh) // ranges over receiptsCh channel
+	go l.publishingLoop(l.killCtx, l.wg, receiptsCh, publishSignal) // ranges over publishSignal, spawns routines which send on receiptsCh. Closes receiptsCh when done.
+	go l.blockLoadingLoop(l.shutdownCtx, l.wg, pendingBytesUpdated, publishSignal) // sends on pendingBytesUpdated (if throttling enabled), and publishSignal. Closes them both when done
+
 	l.Log.Info("Batch Submitter started")
 	return nil
 }
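
The StartBatchSubmitting hunk above swaps the old if/else between the Espresso and default goroutine sets for an early return at the end of the Espresso branch, so the default path no longer sits inside an else block. Below is a stripped-down sketch of that control-flow shape; the batcher type and loop names are stand-ins for illustration, not the real op-batcher methods.

package main

import (
	"fmt"
	"sync"
)

// batcher is a stand-in for BatchSubmitter; the loop method below is a
// placeholder, not one of the real op-batcher loops.
type batcher struct {
	useEspresso bool
	wg          sync.WaitGroup
}

// start mirrors the post-commit shape of StartBatchSubmitting: the Espresso
// branch ends with an early return, so the default goroutine set is no
// longer wrapped in an else block.
func (b *batcher) start() error {
	if b.useEspresso {
		b.wg.Add(2)
		go b.loop("espresso batch queueing")
		go b.loop("espresso batch loading")
		fmt.Println("Batch Submitter started in Espresso mode")
		return nil
	}

	b.wg.Add(2)
	go b.loop("block loading")
	go b.loop("publishing")
	fmt.Println("Batch Submitter started")
	return nil
}

func (b *batcher) loop(name string) {
	defer b.wg.Done()
	fmt.Println("running loop:", name)
}

func main() {
	b := &batcher{useEspresso: false}
	_ = b.start()
	b.wg.Wait()
}

The behaviour is unchanged; the early return simply keeps the default start-up path at the top indentation level.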
@@ -779,7 +779,7 @@ func (l *BatchSubmitter) clearState(ctx context.Context) {
 		defer l.channelMgrMutex.Unlock()
 		l.channelMgr.Clear(l1SafeOrigin)
 		if l.Config.UseEspresso {
-			l.streamer.Reset()
+			l.espressoStreamer.Reset()
 		}
 		return true
 	}
@@ -931,8 +931,6 @@ func (l *BatchSubmitter) sendTransaction(txdata txData, queue *txmgr.Queue[txRef
 		if !l.Config.UseAltDA {
 			l.Log.Crit("Received AltDA type txdata without AltDA being enabled")
 		}
-
-		// if Alt DA is enabled we post the txdata to the DA Provider and replace it with the commitment.
 		if txdata.altDACommitment == nil {
 			// This means the txdata was not sent to the DA Provider yet.
 			// This will send the txdata to the DA Provider and store the commitment in the channelMgr.