Skip to content

Commit 965163a

Browse files
authored
op-node,op-service: Make L1 cache size configurable (#13772)
Also removes the default limit of 1000, which was too low to hold more than ~3h of L1 data. This was causing problems whenever more than 3h had passed since the latest batch was posted, which then caused derivation of a new batch to fetch all L1 data again. This particularly impacted chains that have a usual channel duration longer than 3h, or chains that experienced a safe head stall of more than 3h. Fixes #13409.
1 parent 3cc36be commit 965163a

File tree

6 files changed

+38
-19
lines changed

6 files changed

+38
-19
lines changed

op-node/flags/flags.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -179,6 +179,13 @@ var (
179179
Value: 20,
180180
Category: L1RPCCategory,
181181
}
182+
L1CacheSize = &cli.UintFlag{
183+
Name: "l1.cache-size",
184+
Usage: "Cache size for blocks, receipts and transactions. " +
185+
"It's optional and a sane default of 3/2 the sequencing window size is used if this field is set to 0.",
186+
EnvVars: prefixEnvVars("L1_CACHE_SIZE"),
187+
Category: L1RPCCategory,
188+
}
182189
L1HTTPPollInterval = &cli.DurationFlag{
183190
Name: "l1.http-poll-interval",
184191
Usage: "Polling interval for latest-block subscription when using an HTTP RPC provider. Ignored for other types of RPC endpoints.",
@@ -423,6 +430,7 @@ var optionalFlags = []cli.Flag{
423430
L1RPCMaxBatchSize,
424431
L1RPCMaxConcurrency,
425432
L1HTTPPollInterval,
433+
L1CacheSize,
426434
VerifierL1Confs,
427435
SequencerEnabledFlag,
428436
SequencerStoppedFlag,

op-node/node/client.go

Lines changed: 21 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -120,6 +120,12 @@ type L1EndpointConfig struct {
120120
// It is recommended to use websockets or IPC for efficient following of the changing block.
121121
// Setting this to 0 disables polling.
122122
HttpPollInterval time.Duration
123+
124+
// CacheSize specifies the cache size for blocks, receipts and transactions. It's optional and a
125+
// sane default of 3/2 the sequencing window size is used during Setup if this field is set to 0.
126+
// Note that receipts and transactions are cached per block, which is why there's only one cache
127+
// size to configure.
128+
CacheSize uint
123129
}
124130

125131
var _ L1EndpointSetup = (*L1EndpointConfig)(nil)
@@ -129,11 +135,14 @@ func (cfg *L1EndpointConfig) Check() error {
129135
return fmt.Errorf("batch size is invalid or unreasonable: %d", cfg.BatchSize)
130136
}
131137
if cfg.RateLimit < 0 {
132-
return fmt.Errorf("rate limit cannot be negative")
138+
return fmt.Errorf("rate limit cannot be negative: %f", cfg.RateLimit)
133139
}
134140
if cfg.MaxConcurrency < 1 {
135141
return fmt.Errorf("max concurrent requests cannot be less than 1, was %d", cfg.MaxConcurrency)
136142
}
143+
if cfg.CacheSize > 1_000_000 {
144+
return fmt.Errorf("cache size is dangerously large: %d", cfg.CacheSize)
145+
}
137146
return nil
138147
}
139148

@@ -146,14 +155,20 @@ func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCf
146155
opts = append(opts, client.WithRateLimit(cfg.RateLimit, cfg.BatchSize))
147156
}
148157

149-
l1Node, err := client.NewRPC(ctx, log, cfg.L1NodeAddr, opts...)
158+
l1RPC, err := client.NewRPC(ctx, log, cfg.L1NodeAddr, opts...)
150159
if err != nil {
151160
return nil, nil, fmt.Errorf("failed to dial L1 address (%s): %w", cfg.L1NodeAddr, err)
152161
}
153-
rpcCfg := sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind)
154-
rpcCfg.MaxRequestsPerBatch = cfg.BatchSize
155-
rpcCfg.MaxConcurrentRequests = cfg.MaxConcurrency
156-
return l1Node, rpcCfg, nil
162+
163+
var l1Cfg *sources.L1ClientConfig
164+
if cfg.CacheSize > 0 {
165+
l1Cfg = sources.L1ClientSimpleConfig(cfg.L1TrustRPC, cfg.L1RPCKind, int(cfg.CacheSize))
166+
} else {
167+
l1Cfg = sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind)
168+
}
169+
l1Cfg.MaxRequestsPerBatch = cfg.BatchSize
170+
l1Cfg.MaxConcurrentRequests = cfg.MaxConcurrency
171+
return l1RPC, l1Cfg, nil
157172
}
158173

159174
// PreparedL1Endpoint enables testing with an in-process pre-setup RPC connection to L1

op-node/node/config.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ func (cfg *Config) LoadPersisted(log log.Logger) error {
130130
// Check verifies that the given configuration makes sense
131131
func (cfg *Config) Check() error {
132132
if err := cfg.L1.Check(); err != nil {
133-
return fmt.Errorf("l2 endpoint config error: %w", err)
133+
return fmt.Errorf("l1 endpoint config error: %w", err)
134134
}
135135
if err := cfg.L2.Check(); err != nil {
136136
return fmt.Errorf("l2 endpoint config error: %w", err)

op-node/node/node.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -188,13 +188,13 @@ func (n *OpNode) initTracer(ctx context.Context, cfg *Config) error {
188188
}
189189

190190
func (n *OpNode) initL1(ctx context.Context, cfg *Config) error {
191-
l1Node, rpcCfg, err := cfg.L1.Setup(ctx, n.log, &cfg.Rollup)
191+
l1RPC, l1Cfg, err := cfg.L1.Setup(ctx, n.log, &cfg.Rollup)
192192
if err != nil {
193193
return fmt.Errorf("failed to get L1 RPC client: %w", err)
194194
}
195195

196196
n.l1Source, err = sources.NewL1Client(
197-
client.NewInstrumentedRPC(l1Node, &n.metrics.RPCMetrics.RPCClientMetrics), n.log, n.metrics.L1SourceCache, rpcCfg)
197+
client.NewInstrumentedRPC(l1RPC, &n.metrics.RPCMetrics.RPCClientMetrics), n.log, n.metrics.L1SourceCache, l1Cfg)
198198
if err != nil {
199199
return fmt.Errorf("failed to create L1 source: %w", err)
200200
}

op-node/service.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -160,6 +160,7 @@ func NewL1EndpointConfig(ctx *cli.Context) *node.L1EndpointConfig {
160160
BatchSize: ctx.Int(flags.L1RPCMaxBatchSize.Name),
161161
HttpPollInterval: ctx.Duration(flags.L1HTTPPollInterval.Name),
162162
MaxConcurrency: ctx.Int(flags.L1RPCMaxConcurrency.Name),
163+
CacheSize: ctx.Uint(flags.L1CacheSize.Name),
163164
}
164165
}
165166

op-service/sources/l1_client.go

Lines changed: 5 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -24,25 +24,20 @@ func L1ClientDefaultConfig(config *rollup.Config, trustRPC bool, kind RPCProvide
2424
}
2525

2626
func L1ClientSimpleConfig(trustRPC bool, kind RPCProviderKind, cacheSize int) *L1ClientConfig {
27-
span := cacheSize
28-
if span > 1000 { // sanity cap. If a large sequencing window is configured, do not make the cache too large
29-
span = 1000
30-
}
3127
return &L1ClientConfig{
3228
EthClientConfig: EthClientConfig{
3329
// receipts and transactions are cached per block
34-
ReceiptsCacheSize: span,
35-
TransactionsCacheSize: span,
36-
HeadersCacheSize: span,
37-
PayloadsCacheSize: span,
30+
ReceiptsCacheSize: cacheSize,
31+
TransactionsCacheSize: cacheSize,
32+
HeadersCacheSize: cacheSize,
33+
PayloadsCacheSize: cacheSize,
3834
MaxRequestsPerBatch: 20, // TODO: tune batch param
3935
MaxConcurrentRequests: 10,
4036
TrustRPC: trustRPC,
4137
MustBePostMerge: false,
4238
RPCProviderKind: kind,
4339
MethodResetDuration: time.Minute,
44-
// Not bounded by span, to cover find-sync-start range fully for speedy recovery after errors.
45-
BlockRefsCacheSize: cacheSize,
40+
BlockRefsCacheSize: cacheSize,
4641
},
4742
}
4843
}

0 commit comments

Comments
 (0)