Skip to content

Commit c31cb46

Browse files
committed
all: Adapt OP-Stack diff after merging upstream geth/v1.15.0
core/txpool: Add PoolJournaler to reimplement journalRemotes fork.yaml: add tx-pool journalremotes sub-section params: don't require BlobScheduleConfig for OP-Stack config miner: fix OP-Stack payload building & test consensus/beacon: fix VerifyHeader for transitioned OP networks core/txpool/locals: Fix PoolJournaler to use ticker & journal on shutdown consensus/beacon: add parenthesis for clarity ethclient: check error in TestEthClientHistoricalBackend consensus/beacon: check for OP config in CalcDifficulty consensus/beacon: fix CalcDifficulty pre-Bedrock check consensus/misc/4844: add exception for OP-Stack in CalcBlobFee params: require nil BlobScheduleConfig for OP-Stack chains
1 parent c5afb51 commit c31cb46

File tree

20 files changed

+251
-46
lines changed

20 files changed

+251
-46
lines changed

cmd/utils/flags.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -388,7 +388,7 @@ var (
388388
}
389389
TxPoolJournalRemotesFlag = &cli.BoolFlag{
390390
Name: "txpool.journalremotes",
391-
Usage: "Includes remote transactions in the journal",
391+
Usage: "Includes remote transactions in the journal. Only effective if nolocals is set too.",
392392
Category: flags.TxPoolCategory,
393393
}
394394
TxPoolRejournalFlag = &cli.DurationFlag{

consensus/beacon/consensus.go

Lines changed: 14 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -122,8 +122,11 @@ func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *ty
122122
if parent.Difficulty.Sign() == 0 && header.Difficulty.Sign() > 0 {
123123
return consensus.ErrInvalidTerminalBlock
124124
}
125+
cfg := chain.Config()
125126
// Check >0 TDs with pre-merge, --0 TDs with post-merge rules
126-
if header.Difficulty.Sign() > 0 {
127+
if header.Difficulty.Sign() > 0 ||
128+
// OP-Stack: transitioned networks must use legacy consensus pre-Bedrock
129+
(cfg.IsOptimism() && !cfg.IsBedrock(header.Number)) {
127130
return beacon.ethone.VerifyHeader(chain, header)
128131
}
129132
return beacon.verifyHeader(chain, header, parent)
@@ -132,21 +135,21 @@ func (beacon *Beacon) VerifyHeader(chain consensus.ChainHeaderReader, header *ty
132135
// OP-Stack Bedrock variant of splitHeaders: the total-terminal difficulty is terminated at bedrock transition, but also reset to 0.
133136
// So just use the bedrock fork check to split the headers, to simplify the splitting.
134137
// The returned slices are slices over the input. The input must be sorted.
135-
func (beacon *Beacon) splitBedrockHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) ([]*types.Header, []*types.Header, error) {
138+
func (beacon *Beacon) splitBedrockHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) ([]*types.Header, []*types.Header) {
136139
for i, h := range headers {
137140
if chain.Config().IsBedrock(h.Number) {
138-
return headers[:i], headers[i:], nil
141+
return headers[:i], headers[i:]
139142
}
140143
}
141-
return headers, nil, nil
144+
return headers, nil
142145
}
143146

144147
// splitHeaders splits the provided header batch into two parts according to
145148
// the difficulty field.
146149
//
147150
// Note, this function will not verify the header validity but just split them.
148-
func (beacon *Beacon) splitHeaders(headers []*types.Header) ([]*types.Header, []*types.Header) {
149-
if chain.Config().Optimism != nil {
151+
func (beacon *Beacon) splitHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) ([]*types.Header, []*types.Header) {
152+
if chain.Config().IsOptimism() {
150153
return beacon.splitBedrockHeaders(chain, headers)
151154
}
152155

@@ -169,7 +172,7 @@ func (beacon *Beacon) splitHeaders(headers []*types.Header) ([]*types.Header, []
169172
// a results channel to retrieve the async verifications.
170173
// VerifyHeaders expect the headers to be ordered and continuous.
171174
func (beacon *Beacon) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) {
172-
preHeaders, postHeaders := beacon.splitHeaders(headers)
175+
preHeaders, postHeaders := beacon.splitHeaders(chain, headers)
173176
if len(postHeaders) == 0 {
174177
return beacon.ethone.VerifyHeaders(chain, headers)
175178
}
@@ -488,7 +491,10 @@ func (beacon *Beacon) CalcDifficulty(chain consensus.ChainHeaderReader, time uin
488491
// We do not need to seal non-merge blocks anymore live, but we do need
489492
// to be able to generate test chains, thus we're reverting to a testing-
490493
// settable field to direct that.
491-
if beacon.ttdblock != nil && *beacon.ttdblock > parent.Number.Uint64() {
494+
cfg := chain.Config()
495+
if beacon.ttdblock != nil && *beacon.ttdblock > parent.Number.Uint64() ||
496+
// OP-Stack: transitioned networks must use legacy consensus pre-Bedrock
497+
(cfg.IsOptimism() && !cfg.IsBedrock(new(big.Int).Add(parent.Number, common.Big1))) {
492498
return beacon.ethone.CalcDifficulty(chain, time, parent)
493499
}
494500
return beaconDifficulty

consensus/misc/eip4844/eip4844.go

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -81,6 +81,16 @@ func CalcExcessBlobGas(config *params.ChainConfig, parent *types.Header, headTim
8181

8282
// CalcBlobFee calculates the blobfee from the header's excess blob gas field.
8383
func CalcBlobFee(config *params.ChainConfig, header *types.Header) *big.Int {
84+
// OP-Stack chains don't support blobs, but still set the excessBlobGas field (always to zero).
85+
// So this function is called in many places for OP-Stack chains too. In order to not require
86+
// a blob schedule in the chain config, we short circuit here.
87+
if config.IsOptimism() {
88+
if config.BlobScheduleConfig != nil || header.ExcessBlobGas == nil || *header.ExcessBlobGas != 0 {
89+
panic("OP-Stack: CalcBlobFee: unexpected blob schedule or excess blob gas")
90+
}
91+
return minBlobGasPrice
92+
}
93+
8494
var frac uint64
8595
switch config.LatestFork(header.Time) {
8696
case forks.Prague:

consensus/misc/eip4844/eip4844_test.go

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,8 @@ import (
2121
"math/big"
2222
"testing"
2323

24+
"github.com/stretchr/testify/require"
25+
2426
"github.com/ethereum/go-ethereum/core/types"
2527
"github.com/ethereum/go-ethereum/params"
2628
)
@@ -90,6 +92,28 @@ func TestCalcBlobFee(t *testing.T) {
9092
}
9193
}
9294

95+
func TestCalcBlobFeeOPStack(t *testing.T) {
96+
zero := uint64(0)
97+
header := &types.Header{ExcessBlobGas: &zero}
98+
// any non-nil optimism config should do
99+
config := &params.ChainConfig{Optimism: new(params.OptimismConfig)}
100+
bfee := CalcBlobFee(config, header)
101+
require.Equal(t, int64(1), bfee.Int64())
102+
103+
reqPanic := func() {
104+
require.PanicsWithValue(t,
105+
"OP-Stack: CalcBlobFee: unexpected blob schedule or excess blob gas",
106+
func() { CalcBlobFee(config, header) })
107+
}
108+
(*header.ExcessBlobGas)++
109+
reqPanic()
110+
header.ExcessBlobGas = nil
111+
reqPanic()
112+
header.ExcessBlobGas = &zero
113+
config.BlobScheduleConfig = params.DefaultBlobSchedule
114+
reqPanic()
115+
}
116+
93117
func TestFakeExponential(t *testing.T) {
94118
tests := []struct {
95119
factor int64

core/genesis.go

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -477,11 +477,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *triedb.Database, g
477477
// OP-Stack note: Always apply overrides.
478478
// The genesis function arg may be nil, and stored-config may be non-nil at the same time.
479479
// This is important to apply superchain-upgrades to existing DBs, where the network CLI flag is not used.
480-
chainCfg, err := overrides.apply(newCfg)
481-
if err != nil {
480+
if err := overrides.apply(newCfg); err != nil {
482481
return nil, common.Hash{}, nil, err
483482
}
484-
newCfg = chainCfg
485483

486484
var genesisTimestamp *uint64
487485
if genesis != nil {

core/state_transition.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -464,7 +464,7 @@ func (st *stateTransition) execute() (*ExecutionResult, error) {
464464

465465
st.state.RevertToSnapshot(snap)
466466
// Even though we revert the state changes, always increment the nonce for the next deposit transaction
467-
st.state.SetNonce(st.msg.From, st.state.GetNonce(st.msg.From)+1)
467+
st.state.SetNonce(st.msg.From, st.state.GetNonce(st.msg.From)+1, tracing.NonceChangeEoACall)
468468
// Record deposits as using all their gas (matches the gas pool)
469469
// System Transactions are special & are not recorded as using any gas (anywhere)
470470
// Regolith changes this behaviour so the actual gas used is reported.

core/txpool/legacypool/legacypool.go

Lines changed: 25 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -56,11 +56,9 @@ const (
5656
txMaxSize = 4 * txSlotSize // 128KB
5757
)
5858

59-
var (
60-
// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
61-
// another remote transaction.
62-
ErrTxPoolOverflow = errors.New("txpool is full")
63-
)
59+
// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
60+
// another remote transaction.
61+
var ErrTxPoolOverflow = errors.New("txpool is full")
6462

6563
var (
6664
evictionInterval = time.Minute // Time interval to check for evictable transactions
@@ -480,6 +478,23 @@ func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction,
480478
return pending, queued
481479
}
482480

481+
// ToJournal returns all transactions in the pool in a format suitable for journaling.
482+
//
483+
// OP-Stack addition.
484+
func (pool *LegacyPool) ToJournal() map[common.Address]types.Transactions {
485+
pool.mu.Lock()
486+
defer pool.mu.Unlock()
487+
488+
txs := make(map[common.Address]types.Transactions, len(pool.pending)+len(pool.queue))
489+
for addr, pending := range pool.pending {
490+
txs[addr] = pending.Flatten()
491+
}
492+
for addr, queued := range pool.queue {
493+
txs[addr] = append(txs[addr], queued.Flatten()...)
494+
}
495+
return txs
496+
}
497+
483498
// Pending retrieves all currently processable transactions, grouped by origin
484499
// account and sorted by nonce.
485500
//
@@ -518,7 +533,9 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address]
518533
}
519534
}
520535
}
521-
if filter.MaxDATxSize != nil && !pool.locals.contains(addr) {
536+
537+
// OP-Stack addition: exclude by max-da-size filter
538+
if filter.MaxDATxSize != nil {
522539
for i, tx := range txs {
523540
estimate := tx.RollupCostData().EstimatedDASize()
524541
if estimate.Cmp(filter.MaxDATxSize) > 0 {
@@ -566,9 +583,6 @@ func (pool *LegacyPool) validateTxBasics(tx *types.Transaction) error {
566583
MinTip: pool.gasTip.Load().ToBig(),
567584
EffectiveGasCeil: pool.config.EffectiveGasCeil,
568585
}
569-
if local {
570-
opts.MinTip = new(big.Int)
571-
}
572586
if err := txpool.ValidateTransaction(tx, pool.currentHead.Load(), pool.signer, opts); err != nil {
573587
return err
574588
}
@@ -920,7 +934,7 @@ func (pool *LegacyPool) Add(txs []*types.Transaction, sync bool) []error {
920934
newErrs, dirtyAddrs := pool.addTxsLocked(news)
921935
pool.mu.Unlock()
922936

923-
var nilSlot = 0
937+
nilSlot := 0
924938
for _, err := range newErrs {
925939
for errs[nilSlot] != nil {
926940
nilSlot++
@@ -1417,7 +1431,7 @@ func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.T
14171431
queuedGauge.Dec(int64(len(readies)))
14181432

14191433
// Drop all transactions over the allowed limit
1420-
var caps = list.Cap(int(pool.config.AccountQueue))
1434+
caps := list.Cap(int(pool.config.AccountQueue))
14211435
for _, tx := range caps {
14221436
hash := tx.Hash()
14231437
pool.all.Remove(hash)

core/txpool/legacypool/legacypool_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -699,8 +699,8 @@ func TestRejectedDropping(t *testing.T) {
699699
// create some txs. tx0 has a conditional
700700
tx0, tx1 := transaction(0, 100, key), transaction(1, 200, key)
701701

702-
pool.all.Add(tx0, false)
703-
pool.all.Add(tx1, false)
702+
pool.all.Add(tx0)
703+
pool.all.Add(tx1)
704704
pool.promoteTx(account, tx0.Hash(), tx0)
705705
pool.promoteTx(account, tx1.Hash(), tx1)
706706

core/txpool/legacypool/list.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -268,7 +268,7 @@ func (m *SortedMap) LastElement() *types.Transaction {
268268

269269
// FirstElement returns the first element from the heap (guaranteed to be lowest), thus, the
270270
// transaction with the lowest nonce. Returns nil if there are no elements.
271-
func (m *sortedMap) FirstElement() *types.Transaction {
271+
func (m *SortedMap) FirstElement() *types.Transaction {
272272
if m.Len() == 0 {
273273
return nil
274274
}
Lines changed: 108 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,108 @@
1+
// Copyright 2025 The op-geth Authors
2+
// This file is part of the op-geth library.
3+
//
4+
// The op-geth library is free software: you can redistribute it and/or modify
5+
// it under the terms of the GNU Lesser General Public License as published by
6+
// the Free Software Foundation, either version 3 of the License, or
7+
// (at your option) any later version.
8+
//
9+
// The op-geth library is distributed in the hope that it will be useful,
10+
// but WITHOUT ANY WARRANTY; without even the implied warranty of
11+
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12+
// GNU Lesser General Public License for more details.
13+
//
14+
// You should have received a copy of the GNU Lesser General Public License
15+
// along with the op-geth library. If not, see <http://www.gnu.org/licenses/>.
16+
17+
package locals
18+
19+
import (
20+
"sync"
21+
"time"
22+
23+
"github.com/ethereum/go-ethereum/core/txpool"
24+
"github.com/ethereum/go-ethereum/core/types"
25+
"github.com/ethereum/go-ethereum/log"
26+
)
27+
28+
// A PoolJournaler periodically journals a transaction pool to disk.
29+
// It is meant to be used instead of the TxTracker when the noLocals and
30+
// journalRemotes settings are enabled.
31+
//
32+
// OP-Stack addition.
33+
type PoolJournaler struct {
34+
journal *journal // Journal of local transactions to back up to disk
35+
rejournal time.Duration // How often to rotate journal
36+
pool *txpool.TxPool // The tx pool to interact with
37+
38+
shutdownCh chan struct{}
39+
wg sync.WaitGroup
40+
}
41+
42+
func NewPoolJournaler(journalPath string, journalTime time.Duration, pool *txpool.TxPool) *PoolJournaler {
43+
return &PoolJournaler{
44+
journal: newTxJournal(journalPath),
45+
rejournal: journalTime,
46+
pool: pool,
47+
shutdownCh: make(chan struct{}),
48+
}
49+
}
50+
51+
// Start implements node.Lifecycle interface
52+
// Start is called after all services have been constructed and the networking
53+
// layer was also initialized to spawn any goroutines required by the service.
54+
func (pj *PoolJournaler) Start() error {
55+
pj.wg.Add(1)
56+
go pj.loop()
57+
return nil
58+
}
59+
60+
// Stop implements node.Lifecycle interface
61+
// Stop terminates all goroutines belonging to the service, blocking until they
62+
// are all terminated.
63+
func (pj *PoolJournaler) Stop() error {
64+
close(pj.shutdownCh)
65+
pj.wg.Wait()
66+
return nil
67+
}
68+
69+
func (pj *PoolJournaler) loop() {
70+
defer log.Info("PoolJournaler: Stopped")
71+
defer pj.wg.Done()
72+
73+
start := time.Now()
74+
log.Info("PoolJournaler: Start loading transactions from journal...")
75+
if err := pj.journal.load(func(transactions []*types.Transaction) []error {
76+
log.Info("PoolJournaler: Start adding transactions to pool", "count", len(transactions), "loading_duration", time.Since(start))
77+
errs := pj.pool.Add(transactions, true)
78+
log.Info("PoolJournaler: Done adding transactions to pool", "total_duration", time.Since(start))
79+
return errs
80+
}); err != nil {
81+
log.Error("PoolJournaler: Transaction journal loading failed. Exiting.", "err", err)
82+
return
83+
}
84+
defer pj.journal.close()
85+
86+
ticker := time.NewTicker(pj.rejournal)
87+
defer ticker.Stop()
88+
89+
journal := func() {
90+
start := time.Now()
91+
tojournal := pj.pool.ToJournal()
92+
if err := pj.journal.rotate(tojournal); err != nil {
93+
log.Error("PoolJournaler: Transaction journal rotation failed", "err", err)
94+
} else {
95+
log.Debug("PoolJournaler: Transaction journal rotated", "count", len(tojournal), "duration", time.Since(start))
96+
}
97+
}
98+
99+
for {
100+
select {
101+
case <-pj.shutdownCh:
102+
journal()
103+
return
104+
case <-ticker.C:
105+
journal()
106+
}
107+
}
108+
}

0 commit comments

Comments
 (0)