From a24725d96d51c7ccbdfaf526aa4c4e01ebcabcc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hern=C3=A1n=20Vanzetto?= <15466498+hvanz@users.noreply.github.com> Date: Mon, 12 Aug 2024 09:40:34 +0300 Subject: [PATCH 01/14] feat(mempool)!: Add new `Contains` method to `Mempool` interface (#3659) Required for #3658 --- - [ ] Tests written/updated - [ ] Changelog entry added in `.changelog` (we use [unclog](https://github.com/informalsystems/unclog) to manage our changelog) - [ ] Updated relevant documentation (`docs/` or `spec/`) and code comments --------- Co-authored-by: Andy Nogueira --- .../3659-mempool-add-contains-method.md | 2 ++ internal/consensus/replay_stubs.go | 16 ++++------------ mempool/clist_mempool.go | 10 +++++----- mempool/mempool.go | 4 ++++ mempool/mocks/mempool.go | 18 ++++++++++++++++++ mempool/nop_mempool.go | 3 +++ 6 files changed, 36 insertions(+), 17 deletions(-) create mode 100644 .changelog/v1.0.0/breaking-changes/3659-mempool-add-contains-method.md diff --git a/.changelog/v1.0.0/breaking-changes/3659-mempool-add-contains-method.md b/.changelog/v1.0.0/breaking-changes/3659-mempool-add-contains-method.md new file mode 100644 index 0000000000..2de89417a6 --- /dev/null +++ b/.changelog/v1.0.0/breaking-changes/3659-mempool-add-contains-method.md @@ -0,0 +1,2 @@ +- `[mempool]` Add a new `Contains` method to the `Mempool` interface. 
+ ([\#3659](https://github.com/cometbft/cometbft/pull/3659)) diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 0cb9b04324..450a93558e 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -26,11 +26,7 @@ func (emptyMempool) SizeBytes() int64 { return 0 } func (emptyMempool) CheckTx(types.Tx, p2p.ID) (*abcicli.ReqRes, error) { return nil, nil } - -func (emptyMempool) RemoveTxByKey(types.TxKey) error { - return nil -} - +func (emptyMempool) RemoveTxByKey(types.TxKey) error { return nil } func (emptyMempool) ReapMaxBytesMaxGas(int64, int64) types.Txs { return types.Txs{} } func (emptyMempool) ReapMaxTxs(int) types.Txs { return types.Txs{} } func (emptyMempool) Update( @@ -44,16 +40,12 @@ func (emptyMempool) Update( } func (emptyMempool) Flush() {} func (emptyMempool) FlushAppConn() error { return nil } +func (emptyMempool) Contains(types.TxKey) bool { return false } func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } func (emptyMempool) EnableTxsAvailable() {} func (emptyMempool) TxsBytes() int64 { return 0 } -func (emptyMempool) InMempool(types.TxKey) bool { return false } - -func (emptyMempool) TxsFront() *clist.CElement { return nil } -func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } - -func (emptyMempool) InitWAL() error { return nil } -func (emptyMempool) CloseWAL() {} +func (emptyMempool) TxsFront() *clist.CElement { return nil } +func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } // ----------------------------------------------------------------------------- // mockProxyApp uses ABCIResponses to give the right results. 
diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index 3058d7209b..ca85278a23 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -103,11 +103,6 @@ func (mem *CListMempool) getCElement(txKey types.TxKey) (*clist.CElement, bool) return nil, false } -func (mem *CListMempool) InMempool(txKey types.TxKey) bool { - _, ok := mem.getCElement(txKey) - return ok -} - func (mem *CListMempool) addToCache(tx types.Tx) bool { return mem.cache.Push(tx) } @@ -214,6 +209,11 @@ func (mem *CListMempool) Flush() { mem.removeAllTxs() } +func (mem *CListMempool) Contains(txKey types.TxKey) bool { + _, ok := mem.getCElement(txKey) + return ok +} + // TxsFront returns the first transaction in the ordered list for peer // goroutines to call .NextWait() on. // FIXME: leaking implementation details! diff --git a/mempool/mempool.go b/mempool/mempool.go index 895b633079..cc271dec7a 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -80,6 +80,10 @@ type Mempool interface { // Flush removes all transactions from the mempool and caches. Flush() + // Contains returns true iff the transaction, identified by its key, is in + // the mempool. + Contains(txKey types.TxKey) bool + // TxsAvailable returns a channel which fires once for every height, and only // when transactions are available in the mempool. 
// diff --git a/mempool/mocks/mempool.go b/mempool/mocks/mempool.go index 3f36c03ce7..aa1ceaaf30 100644 --- a/mempool/mocks/mempool.go +++ b/mempool/mocks/mempool.go @@ -50,6 +50,24 @@ func (_m *Mempool) CheckTx(tx types.Tx, sender p2p.ID) (*abcicli.ReqRes, error) return r0, r1 } +// Contains provides a mock function with given fields: txKey +func (_m *Mempool) Contains(txKey types.TxKey) bool { + ret := _m.Called(txKey) + + if len(ret) == 0 { + panic("no return value specified for Contains") + } + + var r0 bool + if rf, ok := ret.Get(0).(func(types.TxKey) bool); ok { + r0 = rf(txKey) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + // EnableTxsAvailable provides a mock function with no fields func (_m *Mempool) EnableTxsAvailable() { _m.Called() diff --git a/mempool/nop_mempool.go b/mempool/nop_mempool.go index f31a192787..5258825c3c 100644 --- a/mempool/nop_mempool.go +++ b/mempool/nop_mempool.go @@ -60,6 +60,9 @@ func (*NopMempool) FlushAppConn() error { return nil } // Flush does nothing. func (*NopMempool) Flush() {} +// Contains always returns false. +func (*NopMempool) Contains(types.TxKey) bool { return false } + // TxsAvailable always returns nil. 
func (*NopMempool) TxsAvailable() <-chan struct{} { return nil From c48f0af6736408bdd366fb2d8a71e27d02ca3247 Mon Sep 17 00:00:00 2001 From: mmsqe Date: Wed, 29 May 2024 16:18:32 +0800 Subject: [PATCH 02/14] feat: add api to allow get unconfirmed_tx by hash (#3109) --- - [ ] Tests written/updated - [ ] Changelog entry added in `.changelog` (we use [unclog](https://github.com/informalsystems/unclog) to manage our changelog) - [ ] Updated relevant documentation (`docs/` or `spec/`) and code comments - [ ] Title follows the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) spec --------- Co-authored-by: Anton Kaliaev --- .../v1.0.0/features/3079-unconfirmed-tx.md | 2 ++ internal/consensus/replay_stubs.go | 1 + light/proxy/routes.go | 9 +++++++ light/rpc/client.go | 4 +++ mempool/clist_mempool.go | 8 ++++++ mempool/mempool.go | 4 +++ mempool/mocks/mempool.go | 20 ++++++++++++++ mempool/nop_mempool.go | 3 +++ rpc/client/http/http.go | 13 ++++++++++ rpc/client/interface.go | 1 + rpc/client/local/local.go | 4 +++ rpc/client/mocks/client.go | 23 ++++++++++++++++ rpc/client/rpc_test.go | 26 +++++++++++++++++++ rpc/core/mempool.go | 7 +++++ rpc/core/routes.go | 1 + rpc/core/types/responses.go | 5 ++++ 16 files changed, 131 insertions(+) create mode 100644 .changelog/v1.0.0/features/3079-unconfirmed-tx.md diff --git a/.changelog/v1.0.0/features/3079-unconfirmed-tx.md b/.changelog/v1.0.0/features/3079-unconfirmed-tx.md new file mode 100644 index 0000000000..ddc4640117 --- /dev/null +++ b/.changelog/v1.0.0/features/3079-unconfirmed-tx.md @@ -0,0 +1,2 @@ +- `[rpc]` Add `unconfirmed_tx` to support query mempool transaction by transaction hash. 
+ ([\#3079](https://github.com/cometbft/cometbft/pull/3079)) diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 450a93558e..87f5f894c9 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -28,6 +28,7 @@ func (emptyMempool) CheckTx(types.Tx, p2p.ID) (*abcicli.ReqRes, error) { } func (emptyMempool) RemoveTxByKey(types.TxKey) error { return nil } func (emptyMempool) ReapMaxBytesMaxGas(int64, int64) types.Txs { return types.Txs{} } +func (emptyMempool) GetTxByHash([]byte) types.Tx { return types.Tx{} } func (emptyMempool) ReapMaxTxs(int) types.Txs { return types.Txs{} } func (emptyMempool) Update( int64, diff --git a/light/proxy/routes.go b/light/proxy/routes.go index e779f3c384..1b38606d0e 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -37,6 +37,7 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { "dump_consensus_state": rpcserver.NewRPCFunc(makeDumpConsensusStateFunc(c), ""), "consensus_state": rpcserver.NewRPCFunc(makeConsensusStateFunc(c), ""), "consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height", rpcserver.Cacheable("height")), + "unconfirmed_tx": rpcserver.NewRPCFunc(makeUnconfirmedTxFunc(c), "hash"), "unconfirmed_txs": rpcserver.NewRPCFunc(makeUnconfirmedTxsFunc(c), "limit"), "num_unconfirmed_txs": rpcserver.NewRPCFunc(makeNumUnconfirmedTxsFunc(c), ""), @@ -231,6 +232,14 @@ func makeConsensusParamsFunc(c *lrpc.Client) rpcConsensusParamsFunc { } } +type rpcUnconfirmedTxFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultUnconfirmedTx, error) + +func makeUnconfirmedTxFunc(c *lrpc.Client) rpcUnconfirmedTxFunc { + return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultUnconfirmedTx, error) { + return c.UnconfirmedTx(ctx.Context(), hash) + } +} + type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) func makeUnconfirmedTxsFunc(c *lrpc.Client) rpcUnconfirmedTxsFunc 
{ diff --git a/light/rpc/client.go b/light/rpc/client.go index 4071ca01a7..b97f5f8609 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -203,6 +203,10 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.Resu return c.next.BroadcastTxSync(ctx, tx) } +func (c *Client) UnconfirmedTx(ctx context.Context, hash []byte) (*ctypes.ResultUnconfirmedTx, error) { + return c.next.UnconfirmedTx(ctx, hash) +} + func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { return c.next.UnconfirmedTxs(ctx, limit) } diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index ca85278a23..9027ee5ca8 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -565,6 +565,14 @@ func (mem *CListMempool) ReapMaxTxs(max int) types.Txs { return txs } +// GetTxByHash returns the types.Tx with the given hash if found in the mempool, otherwise returns nil. +func (mem *CListMempool) GetTxByHash(hash []byte) types.Tx { + if elem, ok := mem.getCElement(types.TxKey(hash)); ok { + return elem.Value.(*mempoolTx).tx + } + return nil +} + // Lock() must be help by the caller during execution. // TODO: this function always returns nil; remove the return value. func (mem *CListMempool) Update( diff --git a/mempool/mempool.go b/mempool/mempool.go index cc271dec7a..8ae3462c82 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -45,6 +45,10 @@ type Mempool interface { // (~ all available transactions). ReapMaxTxs(max int) types.Txs + // GetTxByHash returns the types.Tx with the given hash if found in the mempool, + // otherwise returns nil. + GetTxByHash(hash []byte) types.Tx + // Lock locks the mempool. The consensus must be able to hold lock to safely // update. 
Lock() diff --git a/mempool/mocks/mempool.go b/mempool/mocks/mempool.go index aa1ceaaf30..beb776011d 100644 --- a/mempool/mocks/mempool.go +++ b/mempool/mocks/mempool.go @@ -96,6 +96,26 @@ func (_m *Mempool) FlushAppConn() error { return r0 } +// GetTxByHash provides a mock function with given fields: hash +func (_m *Mempool) GetTxByHash(hash []byte) types.Tx { + ret := _m.Called(hash) + + if len(ret) == 0 { + panic("no return value specified for GetTxByHash") + } + + var r0 types.Tx + if rf, ok := ret.Get(0).(func([]byte) types.Tx); ok { + r0 = rf(hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Tx) + } + } + + return r0 +} + // Lock provides a mock function with no fields func (_m *Mempool) Lock() { _m.Called() diff --git a/mempool/nop_mempool.go b/mempool/nop_mempool.go index 5258825c3c..b7909da04b 100644 --- a/mempool/nop_mempool.go +++ b/mempool/nop_mempool.go @@ -35,6 +35,9 @@ func (*NopMempool) ReapMaxBytesMaxGas(int64, int64) types.Txs { return nil } // ReapMaxTxs always returns nil. func (*NopMempool) ReapMaxTxs(int) types.Txs { return nil } +// GetTxByHash always returns nil. +func (*NopMempool) GetTxByHash([]byte) types.Tx { return nil } + // Lock does nothing. 
func (*NopMempool) Lock() {} diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 70982d0fb9..b60d4ebdec 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -286,6 +286,19 @@ func (c *baseRPCClient) broadcastTX( return result, nil } +func (c *baseRPCClient) UnconfirmedTx( + ctx context.Context, + hash []byte, +) (*ctypes.ResultUnconfirmedTx, error) { + result := new(ctypes.ResultUnconfirmedTx) + params := map[string]any{"hash": hash} + _, err := c.caller.Call(ctx, "unconfirmed_tx", params, result) + if err != nil { + return nil, err + } + return result, nil +} + func (c *baseRPCClient) UnconfirmedTxs( ctx context.Context, limit *int, diff --git a/rpc/client/interface.go b/rpc/client/interface.go index c8660753e2..b06d8b5dc8 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -134,6 +134,7 @@ type EventsClient interface { // MempoolClient shows us data about current mempool state. type MempoolClient interface { + UnconfirmedTx(ctx context.Context, hash []byte) (*ctypes.ResultUnconfirmedTx, error) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 783711499f..44864cbb97 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -98,6 +98,10 @@ func (c *Local) BroadcastTxSync(_ context.Context, tx types.Tx) (*ctypes.ResultB return c.env.BroadcastTxSync(c.ctx, tx) } +func (c *Local) UnconfirmedTx(_ context.Context, hash []byte) (*ctypes.ResultUnconfirmedTx, error) { + return c.env.UnconfirmedTx(c.ctx, hash) +} + func (c *Local) UnconfirmedTxs(_ context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { return c.env.UnconfirmedTxs(c.ctx, limit) } diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index 
b8cb781600..595305d32d 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -797,6 +797,29 @@ func (_m *Client) TxSearch(ctx context.Context, query string, prove bool, page * return r0, r1 } +// UnconfirmedTx provides a mock function with given fields: ctx, hash +func (_m *Client) UnconfirmedTx(ctx context.Context, hash []byte) (*coretypes.ResultUnconfirmedTx, error) { + ret := _m.Called(ctx, hash) + + var r0 *coretypes.ResultUnconfirmedTx + if rf, ok := ret.Get(0).(func(context.Context, []byte) *coretypes.ResultUnconfirmedTx); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, []byte) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // UnconfirmedTxs provides a mock function with given fields: ctx, limit func (_m *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { ret := _m.Called(ctx, limit) diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index e725d9d6c3..49cdd64fe9 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -368,6 +368,32 @@ func TestBroadcastTxCommit(t *testing.T) { } } +func TestUnconfirmedTx(t *testing.T) { + _, _, tx := MakeTxKV() + + ch := make(chan *abci.CheckTxResponse, 1) + mempool := node.Mempool() + reqRes, err := mempool.CheckTx(tx, "") + require.NoError(t, err) + ch <- reqRes.Response.GetCheckTx() + + // wait for tx to arrive in mempoool. 
+ select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") + } + target := types.Tx(tx) + for _, c := range GetClients() { + mc := c.(client.MempoolClient) + res, err := mc.UnconfirmedTx(context.Background(), target.Hash()) + require.NoError(t, err) + assert.Exactly(t, target, res.Tx) + } + + mempool.Flush() +} + func TestUnconfirmedTxs(t *testing.T) { _, _, tx := MakeTxKV() diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 0061a44d28..7bd9abd0cc 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -183,6 +183,13 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* } } +// UnconfirmedTx gets unconfirmed transaction by hash. +func (env *Environment) UnconfirmedTx(_ *rpctypes.Context, hash []byte) (*ctypes.ResultUnconfirmedTx, error) { + return &ctypes.ResultUnconfirmedTx{ + Tx: env.Mempool.GetTxByHash(hash), + }, nil +} + // UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) // including their number. 
// More: https://docs.cometbft.com/main/rpc/#/Info/unconfirmed_txs diff --git a/rpc/core/routes.go b/rpc/core/routes.go index c7c13a5278..827c4f31b1 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -37,6 +37,7 @@ func (env *Environment) GetRoutes() RoutesMap { "dump_consensus_state": rpc.NewRPCFunc(env.DumpConsensusState, ""), "consensus_state": rpc.NewRPCFunc(env.GetConsensusState, ""), "consensus_params": rpc.NewRPCFunc(env.ConsensusParams, "height", rpc.Cacheable("height")), + "unconfirmed_tx": rpc.NewRPCFunc(env.UnconfirmedTx, "hash"), "unconfirmed_txs": rpc.NewRPCFunc(env.UnconfirmedTxs, "limit"), "num_unconfirmed_txs": rpc.NewRPCFunc(env.NumUnconfirmedTxs, ""), diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index c2a0aed91c..9e1bb998d4 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -216,6 +216,11 @@ type ResultBlockSearch struct { TotalCount int `json:"total_count"` } +// Single mempool tx. +type ResultUnconfirmedTx struct { + Tx types.Tx `json:"tx"` +} + // List of mempool txs. type ResultUnconfirmedTxs struct { Count int `json:"n_txs"` From 875328aede5d980a7c7d676f0530b6d2455e8ac6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hern=C3=A1n=20Vanzetto?= <15466498+hvanz@users.noreply.github.com> Date: Thu, 15 Aug 2024 17:51:55 +0300 Subject: [PATCH 03/14] fix(mempool): Update variables in `CListMempool` atomically (#3700) Fixes #3694 This PR replaces: ``` type CListMempool struct { txsBytes atomic.Int64 // total size of mempool, in bytes ... txs *clist.CList txsMap sync.Map ... } ``` by ``` type CListMempool struct { ... txsMtx cmtsync.RWMutex txs *clist.CList // concurrent linked-list of valid txs txsMap map[types.TxKey]*clist.CElement // for quick access to txs txsBytes int64 // total size of mempool, in bytes ... } ``` The new `txsMtx` mutex is used every time the variables `txs`, `txsMap`, and `txsBytes` are accessed. 
--- - [ ] Tests written/updated - [x] Changelog entry added in `.changelog` (we use [unclog](https://github.com/informalsystems/unclog) to manage our changelog) - [ ] Updated relevant documentation (`docs/` or `spec/`) and code comments - [ ] Title follows the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) spec --------- Co-authored-by: Andy Nogueira Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- ...694-mempool-update-variables-atomically.md | 3 + mempool/clist_mempool.go | 132 +++++++++++------- 2 files changed, 82 insertions(+), 53 deletions(-) create mode 100644 .changelog/v1.0.0/bug-fixes/3694-mempool-update-variables-atomically.md diff --git a/.changelog/v1.0.0/bug-fixes/3694-mempool-update-variables-atomically.md b/.changelog/v1.0.0/bug-fixes/3694-mempool-update-variables-atomically.md new file mode 100644 index 0000000000..d6c9c6b25b --- /dev/null +++ b/.changelog/v1.0.0/bug-fixes/3694-mempool-update-variables-atomically.md @@ -0,0 +1,3 @@ +- `[mempool]` Fix race condition when accessing entries by updating variables in + `CListMempool` atomically. + ([\#3694](https://github.com/cometbft/cometbft/issues/3694)) \ No newline at end of file diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index 9027ee5ca8..ee1afcad95 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "sync" "sync/atomic" "time" @@ -26,8 +25,7 @@ import ( // mempool uses a concurrent list structure for storing transactions that can // be efficiently accessed by multiple concurrent readers. type CListMempool struct { - height atomic.Int64 // the last block Update()'d to - txsBytes atomic.Int64 // total size of mempool, in bytes + height atomic.Int64 // the last block Update()'d to // notify listeners (ie. 
consensus) when txs are available notifiedTxsAvailable atomic.Bool @@ -46,11 +44,11 @@ type CListMempool struct { // Keeps track of the rechecking process. recheck *recheck - // Concurrent linked-list of valid txs. - // `txsMap`: txKey -> CElement is for quick access to txs. - // Transactions in both `txs` and `txsMap` must to be kept in sync. - txs *clist.CList - txsMap sync.Map + // Data in `txs` and `txsMap` must to be kept in sync and updated atomically. + txsMtx cmtsync.RWMutex + txs *clist.CList // concurrent linked-list of valid txs + txsMap map[types.TxKey]*clist.CElement // for quick access to txs + txsBytes int64 // total size of mempool, in bytes // Keep a cache of already-seen txs. // This reduces the pressure on the proxyApp. @@ -77,6 +75,7 @@ func NewCListMempool( config: cfg, proxyAppConn: proxyAppConn, txs: clist.New(), + txsMap: make(map[types.TxKey]*clist.CElement), recheck: newRecheck(), logger: log.NewNopLogger(), metrics: NopMetrics(), @@ -96,13 +95,6 @@ func NewCListMempool( return mp } -func (mem *CListMempool) getCElement(txKey types.TxKey) (*clist.CElement, bool) { - if e, ok := mem.txsMap.Load(txKey); ok { - return e.(*clist.CElement), true - } - return nil, false -} - func (mem *CListMempool) addToCache(tx types.Tx) bool { return mem.cache.Push(tx) } @@ -121,15 +113,38 @@ func (mem *CListMempool) tryRemoveFromCache(tx types.Tx) { } func (mem *CListMempool) removeAllTxs() { + mem.txsMtx.Lock() + defer mem.txsMtx.Unlock() + for e := mem.txs.Front(); e != nil; e = e.Next() { mem.txs.Remove(e) e.DetachPrev() } + mem.txsMap = make(map[types.TxKey]*clist.CElement) + mem.txsBytes = 0 +} - mem.txsMap.Range(func(key, _ any) bool { - mem.txsMap.Delete(key) - return true - }) +// addSender adds a peer ID to the list of senders on the entry corresponding to +// tx, identified by its key. 
+func (mem *CListMempool) addSender(txKey types.TxKey, sender p2p.ID) error { + if sender == "" { + return nil + } + + mem.txsMtx.Lock() + defer mem.txsMtx.Unlock() + + elem, ok := mem.txsMap[txKey] + if !ok { + return ErrTxNotFound + } + + memTx := elem.Value.(*mempoolTx) + if found := memTx.addSender(sender); found { + // It should not be possible to receive twice a tx from the same sender. + return ErrTxAlreadyReceivedFromSender + } + return nil } // NOTE: not thread safe - should only be called once, on startup. @@ -185,7 +200,10 @@ func (mem *CListMempool) Size() int { // Safe for concurrent use by multiple goroutines. func (mem *CListMempool) SizeBytes() int64 { - return mem.txsBytes.Load() + mem.txsMtx.RLock() + defer mem.txsMtx.RUnlock() + + return mem.txsBytes } // Lock() must be help by the caller during execution. @@ -203,14 +221,15 @@ func (mem *CListMempool) Flush() { mem.updateMtx.Lock() defer mem.updateMtx.Unlock() - mem.txsBytes.Store(0) mem.cache.Reset() - mem.removeAllTxs() } func (mem *CListMempool) Contains(txKey types.TxKey) bool { - _, ok := mem.getCElement(txKey) + mem.txsMtx.RLock() + defer mem.txsMtx.RUnlock() + + _, ok := mem.txsMap[txKey] return ok } @@ -270,18 +289,12 @@ func (mem *CListMempool) CheckTx(tx types.Tx, sender p2p.ID) (*abcicli.ReqRes, e if added := mem.addToCache(tx); !added { mem.metrics.AlreadyReceivedTxs.Add(1) - if sender != "" { - // Record a new sender for a tx we've already seen. - // Note it's possible a tx is still in the cache but no longer in the mempool - // (eg. after committing a block, txs are removed from mempool but not cache), - // so we only record the sender for txs still in the mempool. - if elem, ok := mem.getCElement(tx.Key()); ok { - memTx := elem.Value.(*mempoolTx) - if found := memTx.addSender(sender); found { - // It should not be possible to receive twice a tx from the same sender. 
- mem.logger.Error("Tx already received from peer", "tx", log.NewLazySprintf("%X", tx.Hash()), "sender", sender) - } - } + // Record a new sender for a tx we've already seen. + // Note it's possible a tx is still in the cache but no longer in the mempool + // (eg. after committing a block, txs are removed from mempool but not cache), + // so we only record the sender for txs still in the mempool. + if err := mem.addSender(tx.Key(), sender); err != nil { + mem.logger.Error("Could not add sender to tx", "tx", tx.Hash(), "sender", sender, "err", err) } // TODO: consider punishing peer for dups, // its non-trivial since invalid txs can become valid, @@ -349,15 +362,19 @@ func (mem *CListMempool) handleCheckTxResponse(tx types.Tx, sender p2p.ID) func( // Check that tx is not already in the mempool. This can happen when the // cache overflows. See https://github.com/cometbft/cometbft/pull/890. - if elem, ok := mem.getCElement(tx.Key()); ok { + txKey := tx.Key() + if mem.Contains(txKey) { mem.metrics.RejectedTxs.Add(1) // Update senders on existing entry. - memTx := elem.Value.(*mempoolTx) - if found := memTx.addSender(sender); found { - // It should not be possible to receive twice a tx from the same sender. 
- mem.logger.Error("Tx already received from peer", "tx", tx.Hash(), "sender", sender) + if err := mem.addSender(txKey, sender); err != nil { + mem.logger.Error("Could not add sender to tx", "tx", tx.Hash(), "sender", sender, "err", err) } - mem.logger.Debug("Reject tx", "tx", log.NewLazySprintf("%X", tx.Hash()), "height", mem.height.Load(), "err", ErrTxInMempool) + mem.logger.Debug( + "transaction already in mempool, not adding it again", + "tx", tx.Hash(), + "height", mem.height.Load(), + "total", mem.Size(), + ) return ErrTxInMempool } @@ -378,17 +395,20 @@ func (mem *CListMempool) handleCheckTxResponse(tx types.Tx, sender p2p.ID) func( } } -// Called from: -// - handleCheckTxResponse (lock not held) if tx is valid +// Called from handleCheckTxResponse only on valid txs. +// updateMtx is held when using the local ABCI client but not with an async client. func (mem *CListMempool) addTx(memTx *mempoolTx, sender p2p.ID) { + mem.txsMtx.Lock() + defer mem.txsMtx.Unlock() + tx := memTx.tx - txKey := tx.Key() // Add new transaction. _ = memTx.addSender(sender) e := mem.txs.PushBack(memTx) - mem.txsMap.Store(txKey, e) - mem.txsBytes.Add(int64(len(tx))) + mem.txsMap[tx.Key()] = e + mem.txsBytes += int64(len(tx)) + mem.metrics.TxSizeBytes.Observe(float64(len(tx))) mem.logger.Debug( @@ -401,20 +421,23 @@ func (mem *CListMempool) addTx(memTx *mempoolTx, sender p2p.ID) { // RemoveTxByKey removes a transaction from the mempool by its TxKey index. 
// Called from: -// - Update (lock held) if tx was committed -// - handleRecheckTxResponse (lock not held) if tx was invalidated +// - Update (updateMtx held) if tx was committed +// - handleRecheckTxResponse (updateMtx not held) if tx was invalidated func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error { - elem, ok := mem.getCElement(txKey) + mem.txsMtx.Lock() + defer mem.txsMtx.Unlock() + + elem, ok := mem.txsMap[txKey] if !ok { return ErrTxNotFound } mem.txs.Remove(elem) elem.DetachPrev() - mem.txsMap.Delete(txKey) + delete(mem.txsMap, txKey) tx := elem.Value.(*mempoolTx).tx - mem.txsBytes.Add(int64(-len(tx))) - mem.logger.Debug("Removed transaction", "tx", log.NewLazySprintf("%X", tx.Hash()), "height", mem.height.Load(), "total", mem.Size()) + mem.txsBytes -= int64(len(tx)) + mem.logger.Debug("removed transaction", "tx", tx.Hash(), "height", mem.height.Load(), "total", mem.Size()) return nil } @@ -567,7 +590,10 @@ func (mem *CListMempool) ReapMaxTxs(max int) types.Txs { // GetTxByHash returns the types.Tx with the given hash if found in the mempool, otherwise returns nil. 
func (mem *CListMempool) GetTxByHash(hash []byte) types.Tx { - if elem, ok := mem.getCElement(types.TxKey(hash)); ok { + mem.txsMtx.RLock() + defer mem.txsMtx.RUnlock() + + if elem, ok := mem.txsMap[types.TxKey(hash)]; ok { return elem.Value.(*mempoolTx).tx } return nil From 37226e3d00e145cabb004c4a0313de8e4df9780c Mon Sep 17 00:00:00 2001 From: hvanz Date: Wed, 25 Sep 2024 13:17:16 +0200 Subject: [PATCH 04/14] squashed feature/mempool-qos into main --- .../2803-mempool-NewCListMempool.md | 2 + .../2803-mempool-replace-iterator.md | 2 + .../2803-proto-mempool-lane.md | 3 + .../2803-remove-TxsFront-TxsWaitChan.md | 2 + .../3634-node-handshake-goapi.md | 2 + .../3980-kvstore-new-application.md | 2 + .../deprecations/3506-mempool-metrics-size.md | 4 + .../v1.0.0/features/2803-mempool-lanes.md | 2 + .../features/2803-proto-mempool-lane.md | 3 + .../3506-mempool-lanes-add-metrics.md | 2 + .../v1.0.0/features/3622-e2e-mempool-lanes.md | 2 + .../features/3622-kvstore-mempool-lanes.md | 2 + .../features/3634-query-app-for-lanes-info.md | 2 + .../v1.0.0/features/3825-e2e-lane-weights.md | 3 + .../features/4005-e2e-lanes-manifest.md | 2 + abci/example/kvstore/kvstore.go | 116 ++- abci/example/kvstore/kvstore_test.go | 28 + api/cometbft/abci/v1/types.pb.go | 661 ++++++++++++------ docs/explanation/core/metrics.md | 2 + internal/consensus/byzantine_test.go | 1 + internal/consensus/common_test.go | 25 +- internal/consensus/mempool_test.go | 22 +- internal/consensus/pbts_test.go | 2 +- internal/consensus/reactor_test.go | 2 + internal/consensus/replay.go | 12 +- internal/consensus/replay_test.go | 24 +- internal/consensus/state_test.go | 1 + mempool/bench_test.go | 4 +- mempool/clist_mempool.go | 472 ++++++++----- mempool/clist_mempool_test.go | 183 +++-- mempool/errors.go | 53 ++ mempool/iterators.go | 230 ++++++ mempool/iterators_test.go | 511 ++++++++++++++ mempool/lanes_info.go | 52 ++ mempool/mempoolTx.go | 4 + mempool/metrics.gen.go | 27 +- mempool/metrics.go | 15 + 
mempool/reactor.go | 3 +- mempool/reactor_test.go | 43 +- node/node.go | 9 +- node/node_test.go | 2 + node/setup.go | 10 +- proto/cometbft/abci/v1/types.proto | 5 + scripts/qa/reporting/latency_plotter.py | 78 ++- scripts/qa/reporting/latency_throughput.py | 126 +++- test/e2e/app/app.go | 84 ++- test/e2e/generator/generate.go | 3 + test/e2e/node/config.go | 5 + test/e2e/pkg/manifest.go | 13 + test/e2e/pkg/testnet.go | 74 +- test/e2e/runner/load.go | 1 + test/e2e/runner/setup.go | 2 + test/fuzz/mempool/checktx.go | 2 +- test/fuzz/tests/mempool_test.go | 2 +- test/loadtime/cmd/report/main.go | 13 +- test/loadtime/payload/payload.pb.go | 21 +- test/loadtime/payload/payload.proto | 1 + test/loadtime/report/report.go | 9 +- version/version.go | 2 +- 59 files changed, 2425 insertions(+), 565 deletions(-) create mode 100644 .changelog/v1.0.0/breaking-changes/2803-mempool-NewCListMempool.md create mode 100644 .changelog/v1.0.0/breaking-changes/2803-mempool-replace-iterator.md create mode 100644 .changelog/v1.0.0/breaking-changes/2803-proto-mempool-lane.md create mode 100644 .changelog/v1.0.0/breaking-changes/2803-remove-TxsFront-TxsWaitChan.md create mode 100644 .changelog/v1.0.0/breaking-changes/3634-node-handshake-goapi.md create mode 100644 .changelog/v1.0.0/breaking-changes/3980-kvstore-new-application.md create mode 100644 .changelog/v1.0.0/deprecations/3506-mempool-metrics-size.md create mode 100644 .changelog/v1.0.0/features/2803-mempool-lanes.md create mode 100644 .changelog/v1.0.0/features/2803-proto-mempool-lane.md create mode 100644 .changelog/v1.0.0/features/3506-mempool-lanes-add-metrics.md create mode 100644 .changelog/v1.0.0/features/3622-e2e-mempool-lanes.md create mode 100644 .changelog/v1.0.0/features/3622-kvstore-mempool-lanes.md create mode 100644 .changelog/v1.0.0/features/3634-query-app-for-lanes-info.md create mode 100644 .changelog/v1.0.0/features/3825-e2e-lane-weights.md create mode 100644 .changelog/v1.0.0/features/4005-e2e-lanes-manifest.md 
create mode 100644 mempool/iterators.go create mode 100644 mempool/iterators_test.go create mode 100644 mempool/lanes_info.go diff --git a/.changelog/v1.0.0/breaking-changes/2803-mempool-NewCListMempool.md b/.changelog/v1.0.0/breaking-changes/2803-mempool-NewCListMempool.md new file mode 100644 index 0000000000..88748d666b --- /dev/null +++ b/.changelog/v1.0.0/breaking-changes/2803-mempool-NewCListMempool.md @@ -0,0 +1,2 @@ +- `[mempool]` Add new parameter `lanesInfo *LanesInfo` to `NewCListMempool` +([\#2803](https://github.com/cometbft/cometbft/issues/2803)) diff --git a/.changelog/v1.0.0/breaking-changes/2803-mempool-replace-iterator.md b/.changelog/v1.0.0/breaking-changes/2803-mempool-replace-iterator.md new file mode 100644 index 0000000000..f4191edce6 --- /dev/null +++ b/.changelog/v1.0.0/breaking-changes/2803-mempool-replace-iterator.md @@ -0,0 +1,2 @@ +- `[mempool]` Removed `CListIterator`; use `BlockingIterator` instead + ([\#2803](https://github.com/cometbft/cometbft/issues/2803)). diff --git a/.changelog/v1.0.0/breaking-changes/2803-proto-mempool-lane.md b/.changelog/v1.0.0/breaking-changes/2803-proto-mempool-lane.md new file mode 100644 index 0000000000..b716225454 --- /dev/null +++ b/.changelog/v1.0.0/breaking-changes/2803-proto-mempool-lane.md @@ -0,0 +1,3 @@ +- `[types/proto]` Extend `CheckTxResponse` with new `lane_id` field and `InfoResponse` with + `lane_priorities` and `default_lane` fields + ([#2803](https://github.com/cometbft/cometbft/issues/2803)) diff --git a/.changelog/v1.0.0/breaking-changes/2803-remove-TxsFront-TxsWaitChan.md b/.changelog/v1.0.0/breaking-changes/2803-remove-TxsFront-TxsWaitChan.md new file mode 100644 index 0000000000..68e9a16051 --- /dev/null +++ b/.changelog/v1.0.0/breaking-changes/2803-remove-TxsFront-TxsWaitChan.md @@ -0,0 +1,2 @@ +- `[mempool]` Remove methods `TxsFront` and `TxsWaitChan` from `CListMempool`. They should be + replaced by the new iterators ([\#2803](https://github.com/cometbft/cometbft/issues/2803)). 
diff --git a/.changelog/v1.0.0/breaking-changes/3634-node-handshake-goapi.md b/.changelog/v1.0.0/breaking-changes/3634-node-handshake-goapi.md
new file mode 100644
index 0000000000..4da9b1938e
--- /dev/null
+++ b/.changelog/v1.0.0/breaking-changes/3634-node-handshake-goapi.md
@@ -0,0 +1,2 @@
+- `[consensus/replay]` `Handshake` now takes an additional parameter of type `*abci.InfoResponse` as input
+ ([#3634](https://github.com/cometbft/cometbft/pull/3634))
diff --git a/.changelog/v1.0.0/breaking-changes/3980-kvstore-new-application.md b/.changelog/v1.0.0/breaking-changes/3980-kvstore-new-application.md
new file mode 100644
index 0000000000..8f8c2219e8
--- /dev/null
+++ b/.changelog/v1.0.0/breaking-changes/3980-kvstore-new-application.md
@@ -0,0 +1,2 @@
+- `[kvstore]` Function `NewApplication` now has an extra `lanes map[string]uint32` parameter
+ ([\#3980](https://github.com/cometbft/cometbft/pull/3980))
diff --git a/.changelog/v1.0.0/deprecations/3506-mempool-metrics-size.md b/.changelog/v1.0.0/deprecations/3506-mempool-metrics-size.md
new file mode 100644
index 0000000000..33d0a6332b
--- /dev/null
+++ b/.changelog/v1.0.0/deprecations/3506-mempool-metrics-size.md
@@ -0,0 +1,4 @@
+- `[mempool/metrics]` Mark metrics `mempool_size` and `mempool_size_bytes` as
+ deprecated, as now they can be obtained, respectively, as the sum of
+ `mempool_lane_size` and `mempool_lane_bytes`
+ ([\#3506](https://github.com/cometbft/cometbft/issues/3506)).
diff --git a/.changelog/v1.0.0/features/2803-mempool-lanes.md b/.changelog/v1.0.0/features/2803-mempool-lanes.md
new file mode 100644
index 0000000000..8d079f691a
--- /dev/null
+++ b/.changelog/v1.0.0/features/2803-mempool-lanes.md
@@ -0,0 +1,2 @@
+- `[mempool]` Add Lanes to the mempool for providing Quality of Service guarantees
+([#2803](https://github.com/cometbft/cometbft/issues/2803))
\ No newline at end of file
diff --git a/.changelog/v1.0.0/features/2803-proto-mempool-lane.md b/.changelog/v1.0.0/features/2803-proto-mempool-lane.md
new file mode 100644
index 0000000000..b716225454
--- /dev/null
+++ b/.changelog/v1.0.0/features/2803-proto-mempool-lane.md
@@ -0,0 +1,3 @@
+- `[types/proto]` Extend `CheckTxResponse` with new `lane_id` field and `InfoResponse` with
+ `lane_priorities` and `default_lane` fields
+ ([#2803](https://github.com/cometbft/cometbft/issues/2803))
diff --git a/.changelog/v1.0.0/features/3506-mempool-lanes-add-metrics.md b/.changelog/v1.0.0/features/3506-mempool-lanes-add-metrics.md
new file mode 100644
index 0000000000..9da33294d6
--- /dev/null
+++ b/.changelog/v1.0.0/features/3506-mempool-lanes-add-metrics.md
@@ -0,0 +1,2 @@
+- `[metrics]` Add new mempool metrics `lane_size`, `lane_bytes`, and `tx_life_span`
+ ([#3506](https://github.com/cometbft/cometbft/issues/3506)).
diff --git a/.changelog/v1.0.0/features/3622-e2e-mempool-lanes.md b/.changelog/v1.0.0/features/3622-e2e-mempool-lanes.md
new file mode 100644
index 0000000000..7d3a01c5b7
--- /dev/null
+++ b/.changelog/v1.0.0/features/3622-e2e-mempool-lanes.md
@@ -0,0 +1,2 @@
+- `[e2e]` Added support for mempool lanes in e2e.
+ ([#3622](https://github.com/cometbft/cometbft/pull/3622)) diff --git a/.changelog/v1.0.0/features/3622-kvstore-mempool-lanes.md b/.changelog/v1.0.0/features/3622-kvstore-mempool-lanes.md new file mode 100644 index 0000000000..6d71cc2c39 --- /dev/null +++ b/.changelog/v1.0.0/features/3622-kvstore-mempool-lanes.md @@ -0,0 +1,2 @@ +- `[kvstore]` Extended `CheckTx` in kvstoreApp to support mempool lanes. + ([#3622](https://github.com/cometbft/cometbft/pull/3622)) diff --git a/.changelog/v1.0.0/features/3634-query-app-for-lanes-info.md b/.changelog/v1.0.0/features/3634-query-app-for-lanes-info.md new file mode 100644 index 0000000000..ee89cb520e --- /dev/null +++ b/.changelog/v1.0.0/features/3634-query-app-for-lanes-info.md @@ -0,0 +1,2 @@ +- `[node]` Move the ABCI `Info` call from the `Handshake` function to the `NewNodeWithCliParams` function. + ([#3634](https://github.com/cometbft/cometbft/pull/3634)) diff --git a/.changelog/v1.0.0/features/3825-e2e-lane-weights.md b/.changelog/v1.0.0/features/3825-e2e-lane-weights.md new file mode 100644 index 0000000000..5bcd2da4ff --- /dev/null +++ b/.changelog/v1.0.0/features/3825-e2e-lane-weights.md @@ -0,0 +1,3 @@ +- `[e2e]` Add `load_lane_weights` option to manifest for generating transactions with + lanes picked randomly and proportional to their weight. + ([\#3825](https://github.com/cometbft/cometbft/pull/3825)). 
\ No newline at end of file diff --git a/.changelog/v1.0.0/features/4005-e2e-lanes-manifest.md b/.changelog/v1.0.0/features/4005-e2e-lanes-manifest.md new file mode 100644 index 0000000000..f085df68b7 --- /dev/null +++ b/.changelog/v1.0.0/features/4005-e2e-lanes-manifest.md @@ -0,0 +1,2 @@ +- `[e2e]` Add `lanes` and `no_lanes` to manifest to customize the list of lanes the app should use +([#4005](https://github.com/cometbft/cometbft/issues/4005)) \ No newline at end of file diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index ac2016f702..e79eb59f32 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "encoding/binary" "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -26,6 +27,7 @@ var ( const ( ValidatorPrefix = "val=" AppVersion uint64 = 1 + defaultLane string = "default" ) var _ types.Application = (*Application)(nil) @@ -48,31 +50,71 @@ type Application struct { // If true, the app will generate block events in BeginBlock. Used to test the event indexer // Should be false by default to avoid generating too much data. genBlockEvents bool + + lanes map[string]uint32 + lanePriorities []uint32 } -// NewApplication creates an instance of the kvstore from the provided database. -func NewApplication(db dbm.DB) *Application { +// NewApplication creates an instance of the kvstore from the provided database, +// with the given lanes and priorities. +func NewApplication(db dbm.DB, lanes map[string]uint32) *Application { + lanePriorities := make([]uint32, 0, len(lanes)) + for _, p := range lanes { + lanePriorities = append(lanePriorities, p) + } + return &Application{ logger: log.NewNopLogger(), state: loadState(db), valAddrToPubKeyMap: make(map[string]crypto.PubKey), + lanes: lanes, + lanePriorities: lanePriorities, } } -// NewPersistentApplication creates a new application using the goleveldb database engine. 
-func NewPersistentApplication(dbDir string) *Application {
+// newDB creates a DB engine for persisting the application state.
+func newDB(dbDir string) *dbm.PebbleDB {
 	name := "kvstore"
 	db, err := dbm.NewPebbleDB(name, dbDir)
 	if err != nil {
 		panic(fmt.Errorf("failed to create persistent app at %s: %w", dbDir, err))
 	}
-	return NewApplication(db)
+	return db
 }
 
-// NewInMemoryApplication creates a new application from an in memory database.
-// Nothing will be persisted.
+// NewPersistentApplication creates a new application using the pebbledb
+// database engine and default lanes.
+func NewPersistentApplication(dbDir string) *Application {
+	return NewApplication(newDB(dbDir), DefaultLanes())
+}
+
+// NewPersistentApplicationWithoutLanes creates a new application using the
+// pebbledb database engine and without lanes.
+func NewPersistentApplicationWithoutLanes(dbDir string) *Application {
+	return NewApplication(newDB(dbDir), nil)
+}
+
+// NewInMemoryApplication creates a new application from an in memory database
+// that uses default lanes. Nothing will be persisted.
 func NewInMemoryApplication() *Application {
-	return NewApplication(dbm.NewMemDB())
+	return NewApplication(dbm.NewMemDB(), DefaultLanes())
+}
+
+// NewInMemoryApplicationWithoutLanes creates a new application from an in memory
+// database and without lanes. Nothing will be persisted.
+func NewInMemoryApplicationWithoutLanes() *Application {
+	return NewApplication(dbm.NewMemDB(), nil)
+}
+
+// DefaultLanes returns a map from lane names to their priorities. Priority 0 is
+// reserved. The higher the value, the higher the priority.
+func DefaultLanes() map[string]uint32 { + return map[string]uint32{ + "val": 9, // for validator updates + "foo": 7, + defaultLane: 3, + "bar": 1, + } } func (app *Application) SetGenBlockEvents() { @@ -96,12 +138,18 @@ func (app *Application) Info(context.Context, *types.InfoRequest) (*types.InfoRe } } + defLane := "" + if len(app.lanes) != 0 { + defLane = defaultLane + } return &types.InfoResponse{ Data: fmt.Sprintf("{\"size\":%v}", app.state.Size), Version: version.ABCIVersion, AppVersion: AppVersion, LastBlockHeight: app.state.Height, LastBlockAppHash: app.state.Hash(), + LanePriorities: app.lanes, + DefaultLane: defLane, }, nil } @@ -126,18 +174,62 @@ func (app *Application) InitChain(_ context.Context, req *types.InitChainRequest // - Contains one and only one `=` // - `=` is not the first or last byte. // - if key is `val` that the validator update transaction is also valid. -func (*Application) CheckTx(_ context.Context, req *types.CheckTxRequest) (*types.CheckTxResponse, error) { +func (app *Application) CheckTx(_ context.Context, req *types.CheckTxRequest) (*types.CheckTxResponse, error) { // If it is a validator update transaction, check that it is correctly formatted if isValidatorTx(req.Tx) { if _, _, _, err := parseValidatorTx(req.Tx); err != nil { - //nolint:nilerr - return &types.CheckTxResponse{Code: CodeTypeInvalidTxFormat}, nil + return &types.CheckTxResponse{Code: CodeTypeInvalidTxFormat}, nil //nolint:nilerr // error is not nil but it returns nil } } else if !isValidTx(req.Tx) { return &types.CheckTxResponse{Code: CodeTypeInvalidTxFormat}, nil } - return &types.CheckTxResponse{Code: CodeTypeOK, GasWanted: 1}, nil + if len(app.lanes) == 0 { + return &types.CheckTxResponse{Code: CodeTypeOK, GasWanted: 1}, nil + } + lane := assignLane(req.Tx) + return &types.CheckTxResponse{Code: CodeTypeOK, GasWanted: 1, LaneId: lane}, nil +} + +// assignLane deterministically computes a lane for the given tx. 
+func assignLane(tx []byte) string { + lane := defaultLane + if isValidatorTx(tx) { + return "val" // priority 9 + } + key, _, err := parseTx(tx) + if err != nil { + return lane + } + + // If the transaction key is an integer (for example, a transaction of the + // form 2=2), we will assign a lane. Any other type of transaction will go + // to the default lane. + keyInt, err := strconv.Atoi(key) + if err != nil { + return lane + } + + switch { + case keyInt%11 == 0: + return "foo" // priority 7 + case keyInt%3 == 0: + return "bar" // priority 1 + default: + return lane // priority 3 + } +} + +// parseTx parses a tx in 'key=value' format into a key and value. +func parseTx(tx []byte) (key, value string, err error) { + parts := bytes.Split(tx, []byte("=")) + if len(parts) != 2 { + return "", "", fmt.Errorf("invalid tx format: %q", string(tx)) + } + if len(parts[0]) == 0 { + return "", "", errors.New("key cannot be empty") + } + return string(parts[0]), string(parts[1]), nil } // Tx must have a format like key:value or key=value. 
That is: diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index a2da920174..2521564e4d 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -238,6 +238,34 @@ func TestCheckTx(t *testing.T) { } } +func TestClientAssignLane(t *testing.T) { + val := RandVal() + + testCases := []struct { + lane string + tx []byte + }{ + {"foo", NewTx("0", "0")}, + {defaultLane, NewTx("1", "1")}, + {defaultLane, NewTx("2", "2")}, + {"bar", NewTx("3", "3")}, + {defaultLane, NewTx("4", "4")}, + {defaultLane, NewTx("5", "5")}, + {"bar", NewTx("6", "6")}, + {defaultLane, NewTx("7", "7")}, + {defaultLane, NewTx("8", "8")}, + {"bar", NewTx("9", "9")}, + {defaultLane, NewTx("10", "10")}, + {"foo", NewTx("11", "11")}, + {"bar", NewTx("12", "12")}, + {"val", MakeValSetChangeTx(val)}, + } + + for idx, tc := range testCases { + require.Equal(t, tc.lane, assignLane(tc.tx), idx) + } +} + func TestClientServer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/api/cometbft/abci/v1/types.pb.go b/api/cometbft/abci/v1/types.pb.go index aaf8a85044..2f11e24e7d 100644 --- a/api/cometbft/abci/v1/types.pb.go +++ b/api/cometbft/abci/v1/types.pb.go @@ -2041,11 +2041,13 @@ var xxx_messageInfo_FlushResponse proto.InternalMessageInfo // InfoResponse contains the ABCI application version information. 
type InfoResponse struct { - Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` - LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` - LastBlockAppHash []byte `protobuf:"bytes,5,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` + Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` + LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockAppHash []byte `protobuf:"bytes,5,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` + LanePriorities map[string]uint32 `protobuf:"bytes,6,rep,name=lane_priorities,json=lanePriorities,proto3" json:"lane_priorities,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + DefaultLane string `protobuf:"bytes,7,opt,name=default_lane,json=defaultLane,proto3" json:"default_lane,omitempty"` } func (m *InfoResponse) Reset() { *m = InfoResponse{} } @@ -2116,6 +2118,20 @@ func (m *InfoResponse) GetLastBlockAppHash() []byte { return nil } +func (m *InfoResponse) GetLanePriorities() map[string]uint32 { + if m != nil { + return m.LanePriorities + } + return nil +} + +func (m *InfoResponse) GetDefaultLane() string { + if m != nil { + return m.DefaultLane + } + return "" +} + // InitChainResponse contains the ABCI application's hash and updates to the // validator set and/or the consensus params, 
if any. type InitChainResponse struct { @@ -2299,6 +2315,7 @@ type CheckTxResponse struct { GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + LaneId string `protobuf:"bytes,12,opt,name=lane_id,json=laneId,proto3" json:"lane_id,omitempty"` } func (m *CheckTxResponse) Reset() { *m = CheckTxResponse{} } @@ -2390,6 +2407,13 @@ func (m *CheckTxResponse) GetCodespace() string { return "" } +func (m *CheckTxResponse) GetLaneId() string { + if m != nil { + return m.LaneId + } + return "" +} + // CommitResponse indicates how much blocks should CometBFT retain. type CommitResponse struct { RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` @@ -3734,6 +3758,7 @@ func init() { proto.RegisterType((*EchoResponse)(nil), "cometbft.abci.v1.EchoResponse") proto.RegisterType((*FlushResponse)(nil), "cometbft.abci.v1.FlushResponse") proto.RegisterType((*InfoResponse)(nil), "cometbft.abci.v1.InfoResponse") + proto.RegisterMapType((map[string]uint32)(nil), "cometbft.abci.v1.InfoResponse.LanePrioritiesEntry") proto.RegisterType((*InitChainResponse)(nil), "cometbft.abci.v1.InitChainResponse") proto.RegisterType((*QueryResponse)(nil), "cometbft.abci.v1.QueryResponse") proto.RegisterType((*CheckTxResponse)(nil), "cometbft.abci.v1.CheckTxResponse") @@ -3764,205 +3789,211 @@ func init() { func init() { proto.RegisterFile("cometbft/abci/v1/types.proto", fileDescriptor_95dd8f7b670b96e3) } var fileDescriptor_95dd8f7b670b96e3 = []byte{ - // 3168 bytes of a gzipped FileDescriptorProto + // 3254 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7, - 0xd9, 0xf7, 0xf2, 0x43, 0x22, 0x1f, 0x92, 0xd2, 0x6a, 0x24, 0xd9, 0xb4, 0xe2, 0x48, 0xf2, 
0x3a, - 0x8e, 0x1d, 0x3b, 0x91, 0x5e, 0x3b, 0x79, 0xf3, 0xf1, 0xe6, 0x4d, 0x02, 0x8a, 0xa6, 0x22, 0xc9, - 0xb2, 0xc8, 0x2c, 0x69, 0x35, 0x36, 0xda, 0x6e, 0x96, 0xe4, 0x90, 0xdc, 0x98, 0xe4, 0x6e, 0x76, - 0x87, 0x0a, 0xd5, 0x9e, 0x5a, 0x34, 0x45, 0x91, 0x53, 0x2e, 0xbd, 0x14, 0x2d, 0x50, 0xa0, 0xe8, - 0xa9, 0x40, 0x0f, 0x3d, 0xf5, 0x2f, 0x28, 0x72, 0x6a, 0x73, 0xec, 0x29, 0x2d, 0x92, 0x5b, 0x0f, - 0xbd, 0x05, 0xe8, 0xb1, 0x98, 0x8f, 0xfd, 0x22, 0x77, 0x25, 0xd9, 0x49, 0x0f, 0x45, 0x7b, 0xe3, - 0xcc, 0xfc, 0x9e, 0x67, 0x66, 0x9e, 0x99, 0x79, 0x3e, 0x7e, 0x4b, 0xb8, 0xd4, 0x32, 0x07, 0x98, - 0x34, 0x3b, 0x64, 0x53, 0x6f, 0xb6, 0x8c, 0xcd, 0xa3, 0x5b, 0x9b, 0xe4, 0xd8, 0xc2, 0xce, 0x86, - 0x65, 0x9b, 0xc4, 0x44, 0xb2, 0x3b, 0xba, 0x41, 0x47, 0x37, 0x8e, 0x6e, 0xad, 0xac, 0x7a, 0xf8, - 0x96, 0x7d, 0x6c, 0x11, 0x93, 0x4a, 0x58, 0xb6, 0x69, 0x76, 0xb8, 0x44, 0x60, 0x9c, 0xe9, 0x61, - 0xc3, 0xba, 0xad, 0x0f, 0x84, 0xc6, 0x95, 0xcb, 0xd3, 0xe3, 0x47, 0x7a, 0xdf, 0x68, 0xeb, 0xc4, - 0xb4, 0x05, 0x64, 0xa9, 0x6b, 0x76, 0x4d, 0xf6, 0x73, 0x93, 0xfe, 0x12, 0xbd, 0x6b, 0x5d, 0xd3, - 0xec, 0xf6, 0xf1, 0x26, 0x6b, 0x35, 0x47, 0x9d, 0x4d, 0x62, 0x0c, 0xb0, 0x43, 0xf4, 0x81, 0xc5, - 0x01, 0xca, 0x9f, 0xb2, 0x30, 0xab, 0xe2, 0x0f, 0x46, 0xd8, 0x21, 0xe8, 0x45, 0x48, 0xe1, 0x56, - 0xcf, 0x2c, 0x4a, 0xeb, 0xd2, 0xf5, 0xdc, 0xed, 0xa7, 0x37, 0x26, 0xb7, 0xb1, 0x51, 0x69, 0xf5, - 0x4c, 0x01, 0xde, 0x39, 0xa7, 0x32, 0x30, 0x7a, 0x19, 0xd2, 0x9d, 0xfe, 0xc8, 0xe9, 0x15, 0x13, - 0x4c, 0x6a, 0x75, 0x5a, 0x6a, 0x9b, 0x0e, 0xfb, 0x62, 0x1c, 0x4e, 0x27, 0x33, 0x86, 0x1d, 0xb3, - 0x98, 0x8c, 0x9b, 0x6c, 0x77, 0xd8, 0x09, 0x4e, 0x46, 0xc1, 0xa8, 0x0c, 0x60, 0x0c, 0x0d, 0xa2, - 0xb5, 0x7a, 0xba, 0x31, 0x2c, 0xa6, 0x99, 0xa8, 0x12, 0x25, 0x6a, 0x90, 0x32, 0x85, 0xf8, 0xf2, - 0x59, 0xc3, 0xed, 0xa3, 0x2b, 0xfe, 0x60, 0x84, 0xed, 0xe3, 0xe2, 0x4c, 0xdc, 0x8a, 0xdf, 0xa1, - 0xc3, 0x81, 0x15, 0x33, 0x38, 0x7a, 0x03, 0x32, 0xad, 0x1e, 0x6e, 0x3d, 0xd2, 0xc8, 0xb8, 0x98, - 0x61, 0xa2, 0xeb, 0xd3, 0xa2, 
0x65, 0x8a, 0x68, 0x8c, 0x7d, 0xe1, 0xd9, 0x16, 0xef, 0x41, 0xaf, - 0xc1, 0x4c, 0xcb, 0x1c, 0x0c, 0x0c, 0x52, 0xcc, 0x31, 0xe1, 0xb5, 0x08, 0x61, 0x36, 0xee, 0xcb, - 0x0a, 0x01, 0x54, 0x85, 0xb9, 0xbe, 0xe1, 0x10, 0xcd, 0x19, 0xea, 0x96, 0xd3, 0x33, 0x89, 0x53, - 0xcc, 0x33, 0x15, 0xcf, 0x4e, 0xab, 0xd8, 0x37, 0x1c, 0x52, 0x77, 0x61, 0xbe, 0xa6, 0x42, 0x3f, - 0xd8, 0x4f, 0x15, 0x9a, 0x9d, 0x0e, 0xb6, 0x3d, 0x8d, 0xc5, 0x42, 0x9c, 0xc2, 0x2a, 0xc5, 0xb9, - 0x92, 0x01, 0x85, 0x66, 0xb0, 0x1f, 0x7d, 0x1b, 0x16, 0xfb, 0xa6, 0xde, 0xf6, 0xf4, 0x69, 0xad, - 0xde, 0x68, 0xf8, 0xa8, 0x38, 0xc7, 0xb4, 0xde, 0x88, 0x58, 0xa6, 0xa9, 0xb7, 0x5d, 0xe1, 0x32, - 0x85, 0xfa, 0x9a, 0x17, 0xfa, 0x93, 0x63, 0x48, 0x83, 0x25, 0xdd, 0xb2, 0xfa, 0xc7, 0x93, 0xea, - 0xe7, 0x99, 0xfa, 0x9b, 0xd3, 0xea, 0x4b, 0x14, 0x1d, 0xa3, 0x1f, 0xe9, 0x53, 0x83, 0xe8, 0x3e, - 0xc8, 0x96, 0x8d, 0x2d, 0xdd, 0xc6, 0x9a, 0x65, 0x9b, 0x96, 0xe9, 0xe8, 0xfd, 0xa2, 0xcc, 0x94, - 0x5f, 0x9f, 0x56, 0x5e, 0xe3, 0xc8, 0x9a, 0x00, 0xfa, 0x9a, 0xe7, 0xad, 0xf0, 0x08, 0x57, 0x6b, - 0xb6, 0xb0, 0xe3, 0xf8, 0x6a, 0x17, 0xe2, 0xd5, 0x32, 0x64, 0xa4, 0xda, 0xd0, 0x08, 0xda, 0x86, - 0x1c, 0x1e, 0x13, 0x3c, 0x6c, 0x6b, 0x47, 0x26, 0xc1, 0x45, 0xc4, 0x34, 0x5e, 0x89, 0x78, 0xae, - 0x0c, 0x74, 0x68, 0x12, 0xec, 0x2b, 0x03, 0xec, 0x75, 0xa2, 0x26, 0x2c, 0x1f, 0x61, 0xdb, 0xe8, - 0x1c, 0x33, 0x3d, 0x1a, 0x1b, 0x71, 0x0c, 0x73, 0x58, 0x5c, 0x64, 0x1a, 0x9f, 0x9f, 0xd6, 0x78, - 0xc8, 0xe0, 0x54, 0xb8, 0xe2, 0x82, 0x7d, 0xd5, 0x8b, 0x47, 0xd3, 0xa3, 0xf4, 0xa6, 0x75, 0x8c, - 0xa1, 0xde, 0x37, 0xbe, 0x87, 0xb5, 0x66, 0xdf, 0x6c, 0x3d, 0x2a, 0x2e, 0xc5, 0xdd, 0xb4, 0x6d, - 0x81, 0xdb, 0xa2, 0xb0, 0xc0, 0x4d, 0xeb, 0x04, 0xfb, 0xb7, 0x66, 0x21, 0x7d, 0xa4, 0xf7, 0x47, - 0x78, 0x2f, 0x95, 0x49, 0xc9, 0xe9, 0xbd, 0x54, 0x66, 0x56, 0xce, 0xec, 0xa5, 0x32, 0x59, 0x19, - 0xf6, 0x52, 0x19, 0x90, 0x73, 0xca, 0x35, 0xc8, 0x05, 0xfc, 0x14, 0x2a, 0xc2, 0xec, 0x00, 0x3b, - 0x8e, 0xde, 0xc5, 0xcc, 0xaf, 0x65, 0x55, 0xb7, 0xa9, 0xcc, 0x41, 
0x3e, 0xe8, 0x9a, 0x94, 0x4f, - 0x24, 0xc8, 0x05, 0x9c, 0x0e, 0x95, 0x3c, 0xc2, 0x36, 0x33, 0x88, 0x90, 0x14, 0x4d, 0x74, 0x05, - 0x0a, 0x6c, 0x2f, 0x9a, 0x3b, 0x4e, 0x7d, 0x5f, 0x4a, 0xcd, 0xb3, 0xce, 0x43, 0x01, 0x5a, 0x83, - 0x9c, 0x75, 0xdb, 0xf2, 0x20, 0x49, 0x06, 0x01, 0xeb, 0xb6, 0xe5, 0x02, 0x2e, 0x43, 0x9e, 0x6e, - 0xdd, 0x43, 0xa4, 0xd8, 0x24, 0x39, 0xda, 0x27, 0x20, 0xca, 0x1f, 0x13, 0x20, 0x4f, 0x3a, 0x33, - 0xf4, 0x2a, 0xa4, 0xa8, 0x17, 0x17, 0x6e, 0x7a, 0x65, 0x83, 0xbb, 0xf8, 0x0d, 0xd7, 0xc5, 0x6f, - 0x34, 0x5c, 0x17, 0xbf, 0x95, 0xf9, 0xf4, 0xf3, 0xb5, 0x73, 0x9f, 0xfc, 0x65, 0x4d, 0x52, 0x99, - 0x04, 0xba, 0x48, 0x3d, 0x98, 0x6e, 0x0c, 0x35, 0xa3, 0xcd, 0x96, 0x9c, 0xa5, 0xde, 0x49, 0x37, - 0x86, 0xbb, 0x6d, 0x74, 0x0f, 0xe4, 0x96, 0x39, 0x74, 0xf0, 0xd0, 0x19, 0x39, 0x1a, 0x8f, 0x3d, - 0xc2, 0x35, 0x07, 0xfc, 0x2b, 0x0f, 0x72, 0xcc, 0x51, 0x09, 0x68, 0x8d, 0x21, 0xd5, 0xf9, 0x56, - 0xb8, 0x03, 0xbd, 0x0d, 0xe0, 0x05, 0x28, 0xa7, 0x98, 0x5a, 0x4f, 0x5e, 0xcf, 0xdd, 0xbe, 0x1c, - 0x71, 0x9f, 0x5c, 0xcc, 0x7d, 0xab, 0xad, 0x13, 0xbc, 0x95, 0xa2, 0x0b, 0x56, 0x03, 0xa2, 0xe8, - 0x59, 0x98, 0xd7, 0x2d, 0x4b, 0x73, 0x88, 0x4e, 0xb0, 0xd6, 0x3c, 0x26, 0xd8, 0x61, 0x6e, 0x3f, - 0xaf, 0x16, 0x74, 0xcb, 0xaa, 0xd3, 0xde, 0x2d, 0xda, 0x89, 0xae, 0xc2, 0x1c, 0xf5, 0xf0, 0x86, - 0xde, 0xd7, 0x7a, 0xd8, 0xe8, 0xf6, 0x08, 0xf3, 0xee, 0x49, 0xb5, 0x20, 0x7a, 0x77, 0x58, 0xa7, - 0xd2, 0x86, 0x7c, 0xd0, 0xb9, 0x23, 0x04, 0xa9, 0xb6, 0x4e, 0x74, 0x66, 0xcb, 0xbc, 0xca, 0x7e, - 0xd3, 0x3e, 0x4b, 0x27, 0x3d, 0x61, 0x21, 0xf6, 0x1b, 0x9d, 0x87, 0x19, 0xa1, 0x36, 0xc9, 0xd4, - 0x8a, 0x16, 0x5a, 0x82, 0xb4, 0x65, 0x9b, 0x47, 0x98, 0x1d, 0x5e, 0x46, 0xe5, 0x0d, 0xe5, 0x01, - 0xcc, 0x85, 0xe3, 0x00, 0x9a, 0x83, 0x04, 0x19, 0x8b, 0x59, 0x12, 0x64, 0x8c, 0x6e, 0x41, 0x8a, - 0x1a, 0x93, 0x69, 0x9b, 0x8b, 0x8a, 0x7e, 0x42, 0xbe, 0x71, 0x6c, 0x61, 0x95, 0x41, 0xf7, 0x52, - 0x99, 0x84, 0x9c, 0x54, 0xe6, 0xa1, 0x10, 0x8a, 0x12, 0xca, 0x79, 0x58, 0x8a, 0xf2, 0xf9, 0x8a, - 0x01, 
0x4b, 0x51, 0xae, 0x1b, 0xbd, 0x0c, 0x19, 0xcf, 0xe9, 0xbb, 0x37, 0x68, 0x6a, 0x76, 0x4f, - 0xc8, 0xc3, 0xd2, 0xbb, 0x43, 0x0f, 0xa2, 0xa7, 0x8b, 0x50, 0x9f, 0x57, 0x67, 0x75, 0xcb, 0xda, - 0xd1, 0x9d, 0x9e, 0xf2, 0x1e, 0x14, 0xe3, 0xfc, 0x79, 0xc0, 0x70, 0x12, 0x7b, 0x00, 0xae, 0xe1, - 0xce, 0xc3, 0x4c, 0xc7, 0xb4, 0x07, 0x3a, 0x61, 0xca, 0x0a, 0xaa, 0x68, 0x51, 0x83, 0x72, 0xdf, - 0x9e, 0x64, 0xdd, 0xbc, 0xa1, 0x68, 0x70, 0x31, 0xd6, 0xa5, 0x53, 0x11, 0x63, 0xd8, 0xc6, 0xdc, - 0xbc, 0x05, 0x95, 0x37, 0x7c, 0x45, 0x7c, 0xb1, 0xbc, 0x41, 0xa7, 0x75, 0xf0, 0xb0, 0x8d, 0x6d, - 0xa6, 0x3f, 0xab, 0x8a, 0x96, 0xf2, 0xb3, 0x24, 0x9c, 0x8f, 0xf6, 0xeb, 0x68, 0x1d, 0xf2, 0x03, - 0x7d, 0xac, 0x91, 0xb1, 0xb8, 0x7e, 0x12, 0xbb, 0x00, 0x30, 0xd0, 0xc7, 0x8d, 0x31, 0xbf, 0x7b, - 0x32, 0x24, 0xc9, 0xd8, 0x29, 0x26, 0xd6, 0x93, 0xd7, 0xf3, 0x2a, 0xfd, 0x89, 0x0e, 0x61, 0xa1, - 0x6f, 0xb6, 0xf4, 0xbe, 0xd6, 0xd7, 0x1d, 0xa2, 0x89, 0xb0, 0xcf, 0x9f, 0xd3, 0x33, 0x71, 0x7e, - 0x1a, 0xb7, 0xf9, 0xc1, 0x52, 0x17, 0x24, 0x1e, 0xc2, 0x3c, 0x53, 0xb2, 0xaf, 0x3b, 0x84, 0x0f, - 0xa1, 0x0a, 0xe4, 0x06, 0x86, 0xd3, 0xc4, 0x3d, 0xfd, 0xc8, 0x30, 0x6d, 0xf1, 0xae, 0x22, 0x6e, - 0xcf, 0x3d, 0x1f, 0x24, 0x54, 0x05, 0xe5, 0x02, 0x87, 0x92, 0x0e, 0xdd, 0x66, 0xd7, 0xb3, 0xcc, - 0x3c, 0xb6, 0x67, 0xf9, 0x1f, 0x58, 0x1a, 0xe2, 0x31, 0xd1, 0xfc, 0x97, 0xcb, 0x6f, 0xca, 0x2c, - 0x33, 0x3e, 0xa2, 0x63, 0xde, 0x5b, 0x77, 0xe8, 0xa5, 0x41, 0xcf, 0xb1, 0xd8, 0x68, 0x99, 0x0e, - 0xb6, 0x35, 0xbd, 0xdd, 0xb6, 0xb1, 0xe3, 0xb0, 0xac, 0x2a, 0xcf, 0xe2, 0x1d, 0xeb, 0x2f, 0xf1, - 0x6e, 0xe5, 0x63, 0x76, 0x38, 0x51, 0xd1, 0xd1, 0x35, 0xbd, 0xe4, 0x9b, 0xbe, 0x01, 0x4b, 0x42, - 0xbe, 0x1d, 0xb2, 0x3e, 0x4f, 0x4f, 0x2f, 0xc5, 0x25, 0x5d, 0x01, 0xab, 0x23, 0x57, 0x3e, 0xde, - 0xf0, 0xc9, 0x27, 0x34, 0x3c, 0x82, 0x14, 0x33, 0x4b, 0x8a, 0xbb, 0x1b, 0xfa, 0xfb, 0xdf, 0xed, - 0x30, 0x3e, 0x4a, 0xc2, 0xc2, 0x54, 0x62, 0xe1, 0x6d, 0x4c, 0x8a, 0xdc, 0x58, 0x22, 0x72, 0x63, - 0xc9, 0xc7, 0xde, 0x98, 0x38, 0xed, 0xd4, 
0xe9, 0xa7, 0x9d, 0xfe, 0x26, 0x4f, 0x7b, 0xe6, 0x09, - 0x4f, 0xfb, 0x5f, 0x7a, 0x0e, 0x3f, 0x97, 0x60, 0x25, 0x3e, 0x1d, 0x8b, 0x3c, 0x90, 0x9b, 0xb0, - 0xe0, 0x2d, 0xc5, 0x53, 0xcf, 0xdd, 0xa3, 0xec, 0x0d, 0x08, 0xfd, 0xb1, 0x11, 0xef, 0x2a, 0xcc, - 0x4d, 0x64, 0x8b, 0xfc, 0x32, 0x17, 0x8e, 0x82, 0xcb, 0x50, 0x7e, 0x97, 0x84, 0xa5, 0xa8, 0x84, - 0x2e, 0xe2, 0xc5, 0xaa, 0xb0, 0xd8, 0xc6, 0x2d, 0xa3, 0xfd, 0xc4, 0x0f, 0x76, 0x41, 0x88, 0xff, - 0xf7, 0xbd, 0x4e, 0xdf, 0x13, 0x74, 0x03, 0x16, 0x9c, 0xe3, 0x61, 0xcb, 0x18, 0x76, 0x35, 0x62, - 0xba, 0xb9, 0x51, 0x96, 0xad, 0x7c, 0x5e, 0x0c, 0x34, 0x4c, 0x91, 0x1d, 0xfd, 0x1a, 0x20, 0xa3, - 0x62, 0xc7, 0xa2, 0xc9, 0x1c, 0x2a, 0x43, 0x16, 0x8f, 0x5b, 0xd8, 0x22, 0x6e, 0x02, 0x1c, 0x53, - 0x63, 0x08, 0x88, 0x2b, 0x47, 0x6b, 0x6d, 0x4f, 0x0e, 0xbd, 0x24, 0x28, 0x85, 0x58, 0x72, 0x80, - 0xa7, 0xea, 0x9e, 0x28, 0xe7, 0x14, 0x5e, 0x71, 0x39, 0x85, 0x64, 0x5c, 0xa5, 0x2c, 0x12, 0x77, - 0x4f, 0x4e, 0x90, 0x0a, 0x2f, 0x09, 0x52, 0x21, 0x15, 0x37, 0x1d, 0xcf, 0xef, 0xfd, 0xe9, 0x18, - 0xab, 0x70, 0x27, 0xc4, 0x2a, 0xcc, 0xc4, 0x6d, 0x35, 0x90, 0x88, 0xfb, 0x5b, 0xf5, 0x69, 0x85, - 0x57, 0x5c, 0x5a, 0x61, 0x36, 0x6e, 0xd1, 0x22, 0xf3, 0xf4, 0x17, 0xcd, 0x79, 0x85, 0x37, 0x03, - 0xbc, 0x42, 0x96, 0xc9, 0x5e, 0x3e, 0x81, 0x57, 0xf0, 0xa4, 0x3d, 0x62, 0xe1, 0xff, 0x3c, 0x62, - 0x21, 0x1f, 0xcb, 0x4a, 0x88, 0x94, 0xd1, 0x13, 0x76, 0x99, 0x85, 0xda, 0x14, 0xb3, 0xc0, 0x89, - 0x80, 0x6b, 0xa7, 0x32, 0x0b, 0x9e, 0xaa, 0x09, 0x6a, 0xa1, 0x36, 0x45, 0x2d, 0xcc, 0xc5, 0x69, - 0x9c, 0xc8, 0x4f, 0x7d, 0x8d, 0x61, 0x6e, 0xe1, 0x3b, 0xd1, 0xdc, 0x42, 0x6c, 0xf1, 0x1f, 0x91, - 0x8b, 0x7a, 0xaa, 0x23, 0xc8, 0x85, 0xf7, 0x62, 0xc8, 0x05, 0x39, 0xae, 0x08, 0x8e, 0xca, 0x44, - 0xbd, 0x09, 0xa2, 0xd8, 0x85, 0xc3, 0x08, 0x76, 0x81, 0xd3, 0x00, 0xcf, 0x9d, 0x81, 0x5d, 0xf0, - 0x54, 0x4f, 0xd1, 0x0b, 0x87, 0x11, 0xf4, 0x02, 0x8a, 0xd7, 0x3b, 0x91, 0x40, 0x05, 0xf5, 0x86, - 0xf9, 0x85, 0xb7, 0xc3, 0xfc, 0xc2, 0xe2, 0xc9, 0x79, 0x2b, 0x4f, 0x03, 0x3c, 
0x6d, 0x41, 0x82, - 0xa1, 0x15, 0x47, 0x30, 0x70, 0x0e, 0xe0, 0x85, 0x33, 0x12, 0x0c, 0x9e, 0xee, 0x48, 0x86, 0xa1, - 0x36, 0xc5, 0x30, 0x2c, 0xc7, 0x5d, 0xb8, 0x89, 0x80, 0xe4, 0x5f, 0xb8, 0x58, 0x8a, 0x21, 0x2d, - 0xcf, 0xec, 0xa5, 0x32, 0x19, 0x39, 0xcb, 0xc9, 0x85, 0xbd, 0x54, 0x26, 0x27, 0xe7, 0x95, 0xe7, - 0x68, 0x0a, 0x34, 0xe1, 0xf7, 0x68, 0xc1, 0x81, 0x6d, 0xdb, 0xb4, 0x05, 0x59, 0xc0, 0x1b, 0xca, - 0x75, 0xc8, 0x07, 0x5d, 0xdc, 0x09, 0x74, 0xc4, 0x3c, 0x14, 0x42, 0x5e, 0x4d, 0xf9, 0xbd, 0x04, - 0xf9, 0xa0, 0xbf, 0x0a, 0x15, 0xab, 0x59, 0x51, 0xac, 0x06, 0x48, 0x8a, 0x44, 0x98, 0xa4, 0x58, - 0x83, 0x1c, 0x2d, 0xd8, 0x26, 0xf8, 0x07, 0xdd, 0xf2, 0xf8, 0x87, 0x1b, 0xb0, 0xc0, 0xe2, 0x2d, - 0xa7, 0x32, 0x44, 0x64, 0x48, 0xf1, 0xc8, 0x40, 0x07, 0x98, 0x31, 0x78, 0x64, 0x40, 0x2f, 0xc0, - 0x62, 0x00, 0xeb, 0x15, 0x82, 0xbc, 0x14, 0x97, 0x3d, 0x74, 0x49, 0x54, 0x84, 0x7f, 0x90, 0x60, - 0x61, 0xca, 0x5d, 0x46, 0x72, 0x0c, 0xd2, 0x37, 0xc5, 0x31, 0x24, 0x9e, 0x9c, 0x63, 0x08, 0x96, - 0xb6, 0xc9, 0x70, 0x69, 0xfb, 0x0f, 0x09, 0x0a, 0x21, 0xb7, 0x4d, 0x0f, 0xa1, 0x65, 0xb6, 0xb1, - 0x28, 0x36, 0xd9, 0x6f, 0x9a, 0xd3, 0xf4, 0xcd, 0xae, 0x28, 0x29, 0xe9, 0x4f, 0x8a, 0xf2, 0x02, - 0x51, 0x56, 0x84, 0x19, 0xaf, 0x4e, 0xe5, 0x79, 0x83, 0xa8, 0x53, 0x65, 0x48, 0x3e, 0xc2, 0x9c, - 0x8b, 0xce, 0xab, 0xf4, 0x27, 0xc5, 0xb1, 0xeb, 0x27, 0xe2, 0x3f, 0x6f, 0xa0, 0xd7, 0x20, 0xcb, - 0xbe, 0x18, 0x68, 0xa6, 0xe5, 0x08, 0xfa, 0x39, 0x90, 0x1b, 0xf1, 0xcf, 0x0a, 0xe2, 0x9d, 0x9b, - 0x9d, 0xaa, 0xe5, 0xa8, 0x19, 0x4b, 0xfc, 0x0a, 0x64, 0x2c, 0xd9, 0x50, 0xc6, 0x72, 0x09, 0xb2, - 0x74, 0xf9, 0x8e, 0xa5, 0xb7, 0x70, 0x11, 0xd8, 0x4a, 0xfd, 0x0e, 0xe5, 0x37, 0x09, 0x98, 0x9f, - 0x88, 0x3a, 0x91, 0x9b, 0x77, 0x6f, 0x65, 0x22, 0x40, 0xa1, 0x9c, 0xcd, 0x20, 0xab, 0x00, 0x5d, - 0xdd, 0xd1, 0x3e, 0xd4, 0x87, 0x04, 0xb7, 0x85, 0x55, 0x02, 0x3d, 0x68, 0x05, 0x32, 0xb4, 0x35, - 0x72, 0x70, 0x5b, 0xb0, 0x39, 0x5e, 0x1b, 0xed, 0xc2, 0x0c, 0x3e, 0xc2, 0x43, 0xe2, 0x14, 0x67, - 0xd9, 0xc1, 0x5f, 
0x88, 0x70, 0x4f, 0x74, 0x7c, 0xab, 0x48, 0x8f, 0xfb, 0x6f, 0x9f, 0xaf, 0xc9, - 0x1c, 0xfe, 0xbc, 0x39, 0x30, 0x08, 0x1e, 0x58, 0xe4, 0x58, 0x15, 0x0a, 0xc2, 0x66, 0xc8, 0x4c, - 0x98, 0x81, 0x51, 0x8b, 0x79, 0x97, 0x27, 0xa0, 0x46, 0x35, 0x4c, 0xdb, 0x20, 0xc7, 0x6a, 0x61, - 0x80, 0x07, 0x96, 0x69, 0xf6, 0x35, 0xfe, 0xce, 0x4b, 0x30, 0x17, 0x0e, 0xb2, 0xe8, 0x0a, 0x14, - 0x6c, 0x4c, 0x74, 0x63, 0xa8, 0x85, 0xf2, 0xe8, 0x3c, 0xef, 0xe4, 0xef, 0x6a, 0x2f, 0x95, 0x91, - 0xe4, 0x84, 0xa0, 0x76, 0xde, 0x81, 0xe5, 0xc8, 0x18, 0x8b, 0x5e, 0x85, 0xac, 0x1f, 0x9f, 0x25, - 0xb6, 0xdd, 0x93, 0x38, 0x1b, 0x1f, 0xac, 0x1c, 0xc2, 0x72, 0x64, 0x90, 0x45, 0x6f, 0xc0, 0x8c, - 0x8d, 0x9d, 0x51, 0x9f, 0xd3, 0x32, 0x73, 0xb7, 0xaf, 0x9e, 0x1e, 0x9d, 0x47, 0x7d, 0xa2, 0x0a, - 0x21, 0xe5, 0x16, 0x5c, 0x8c, 0x8d, 0xb2, 0x3e, 0xf3, 0x22, 0x05, 0x98, 0x17, 0xe5, 0xb7, 0x12, - 0xac, 0xc4, 0x47, 0x4e, 0xb4, 0x35, 0xb1, 0xa0, 0x1b, 0x67, 0x8c, 0xbb, 0x81, 0x55, 0xd1, 0xd2, - 0xc4, 0xc6, 0x1d, 0x4c, 0x5a, 0x3d, 0x1e, 0xc2, 0xb9, 0x53, 0x28, 0xa8, 0x05, 0xd1, 0xcb, 0x64, - 0x1c, 0x0e, 0x7b, 0x1f, 0xb7, 0x88, 0xc6, 0x0f, 0xd5, 0x61, 0xe5, 0x41, 0x96, 0xc2, 0x68, 0x6f, - 0x9d, 0x77, 0x2a, 0x37, 0xe1, 0x42, 0x4c, 0x2c, 0x9e, 0xae, 0x61, 0x94, 0x87, 0x14, 0x1c, 0x19, - 0x60, 0xd1, 0x5b, 0x30, 0xe3, 0x10, 0x9d, 0x8c, 0x1c, 0xb1, 0xb3, 0x6b, 0xa7, 0xc6, 0xe6, 0x3a, - 0x83, 0xab, 0x42, 0x4c, 0x79, 0x1d, 0xd0, 0x74, 0xa4, 0x8d, 0xa8, 0xc3, 0xa4, 0xa8, 0x3a, 0xac, - 0x09, 0x4f, 0x9d, 0x10, 0x53, 0x51, 0x79, 0x62, 0x71, 0x37, 0xcf, 0x14, 0x92, 0x27, 0x16, 0xf8, - 0xf7, 0x04, 0x2c, 0x47, 0x86, 0xd6, 0xc0, 0x2b, 0x95, 0xbe, 0xee, 0x2b, 0x7d, 0x03, 0x80, 0x8c, - 0x35, 0x7e, 0xd2, 0xae, 0xb7, 0x8f, 0xaa, 0x27, 0xc6, 0xb8, 0xc5, 0x1c, 0x16, 0xbd, 0x18, 0x59, - 0x22, 0x7e, 0x39, 0xa8, 0x11, 0xac, 0x7d, 0x47, 0x2c, 0x12, 0x38, 0xa2, 0x2c, 0x3c, 0x73, 0xcc, - 0xf0, 0x8b, 0x64, 0xde, 0xed, 0xa0, 0x87, 0x70, 0x61, 0x22, 0xa2, 0x79, 0xba, 0x53, 0x67, 0x0e, - 0x6c, 0xcb, 0xe1, 0xc0, 0xe6, 0xea, 0x0e, 0x46, 0xa5, 
0x74, 0x38, 0x2a, 0x3d, 0x04, 0xf0, 0x8b, - 0x60, 0xfa, 0xde, 0x6c, 0x73, 0x34, 0x6c, 0xb3, 0x23, 0x4c, 0xab, 0xbc, 0x81, 0x5e, 0x86, 0x34, - 0xbd, 0x09, 0xae, 0xa9, 0x22, 0x1c, 0x06, 0x3d, 0xd2, 0x40, 0x15, 0xcd, 0xe1, 0xca, 0xfb, 0xee, - 0x6d, 0x0b, 0xf2, 0x91, 0x31, 0x73, 0xbc, 0x19, 0x9e, 0x43, 0x89, 0xa7, 0x36, 0xa3, 0xe7, 0xfa, - 0x3e, 0xa4, 0xd9, 0xf1, 0xd3, 0xe8, 0xc0, 0xe8, 0x70, 0x91, 0xd9, 0xd0, 0xdf, 0xe8, 0xbb, 0x00, - 0x3a, 0x21, 0xb6, 0xd1, 0x1c, 0xf9, 0x33, 0xac, 0xc7, 0xdc, 0x9f, 0x92, 0x0b, 0xdc, 0xba, 0x24, - 0x2e, 0xd2, 0x92, 0x2f, 0x1b, 0xb8, 0x4c, 0x01, 0x8d, 0xca, 0x01, 0xcc, 0x85, 0x65, 0xdd, 0x50, - 0xcc, 0x17, 0x11, 0x0e, 0xc5, 0x3c, 0xb7, 0x12, 0xa1, 0xd8, 0x0b, 0xe4, 0x49, 0x4e, 0xfa, 0xb3, - 0x86, 0xf2, 0x83, 0x04, 0xe4, 0x83, 0xb7, 0xef, 0x3f, 0x30, 0x58, 0x2a, 0x3f, 0x96, 0x20, 0xe3, - 0xed, 0x3f, 0x4c, 0xfd, 0x87, 0xbe, 0x99, 0x70, 0xf3, 0x25, 0x82, 0x7c, 0x3d, 0xff, 0x42, 0x92, - 0xf4, 0xbe, 0x90, 0xfc, 0xbf, 0x17, 0x10, 0x62, 0x8b, 0xf9, 0xa0, 0xb5, 0xc5, 0xc5, 0x72, 0x03, - 0xd4, 0xeb, 0x90, 0xf5, 0xde, 0x30, 0xcd, 0x91, 0x5d, 0x92, 0x44, 0x12, 0x0f, 0x49, 0x90, 0x23, - 0x4b, 0x90, 0xb6, 0xcc, 0x0f, 0xc5, 0xd7, 0x80, 0xa4, 0xca, 0x1b, 0x8a, 0x03, 0xf3, 0x13, 0x0e, - 0xc0, 0x07, 0x26, 0x02, 0x40, 0xa4, 0x40, 0xc1, 0x1a, 0x35, 0xb5, 0x47, 0xf8, 0x58, 0x7c, 0x1b, - 0xe0, 0xcb, 0xcf, 0x59, 0xa3, 0xe6, 0x5d, 0x7c, 0xcc, 0x3f, 0x0e, 0xac, 0x43, 0xde, 0xc5, 0xb0, - 0x2b, 0xce, 0xcf, 0x14, 0x38, 0xa4, 0xc1, 0x3f, 0xec, 0x48, 0x72, 0x42, 0xf9, 0xa9, 0x04, 0x19, - 0xf7, 0x95, 0xa0, 0xb7, 0x20, 0xeb, 0xf9, 0x1a, 0x91, 0x22, 0x3f, 0x75, 0x82, 0x97, 0x12, 0x9b, - 0xf7, 0x65, 0xd0, 0x96, 0xfb, 0x85, 0xd2, 0x68, 0x6b, 0x9d, 0xbe, 0xde, 0x15, 0x1f, 0x9a, 0x56, - 0x23, 0xdc, 0x11, 0xf3, 0xd8, 0xbb, 0x77, 0xb6, 0xfb, 0x7a, 0x57, 0xcd, 0x31, 0xa1, 0xdd, 0x36, - 0x6d, 0x88, 0xac, 0xe4, 0x2b, 0x09, 0xe4, 0xc9, 0x57, 0xfc, 0xf5, 0xd7, 0x37, 0x1d, 0xbd, 0x92, - 0x11, 0xd1, 0x0b, 0x6d, 0xc2, 0xa2, 0x87, 0xd0, 0x1c, 0xa3, 0x3b, 0xd4, 0xc9, 0xc8, 0xc6, 
0x82, - 0x8e, 0x43, 0xde, 0x50, 0xdd, 0x1d, 0x99, 0xde, 0x77, 0xfa, 0x49, 0xf7, 0xfd, 0x51, 0x02, 0x72, - 0x01, 0x76, 0x10, 0xfd, 0x6f, 0xc0, 0x45, 0xcd, 0x45, 0xc5, 0x8c, 0x00, 0xd8, 0xff, 0x6a, 0x17, - 0xb6, 0x54, 0xe2, 0x09, 0x2c, 0x15, 0xc7, 0xc3, 0xba, 0x74, 0x63, 0xea, 0xb1, 0xe9, 0xc6, 0xe7, - 0x01, 0x11, 0x93, 0xe8, 0x7d, 0x5a, 0x94, 0x1b, 0xc3, 0xae, 0xc6, 0x2f, 0x36, 0xf7, 0x28, 0x32, - 0x1b, 0x39, 0x64, 0x03, 0x35, 0xf6, 0x18, 0x7e, 0x28, 0x41, 0xc6, 0xa3, 0x62, 0x1e, 0xf7, 0x6b, - 0xde, 0x79, 0x98, 0x11, 0x99, 0x18, 0xff, 0x9c, 0x27, 0x5a, 0x91, 0xbc, 0xea, 0x0a, 0x64, 0x06, - 0x98, 0xe8, 0xcc, 0x3d, 0xf2, 0x78, 0xe7, 0xb5, 0x6f, 0x34, 0x21, 0x17, 0xf8, 0x20, 0x8a, 0x2e, - 0xc2, 0x72, 0x79, 0xa7, 0x52, 0xbe, 0xab, 0x35, 0xde, 0xd5, 0x1a, 0x0f, 0x6a, 0x15, 0xed, 0xfe, - 0xc1, 0xdd, 0x83, 0xea, 0xb7, 0x0e, 0xe4, 0x73, 0xd3, 0x43, 0x6a, 0x85, 0xb5, 0x65, 0x09, 0x5d, - 0x80, 0xc5, 0xf0, 0x10, 0x1f, 0x48, 0xac, 0xa4, 0x7e, 0xf2, 0xab, 0xd5, 0x73, 0x37, 0xbe, 0x92, - 0x60, 0x31, 0x22, 0xe7, 0x45, 0x97, 0xe1, 0xe9, 0xea, 0xf6, 0x76, 0x45, 0xd5, 0xea, 0x07, 0xa5, - 0x5a, 0x7d, 0xa7, 0xda, 0xd0, 0xd4, 0x4a, 0xfd, 0xfe, 0x7e, 0x23, 0x30, 0xe9, 0x3a, 0x5c, 0x8a, - 0x86, 0x94, 0xca, 0xe5, 0x4a, 0xad, 0x21, 0x4b, 0x68, 0x0d, 0x9e, 0x8a, 0x41, 0x6c, 0x55, 0xd5, - 0x86, 0x9c, 0x88, 0x57, 0xa1, 0x56, 0xf6, 0x2a, 0xe5, 0x86, 0x9c, 0x44, 0xd7, 0xe0, 0xca, 0x49, - 0x08, 0x6d, 0xbb, 0xaa, 0xde, 0x2b, 0x35, 0xe4, 0xd4, 0xa9, 0xc0, 0x7a, 0xe5, 0xe0, 0x4e, 0x45, - 0x95, 0xd3, 0x62, 0xdf, 0xbf, 0x4c, 0x40, 0x31, 0x2e, 0xb5, 0xa6, 0xba, 0x4a, 0xb5, 0xda, 0xfe, - 0x03, 0x5f, 0x57, 0x79, 0xe7, 0xfe, 0xc1, 0xdd, 0x69, 0x13, 0x3c, 0x0b, 0xca, 0x49, 0x40, 0xcf, - 0x10, 0x57, 0xe1, 0xf2, 0x89, 0x38, 0x61, 0x8e, 0x53, 0x60, 0x6a, 0xa5, 0xa1, 0x3e, 0x90, 0x93, - 0x68, 0x03, 0x6e, 0x9c, 0x0a, 0xf3, 0xc6, 0xe4, 0x14, 0xda, 0x84, 0x9b, 0x27, 0xe3, 0xb9, 0x81, - 0x5c, 0x01, 0xd7, 0x44, 0x1f, 0x4b, 0xb0, 0x1c, 0x99, 0xa3, 0xa3, 0x2b, 0xb0, 0x56, 0x53, 0xab, - 0xe5, 0x4a, 0xbd, 0xae, 0xd5, 
0xd4, 0x6a, 0xad, 0x5a, 0x2f, 0xed, 0x6b, 0xf5, 0x46, 0xa9, 0x71, - 0xbf, 0x1e, 0xb0, 0x8d, 0x02, 0xab, 0x71, 0x20, 0xcf, 0x2e, 0x27, 0x60, 0xc4, 0x0d, 0x70, 0xef, - 0xe9, 0x2f, 0x24, 0xb8, 0x18, 0x9b, 0x93, 0xa3, 0xeb, 0xf0, 0xcc, 0x61, 0x45, 0xdd, 0xdd, 0x7e, - 0xa0, 0x1d, 0x56, 0x1b, 0x15, 0xad, 0xf2, 0x6e, 0xa3, 0x72, 0x50, 0xdf, 0xad, 0x1e, 0x4c, 0xaf, - 0xea, 0x1a, 0x5c, 0x39, 0x11, 0xe9, 0x2d, 0xed, 0x34, 0xe0, 0xc4, 0xfa, 0x7e, 0x24, 0xc1, 0xfc, - 0x84, 0x2f, 0x44, 0x97, 0xa0, 0x78, 0x6f, 0xb7, 0xbe, 0x55, 0xd9, 0x29, 0x1d, 0xee, 0x56, 0xd5, - 0xc9, 0x37, 0x7b, 0x05, 0xd6, 0xa6, 0x46, 0xef, 0xdc, 0xaf, 0xed, 0xef, 0x96, 0x4b, 0x8d, 0x0a, - 0x9b, 0x54, 0x96, 0xe8, 0xc6, 0xa6, 0x40, 0xfb, 0xbb, 0x6f, 0xef, 0x34, 0xb4, 0xf2, 0xfe, 0x6e, - 0xe5, 0xa0, 0xa1, 0x95, 0x1a, 0x8d, 0x92, 0xff, 0x9c, 0xb7, 0xee, 0x7e, 0xfa, 0xc5, 0xaa, 0xf4, - 0xd9, 0x17, 0xab, 0xd2, 0x5f, 0xbf, 0x58, 0x95, 0x3e, 0xf9, 0x72, 0xf5, 0xdc, 0x67, 0x5f, 0xae, - 0x9e, 0xfb, 0xf3, 0x97, 0xab, 0xe7, 0x1e, 0xde, 0xea, 0x1a, 0xa4, 0x37, 0x6a, 0x52, 0x2f, 0xbc, - 0xe9, 0xff, 0x2f, 0xd3, 0xfb, 0x43, 0xa7, 0x65, 0x6c, 0x4e, 0xfe, 0xbb, 0xb3, 0x39, 0xc3, 0xdc, - 0xea, 0x8b, 0xff, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x7c, 0xee, 0x5e, 0x4c, 0xf8, 0x29, 0x00, 0x00, + 0x15, 0xf7, 0xf2, 0x4b, 0xe4, 0x23, 0x29, 0xad, 0x46, 0x92, 0x4d, 0x2b, 0x8e, 0x24, 0xaf, 0xe3, + 0xd8, 0xb1, 0x13, 0xa9, 0x76, 0xd2, 0x7c, 0x36, 0x09, 0x28, 0x9a, 0x8a, 0x24, 0xcb, 0x12, 0xb3, + 0xa4, 0xd5, 0xd8, 0xfd, 0xd8, 0xac, 0xc8, 0x21, 0xb9, 0x31, 0xc9, 0xdd, 0xec, 0x0e, 0x15, 0xb2, + 0x3d, 0xb5, 0x68, 0x8a, 0x22, 0xa7, 0x5c, 0x7a, 0x29, 0x5a, 0xa0, 0x40, 0xd1, 0x6b, 0x0f, 0xfd, + 0x1b, 0x8a, 0x22, 0xa7, 0x36, 0xc7, 0x9e, 0xd2, 0x22, 0xb9, 0xf5, 0xd0, 0x5b, 0x80, 0x02, 0xbd, + 0x14, 0xf3, 0xb1, 0x5f, 0xe4, 0xae, 0x64, 0x3b, 0xe9, 0xa1, 0x68, 0x6f, 0x3b, 0x33, 0xbf, 0xf7, + 0x66, 0xe6, 0xcd, 0xcc, 0x7b, 0x6f, 0x7e, 0xb3, 0x70, 0xa1, 0x69, 0xf6, 0x31, 0x39, 0x6a, 0x93, + 0x0d, 0xfd, 0xa8, 0x69, 0x6c, 0x1c, 0xdf, 0xd8, 0x20, 0x63, 0x0b, 
0x3b, 0xeb, 0x96, 0x6d, 0x12, + 0x13, 0xc9, 0x6e, 0xeb, 0x3a, 0x6d, 0x5d, 0x3f, 0xbe, 0xb1, 0xbc, 0xe2, 0xe1, 0x9b, 0xf6, 0xd8, + 0x22, 0x26, 0x95, 0xb0, 0x6c, 0xd3, 0x6c, 0x73, 0x89, 0x40, 0x3b, 0xd3, 0xc3, 0x9a, 0x75, 0x5b, + 0xef, 0x0b, 0x8d, 0xcb, 0x17, 0xa7, 0xdb, 0x8f, 0xf5, 0x9e, 0xd1, 0xd2, 0x89, 0x69, 0x0b, 0xc8, + 0x62, 0xc7, 0xec, 0x98, 0xec, 0x73, 0x83, 0x7e, 0x89, 0xda, 0xd5, 0x8e, 0x69, 0x76, 0x7a, 0x78, + 0x83, 0x95, 0x8e, 0x86, 0xed, 0x0d, 0x62, 0xf4, 0xb1, 0x43, 0xf4, 0xbe, 0xc5, 0x01, 0xca, 0x9f, + 0x73, 0x30, 0xa3, 0xe2, 0xf7, 0x87, 0xd8, 0x21, 0xe8, 0x79, 0x48, 0xe1, 0x66, 0xd7, 0x2c, 0x49, + 0x6b, 0xd2, 0xd5, 0xfc, 0xcd, 0x27, 0xd7, 0x27, 0xa7, 0xb1, 0x5e, 0x6d, 0x76, 0x4d, 0x01, 0xde, + 0x3e, 0xa3, 0x32, 0x30, 0x7a, 0x11, 0xd2, 0xed, 0xde, 0xd0, 0xe9, 0x96, 0x12, 0x4c, 0x6a, 0x65, + 0x5a, 0x6a, 0x8b, 0x36, 0xfb, 0x62, 0x1c, 0x4e, 0x3b, 0x33, 0x06, 0x6d, 0xb3, 0x94, 0x8c, 0xeb, + 0x6c, 0x67, 0xd0, 0x0e, 0x76, 0x46, 0xc1, 0xa8, 0x02, 0x60, 0x0c, 0x0c, 0xa2, 0x35, 0xbb, 0xba, + 0x31, 0x28, 0xa5, 0x99, 0xa8, 0x12, 0x25, 0x6a, 0x90, 0x0a, 0x85, 0xf8, 0xf2, 0x39, 0xc3, 0xad, + 0xa3, 0x23, 0x7e, 0x7f, 0x88, 0xed, 0x71, 0x29, 0x13, 0x37, 0xe2, 0xb7, 0x69, 0x73, 0x60, 0xc4, + 0x0c, 0x8e, 0x5e, 0x87, 0x6c, 0xb3, 0x8b, 0x9b, 0x0f, 0x34, 0x32, 0x2a, 0x65, 0x99, 0xe8, 0xda, + 0xb4, 0x68, 0x85, 0x22, 0x1a, 0x23, 0x5f, 0x78, 0xa6, 0xc9, 0x6b, 0xd0, 0x2b, 0x90, 0x69, 0x9a, + 0xfd, 0xbe, 0x41, 0x4a, 0x79, 0x26, 0xbc, 0x1a, 0x21, 0xcc, 0xda, 0x7d, 0x59, 0x21, 0x80, 0x0e, + 0x60, 0xb6, 0x67, 0x38, 0x44, 0x73, 0x06, 0xba, 0xe5, 0x74, 0x4d, 0xe2, 0x94, 0x0a, 0x4c, 0xc5, + 0xd3, 0xd3, 0x2a, 0xf6, 0x0c, 0x87, 0xd4, 0x5d, 0x98, 0xaf, 0xa9, 0xd8, 0x0b, 0xd6, 0x53, 0x85, + 0x66, 0xbb, 0x8d, 0x6d, 0x4f, 0x63, 0xa9, 0x18, 0xa7, 0xf0, 0x80, 0xe2, 0x5c, 0xc9, 0x80, 0x42, + 0x33, 0x58, 0x8f, 0xbe, 0x0b, 0x0b, 0x3d, 0x53, 0x6f, 0x79, 0xfa, 0xb4, 0x66, 0x77, 0x38, 0x78, + 0x50, 0x9a, 0x65, 0x5a, 0xaf, 0x45, 0x0c, 0xd3, 0xd4, 0x5b, 0xae, 0x70, 0x85, 0x42, 0x7d, 0xcd, + 0xf3, 
0xbd, 0xc9, 0x36, 0xa4, 0xc1, 0xa2, 0x6e, 0x59, 0xbd, 0xf1, 0xa4, 0xfa, 0x39, 0xa6, 0xfe, + 0xfa, 0xb4, 0xfa, 0x32, 0x45, 0xc7, 0xe8, 0x47, 0xfa, 0x54, 0x23, 0xba, 0x0b, 0xb2, 0x65, 0x63, + 0x4b, 0xb7, 0xb1, 0x66, 0xd9, 0xa6, 0x65, 0x3a, 0x7a, 0xaf, 0x24, 0x33, 0xe5, 0x57, 0xa7, 0x95, + 0xd7, 0x38, 0xb2, 0x26, 0x80, 0xbe, 0xe6, 0x39, 0x2b, 0xdc, 0xc2, 0xd5, 0x9a, 0x4d, 0xec, 0x38, + 0xbe, 0xda, 0xf9, 0x78, 0xb5, 0x0c, 0x19, 0xa9, 0x36, 0xd4, 0x82, 0xb6, 0x20, 0x8f, 0x47, 0x04, + 0x0f, 0x5a, 0xda, 0xb1, 0x49, 0x70, 0x09, 0x31, 0x8d, 0x97, 0x22, 0x8e, 0x2b, 0x03, 0x1d, 0x9a, + 0x04, 0xfb, 0xca, 0x00, 0x7b, 0x95, 0xe8, 0x08, 0x96, 0x8e, 0xb1, 0x6d, 0xb4, 0xc7, 0x4c, 0x8f, + 0xc6, 0x5a, 0x1c, 0xc3, 0x1c, 0x94, 0x16, 0x98, 0xc6, 0x67, 0xa7, 0x35, 0x1e, 0x32, 0x38, 0x15, + 0xae, 0xba, 0x60, 0x5f, 0xf5, 0xc2, 0xf1, 0x74, 0x2b, 0xdd, 0x69, 0x6d, 0x63, 0xa0, 0xf7, 0x8c, + 0x1f, 0x60, 0xed, 0xa8, 0x67, 0x36, 0x1f, 0x94, 0x16, 0xe3, 0x76, 0xda, 0x96, 0xc0, 0x6d, 0x52, + 0x58, 0x60, 0xa7, 0xb5, 0x83, 0xf5, 0x9b, 0x33, 0x90, 0x3e, 0xd6, 0x7b, 0x43, 0xbc, 0x9b, 0xca, + 0xa6, 0xe4, 0xf4, 0x6e, 0x2a, 0x3b, 0x23, 0x67, 0x77, 0x53, 0xd9, 0x9c, 0x0c, 0xbb, 0xa9, 0x2c, + 0xc8, 0x79, 0xe5, 0x0a, 0xe4, 0x03, 0x7e, 0x0a, 0x95, 0x60, 0xa6, 0x8f, 0x1d, 0x47, 0xef, 0x60, + 0xe6, 0xd7, 0x72, 0xaa, 0x5b, 0x54, 0x66, 0xa1, 0x10, 0x74, 0x4d, 0xca, 0xc7, 0x12, 0xe4, 0x03, + 0x4e, 0x87, 0x4a, 0x1e, 0x63, 0x9b, 0x19, 0x44, 0x48, 0x8a, 0x22, 0xba, 0x04, 0x45, 0x36, 0x17, + 0xcd, 0x6d, 0xa7, 0xbe, 0x2f, 0xa5, 0x16, 0x58, 0xe5, 0xa1, 0x00, 0xad, 0x42, 0xde, 0xba, 0x69, + 0x79, 0x90, 0x24, 0x83, 0x80, 0x75, 0xd3, 0x72, 0x01, 0x17, 0xa1, 0x40, 0xa7, 0xee, 0x21, 0x52, + 0xac, 0x93, 0x3c, 0xad, 0x13, 0x10, 0xe5, 0x4f, 0x09, 0x90, 0x27, 0x9d, 0x19, 0x7a, 0x19, 0x52, + 0xd4, 0x8b, 0x0b, 0x37, 0xbd, 0xbc, 0xce, 0x5d, 0xfc, 0xba, 0xeb, 0xe2, 0xd7, 0x1b, 0xae, 0x8b, + 0xdf, 0xcc, 0x7e, 0xf2, 0xd9, 0xea, 0x99, 0x8f, 0xff, 0xba, 0x2a, 0xa9, 0x4c, 0x02, 0x9d, 0xa7, + 0x1e, 0x4c, 0x37, 0x06, 0x9a, 0xd1, 0x62, 
0x43, 0xce, 0x51, 0xef, 0xa4, 0x1b, 0x83, 0x9d, 0x16, + 0xba, 0x03, 0x72, 0xd3, 0x1c, 0x38, 0x78, 0xe0, 0x0c, 0x1d, 0x8d, 0xc7, 0x1e, 0xe1, 0x9a, 0x03, + 0xfe, 0x95, 0x07, 0x39, 0xe6, 0xa8, 0x04, 0xb4, 0xc6, 0x90, 0xea, 0x5c, 0x33, 0x5c, 0x81, 0xde, + 0x02, 0xf0, 0x02, 0x94, 0x53, 0x4a, 0xad, 0x25, 0xaf, 0xe6, 0x6f, 0x5e, 0x8c, 0xd8, 0x4f, 0x2e, + 0xe6, 0xae, 0xd5, 0xd2, 0x09, 0xde, 0x4c, 0xd1, 0x01, 0xab, 0x01, 0x51, 0xf4, 0x34, 0xcc, 0xe9, + 0x96, 0xa5, 0x39, 0x44, 0x27, 0x58, 0x3b, 0x1a, 0x13, 0xec, 0x30, 0xb7, 0x5f, 0x50, 0x8b, 0xba, + 0x65, 0xd5, 0x69, 0xed, 0x26, 0xad, 0x44, 0x97, 0x61, 0x96, 0x7a, 0x78, 0x43, 0xef, 0x69, 0x5d, + 0x6c, 0x74, 0xba, 0x84, 0x79, 0xf7, 0xa4, 0x5a, 0x14, 0xb5, 0xdb, 0xac, 0x52, 0x69, 0x41, 0x21, + 0xe8, 0xdc, 0x11, 0x82, 0x54, 0x4b, 0x27, 0x3a, 0xb3, 0x65, 0x41, 0x65, 0xdf, 0xb4, 0xce, 0xd2, + 0x49, 0x57, 0x58, 0x88, 0x7d, 0xa3, 0xb3, 0x90, 0x11, 0x6a, 0x93, 0x4c, 0xad, 0x28, 0xa1, 0x45, + 0x48, 0x5b, 0xb6, 0x79, 0x8c, 0xd9, 0xe2, 0x65, 0x55, 0x5e, 0x50, 0xee, 0xc1, 0x6c, 0x38, 0x0e, + 0xa0, 0x59, 0x48, 0x90, 0x91, 0xe8, 0x25, 0x41, 0x46, 0xe8, 0x06, 0xa4, 0xa8, 0x31, 0x99, 0xb6, + 0xd9, 0xa8, 0xe8, 0x27, 0xe4, 0x1b, 0x63, 0x0b, 0xab, 0x0c, 0xba, 0x9b, 0xca, 0x26, 0xe4, 0xa4, + 0x32, 0x07, 0xc5, 0x50, 0x94, 0x50, 0xce, 0xc2, 0x62, 0x94, 0xcf, 0x57, 0x0c, 0x58, 0x8c, 0x72, + 0xdd, 0xe8, 0x45, 0xc8, 0x7a, 0x4e, 0xdf, 0xdd, 0x41, 0x53, 0xbd, 0x7b, 0x42, 0x1e, 0x96, 0xee, + 0x1d, 0xba, 0x10, 0x5d, 0x5d, 0x84, 0xfa, 0x82, 0x3a, 0xa3, 0x5b, 0xd6, 0xb6, 0xee, 0x74, 0x95, + 0x77, 0xa1, 0x14, 0xe7, 0xcf, 0x03, 0x86, 0x93, 0xd8, 0x01, 0x70, 0x0d, 0x77, 0x16, 0x32, 0x6d, + 0xd3, 0xee, 0xeb, 0x84, 0x29, 0x2b, 0xaa, 0xa2, 0x44, 0x0d, 0xca, 0x7d, 0x7b, 0x92, 0x55, 0xf3, + 0x82, 0xa2, 0xc1, 0xf9, 0x58, 0x97, 0x4e, 0x45, 0x8c, 0x41, 0x0b, 0x73, 0xf3, 0x16, 0x55, 0x5e, + 0xf0, 0x15, 0xf1, 0xc1, 0xf2, 0x02, 0xed, 0xd6, 0xc1, 0x83, 0x16, 0xb6, 0x99, 0xfe, 0x9c, 0x2a, + 0x4a, 0xca, 0x2f, 0x92, 0x70, 0x36, 0xda, 0xaf, 0xa3, 0x35, 0x28, 0xf4, 0xf5, 
0x91, 0x46, 0x46, + 0x62, 0xfb, 0x49, 0x6c, 0x03, 0x40, 0x5f, 0x1f, 0x35, 0x46, 0x7c, 0xef, 0xc9, 0x90, 0x24, 0x23, + 0xa7, 0x94, 0x58, 0x4b, 0x5e, 0x2d, 0xa8, 0xf4, 0x13, 0x1d, 0xc2, 0x7c, 0xcf, 0x6c, 0xea, 0x3d, + 0xad, 0xa7, 0x3b, 0x44, 0x13, 0x61, 0x9f, 0x1f, 0xa7, 0xa7, 0xe2, 0xfc, 0x34, 0x6e, 0xf1, 0x85, + 0xa5, 0x2e, 0x48, 0x1c, 0x84, 0x39, 0xa6, 0x64, 0x4f, 0x77, 0x08, 0x6f, 0x42, 0x55, 0xc8, 0xf7, + 0x0d, 0xe7, 0x08, 0x77, 0xf5, 0x63, 0xc3, 0xb4, 0xc5, 0xb9, 0x8a, 0xd8, 0x3d, 0x77, 0x7c, 0x90, + 0x50, 0x15, 0x94, 0x0b, 0x2c, 0x4a, 0x3a, 0xb4, 0x9b, 0x5d, 0xcf, 0x92, 0x79, 0x64, 0xcf, 0xf2, + 0x0d, 0x58, 0x1c, 0xe0, 0x11, 0xd1, 0xfc, 0x93, 0xcb, 0x77, 0xca, 0x0c, 0x33, 0x3e, 0xa2, 0x6d, + 0xde, 0x59, 0x77, 0xe8, 0xa6, 0x41, 0xcf, 0xb0, 0xd8, 0x68, 0x99, 0x0e, 0xb6, 0x35, 0xbd, 0xd5, + 0xb2, 0xb1, 0xe3, 0xb0, 0xac, 0xaa, 0xc0, 0xe2, 0x1d, 0xab, 0x2f, 0xf3, 0x6a, 0xe5, 0x23, 0xb6, + 0x38, 0x51, 0xd1, 0xd1, 0x35, 0xbd, 0xe4, 0x9b, 0xbe, 0x01, 0x8b, 0x42, 0xbe, 0x15, 0xb2, 0x3e, + 0x4f, 0x4f, 0x2f, 0xc4, 0x25, 0x5d, 0x01, 0xab, 0x23, 0x57, 0x3e, 0xde, 0xf0, 0xc9, 0xc7, 0x34, + 0x3c, 0x82, 0x14, 0x33, 0x4b, 0x8a, 0xbb, 0x1b, 0xfa, 0xfd, 0xdf, 0xb6, 0x18, 0x1f, 0x26, 0x61, + 0x7e, 0x2a, 0xb1, 0xf0, 0x26, 0x26, 0x45, 0x4e, 0x2c, 0x11, 0x39, 0xb1, 0xe4, 0x23, 0x4f, 0x4c, + 0xac, 0x76, 0xea, 0xf4, 0xd5, 0x4e, 0x7f, 0x9d, 0xab, 0x9d, 0x79, 0xcc, 0xd5, 0xfe, 0x8f, 0xae, + 0xc3, 0x2f, 0x25, 0x58, 0x8e, 0x4f, 0xc7, 0x22, 0x17, 0xe4, 0x3a, 0xcc, 0x7b, 0x43, 0xf1, 0xd4, + 0x73, 0xf7, 0x28, 0x7b, 0x0d, 0x42, 0x7f, 0x6c, 0xc4, 0xbb, 0x0c, 0xb3, 0x13, 0xd9, 0x22, 0xdf, + 0xcc, 0xc5, 0xe3, 0xe0, 0x30, 0x94, 0xdf, 0x27, 0x61, 0x31, 0x2a, 0xa1, 0x8b, 0x38, 0xb1, 0x2a, + 0x2c, 0xb4, 0x70, 0xd3, 0x68, 0x3d, 0xf6, 0x81, 0x9d, 0x17, 0xe2, 0xff, 0x3f, 0xaf, 0xd3, 0xfb, + 0x04, 0x5d, 0x83, 0x79, 0x67, 0x3c, 0x68, 0x1a, 0x83, 0x8e, 0x46, 0x4c, 0x37, 0x37, 0xca, 0xb1, + 0x91, 0xcf, 0x89, 0x86, 0x86, 0x29, 0xb2, 0xa3, 0xdf, 0x02, 0x64, 0x55, 0xec, 0x58, 0x34, 0x99, + 0x43, 0x15, 0xc8, 
0xe1, 0x51, 0x13, 0x5b, 0xc4, 0x4d, 0x80, 0x63, 0xee, 0x18, 0x02, 0xe2, 0xca, + 0xd1, 0xbb, 0xb6, 0x27, 0x87, 0x5e, 0x10, 0x94, 0x42, 0x2c, 0x39, 0xc0, 0x53, 0x75, 0x4f, 0x94, + 0x73, 0x0a, 0x2f, 0xb9, 0x9c, 0x42, 0x32, 0xee, 0xa6, 0x2c, 0x12, 0x77, 0x4f, 0x4e, 0x90, 0x0a, + 0x2f, 0x08, 0x52, 0x21, 0x15, 0xd7, 0x1d, 0xcf, 0xef, 0xfd, 0xee, 0x18, 0xab, 0x70, 0x2b, 0xc4, + 0x2a, 0x64, 0xe2, 0xa6, 0x1a, 0x48, 0xc4, 0xfd, 0xa9, 0xfa, 0xb4, 0xc2, 0x4b, 0x2e, 0xad, 0x30, + 0x13, 0x37, 0x68, 0x91, 0x79, 0xfa, 0x83, 0xe6, 0xbc, 0xc2, 0x1b, 0x01, 0x5e, 0x21, 0xc7, 0x64, + 0x2f, 0x9e, 0xc0, 0x2b, 0x78, 0xd2, 0x1e, 0xb1, 0xf0, 0xaa, 0x47, 0x2c, 0x14, 0x62, 0x59, 0x09, + 0x91, 0x32, 0x7a, 0xc2, 0x2e, 0xb3, 0x50, 0x9b, 0x62, 0x16, 0x38, 0x11, 0x70, 0xe5, 0x54, 0x66, + 0xc1, 0x53, 0x35, 0x41, 0x2d, 0xd4, 0xa6, 0xa8, 0x85, 0xd9, 0x38, 0x8d, 0x13, 0xf9, 0xa9, 0xaf, + 0x31, 0xcc, 0x2d, 0x7c, 0x2f, 0x9a, 0x5b, 0x88, 0xbd, 0xfc, 0x47, 0xe4, 0xa2, 0x9e, 0xea, 0x08, + 0x72, 0xe1, 0xdd, 0x18, 0x72, 0x41, 0x8e, 0xbb, 0x04, 0x47, 0x65, 0xa2, 0x5e, 0x07, 0x51, 0xec, + 0xc2, 0x61, 0x04, 0xbb, 0xc0, 0x69, 0x80, 0x67, 0x1e, 0x82, 0x5d, 0xf0, 0x54, 0x4f, 0xd1, 0x0b, + 0x87, 0x11, 0xf4, 0x02, 0x8a, 0xd7, 0x3b, 0x91, 0x40, 0x05, 0xf5, 0x86, 0xf9, 0x85, 0xb7, 0xc2, + 0xfc, 0xc2, 0xc2, 0xc9, 0x79, 0x2b, 0x4f, 0x03, 0x3c, 0x6d, 0x41, 0x82, 0xa1, 0x19, 0x47, 0x30, + 0x70, 0x0e, 0xe0, 0xb9, 0x87, 0x24, 0x18, 0x3c, 0xdd, 0x91, 0x0c, 0x43, 0x6d, 0x8a, 0x61, 0x58, + 0x8a, 0xdb, 0x70, 0x13, 0x01, 0xc9, 0xdf, 0x70, 0xb1, 0x14, 0x43, 0x5a, 0xce, 0xec, 0xa6, 0xb2, + 0x59, 0x39, 0xc7, 0xc9, 0x85, 0xdd, 0x54, 0x36, 0x2f, 0x17, 0x94, 0x67, 0x68, 0x0a, 0x34, 0xe1, + 0xf7, 0xe8, 0x85, 0x03, 0xdb, 0xb6, 0x69, 0x0b, 0xb2, 0x80, 0x17, 0x94, 0xab, 0x50, 0x08, 0xba, + 0xb8, 0x13, 0xe8, 0x88, 0x39, 0x28, 0x86, 0xbc, 0x9a, 0xf2, 0xaf, 0x04, 0x14, 0x82, 0xfe, 0x2a, + 0x74, 0x59, 0xcd, 0x89, 0xcb, 0x6a, 0x80, 0xa4, 0x48, 0x84, 0x49, 0x8a, 0x55, 0xc8, 0xd3, 0x0b, + 0xdb, 0x04, 0xff, 0xa0, 0x5b, 0x1e, 0xff, 0x70, 0x0d, 
0xe6, 0x59, 0xbc, 0xe5, 0x54, 0x86, 0x88, + 0x0c, 0x29, 0x1e, 0x19, 0x68, 0x03, 0x33, 0x06, 0x8f, 0x0c, 0xe8, 0x39, 0x58, 0x08, 0x60, 0xbd, + 0x8b, 0x20, 0xbf, 0x8a, 0xcb, 0x1e, 0xba, 0xcc, 0x6f, 0x84, 0xe8, 0x3b, 0x30, 0xd7, 0xd3, 0x07, + 0x74, 0xbb, 0x1b, 0xa6, 0x6d, 0x10, 0x03, 0x3b, 0x22, 0x89, 0xba, 0x79, 0xb2, 0x4b, 0x5e, 0xdf, + 0xd3, 0x07, 0xb8, 0xe6, 0x09, 0x55, 0x07, 0xc4, 0x1e, 0xab, 0xb3, 0xbd, 0x50, 0x25, 0xba, 0x08, + 0x85, 0x16, 0x6e, 0xeb, 0xc3, 0x1e, 0xd1, 0x68, 0x0b, 0xf3, 0xb7, 0x39, 0x35, 0x2f, 0xea, 0xa8, + 0x86, 0xe5, 0x32, 0x2c, 0x44, 0x68, 0xa2, 0xb9, 0xc7, 0x03, 0x3c, 0x16, 0xf6, 0xa3, 0x9f, 0x74, + 0xd1, 0xd8, 0x52, 0x8b, 0x5b, 0x28, 0x2f, 0xbc, 0x9a, 0x78, 0x59, 0x52, 0xfe, 0x28, 0xc1, 0xfc, + 0x94, 0xc7, 0x8f, 0xa4, 0x49, 0xa4, 0xaf, 0x8b, 0x26, 0x49, 0x3c, 0x3e, 0x4d, 0x12, 0xbc, 0x9d, + 0x27, 0xc3, 0xb7, 0xf3, 0x7f, 0x4a, 0x50, 0x0c, 0x45, 0x1e, 0xba, 0x8f, 0x9a, 0x66, 0x0b, 0x8b, + 0xfb, 0x32, 0xfb, 0xa6, 0xa6, 0xe9, 0x99, 0x1d, 0x71, 0x2b, 0xa6, 0x9f, 0x14, 0xe5, 0xc5, 0xd2, + 0x9c, 0x88, 0x94, 0xde, 0x55, 0x9b, 0xa7, 0x3e, 0xe2, 0xaa, 0x2d, 0xcc, 0x9a, 0x61, 0xfd, 0x86, + 0xcd, 0xca, 0x53, 0x18, 0x5e, 0x40, 0xaf, 0x40, 0x8e, 0x3d, 0x7a, 0x68, 0xa6, 0xe5, 0x08, 0x06, + 0x3d, 0x90, 0xde, 0xf1, 0x97, 0x11, 0xe1, 0xaa, 0xcc, 0xf6, 0x81, 0xe5, 0xa8, 0x59, 0x4b, 0x7c, + 0x05, 0x92, 0xae, 0x5c, 0x28, 0xe9, 0xba, 0x00, 0x39, 0x3a, 0x7c, 0xc7, 0xd2, 0x9b, 0xb8, 0x04, + 0x6c, 0xa4, 0x7e, 0x85, 0xf2, 0x87, 0x04, 0xcc, 0x4d, 0x04, 0xce, 0xc8, 0xc9, 0xbb, 0x07, 0x2b, + 0x11, 0x60, 0x81, 0x1e, 0xce, 0x20, 0x2b, 0x00, 0x1d, 0xdd, 0xd1, 0x3e, 0xd0, 0x07, 0x04, 0xb7, + 0x84, 0x55, 0x02, 0x35, 0x68, 0x19, 0xb2, 0xb4, 0x34, 0x74, 0x70, 0x4b, 0x10, 0x52, 0x5e, 0x19, + 0xed, 0x40, 0x06, 0x1f, 0xe3, 0x01, 0x71, 0x4a, 0x33, 0x6c, 0xe1, 0xcf, 0x45, 0x78, 0x58, 0xda, + 0xbe, 0x59, 0xa2, 0xcb, 0xfd, 0xf7, 0xcf, 0x56, 0x65, 0x0e, 0x7f, 0xd6, 0xec, 0x1b, 0x04, 0xf7, + 0x2d, 0x32, 0x56, 0x85, 0x82, 0xb0, 0x19, 0xb2, 0x13, 0x66, 0x40, 0xe7, 0x60, 0x86, 0x9d, 
0x46, + 0xa3, 0xc5, 0x32, 0x84, 0x9c, 0x9a, 0xa1, 0xc5, 0x9d, 0x16, 0xa3, 0x4d, 0x0b, 0x2e, 0x07, 0x42, + 0xad, 0xcd, 0x8e, 0xcb, 0x58, 0x2d, 0xf6, 0x71, 0xdf, 0x32, 0xcd, 0x9e, 0xc6, 0x7d, 0x58, 0x19, + 0x66, 0xc3, 0x09, 0x04, 0xba, 0x04, 0x45, 0x1b, 0x13, 0xdd, 0x18, 0x68, 0xa1, 0x3b, 0x42, 0x81, + 0x57, 0x72, 0x9f, 0xb1, 0x9b, 0xca, 0x4a, 0x72, 0x42, 0xd0, 0x56, 0x6f, 0xc3, 0x52, 0x64, 0xfe, + 0x80, 0x5e, 0x86, 0x9c, 0x9f, 0x7b, 0x48, 0xcc, 0x0e, 0x27, 0xf1, 0x51, 0x3e, 0x58, 0x39, 0x84, + 0xa5, 0xc8, 0x04, 0x02, 0xbd, 0x0e, 0x19, 0x1b, 0x3b, 0xc3, 0x1e, 0xa7, 0x9c, 0x66, 0x6f, 0x5e, + 0x3e, 0x3d, 0xf3, 0x18, 0xf6, 0x88, 0x2a, 0x84, 0x94, 0x1b, 0x70, 0x3e, 0x36, 0x83, 0xf0, 0x59, + 0x25, 0x29, 0xc0, 0x2a, 0x29, 0xbf, 0x93, 0x60, 0x39, 0x3e, 0x2b, 0x40, 0x9b, 0x13, 0x03, 0xba, + 0xf6, 0x90, 0x39, 0x45, 0x60, 0x54, 0xf4, 0xda, 0x65, 0xe3, 0x36, 0x26, 0xcd, 0x2e, 0x4f, 0x4f, + 0xb8, 0xb7, 0x28, 0xaa, 0x45, 0x51, 0xcb, 0x64, 0x1c, 0x0e, 0x7b, 0x0f, 0x37, 0x89, 0xc6, 0x17, + 0xd5, 0x61, 0x57, 0x9f, 0x1c, 0x85, 0xd1, 0xda, 0x3a, 0xaf, 0x54, 0xae, 0xc3, 0xb9, 0x98, 0x3c, + 0x63, 0xfa, 0x7e, 0xa6, 0xdc, 0xa7, 0xe0, 0xc8, 0xe4, 0x01, 0xbd, 0x09, 0x19, 0x87, 0xe8, 0x64, + 0xe8, 0x88, 0x99, 0x5d, 0x39, 0x35, 0xef, 0xa8, 0x33, 0xb8, 0x2a, 0xc4, 0x94, 0xd7, 0x00, 0x4d, + 0x67, 0x11, 0x11, 0x77, 0x4c, 0x29, 0xea, 0x8e, 0x79, 0x04, 0x4f, 0x9c, 0x90, 0x2f, 0xa0, 0xca, + 0xc4, 0xe0, 0xae, 0x3f, 0x54, 0xba, 0x31, 0x31, 0xc0, 0x7f, 0x24, 0x60, 0x29, 0x32, 0x6d, 0x08, + 0x1c, 0x5f, 0xe9, 0xab, 0x1e, 0xdf, 0xd7, 0x01, 0xc8, 0x48, 0xe3, 0x2b, 0xed, 0x86, 0x81, 0xa8, + 0xbb, 0xd2, 0x08, 0x37, 0x99, 0x27, 0xa3, 0x1b, 0x23, 0x47, 0xc4, 0x97, 0x83, 0x1a, 0xc1, 0x7b, + 0xfd, 0x90, 0x85, 0x08, 0x47, 0x5c, 0x79, 0x1f, 0x3a, 0x98, 0xf8, 0x04, 0x00, 0xaf, 0x76, 0xd0, + 0x7d, 0x38, 0x37, 0x11, 0xea, 0x3c, 0xdd, 0xa9, 0x87, 0x8e, 0x78, 0x4b, 0xe1, 0x88, 0xe7, 0xea, + 0x0e, 0x86, 0xab, 0x74, 0x38, 0x5c, 0xdd, 0x07, 0xf0, 0x2f, 0xf8, 0xf4, 0xbc, 0xd9, 0xe6, 0x70, + 0xd0, 0x62, 0x4b, 0x98, 0x56, 
0x79, 0x01, 0xbd, 0x08, 0x69, 0xba, 0x13, 0x5c, 0x53, 0x45, 0x38, + 0x0c, 0xba, 0xa4, 0x01, 0x86, 0x80, 0xc3, 0x95, 0xf7, 0xdc, 0xdd, 0x16, 0xe4, 0x5a, 0x63, 0xfa, + 0x78, 0x23, 0xdc, 0x87, 0x12, 0x4f, 0xdb, 0x46, 0xf7, 0xf5, 0x43, 0x48, 0xb3, 0xe5, 0xa7, 0x61, + 0x83, 0x51, 0xfd, 0x22, 0x6b, 0xa3, 0xdf, 0xe8, 0xfb, 0x00, 0x3a, 0x21, 0xb6, 0x71, 0x34, 0xf4, + 0x7b, 0x58, 0x8b, 0xd9, 0x3f, 0x65, 0x17, 0xb8, 0x79, 0x41, 0x6c, 0xa4, 0x45, 0x5f, 0x36, 0xb0, + 0x99, 0x02, 0x1a, 0x95, 0x7d, 0x98, 0x0d, 0xcb, 0x9e, 0x96, 0xfa, 0xe4, 0xdc, 0x18, 0xed, 0x45, + 0xf8, 0x24, 0x7f, 0xd0, 0x60, 0x05, 0xe5, 0x47, 0x09, 0x28, 0x04, 0x77, 0xdf, 0xff, 0x60, 0x14, + 0x55, 0x7e, 0x2a, 0x41, 0xd6, 0x9b, 0x7f, 0xf8, 0x59, 0x23, 0xf4, 0x1e, 0xc4, 0xcd, 0x97, 0x08, + 0xbe, 0x45, 0xf0, 0xd7, 0x9f, 0xa4, 0xf7, 0xfa, 0xf3, 0x2d, 0x2f, 0x20, 0xc4, 0x12, 0x15, 0x41, + 0x6b, 0x8b, 0x8d, 0xe5, 0x06, 0xa8, 0xd7, 0x20, 0xe7, 0x9d, 0x61, 0x9a, 0xff, 0xbb, 0x04, 0x90, + 0x24, 0x0e, 0x92, 0x20, 0x7e, 0x16, 0x21, 0x6d, 0x99, 0x1f, 0x88, 0x97, 0x8e, 0xa4, 0xca, 0x0b, + 0x8a, 0x03, 0x73, 0x13, 0x0e, 0xc0, 0x07, 0x26, 0x02, 0x40, 0xa4, 0x40, 0xd1, 0x1a, 0x1e, 0x69, + 0x0f, 0xf0, 0x58, 0xbc, 0x7b, 0xf0, 0xe1, 0xe7, 0xad, 0xe1, 0xd1, 0x6d, 0x3c, 0xe6, 0x0f, 0x1f, + 0x6b, 0x50, 0x70, 0x31, 0x6c, 0x8b, 0xf3, 0x35, 0x05, 0x0e, 0x69, 0xf0, 0x47, 0x2b, 0x49, 0x4e, + 0x28, 0x3f, 0x97, 0x20, 0xeb, 0x9e, 0x12, 0xf4, 0x26, 0xe4, 0x3c, 0x5f, 0x23, 0x72, 0xe7, 0x27, + 0x4e, 0xf0, 0x52, 0x62, 0xf2, 0xbe, 0x0c, 0xda, 0x74, 0x5f, 0x5f, 0x8d, 0x96, 0xd6, 0xee, 0xe9, + 0x1d, 0xf1, 0x88, 0xb6, 0x12, 0xe1, 0x8e, 0x98, 0xc7, 0xde, 0xb9, 0xb5, 0xd5, 0xd3, 0x3b, 0x6a, + 0x9e, 0x09, 0xed, 0xb4, 0x68, 0x41, 0x64, 0x25, 0x5f, 0x4a, 0x20, 0x4f, 0x9e, 0xe2, 0xaf, 0x3e, + 0xbe, 0xe9, 0xe8, 0x95, 0x8c, 0x88, 0x5e, 0x68, 0x03, 0x16, 0x3c, 0x84, 0xe6, 0x18, 0x9d, 0x81, + 0x4e, 0x86, 0x36, 0x16, 0x54, 0x23, 0xf2, 0x9a, 0xea, 0x6e, 0xcb, 0xf4, 0xbc, 0xd3, 0x8f, 0x3b, + 0xef, 0x0f, 0x13, 0x90, 0x0f, 0x30, 0x9f, 0xe8, 0x9b, 0x01, 0x17, 
0x35, 0x1b, 0x15, 0x33, 0x02, + 0x60, 0xff, 0x45, 0x32, 0x6c, 0xa9, 0xc4, 0x63, 0x58, 0x2a, 0x8e, 0x63, 0x76, 0xa9, 0xd4, 0xd4, + 0x23, 0x53, 0xa9, 0xcf, 0x02, 0x22, 0x26, 0xd1, 0x7b, 0xda, 0xb1, 0x49, 0x8c, 0x41, 0x47, 0xe3, + 0x1b, 0x9b, 0x7b, 0x14, 0x99, 0xb5, 0x1c, 0xb2, 0x86, 0x1a, 0x3b, 0x0c, 0x3f, 0x96, 0x20, 0xeb, + 0xd1, 0x4c, 0x8f, 0xfa, 0x52, 0x79, 0x16, 0x32, 0x22, 0x13, 0xe3, 0x4f, 0x95, 0xa2, 0x14, 0xc9, + 0x19, 0x2f, 0x43, 0xb6, 0x8f, 0x89, 0xce, 0xdc, 0x23, 0x8f, 0x77, 0x5e, 0xf9, 0xda, 0x11, 0xe4, + 0x03, 0x8f, 0xbd, 0xe8, 0x3c, 0x2c, 0x55, 0xb6, 0xab, 0x95, 0xdb, 0x5a, 0xe3, 0x1d, 0xad, 0x71, + 0xaf, 0x56, 0xd5, 0xee, 0xee, 0xdf, 0xde, 0x3f, 0xf8, 0xf6, 0xbe, 0x7c, 0x66, 0xba, 0x49, 0xad, + 0xb2, 0xb2, 0x2c, 0xa1, 0x73, 0xb0, 0x10, 0x6e, 0xe2, 0x0d, 0x89, 0xe5, 0xd4, 0xcf, 0x7e, 0xb3, + 0x72, 0xe6, 0xda, 0x97, 0x12, 0x2c, 0x44, 0xe4, 0xbc, 0xe8, 0x22, 0x3c, 0x79, 0xb0, 0xb5, 0x55, + 0x55, 0xb5, 0xfa, 0x7e, 0xb9, 0x56, 0xdf, 0x3e, 0x68, 0x68, 0x6a, 0xb5, 0x7e, 0x77, 0xaf, 0x11, + 0xe8, 0x74, 0x0d, 0x2e, 0x44, 0x43, 0xca, 0x95, 0x4a, 0xb5, 0xd6, 0x90, 0x25, 0xb4, 0x0a, 0x4f, + 0xc4, 0x20, 0x36, 0x0f, 0xd4, 0x86, 0x9c, 0x88, 0x57, 0xa1, 0x56, 0x77, 0xab, 0x95, 0x86, 0x9c, + 0x44, 0x57, 0xe0, 0xd2, 0x49, 0x08, 0x6d, 0xeb, 0x40, 0xbd, 0x53, 0x6e, 0xc8, 0xa9, 0x53, 0x81, + 0xf5, 0xea, 0xfe, 0xad, 0xaa, 0x2a, 0xa7, 0xc5, 0xbc, 0x7f, 0x9d, 0x80, 0x52, 0x5c, 0x6a, 0x4d, + 0x75, 0x95, 0x6b, 0xb5, 0xbd, 0x7b, 0xbe, 0xae, 0xca, 0xf6, 0xdd, 0xfd, 0xdb, 0xd3, 0x26, 0x78, + 0x1a, 0x94, 0x93, 0x80, 0x9e, 0x21, 0x2e, 0xc3, 0xc5, 0x13, 0x71, 0xc2, 0x1c, 0xa7, 0xc0, 0xd4, + 0x6a, 0x43, 0xbd, 0x27, 0x27, 0xd1, 0x3a, 0x5c, 0x3b, 0x15, 0xe6, 0xb5, 0xc9, 0x29, 0xb4, 0x01, + 0xd7, 0x4f, 0xc6, 0x73, 0x03, 0xb9, 0x02, 0xae, 0x89, 0x3e, 0x92, 0x60, 0x29, 0x32, 0x47, 0x47, + 0x97, 0x60, 0xb5, 0xa6, 0x1e, 0x54, 0xaa, 0xf5, 0xba, 0x56, 0x53, 0x0f, 0x6a, 0x07, 0xf5, 0xf2, + 0x9e, 0x56, 0x6f, 0x94, 0x1b, 0x77, 0xeb, 0x01, 0xdb, 0x28, 0xb0, 0x12, 0x07, 0xf2, 0xec, 0x72, + 0x02, 
0x46, 0xec, 0x00, 0x77, 0x9f, 0xfe, 0x4a, 0x82, 0xf3, 0xb1, 0x39, 0x39, 0xba, 0x0a, 0x4f, + 0x1d, 0x56, 0xd5, 0x9d, 0xad, 0x7b, 0xda, 0xe1, 0x41, 0xa3, 0xaa, 0x55, 0xdf, 0x69, 0x54, 0xf7, + 0xeb, 0x3b, 0x07, 0xfb, 0xd3, 0xa3, 0xba, 0x02, 0x97, 0x4e, 0x44, 0x7a, 0x43, 0x3b, 0x0d, 0x38, + 0x31, 0xbe, 0x9f, 0x48, 0x30, 0x37, 0xe1, 0x0b, 0xd1, 0x05, 0x28, 0xdd, 0xd9, 0xa9, 0x6f, 0x56, + 0xb7, 0xcb, 0x87, 0x3b, 0x07, 0xea, 0xe4, 0x99, 0xbd, 0x04, 0xab, 0x53, 0xad, 0xb7, 0xee, 0xd6, + 0xf6, 0x76, 0x2a, 0xe5, 0x46, 0x95, 0x75, 0x2a, 0x4b, 0x74, 0x62, 0x53, 0xa0, 0xbd, 0x9d, 0xb7, + 0xb6, 0x1b, 0x5a, 0x65, 0x6f, 0xa7, 0xba, 0xdf, 0xd0, 0xca, 0x8d, 0x46, 0xd9, 0x3f, 0xce, 0x9b, + 0xb7, 0x3f, 0xf9, 0x7c, 0x45, 0xfa, 0xf4, 0xf3, 0x15, 0xe9, 0x6f, 0x9f, 0xaf, 0x48, 0x1f, 0x7f, + 0xb1, 0x72, 0xe6, 0xd3, 0x2f, 0x56, 0xce, 0xfc, 0xe5, 0x8b, 0x95, 0x33, 0xf7, 0x6f, 0x74, 0x0c, + 0xd2, 0x1d, 0x1e, 0x51, 0x2f, 0xbc, 0xe1, 0xff, 0x73, 0xea, 0xfd, 0xac, 0x6a, 0x19, 0x1b, 0x93, + 0x7f, 0xae, 0x1e, 0x65, 0x98, 0x5b, 0x7d, 0xfe, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xed, 0x3c, + 0x51, 0xb6, 0xd4, 0x2a, 0x00, 0x00, } func (m *Request) Marshal() (dAtA []byte, err error) { @@ -5690,6 +5721,30 @@ func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.DefaultLane) > 0 { + i -= len(m.DefaultLane) + copy(dAtA[i:], m.DefaultLane) + i = encodeVarintTypes(dAtA, i, uint64(len(m.DefaultLane))) + i-- + dAtA[i] = 0x3a + } + if len(m.LanePriorities) > 0 { + for k := range m.LanePriorities { + v := m.LanePriorities[k] + baseI := i + i = encodeVarintTypes(dAtA, i, uint64(v)) + i-- + dAtA[i] = 0x10 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintTypes(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } if len(m.LastBlockAppHash) > 0 { i -= len(m.LastBlockAppHash) copy(dAtA[i:], m.LastBlockAppHash) @@ -5885,6 +5940,13 @@ func (m *CheckTxResponse) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { _ = i var l int _ = l + if len(m.LaneId) > 0 { + i -= len(m.LaneId) + copy(dAtA[i:], m.LaneId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LaneId))) + i-- + dAtA[i] = 0x62 + } if len(m.Codespace) > 0 { i -= len(m.Codespace) copy(dAtA[i:], m.Codespace) @@ -7770,6 +7832,18 @@ func (m *InfoResponse) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if len(m.LanePriorities) > 0 { + for k, v := range m.LanePriorities { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + sovTypes(uint64(v)) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + l = len(m.DefaultLane) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -7875,6 +7949,10 @@ func (m *CheckTxResponse) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + l = len(m.LaneId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -12567,6 +12645,151 @@ func (m *InfoResponse) Unmarshal(dAtA []byte) error { m.LastBlockAppHash = []byte{} } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LanePriorities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LanePriorities == nil { + m.LanePriorities = make(map[string]uint32) + } + var mapkey string + var mapvalue uint32 + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthTypes + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvalue |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.LanePriorities[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultLane", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultLane = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -13299,6 +13522,38 @@ func (m *CheckTxResponse) Unmarshal(dAtA []byte) error { } m.Codespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LaneId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LaneId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/docs/explanation/core/metrics.md b/docs/explanation/core/metrics.md index be1e30345c..ed3335a1ae 100644 --- a/docs/explanation/core/metrics.md +++ b/docs/explanation/core/metrics.md @@ -58,6 +58,8 @@ The following metrics are available: | p2p\_peer\_pending\_send\_bytes | Gauge | peer\_id | Number of pending bytes to be sent to a given peer | | p2p\_num\_txs | Gauge | peer\_id | Number of transactions submitted by each peer\_id | | p2p\_pending\_send\_bytes | Gauge | peer\_id | Amount of data pending to be sent to peer | +| mempool\_lane\_size | Counter | lane | Number of uncommitted transactions per lane | +| mempool\_lane\_bytes | Counter | lane | Number of used bytes per lane | | mempool\_size | Gauge | | Number of uncommitted transactions | | mempool\_tx\_size\_bytes | Histogram | | Transaction sizes in bytes | | mempool\_evicted\_txs | Counter | | Number of transactions that make it into the mempool and were later evicted for being invalid | diff --git 
a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index d9633301b3..c83385e84e 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -73,6 +73,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // Make Mempool mempool := mempl.NewCListMempool(config.Mempool, proxyAppConnMem, + nil, state.LastBlockHeight, mempl.WithPreCheck(sm.TxPreCheck(state)), mempl.WithPostCheck(sm.TxPostCheck(state))) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 0a1dfd3676..d764cb6ac5 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -429,12 +429,24 @@ func subscribeToVoterBuffered(cs *State, addr []byte) <-chan cmtpubsub.Message { return ch } +// ------------------------------------------------------------------------------- +// application + +func fetchAppInfo(t *testing.T, app abci.Application) (*abci.InfoResponse, *mempl.LanesInfo) { + t.Helper() + resp, err := app.Info(context.Background(), proxy.InfoRequest) + require.NoError(t, err) + lanesInfo, err := mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane) + require.NoError(t, err) + return resp, lanesInfo +} + // ------------------------------------------------------------------------------- // consensus states func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State { config := test.ResetTestRoot("consensus_state_test") - return newStateWithConfig(config, state, pv, app) + return newStateWithConfig(config, state, pv, app, nil) } func newStateWithConfig( @@ -442,9 +454,10 @@ func newStateWithConfig( state sm.State, pv types.PrivValidator, app abci.Application, + laneInfo *mempl.LanesInfo, ) *State { blockDB := dbm.NewMemDB() - return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB) + return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB, laneInfo) } func newStateWithConfigAndBlockStore( @@ -453,6 +466,7 @@ 
func newStateWithConfigAndBlockStore( pv types.PrivValidator, app abci.Application, blockDB dbm.DB, + laneInfo *mempl.LanesInfo, ) *State { // Get BlockStore blockStore := store.NewBlockStore(blockDB) @@ -468,6 +482,7 @@ func newStateWithConfigAndBlockStore( // Make Mempool mempool := mempl.NewCListMempool(config.Mempool, proxyAppConnMem, + laneInfo, state.LastBlockHeight, mempl.WithMetrics(memplMetrics), mempl.WithPreCheck(sm.TxPreCheck(state)), @@ -837,11 +852,12 @@ func randConsensusNet(t *testing.T, nValidators int, testName string, tickerFunc } ensureDir(filepath.Dir(thisConfig.Consensus.WalFile())) // dir for wal app := appFunc() + _, lanesInfo := fetchAppInfo(t, app) vals := types.TM2PB.ValidatorUpdates(state.Validators) _, err := app.InitChain(context.Background(), &abci.InitChainRequest{Validators: vals}) require.NoError(t, err) - css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB) + css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, stateDB, lanesInfo) css[i].SetTimeoutTicker(tickerFunc()) css[i].SetLogger(logger.With("validator", i, "module", "consensus")) } @@ -895,6 +911,7 @@ func randConsensusNetWithPeers( } app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i))) + _, lanesInfo := fetchAppInfo(t, app) vals := types.TM2PB.ValidatorUpdates(state.Validators) if _, ok := app.(*kvstore.Application); ok { // simulate handshake, receive app version. 
If don't do this, replay test will fail @@ -903,7 +920,7 @@ func randConsensusNetWithPeers( _, err = app.InitChain(context.Background(), &abci.InitChainRequest{Validators: vals}) require.NoError(t, err) - css[i] = newStateWithConfig(thisConfig, state, privVal, app) + css[i] = newStateWithConfig(thisConfig, state, privVal, app, lanesInfo) css[i].SetTimeoutTicker(tickerFunc()) css[i].SetLogger(logger.With("validator", i, "module", "consensus")) } diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 9024c48469..0e5648f322 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -14,7 +14,6 @@ import ( "github.com/cometbft/cometbft/abci/example/kvstore" abci "github.com/cometbft/cometbft/abci/types" mempl "github.com/cometbft/cometbft/mempool" - "github.com/cometbft/cometbft/proxy" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" ) @@ -30,10 +29,9 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { config.Consensus.CreateEmptyBlocks = false state, privVals := randGenesisState(1, nil) app := kvstore.NewInMemoryApplication() - resp, err := app.Info(context.Background(), proxy.InfoRequest) - require.NoError(t, err) + resp, lanesInfo := fetchAppInfo(t, app) state.AppHash = resp.LastBlockAppHash - cs := newStateWithConfig(config, state, privVals[0], app) + cs := newStateWithConfig(config, state, privVals[0], app, lanesInfo) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) @@ -54,10 +52,9 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { config.Consensus.CreateEmptyBlocksInterval = ensureTimeout state, privVals := randGenesisState(1, nil) app := kvstore.NewInMemoryApplication() - resp, err := app.Info(context.Background(), proxy.InfoRequest) - require.NoError(t, err) + resp, lanesInfo := fetchAppInfo(t, app) state.AppHash = 
resp.LastBlockAppHash - cs := newStateWithConfig(config, state, privVals[0], app) + cs := newStateWithConfig(config, state, privVals[0], app, lanesInfo) assertMempool(cs.txNotifier).EnableTxsAvailable() @@ -74,7 +71,9 @@ func TestMempoolProgressInHigherRound(t *testing.T) { defer os.RemoveAll(config.RootDir) config.Consensus.CreateEmptyBlocks = false state, privVals := randGenesisState(1, nil) - cs := newStateWithConfig(config, state, privVals[0], kvstore.NewInMemoryApplication()) + app := kvstore.NewInMemoryApplication() + _, lanesInfo := fetchAppInfo(t, app) + cs := newStateWithConfig(config, state, privVals[0], app, lanesInfo) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) @@ -121,7 +120,9 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { state, privVals := randGenesisState(1, nil) blockDB := dbm.NewMemDB() stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false}) - cs := newStateWithConfigAndBlockStore(config, state, privVals[0], kvstore.NewInMemoryApplication(), blockDB) + app := kvstore.NewInMemoryApplication() + _, lanesInfo := fetchAppInfo(t, app) + cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB, lanesInfo) err := stateStore.Save(state) require.NoError(t, err) newBlockEventsCh := subscribe(cs.eventBus, types.EventQueryNewBlockEvents) @@ -147,7 +148,8 @@ func TestMempoolRmBadTx(t *testing.T) { app := kvstore.NewInMemoryApplication() blockDB := dbm.NewMemDB() stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false}) - cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB) + _, lanesInfo := fetchAppInfo(t, app) + cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB, lanesInfo) err := stateStore.Save(state) require.NoError(t, err) diff --git a/internal/consensus/pbts_test.go b/internal/consensus/pbts_test.go index 
13f7994168..3fb198c873 100644 --- a/internal/consensus/pbts_test.go +++ b/internal/consensus/pbts_test.go @@ -112,7 +112,7 @@ func newPBTSTestHarness(ctx context.Context, t *testing.T, tc pbtsTestConfigurat consensusParams.Feature.PbtsEnableHeight = 1 state, privVals := randGenesisStateWithTime(validators, consensusParams, tc.genesisTime) - cs := newStateWithConfig(cfg, state, privVals[0], kvstore.NewInMemoryApplication()) + cs := newStateWithConfig(cfg, state, privVals[0], kvstore.NewInMemoryApplication(), nil) vss := make([]*validatorStub, validators) for i := 0; i < validators; i++ { vss[i] = newValidatorStub(privVals[i], int32(i)) diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index bd529d2acc..82c041e790 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -165,6 +165,7 @@ func TestReactorWithEvidence(t *testing.T) { // Make Mempool mempool := mempl.NewCListMempool(config.Mempool, proxyAppConnMem, + nil, state.LastBlockHeight, mempl.WithMetrics(memplMetrics), mempl.WithPreCheck(sm.TxPreCheck(state)), @@ -446,6 +447,7 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { func TestReactorVotingPowerChange(t *testing.T) { nVals := 4 logger := log.TestingLogger() + css, cleanup := randConsensusNet( t, nVals, diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 871679bb80..b4737d74c8 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -238,12 +238,10 @@ func (h *Handshaker) NBlocks() int { return h.nBlocks } -// TODO: retry the handshake/replay if it fails ? -func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) error { - // Handshake is done via ABCI Info on the query conn. 
- res, err := proxyApp.Query().Info(ctx, proxy.InfoRequest) - if err != nil { - return fmt.Errorf("error calling Info: %v", err) +// Handshake receives information from the app via ABCI Info on the query conn that is passed to the function. +func (h *Handshaker) Handshake(ctx context.Context, res *abci.InfoResponse, proxyApp proxy.AppConns) error { + if res == nil { + return errors.New("empty ABCI Info response passed to handshake") } blockHeight := res.LastBlockHeight @@ -265,7 +263,7 @@ func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) err } // Replay blocks up to the latest in the blockstore. - appHash, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, proxyApp) + appHash, err := h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, proxyApp) if err != nil { return fmt.Errorf("error on replay: %v", err) } diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index b0033a259a..2a7dab26df 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -76,12 +76,15 @@ func startNewStateAndWaitForBlock( state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile()) privValidator, err := loadPrivValidator(consensusReplayConfig) require.NoError(t, err) + app := kvstore.NewInMemoryApplication() + _, lanesInfo := fetchAppInfo(t, app) cs := newStateWithConfigAndBlockStore( consensusReplayConfig, state, privValidator, - kvstore.NewInMemoryApplication(), + app, blockDB, + lanesInfo, ) cs.SetLogger(logger) @@ -183,12 +186,15 @@ LOOP: require.NoError(t, err) privValidator, err := loadPrivValidator(consensusReplayConfig) require.NoError(t, err) + app := kvstore.NewInMemoryApplication() + _, lanesInfo := fetchAppInfo(t, app) cs := newStateWithConfigAndBlockStore( consensusReplayConfig, state, privValidator, kvstore.NewInMemoryApplication(), blockDB, + lanesInfo, ) cs.SetLogger(logger) @@ -691,8 +697,10 @@ func testHandshakeReplay(t *testing.T, config 
*cfg.Config, nBlocks int, mode uin } }) + abciInfoResp, err := proxyApp.Query().Info(context.Background(), proxy.InfoRequest) + require.NoError(t, err) // perform the replay protocol to sync Tendermint and the application - err = handshaker.Handshake(context.Background(), proxyApp) + err = handshaker.Handshake(context.Background(), abciInfoResp, proxyApp) if expectError { require.Error(t, err) // finish the test early @@ -928,7 +936,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { assert.Panics(t, func() { h := NewHandshaker(stateStore, state, store, genDoc) - if err = h.Handshake(context.Background(), proxyApp); err != nil { + abciInfoResp, err := proxyApp.Query().Info(context.Background(), proxy.InfoRequest) + require.NoError(t, err) + if err = h.Handshake(context.Background(), abciInfoResp, proxyApp); err != nil { t.Log(err) } }) @@ -952,7 +962,9 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { assert.Panics(t, func() { h := NewHandshaker(stateStore, state, store, genDoc) - if err = h.Handshake(context.Background(), proxyApp); err != nil { + abciInfoResp, err := proxyApp.Query().Info(context.Background(), proxy.InfoRequest) + require.NoError(t, err) + if err = h.Handshake(context.Background(), abciInfoResp, proxyApp); err != nil { t.Log(err) } }) @@ -1248,7 +1260,9 @@ func TestHandshakeUpdatesValidators(t *testing.T) { t.Error(err) } }) - if err := handshaker.Handshake(context.Background(), proxyApp); err != nil { + abciInfoResp, err2 := proxyApp.Query().Info(context.Background(), proxy.InfoRequest) + require.NoError(t, err2) + if err := handshaker.Handshake(context.Background(), abciInfoResp, proxyApp); err != nil { t.Fatalf("Error on abci handshake: %v", err) } var err error diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 2c7c132294..19d9881804 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -2524,6 +2524,7 @@ func 
TestVoteExtensionEnableHeight(t *testing.T) { } m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.FinalizeBlockResponse{}, nil).Maybe() m.On("Commit", mock.Anything, mock.Anything).Return(&abci.CommitResponse{}, nil).Maybe() + m.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() cs1, vss := randStateWithAppWithHeight(numValidators, m, testCase.enableHeight) height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID cs1.state.ConsensusParams.Feature.VoteExtensionsEnableHeight = testCase.enableHeight diff --git a/mempool/bench_test.go b/mempool/bench_test.go index ac76c07126..db14228dbe 100644 --- a/mempool/bench_test.go +++ b/mempool/bench_test.go @@ -169,7 +169,7 @@ func BenchmarkIterator(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() - iter := mp.NewIterator(context.TODO()) + iter := NewBlockingIterator(context.TODO(), mp, "bench") b.StartTimer() // Iterate until all txs in the mempool are accessed. @@ -203,7 +203,7 @@ func BenchmarkConcurrentkIterators(b *testing.B) { // Create concurrent iterators. iters := make([]Iterator, numIterators) for j := 0; j < numIterators; j++ { - iters[j] = mp.NewIterator(context.TODO()) + iters[j] = NewBlockingIterator(context.TODO(), mp, "bench") } wg := sync.WaitGroup{} wg.Add(numIterators) diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index ee1afcad95..7e3b4014fd 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "fmt" + "slices" "sync/atomic" "time" @@ -19,6 +20,8 @@ import ( "github.com/cometbft/cometbft/types" ) +const defaultLane = "default" + // CListMempool is an ordered in-memory pool for transactions before they are // proposed in a consensus round. Transaction validity is checked using the // CheckTx abci message before the transaction is added to the pool. The @@ -44,11 +47,22 @@ type CListMempool struct { // Keeps track of the rechecking process. 
recheck *recheck - // Data in `txs` and `txsMap` must to be kept in sync and updated atomically. - txsMtx cmtsync.RWMutex - txs *clist.CList // concurrent linked-list of valid txs - txsMap map[types.TxKey]*clist.CElement // for quick access to txs - txsBytes int64 // total size of mempool, in bytes + // Data in the following variables must to be kept in sync and updated atomically. + txsMtx cmtsync.RWMutex + lanes map[LaneID]*clist.CList // each lane is a linked-list of (valid) txs + txsMap map[types.TxKey]*clist.CElement // for quick access to the mempool entry of a given tx + laneBytes map[LaneID]int64 // number of bytes per lane (for metrics) + txsBytes int64 // total size of mempool, in bytes + numTxs int64 // total number of txs in the mempool + + addTxChMtx cmtsync.RWMutex // Protects the fields below + addTxCh chan struct{} // Blocks until the next TX is added + addTxSeq int64 // Helps detect is new TXs have been added to a given lane + addTxLaneSeqs map[LaneID]int64 // Sequence of the last TX added to a given lane + + // Immutable fields, only set during initialization. + defaultLane LaneID + sortedLanes []lane // lanes sorted by priority, in descending order // Keep a cache of already-seen txs. // This reduces the pressure on the proxyApp. @@ -63,25 +77,68 @@ var _ Mempool = &CListMempool{} // CListMempoolOption sets an optional parameter on the mempool. type CListMempoolOption func(*CListMempool) +// A lane is defined by its ID and priority. +// A laneID is a string uinquely identifying a lane. +// Multiple lanes can have the same priority. +type LaneID string + +// The priority of a lane. +type LanePriority uint32 + +// Lane corresponds to a transaction class as defined by the application. +// A lane is identified by a string name and has priority level. +// Different lanes can have the same priority. 
+type lane struct { + id LaneID + priority LanePriority +} + // NewCListMempool returns a new mempool with the given configuration and // connection to an application. func NewCListMempool( cfg *config.MempoolConfig, proxyAppConn proxy.AppConnMempool, + lanesInfo *LanesInfo, height int64, options ...CListMempoolOption, ) *CListMempool { mp := &CListMempool{ - config: cfg, - proxyAppConn: proxyAppConn, - txs: clist.New(), - txsMap: make(map[types.TxKey]*clist.CElement), - recheck: newRecheck(), - logger: log.NewNopLogger(), - metrics: NopMetrics(), + config: cfg, + proxyAppConn: proxyAppConn, + txsMap: make(map[types.TxKey]*clist.CElement), + laneBytes: make(map[LaneID]int64), + logger: log.NewNopLogger(), + metrics: NopMetrics(), + addTxCh: make(chan struct{}), + addTxLaneSeqs: make(map[LaneID]int64), } mp.height.Store(height) + // Initialize lanes + if lanesInfo == nil || len(lanesInfo.lanes) == 0 { + // The only lane will be "default" with priority 1. + lanesInfo = &LanesInfo{lanes: map[LaneID]LanePriority{defaultLane: 1}, defaultLane: defaultLane} + } + numLanes := len(lanesInfo.lanes) + mp.lanes = make(map[LaneID]*clist.CList, numLanes) + mp.defaultLane = lanesInfo.defaultLane + mp.sortedLanes = make([]lane, 0, numLanes) + for id, priority := range lanesInfo.lanes { + mp.lanes[id] = clist.New() + mp.sortedLanes = append(mp.sortedLanes, lane{id: id, priority: priority}) + } + slices.SortStableFunc(mp.sortedLanes, func(i, j lane) int { + if i.priority > j.priority { + return -1 + } + if i.priority < j.priority { + return 1 + } + return 0 + }) + + mp.recheck = newRecheck(mp) + if cfg.CacheSize > 0 { mp.cache = NewLRUTxCache(cfg.CacheSize) } else { @@ -112,15 +169,16 @@ func (mem *CListMempool) tryRemoveFromCache(tx types.Tx) { } } -func (mem *CListMempool) removeAllTxs() { +func (mem *CListMempool) removeAllTxs(lane LaneID) { mem.txsMtx.Lock() defer mem.txsMtx.Unlock() - for e := mem.txs.Front(); e != nil; e = e.Next() { - mem.txs.Remove(e) + for e := 
mem.lanes[lane].Front(); e != nil; e = e.Next() { + mem.lanes[lane].Remove(e) e.DetachPrev() } mem.txsMap = make(map[types.TxKey]*clist.CElement) + delete(mem.laneBytes, lane) mem.txsBytes = 0 } @@ -193,9 +251,13 @@ func (mem *CListMempool) PreUpdate() { } } +// Size returns the total number of transactions in the mempool (that is, all lanes). // Safe for concurrent use by multiple goroutines. func (mem *CListMempool) Size() int { - return mem.txs.Len() + mem.txsMtx.RLock() + defer mem.txsMtx.RUnlock() + + return int(mem.numTxs) } // Safe for concurrent use by multiple goroutines. @@ -206,6 +268,23 @@ func (mem *CListMempool) SizeBytes() int64 { return mem.txsBytes } +// LaneSizes returns, the number of transactions in the given lane and the total +// number of bytes used by all transactions in the lane. +// +// Safe for concurrent use by multiple goroutines. +func (mem *CListMempool) LaneSizes(lane LaneID) (numTxs int, bytes int64) { + mem.txsMtx.RLock() + defer mem.txsMtx.RUnlock() + + bytes = mem.laneBytes[lane] + + txs, ok := mem.lanes[lane] + if !ok { + panic(ErrLaneNotFound{laneID: lane}) + } + return txs.Len(), bytes +} + // Lock() must be help by the caller during execution. func (mem *CListMempool) FlushAppConn() error { err := mem.proxyAppConn.Flush(context.TODO()) @@ -221,8 +300,13 @@ func (mem *CListMempool) Flush() { mem.updateMtx.Lock() defer mem.updateMtx.Unlock() + mem.txsBytes = 0 + mem.numTxs = 0 mem.cache.Reset() - mem.removeAllTxs() + + for lane := range mem.lanes { + mem.removeAllTxs(lane) + } } func (mem *CListMempool) Contains(txKey types.TxKey) bool { @@ -233,28 +317,6 @@ func (mem *CListMempool) Contains(txKey types.TxKey) bool { return ok } -// TxsFront returns the first transaction in the ordered list for peer -// goroutines to call .NextWait() on. -// FIXME: leaking implementation details! -// -// Safe for concurrent use by multiple goroutines. -// -// Deprecated: Use CListIterator instead. 
-func (mem *CListMempool) TxsFront() *clist.CElement { - return mem.txs.Front() -} - -// TxsWaitChan returns a channel to wait on transactions. It will be closed -// once the mempool is not empty (ie. the internal `mem.txs` has at least one -// element) -// -// Safe for concurrent use by multiple goroutines. -// -// Deprecated: Use CListIterator instead. -func (mem *CListMempool) TxsWaitChan() <-chan struct{} { - return mem.txs.WaitChan() -} - // It blocks if we're waiting on Update() or Reap(). // Safe for concurrent use by multiple goroutines. func (mem *CListMempool) CheckTx(tx types.Tx, sender p2p.ID) (*abcicli.ReqRes, error) { @@ -351,9 +413,17 @@ func (mem *CListMempool) handleCheckTxResponse(tx types.Tx, sender p2p.ID) func( return ErrInvalidTx{Code: res.Code, Data: res.Data, Log: res.Log, Codespace: res.Codespace, Hash: tx.Hash()} } - // Check again that mempool isn't full, to reduce the chance of exceeding the limits. - if err := mem.isFull(len(tx)); err != nil { - mem.forceRemoveFromCache(tx) // mempool might have space later + // If the app returned a non-empty lane, use it; otherwise use the default lane. + lane := mem.defaultLane + if res.LaneId != "" { + if _, ok := mem.lanes[lane]; !ok { + panic(ErrLaneNotFound{laneID: lane}) + } + lane = LaneID(res.LaneId) + } + + if err := mem.isLaneFull(len(tx), lane); err != nil { + mem.forceRemoveFromCache(tx) // lane might have space later // use debug level to avoid spamming logs when traffic is high mem.logger.Debug(err.Error()) mem.metrics.RejectedTxs.Add(1) @@ -379,43 +449,63 @@ func (mem *CListMempool) handleCheckTxResponse(tx types.Tx, sender p2p.ID) func( } // Add tx to mempool and notify that new txs are available. 
- memTx := mempoolTx{ - height: mem.height.Load(), - gasWanted: res.GasWanted, - tx: tx, - } - mem.addTx(&memTx, sender) + mem.addTx(tx, res.GasWanted, sender, lane) mem.notifyTxsAvailable() - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) - mem.metrics.SizeBytes.Set(float64(mem.SizeBytes())) + mem.updateSizeMetrics(lane) return nil } } -// Called from handleCheckTxResponse only on valid txs. -// updateMtx is held when using the local ABCI client but not with an async client. -func (mem *CListMempool) addTx(memTx *mempoolTx, sender p2p.ID) { +// Called from: +// - handleCheckTxResponse (lock not held) if tx is valid +func (mem *CListMempool) addTx(tx types.Tx, gasWanted int64, sender p2p.ID, lane LaneID) { mem.txsMtx.Lock() defer mem.txsMtx.Unlock() - tx := memTx.tx + // Get lane's clist. + txs, ok := mem.lanes[lane] + if !ok { + panic(ErrLaneNotFound{laneID: lane}) + } + + // Increase sequence number. + mem.addTxChMtx.Lock() + defer mem.addTxChMtx.Unlock() + mem.addTxSeq++ + mem.addTxLaneSeqs[lane] = mem.addTxSeq // Add new transaction. + memTx := &mempoolTx{ + tx: tx, + height: mem.height.Load(), + gasWanted: gasWanted, + lane: lane, + seq: mem.addTxSeq, + } _ = memTx.addSender(sender) - e := mem.txs.PushBack(memTx) + e := txs.PushBack(memTx) + + // Update auxiliary variables. mem.txsMap[tx.Key()] = e mem.txsBytes += int64(len(tx)) + mem.numTxs++ + mem.laneBytes[lane] += int64(len(tx)) + // Notify iterators there's a new transaction. + close(mem.addTxCh) + mem.addTxCh = make(chan struct{}) + + // Update metrics. 
mem.metrics.TxSizeBytes.Observe(float64(len(tx))) mem.logger.Debug( "Added transaction", "tx", log.NewLazySprintf("%X", tx.Hash()), + "lane", lane, "height", mem.height.Load(), - "total", mem.Size(), + "total", mem.numTxs, ) } @@ -432,12 +522,28 @@ func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error { return ErrTxNotFound } - mem.txs.Remove(elem) + memTx := elem.Value.(*mempoolTx) + + label := string(memTx.lane) + mem.metrics.TxLifeSpan.With("lane", label).Observe(float64(memTx.timestamp.Sub(time.Now().UTC()))) + + // Remove tx from lane. + mem.lanes[memTx.lane].Remove(elem) elem.DetachPrev() + + // Update auxiliary variables. delete(mem.txsMap, txKey) - tx := elem.Value.(*mempoolTx).tx - mem.txsBytes -= int64(len(tx)) - mem.logger.Debug("removed transaction", "tx", tx.Hash(), "height", mem.height.Load(), "total", mem.Size()) + mem.txsBytes -= int64(len(memTx.tx)) + mem.numTxs-- + mem.laneBytes[memTx.lane] -= int64(len(memTx.tx)) + + mem.logger.Debug( + "Removed transaction", + "tx", memTx.tx.Hash(), + "lane", memTx.lane, + "height", mem.height.Load(), + "total", mem.numTxs, + ) return nil } @@ -460,6 +566,30 @@ func (mem *CListMempool) isFull(txSize int) error { return nil } +func (mem *CListMempool) isLaneFull(txSize int, lane LaneID) error { + laneTxs, laneBytes := mem.LaneSizes(lane) + + // The mempool is partitioned evenly across all lanes. + laneTxsCapacity := mem.config.Size / len(mem.sortedLanes) + laneBytesCapacity := mem.config.MaxTxsBytes / int64(len(mem.sortedLanes)) + + if laneTxs > laneTxsCapacity || int64(txSize)+laneBytes > laneBytesCapacity { + return ErrLaneIsFull{ + Lane: lane, + NumTxs: laneTxs, + MaxTxs: laneTxsCapacity, + Bytes: laneBytes, + MaxBytes: laneBytesCapacity, + } + } + + if mem.recheck.consideredFull() { + return ErrRecheckFull + } + + return nil +} + // handleRecheckTxResponse handles CheckTx responses for transactions in the mempool that need to be // revalidated after a mempool update. 
func (mem *CListMempool) handleRecheckTxResponse(tx types.Tx) func(res *abci.Response) error { @@ -497,9 +627,12 @@ func (mem *CListMempool) handleRecheckTxResponse(tx types.Tx) func(res *abci.Res } // update metrics - mem.metrics.Size.Set(float64(mem.Size())) - mem.metrics.SizeBytes.Set(float64(mem.SizeBytes())) mem.metrics.EvictedTxs.Add(1) + if elem, ok := mem.txsMap[tx.Key()]; ok { + mem.updateSizeMetrics(elem.Value.(*mempoolTx).lane) + } else { + mem.logger.Error("Cannot update metrics", "err", ErrTxNotFound) + } mem.tryRemoveFromCache(tx) if postCheckErr != nil { @@ -542,14 +675,17 @@ func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { // TODO: we will get a performance boost if we have a good estimate of avg // size per tx, and set the initial capacity based off of that. - // txs := make([]types.Tx, 0, cmtmath.MinInt(mem.txs.Len(), max/mem.avgTxSize)) - txs := make([]types.Tx, 0, mem.txs.Len()) - for e := mem.txs.Front(); e != nil; e = e.Next() { - memTx := e.Value.(*mempoolTx) - - txs = append(txs, memTx.tx) + // txs := make([]types.Tx, 0, cmtmath.MinInt(mem.Size(), max/mem.avgTxSize)) + txs := make([]types.Tx, 0, mem.Size()) + iter := NewNonBlockingIterator(mem) + for { + memTx := iter.Next() + if memTx == nil { + break + } + txs = append(txs, memTx.Tx()) - dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx}) + dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.Tx()}) // Check total size requirement if maxBytes > -1 && runningSize+dataSize > maxBytes { @@ -562,7 +698,7 @@ func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { // If maxGas is negative, skip this check. // Since newTotalGas < masGas, which // must be non-negative, it follows that this won't overflow. 
- newTotalGas := totalGas + memTx.gasWanted + newTotalGas := totalGas + memTx.GasWanted() if maxGas > -1 && newTotalGas > maxGas { return txs[:len(txs)-1] } @@ -577,13 +713,17 @@ func (mem *CListMempool) ReapMaxTxs(max int) types.Txs { defer mem.updateMtx.RUnlock() if max < 0 { - max = mem.txs.Len() + max = mem.Size() } - txs := make([]types.Tx, 0, cmtmath.MinInt(mem.txs.Len(), max)) - for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() { - memTx := e.Value.(*mempoolTx) - txs = append(txs, memTx.tx) + txs := make([]types.Tx, 0, cmtmath.MinInt(mem.Size(), max)) + iter := NewNonBlockingIterator(mem) + for len(txs) <= max { + memTx := iter.Next() + if memTx == nil { + break + } + txs = append(txs, memTx.Tx()) } return txs } @@ -657,12 +797,23 @@ func (mem *CListMempool) Update( } // Update metrics - mem.metrics.Size.Set(float64(mem.Size())) - mem.metrics.SizeBytes.Set(float64(mem.SizeBytes())) + for lane := range mem.lanes { + mem.updateSizeMetrics(lane) + } return nil } +// updateSizeMetrics updates the size-related metrics of a given lane. +func (mem *CListMempool) updateSizeMetrics(laneID LaneID) { + laneTxs, laneBytes := mem.LaneSizes(laneID) + label := string(laneID) + mem.metrics.LaneSize.With("lane", label).Set(float64(laneTxs)) + mem.metrics.LaneBytes.With("lane", label).Set(float64(laneBytes)) + mem.metrics.Size.Set(float64(mem.Size())) + mem.metrics.SizeBytes.Set(float64(mem.SizeBytes())) +} + // recheckTxs sends all transactions in the mempool to the app for re-validation. When the function // returns, all recheck responses from the app have been processed. func (mem *CListMempool) recheckTxs() { @@ -672,23 +823,28 @@ func (mem *CListMempool) recheckTxs() { return } - mem.recheck.init(mem.txs.Front(), mem.txs.Back()) + mem.recheck.init() - // NOTE: CheckTx for new transactions cannot be executed concurrently - // because this function has the lock (via Update and Lock). 
- for e := mem.txs.Front(); e != nil; e = e.Next() { - tx := e.Value.(*mempoolTx).tx + iter := NewNonBlockingIterator(mem) + for { + memTx := iter.Next() + if memTx == nil { + break + } + + // NOTE: handleCheckTxResponse may be called concurrently, but CheckTx cannot be executed concurrently + // because this function has the lock (via Update and Lock). mem.recheck.numPendingTxs.Add(1) // Send CheckTx request to the app to re-validate transaction. resReq, err := mem.proxyAppConn.CheckTxAsync(context.TODO(), &abci.CheckTxRequest{ - Tx: tx, + Tx: memTx.Tx(), Type: abci.CHECK_TX_TYPE_RECHECK, }) if err != nil { - panic(fmt.Errorf("(re-)CheckTx request for tx %s failed: %w", log.NewLazySprintf("%X", tx.Hash()), err)) + panic(fmt.Errorf("(re-)CheckTx request for tx %s failed: %w", log.NewLazySprintf("%X", memTx.Tx().Hash()), err)) } - resReq.SetCallback(mem.handleRecheckTxResponse(tx)) + resReq.SetCallback(mem.handleRecheckTxResponse(memTx.Tx())) } // Flush any pending asynchronous recheck requests to process. @@ -706,38 +862,48 @@ func (mem *CListMempool) recheckTxs() { if n := mem.recheck.numPendingTxs.Load(); n > 0 { mem.logger.Error("Not all txs were rechecked", "not-rechecked", n) } - mem.logger.Debug("Done rechecking txs", "height", mem.height.Load(), "num-txs", mem.Size()) + + mem.logger.Debug("Done rechecking", "height", mem.height.Load(), "num-txs", mem.Size()) } -// The cursor and end pointers define a dynamic list of transactions that could be rechecked. The -// end pointer is fixed. When a recheck response for a transaction is received, cursor will point to -// the entry in the mempool corresponding to that transaction, thus narrowing the list. Transactions -// corresponding to entries between the old and current positions of cursor will be ignored for -// rechecking. This is to guarantee that recheck responses are processed in the same sequential -// order as they appear in the mempool. 
+// When a recheck response for a transaction is received, cursor will point to +// the entry in the mempool corresponding to that transaction, advancing the +// cursor, thus narrowing the list of transactions to recheck. In case there are +// entries between the previous and the current positions of cursor, they will +// be ignored for rechecking. This is to guarantee that recheck responses are +// processed in the same sequential order as they appear in the mempool. type recheck struct { - cursor *clist.CElement // next expected recheck response - end *clist.CElement // last entry in the mempool to recheck - doneCh chan struct{} // to signal that rechecking has finished successfully (for async app connections) - numPendingTxs atomic.Int32 // number of transactions still pending to recheck - isRechecking atomic.Bool // true iff the rechecking process has begun and is not yet finished - recheckFull atomic.Bool // whether rechecking TXs cannot be completed before a new block is decided + iter *NonBlockingIterator + cursor Entry // next expected recheck response + doneCh chan struct{} // to signal that rechecking has finished successfully (for async app connections) + numPendingTxs atomic.Int32 // number of transactions still pending to recheck + isRechecking atomic.Bool // true iff the rechecking process has begun and is not yet finished + recheckFull atomic.Bool // whether rechecking TXs cannot be completed before a new block is decided + mem *CListMempool } -func newRecheck() *recheck { - return &recheck{ - doneCh: make(chan struct{}, 1), - } +func newRecheck(mp *CListMempool) *recheck { + r := recheck{} + r.iter = NewNonBlockingIterator(mp) + r.mem = mp + return &r } -func (rc *recheck) init(first, last *clist.CElement) { +func (rc *recheck) init() { if !rc.done() { panic("Having more than one rechecking process at a time is not possible.") } - rc.cursor = first - rc.end = last rc.numPendingTxs.Store(0) + rc.iter = NewNonBlockingIterator(rc.mem) + + rc.cursor = 
rc.iter.Next() + rc.doneCh = make(chan struct{}) + if rc.cursor == nil { + rc.setDone() + return + } rc.isRechecking.Store(true) + rc.recheckFull.Store(false) } // done returns true when there is no recheck response to process. @@ -751,42 +917,20 @@ func (rc *recheck) setDone() { rc.cursor = nil rc.recheckFull.Store(false) rc.isRechecking.Store(false) -} - -// setNextEntry sets cursor to the next entry in the list. If there is no next, cursor will be nil. -func (rc *recheck) setNextEntry() { - rc.cursor = rc.cursor.Next() -} - -// tryFinish will check if the cursor is at the end of the list and notify the channel that -// rechecking has finished. It returns true iff it's done rechecking. -func (rc *recheck) tryFinish() bool { - if rc.cursor == rc.end { - // Reached end of the list without finding a matching tx. - rc.setDone() - } - if rc.done() { - // Notify that recheck has finished. - select { - case rc.doneCh <- struct{}{}: - default: - } - return true - } - return false + close(rc.doneCh) // notify channel that recheck has finished } // findNextEntryMatching searches for the next transaction matching the given transaction, which // corresponds to the recheck response to be processed next. Then it checks if it has reached the -// end of the list, so it can finish rechecking. +// end of the list, so it can set recheck as finished. // // The goal is to guarantee that transactions are rechecked in the order in which they are in the // mempool. Transactions whose recheck response arrive late or don't arrive at all are skipped and // not rechecked. -func (rc *recheck) findNextEntryMatching(tx *types.Tx) bool { - found := false - for ; !rc.done(); rc.setNextEntry() { - expectedTx := rc.cursor.Value.(*mempoolTx).tx +func (rc *recheck) findNextEntryMatching(tx *types.Tx) (found bool) { + for rc.cursor != nil { + expectedTx := rc.cursor.Tx() + rc.cursor = rc.iter.Next() if bytes.Equal(*tx, expectedTx) { // Found an entry in the list of txs to recheck that matches tx. 
found = true @@ -795,9 +939,8 @@ func (rc *recheck) findNextEntryMatching(tx *types.Tx) bool { } } - if !rc.tryFinish() { - // Not finished yet; set the cursor for processing the next recheck response. - rc.setNextEntry() + if rc.cursor == nil { // reached end of list + rc.setDone() } return found } @@ -820,58 +963,3 @@ func (rc *recheck) setRecheckFull() bool { func (rc *recheck) consideredFull() bool { return rc.recheckFull.Load() } - -// CListIterator implements an Iterator that traverses the CList sequentially. When the current -// entry is removed from the mempool, the iterator starts from the beginning of the CList. When it -// reaches the end, it waits until a new entry is appended. -type CListIterator struct { - ctx context.Context - txs *clist.CList // to wait on and retrieve the first entry - cursor *clist.CElement // pointer to the current entry in the list -} - -func (mem *CListMempool) NewIterator(ctx context.Context) Iterator { - return &CListIterator{ - ctx: ctx, - txs: mem.txs, - } -} - -// WaitNextCh returns a channel to wait for the next available entry. The channel will be explicitly -// closed when the entry gets removed before it is added to the channel, or when reaching the end of -// the list. -// -// Unsafe for concurrent use by multiple goroutines. -func (iter *CListIterator) WaitNextCh() <-chan Entry { - ch := make(chan Entry) - // Spawn goroutine that waits for the next entry, saves it locally, and puts it in the channel. - go func() { - if iter.cursor == nil { - // We are at the beginning of the iteration or the saved entry got removed: wait until - // the list becomes not empty and select the first entry. - select { - case <-iter.txs.WaitChan(): - case <-iter.ctx.Done(): - close(ch) - return - } - // Note that Front can return nil. - iter.cursor = iter.txs.Front() - } else { - // Wait for the next entry after the current one. 
- select { - case <-iter.cursor.NextWaitChan(): - case <-iter.ctx.Done(): - close(ch) - return - } - // If the current entry is the last one or was removed, Next will return nil. - iter.cursor = iter.cursor.Next() - } - if iter.cursor != nil { - ch <- iter.cursor.Value.(Entry) - } - close(ch) - }() - return ch -} diff --git a/mempool/clist_mempool_test.go b/mempool/clist_mempool_test.go index 88ffa6060f..51bc961f35 100644 --- a/mempool/clist_mempool_test.go +++ b/mempool/clist_mempool_test.go @@ -1,7 +1,6 @@ package mempool import ( - "bytes" "context" "encoding/binary" "errors" @@ -50,12 +49,25 @@ func newMempoolWithAppAndConfigMock( ) (*CListMempool, cleanupFunc) { appConnMem := client appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) - err := appConnMem.Start() + if err := appConnMem.Start(); err != nil { + panic(err) + } + + appConnQuery := client + appConnQuery.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "query")) + if err := appConnQuery.Start(); err != nil { + panic(err) + } + appInfoRes, err := appConnQuery.Info(context.TODO(), proxy.InfoRequest) if err != nil { panic(err) } - mp := NewCListMempool(cfg.Mempool, appConnMem, 0) + lanesInfo, err := BuildLanesInfo(appInfoRes.LanePriorities, appInfoRes.DefaultLane) + if err != nil { + panic(err) + } + mp := NewCListMempool(cfg.Mempool, appConnMem, lanesInfo, 0) mp.SetLogger(log.TestingLogger()) return mp, func() { os.RemoveAll(cfg.RootDir) } @@ -71,12 +83,24 @@ func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) { func newMempoolWithAppAndConfig(cc proxy.ClientCreator, cfg *config.Config) (*CListMempool, cleanupFunc) { appConnMem, _ := cc.NewABCIMempoolClient() appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) - err := appConnMem.Start() - if err != nil { + if err := appConnMem.Start(); err != nil { panic(err) } - mp := NewCListMempool(cfg.Mempool, appConnMem, 0) + 
appConnQuery, _ := cc.NewABCIQueryClient() + appConnQuery.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "query")) + if err := appConnQuery.Start(); err != nil { + panic(err) + } + appInfoRes, err := appConnQuery.Info(context.TODO(), proxy.InfoRequest) + if err != nil { + panic(err) + } + lanesInfo, err := BuildLanesInfo(appInfoRes.LanePriorities, appInfoRes.DefaultLane) + if err != nil { + panic(err) + } + mp := NewCListMempool(cfg.Mempool, appConnMem, lanesInfo, 0) mp.SetLogger(*mempoolLogger("info")) return mp, func() { os.RemoveAll(cfg.RootDir) } @@ -136,21 +160,39 @@ func addRandomTxs(t *testing.T, mp Mempool, count int) []types.Tx { t.Helper() txs := NewRandomTxs(count, 20) callCheckTx(t, mp, txs) + require.Equal(t, count, len(txs)) return txs } +// addTxs adds to the mempool num transactions with sequential ids starting from +// first. func addTxs(tb testing.TB, mp Mempool, first, num int) []types.Tx { tb.Helper() txs := make([]types.Tx, 0, num) - for i := first; i < num; i++ { + for i := first; i < first+num; i++ { tx := kvstore.NewTxFromID(i) _, err := mp.CheckTx(tx, "") require.NoError(tb, err) txs = append(txs, tx) } + require.Equal(tb, num, len(txs)) return txs } +func waitTimeout(wg *sync.WaitGroup, timeout time.Duration, doneFunc func(), timeoutFunc func()) { + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + doneFunc() + case <-time.After(timeout): + timeoutFunc() + } +} + func TestReapMaxBytesMaxGas(t *testing.T) { app := kvstore.NewInMemoryApplication() cc := proxy.NewLocalClientCreator(app) @@ -159,8 +201,9 @@ func TestReapMaxBytesMaxGas(t *testing.T) { // Ensure gas calculation behaves as expected addRandomTxs(t, mp, 1) - iter := mp.NewIterator(context.Background()) + iter := NewBlockingIterator(context.Background(), mp, t.Name()) tx0 := <-iter.WaitNextCh() + require.NotNil(t, tx0) require.Equal(t, tx0.GasWanted(), int64(1), "transactions gas was set incorrectly") // 
ensure each tx is 20 bytes long require.Len(t, tx0.Tx(), 20, "Tx is longer than 20 bytes") @@ -192,6 +235,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) { } for tcIndex, tt := range tests { addRandomTxs(t, mp, tt.numTxsToCreate) + require.Equal(t, tt.numTxsToCreate, mp.Size()) got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas) require.Len(t, got, tt.expectedNumTxs, "Got %d txs, expected %d, tc #%d", len(got), tt.expectedNumTxs, tcIndex) @@ -238,6 +282,36 @@ func TestMempoolFilters(t *testing.T) { } } +func TestMempoolAddTxLane(t *testing.T) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + cfg := test.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) + defer cleanup() + + for i := 0; i < 100; i++ { + tx := kvstore.NewTxFromID(i) + rr, err := mp.CheckTx(tx, "") + require.NoError(t, err) + rr.Wait() + + // Check that the lane stored in the mempool entry is the same as the + // one assigned by the application. + entry := mp.txsMap[types.Tx(tx).Key()].Value.(*mempoolTx) + require.Equal(t, kvstoreAssignLane(i), entry.lane, "id %x", tx) + } +} + +func kvstoreAssignLane(key int) LaneID { + lane := defaultLane // 3 + if key%11 == 0 { + lane = "foo" // 7 + } else if key%3 == 0 { + lane = "bar" // 1 + } + return LaneID(lane) +} + func TestMempoolUpdate(t *testing.T) { app := kvstore.NewInMemoryApplication() cc := proxy.NewLocalClientCreator(app) @@ -279,6 +353,23 @@ func TestMempoolUpdate(t *testing.T) { } } +func TestMempoolBuildLanesInfo(t *testing.T) { + emptyMap := make(map[string]uint32) + _, err := BuildLanesInfo(emptyMap, "") + require.NoError(t, err) + + _, err = BuildLanesInfo(emptyMap, "1") + + require.ErrorAs(t, err, &ErrEmptyLanesDefaultLaneSet{}) + + _, err = BuildLanesInfo(map[string]uint32{"1": 1}, "") + + require.ErrorAs(t, err, &ErrBadDefaultLaneNonEmptyLaneList{}) + + _, err = BuildLanesInfo(map[string]uint32{"1": 1, "2": 2, "3": 3, "4": 4}, "5") + require.ErrorAs(t, err, 
&ErrDefaultLaneNotInList{}) +} + // Test dropping CheckTx requests when rechecking transactions. It mocks an asynchronous connection // to the app. func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) { @@ -286,6 +377,7 @@ func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) { mockClient.On("Start").Return(nil) mockClient.On("SetLogger", mock.Anything) mockClient.On("Error").Return(nil).Times(4) + mockClient.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil) mp, cleanup := newMempoolWithAppMock(mockClient) defer cleanup() @@ -613,16 +705,17 @@ func TestMempoolTxsBytes(t *testing.T) { mp.Flush() assert.EqualValues(t, 0, mp.SizeBytes()) - // 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached. - tx3 := kvstore.NewRandomTx(100) - _, err = mp.CheckTx(tx3, "") + // 5. ErrLaneIsFull is returned when/if the limit on the lane bytes capacity is reached. + laneMaxBytes := int(cfg.Mempool.MaxTxsBytes) / len(mp.sortedLanes) + tx3 := kvstore.NewRandomTx(laneMaxBytes) + rr, err := mp.CheckTx(tx3, "") require.NoError(t, err) + require.NoError(t, rr.Error()) tx4 := kvstore.NewRandomTx(10) - _, err = mp.CheckTx(tx4, "") - if assert.Error(t, err) { //nolint:testifylint // require.Error doesn't work with the conditional here - assert.IsType(t, ErrMempoolIsFull{}, err) - } + rr, err = mp.CheckTx(tx4, "") + require.NoError(t, err) + require.ErrorAs(t, rr.Error(), &ErrLaneIsFull{}) // 6. 
zero after tx is rechecked and removed due to not being valid anymore app2 := kvstore.NewInMemoryApplication() @@ -696,11 +789,13 @@ func TestMempoolNoCacheOverflow(t *testing.T) { err = mp.FlushAppConn() require.NoError(t, err) - // tx0 should appear only once in mp.txs + // tx0 should appear only once in mp.lanes found := 0 - for e := mp.txs.Front(); e != nil; e = e.Next() { - if types.Tx.Key(e.Value.(*mempoolTx).tx) == types.Tx.Key(tx0) { - found++ + for _, lane := range mp.sortedLanes { + for e := mp.lanes[lane.id].Front(); e != nil; e = e.Next() { + if types.Tx.Key(e.Value.(*mempoolTx).Tx()) == types.Tx.Key(tx0) { + found++ + } } } assert.Equal(t, 1, found) @@ -808,6 +903,7 @@ func TestMempoolSyncCheckTxReturnError(t *testing.T) { mockClient := new(abciclimocks.Client) mockClient.On("Start").Return(nil) mockClient.On("SetLogger", mock.Anything) + mockClient.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil) mp, cleanup := newMempoolWithAppMock(mockClient) defer cleanup() @@ -832,6 +928,7 @@ func TestMempoolSyncRecheckTxReturnError(t *testing.T) { mockClient.On("Start").Return(nil) mockClient.On("SetLogger", mock.Anything) mockClient.On("Error").Return(nil) + mockClient.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil) mp, cleanup := newMempoolWithAppMock(mockClient) defer cleanup() @@ -873,6 +970,7 @@ func TestMempoolAsyncRecheckTxReturnError(t *testing.T) { mockClient.On("Start").Return(nil) mockClient.On("SetLogger", mock.Anything) mockClient.On("Error").Return(nil).Times(4) + mockClient.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil) mp, cleanup := newMempoolWithAppMock(mockClient) defer cleanup() @@ -895,7 +993,6 @@ func TestMempoolAsyncRecheckTxReturnError(t *testing.T) { // Check that recheck has not started. 
require.True(t, mp.recheck.done()) require.Nil(t, mp.recheck.cursor) - require.Nil(t, mp.recheck.end) require.False(t, mp.recheck.isRechecking.Load()) mockClient.AssertExpectations(t) @@ -924,8 +1021,6 @@ func TestMempoolAsyncRecheckTxReturnError(t *testing.T) { require.True(t, mp.recheck.done()) require.False(t, mp.recheck.isRechecking.Load()) require.Nil(t, mp.recheck.cursor) - require.NotNil(t, mp.recheck.end) - require.Equal(t, mp.recheck.end, mp.txs.Back()) require.Equal(t, len(txs)-1, mp.Size()) // one invalid tx was removed require.Equal(t, int32(2), mp.recheck.numPendingTxs.Load()) @@ -996,44 +1091,6 @@ func TestMempoolConcurrentCheckTxAndUpdate(t *testing.T) { require.Zero(t, mp.Size()) } -func TestMempoolIterator(t *testing.T) { - app := kvstore.NewInMemoryApplication() - cc := proxy.NewLocalClientCreator(app) - - cfg := test.ResetTestRoot("mempool_test") - mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) - defer cleanup() - - var wg sync.WaitGroup - wg.Add(1) - - n := numTxs - - // Spawn a goroutine that iterates on the list until counting n entries. - counter := 0 - go func() { - defer wg.Done() - - iter := mp.NewIterator(context.Background()) - for counter < n { - entry := <-iter.WaitNextCh() - require.True(t, bytes.Equal(kvstore.NewTxFromID(counter), entry.Tx())) - counter++ - } - }() - - // Add n transactions with sequential ids. 
- for i := 0; i < n; i++ { - tx := kvstore.NewTxFromID(i) - rr, err := mp.CheckTx(tx, "") - require.NoError(t, err) - rr.Wait() - } - - wg.Wait() - require.Equal(t, n, counter) -} - func newMempoolWithAsyncConnection(tb testing.TB) (*CListMempool, cleanupFunc) { tb.Helper() sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmtrand.Str(6)) @@ -1070,6 +1127,12 @@ func newReqRes(tx types.Tx, code uint32, requestType abci.CheckTxType) *abciclie return reqRes } +func newReqResWithLanes(tx types.Tx, code uint32, requestType abci.CheckTxType, lane string) *abciclient.ReqRes { + reqRes := abciclient.NewReqRes(abci.ToCheckTxRequest(&abci.CheckTxRequest{Tx: tx, Type: requestType})) + reqRes.Response = abci.ToCheckTxResponse(&abci.CheckTxResponse{Code: code, LaneId: lane}) + return reqRes +} + func abciResponses(n int, code uint32) []*abci.ExecTxResult { responses := make([]*abci.ExecTxResult, 0, n) for i := 0; i < n; i++ { diff --git a/mempool/errors.go b/mempool/errors.go index b2b859cdc0..5384bc8ea1 100644 --- a/mempool/errors.go +++ b/mempool/errors.go @@ -78,6 +78,27 @@ func (e ErrMempoolIsFull) Error() string { ) } +// ErrLaneIsFull is returned when a lane has reached its full capacity (either +// in number of txs or bytes). +type ErrLaneIsFull struct { + Lane LaneID + NumTxs int + MaxTxs int + Bytes int64 + MaxBytes int64 +} + +func (e ErrLaneIsFull) Error() string { + return fmt.Sprintf( + "lane %s is full: number of txs %d (max: %d), total bytes %d (max: %d)", + e.Lane, + e.NumTxs, + e.MaxTxs, + e.Bytes, + e.MaxBytes, + ) +} + // ErrPreCheck defines an error where a transaction fails a pre-check. 
type ErrPreCheck struct { Err error @@ -119,3 +140,35 @@ func (e ErrFlushAppConn) Error() string { func (e ErrFlushAppConn) Unwrap() error { return e.Err } + +type ErrEmptyLanesDefaultLaneSet struct { + Info LanesInfo +} + +func (e ErrEmptyLanesDefaultLaneSet) Error() string { + return fmt.Sprintf("invalid lane info: if list of lanes is empty, then defaultLane must be 0, but %v given; info %v", e.Info.defaultLane, e.Info) +} + +type ErrBadDefaultLaneNonEmptyLaneList struct { + Info LanesInfo +} + +func (e ErrBadDefaultLaneNonEmptyLaneList) Error() string { + return fmt.Sprintf("invalid lane info: default lane cannot be 0 if list of lanes is non empty; info: %v", e.Info) +} + +type ErrDefaultLaneNotInList struct { + Info LanesInfo +} + +func (e ErrDefaultLaneNotInList) Error() string { + return fmt.Sprintf("invalid lane info: list of lanes does not contain default lane; info %v", e.Info) +} + +type ErrLaneNotFound struct { + laneID LaneID +} + +func (e ErrLaneNotFound) Error() string { + return fmt.Sprintf("lane %s not found", e.laneID) +} diff --git a/mempool/iterators.go b/mempool/iterators.go new file mode 100644 index 0000000000..5c33fb86d5 --- /dev/null +++ b/mempool/iterators.go @@ -0,0 +1,230 @@ +package mempool + +import ( + "context" + "fmt" + + "github.com/cometbft/cometbft/internal/clist" +) + +// IWRRIterator is the base struct for implementing iterators that traverse lanes with +// the Interleaved Weighted Round Robin (WRR) algorithm. +// https://en.wikipedia.org/wiki/Weighted_round_robin +type IWRRIterator struct { + sortedLanes []lane + laneIndex int // current lane being iterated; index on sortedLanes + cursors map[LaneID]*clist.CElement // last accessed entries on each lane + round int // counts the rounds for IWRR +} + +// This function picks the next lane to fetch an item from. +// If it was the last lane, it advances the round counter as well. 
+func (iter *IWRRIterator) advanceIndexes() lane { + if iter.laneIndex == len(iter.sortedLanes)-1 { + iter.round = (iter.round + 1) % (int(iter.sortedLanes[0].priority) + 1) + if iter.round == 0 { + iter.round++ + } + } + iter.laneIndex = (iter.laneIndex + 1) % len(iter.sortedLanes) + return iter.sortedLanes[iter.laneIndex] +} + +// Non-blocking version of the IWRR iterator to be used for reaping and +// rechecking transactions. +// +// This iterator does not support changes on the underlying mempool once initialized (or `Reset`), +// therefore the lock must be held on the mempool when iterating. +type NonBlockingIterator struct { + IWRRIterator +} + +func NewNonBlockingIterator(mem *CListMempool) *NonBlockingIterator { + baseIter := IWRRIterator{ + sortedLanes: mem.sortedLanes, + cursors: make(map[LaneID]*clist.CElement, len(mem.lanes)), + round: 1, + } + iter := &NonBlockingIterator{ + IWRRIterator: baseIter, + } + iter.reset(mem.lanes) + return iter +} + +// Reset must be called before every use of the iterator. +func (iter *NonBlockingIterator) reset(lanes map[LaneID]*clist.CList) { + iter.laneIndex = 0 + iter.round = 1 + // Set cursors at the beginning of each lane. + for lane := range lanes { + iter.cursors[lane] = lanes[lane].Front() + } +} + +// Next returns the next element according to the WRR algorithm. +func (iter *NonBlockingIterator) Next() Entry { + numEmptyLanes := 0 + + lane := iter.sortedLanes[iter.laneIndex] + for { + // Skip empty lane or if cursor is at end of lane. + if iter.cursors[lane.id] == nil { + numEmptyLanes++ + if numEmptyLanes >= len(iter.sortedLanes) { + return nil + } + lane = iter.advanceIndexes() + continue + } + // Skip over-consumed lane on current round. 
+ if int(lane.priority) < iter.round { + numEmptyLanes = 0 + lane = iter.advanceIndexes() + continue + } + break + } + elem := iter.cursors[lane.id] + if elem == nil { + panic(fmt.Errorf("Iterator picked a nil entry on lane %s", lane.id)) + } + iter.cursors[lane.id] = iter.cursors[lane.id].Next() + _ = iter.advanceIndexes() + return elem.Value.(*mempoolTx) +} + +// BlockingIterator implements a blocking version of the WRR iterator, +// meaning that when no transaction is available, it will wait until a new one +// is added to the mempool. +// Unlike `NonBlockingIterator`, this iterator is expected to work with an evolving mempool. +type BlockingIterator struct { + IWRRIterator + ctx context.Context + mp *CListMempool + name string // for debugging +} + +func NewBlockingIterator(ctx context.Context, mem *CListMempool, name string) Iterator { + iter := IWRRIterator{ + sortedLanes: mem.sortedLanes, + cursors: make(map[LaneID]*clist.CElement, len(mem.sortedLanes)), + round: 1, + } + return &BlockingIterator{ + IWRRIterator: iter, + ctx: ctx, + mp: mem, + name: name, + } +} + +// WaitNextCh returns a channel to wait for the next available entry. The channel will be explicitly +// closed when the entry gets removed before it is added to the channel, or when reaching the end of +// the list. +// +// Unsafe for concurrent use by multiple goroutines. +func (iter *BlockingIterator) WaitNextCh() <-chan Entry { + ch := make(chan Entry) + go func() { + var lane lane + for { + l, addTxCh := iter.pickLane() + if addTxCh == nil { + lane = l + break + } + // There are no transactions to take from any lane. Wait until at + // least one is added to the mempool and try again. + select { + case <-addTxCh: + case <-iter.ctx.Done(): + close(ch) + return + } + } + if elem := iter.next(lane.id); elem != nil { + ch <- elem.Value.(Entry) + } + // Unblock receiver in case no entry was sent (it will receive nil). 
+ close(ch) + }() + return ch +} + +// pickLane returns a _valid_ lane on which to iterate, according to the WRR +// algorithm. A lane is valid if it is not empty and it is not over-consumed, +// meaning that the number of accessed entries in the lane has not yet reached +// its priority value in the current WRR iteration. It returns a channel to wait +// for new transactions if all lanes are empty or don't have transactions that +// have not yet been accessed. +func (iter *BlockingIterator) pickLane() (lane, chan struct{}) { + iter.mp.addTxChMtx.RLock() + defer iter.mp.addTxChMtx.RUnlock() + + // Start from the last accessed lane. + currLane := iter.sortedLanes[iter.laneIndex] + + // Loop until finding a valid lane. If the current lane is not valid, + // continue with the next lower-priority lane, in a round robin fashion. + numEmptyLanes := 0 + for { + laneID := currLane.id + // Skip empty lanes or lanes with their cursor pointing at their last entry. + if iter.mp.lanes[laneID].Len() == 0 || + (iter.cursors[laneID] != nil && + iter.cursors[laneID].Value.(*mempoolTx).seq == iter.mp.addTxLaneSeqs[laneID]) { + numEmptyLanes++ + if numEmptyLanes >= len(iter.sortedLanes) { + // There are no lanes with non-accessed entries. Wait until a + // new tx is added. + return lane{}, iter.mp.addTxCh + } + currLane = iter.advanceIndexes() + continue + } + + // Skip over-consumed lanes. + if int(currLane.priority) < iter.round { + numEmptyLanes = 0 + currLane = iter.advanceIndexes() + continue + } + + _ = iter.advanceIndexes() + return currLane, nil + } +} + +// In classical WRR, the iterator cycles over the lanes. When a lane is selected, Next returns an +// entry from the selected lane. On subsequent calls, Next will return the next entries from the +// same lane until `lane` entries are accessed or the lane is empty, where `lane` is the priority. +// The next time, Next will select the successive lane with lower priority. 
+// next returns the next entry from the given lane and updates WRR variables. +func (iter *BlockingIterator) next(laneID LaneID) *clist.CElement { + // Load the last accessed entry in the lane and set the next one. + var next *clist.CElement + + if cursor := iter.cursors[laneID]; cursor != nil { + // If the current entry is the last one or was removed, Next will return nil. + // Note we don't need to wait until the next entry is available (with <-cursor.NextWaitChan()). + next = cursor.Next() + } else { + // We are at the beginning of the iteration or the saved entry got removed. Pick the first + // entry in the lane if it's available (don't wait for it); if not, Front will return nil. + next = iter.mp.lanes[laneID].Front() + } + + // Update auxiliary variables. + if next != nil { + // Save entry. + iter.cursors[laneID] = next + } else { + // The entry got removed or it was the last one in the lane. + // At the moment this should not happen - the loop in PickLane will loop forever until there + // is data in at least one lane + delete(iter.cursors, laneID) + } + + return next +} diff --git a/mempool/iterators_test.go b/mempool/iterators_test.go new file mode 100644 index 0000000000..1b6ac89c74 --- /dev/null +++ b/mempool/iterators_test.go @@ -0,0 +1,511 @@ +package mempool + +import ( + "context" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + abciclimocks "github.com/cometbft/cometbft/abci/client/mocks" + "github.com/cometbft/cometbft/abci/example/kvstore" + abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/internal/test" + "github.com/cometbft/cometbft/proxy" + "github.com/cometbft/cometbft/types" +) + +func TestIteratorNonBlocking(t *testing.T) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + cfg := test.ResetTestRoot("mempool_test") + mp, cleanup := 
newMempoolWithAppAndConfig(cc, cfg) + defer cleanup() + + // Add all txs with id up to n. + n := 100 + for i := 0; i < n; i++ { + tx := kvstore.NewTxFromID(i) + rr, err := mp.CheckTx(tx, "") + require.NoError(t, err) + rr.Wait() + } + require.Equal(t, n, mp.Size()) + + iter := NewNonBlockingIterator(mp) + expectedOrder := []int{ + // round counter 1: + 0, // lane 7 + 1, // lane 3 + 3, // lane 1 + // round counter 2: + 11, // lane 7 + 2, // lane 3 + // round counter 3: + 22, // lane 7 + 4, // lane 3 + // round counter 4 - 7: + 33, 44, 55, 66, // lane 7 + // round counter 1: + 77, // lane 7 + 5, // lane 3 + 6, // lane 1 + // round counter 2: + 88, // lane 7 + 7, // lane 3 + // round counter 3: + 99, // lane 7 + 8, // lane 3 + // round counter 4- 7 have nothing + // round counter 1: + 10, // lane 3 + 9, // lane 1 + // round counter 2: + 13, // lane 3 + // round counter 3: + 14, // lane 3 + } + var next Entry + counter := 0 + + // Check that txs are picked by the iterator in the expected order. + for _, id := range expectedOrder { + next = iter.Next() + require.NotNil(t, next) + require.Equal(t, types.Tx(kvstore.NewTxFromID(id)), next.Tx(), "id=%v", id) + counter++ + } + + // Check that the rest of the entries are also consumed. + for { + if next = iter.Next(); next == nil { + break + } + counter++ + } + require.Equal(t, n, counter) +} + +func TestIteratorNonBlockingOneLane(t *testing.T) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + cfg := test.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) + defer cleanup() + + // Add all txs with id up to n to one lane. 
+ n := 100
+ for i := 0; i < n; i++ {
+ if i%11 != 0 {
+ continue
+ }
+ tx := kvstore.NewTxFromID(i)
+ rr, err := mp.CheckTx(tx, "")
+ require.NoError(t, err)
+ rr.Wait()
+ }
+ require.Equal(t, 10, mp.Size())
+
+ iter := NewNonBlockingIterator(mp)
+ expectedOrder := []int{0, 11, 22, 33, 44, 55, 66, 77, 88, 99}
+
+ var next Entry
+ counter := 0
+
+ // Check that txs are picked by the iterator in the expected order.
+ for _, id := range expectedOrder {
+ next = iter.Next()
+ require.NotNil(t, next)
+ require.Equal(t, types.Tx(kvstore.NewTxFromID(id)), next.Tx(), "id=%v", id)
+ counter++
+ }
+
+ next = iter.Next()
+ require.Nil(t, next)
+}
+
+// We have two iterators fetching transactions that
+// then get removed.
+func TestIteratorRace(t *testing.T) {
+ mockClient := new(abciclimocks.Client)
+ mockClient.On("Start").Return(nil)
+ mockClient.On("SetLogger", mock.Anything)
+ mockClient.On("Error").Return(nil).Times(100)
+
+ mockClient.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{LanePriorities: map[string]uint32{"1": 1, "2": 2, "3": 3}, DefaultLane: "1"}, nil)
+
+ mp, cleanup := newMempoolWithAppMock(mockClient)
+ defer cleanup()
+
+ // Disable rechecking to make sure the recheck logic is not interfering. 
+ mp.config.Recheck = false
+
+ const numLanes = 3
+ const numTxs = 100
+
+ var wg sync.WaitGroup
+ wg.Add(2)
+
+ var counter atomic.Int64
+ go func() {
+ waitForNumTxsInMempool(numTxs, mp)
+
+ go func() {
+ defer wg.Done()
+
+ for counter.Load() < int64(numTxs) {
+ iter := NewBlockingIterator(context.Background(), mp, t.Name())
+ entry := <-iter.WaitNextCh()
+ if entry == nil {
+ continue
+ }
+ tx := entry.Tx()
+ err := mp.Update(1, []types.Tx{tx}, abciResponses(1, 0), nil, nil)
+ require.NoError(t, err, tx)
+ counter.Add(1)
+ }
+ }()
+
+ go func() {
+ defer wg.Done()
+
+ for counter.Load() < int64(numTxs) {
+ iter := NewBlockingIterator(context.Background(), mp, t.Name())
+ entry := <-iter.WaitNextCh()
+ if entry == nil {
+ continue
+ }
+ tx := entry.Tx()
+ err := mp.Update(1, []types.Tx{tx}, abciResponses(1, 0), nil, nil)
+ require.NoError(t, err, tx)
+ counter.Add(1)
+ }
+ }()
+ }()
+
+ // This was introduced because without a separate function
+ // we have to sleep to wait for all txs to get into the mempool.
+ // This way we loop in the function above until it is full
+ // without arbitrary timeouts. 
+ go func() { + for i := 1; i <= int(numTxs); i++ { + tx := kvstore.NewTxFromID(i) + + currLane := (i % numLanes) + 1 + reqRes := newReqResWithLanes(tx, abci.CodeTypeOK, abci.CHECK_TX_TYPE_CHECK, strconv.Itoa(currLane)) + require.NotNil(t, reqRes) + + mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil).Once() + _, err := mp.CheckTx(tx, "") + require.NoError(t, err, err) + reqRes.InvokeCallback() + } + }() + + wg.Wait() + + require.Equal(t, counter.Load(), int64(numTxs+1)) +} + +func TestIteratorEmptyLanes(t *testing.T) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + + cfg := test.ResetTestRoot("mempool_empty_test") + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) + defer cleanup() + + go func() { + iter := NewBlockingIterator(context.Background(), mp, t.Name()) + require.Zero(t, mp.Size()) + entry := <-iter.WaitNextCh() + require.NotNil(t, entry) + require.EqualValues(t, entry.Tx(), kvstore.NewTxFromID(1)) + }() + time.Sleep(100 * time.Millisecond) + + tx := kvstore.NewTxFromID(1) + res := abci.ToCheckTxResponse(&abci.CheckTxResponse{Code: abci.CodeTypeOK}) + err := mp.handleCheckTxResponse(tx, "")(res) + require.NoError(t, err) + require.Equal(t, 1, mp.Size(), "pool size mismatch") +} + +func TestBlockingIteratorsConsumeAllTxs(t *testing.T) { + const numTxs = 1000 + const numIterators = 50 + + tests := map[string]struct { + app *kvstore.Application + }{ + "lanes": { + app: kvstore.NewInMemoryApplication(), + }, + "no_lanes": { + app: kvstore.NewInMemoryApplicationWithoutLanes(), + }, + } + + for test, config := range tests { + cc := proxy.NewLocalClientCreator(config.app) + mp, cleanup := newMempoolWithApp(cc) + defer cleanup() + + wg := &sync.WaitGroup{} + wg.Add(numIterators) + + // Start concurrent iterators. + for i := 0; i < numIterators; i++ { + go func(j int) { + defer wg.Done() + + // Iterate until all txs added to the mempool are accessed. 
+ iter := NewBlockingIterator(context.Background(), mp, strconv.Itoa(j)) + counter := 0 + nilCounter := 0 + for counter < numTxs { + entry := <-iter.WaitNextCh() + if entry == nil { + nilCounter++ + continue + } + if test == "no_lanes" { + // Entries are accessed sequentially when there is only one lane. + expectedTx := kvstore.NewTxFromID(counter) + require.EqualValues(t, expectedTx, entry.Tx(), "i=%d, c=%d, tx=%v", i, counter, entry.Tx()) + } + counter++ + } + require.Equal(t, numTxs, counter) + assert.Zero(t, nilCounter, "got nil entries") + t.Logf("%s: iterator %d finished (nils=%d)\n", test, j, nilCounter) + }(i) + } + + // Add transactions with sequential ids. + _ = addTxs(t, mp, 0, numTxs) + require.Equal(t, numTxs, mp.Size()) + + // Wait for all iterators to complete. + waitTimeout(wg, 5*time.Second, func() {}, func() { + t.Fatalf("Timed out waiting for all iterators to finish") + }) + } +} + +// Confirms that the transactions are returned in the same order. +// Note that for the cases with equal priorities the actual order +// will depend on the way we iterate over the map of lanes. +// With only two lanes of the same priority the order was predictable +// and matches the given order. In case these tests start to fail +// first thing to confirm is the order of lanes in mp.SortedLanes. 
+func TestIteratorExactOrder(t *testing.T) { + tests := map[string]struct { + lanePriorities map[string]uint32 + expectedTxIDs []int + expectedTxIDsAlternate []int + }{ + "unique_priority_lanes": { + lanePriorities: map[string]uint32{"1": 1, "2": 2, "3": 3}, + expectedTxIDs: []int{2, 1, 3, 5, 4, 8, 11, 7, 6, 10, 9}, + }, + "same_priority_lanes": { + lanePriorities: map[string]uint32{"1": 1, "2": 2, "3": 2}, + expectedTxIDs: []int{1, 2, 3, 4, 5, 7, 8, 6, 10, 11, 9}, + expectedTxIDsAlternate: []int{2, 1, 3, 5, 4, 8, 7, 6, 11, 10, 9}, + }, + "one_lane": { + lanePriorities: map[string]uint32{"1": 1}, + expectedTxIDs: []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, + }, + } + + for n, l := range tests { + mockClient := new(abciclimocks.Client) + mockClient.On("Start").Return(nil) + mockClient.On("SetLogger", mock.Anything) + mockClient.On("Error").Return(nil).Times(100) + mockClient.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{LanePriorities: l.lanePriorities, DefaultLane: "1"}, nil) + mp, cleanup := newMempoolWithAppMock(mockClient) + defer cleanup() + + // Disable rechecking to make sure the recheck logic is not interfering. + mp.config.Recheck = false + + numLanes := len(l.lanePriorities) + const numTxs = 11 + + // Transactions are ordered into lanes by their IDs. 
This is the order in
+ // which they should appear following WRR
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ waitForNumTxsInMempool(numTxs, mp)
+ t.Log("Mempool full, starting to pick up transactions", mp.Size())
+ alternate := false
+ iter := NewBlockingIterator(context.Background(), mp, t.Name())
+ for i := 0; i < numTxs; i++ {
+ entry := <-iter.WaitNextCh()
+ if entry == nil {
+ continue
+ }
+ // When lanes have same priorities their order in the map of lanes
+ // is arbitrary so we need to check
+ if n == "same_priority_lanes" {
+ if mp.sortedLanes[1].id != "3" {
+ alternate = true
+ }
+ }
+ if alternate {
+ require.EqualValues(t, entry.Tx(), kvstore.NewTxFromID(l.expectedTxIDsAlternate[i]), n)
+ } else {
+ require.EqualValues(t, entry.Tx(), kvstore.NewTxFromID(l.expectedTxIDs[i]), n)
+ }
+ }
+ }()
+
+ // This was introduced because without a separate function
+ // we have to sleep to wait for all txs to get into the mempool.
+ // This way we loop in the function above until it is full
+ // without arbitrary timeouts. 
+ go func() { + for i := 1; i <= numTxs; i++ { + tx := kvstore.NewTxFromID(i) + + currLane := (i % numLanes) + 1 + reqRes := newReqResWithLanes(tx, abci.CodeTypeOK, abci.CHECK_TX_TYPE_CHECK, strconv.Itoa(currLane)) + require.NotNil(t, reqRes) + + mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil).Once() + _, err := mp.CheckTx(tx, "") + require.NoError(t, err, err) + reqRes.InvokeCallback() + } + }() + + wg.Wait() + + // Confirm also that the non blocking iterator works with lanes of same priorities + iterNonBlocking := NewNonBlockingIterator(mp) + reapedTx := mp.ReapMaxTxs(numTxs) + alternate := false + for i := 0; i < numTxs; i++ { + tx := iterNonBlocking.Next().Tx() + if n == "same_priority_lanes" { + if mp.sortedLanes[1].id != "3" { + alternate = true + } + } + if !alternate { + require.Equal(t, []byte(tx), kvstore.NewTxFromID(l.expectedTxIDs[i]), n) + require.Equal(t, []byte(reapedTx[i]), kvstore.NewTxFromID(l.expectedTxIDs[i]), n) + } else { + require.Equal(t, []byte(tx), kvstore.NewTxFromID(l.expectedTxIDsAlternate[i]), n) + require.Equal(t, []byte(reapedTx[i]), kvstore.NewTxFromID(l.expectedTxIDsAlternate[i]), n) + } + } + } +} + +// This only tests that all transactions were submitted. +func TestIteratorCountOnly(t *testing.T) { + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) + + cfg := test.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) + defer cleanup() + + var wg sync.WaitGroup + wg.Add(1) + + const n = numTxs + + // Spawn a goroutine that iterates on the list until counting n entries. + counter := 0 + go func() { + defer wg.Done() + + iter := NewBlockingIterator(context.Background(), mp, t.Name()) + for counter < n { + entry := <-iter.WaitNextCh() + if entry == nil { + continue + } + counter++ + } + }() + + // Add n transactions with sequential ids. 
+ for i := 0; i < n; i++ { + tx := kvstore.NewTxFromID(i) + rr, err := mp.CheckTx(tx, "") + require.NoError(t, err) + rr.Wait() + } + + wg.Wait() + require.Equal(t, n, counter) +} + +func TestReapMatchesGossipOrder(t *testing.T) { + const n = 100 + + tests := map[string]struct { + app *kvstore.Application + }{ + "test_lanes": { + app: kvstore.NewInMemoryApplication(), + }, + "test_no_lanes": { + app: kvstore.NewInMemoryApplicationWithoutLanes(), + }, + } + + for test, config := range tests { + cc := proxy.NewLocalClientCreator(config.app) + mp, cleanup := newMempoolWithApp(cc) + defer cleanup() + // Add a bunch of txs. + for i := 1; i <= n; i++ { + tx := kvstore.NewTxFromID(i) + rr, err := mp.CheckTx(tx, "") + require.NoError(t, err, err) + rr.Wait() + } + + require.Equal(t, n, mp.Size()) + + gossipIter := NewBlockingIterator(context.Background(), mp, t.Name()) + reapIter := NewNonBlockingIterator(mp) + + // Check that both iterators return the same entry as in the reaped txs. + txs := make([]types.Tx, n) + reapedTxs := mp.ReapMaxTxs(n) + for i, reapedTx := range reapedTxs { + entry := <-gossipIter.WaitNextCh() + // entry can be nil only when an entry is removed concurrently. 
+ require.NotNil(t, entry) + gossipTx := entry.Tx() + + reapTx := reapIter.Next().Tx() + txs[i] = reapTx + require.EqualValues(t, reapTx, gossipTx) + require.EqualValues(t, reapTx, reapedTx) + if test == "test_no_lanes" { + require.EqualValues(t, reapTx, kvstore.NewTxFromID(i+1)) + } + } + require.EqualValues(t, txs, reapedTxs) + + err := mp.Update(1, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil) + require.NoError(t, err) + require.Zero(t, mp.Size()) + } +} diff --git a/mempool/lanes_info.go b/mempool/lanes_info.go new file mode 100644 index 0000000000..653ba4bacb --- /dev/null +++ b/mempool/lanes_info.go @@ -0,0 +1,52 @@ +package mempool + +type LanesInfo struct { + lanes map[LaneID]LanePriority + defaultLane LaneID +} + +// BuildLanesInfo builds the information required to initialize +// lanes given the data queried from the app. +func BuildLanesInfo(laneMap map[string]uint32, defLane string) (*LanesInfo, error) { + info := LanesInfo{} + info.lanes = make(map[LaneID]LanePriority, len(laneMap)) + for l, p := range laneMap { + info.lanes[LaneID(l)] = LanePriority(p) + } + info.defaultLane = LaneID(defLane) + + if err := validate(info); err != nil { + return nil, err + } + + return &info, nil +} + +func validate(info LanesInfo) error { + // If no lanes are provided the default priority is 0 + if len(info.lanes) == 0 && info.defaultLane == "" { + return nil + } + + // Default lane is set but empty lane list + if len(info.lanes) == 0 && info.defaultLane != "" { + return ErrEmptyLanesDefaultLaneSet{ + Info: info, + } + } + + // Lane 0 is reserved for when there are no lanes or for invalid txs; it should not be used for the default lane. 
+ if info.defaultLane == "" && len(info.lanes) != 0 { + return ErrBadDefaultLaneNonEmptyLaneList{ + Info: info, + } + } + + if _, ok := info.lanes[info.defaultLane]; !ok { + return ErrDefaultLaneNotInList{ + Info: info, + } + } + + return nil +} diff --git a/mempool/mempoolTx.go b/mempool/mempoolTx.go index 2c2ffa7c62..a4b04e6a8e 100644 --- a/mempool/mempoolTx.go +++ b/mempool/mempoolTx.go @@ -3,6 +3,7 @@ package mempool import ( "sync" "sync/atomic" + "time" "github.com/cometbft/cometbft/p2p" "github.com/cometbft/cometbft/types" @@ -13,6 +14,9 @@ type mempoolTx struct { height int64 // height that this tx had been validated in gasWanted int64 // amount of gas this tx states it will require tx types.Tx // validated by the application + lane LaneID + seq int64 + timestamp time.Time // time when entry was created // ids of peers who've sent us this tx (as a map for quick lookups). // senders: PeerID -> struct{} diff --git a/mempool/metrics.gen.go b/mempool/metrics.gen.go index 6b6080c904..3b361d7097 100644 --- a/mempool/metrics.gen.go +++ b/mempool/metrics.gen.go @@ -18,14 +18,34 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Namespace: namespace, Subsystem: MetricsSubsystem, Name: "size", - Help: "Number of uncommitted transactions in the mempool.", + Help: "Number of uncommitted transactions in the mempool. Deprecated: this value can be obtained as the sum of LaneSize.", }, labels).With(labelsAndValues...), SizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, Name: "size_bytes", - Help: "Total size of the mempool in bytes.", + Help: "Total size of the mempool in bytes. 
Deprecated: this value can be obtained as the sum of LaneBytes.", }, labels).With(labelsAndValues...), + LaneSize: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "lane_size", + Help: "Number of uncommitted transactions per lane.", + }, append(labels, "lane")).With(labelsAndValues...), + LaneBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "lane_bytes", + Help: "Number of used bytes per lane.", + }, append(labels, "lane")).With(labelsAndValues...), + TxLifeSpan: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "tx_life_span", + Help: "Duration in ms of a transaction in the mempool.", + + Buckets: []float64{50, 100, 200, 500, 1000}, + }, append(labels, "lane")).With(labelsAndValues...), TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -77,6 +97,9 @@ func NopMetrics() *Metrics { return &Metrics{ Size: discard.NewGauge(), SizeBytes: discard.NewGauge(), + LaneSize: discard.NewGauge(), + LaneBytes: discard.NewGauge(), + TxLifeSpan: discard.NewHistogram(), TxSizeBytes: discard.NewHistogram(), FailedTxs: discard.NewCounter(), RejectedTxs: discard.NewCounter(), diff --git a/mempool/metrics.go b/mempool/metrics.go index fb20baa717..91b19be0fb 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -16,11 +16,26 @@ const ( // see MetricsProvider for descriptions. type Metrics struct { // Number of uncommitted transactions in the mempool. + // + // Deprecated: this value can be obtained as the sum of LaneSize. Size metrics.Gauge // Total size of the mempool in bytes. + // + // Deprecated: this value can be obtained as the sum of LaneBytes. SizeBytes metrics.Gauge + // Number of uncommitted transactions per lane. + LaneSize metrics.Gauge `metrics_labels:"lane"` + + // Number of used bytes per lane. 
+ LaneBytes metrics.Gauge `metrics_labels:"lane"` + + // TxLifeSpan measures the time each transaction has in the mempool, since + // the time it enters until it is removed. + // metrics:Duration in ms of a transaction in the mempool. + TxLifeSpan metrics.Histogram `metrics_bucketsizes:"50,100,200,500,1000" metrics_labels:"lane"` + // Histogram of transaction sizes in bytes. TxSizeBytes metrics.Histogram `metrics_bucketsizes:"1,3,7" metrics_buckettype:"exp"` diff --git a/mempool/reactor.go b/mempool/reactor.go index d65b165fe2..578694c682 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -216,7 +216,7 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { } }() - iter := memR.mempool.NewIterator(ctx) + iter := NewBlockingIterator(ctx, memR.mempool, string(peer.ID())) for { // In case of both next.NextWaitChan() and peer.Quit() are variable at the same time if !memR.IsRunning() || !peer.IsRunning() { @@ -224,7 +224,6 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { } entry := <-iter.WaitNextCh() - // If the entry we were looking at got garbage collected (removed), try again. if entry == nil { continue diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index 893a3872c3..58f33d0442 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -60,7 +60,7 @@ func TestReactorBroadcastTxsMessage(t *testing.T) { } txs := addRandomTxs(t, reactors[0].mempool, numTxs) - waitForReactors(t, txs, reactors, checkTxsInOrder) + waitForReactors(t, txs, reactors, checkTxsInMempool) } // regression test for https://github.com/tendermint/tendermint/issues/5408 @@ -198,16 +198,16 @@ func TestMempoolReactorSendLaggingPeer(t *testing.T) { reactors[0].Switch.Peers().Get(peerID).Set(types.PeerStateKey, peerState{1}) // Add a bunch of txs to the first reactor. The second reactor should not receive any tx. 
- txs1 := addRandomTxs(t, reactors[0].mempool, numTxs) + txs1 := addTxs(t, reactors[0].mempool, 0, numTxs) ensureNoTxs(t, reactors[1], 5*PeerCatchupSleepIntervalMS*time.Millisecond) // Now we know that the second reactor has advanced to height 9, so it should receive all txs. reactors[0].Switch.Peers().Get(peerID).Set(types.PeerStateKey, peerState{9}) - waitForReactors(t, txs1, reactors, checkTxsInOrder) + waitForReactors(t, txs1, reactors, checkTxsInMempool) // Add a bunch of txs to first reactor. The second reactor should receive them all. - txs2 := addRandomTxs(t, reactors[0].mempool, numTxs) - waitForReactors(t, append(txs1, txs2...), reactors, checkTxsInOrder) + txs2 := addTxs(t, reactors[0].mempool, numTxs, numTxs) + waitForReactors(t, append(txs1, txs2...), reactors, checkTxsInMempool) } func TestMempoolReactorMaxTxBytes(t *testing.T) { @@ -406,7 +406,7 @@ func TestMempoolFIFOWithParallelCheckTx(t *testing.T) { // // Wait for all txs to be in the mempool of the second reactor; the other reactors should not // // receive any tx. (The second reactor only sends transactions to the first reactor.) -// checkTxsInOrder(t, txs, reactors[1], 0) +// checkTxsInMempool(t, txs, reactors[1], 0) // for _, r := range reactors[2:] { // require.Zero(t, r.mempool.Size()) // } @@ -417,8 +417,8 @@ func TestMempoolFIFOWithParallelCheckTx(t *testing.T) { // // Now the third reactor should start receiving transactions from the first reactor and // // the fourth reactor from the second -// checkTxsInOrder(t, txs, reactors[2], 0) -// checkTxsInOrder(t, txs, reactors[3], 0) +// checkTxsInMempool(t, txs, reactors[2], 0) +// checkTxsInMempool(t, txs, reactors[3], 0) // } // Test the experimental feature that limits the number of outgoing connections for gossiping @@ -449,8 +449,8 @@ func TestMempoolReactorMaxActiveOutboundConnectionsStar(t *testing.T) { // Wait for all txs to be in the mempool of the second reactor; the other reactors should not // receive any tx. 
(The second reactor only sends transactions to the first reactor.) - checkTxsInOrder(t, txs, reactors[0], 0) - checkTxsInOrder(t, txs, reactors[1], 0) + checkTxsInMempool(t, txs, reactors[0], 0) + checkTxsInMempool(t, txs, reactors[1], 0) for _, r := range reactors[2:] { require.Zero(t, r.mempool.Size()) @@ -462,9 +462,9 @@ func TestMempoolReactorMaxActiveOutboundConnectionsStar(t *testing.T) { // Now the third reactor should start receiving transactions from the first reactor; the fourth // reactor's mempool should still be empty. - checkTxsInOrder(t, txs, reactors[0], 0) - checkTxsInOrder(t, txs, reactors[1], 0) - checkTxsInOrder(t, txs, reactors[2], 0) + checkTxsInMempool(t, txs, reactors[0], 0) + checkTxsInMempool(t, txs, reactors[1], 0) + checkTxsInMempool(t, txs, reactors[2], 0) for _, r := range reactors[3:] { require.Zero(t, r.mempool.Size()) } @@ -495,7 +495,7 @@ func mempoolLogger(level string) *log.Logger { // makeReactors creates n mempool reactors. func makeReactors(config *cfg.Config, n int, logger *log.Logger) []*Reactor { if logger == nil { - logger = mempoolLogger("debug") + logger = mempoolLogger("info") } reactors := make([]*Reactor, n) for i := 0; i < n; i++ { @@ -579,14 +579,14 @@ func waitForNumTxsInMempool(numTxs int, mempool Mempool) { // Wait until all txs are in the mempool and check that the number of txs in the // mempool is as expected. 
-// func checkTxsInMempool(t *testing.T, txs types.Txs, reactor *Reactor, _ int) { -// t.Helper() -// waitForNumTxsInMempool(len(txs), reactor.mempool) +func checkTxsInMempool(t *testing.T, txs types.Txs, reactor *Reactor, _ int) { + t.Helper() + waitForNumTxsInMempool(len(txs), reactor.mempool) -// reapedTxs := reactor.mempool.ReapMaxTxs(len(txs)) -// require.Len(t, txs, len(reapedTxs)) -// require.Len(t, txs, reactor.mempool.Size()) -// } + reapedTxs := reactor.mempool.ReapMaxTxs(len(txs)) + require.Len(t, txs, len(reapedTxs)) + require.Len(t, txs, reactor.mempool.Size()) +} // Wait until all txs are in the mempool and check that they are in the same // order as given. @@ -596,6 +596,7 @@ func checkTxsInOrder(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex // Check that all transactions in the mempool are in the same order as txs. reapedTxs := reactor.mempool.ReapMaxTxs(len(txs)) + require.Equal(t, len(txs), len(reapedTxs)) for i, tx := range txs { assert.Equalf(t, tx, reapedTxs[i], "txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i]) diff --git a/node/node.go b/node/node.go index 4e12357d16..51eeee2200 100644 --- a/node/node.go +++ b/node/node.go @@ -401,8 +401,13 @@ func NewNodeWithCliParams(ctx context.Context, // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, // and replays any blocks as necessary to sync CometBFT with the app. 
consensusLogger := logger.With("module", "consensus") + + appInfoResponse, err := proxyApp.Query().Info(ctx, proxy.InfoRequest) + if err != nil { + return nil, fmt.Errorf("error calling ABCI Info method: %v", err) + } if !stateSync { - if err := doHandshake(ctx, stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + if err := doHandshake(ctx, stateStore, state, blockStore, genDoc, eventBus, appInfoResponse, proxyApp, consensusLogger); err != nil { return nil, err } @@ -422,7 +427,7 @@ func NewNodeWithCliParams(ctx context.Context, logNodeStartupInfo(state, pubKey, logger, consensusLogger) - mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, waitSync, memplMetrics, logger) + mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, waitSync, memplMetrics, logger, appInfoResponse) evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateStore, blockStore, logger) if err != nil { diff --git a/node/node_test.go b/node/node_test.go index 3e77bda9c2..3fc9201266 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -314,6 +314,7 @@ func TestCreateProposalBlock(t *testing.T) { memplMetrics := mempl.NopMetrics() mempool := mempl.NewCListMempool(config.Mempool, proxyApp.Mempool(), + nil, state.LastBlockHeight, mempl.WithMetrics(memplMetrics), mempl.WithPreCheck(sm.TxPreCheck(state)), @@ -414,6 +415,7 @@ func TestMaxProposalBlockSize(t *testing.T) { memplMetrics := mempl.NopMetrics() mempool := mempl.NewCListMempool(config.Mempool, proxyApp.Mempool(), + nil, state.LastBlockHeight, mempl.WithMetrics(memplMetrics), mempl.WithPreCheck(sm.TxPreCheck(state)), diff --git a/node/setup.go b/node/setup.go index 768c3248be..45ba5abe15 100644 --- a/node/setup.go +++ b/node/setup.go @@ -230,13 +230,14 @@ func doHandshake( blockStore sm.BlockStore, genDoc *types.GenesisDoc, eventBus types.BlockEventPublisher, + appInfoResponse *abci.InfoResponse, proxyApp 
proxy.AppConns, consensusLogger log.Logger, ) error { handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) handshaker.SetLogger(consensusLogger) handshaker.SetEventBus(eventBus) - if err := handshaker.Handshake(ctx, proxyApp); err != nil { + if err := handshaker.Handshake(ctx, appInfoResponse, proxyApp); err != nil { return fmt.Errorf("error during handshake: %v", err) } return nil @@ -285,14 +286,21 @@ func createMempoolAndMempoolReactor( waitSync bool, memplMetrics *mempl.Metrics, logger log.Logger, + appInfoResponse *abci.InfoResponse, ) (mempl.Mempool, waitSyncP2PReactor) { switch config.Mempool.Type { // allow empty string for backward compatibility case cfg.MempoolTypeFlood, "": + lanesInfo, err := mempl.BuildLanesInfo(appInfoResponse.LanePriorities, appInfoResponse.DefaultLane) + if err != nil { + panic(fmt.Sprintf("could not get lanes info from app: %s", err)) + } + logger = logger.With("module", "mempool") mp := mempl.NewCListMempool( config.Mempool, proxyApp.Mempool(), + lanesInfo, state.LastBlockHeight, mempl.WithMetrics(memplMetrics), mempl.WithPreCheck(sm.TxPreCheck(state)), diff --git a/proto/cometbft/abci/v1/types.proto b/proto/cometbft/abci/v1/types.proto index 2b05f226af..b4763079df 100644 --- a/proto/cometbft/abci/v1/types.proto +++ b/proto/cometbft/abci/v1/types.proto @@ -259,6 +259,9 @@ message InfoResponse { int64 last_block_height = 4; bytes last_block_app_hash = 5; + + map lane_priorities = 6; + string default_lane = 7; } // InitChainResponse contains the ABCI application's hash and updates to the @@ -302,6 +305,8 @@ message CheckTxResponse { // removed). reserved 9 to 11; reserved "sender", "priority", "mempool_error"; + + string lane_id = 12; } // CommitResponse indicates how much blocks should CometBFT retain. 
diff --git a/scripts/qa/reporting/latency_plotter.py b/scripts/qa/reporting/latency_plotter.py index 64e022d07b..c754354e38 100644 --- a/scripts/qa/reporting/latency_plotter.py +++ b/scripts/qa/reporting/latency_plotter.py @@ -12,7 +12,8 @@ IMAGES_DIR = 'imgs' #fig_title = 'Vote Extensions Testnet' -fig_title = 'Rotating Nodes Test' +#fig_title = 'Rotating Nodes Test' +fig_title = 'Experiment title goes here' def usage(): print(f"Usage: {sys.argv[0]} release_name raw_csv_path") @@ -52,7 +53,6 @@ def plot_all_experiments(release, csv): localStartTime = tz.localize(datetime.fromtimestamp(startTime)).astimezone(pytz.utc) localEndTime = tz.localize(datetime.fromtimestamp(endTime)).astimezone(pytz.utc) print('experiment', key ,'start', localStartTime.strftime("%Y-%m-%dT%H:%M:%SZ"), 'end', localEndTime.strftime("%Y-%m-%dT%H:%M:%SZ"), 'duration', endTime - startTime, "mean", mean) - (con,rate) = subKey label = 'c='+str(con) + ' r='+ str(rate) ax.axhline(y = mean, color = 'r', linestyle = '-', label="mean") @@ -69,11 +69,59 @@ def plot_all_experiments(release, csv): # Save the figure with subplots fig.savefig(os.path.join(IMAGES_DIR, 'all_experiments.png')) +def plot_all_experiments_lane(release, csv): + # Group by experiment + groups = csv.groupby(['experiment_id']) + + # number of rows and columns in the graph + ncols = 2 if groups.ngroups > 1 else 1 + nrows = int( np.ceil(groups.ngroups / ncols)) if groups.ngroups > 1 else 1 + fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6*ncols, 4*nrows), sharey=False) + fig.tight_layout(pad=5.0) + + # Plot experiments as subplots + for (key,ax) in zip(groups.groups.keys(), [axes] if ncols == 1 else axes.flatten()): + group = groups.get_group(key) + ax.set_ylabel('latency (s)') + ax.set_xlabel('experiment timestamp (s)') + ax.set_title(key) + ax.grid(True) + + + # Group by connection number and transaction rate and lane + paramGroups = group.groupby(['connections','rate', 'lane']) + + for (subKey) in 
paramGroups.groups.keys(): + subGroup = paramGroups.get_group(subKey) + startTime = subGroup.block_time.min() + endTime = subGroup.block_time.max() + subGroup.block_time = subGroup.block_time.apply(lambda x: x - startTime ) + mean = subGroup.duration_ns.mean() + localStartTime = tz.localize(datetime.fromtimestamp(startTime)).astimezone(pytz.utc) + localEndTime = tz.localize(datetime.fromtimestamp(endTime)).astimezone(pytz.utc) + print('experiment', key ,'start', localStartTime.strftime("%Y-%m-%dT%H:%M:%SZ"), 'end', localEndTime.strftime("%Y-%m-%dT%H:%M:%SZ"), 'duration', endTime - startTime, "mean", mean) + + (con,rate,lane) = subKey + label = 'c='+str(con) + ' r='+ str(rate) +' l='+ str(lane) + ax.axhline(y = mean, color='r', linestyle = '-', label="mean_l"+str(lane)) + ax.scatter(subGroup.block_time, subGroup.duration_ns, label=label) + ax.legend() + + # Save individual axes + extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) + img_path = os.path.join(IMAGES_DIR, f'e_{key}_lane.png') + fig.savefig(img_path, bbox_inches=extent.expanded(1.4, 1.5)) + + fig.suptitle(fig_title + ' - ' + release) + + # Save the figure with subplots + fig.savefig(os.path.join(IMAGES_DIR, 'all_experiments_lane.png')) + + def plot_all_configs(release, csv): # Group by configuration - groups = csv.groupby(['connections','rate']) - + groups = csv.groupby(['connections','rate', 'lane']) # number of rows and columns in the graph ncols = 2 if groups.ngroups > 1 else 1 nrows = int( np.ceil(groups.ngroups / ncols)) if groups.ngroups > 1 else 1 @@ -86,14 +134,15 @@ def plot_all_configs(release, csv): ax.set_ylabel('latency (s)') ax.set_xlabel('experiment time (s)') ax.grid(True) - (con,rate) = key - label = 'c='+str(con) + ' r='+ str(rate) + (con,rate,lane) = key + label = 'c='+str(con) + ' r='+ str(rate)+ ' l='+ str(lane) ax.set_title(label) + # Group by experiment paramGroups = group.groupby(['experiment_id']) for (subKey) in paramGroups.groups.keys(): - subGroup = 
paramGroups.get_group((subKey,)) + subGroup = paramGroups.get_group((subKey)) startTime = subGroup.block_time.min() subGroupMod = subGroup.block_time.apply(lambda x: x - startTime) ax.scatter(subGroupMod, subGroup.duration_ns, label=label) @@ -102,7 +151,7 @@ def plot_all_configs(release, csv): #Save individual axes extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) - img_path = os.path.join(IMAGES_DIR, f'c{con}r{rate}.png') + img_path = os.path.join(IMAGES_DIR, f'c{con}r{rate}l{lane}.png') fig.savefig(img_path, bbox_inches=extent.expanded(1.4, 1.5)) fig.suptitle(fig_title + ' - ' + release) @@ -113,7 +162,7 @@ def plot_all_configs(release, csv): def plot_merged(release, csv): # Group by configuration - groups = csv.groupby(['connections','rate']) + groups = csv.groupby(['connections','rate','lane']) # number of rows and columns in the graph ncols = 2 if groups.ngroups > 1 else 1 @@ -127,22 +176,22 @@ def plot_merged(release, csv): ax.set_ylabel('latency (s)') ax.set_xlabel('experiment time (s)') ax.grid(True) - (con,rate) = key - label = 'c='+str(con) + ' r='+ str(rate) + (con,rate,lane) = key + label = 'c='+str(con) + ' r='+ str(rate) + ' l='+ str(lane) ax.set_title(label) # Group by experiment, but merge them as a single experiment paramGroups = group.groupby(['experiment_id']) for (subKey) in paramGroups.groups.keys(): - subGroup = paramGroups.get_group((subKey,)) + subGroup = paramGroups.get_group((subKey)) startTime = subGroup.block_time.min() subGroupMod = subGroup.block_time.apply(lambda x: x - startTime) ax.scatter(subGroupMod, subGroup.duration_ns, marker='o',c='#1f77b4') # Save individual axes extent = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted()) - (con, rate) = key - img_path = os.path.join(IMAGES_DIR, f'c{con}r{rate}_merged.png') + (con, rate, lane) = key + img_path = os.path.join(IMAGES_DIR, f'c{con}r{rate}l{lane}_merged.png') fig.savefig(img_path, bbox_inches=extent) plt.show() @@ -167,5 +216,6 @@ def 
plot_merged(release, csv): os.makedirs(IMAGES_DIR) plot_all_experiments(release, csv) + plot_all_experiments_lane(release, csv) plot_all_configs(release, csv) plot_merged(release, csv) diff --git a/scripts/qa/reporting/latency_throughput.py b/scripts/qa/reporting/latency_throughput.py index c048068171..5d07e5bb1a 100755 --- a/scripts/qa/reporting/latency_throughput.py +++ b/scripts/qa/reporting/latency_throughput.py @@ -20,8 +20,8 @@ DEFAULT_TITLE = "CometBFT latency vs throughput" -def plot_latency_vs_throughput(input_files, output_image, title=DEFAULT_TITLE): - avg_latencies, throughput_rates = process_input_files(input_files, ) +def plot_latency_vs_throughput(input_files, output_image, output_image_lane, title=DEFAULT_TITLE): + avg_latencies, throughput_rates, avg_latencies_lane, throughput_rates_lane = process_input_files(input_files, ) fig, ax = plt.subplots() @@ -39,11 +39,30 @@ def plot_latency_vs_throughput(input_files, output_image, title=DEFAULT_TITLE): plt.legend(loc='upper left') plt.savefig(output_image) + fig, ax = plt.subplots() + + lanes = sorted(avg_latencies_lane.keys()) + for l in lanes: + tr = np.array(throughput_rates_lane[l]) + al = np.array(avg_latencies_lane[l]) + label = 'lane %d' % (l) + ax.plot(tr, al, 'o-', label=label) + + ax.set_title(title) + ax.set_xlabel('Throughput rate (tx/s)') + ax.set_ylabel('Average transaction latency (s)') + + plt.legend(loc='upper left') + plt.savefig('output_image_lane') + + + def process_input_files(input_files): # Experimental data from which we will derive the latency vs throughput # statistics experiments = {} + experiments_lane = {} for input_file in input_files: logging.info('Reading %s...' 
% input_file) @@ -52,8 +71,60 @@ def process_input_files(input_files): reader = csv.DictReader(inf) for tx in reader: experiments = process_tx(experiments, tx) + experiments_lane = process_tx_lane(experiments_lane, tx) + + + avg_latencies, throughput_rates = compute_experiments_stats(experiments) + avg_latencies_lane, throughput_rates_lane = compute_lane_stats(experiments_lane) + + return avg_latencies, throughput_rates, avg_latencies_lane, throughput_rates_lane + + +def process_tx_lane(experiments_lane, tx): + exp_id = tx['experiment_id'] + lane = tx['lane'] + # Block time is nanoseconds from the epoch - convert to seconds + block_time = float(tx['block_time']) / (10**9) + # Duration is also in nanoseconds - convert to seconds + duration = float(tx['duration_ns']) / (10**9) + connections = int(tx['connections']) + rate = int(tx['rate']) + lane = int(tx['lane']) + + if (exp_id,lane) not in experiments_lane: + experiments_lane[(exp_id,lane)] = { + 'connections': connections, + 'rate': rate, + 'block_time_min': block_time, + # We keep track of the latency associated with the minimum block + # time to estimate the start time of the experiment + 'block_time_min_duration': duration, + 'block_time_max': block_time, + 'total_latencies': duration, + 'tx_count': 1, + } + logging.info('Found experiment %s ,lane=%d with rate=%d, connections=%d' % + (exp_id, lane, rate, connections)) + else: + # Validation + for field in ['connections', 'rate']: + val = int(tx[field]) + if val != experiments_lane[(exp_id,lane)][field]: + raise Exception( + 'Found multiple distinct values for field ' + '"%s" for the same experiment (%s): %d and %d' % + (field, exp_id, val, experiments_lane[(exp_id,lane)][field])) - return compute_experiments_stats(experiments) + if block_time < experiments_lane[(exp_id,lane)]['block_time_min']: + experiments_lane[(exp_id,lane)]['block_time_min'] = block_time + experiments_lane[(exp_id,lane)]['block_time_min_duration'] = duration + if block_time > 
experiments_lane[(exp_id,lane)]['block_time_max']: + experiments_lane[(exp_id,lane)]['block_time_max'] = block_time + + experiments_lane[(exp_id,lane)]['total_latencies'] += duration + experiments_lane[(exp_id,lane)]['tx_count'] += 1 + + return experiments_lane def process_tx(experiments, tx): @@ -64,6 +135,7 @@ def process_tx(experiments, tx): duration = float(tx['duration_ns']) / (10**9) connections = int(tx['connections']) rate = int(tx['rate']) + lane = int(tx['lane']) if exp_id not in experiments: experiments[exp_id] = { @@ -101,11 +173,53 @@ def process_tx(experiments, tx): return experiments +def compute_lane_stats(experiments): + """Compute average latency vs throughput rate statistics from the given + experiments""" + statsLane = {} + # Compute average latency and throughput rate for each experiment + for (exp_id,lane), exp in experiments.items(): + conns = exp['connections'] + + avg_latency = exp['total_latencies'] / exp['tx_count'] + exp_start_time = exp['block_time_min'] - exp['block_time_min_duration'] + exp_duration = exp['block_time_max'] - exp_start_time + throughput_rate = exp['tx_count'] / exp_duration + if lane not in statsLane: + statsLane[lane] = [] + statsLane[lane].append({ + 'avg_latency': avg_latency, + 'throughput_rate': throughput_rate, + }) + + # Sort stats for each lane in an experiment in order of increasing + # throughput rate, and then extract average latencies and throughput rates + # as separate data series. 
+ + lanesSorted = sorted(statsLane.keys()) + + avg_latencies_lane = {} + throughput_rates_lane = {} + + for l in lanesSorted: + statsLane[l] = sorted(statsLane[l], key=lambda s: s['throughput_rate']) + avg_latencies_lane[l] = [] + throughput_rates_lane[l] = [] + for s in statsLane[l]: + avg_latencies_lane[l].append(s['avg_latency']) + throughput_rates_lane[l].append(s['throughput_rate']) + logging.info('For lane %d: ' + 'throughput rate = %.6f tx/s\t' + 'average latency = %.6fs' % + (l, s['throughput_rate'], s['avg_latency'])) + + return (avg_latencies_lane, throughput_rates_lane) + + def compute_experiments_stats(experiments): """Compute average latency vs throughput rate statistics from the given experiments""" stats = {} - # Compute average latency and throughput rate for each experiment for exp_id, exp in experiments.items(): conns = exp['connections'] @@ -153,6 +267,8 @@ def compute_experiments_stats(experiments): help='Plot title') parser.add_argument('output_image', help='Output image file (in PNG format)') + parser.add_argument('output_image_lane', + help='Output image file lane (in PNG format)') parser.add_argument( 'input_csv_file', nargs='+', @@ -164,4 +280,4 @@ def compute_experiments_stats(experiments): stream=sys.stdout, level=logging.INFO) - plot_latency_vs_throughput(args.input_csv_file, args.output_image, title=args.title) + plot_latency_vs_throughput(args.input_csv_file, args.output_image, args.output_image_lane, title=args.title) diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index d425018440..6ba62dc4d5 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -17,6 +17,7 @@ import ( "sync" "time" + "github.com/cosmos/gogoproto/proto" gogo "github.com/cosmos/gogoproto/types" "github.com/cometbft/cometbft/abci/example/kvstore" @@ -27,6 +28,7 @@ import ( cryptoenc "github.com/cometbft/cometbft/crypto/encoding" "github.com/cometbft/cometbft/libs/log" "github.com/cometbft/cometbft/libs/protoio" + 
"github.com/cometbft/cometbft/test/loadtime/payload" cmttypes "github.com/cometbft/cometbft/types" "github.com/cometbft/cometbft/version" ) @@ -56,6 +58,8 @@ type Application struct { restoreChunks [][]byte // It's OK not to persist this, as it is not part of the state machine seenTxs sync.Map // cmttypes.TxKey -> uint64 + + lanePriorities map[string]uint32 } // Config allows for the setting of high level parameters for running the e2e Application @@ -133,6 +137,15 @@ type Config struct { // -1 denotes it is set at genesis. // 0 denotes it is set at InitChain. PbtsUpdateHeight int64 `toml:"pbts_update_height"` + + // If true, disables the use of lanes by the application. + // Used to simulate networks that do not want to use lanes, running + // on top of CometBFT with lane support. + NoLanes bool `toml:"no_lanes"` + + // Mapping from lane IDs to lane priorities. These lanes will be used by the + // application for setting up the mempool and for classifying transactions. + Lanes map[string]uint32 `toml:"lanes"` } func DefaultConfig(dir string) *Config { @@ -140,6 +153,17 @@ func DefaultConfig(dir string) *Config { PersistInterval: 1, SnapshotInterval: 100, Dir: dir, + Lanes: DefaultLanes(), + } +} + +func DefaultLanes() map[string]uint32 { + return map[string]uint32{ + "100": 100, + "50": 50, + "10": 10, + "5": 5, + "1": 1, } } @@ -153,15 +177,23 @@ func NewApplication(cfg *Config) (*Application, error) { if err != nil { return nil, err } - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) logger.Info("Application started!") + if cfg.NoLanes { + return &Application{ + logger: logger, + state: state, + snapshots: snapshots, + cfg: cfg, + }, nil + } return &Application{ - logger: logger, - state: state, - snapshots: snapshots, - cfg: cfg, + logger: logger, + state: state, + snapshots: snapshots, + cfg: cfg, + lanePriorities: cfg.Lanes, }, nil } @@ -173,11 +205,31 @@ func (app *Application) Info(context.Context, *abci.InfoRequest) (*abci.InfoResp } height, hash 
:= app.state.Info() + if app.cfg.NoLanes { + return &abci.InfoResponse{ + Version: version.ABCIVersion, + AppVersion: appVersion, + LastBlockHeight: int64(height), + LastBlockAppHash: hash, + }, nil + } + + // We set as default lane the (random) first lane id found in the list of + // lanes. On CheckTx requests, the application will always return a valid + // lane, so the mempool will never need to use the default lane value. + var defaultLane string + for id := range app.lanePriorities { + defaultLane = id + break + } + return &abci.InfoResponse{ Version: version.ABCIVersion, AppVersion: appVersion, LastBlockHeight: int64(height), LastBlockAppHash: hash, + LanePriorities: app.lanePriorities, + DefaultLane: defaultLane, }, nil } @@ -263,7 +315,7 @@ func (app *Application) CheckTx(_ context.Context, req *abci.CheckTxRequest) (*a return nil, err } - key, _, err := parseTx(req.Tx) + key, value, err := parseTx(req.Tx) if err != nil || key == prefixReservedKey { //nolint:nilerr return &abci.CheckTxResponse{ @@ -294,7 +346,25 @@ func (app *Application) CheckTx(_ context.Context, req *abci.CheckTxRequest) (*a time.Sleep(app.cfg.CheckTxDelay) } - return &abci.CheckTxResponse{Code: kvstore.CodeTypeOK, GasWanted: 1}, nil + if app.cfg.NoLanes { + return &abci.CheckTxResponse{Code: kvstore.CodeTypeOK, GasWanted: 1}, nil + } + lane := extractLane(value) + return &abci.CheckTxResponse{Code: kvstore.CodeTypeOK, GasWanted: 1, LaneId: lane}, nil +} + +// extractLane returns the lane ID as string if value is a Payload, otherwise returns empty string. +func extractLane(value string) string { + valueBytes, err := hex.DecodeString(value) + if err != nil { + panic("could not hex-decode tx value for extracting lane") + } + p := &payload.Payload{} + err = proto.Unmarshal(valueBytes, p) + if err != nil { + return "" + } + return p.GetLane() } // FinalizeBlock implements ABCI. 
diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 9ef1828f11..cfb4d03139 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -28,6 +28,7 @@ var ( map[string]string{"initial01": "a", "initial02": "b", "initial03": "c"}, }, "validators": {"genesis", "initchain"}, + "no_lanes": {true, false}, } nodeVersions = weightedChoice{ "": 2, @@ -315,6 +316,8 @@ func generateTestnet(r *rand.Rand, opt map[string]any, upgradeVersion string, pr ) } + manifest.NoLanes = opt["no_lanes"].(bool) + return manifest, nil } diff --git a/test/e2e/node/config.go b/test/e2e/node/config.go index 97cb26b8c6..f7e15bd6dd 100644 --- a/test/e2e/node/config.go +++ b/test/e2e/node/config.go @@ -50,6 +50,9 @@ type Config struct { PbtsEnableHeight int64 `toml:"pbts_enable_height"` PbtsUpdateHeight int64 `toml:"pbts_update_height"` + + NoLanes bool `toml:"no_lanes"` + Lanes map[string]uint32 `toml:"lanes"` } // App extracts out the application specific configuration parameters. @@ -72,6 +75,8 @@ func (cfg *Config) App() *app.Config { ABCIRequestsLoggingEnabled: cfg.ABCIRequestsLoggingEnabled, PbtsEnableHeight: cfg.PbtsEnableHeight, PbtsUpdateHeight: cfg.PbtsUpdateHeight, + NoLanes: cfg.NoLanes, + Lanes: cfg.Lanes, } } diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index a1df5e0721..f5a48f15c0 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -88,6 +88,10 @@ type Manifest struct { LoadTxConnections int `toml:"load_tx_connections"` LoadMaxTxs int `toml:"load_max_txs"` + // Weight for each lane defined by the app. The transaction loader will + // assign lanes to generated transactions proportionally to their weights. + LoadLaneWeights map[string]uint `toml:"load_lane_weights"` + // LogLevel specifies the log level to be set on all nodes. LogLevel string `toml:"log_level"` @@ -151,6 +155,15 @@ type Manifest struct { // configuration files for all nodes. The format is "key = value". 
// Example: "p2p.send_rate = 512000". Config []string `toml:"config"` + + // Used to disable lanes for testing behavior of + // networks that upgrade to a version of CometBFT + // that supports lanes but do not opt for using them. + NoLanes bool `toml:"no_lanes"` + + // Mapping from lane IDs to lane priorities. These lanes will be used by the + // application for setting up the mempool and for classifying transactions. + Lanes map[string]uint32 `toml:"lanes"` } // ManifestNode represents a node in a testnet manifest. diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 2af2cfc47f..87fbd7b7a5 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -20,9 +20,11 @@ import ( "github.com/cometbft/cometbft/crypto/bls12381" "github.com/cometbft/cometbft/crypto/ed25519" "github.com/cometbft/cometbft/crypto/secp256k1" + cmtrand "github.com/cometbft/cometbft/internal/rand" rpchttp "github.com/cometbft/cometbft/rpc/client/http" grpcclient "github.com/cometbft/cometbft/rpc/grpc/client" grpcprivileged "github.com/cometbft/cometbft/rpc/grpc/client/privileged" + "github.com/cometbft/cometbft/test/e2e/app" "github.com/cometbft/cometbft/types" ) @@ -83,6 +85,12 @@ type Testnet struct { // Latency Emulation is enabled when all the nodes have a zone assigned. LatencyEmulationEnabled bool + + // For generating transaction load on lanes proportionally to their + // priorities. + laneIDs []string + laneCumulativeWeights []uint + sumWeights uint } // Node represents a CometBFT node in a testnet. 
@@ -138,7 +146,6 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa if err != nil { return nil, fmt.Errorf("invalid IP network address %q: %w", ifd.Network, err) } - testnet := &Testnet{ Manifest: manifest, @@ -172,6 +179,35 @@ func NewTestnetFromManifest(manifest Manifest, file string, ifd InfrastructureDa testnet.LatencyEmulationEnabled = true + if len(testnet.Lanes) == 0 { + testnet.Lanes = app.DefaultLanes() + } + if len(testnet.LoadLaneWeights) == 0 { + // Assign same weight to all lanes. + testnet.LoadLaneWeights = make(map[string]uint, len(testnet.Lanes)) + for id := range testnet.Lanes { + testnet.LoadLaneWeights[id] = 1 + } + } + if len(testnet.Lanes) < 1 { + return nil, errors.New("number of lanes must be greater or equal to one") + } + + // Pre-compute lane data needed for generating transaction load. + testnet.laneIDs = make([]string, 0, len(testnet.Lanes)) + laneWeights := make([]uint, 0, len(testnet.Lanes)) + for lane := range testnet.Lanes { + testnet.laneIDs = append(testnet.laneIDs, lane) + weight := testnet.LoadLaneWeights[lane] + laneWeights = append(laneWeights, weight) + testnet.sumWeights += weight + } + testnet.laneCumulativeWeights = make([]uint, len(testnet.Lanes)) + testnet.laneCumulativeWeights[0] = laneWeights[0] + for i := 1; i < len(testnet.laneCumulativeWeights); i++ { + testnet.laneCumulativeWeights[i] = testnet.laneCumulativeWeights[i-1] + laneWeights[i] + } + for _, name := range sortNodeNames(manifest) { nodeManifest := manifest.NodesMap[name] ind, ok := ifd.Instances[name] @@ -396,6 +432,20 @@ func (t Testnet) Validate() error { ) } } + if len(t.LoadLaneWeights) != len(t.Lanes) { + return fmt.Errorf("number of lane weights (%d) must be equal to "+ + "the number of lanes defined by the app (%d)", + len(t.LoadLaneWeights), len(t.Lanes), + ) + } + for lane := range t.Lanes { + if _, ok := t.LoadLaneWeights[lane]; !ok { + return fmt.Errorf("lane %s not in weights map", lane) + } + } + if t.sumWeights 
<= 0 { + return errors.New("the sum of all lane weights must be greater than 0") + } for _, node := range t.Nodes { if err := node.Validate(t); err != nil { return fmt.Errorf("invalid node %q: %w", node.Name, err) @@ -592,6 +642,28 @@ func (t Testnet) HasPerturbations() bool { return false } +// weightedRandomIndex, given a list of cumulative weights and the sum of all +// weights, it picks one of them randomly and proportionally to its weight, and +// returns its index in the list. +func weightedRandomIndex(cumWeights []uint, sumWeights uint) int { + // Generate a random number in the range [0, sumWeights). + r := cmtrand.Int31n(int32(sumWeights)) + + // Return i when the random number falls in the i'th interval. + for i, cumWeight := range cumWeights { + if r < int32(cumWeight) { + return i + } + } + return -1 // unreachable +} + +// WeightedRandomLane returns an element in the list of lane ids, according to a +// predefined weight for each lane in the list. +func (t *Testnet) WeightedRandomLane() string { + return t.laneIDs[weightedRandomIndex(t.laneCumulativeWeights, t.sumWeights)] +} + // Address returns a P2P endpoint address for the node. 
func (n Node) AddressP2P(withID bool) string { ip := n.InternalIP.String() diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go index 70b6950bb5..5d8d884c2e 100644 --- a/test/e2e/runner/load.go +++ b/test/e2e/runner/load.go @@ -133,6 +133,7 @@ func createTxBatch(ctx context.Context, txCh chan<- types.Tx, testnet *e2e.Testn Size: uint64(testnet.LoadTxSizeBytes), Rate: uint64(testnet.LoadTxBatchSize), Connections: uint64(testnet.LoadTxConnections), + Lane: testnet.WeightedRandomLane(), }) if err != nil { panic(fmt.Sprintf("Failed to generate tx: %v", err)) diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index f42ec5a956..aed58b6636 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -412,6 +412,8 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { "abci_requests_logging_enabled": node.Testnet.ABCITestsEnabled, "pbts_enable_height": node.Testnet.PbtsEnableHeight, "pbts_update_height": node.Testnet.PbtsUpdateHeight, + "no_lanes": node.Testnet.Manifest.NoLanes, + "lanes": node.Testnet.Manifest.Lanes, } switch node.ABCIProtocol { case e2e.ProtocolUNIX: diff --git a/test/fuzz/mempool/checktx.go b/test/fuzz/mempool/checktx.go index 22a3b9d8e6..e508b10ac0 100644 --- a/test/fuzz/mempool/checktx.go +++ b/test/fuzz/mempool/checktx.go @@ -20,7 +20,7 @@ func init() { cfg := config.DefaultMempoolConfig() cfg.Broadcast = false - mempool = mempl.NewCListMempool(cfg, appConnMem, 0) + mempool = mempl.NewCListMempool(cfg, appConnMem, nil, 0) } func Fuzz(data []byte) int { diff --git a/test/fuzz/tests/mempool_test.go b/test/fuzz/tests/mempool_test.go index ba241c0ffa..2952a3c2cf 100644 --- a/test/fuzz/tests/mempool_test.go +++ b/test/fuzz/tests/mempool_test.go @@ -24,7 +24,7 @@ func FuzzMempool(f *testing.F) { cfg := config.DefaultMempoolConfig() cfg.Broadcast = false - mp := mempl.NewCListMempool(cfg, conn, 0) + mp := mempl.NewCListMempool(cfg, conn, nil, 0) f.Fuzz(func(_ *testing.T, data []byte) { _, _ = mp.CheckTx(data, "") diff 
--git a/test/loadtime/cmd/report/main.go b/test/loadtime/cmd/report/main.go index b604fbfd8f..a0ced07a18 100644 --- a/test/loadtime/cmd/report/main.go +++ b/test/loadtime/cmd/report/main.go @@ -94,7 +94,7 @@ func toCSVRecords(rs []report.Report) [][]string { } res := make([][]string, total+1) - res[0] = []string{"experiment_id", "block_time", "duration_ns", "tx_hash", "connections", "rate", "size"} + res[0] = []string{"experiment_id", "block_time", "duration_ns", "tx_hash", "lane", "connections", "rate", "size"} offset := 1 for _, r := range rs { idStr := r.ID.String() @@ -102,7 +102,16 @@ func toCSVRecords(rs []report.Report) [][]string { rateStr := strconv.FormatInt(int64(r.Rate), 10) sizeStr := strconv.FormatInt(int64(r.Size), 10) for i, v := range r.All { - res[offset+i] = []string{idStr, strconv.FormatInt(v.BlockTime.UnixNano(), 10), strconv.FormatInt(int64(v.Duration), 10), fmt.Sprintf("%X", v.Hash), connStr, rateStr, sizeStr} + res[offset+i] = []string{ + idStr, + strconv.FormatInt(v.BlockTime.UnixNano(), 10), + strconv.FormatInt(int64(v.Duration), 10), + fmt.Sprintf("%X", v.Hash), + v.Lane, + connStr, + rateStr, + sizeStr, + } } offset += len(r.All) } diff --git a/test/loadtime/payload/payload.pb.go b/test/loadtime/payload/payload.pb.go index 765c81d3da..5293a534e7 100644 --- a/test/loadtime/payload/payload.pb.go +++ b/test/loadtime/payload/payload.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.27.1 -// protoc v3.20.1 +// protoc-gen-go v1.33.0 +// protoc v4.25.3 // source: payload/payload.proto package payload @@ -35,6 +35,7 @@ type Payload struct { Time *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=time,proto3" json:"time,omitempty"` Id []byte `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"` Padding []byte `protobuf:"bytes,6,opt,name=padding,proto3" json:"padding,omitempty"` + Lane string `protobuf:"bytes,7,opt,name=lane,proto3" json:"lane,omitempty"` } func (x *Payload) Reset() { @@ -111,6 +112,13 @@ func (x *Payload) GetPadding() []byte { return nil } +func (x *Payload) GetLane() string { + if x != nil { + return x.Lane + } + return "" +} + var File_payload_payload_proto protoreflect.FileDescriptor var file_payload_payload_proto_rawDesc = []byte{ @@ -118,7 +126,7 @@ var file_payload_payload_proto_rawDesc = []byte{ 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6c, 0x6f, 0x61, 0x64, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x01, 0x0a, 0x07, 0x50, + 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x01, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x61, 0x74, 0x65, @@ -129,9 +137,10 @@ var file_payload_payload_proto_rawDesc = []byte{ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x18, 
0x06, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d, - 0x69, 0x6e, 0x74, 0x2f, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x74, 0x2f, 0x74, + 0x0c, 0x52, 0x07, 0x70, 0x61, 0x64, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x61, + 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x61, 0x6e, 0x65, 0x42, 0x34, + 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6d, + 0x65, 0x74, 0x62, 0x66, 0x74, 0x2f, 0x63, 0x6f, 0x6d, 0x65, 0x74, 0x62, 0x66, 0x74, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x6c, 0x6f, 0x61, 0x64, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } diff --git a/test/loadtime/payload/payload.proto b/test/loadtime/payload/payload.proto index 59438a058c..00143c8ac4 100644 --- a/test/loadtime/payload/payload.proto +++ b/test/loadtime/payload/payload.proto @@ -15,4 +15,5 @@ message Payload { google.protobuf.Timestamp time = 4; bytes id = 5; bytes padding = 6; + string lane = 7; } diff --git a/test/loadtime/report/report.go b/test/loadtime/report/report.go index 3cd01eed19..5c86b3601b 100644 --- a/test/loadtime/report/report.go +++ b/test/loadtime/report/report.go @@ -28,6 +28,7 @@ type DataPoint struct { Duration time.Duration BlockTime time.Time Hash []byte + Lane string } // Report contains the data calculated from reading the timestamped transactions @@ -71,7 +72,7 @@ func (rs *Reports) ErrorCount() int { return rs.errorCount } -func (rs *Reports) addDataPoint(id uuid.UUID, l time.Duration, bt time.Time, hash []byte, conns, rate, size uint64) { +func (rs *Reports) addDataPoint(id uuid.UUID, lane string, l time.Duration, bt time.Time, hash []byte, conns, rate, size uint64) { r, ok := rs.s[id] if !ok { r = Report{ @@ -84,7 +85,7 @@ func (rs *Reports) 
addDataPoint(id uuid.UUID, l time.Duration, bt time.Time, has } rs.s[id] = r } - r.All = append(r.All, DataPoint{Duration: l, BlockTime: bt, Hash: hash}) + r.All = append(r.All, DataPoint{Duration: l, BlockTime: bt, Hash: hash, Lane: lane}) if l > r.Max { r.Max = l } @@ -130,6 +131,7 @@ func (rs *Reports) addError() { func GenerateFromBlockStore(s BlockStore) (*Reports, error) { type payloadData struct { id uuid.UUID + lane string l time.Duration bt time.Time hash []byte @@ -173,6 +175,7 @@ func GenerateFromBlockStore(s BlockStore) (*Reports, error) { bt: b.bt, hash: b.tx.Hash(), id: uuid.UUID(*idb), + lane: p.GetLane(), connections: p.GetConnections(), rate: p.GetRate(), size: p.GetSize(), @@ -213,7 +216,7 @@ func GenerateFromBlockStore(s BlockStore) (*Reports, error) { reports.addError() continue } - reports.addDataPoint(pd.id, pd.l, pd.bt, pd.hash, pd.connections, pd.rate, pd.size) + reports.addDataPoint(pd.id, pd.lane, pd.l, pd.bt, pd.hash, pd.connections, pd.rate, pd.size) } reports.calculateAll() return reports, nil diff --git a/version/version.go b/version/version.go index 5529824a08..b615fc9434 100644 --- a/version/version.go +++ b/version/version.go @@ -5,7 +5,7 @@ const ( // when not using git describe. It is formatted with semantic versioning. CMTSemVer = "1.0.1" // ABCISemVer is the semantic version of the ABCI protocol. - ABCISemVer = "2.1.0" + ABCISemVer = "2.2.0" ABCIVersion = ABCISemVer // P2PProtocol versions all p2p behavior and messages. // This includes proposer selection. 
From ecbb24ffbde07472cd91c25c42f2252ec1c1e4ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hern=C3=A1n=20Vanzetto?= <15466498+hvanz@users.noreply.github.com> Date: Fri, 27 Sep 2024 12:50:21 +0200 Subject: [PATCH 05/14] docs(mempool/lanes): Update ADR with latest design changes (#4117) Closes #4120 - [Allow different lanes to the same priority](https://github.com/cometbft/cometbft/commit/73603656d5ef27223531f00b37b0fc4a9d9bf325) - [Limits on lane capacities](https://github.com/cometbft/cometbft/commit/2de8f077dbb11ba9fdde9b676808f8d56b2fd1ed) --- .../architecture/adr-118-mempool-lanes.md | 728 ++++++++++++++++++ 1 file changed, 728 insertions(+) create mode 100644 docs/references/architecture/adr-118-mempool-lanes.md diff --git a/docs/references/architecture/adr-118-mempool-lanes.md b/docs/references/architecture/adr-118-mempool-lanes.md new file mode 100644 index 0000000000..3bc20237f6 --- /dev/null +++ b/docs/references/architecture/adr-118-mempool-lanes.md @@ -0,0 +1,728 @@ +# ADR 118: Mempool QoS + +## Changelog + +- 2024-04-12: Initial notes (@hvanz) +- 2024-04-12: Comments on the notes (@sergio-mena) +- 2024-04-17: Discussions (@sergio-mena @hvanz) +- 2024-04-18: Preliminary structure (@hvanz) +- 2024-05-01: Add Context and Properties (@hvanz) +- 2024-05-21: Add more Properties + priority mempool (@sergio-mena) +- 2024-06-13: Technical design (@hvanz) +- 2024-07-02: Updates based on reviewer's comments (@hvanz, @sergio-mena) +- 2024-07-09: Updates based on reviewer's comments (@hvanz) +- 2024-09-13: Added pre-confirmations section (@sergio-mena) +- 2024-09-27: Allow lanes to have same priority + lane capacities (@hvanz) + +## Status + +Accepted. Tracking issue: [#2803][tracking-issue]. + +## Context + +In the current implementation, the only property that the mempool tries to enforce when processing +and disseminating transactions is maintaining the order in which transactions arrive to the nodes, +that is, a FIFO ordering. 
However, ensuring a strict transmission order over the network proves
+challenging due to inherent characteristics of the underlying communication protocols that cause
+message delays and potential reordering. Consequently, while many Tendermint Core and CometBFT
+applications have always assumed this ordering holds, the FIFO-ness of transactions is not
+guaranteed and is offered only as a best effort.
+
+Beyond the apparent FIFO sequencing, transactions in the mempool are treated equally, meaning that
+they are not discriminated as to which are disseminated first or which transactions the mempool
+offers to the proposer when creating the next block. In practice, however, not all transactions have
+the same importance for the application logic, especially when it comes to latency requirements.
+Depending on the application, we may think of countless categories of transactions based on their
+importance and requirements, spanning from IBC messages to transactions for exchanges, for smart
+contract execution, for smart contract deployment, grouped by SDK modules, and so on. Even
+transactions prioritized by economic incentives could be given preferential treatment. Or big
+transactions, regardless of their nature, could be categorized as low priority, to mitigate
+potential attacks on the mempool.
+
+The goal of this document is thus to propose a mechanism enabling the mempool to prioritize
+transactions by *classes*, for processing and dissemination, directly impacting block creation and
+transaction latency. In IP networking terminology, this is known as Quality of Service (QoS). By
+providing certain QoS guarantees, developers will be able to more easily estimate when transactions
+will be disseminated and reaped from the mempool to be included in a block.
+
+In practical terms, we envision an implementation of the transaction class abstraction as *mempool
+lanes*. 
The application will be allowed to split the mempool transaction space into a hierarchy of +lanes, with each lane operating as an independent mempool. At the same time, all of them need to be +coordinated to ensure the delivery of the desired levels of QoS. + +Note that improving the dissemination protocol to reduce bandwidth and/or latency is a separate +concern and falls outside the scope of this proposal. Likewise, graceful degradation under high load +is an orthogonal problem to transaction classification, although the latter may help improve the former. + +## Properties + +Before jumping into the design of the proposal, we define more formally the properties supported by +the current implementation of the mempool. Then we state what properties the new mempool should +offer to guarantee the desired QoS. The following definitions are common to all properties. + +When attempting to add an incoming transaction to the mempool, the node first checks that it is not +already in the cache before checking its validity with the application. + +:memo: _Definition_: We say that a node receives a transaction `tx` _for the first time_ when the +node receives `tx` and `tx` is not in the cache. + +By this definition, it is possible that a node receives a transaction "for the first time", then +gets the transaction evicted from the cache, and at a later time receives it "for the first time" +again. The cache implements a Least-Recently Used (LRU) policy for removing entries when the +cache is full. + +:memo: _Definition_: Given any two different transactions `tx1` and `tx2`, in a given node, we say that: +1. `tx1` is *validated before* `tx2`, when `tx1` and `tx2` are received for the first time, and `tx1` +is validated against the application (via `CheckTx`) before `tx2`, +1. `tx1` is *rechecked before* `tx2`, when `tx1` and `tx2` are in the mempool and `tx1` is +re-validated (rechecked via `CheckTx`) before `tx2`, +1. 
`tx1` is *reaped before* `tx2`, when `tx1` is reaped from the mempool to be included in a block + proposal before `tx2`, +1. `tx1` is *disseminated before* `tx2`, when `tx1` is sent to a given peer before `tx2`. + +In 2, both transactions are rechecked at the same height, because both are in the mempool. + +In 4, note that in the current implementation there is one dissemination routine per peer, so it +could happen that `tx2` is sent to a peer before `tx1` is sent to a different peer. +Hence the importance of expression "to a given peer" in that definition. + +### Current mempool + +As stated above, the current mempool offers a best-effort FIFO ordering of transactions. We state +this property as follows. + +:parking: _Property_ **FIFO ordering of transactions**: We say that the mempool makes a best effort +in maintaining the FIFO ordering of transactions when transactions are validated, rechecked, reaped, +and disseminated in the same order in which the mempool has received them. + +More formally, given any two different transactions `tx1` and `tx2`, if a node's mempool receives +`tx1` before receiving `tx2`, then `tx1` will be validated, rechecked, reaped, and disseminated +before `tx2` (as defined above). + +Note that a node's mempool can receive a transaction either from a `broadcast_tx_*` RPC endpoint or +from a peer. + +This property guarantees the FIFO ordering at any given node, but it cannot be generalised to all +the nodes in the network because the property does not hold at the network level. Hence, FIFO +ordering on the whole system is best effort. + +### Mempool with QoS + +The main goal of QoS is to guarantee that certain transactions have lower latency than others. +Before stating this property, we need to make some definitions. + +:memo: _Definition_: a *transaction class* is a disjoint set of transactions having some common +characteristics as defined by the application. + +A transaction may only have one class. 
If it is not assigned any specific class, it will be assigned +a *default class*, which is a special class always present in any set of classes. This is analogous +to the _native VLAN_ for untagged traffic in an 802.1Q network. Because no transaction can belong to +two or more classes, transaction classes form disjoint sets, that is, the intersection between any two +classes is empty. Also, all transactions in the mempool are the union of the transactions in all +classes. + +:memo: _Definition_: Each class has a *priority* and two classes cannot have the same priority. +Therefore all classes can be ordered by priority. + +When a transaction is received for the first time and validated via `CheckTx`, the application MAY +return the class that it assigns to the transaction. If it actually returns a class, the mempool +MUST use it to prioritize the transaction. When transactions are rechecked, applications MAY return +a class, but the mempool will discard it. + +Given these definitions, we want the proposed QoS mechanism to offer the following property: + +#### Basic properties + +:parking: _Property_ **Priorities between classes**: Transactions belonging to a certain class will +be reaped and disseminated before transactions belonging to another class with lower priority. + +Formally, given two transaction classes `c1` and `c2`, with `c1` having more priority than `c2`, if +the application assigns the classes `c1` and `c2` respectively to transactions `tx1` and `tx2`, then +`tx1` will be reaped and disseminated before `tx2`. + +More importantly, as a direct consequence of this property, `tx1` will be disseminated faster and it +will be included in a block before `tx2`. Thus, `tx1` will have a lower latency than `tx2`. +Currently, it is not possible to guarantee this kind of property. 
+
+:memo: _Definition_: The *latency of a transaction* is the difference between the time at which a
+user or client submits the transaction for the first time to any node in the network, and the
+timestamp of the block in which the transaction finally was included.
+
+We also want to keep the FIFO ordering within each class (for the time being):
+
+:parking: _Property_ **FIFO ordering per class**: For transactions within the same class, the
+mempool will maintain a FIFO order within the class when transactions are validated, rechecked,
+reaped, and disseminated.
+
+Given any two different transactions `tx1` and `tx2` belonging to the same class, if the mempool
+receives `tx1` before receiving `tx2`, then:
+- `tx1` will be validated and rechecked against the application (via `CheckTx`) before `tx2`, and
+- `tx1` will be reaped and disseminated before `tx2`.
+
+As a consequence, given that classes of transactions have a sequential ordering, and that classes do
+not have elements in common, we can state the following property:
+
+:parking: _Property_ **Partial ordering of all transactions**: The set of all the transactions in
+the mempool, regardless of their classes, will have a *partial order*.
+
+This means that some pairs of transactions are comparable and, thus, have an order, while others
+are not.
+
+#### Network-wide consistency
+
+The properties presented so far may be interpreted as per-node properties.
+However, we need to define some network-wide properties in order for a mempool QoS implementation
+to be useful and predictable for the whole appchain network.
+These properties are expressed in terms of consistency of the information, configuration and behaviour
+across nodes in the network.
+
+:parking: _Property_ **Consistent transaction classes**: For any transaction `tx`,
+and any two correct nodes $p$ and $q$ that receive `tx` *for the first time*,
+$p$ and $q$ MUST have the same set of transaction classes and their relative priority and configuration.
+ +The property is only required to hold for on-the-fly transactions: +if a node receives a (late) transaction that has already been decided, this property does not enforce anything. +The same goes for duplicate transactions. +Notice that, if this property does not hold, it is not possible to guarantee any property across the network, +such as transaction latency as defined above. + +:parking: _Property_ **Consistent transaction classification**: For any transaction `tx` +and any two correct nodes $p$ and $q$ that receive `tx` *for the first time*, +$p$'s application MUST classify `tx` into the same transaction class as $q$'s application. + +This property only makes sense when the previous property ("Consistent transaction classes") defined above holds. +Even if we ensure consistent transaction classes, if this property does not hold, a given transaction +may not receive the same classification across the network and it will thus be impossible to reason +about any network-wide guarantees we want to provide that transaction with. + +Additionally, it is important to note that these two properties also constrain the way transaction +classes and transaction classification logic can evolve in an existing implementation. +If either transaction classes or classification logic are not modified in a coordinated manner in a working system, +there will be at least a period where these two properties may not hold for all transactions. + +## Alternative Approaches + +### CometBFT Priority Mempool + +CometBFT used to have a `v1` mempool, specified in Tendermint Core [ADR067][adr067] and deprecated as of `v0.37.x`, +which supported per-transaction priority assignment. +The key point of the priority mempool's design was that `CheckTxResponse` was extended with a few fields, +one of which being an `int64` that the application could use to provide a priority to the transaction being checked. 
+
+This design can be seen as partially addressing the specification of a Mempool with QoS
+presented in the previous section. Every possible value of the `int64` priority field returned by the application
+can be understood as a _different_ traffic class.
+Let us examine whether the properties specified above are fulfilled by the priority mempool design
+as described in [ADR067][adr067]:
+
+1. Partial ordering of all transactions is maintained because the design still keeps a FIFO queue for gossiping transactions.
+   Also, transactions are reaped according to non-decreasing priority first, and then in FIFO order
+   for transactions with equal priority (see this `ReapMaxBytesMaxGas`'s [docstring][reapmaxbytesmaxgas]).
+1. Since the priority mempool uses FIFO for transactions of equal priority, it also fulfills the "FIFO ordering per class" property.
+   The problem here is that, since every value of the priority `int64` field is considered a different transaction class,
+   there are virtually unlimited traffic classes.
+   So it is too easy for an application to end up using hundreds, if not thousands of transaction classes at a given time.
+   In this situation, "FIFO ordering per class", while fulfilled, becomes a corner case and thus does not add much value.
+1. The consistent transaction classes property is trivially fulfilled, as the set of transaction classes never changes:
+   it is the set of all possible values of an `int64`.
+1. Finally, the priority mempool design does not make any provisions on how the application is to evolve its prioritization
+   (i.e., transaction classification) logic.
+   Therefore, the design does not guarantee the fulfillment of the consistent transaction classification property.
+
+The main hindrance for the wide adoption of the priority mempool was
+ +Besides, the lack of provisions for evolving the prioritization logic (point 4 above) could have also got +in the way of adoption. + + +### Solana + +#### Introduction to Gulf Stream and Comparison with CometBFT's Mempool + +A core part of Solana's design is [Gulf Stream][gulf-stream], +which is marketed as a "mempool-less" way of processing in-flight transactions. +Similarly of a CometBFT- based chain, the sequence of leaders (nodes that produce blocks) is known in advance. +However, unlike CometBFT, Solana keeps the same leader for a whole epoch, whole typical length is approx. 2 days +(what if the leader fails in the middle of an epoch?). +According to the Gulf Stream design, rather than maintaining a mempool at all nodes to ensure transactions +will reach _any_ leader/validator, transactions are directly sent to the current leader and the next, +according to the sequence of leaders calculated locally (known as _leader schedule_). +As a result, Gulf Stream does not use gossip-based primitives to disseminate transactions, +but UDP packets sent directly to the current (and next) leader's IP address. +One of the main points of adopting gossip protocols by Tendermint Core and CometBFT (coming from Bitcoin and Ethereum) +is censorship resistance. It is not clear how Gulf Stream deals with an adversary controlling a part of the network +that stands on the way of those UDP packets containing submitted transactions. + +#### Transaction Priority Design + +In Solana, transaction priority is controlled by fees: they introduce the concept of [_priority fees_][solana-prio-fees]. +The priority fee is an optional configuration parameter when submitting a transaction, +which allows the submitter to increase the likelihood of their transaction making it to a block. +The priority fee is provided in terms of _price per Unit of Computation_ (UC), priced in [micro-lamports per CU][prio-fee-price]. 
+A CU is the equivalent of Cosmos's _gas_, and so, the priority fee is analogous (in concept)
+to the Cosmos SDK's `--gas-prices` [flag][sdk-gas-prices].
+The main difference is that the SDK (currently) uses `--gas-prices`
+to set up a per-node threshold of acceptability in gas prices,
+whereas Solana uses the (default or user-configured) priority fee as the transaction's _actual_ priority.
+
+This is very similar to the way CometBFT's priority mempool in `v0.37.x` was supposed to be used by applications,
+but in a monolithic manner: there is no "priority" abstraction in Solana as there is nothing similar to ABCI.
+In short, the fees _are_ the priority.
+Thus, if we were to check the properties specified [above](#mempool-with-qos),
+with the caveat that Solana does not have a built-in mempool,
+we would reach the same conclusions as with CometBFT's `v0.37.x` [priority mempool](#cometbft-priority-mempool).
+Namely, a _degradation_ in observable FIFO guarantees (affecting applications that depend on it for performance),
+and a lack of provisions for evolving priority classification in a consistent manner.
+The latter may appear less important as transactions are directly sent to the current leader,
+but it is not clear how retried transactions in periods of high load can receive a consistent priority treatment.
+
+### Ethereum Pre-confirmations
+
+#### Brief Explanation
+
+Ethereum pre-confirmations are a mechanism designed to reduce transaction latency. Justin Drake's [proposal][based-preconfs]
+for _based pre-confirmations_ has gained attention in recent months in the Ethereum research community,
+though similar ideas date back to Bitcoin's [0-confs][Oconfs].
+
+Pre-confirmations occur in the context of _fast games_, techniques applied between consecutive Layer-1 blocks
+to improve certain performance guarantees and help manage _MEV_ (Maximal Extractable Value).
+
+The process is straightforward.
A user submits a transaction and requests a _preconfer_ (a validator) to guarantee specific handling +of that transaction, typically for a fee, called _tip_. +In exchange, the preconfer signs a _promise_ — most often guaranteeing transaction inclusion in the next block. +The preconfer can only claim the tip if the promise is fulfilled, and validators opting in to become preconfers +accept new slashing conditions related to _liveness_ (failure to propose a block) and _safety_ (failure to meet the promise). + +This design enables various implementations of pre-confirmations, and it's still early to determine which form will dominate in Ethereum. + +#### Comparison to Mempool QoS + +Unlike Mempool QoS — the design described [below](#detailed-design) — which prioritizes transactions based +on network resource availability, +pre-confirmations focus on individual user guarantees about transaction treatment and certainty of inclusion. +While the connection to MEV is not fully understood yet, pre-confirmations may provide some mitigation against MEV-related risks. + +Pre-confirmations can also coexist with Mempool QoS in CometBFT-based blockchains. +For instance, particular Mempool QoS configurations, such as a starving, FIFO, high-priority lane, +could be part of an implementation of pre-confirmations in a CometBFT-based chain. + +### Skip's Block-SDK Lanes + +As of version `v0.47.x`, the Cosmos SDK offers application developers the possibility to use an [Application-Side Mempool][sdk-app-mempool]. +It is a mempool structure maintained by the SDK application and populated with valid transactions received via `CheckTx`. +An application maintaining such a mempool is free to define the way transactions are ordered, reaped for a block, aggregated, removed, etc. +Typically, upon `PrepareProposal`, the SDK application disregards the transactions proposed by CometBFT, +and rather proposes transactions reaped from its own mempool, and according to its mempool's rules. 
+ +The Skip team have released an extension of the Application-Side mempool, called [Block-SDK][skip-block-sdk], +that introduces the concept of _lanes_, turning the mempool "into a *highway* consisting of individual *lanes*". +The concept of lanes, introduced in Skip's Block-SDK, is pretty aligned with Mempool QoS as specified above. +Indeed, we use the same term, _lanes_, in the [Detailed Design](#detailed-design) section below, +which describes a minimum viable product (MVP) implementation of the concept of transaction classes. + +The main difference between Skip's Block-SDK's lanes and the design we present below is that +the Block-SDK implements mempool lanes at the application level, whereas this ADR proposes a specification and a design at CometBFT level, +thus including provisions for **transaction gossiping** as an integral part of it. +As a result, the Block-SDK's lanes can be used to implement the Mempool QoS specification in everything that relates to block production, +but not at the network gossip level. + +Importantly, both designs, Skip's Block SDK and the one described [below](#detailed-design), are complementary. +An application using Skip's Block-SDK lanes already contains transaction classification logic, and so, +it can easily be extended to provide `CheckTx` with the information needed by an implementation of CometBFT mempool QoS +(such as the design we propose below) to also achieve a more predictable transaction latency, +depending on the lane/class a transaction belongs to. + +## Decision + +Implement an MVP following the design in the next section. + +## Detailed Design + +This section describes the architectural changes needed to implement an MVP of lanes in the +mempool. The following is a summary of the key design decisions: +- [[Lanes definition](#lanes-definition)] The list of lanes and their corresponding priorities + will be hardcoded in the application logic. 
+- [[Initialization](#initialization)] How the application configures lanes on CometBFT. +- [[Internal data structures](#internal-data-structures)] There will be one concurrent list (CList) data structure per + lane. +- [[Configuration](#configuration)] All lanes will share the same mempool configuration. +- [[Adding transactions to the mempool](#adding-transactions-to-the-mempool)] When validating a transaction via CheckTx, the + application will optionally return a lane for the transaction. +- [[Transaction dissemination](#transaction-dissemination)] We will continue to use the current P2P + channel for disseminating transactions, and we will implement in the mempool the logic for + selecting the order in which to send transactions. +- [[Reaping transactions for creating blocks](#reaping-transactions-for-creating-blocks)] + Transactions will be reaped from higher-priority lanes first, preserving intra-lane FIFO ordering. +- [[Prioritization logic](#prioritization-logic)] For disseminating and reaping transactions, the + scheduling algorithm should be prevent starvation of low-priority lanes. + +### Lanes definition + +The list of lanes and their associated priorities will be hardcoded in the application logic. A lane +is identified by a **name** of type `string` and assigned a **priority** of type `uint32`. The +application also needs to define which of the lanes is the **default lane**, which is not +necessarily the lane with the lowest priority. + +To obtain the lane information from the application, we need to extend the ABCI `Info` response to +include the following fields. These fields need to be filled by the application only in case it +wants to implement lanes. +```protobuf +message InfoResponse { + ... + map lane_priorities = 6; + uint32 default_lane = 7; +} +``` +The field `lane_priorities` is a map from lane identifiers to priorities. Different lanes may have +the same priority. 
On the mempool side, lane identifiers will mainly be used for user interfacing +(logging, metric labels). + +The lowest priority a lane may have is 1. Higher values correspond to higher priorities. The value 0 +is reserved for when the application does not have a lane to assign, so it leaves the `lane_id` +field empty in the `CheckTx` response (see [below](#adding-transactions-to-the-mempool)). This +happens either when the application does not classify transactions, or when the transaction is +invalid. + +On receiving the information from the app, CometBFT will validate that: +- `default_lane` is a key in `lane_priorities`, and +- `lane_priorities` is empty if and only if `default_lane` is empty. + +### Initialization + +Currently, querying the app for `Info` happens during the handshake between CometBFT and the app, +during the node initialization, and only when state sync is not enabled. The `Handshaker` first +sends an `Info` request to fetch the app information, and then replays any stored block needed to +sync CometBFT with the app. The lane information is needed regardless of whether state sync is +enabled, so one option is to query the app information outside of the `Handshaker`. + +In this proposed approach, updating the lane definitions will require a single governance proposal +for updating the software. CometBFT will not need to deal with dynamic lane changes: it will just +need to set up the lanes when starting up (whether afresh or in recovery mode). + +Different nodes also need to agree on the lanes they use. When a node connects to a peer, they both +perform a handshake to agree on some basic information (see `DefaultNodeInfo`). Since the +application includes the lane definitions, it suffices to ensure that both nodes agree on the +version of the application. Although the application version is included in `DefaultNodeInfo`, there +is currently no check for compatibility between the versions. 
To address this, we would need to +modify the P2P handshake process to validate that the application versions are compatible. + +Finally, this approach is compatible with applications that need to swap binaries when +catching up or upgrading, such as SDK applications using [Cosmovisor][cosmovisor]. +When a node is catching up (i.e., state or block syncing), its peers will detect +that the node is late and will not send it any transactions until it is caught up. +So, the particular lane configuration of the node is irrelevant while catching up. +When going through a Cosmovisor-driven upgrade, all nodes will swap binaries at the same +height (which is specified by the corresponding Software Upgrade gov proposal). +If the new version of the software contains modified lane configuration +(and therefore new transaction classification logic), those changes will kick in +in a coordinated manner thanks to the regular Cosmovisor workflow. + +### Internal data structures + +In the mempool, a lane is defined by its priority: +```golang +type Lane uint32 +``` + +Currently, the `CListMempool` data structure has two fields to store and access transactions: +```golang +txs *clist.CList // Concurrent list of mempool entries. +txsMap sync.Map // Map of type TxKey -> *clist.CElement, for quick access to elements in txs. +``` + +With the introduction of lanes, the main change will be to divide the `CList` data structure into +$N$ `Clist`s, one per lane. `CListMempool` will have the following fields: +```golang +lanes map[Lane]*clist.CList +txsMap sync.Map // Map of type TxKey -> *clist.CElement, for quick access to elements in lanes. +txLanes sync.Map // Map of type TxKey -> Lane, for quick access to the lane corresponding to a tx. + +// Fixed variables set during initialization. 
+defaultLane Lane +sortedLanes []Lane // Lanes sorted by priority +``` +The auxiliary fields `txsMap` and `txLanes` are, respectively, for direct access to the mempool +entries, and for direct access to the lane of a given transaction. + +If the application does not implement lanes (that is, it responds with empty values in +`InfoResponse`), then `defaultLane` will be set to 1, and `lanes` will have only one entry for the +default lane. In this case, the new mempool's behaviour will be equivalent to that of the current mempool. + +`CListMempool` also contains the cache, which is only needed before transactions have a lane +assigned. Since the cache is independent of the lanes, we do not need to modify it. + +### Configuration + +For an MVP, we do not need to have a customized configuration for each lane. The current mempool +configuration will continue to apply to the mempool, which now is a union of lanes. The total size +of the mempool will be the sum of the sizes of all lanes. Therefore, the mempool capacities as +currently defined in the configuration will put an upper limit on the union of all lanes. These +configurations are: +- `Size`, the total number of transactions allowed in the mempool, +- `MaxTxsBytes`, the maximum total number of bytes of the mempool, and +- `MaxTxBytes`, the maximum size in bytes of a single transaction accepted into the mempool. + +However, we still need to enforce limits on each lane's capacity. Without such limits, a +low-priority lane could end up occupying all the mempool space. Since we want to avoid introducing +new configuration options unless absolutely necessary, we propose two simple approaches for +partitioning the mempool space. + +1. Proportionally to lane priorities: This approach could lead to under-utilization of the mempool if + there are significant discrepancies between priority values, as it would allocate space unevenly. +2. 
Evenly across all lanes: Assuming high-priority transactions are smaller in size than + low-priority transactions, this approach would still allow for more high-priority transactions to + fit in the mempool compared to lower-priority ones. + +Note that each lane's capacity will be limited both by the number of transactions and their total +size in bytes. + +For the MVP, we've chosen the second approach. If users find that the lane capacity is insufficient, +they still have the option of increasing the total mempool size, which will proportionally increase +the capacity of all lanes. In future iterations, we may introduce more granular control over lane +capacities if needed. + +Additionally, the `Recheck` and `Broadcast` flags will apply to all lanes or to none. Remember that, +if `PrepareProposal`'s app logic can ever add a new transaction, it becomes _always_ mandatory to +recheck remaining transactions in the mempool, so there is no point in disabling `Recheck` per lane. + +### Adding transactions to the mempool + +When validating a transaction received for the first time with `CheckTx`, the application will +optionally return its lane identifier in the response. +```protobuf +message CheckTxResponse { + ... + string lane_id = 12; +} +``` +The callback that handles the first-time CheckTx response will append the new mempool entry to the +corresponding `CList`, namely `lanes[lane_id]`, and update the other auxiliary variables accordingly. +If `lane_id` is an empty string, it means that the application did not set any lane in the response +message, so the transaction will be assigned to the default lane. + +### Removing transactions from the mempool + +A transaction may be removed in two scenarios: when updating the mempool with a list of committed +transactions, or during rechecking if the transaction is reassessed as invalid. In either case, the +first step is to identify the lane the transaction belongs to by accessing the `txLanes` map. 
Then, +we remove the entry from the CList corresponding to its lane and update the auxiliary variables +accordingly. + +As an optimization, we could prioritize the removal of transactions from high-priority lanes first. +The broadcast goroutines are constantly reading the list of transactions to disseminate them, though +there is no guarantee that they will not send transactions that are about to be removed. + +When updating the mempool, there is potential for a slight optimization by removing transactions +from different lanes in parallel. To achieve this, we would first need to preprocess the list of +transactions to determine the lane of each transaction. However, this optimization has minimal +impact if the committed block contains few transactions. Therefore, we decided to exclude it from +the MVP. + +### Transaction dissemination + +For broadcasting transactions from multiple lanes, we have considered two possible approaches: +1. Reserve $N$ p2p channels for use by the mempool. P2P channels have priorities that we can reuse + as lane priorities. There are a maximum of 256 P2P channels, thus limiting the number of lanes. +2. Continue using the current P2P channel for disseminating transactions and implement logic within + the mempool to select the order of transactions to put in the channel. This option theoretically + allows for an unlimited number of lanes, constrained only by the nodes’ capacity to store the + lane data structures. + +We choose the second approach for its flexibility, allowing us to start with a simple scheduling +algorithm that can be refined over time (see below). Another reason is that on the first option we +would need to initialize channels dynamically (currently there is a fixed list of channels passed as +node info) and assign lanes to channels. 
+ +Before modifying the dissemination logic, we need to refactor the current implementation and the +`Mempool` interface to clearly separate the broadcast goroutine in the mempool reactor from +`CListMempool` that includes the mempool data structures. `CListMempool` provides two methods used +by the broadcast code, `TxsWaitChan() <-chan struct{}` and `TxsFront() *clist.CElement`, which are +just wrappers around the methods `WaitChan` and `Front` of the `CList` implementation. In +particular, `TxsFront` is leaking implementation details outside the `Mempool` interface. + +### Reaping transactions for block creation + +In the current single-lane mempool, the function `ReapMaxBytesMaxGas(maxBytes, maxGas)` collects +transactions in FIFO order from the CList until either reaching `maxBytes` or `maxGas` (both of +these values are consensus parameters). + +With multiple CLists, we need to collect transactions from higher-priority lanes first, also in FIFO +order, continuing with successive lanes in the `sortedLanes` array, that is, in decreasing priority +order, and breaking the iteration when reaching `maxBytes` or `maxGas`. Note that the mempool is +locked during `ReapMaxBytesMaxGas`, so no transaction will be added or removed from the mempool +during reaping. + +This simple algorithm, though good enough for an MVP, does not guarantee that low-priority lanes +will not starve. That is why we prefer to implement one that is starvation-free, as explained in the +next section. It could be the same algorithm or similar to the one used for transaction +dissemination. + +### Prioritization logic + +For transaction dissemination and for reaping transactions for creating blocks we want a scheduling +algorithm that satisfies the properties "Priorities between classes" and "FIFO ordering per class". +This means that it must support selection by _weight_, ensuring each lane gets a fraction of the P2P +channel capacity proportional to its priority. 
Additionally, we want the algorithm to be _fair_ to +prevent starvation of low-priority lanes. + +A first option that meets these criteria is the current prioritization algorithm on the P2P reactor, +which we could easily reimplement in the mempool. It works as follows: +- On each P2P channel, the variable `recentlySent` keeps track of how many bytes were recently sent + over the channel. Every time data is sent, increase `recentlySent` with the number of bytes + written to the channel. Every 2 seconds, decrease `recentlySent` by 20% on all channels (these + values are fixed). +- When sending the next message, [pick the channel][selectChannelToGossipOn] whose ratio + `recentlySent/Priority` is the least. + +From the extensive research in operating systems and networking, we can pick for the MVP an existing +scheduling algorithm that meets these requirements and is straightforward to implement, such as a +variant of the [Weighted Round Robin][wrr] (WRR) algorithm. We choose this option as it gives us +more flexibility for improving the logic in the future, for example, by adding a mechanism for +congestion control or by allowing some lanes to have customized, non-FIFO scheduling algorithms. + +### Validating lanes of received transactions + +Transactions are transmitted without lane information because peers cannot be trusted to send the +correct data. A node may take advantage of the network by sending lower-priority transactions before +higher-priority ones. Although the receiving node could easily verify the priority of a transaction +when it calls `CheckTx`, it cannot detect if a peer is sending transactions out of order over a +single P2P channel. For the moment, we leave out of the MVP any mechanism for detecting and possibly +penalizing nodes for this kind of behaviour. 
+ +## Alternative designs + +### Identify lanes by their priorities + +In the initial prototype we identified lanes by their priorities, meaning each priority could only +be assigned to a single lane. This simplified approach proved too restrictive for applications. To +address this, now we identify lanes by `string` names, decoupling lane identifiers from their +priorities. + +### One CList for all lanes + +We briefly considered sharing one CList for all lanes, changing the internal logic of CList to +accommodate lane requirements. However, this design significantly increases code complexity, +particularly in the transaction dissemination logic. + +### One P2P channel per lane + +Since P2P channels already have a built-in priority mechanism, they present a reasonable option to +implement transaction dissemination from lanes. By assigning a P2P channel to each lane, we could +simply append new transactions to their respective channels and allow the P2P layer to manage the +order of transmission. We decided against this option mainly because the prioritization logic cannot +be easily modified without altering the P2P code, potentially affecting other non-mempool channels. + +Another drawback is that this option imposes a limit to the number of P2P channels. Channels use a +byte as an ID, and the current distribution among all reactors goes up to channel `0x61`. For +example, the current mempool’s P2P channel ID is `0x30`, which would serve as the default lane. We +could reserve a range of channels for the mempool, such as starting from channel ID `0x80` and above +(all channels with the most significant bit set to 1). This would provide a maximum of 128 lanes, +which should suffice for most users. + +Nodes would also need to agree on the channel assignments during the P2P handshake. Currently, one +of the conditions for the handshake to succeed is that there must exist an intersection of P2P +channels. 
Since lanes are defined in the application logic, the nodes only need to agree on the +application version, as it already happens in the current implementation. + +### Duality lane/priority + +The duality lane/priority could introduce a powerful indirection. The app could just define the lane +of a transaction in `CheckTx`, but the priority of the lane itself could be configured (and +fine-tuned) elsewhere. For example, by the app itself or by node operators. The proposed design for +the MVP does not support this pattern. + +### Custom configuration per lane + +A straightforward, future improvement that we leave for after the MVP is to allow customized +configuration of the lanes instead of sharing the current mempool configuration among lanes. The +application would need to define new configuration values per lane and pass them to CometBFT during +the handshake. + +### Where to define lanes and priorities + +We have considered two alternative approaches for _where_ to configure lanes and priorities: +1. In `config.toml` or `app.toml`. We have discarded this option as it does not make sense for + different nodes to have different lane configurations. The properties defined in the + specification above are end-to-end, and so, the lane configuration has to be consistent across + the network. +1. In `ConsensusParams`. There are several disadvantages with this approach. If we allow changing + lane information via `ConsensusParams`, the mempool would need to update lanes dynamically. The + updating process would be very complex and cumbersome, and not really appealing for an MVP. Two + governance proposals would be required to pass to update the lane definitions. A first proposal + would be required for upgrading the application, because the lane classification logic (thus the + application's code) needs to know the lane configuration beforehand. And a second proposal would + be needed for upgrading the lanes via `ConsensusParams`. 
While it is true that SDK applications + could pass a governance proposal with both elements together, it would be something to _always_ + do, and it is not clear what the situation would be for non-SDK applications. + + Also, it is not clear in which order the proposals should apply. The community should be careful + not to break performance between the passing of both proposals. The `gov` module could be + modified to allow the two changes to be shipped in the same gov proposal, but this does not seem + a feasible solution. + +Moreover, these two alternatives have a common problem which is how to deal with nodes that are +late, possibly having lane definitions that do not match with those of nodes at the latest heights. + +## Consequences + +### Positive + +- Application developers will be able to better predict when transactions will be disseminated and + reaped from the mempool to be included in a block. This has direct impact on block creation and + transaction latency. +- The mempool will be able to offer Quality of Service (QoS) guarantees, which does not exist in the + current implementation. This MVP will serve as a base to further extend QoS in future iterations + of lanes. +- Applications that are unaware of this feature, and therefore not classifying transactions in + `CheckTx`, will observe the same behavior from the mempool as the current implementation. + +### Negative + +- The best-effort FIFO ordering that currently applies to all transactions may be broken when using + multiple lanes, which will apply FIFO ordering per lane. Since FIFO ordering is important within + the same class of transactions, we expect this will not be a real problem. +- Increased complexity in the logic of `CheckTx` (ante handlers) in order to classify transactions, + with a possibility of introducing bugs in the classification logic. + +### Neutral + +- Lanes are optional. Current applications do not need to make any change to their code. 
Future + applications will not be forced to use the lanes feature. +- Lanes will preserve the "FIFO ordering of transactions" property within the same class (with a + best effort approach, as the current implementation). +- The proposed prioritization algorithm (WRR) for transaction dissemination and block creation is + fair, so low-priority transactions will not get stuck in the mempool for long periods of time, and + will get included in blocks proportionally to their priorities. + +## References + +- [ADR067][adr067], Priority mempool +- [Docstring][reapmaxbytesmaxgas] of `ReapMaxBytesMaxGas` +- Solana's [Gulf Stream][gulf-stream] +- Solana's [Priority Fees][solana-prio-fees] +- Solana's [priority fee pricing][prio-fee-price] +- Cosmos SDK's [gas prices][sdk-gas-prices] +- Cosmos SDK's [application-side mempool][sdk-app-mempool] +- Skip's [Block SDK][skip-block-sdk] +- P2P's [selectChannelToGossipOn][selectChannelToGossipOn] function +- [Weighted Round Robin][wrr] +- [Cosmovisor][cosmovisor] +- [Mempool's cache][cache] + +[cache]: https://github.com/cometbft/cometbft/blob/main/spec/mempool/cache.md +[adr067]: ./tendermint-core/adr-067-mempool-refactor.md +[reapmaxbytesmaxgas]: https://github.com/cometbft/cometbft/blob/v0.37.6/mempool/v1/mempool.go#L315-L324 +[gulf-stream]: https://medium.com/solana-labs/gulf-stream-solanas-mempool-less-transaction-forwarding-protocol-d342e72186ad +[solana-prio-fees]: https://solana.com/developers/guides/advanced/how-to-use-priority-fees +[prio-fee-price]: https://solana.com/developers/guides/advanced/how-to-use-priority-fees +[sdk-gas-prices]: https://docs.cosmos.network/v0.50/learn/beginner/tx-lifecycle#gas-and-fees +[sdk-app-mempool]: https://docs.cosmos.network/v0.47/build/building-apps/app-mempool +[skip-block-sdk]: https://github.com/skip-mev/block-sdk/blob/v2.1.3/README.md +[cosmovisor]: https://docs.cosmos.network/v0.50/build/tooling/cosmovisor +[selectChannelToGossipOn]: 
https://github.com/cometbft/cometbft/blob/6d3ff343c2d5a06e7522344d1a4e17d24ce982ad/p2p/conn/connection.go#L542-L563 +[wrr]: https://en.wikipedia.org/wiki/Weighted_round_robin +[based-preconfs]: https://ethresear.ch/t/based-preconfirmations/17353 +[Oconfs]: https://www.reddit.com/r/btc/comments/vxr3qf/explaining_0_conf_transactions/ +[tracking-issue]: https://github.com/cometbft/cometbft/issues/2803 From f80606a91cfbc6aa6b7a9cfbef6738e403d1bc30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hern=C3=A1n=20Vanzetto?= <15466498+hvanz@users.noreply.github.com> Date: Wed, 2 Oct 2024 08:50:21 +0200 Subject: [PATCH 06/14] fix(test): Initialise all mempools with lane info from app (#4229) There are reports of some test failing because of non-existing lanes (eg. https://github.com/cometbft/cometbft/actions/runs/11121278617/job/30900042983) This PR always initialises mempools in tests with lane information fetched from the app. --- internal/consensus/byzantine_test.go | 3 ++- internal/consensus/common_test.go | 16 +++++++--------- internal/consensus/mempool_test.go | 12 +++++++----- internal/consensus/pbts_test.go | 1 + internal/consensus/reactor_test.go | 3 ++- internal/consensus/replay_test.go | 4 ++-- internal/consensus/state_test.go | 5 +++++ node/node_test.go | 18 ++++++++++++++---- test/fuzz/mempool/checktx.go | 13 ++++++++++++- test/fuzz/tests/mempool_test.go | 12 +++++++++++- 10 files changed, 63 insertions(+), 24 deletions(-) diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index c83385e84e..89fec5dc76 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -71,9 +71,10 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { proxyAppConnMem := proxy.NewAppConnMempool(abcicli.NewLocalClient(mtx, app), proxy.NopMetrics()) // Make Mempool + _, lanesInfo := fetchAppInfo(app) mempool := mempl.NewCListMempool(config.Mempool, proxyAppConnMem, - nil, + lanesInfo, state.LastBlockHeight, 
mempl.WithPreCheck(sm.TxPreCheck(state)), mempl.WithPostCheck(sm.TxPostCheck(state))) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index d764cb6ac5..ac074d2eeb 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -432,12 +432,9 @@ func subscribeToVoterBuffered(cs *State, addr []byte) <-chan cmtpubsub.Message { // ------------------------------------------------------------------------------- // application -func fetchAppInfo(t *testing.T, app abci.Application) (*abci.InfoResponse, *mempl.LanesInfo) { - t.Helper() - resp, err := app.Info(context.Background(), proxy.InfoRequest) - require.NoError(t, err) - lanesInfo, err := mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane) - require.NoError(t, err) +func fetchAppInfo(app abci.Application) (*abci.InfoResponse, *mempl.LanesInfo) { + resp, _ := app.Info(context.Background(), proxy.InfoRequest) + lanesInfo, _ := mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane) return resp, lanesInfo } @@ -446,7 +443,8 @@ func fetchAppInfo(t *testing.T, app abci.Application) (*abci.InfoResponse, *memp func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State { config := test.ResetTestRoot("consensus_state_test") - return newStateWithConfig(config, state, pv, app, nil) + _, lanesInfo := fetchAppInfo(app) + return newStateWithConfig(config, state, pv, app, lanesInfo) } func newStateWithConfig( @@ -852,7 +850,7 @@ func randConsensusNet(t *testing.T, nValidators int, testName string, tickerFunc } ensureDir(filepath.Dir(thisConfig.Consensus.WalFile())) // dir for wal app := appFunc() - _, lanesInfo := fetchAppInfo(t, app) + _, lanesInfo := fetchAppInfo(app) vals := types.TM2PB.ValidatorUpdates(state.Validators) _, err := app.InitChain(context.Background(), &abci.InitChainRequest{Validators: vals}) require.NoError(t, err) @@ -911,7 +909,7 @@ func randConsensusNetWithPeers( } app := appFunc(path.Join(config.DBDir(), 
fmt.Sprintf("%s_%d", testName, i))) - _, lanesInfo := fetchAppInfo(t, app) + _, lanesInfo := fetchAppInfo(app) vals := types.TM2PB.ValidatorUpdates(state.Validators) if _, ok := app.(*kvstore.Application); ok { // simulate handshake, receive app version. If don't do this, replay test will fail diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 0e5648f322..458eebbe2d 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -29,7 +29,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { config.Consensus.CreateEmptyBlocks = false state, privVals := randGenesisState(1, nil) app := kvstore.NewInMemoryApplication() - resp, lanesInfo := fetchAppInfo(t, app) + resp, lanesInfo := fetchAppInfo(app) state.AppHash = resp.LastBlockAppHash cs := newStateWithConfig(config, state, privVals[0], app, lanesInfo) assertMempool(cs.txNotifier).EnableTxsAvailable() @@ -52,7 +52,9 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { config.Consensus.CreateEmptyBlocksInterval = ensureTimeout state, privVals := randGenesisState(1, nil) app := kvstore.NewInMemoryApplication() - resp, lanesInfo := fetchAppInfo(t, app) + resp, lanesInfo := fetchAppInfo(app) + require.NotNil(t, resp) + require.NotNil(t, lanesInfo) state.AppHash = resp.LastBlockAppHash cs := newStateWithConfig(config, state, privVals[0], app, lanesInfo) @@ -72,7 +74,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) { config.Consensus.CreateEmptyBlocks = false state, privVals := randGenesisState(1, nil) app := kvstore.NewInMemoryApplication() - _, lanesInfo := fetchAppInfo(t, app) + _, lanesInfo := fetchAppInfo(app) cs := newStateWithConfig(config, state, privVals[0], app, lanesInfo) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round @@ -121,7 +123,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { blockDB := dbm.NewMemDB() stateStore := sm.NewStore(blockDB, 
sm.StoreOptions{DiscardABCIResponses: false}) app := kvstore.NewInMemoryApplication() - _, lanesInfo := fetchAppInfo(t, app) + _, lanesInfo := fetchAppInfo(app) cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB, lanesInfo) err := stateStore.Save(state) require.NoError(t, err) @@ -148,7 +150,7 @@ func TestMempoolRmBadTx(t *testing.T) { app := kvstore.NewInMemoryApplication() blockDB := dbm.NewMemDB() stateStore := sm.NewStore(blockDB, sm.StoreOptions{DiscardABCIResponses: false}) - _, lanesInfo := fetchAppInfo(t, app) + _, lanesInfo := fetchAppInfo(app) cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB, lanesInfo) err := stateStore.Save(state) require.NoError(t, err) diff --git a/internal/consensus/pbts_test.go b/internal/consensus/pbts_test.go index 3fb198c873..b31caccf1f 100644 --- a/internal/consensus/pbts_test.go +++ b/internal/consensus/pbts_test.go @@ -581,6 +581,7 @@ func TestPBTSEnableHeight(t *testing.T) { Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT, }, nil) app.On("Commit", mock.Anything, mock.Anything).Return(&abci.CommitResponse{}, nil).Maybe() + app.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() cs, vss := randStateWithAppImpl(numValidators, app, c) height, round, chainID := cs.Height, cs.Round, cs.state.ChainID diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index 82c041e790..eb31d633b0 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -163,9 +163,10 @@ func TestReactorWithEvidence(t *testing.T) { proxyAppConnMem := proxy.NewAppConnMempool(abcicli.NewLocalClient(mtx, app), proxy.NopMetrics()) // Make Mempool + _, lanesInfo := fetchAppInfo(app) mempool := mempl.NewCListMempool(config.Mempool, proxyAppConnMem, - nil, + lanesInfo, state.LastBlockHeight, mempl.WithMetrics(memplMetrics), mempl.WithPreCheck(sm.TxPreCheck(state)), diff --git a/internal/consensus/replay_test.go 
b/internal/consensus/replay_test.go index 2a7dab26df..e0859b4a9a 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -77,7 +77,7 @@ func startNewStateAndWaitForBlock( privValidator, err := loadPrivValidator(consensusReplayConfig) require.NoError(t, err) app := kvstore.NewInMemoryApplication() - _, lanesInfo := fetchAppInfo(t, app) + _, lanesInfo := fetchAppInfo(app) cs := newStateWithConfigAndBlockStore( consensusReplayConfig, state, @@ -187,7 +187,7 @@ LOOP: privValidator, err := loadPrivValidator(consensusReplayConfig) require.NoError(t, err) app := kvstore.NewInMemoryApplication() - _, lanesInfo := fetchAppInfo(t, app) + _, lanesInfo := fetchAppInfo(app) cs := newStateWithConfigAndBlockStore( consensusReplayConfig, state, diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 19d9881804..3ef05aa21d 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -2083,6 +2083,7 @@ func TestProcessProposalAccept(t *testing.T) { } m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ProcessProposalResponse{Status: status}, nil) m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.PrepareProposalResponse{}, nil).Maybe() + m.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() cs1, _ := randStateWithApp(4, m) height, round := cs1.Height, cs1.Round @@ -2137,6 +2138,7 @@ func TestExtendVoteCalledWhenEnabled(t *testing.T) { } m.On("Commit", mock.Anything, mock.Anything).Return(&abci.CommitResponse{}, nil).Maybe() m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.FinalizeBlockResponse{}, nil).Maybe() + m.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() height := int64(1) if !testCase.enabled { height = 0 @@ -2221,6 +2223,7 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { }, nil) m.On("FinalizeBlock", mock.Anything, 
mock.Anything).Return(&abci.FinalizeBlockResponse{}, nil).Maybe() m.On("Commit", mock.Anything, mock.Anything).Return(&abci.CommitResponse{}, nil).Maybe() + m.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() cs1, vss := randStateWithApp(4, m) height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID cs1.state.ConsensusParams.Feature.VoteExtensionsEnableHeight = cs1.Height @@ -2306,6 +2309,7 @@ func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.VerifyVoteExtensionResponse{Status: abci.VERIFY_VOTE_EXTENSION_STATUS_ACCEPT}, nil) m.On("Commit", mock.Anything, mock.Anything).Return(&abci.CommitResponse{}, nil).Maybe() m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.FinalizeBlockResponse{}, nil) + m.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() cs1, vss := randStateWithApp(4, m) height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID @@ -2408,6 +2412,7 @@ func TestFinalizeBlockCalled(t *testing.T) { r := &abci.FinalizeBlockResponse{AppHash: []byte("the_hash")} m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(r, nil).Maybe() m.On("Commit", mock.Anything, mock.Anything).Return(&abci.CommitResponse{}, nil).Maybe() + m.On("Info", mock.Anything, mock.Anything).Return(&abci.InfoResponse{}, nil).Maybe() cs1, vss := randStateWithApp(4, m) height, round, chainID := cs1.Height, cs1.Round, cs1.state.ChainID diff --git a/node/node_test.go b/node/node_test.go index 3fc9201266..dd041353e5 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -288,7 +288,8 @@ func TestCreateProposalBlock(t *testing.T) { config := test.ResetTestRoot("node_create_proposal") defer os.RemoveAll(config.RootDir) - cc := proxy.NewLocalClientCreator(kvstore.NewInMemoryApplication()) + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(cc, 
proxy.NopMetrics()) err := proxyApp.Start() require.NoError(t, err) @@ -311,10 +312,14 @@ func TestCreateProposalBlock(t *testing.T) { proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool + resp, err := app.Info(context.Background(), proxy.InfoRequest) + require.NoError(t, err) + lanesInfo, err := mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane) + require.NoError(t, err) memplMetrics := mempl.NopMetrics() mempool := mempl.NewCListMempool(config.Mempool, proxyApp.Mempool(), - nil, + lanesInfo, state.LastBlockHeight, mempl.WithMetrics(memplMetrics), mempl.WithPreCheck(sm.TxPreCheck(state)), @@ -393,7 +398,8 @@ func TestMaxProposalBlockSize(t *testing.T) { config := test.ResetTestRoot("node_create_proposal") defer os.RemoveAll(config.RootDir) - cc := proxy.NewLocalClientCreator(kvstore.NewInMemoryApplication()) + app := kvstore.NewInMemoryApplication() + cc := proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.NoError(t, err) @@ -412,10 +418,14 @@ func TestMaxProposalBlockSize(t *testing.T) { proposerAddr, _ := state.Validators.GetByIndex(0) // Make Mempool + resp, err := app.Info(context.Background(), proxy.InfoRequest) + require.NoError(t, err) + lanesInfo, err := mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane) + require.NoError(t, err) memplMetrics := mempl.NopMetrics() mempool := mempl.NewCListMempool(config.Mempool, proxyApp.Mempool(), - nil, + lanesInfo, state.LastBlockHeight, mempl.WithMetrics(memplMetrics), mempl.WithPreCheck(sm.TxPreCheck(state)), diff --git a/test/fuzz/mempool/checktx.go b/test/fuzz/mempool/checktx.go index e508b10ac0..707ce68b10 100644 --- a/test/fuzz/mempool/checktx.go +++ b/test/fuzz/mempool/checktx.go @@ -1,6 +1,8 @@ package reactor import ( + "context" + "github.com/cometbft/cometbft/abci/example/kvstore" "github.com/cometbft/cometbft/config" mempl "github.com/cometbft/cometbft/mempool" @@ -20,7 +22,16 @@ func init() { cfg := 
config.DefaultMempoolConfig() cfg.Broadcast = false - mempool = mempl.NewCListMempool(cfg, appConnMem, nil, 0) + + resp, err := app.Info(context.Background(), proxy.InfoRequest) + if err != nil { + panic(err) + } + lanesInfo, err := mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane) + if err != nil { + panic(err) + } + mempool = mempl.NewCListMempool(cfg, appConnMem, lanesInfo, 0) } func Fuzz(data []byte) int { diff --git a/test/fuzz/tests/mempool_test.go b/test/fuzz/tests/mempool_test.go index 2952a3c2cf..613ed4c903 100644 --- a/test/fuzz/tests/mempool_test.go +++ b/test/fuzz/tests/mempool_test.go @@ -3,6 +3,7 @@ package tests import ( + "context" "testing" abciclient "github.com/cometbft/cometbft/abci/client" @@ -10,6 +11,7 @@ import ( "github.com/cometbft/cometbft/config" cmtsync "github.com/cometbft/cometbft/libs/sync" mempl "github.com/cometbft/cometbft/mempool" + "github.com/cometbft/cometbft/proxy" ) func FuzzMempool(f *testing.F) { @@ -24,7 +26,15 @@ func FuzzMempool(f *testing.F) { cfg := config.DefaultMempoolConfig() cfg.Broadcast = false - mp := mempl.NewCListMempool(cfg, conn, nil, 0) + resp, err := app.Info(context.Background(), proxy.InfoRequest) + if err != nil { + panic(err) + } + lanesInfo, err := mempl.BuildLanesInfo(resp.LanePriorities, resp.DefaultLane) + if err != nil { + panic(err) + } + mp := mempl.NewCListMempool(cfg, conn, lanesInfo, 0) f.Fuzz(func(_ *testing.T, data []byte) { _, _ = mp.CheckTx(data, "") From d3d4fd65a27d14f195417c781b24bfee822fcc13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hern=C3=A1n=20Vanzetto?= <15466498+hvanz@users.noreply.github.com> Date: Thu, 3 Oct 2024 12:16:21 +0200 Subject: [PATCH 07/14] test(mempool): Add benchmarks for blocking iterator (#4181) As per the title. 
--- - [ ] Tests written/updated - [ ] Changelog entry added in `.changelog` (we use [unclog](https://github.com/informalsystems/unclog) to manage our changelog) - [ ] Updated relevant documentation (`docs/` or `spec/`) and code comments --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- mempool/bench_test.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/mempool/bench_test.go b/mempool/bench_test.go index db14228dbe..e63f7bb90f 100644 --- a/mempool/bench_test.go +++ b/mempool/bench_test.go @@ -2,6 +2,7 @@ package mempool import ( "context" + "strconv" "sync" "sync/atomic" "testing" @@ -153,9 +154,9 @@ func BenchmarkUpdateRemoteClient(b *testing.B) { } } -// Benchmarks the time it takes an iterator to access all transactions in the -// mempool. -func BenchmarkIterator(b *testing.B) { +// Benchmarks the time it takes a blocking iterator to access all transactions +// in the mempool. +func BenchmarkBlockingIterator(b *testing.B) { app := kvstore.NewInMemoryApplication() cc := proxy.NewLocalClientCreator(app) mp, cleanup := newMempoolWithApp(cc) @@ -169,7 +170,7 @@ func BenchmarkIterator(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() - iter := NewBlockingIterator(context.TODO(), mp, "bench") + iter := NewBlockingIterator(context.TODO(), mp, b.Name()) b.StartTimer() // Iterate until all txs in the mempool are accessed. @@ -181,9 +182,9 @@ func BenchmarkIterator(b *testing.B) { } } -// Benchmarks the time it takes multiple concurrent iterators to access all -// transactions in the mempool. -func BenchmarkConcurrentkIterators(b *testing.B) { +// Benchmarks the time it takes multiple concurrent blocking iterators to access +// all transactions in the mempool. 
+func BenchmarkConcurrentkBlockingIterators(b *testing.B) { app := kvstore.NewInMemoryApplication() cc := proxy.NewLocalClientCreator(app) mp, cleanup := newMempoolWithApp(cc) @@ -203,7 +204,7 @@ func BenchmarkConcurrentkIterators(b *testing.B) { // Create concurrent iterators. iters := make([]Iterator, numIterators) for j := 0; j < numIterators; j++ { - iters[j] = NewBlockingIterator(context.TODO(), mp, "bench") + iters[j] = NewBlockingIterator(context.TODO(), mp, strconv.Itoa(j)) } wg := sync.WaitGroup{} wg.Add(numIterators) From 160761dda674b2c80d130d41b9262ad7b02ca55c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hern=C3=A1n=20Vanzetto?= <15466498+hvanz@users.noreply.github.com> Date: Fri, 4 Oct 2024 12:08:40 +0200 Subject: [PATCH 08/14] docs(mempool/lanes): Create guide for app developers (#4185) Closes #4138 Formatted file: [docs/guides/app-dev/mempool-lanes.md](https://github.com/cometbft/cometbft/blob/hvanz/docs-guides-lanes-4138/docs/guides/app-dev/mempool-lanes.md) --------- Co-authored-by: Jasmina Malicevic Co-authored-by: Anton Kaliaev Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> Co-authored-by: Sergio Mena --- docs/guides/app-dev/README.md | 1 + docs/guides/app-dev/mempool-lanes.md | 163 +++++++++++++++++++++++++++ 2 files changed, 164 insertions(+) create mode 100644 docs/guides/app-dev/mempool-lanes.md diff --git a/docs/guides/app-dev/README.md b/docs/guides/app-dev/README.md index abde0fc08b..d977e2fb35 100644 --- a/docs/guides/app-dev/README.md +++ b/docs/guides/app-dev/README.md @@ -10,3 +10,4 @@ parent: - [Getting Started](getting-started.md) - [Indexing transactions](indexing-transactions.md) - [Application Architecture Guide](app-architecture.md) +- [Mempool Lanes](mempool-lanes.md) diff --git a/docs/guides/app-dev/mempool-lanes.md b/docs/guides/app-dev/mempool-lanes.md new file mode 100644 index 0000000000..96838d0354 --- /dev/null +++ b/docs/guides/app-dev/mempool-lanes.md @@ -0,0 +1,163 @@ +# Mempool Lanes + +The 
Lanes feature allows applications to classify and prioritise transactions for providing Quality +of Service guarantees to the mempool ([ADR-118](adr)). + +This guide provides instructions, along with a set of best practices and rules of thumb to help +setting up and using Lanes within your application. + +## How to set up lanes in an application + +Application developers that want to make use of Lanes must pre-define a list of lane names and their +priorities and then populate specific fields in `Info` and `CheckTx` responses. In contrast, if +Lanes are not to be used by the application, no modifications to the code are necessary. + +We will explain with an example taken from our implementation of Lanes in the `kvstore` application +(in `abci/example/kvstore/kvstore.go`). + +### Define lanes and their priorities + +First, the application should keep a list of lane IDs (of type `string`) and their priorities (of +type `uint32`). In this example we store it as a map in the `Application` struct. And we define as a +constant the lane ID used as default when assigning lanes to transactions. + +```go +const defaultLane = "C" + +type Application struct { + ... + lanePriorities map[string]uint32 +} + +func NewApplication(...) *Application { + ... + return &Application{ + ... + lanePriorities: map[string]uint32{ + "A": 100, + "B": 50, + defaultLane: 10, + "D": 1, + }, + } +} +``` + +### Handling Info requests + +When a CometBFT node starts, it performs a handshake with the application by sending an `Info` +request. This process allows CometBFT to retrieve essential data from the application to initialize +and configure itself. + +Upon receiving an `Info` request, the application must reply with the lane IDs and +their priorities in the `LanePriorities` field, and the default lane in the `DefaultLane` field. The +default lane ID must be a key in the map `LanePriorities`. 
+ +```go +func (app *Application) Info(ctx context.Context, req *types.InfoRequest) (*types.InfoResponse, error) { + ... + return &types.InfoResponse{ + ... + LanePriorities: app.lanePriorities, + DefaultLane: defaultLane, + }, nil +} +``` + +If the application does not populate these fields, CometBFT will use a single, default lane. + +### Handling CheckTx requests + +Upon receiving a `CheckTx` request for validating a transaction, the application must reply with the +lane that it assigns to the transaction in the `LaneId` field. The mempool will only use the lane if +(1) the transaction is valid and (2) the transaction is being validated for the first time at the +local node, that is, when `req.Type` equals `types.CHECK_TX_TYPE_CHECK` (not when rechecking). +Otherwise, the mempool will ignore the `LaneId` field. + +```go +func (app *Application) CheckTx(ctx context.Context, req *types.CheckTxRequest) (*types.CheckTxResponse, error) { + ... + laneID := assignLane(req.Tx) + return &types.CheckTxResponse{Code: CodeTypeOK, GasWanted: 1, LaneId: laneID}, nil +} +``` +In this example, `assignLane` is a deterministic function that, given the content of a transaction, +returns a valid lane ID. The lane ID must be one of the keys in the `app.lanePriorities` map, and it +may be the default lane if no other lane is chosen to be assigned. + +## Best practices + +### Transaction classification and ordering + +- **Independent transactions**: Transactions can only be classified into different lanes if they are + independent of each other. If there is a relationship or dependency between transactions (e.g + transaction A must be executed before transaction B), both must be placed in the same lane. + Failing to do so may result in an incorrect ordering, where B could be processed and executed + before A. +- **Ordering across lanes**: Transactions in separate lanes are not guaranteed to maintain the order + in which they are processed and disseminated to other nodes. 
Developers should be aware that + classification in lanes can result in transactions being committed to different blocks and executed + in different order. +- **Immutable lane assignment**: Once a transaction is assigned to a lane upon entering the mempool, + its lane cannot be changed, even during rechecking. +- **Execution timing**: The time gap between the execution of two transactions is unpredictable, + especially if they are in lanes with significantly different priority levels. + +### Number of lanes + +- **One lane minimum**: Setting up one lane replicates the behavior of the mempool before lanes were + introduced. The same behaviour is obtained when the application does not set up lanes: the mempool + will assign all transactions to the single, default lane. The latter is transparent to users. +- **Start small**: We recommend starting with fewer than 5 or 10 lanes and test them thoroughly on a + testnet. You can gradually introduce more lanes as necessary once performance and behavior are + validated. +- **Constraints**: Lanes are identified by strings. In theory, there is no limit to the number of + lanes that can be defined. However, keep in mind that both memory and CPU usage will increase in + proportion to the number of lanes. + +### Lane priorities + +- **Priority values**: Lane priorities are values of type `uint32`. Valid priorities range from 1 to + `math.MaxUint32`. Priority 0 is reserved for cases where there are no lanes to assign, such as + invalid transactions or applications that do not utilize lanes. However, if the application + returns an empty `lane_id` on `CheckTx`, the mempool will assign the default lane as specified in + `InfoResponse`. +- **Fair scheduling**: Lanes implement a scheduling algorithm for picking transactions + for dissemination to peers and for creating blocks. 
The algorithm is designed to be + _starvation-free_, ensuring that even transactions from lower-priority lanes will eventually be + disseminated and included in blocks. It is also _fair_, because it picks transactions across all + lanes by interleaving them when possible. +- **Equal priorities**: Multiple lanes are allowed to have the same priority. This could help + prevent one class of transaction monopolizing the entire mempool. When lanes share the same + priority, the order in which they are processed is undefined. However, transactions within the + same lane are locally treated in FIFO order as usual. + +### Lane capacity + +- **Capacity distribution**: The mempool's capacity is divided evenly among the lanes, with each + lane's capacity being constrained by both the number of transactions and the total transaction + size in bytes. Once either limit is reached, no further transactions will be accepted into that + lane. +- **Preventing spam**: Lane capacity helps mitigate the risk of large transactions flooding the + network. For optimal performance, large transactions should be assigned to lower-priority lanes + whenever possible. +- **Adjusting capacities**: If you find that the capacity of a lane is insufficient, you still have + the option of increasing the total mempool size, which will proportionally increase the capacity + of all lanes. In future releases, we may introduce more granular control over lane capacities if + needed. + +### Network setup + +- **Limited resources**: Lanes are especially useful in networks with constrained resources, such as + block size, mempool capacity, or network throughput. In such environments, lanes ensure + higher-priority transactions will be prioritized during dissemination and block inclusion. In + networks without these limitations, lanes will not significantly affect the behavior compared to + nodes that do not implement lanes. 
+- **Consistent setup**: To fully benefit from lanes, all nodes in the network should implement the + same lanes configuration and transaction classification logic. If some nodes do not support lanes, + the benefits of lane prioritization will not be observed, because transaction ordering during + dissemination and processing will be inconsistent across nodes. While mixing nodes with and + without lanes does not affect network correctness, consistent lane configuration is strongly + recommended for improved performance and consistent behavior. + +[adr]: ../../../docs/references/architecture/adr-118-mempool-lanes.md \ No newline at end of file From 196cfb6b96a90aec13ac9466279efa8a04956cbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hern=C3=A1n=20Vanzetto?= <15466498+hvanz@users.noreply.github.com> Date: Fri, 15 Nov 2024 16:50:37 +0100 Subject: [PATCH 09/14] spec(mempool/dog): Add spec of Flood gossip protocol (#4476) Contributes to #4318 The DOG spec is based on Flood's spec (#4477) [README.md](https://github.com/cometbft/cometbft/tree/hvanz/flood-spec-4318/spec/mempool/gossip/) --------- Co-authored-by: Andy Nogueira Co-authored-by: Daniel Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- spec/mempool/gossip/Makefile | 9 + spec/mempool/gossip/README.md | 46 +++++ spec/mempool/gossip/flood.md | 280 ++++++++++++++++++++++++++ spec/mempool/gossip/mempool.md | 237 ++++++++++++++++++++++ spec/mempool/gossip/p2p.md | 208 +++++++++++++++++++ spec/mempool/gossip/quint/flood.qnt | 109 ++++++++++ spec/mempool/gossip/quint/mempool.qnt | 74 +++++++ spec/mempool/gossip/quint/p2p.qnt | 94 +++++++++ spec/mempool/gossip/quint/spells.qnt | 135 +++++++++++++ 9 files changed, 1192 insertions(+) create mode 100644 spec/mempool/gossip/Makefile create mode 100644 spec/mempool/gossip/README.md create mode 100644 spec/mempool/gossip/flood.md create mode 100644 spec/mempool/gossip/mempool.md create mode 100644 spec/mempool/gossip/p2p.md create mode 100644 
spec/mempool/gossip/quint/flood.qnt create mode 100644 spec/mempool/gossip/quint/mempool.qnt create mode 100644 spec/mempool/gossip/quint/p2p.qnt create mode 100644 spec/mempool/gossip/quint/spells.qnt diff --git a/spec/mempool/gossip/Makefile b/spec/mempool/gossip/Makefile new file mode 100644 index 0000000000..de0d17ac58 --- /dev/null +++ b/spec/mempool/gossip/Makefile @@ -0,0 +1,9 @@ +mds = p2p.md mempool.md flood.md +quints = $(mds:.md=.qnt) + +%.qnt: %.md + lmt $< + +all: $(quints) +.PHONY: all + \ No newline at end of file diff --git a/spec/mempool/gossip/README.md b/spec/mempool/gossip/README.md new file mode 100644 index 0000000000..0afe74fdf3 --- /dev/null +++ b/spec/mempool/gossip/README.md @@ -0,0 +1,46 @@ +# Mempool Gossip + +This directory contains specifications of gossip protocols used by the mempool to disseminate +transactions in the network. + +## Protocols + +- [Flood](flood.md). Currently implemented by CometBFT, Flood is a straightforward gossip protocol + with a focus on rapid transaction propagation. + - Pros: + + Latency: nodes forward transactions to their peers as soon as they receive them, resulting in + the minimum possible latency of decentralised P2P networks. + + Byzantine Fault Tolerance (BFT): flooding the network with messages ensures malicious actors + cannot easily prevent transaction dissemination (i.e., censoring), making it resilient to network disruptions + and attacks. + - Cons: + - Bandwidth: the broadcast nature of Flood results in significant redundancy in message + propagation, leading to exponential increases in bandwidth usage. + +## Specifications with Quint snippets + +These specifications are written in English with code snippets in the [Quint][quint] language, +following the [literate programming paradigm][lit]. The intention is that Quint snippets can be +read as pseudo-code. Moreover, we can automatically generate Quint files from the markdown files. 
+ +Quint allows specs to be executed, tested, and formally verified. For the moment we use it here just +to give structure to the spec documentation and to type-check the definitions. + +To (re-)generate the Quint files: +1. install the [lmt tool][lmt] (see the prerequisites [here][lit]), and +2. run `make`. + +The Flood gossip protocol is self-described in its own [flood](flood.md) spec. It is built on top of +two other specs, which are not strictly needed to understand the protocol: +- [mempool](mempool.md) with definitions of common data structures from the mempool, and +- [p2p](p2p.md) with networking definitions, assumptions, and boilerplate. + +Specs dependencies: +```mermaid +flowchart TB + flood --> mempool --> p2p; +``` + +[quint]: https://quint-lang.org/ +[lit]: https://quint-lang.org/docs/literate +[lmt]: https://github.com/driusan/lmt diff --git a/spec/mempool/gossip/flood.md b/spec/mempool/gossip/flood.md new file mode 100644 index 0000000000..dc8903149b --- /dev/null +++ b/spec/mempool/gossip/flood.md @@ -0,0 +1,280 @@ +# Flood gossip protocol + +Flood is a basic _push_ gossip protocol: every time a node receives a transaction, it forwards (or +"pushes") the transaction to all its peers, except to the peer(s) from which it received the +transaction. + +This protocol is built on top of the [mempool](mempool.md) and [p2p](p2p.md) modules. + +**Table of contents** + - [Messages](#messages) + - [State](#state) + - [Initial state](#initial-state) + - [State transitions (actions)](#state-transitions-actions) + - [Adding transactions to the mempool](#adding-transactions-to-the-mempool) + - [Adding first-time transactions](#adding-first-time-transactions) + - [Handling duplicate transactions](#handling-duplicate-transactions) + - [Handling incoming messages](#handling-incoming-messages) + - [Transaction dissemination](#transaction-dissemination) + - [Properties](#properties) + +> This document was written using the literate programming paradigm. 
Code snippets are written in +> [Quint][quint] and can get "tangled" into a Quint file. + +## Messages + +Nodes communicate only one type of message carrying a full transaction. +```bluespec "messages" += +type Message = + | TxMsg(TX) +``` + +## State + +Flood's state consists of the underlying [mempool](mempool.md) state (variable `mempool`) and +[P2P](p2p.md) state (variables `incomingMsgs` and `peers`). + +Additionally, for each transaction in each node's mempool, we keep track of the peer IDs from whom +the node received the transaction. +```bluespec "state" += +var senders: NodeID -> TxID -> List[NodeID] +``` +We define the senders as a list instead of a set because the DOG protocol needs to know who is the +first sender of a transaction. + +Note that a transaction won't have a sender when it is in the cache but not in the mempool. Senders +are only needed for disseminating (valid) transactions that are in the mempool. + +
+ Auxiliary definitions + +```bluespec "auxstate" += +def Senders(node) = senders.get(node) +``` + +The set of senders of transaction `tx`: +```bluespec "auxstate" += +def sendersOf(node, tx) = + node.Senders().mapGetDefault(hash(tx), List()).listToSet() +``` + +Function `addSender` adds a sender to `tx`'s list of senders (`_txSenders`), if `optionalSender` has +a value that's not already in the list. +```bluespec "auxstate" += +pure def addSender(_txSenders, tx, optionalSender) = + match optionalSender { + | Some(sender) => _txSenders.update(hash(tx), ss => + if (ss.includes(sender)) ss else ss.append(sender)) + | None => _txSenders + } +``` +
+ +## Initial state + +Flood's initial state is the underlying mempool's initial state (`MP_init`) and an empty mapping of +transactions to senders. +```bluespec "actions" += +action init = all { + MP_init, + senders' = NodeIDs.mapBy(n => Map()), +} +``` + +## State transitions (actions) + +These are the state transitions of the system. Note that generic actions are imported from the +[mempool](mempool.md) and [p2p](p2p.md) specs. The missing implementation details (`tryAddTx`, +`handleMessage`, `mkTargetNodes`) are described in the rest of the section. + +1. User-submitted transactions: when a node receives a transaction from a user, it tries to add it + to the mempool. + ```bluespec "steps" += + nondet node = oneOf(nodesInNetwork) + nondet tx = oneOf(AllTxs) + node.receiveTxFromUser(tx, tryAddTx), + ``` + +2. Peer message handling: a node processes messages received from a peer. + ```bluespec "steps" += + nondet node = oneOf(nodesInNetwork) + node.receiveFromPeer(handleMessage), + ``` + +3. Transaction dissemination: a node sends a transaction in its mempool to a subset of target nodes. + ```bluespec "steps" += + nondet node = oneOf(nodesInNetwork) + all { + node.disseminateNextTx(mkTargetNodes, TxMsg), + senders' = senders, + }, + ``` + +4. A node joins the network. + ```bluespec "steps" += + all { + pickNodeAndJoin, + mempool' = mempool, + senders' = senders, + }, + ``` + +5. A node disconnects from the network. + ```bluespec "steps" += + all { + pickNodeAndDisconnect, + mempool' = mempool, + senders' = senders, + } + ``` + +### Adding transactions to the mempool + +A node attempting to add a transaction to its mempool processes the transaction according to whether +it has seen it before, that is, if the transaction exists in the mempool cache. +- A *first-time* transaction is one that the node does not have in its cache. +- A *duplicate* transaction is one that the node has received multiple times, and thus it's cached. 
+ +```bluespec "actions" += +action tryAddTx(node, _incomingMsgs, optionalSender, tx) = + if (not(hash(tx).in(node.Cache()))) + node.tryAddFirstTimeTx(_incomingMsgs, optionalSender, tx) + else + node.processDuplicateTx(_incomingMsgs, optionalSender, tx) +``` +In this action the sender is optional. When there's a sender, it means that the transaction comes +from a peer; otherwise it comes directly from a user. + +#### Adding first-time transactions + +`tryAddFirstTimeTx` attempts to add a first-time transaction `tx` to a +`node`'s mempool: +1. it caches `tx`, +2. if `tx` is valid, it appends `tx` to `txs`, and +3. updates its senders. +```bluespec "actions" += +action tryAddFirstTimeTx(node, _incomingMsgs, optionalSender, tx) = all { + mempool' = mempool.update(node, st => { + cache: st.cache.join(hash(tx)), + txs: if (valid(tx)) st.txs.append(tx) else st.txs, + ...st }), + senders' = senders.update(node, ss => + if (valid(tx)) ss.addSender(tx, optionalSender) else ss), + incomingMsgs' = _incomingMsgs, + peers' = peers, +} +``` + +#### Handling duplicate transactions + +Action `processDuplicateTx` processes a duplicate transaction `tx` by updating the list of senders, +only if `tx` is already in the mempool (`txs`). +```bluespec "actions" += +action processDuplicateTx(node, _incomingMsgs, optionalSender, tx) = all { + senders' = senders.update(node, ss => + if (node.Txs().includes(tx)) ss.addSender(tx, optionalSender) else ss), + mempool' = mempool, + incomingMsgs' = _incomingMsgs, + peers' = peers, +} +``` + +### Handling incoming messages + +Upon receiving a message with transaction `tx` from a peer (i.e., the `sender`), the `node` attempts +to add `tx` to its mempool. 
+```bluespec "actions" += +action handleMessage(node, _incomingMsgs, sender, msg) = + match msg { + | TxMsg(tx) => node.tryAddTx(_incomingMsgs, Some(sender), tx) + } +``` +> The argument `_incomingMsgs` is passed just to update the queues of incoming messages, when +applicable (Flood does not reply with any message but DOG does). + +### Transaction dissemination + +In Flood, a node sends a transaction to all its peers except those who previously sent it. + +`mkTargetNodes` defines the set of peers to whom `node` will send `tx`. It is passed as an argument +to the generic transaction dissemination action. +```bluespec "actions" += +def mkTargetNodes(node, tx) = + node.Peers().exclude(node.sendersOf(tx)) +``` + +## Properties + +Function `txInAllMempools` returns `true` if the given transaction `tx` is in the mempool of all +nodes. +```bluespec "properties" += +def txInAllMempools(tx) = + NodeIDs.forall(n => n.Txs().includes(tx)) +``` + +_**Property**_ If a transaction is in the mempool of any node, then eventually the transaction will +reach the mempool of all nodes (maybe more than once, and assuming transactions are not removed from +mempools). +```bluespec "properties" += +temporal txInPoolGetsDisseminated = + AllTxs.forall(tx => + NodeIDs.exists(node => + node.Txs().includes(tx) implies eventually(txInAllMempools(tx)))) +``` + +_**Invariant**_ If node A sent a transaction `tx` to node B (A is in the list of `tx`'s senders), +then B does not send `tx` to A (the message won't be in A's incoming messages). 
+```bluespec "properties" += +val dontSendBackToSender = + NodeIDs.forall(nodeA => + NodeIDs.forall(nodeB => + AllTxs.forall(tx => + nodeB.sendersOf(tx).contains(nodeA) + implies + not(nodeA.IncomingMsgs().includes((nodeB, TxMsg(tx)))) + ))) +``` + + + +[quint]: https://quint-lang.org/ diff --git a/spec/mempool/gossip/mempool.md b/spec/mempool/gossip/mempool.md new file mode 100644 index 0000000000..9184e3f8f3 --- /dev/null +++ b/spec/mempool/gossip/mempool.md @@ -0,0 +1,237 @@ +# Mempool + +This specification of a mempool defines essential types and data structures needed to keep a list of +pending transactions ("the mempool"), as well as generic actions to disseminate transactions. Those +generic actions are then instantiated with specific functions that define the behaviour of the +gossip protocols. + +The mempool is built on top of a [P2P layer](p2p.md), which declares many definitions found here. + +## Types + +### Transactions + +A transaction is uniquely identified by a string, which represents its content (typically +implemented as an array of bytes). +```bluespec "types" += +type TX = str +``` + +Transactions are validated by an external entity. The validation function must be deterministic. In +the actual implementation, the mempool makes a CheckTx ABCI call to the application, which validates +the transaction. +```bluespec "types" += +pure def valid(tx) = true +``` + +In this simplified specification we model all transactions as valid. To model invalid transactions, +`valid` should be declared as a model parameter (a `const`) and instantiated with a deterministic +function of type `(TX) => bool`. + +### Transaction IDs + +A transaction identifier, computed as the hash of the transaction (typically a short array of +bytes). +```bluespec "types" += +type TxID = str +pure def hash(tx: TX): TxID = tx +``` + +## Parameters + +The set of all possible transactions. 
+```bluespec "params" += +const AllTxs: Set[TX] +``` + +## State + +Each node has a mempool state. +```bluespec "state" += +var mempool: NodeID -> MempoolState +``` + +We define `MempoolState` as a data structure with the following fields. + +#### Cache of already received transaction IDs + +We assume the cache never overflows, i.e., it can grow indefinitely. +```bluespec "mempoolstate" += +cache: Set[TxID], +``` + +#### List of uncommitted or pending transactions ("the mempool") + +This list is used for storing transactions and for picking transactions to disseminate to peers. +```bluespec "mempoolstate" += +txs: List[TX], +``` + +We make the following assumptions about the mempool: +- It does not have a maximum capacity. +- New entries are only appended. We do not model when entries are removed. + +A transaction that is in the `txs` list, must also be in `cache` (assuming an infinite cache), but +not necessarily the inverse. The reason a transaction is in `cache` but not in `txs` is either +because: +- the transaction was initially invalid and never got into `txs`, +- the transaction became invalid after it got in `txs` and thus got evicted when it was revalidated, + or +- the transaction was committed to a block and got removed from `txs`. + +All these scenarios are not modeled here. Then `cache` and `txs` will always have the same content +and one of the two is actually redundant in this spec. + +#### Index to the next transaction to disseminate + +A mempool iterator traverses the entries in `txs` one at a time. +```bluespec "mempoolstate" += +txsIndex: int, +``` +We model transaction dissemination using one dissemination process (`disseminateNextTx`) that +iterates on the list of transactions reading one entry per step, and atomically multicasts one +transaction message to all connected peers. 
+ +In the implementation there is one dissemination process per peer, each with its own iterator (and +thus a separate index per iterator) with a `next()` method to retrieve the next entry in the `txs` +list. If it reaches the end of the list, it blocks until a new entry is added. All iterators read +concurrently from `txs`. + +
+ Auxiliary definitions + +```bluespec "auxstate" += +def Cache(node) = mempool.get(node).cache +def Txs(node) = mempool.get(node).txs +def TxsIndex(node) = mempool.get(node).txsIndex +``` +
</details> + +## Initial state + +The initial state of a mempool: +```bluespec "actions" += +action MP_init = all { + P2P_init, + mempool' = NodeIDs.mapBy(n => initialMempoolState), +} +``` +where: +```bluespec "actions" += +val initialMempoolState = { + cache: Set(), + txs: List(), + txsIndex: 0, +} +``` + +## State transitions (actions) + +### Handling incoming transactions + +Users create transactions and send them to one of the nodes in the network. Nodes receive +transactions either directly from users or in messages from peers. Transactions from users have no +sender. + +Action `receiveTxFromUser` models a `node` receiving transaction `tx` from a user. +```bluespec "actions" += +action receiveTxFromUser(node, tx, _tryAddTx) = + node._tryAddTx(incomingMsgs, None, tx) +``` +The function parameter `_tryAddTx(incomingMsgs, optionalSender, tx)` defines how transactions are +added to the mempool. + +Typically, users send (full) transactions to the node via an RPC endpoint. Users are allowed to +submit the same transaction more than once and to multiple nodes. + +This action is enabled only if the transaction is not in the mempool. In the actual mempool +implementation we have the cache that prevents this scenario. + +### Transaction dissemination + +Action `disseminateNextTx` models a `node` traversing the `txs` list while sending transactions to +its peers. It takes the transaction pointed to by `txsIndex` and atomically sends it to a set of target +peers. + +The following function parameters define to whom `node` will send transactions: +- `_mkTargetNodes(node, tx)` returns the set of peers to which `node` + will send `tx`. +- `_mkTxMsg(tx)` is a wrapper function that returns the specific message + type used by the gossip protocol. +```bluespec "actions" += +action disseminateNextTx(node, _mkTargetNodes, _mkTxMsg) = all { + // Check that the current index is within bounds. 
+ require(node.TxsIndex() < node.Txs().length()), + // Get from the mempool the next transaction to disseminate. + val tx = node.Txs()[node.TxsIndex()] + all { + // Wrap transaction in a message and send it to the target nodes. + incomingMsgs' = + node.multiSend(incomingMsgs, _mkTargetNodes(node, tx), _mkTxMsg(tx)), + // Increase index. + mempool' = mempool.update(node, st => { txsIndex: st.txsIndex + 1, ...st }), + peers' = peers, + } +} +``` + +The index must not exceed the `txs`'s length. This pre-condition models when the iterator is at the +end of the list and it's blocked waiting for a new entry to be appended to the list. + +In the actual implementation, there is a separate goroutine for each peer, so not all transactions +are sent at the same time. + +## Properties + +_**Invariant**_ Transaction lists do not have repeated entries. +```bluespec "properties" += +val uniqueTxsInMempool = + NodeIDs.forall(node => size(node.Txs().listToSet()) == length(node.Txs())) +``` + + diff --git a/spec/mempool/gossip/p2p.md b/spec/mempool/gossip/p2p.md new file mode 100644 index 0000000000..e0d020cf1a --- /dev/null +++ b/spec/mempool/gossip/p2p.md @@ -0,0 +1,208 @@ +# P2P + +This module specifies a P2P layer as needed for the gossip protocols. It includes the definitions of +nodes, peers, network topology, sending messages, nodes joining and leaving the network. + +## Types + +Nodes are identified by a string. +```bluespec "types" +type NodeID = str +``` + +## Parameters + +The set of all possible node IDs, even those that are not initially connected to the network. +```bluespec "params" +const NodeIDs: Set[NodeID] +``` + +Initial network topology. A topology is defined by the set of peers each node has. +```bluespec "params" += +const InitialPeers: NodeID -> Set[NodeID] +``` + +## State + +To model network communication, each node has a queue (a list) of incoming messages. Node A sends a +message to a node B by appending the message to B's queue. 
We use queues to model that messages +arrive in order, as we assume this is guaranteed by the transport layer. Messages have a sender (a +node ID). + +The type variable `msg` can be instantiated on the message types of different protocols. + +```bluespec "state" +var incomingMsgs: NodeID -> List[(NodeID, msg)] +``` + +In the actual implementation, transaction messages are transmitted on the `Mempool` data channel of +the P2P layer. Control messages are usually transmitted on other channels with different priorities. +Here we model a single, reliable channel. + +The dynamic network topology. Each node has a set of peers that is updated when nodes join or leave +the network. + +```bluespec "state" += +var peers: NodeID -> Set[NodeID] +``` + +
+ Auxiliary definitions + +```bluespec "auxstate" += +def IncomingMsgs(node) = incomingMsgs.get(node) +def Peers(node) = peers.get(node) +``` + +Function `multiSend` sends message `msg` to a set of `targetNodes`. It updates a list of incoming +messages `_incomingMsgs`. `targetNodes` can be empty, in which case `_incomingMsgs` will stay the +same. +```bluespec "state" += +pure def multiSend(node, _incomingMsgs, targetNodes, msg) = + _incomingMsgs.updateMultiple(targetNodes, ms => ms.append((node, msg))) +``` + +A node is in the network if it has peers: +```bluespec "auxstate" += +val nodesInNetwork = NodeIDs.filter(node => node.Peers().nonEmpty()) +val nodesNotInNetwork = NodeIDs.exclude(nodesInNetwork) +``` +
+ +## Initial state + +The initial state of the P2P layer: +```bluespec "actions" += +action P2P_init = all { + incomingMsgs' = NodeIDs.mapBy(_ => List()), + peers' = NodeIDs.mapBy(n => InitialPeers.get(n)), +} +``` + +## State transitions (actions) + +A node receives one of the incoming messages from a peer and handles it according to its type. +```bluespec "actions" += +action receiveFromPeer(node, handleMessage) = all { + require(length(node.IncomingMsgs()) > 0), + // We model receiving of a message as taking the head of the list of + // incoming messages and leaving the tail. + val someMsg = node.IncomingMsgs().head() + val sender = someMsg._1 + val msg = someMsg._2 + val _incomingMsgs = incomingMsgs.update(node, tail) + handleMessage(node, _incomingMsgs, sender, msg) +} +``` + +A node joins the network by connecting to a given set of peers. All those peers add the new node to +their list of peers. +```bluespec "actions" += +action joinNetwork(node, peerSet) = all { + // The node must not be connected to the network. + require(node.Peers().isEmpty()), + peers' = peers + // Assign to node the set of new peers. + .put(node, peerSet) + // Add node as a new peer to the set of connecting peers. + .updateMultiple(peerSet, ps => ps.join(node)), + incomingMsgs' = incomingMsgs, +} +``` + +Non-deterministically pick a node and its peers to join the network. +```bluespec "actions" += +action pickNodeAndJoin = all { + // Pick a node that is not connected to the network. + require(NodeIDs.exclude(nodesInNetwork).nonEmpty()), + nondet node = oneOf(NodeIDs.exclude(nodesInNetwork)) + // Pick a non-empty set of nodes in the network to be the node's peers. + nondet peerSet = oneOf(nodesInNetwork.powerset().exclude(Set())) + node.joinNetwork(peerSet), +} +``` + +A node gets disconnected from the network. All its peers are immediately aware that the node is no +longer one of their peers, so their state is updated accordingly. 
+```bluespec "actions" += +// TODO: the network must not become disconnected; we don't want to model that. +action disconnectNetwork(nodeToDisconnect, _incomingMsgs) = all { + peers' = peers + // Clean node's state and remove all its peers. + .put(nodeToDisconnect, Set()) + // Remove node from other peers' state. + .updateMultiple(nodesInNetwork, ps => ps.exclude(Set(nodeToDisconnect))), + incomingMsgs' = _incomingMsgs, +} +``` + +Non-deterministically pick a node to disconnect from the network. +```bluespec "actions" += +action pickNodeAndDisconnect = all { + // Pick a node that is not the only node in the network. + require(size(nodesInNetwork) > 1), + nondet nodeToDisconnect = oneOf(nodesInNetwork) + disconnectNetwork(nodeToDisconnect, incomingMsgs), +} +``` + +## Properties + +_**Invariant**_ Peer relationships are bidirectional or symmetrical: if node A has B as peer, then B +has A as peer. +```bluespec "properties" += +val bidirectionalNetwork = + NodeIDs.forall(nodeA => + nodeA.Peers().forall(nodeB => nodeA.in(nodeB.Peers()))) +``` + +_**Property**_ Eventually all messages are delivered (there are no incoming messages). +```bluespec "properties" += +temporal allMsgsDelivered = + eventually(NodeIDs.forall(node => length(node.IncomingMsgs()) == 0)) +``` + +```bluespec "properties" += +// TODO: Invariant: all nodes in the network are always connected. +``` + + diff --git a/spec/mempool/gossip/quint/flood.qnt b/spec/mempool/gossip/quint/flood.qnt new file mode 100644 index 0000000000..911326969e --- /dev/null +++ b/spec/mempool/gossip/quint/flood.qnt @@ -0,0 +1,109 @@ +// -*- mode: Bluespec; -*- + +// File generated from markdown using https://github.com/driusan/lmt. DO NOT EDIT. 
+ +module flood { + import spells.* from "./spells" + import mempool.* from "./mempool" + export mempool.* + + //-------------------------------------------------------------------------- + // Messages + //-------------------------------------------------------------------------- + type Message = + | TxMsg(TX) + + //-------------------------------------------------------------------------- + // State + //-------------------------------------------------------------------------- + var senders: NodeID -> TxID -> List[NodeID] + + // Auxiliary definitions + def Senders(node) = senders.get(node) + def sendersOf(node, tx) = + node.Senders().mapGetDefault(hash(tx), List()).listToSet() + pure def addSender(_txSenders, tx, optionalSender) = + match optionalSender { + | Some(sender) => _txSenders.update(hash(tx), ss => + if (ss.includes(sender)) ss else ss.append(sender)) + | None => _txSenders + } + + //-------------------------------------------------------------------------- + // Actions + //-------------------------------------------------------------------------- + action init = all { + MP_init, + senders' = NodeIDs.mapBy(n => Map()), + } + action tryAddTx(node, _incomingMsgs, optionalSender, tx) = + if (not(hash(tx).in(node.Cache()))) + node.tryAddFirstTimeTx(_incomingMsgs, optionalSender, tx) + else + node.processDuplicateTx(_incomingMsgs, optionalSender, tx) + action tryAddFirstTimeTx(node, _incomingMsgs, optionalSender, tx) = all { + mempool' = mempool.update(node, st => { + cache: st.cache.join(hash(tx)), + txs: if (valid(tx)) st.txs.append(tx) else st.txs, + ...st }), + senders' = senders.update(node, ss => + if (valid(tx)) ss.addSender(tx, optionalSender) else ss), + incomingMsgs' = _incomingMsgs, + peers' = peers, + } + action processDuplicateTx(node, _incomingMsgs, optionalSender, tx) = all { + senders' = senders.update(node, ss => + if (node.Txs().includes(tx)) ss.addSender(tx, optionalSender) else ss), + mempool' = mempool, + incomingMsgs' = _incomingMsgs, + 
peers' = peers, + } + action handleMessage(node, _incomingMsgs, sender, msg) = + match msg { + | TxMsg(tx) => node.tryAddTx(_incomingMsgs, Some(sender), tx) + } + def mkTargetNodes(node, tx) = + node.Peers().exclude(node.sendersOf(tx)) + + action step = any { + nondet node = oneOf(nodesInNetwork) + nondet tx = oneOf(AllTxs) + node.receiveTxFromUser(tx, tryAddTx), + nondet node = oneOf(nodesInNetwork) + node.receiveFromPeer(handleMessage), + nondet node = oneOf(nodesInNetwork) + all { + node.disseminateNextTx(mkTargetNodes, TxMsg), + senders' = senders, + }, + all { + pickNodeAndJoin, + mempool' = mempool, + senders' = senders, + }, + all { + pickNodeAndDisconnect, + mempool' = mempool, + senders' = senders, + } + } + + //-------------------------------------------------------------------------- + // Properties + //-------------------------------------------------------------------------- + def txInAllMempools(tx) = + NodeIDs.forall(n => n.Txs().includes(tx)) + temporal txInPoolGetsDisseminated = + AllTxs.forall(tx => + NodeIDs.exists(node => + node.Txs().includes(tx) implies eventually(txInAllMempools(tx)))) + val dontSendBackToSender = + NodeIDs.forall(nodeA => + NodeIDs.forall(nodeB => + AllTxs.forall(tx => + nodeB.sendersOf(tx).contains(nodeA) + implies + not(nodeA.IncomingMsgs().includes((nodeB, TxMsg(tx)))) + ))) + +} diff --git a/spec/mempool/gossip/quint/mempool.qnt b/spec/mempool/gossip/quint/mempool.qnt new file mode 100644 index 0000000000..0fe3eabeeb --- /dev/null +++ b/spec/mempool/gossip/quint/mempool.qnt @@ -0,0 +1,74 @@ +// -*- mode: Bluespec; -*- + +// File generated from markdown using https://github.com/driusan/lmt. DO NOT EDIT. 
+ +module mempool { + import spells.* from "./spells" + import p2p.* from "./p2p" + export p2p.* + + //-------------------------------------------------------------------------- + // Types + //-------------------------------------------------------------------------- + type TX = str + pure def valid(tx) = true + type TxID = str + pure def hash(tx: TX): TxID = tx + + //-------------------------------------------------------------------------- + // Parameters + //-------------------------------------------------------------------------- + const AllTxs: Set[TX] + + //-------------------------------------------------------------------------- + // State + //-------------------------------------------------------------------------- + var mempool: NodeID -> MempoolState + + type MempoolState = { + cache: Set[TxID], + txs: List[TX], + txsIndex: int, + } + + // Auxiliary definitions + def Cache(node) = mempool.get(node).cache + def Txs(node) = mempool.get(node).txs + def TxsIndex(node) = mempool.get(node).txsIndex + + //-------------------------------------------------------------------------- + // Actions + //-------------------------------------------------------------------------- + action MP_init = all { + P2P_init, + mempool' = NodeIDs.mapBy(n => initialMempoolState), + } + val initialMempoolState = { + cache: Set(), + txs: List(), + txsIndex: 0, + } + action receiveTxFromUser(node, tx, _tryAddTx) = + node._tryAddTx(incomingMsgs, None, tx) + action disseminateNextTx(node, _mkTargetNodes, _mkTxMsg) = all { + // Check that the current index is within bounds. + require(node.TxsIndex() < node.Txs().length()), + // Get from the mempool the next transaction to disseminate. + val tx = node.Txs()[node.TxsIndex()] + all { + // Wrap transaction in a message and send it to the target nodes. + incomingMsgs' = + node.multiSend(incomingMsgs, _mkTargetNodes(node, tx), _mkTxMsg(tx)), + // Increase index. 
+ mempool' = mempool.update(node, st => { txsIndex: st.txsIndex + 1, ...st }), + peers' = peers, + } + } + + //-------------------------------------------------------------------------- + // Properties + //-------------------------------------------------------------------------- + val uniqueTxsInMempool = + NodeIDs.forall(node => size(node.Txs().listToSet()) == length(node.Txs())) + +} diff --git a/spec/mempool/gossip/quint/p2p.qnt b/spec/mempool/gossip/quint/p2p.qnt new file mode 100644 index 0000000000..5247c027f4 --- /dev/null +++ b/spec/mempool/gossip/quint/p2p.qnt @@ -0,0 +1,94 @@ +// -*- mode: Bluespec; -*- + +// File generated from markdown using https://github.com/driusan/lmt. DO NOT EDIT. + +module p2p { + import spells.* from "./spells" + + //-------------------------------------------------------------------------- + // Types + //-------------------------------------------------------------------------- + type NodeID = str + + //-------------------------------------------------------------------------- + // Parameters + //-------------------------------------------------------------------------- + const NodeIDs: Set[NodeID] + const InitialPeers: NodeID -> Set[NodeID] + + //-------------------------------------------------------------------------- + // State + //-------------------------------------------------------------------------- + var incomingMsgs: NodeID -> List[(NodeID, msg)] + var peers: NodeID -> Set[NodeID] + pure def multiSend(node, _incomingMsgs, targetNodes, msg) = + _incomingMsgs.updateMultiple(targetNodes, ms => ms.append((node, msg))) + + // Auxiliary definitions + def IncomingMsgs(node) = incomingMsgs.get(node) + def Peers(node) = peers.get(node) + val nodesInNetwork = NodeIDs.filter(node => node.Peers().nonEmpty()) + val nodesNotInNetwork = NodeIDs.exclude(nodesInNetwork) + + //-------------------------------------------------------------------------- + // Actions + 
//-------------------------------------------------------------------------- + action P2P_init = all { + incomingMsgs' = NodeIDs.mapBy(_ => List()), + peers' = NodeIDs.mapBy(n => InitialPeers.get(n)), + } + action receiveFromPeer(node, handleMessage) = all { + require(length(node.IncomingMsgs()) > 0), + // We model receiving of a message as taking the head of the list of + // incoming messages and leaving the tail. + val someMsg = node.IncomingMsgs().head() + val sender = someMsg._1 + val msg = someMsg._2 + val _incomingMsgs = incomingMsgs.update(node, tail) + handleMessage(node, _incomingMsgs, sender, msg) + } + action joinNetwork(node, peerSet) = all { + // The node must not be connected to the network. + require(node.Peers().isEmpty()), + peers' = peers + // Assign to node the set of new peers. + .put(node, peerSet) + // Add node as a new peer to the set of connecting peers. + .updateMultiple(peerSet, ps => ps.join(node)), + incomingMsgs' = incomingMsgs, + } + action pickNodeAndJoin = all { + // Pick a node that is not connected to the network. + require(NodeIDs.exclude(nodesInNetwork).nonEmpty()), + nondet node = oneOf(NodeIDs.exclude(nodesInNetwork)) + // Pick a non-empty set of nodes in the network to be the node's peers. + nondet peerSet = oneOf(nodesInNetwork.powerset().exclude(Set())) + node.joinNetwork(peerSet), + } + // TODO: the network must not become disconnected; we don't want to model that. + action disconnectNetwork(nodeToDisconnect, _incomingMsgs) = all { + peers' = peers + // Clean node's state and remove all its peers. + .put(nodeToDisconnect, Set()) + // Remove node from other peers' state. + .updateMultiple(nodesInNetwork, ps => ps.exclude(Set(nodeToDisconnect))), + incomingMsgs' = _incomingMsgs, + } + action pickNodeAndDisconnect = all { + // Pick a node that is not the only node in the network. 
+ require(size(nodesInNetwork) > 1), + nondet nodeToDisconnect = oneOf(nodesInNetwork) + disconnectNetwork(nodeToDisconnect, incomingMsgs), + } + + //-------------------------------------------------------------------------- + // Properties + //-------------------------------------------------------------------------- + val bidirectionalNetwork = + NodeIDs.forall(nodeA => + nodeA.Peers().forall(nodeB => nodeA.in(nodeB.Peers()))) + temporal allMsgsDelivered = + eventually(NodeIDs.forall(node => length(node.IncomingMsgs()) == 0)) + // TODO: Invariant: all nodes in the network are always connected. + +} diff --git a/spec/mempool/gossip/quint/spells.qnt b/spec/mempool/gossip/quint/spells.qnt new file mode 100644 index 0000000000..313654cc29 --- /dev/null +++ b/spec/mempool/gossip/quint/spells.qnt @@ -0,0 +1,135 @@ +// -*- mode: Bluespec; -*- +module spells { + + //-------------------------------------------------------------------------- + // Basic + //-------------------------------------------------------------------------- + + /// An annotation for writing preconditions. 
+ pure def require(__cond: bool): bool = __cond + + //-------------------------------------------------------------------------- + // Arithmetic + //-------------------------------------------------------------------------- + + pure def min(x,y) = if (x < y) x else y + pure def max(x,y) = if (x > y) x else y + + //-------------------------------------------------------------------------- + // Options + //-------------------------------------------------------------------------- + + type Option[a] = + | Some(a) + | None + + pure def isSome(__opt) = + match __opt { + | Some(_) => true + | None => false + } + + pure def isNone(__opt) = not(isSome(__opt)) + + pure def optionMap(__opt: Option[a], __f: a => b): Option[b] = + match __opt { + | Some(a) => Some(__f(a)) + | None => None + } + + pure def optionFlatten(__opt: Option[Option[a]]): Option[a] = + match __opt { + | Some(o) => o + | None => None + } + + pure def optionGetDefault(__opt: Option[a], __default: a): a = + match __opt { + | Some(o) => o + | None => __default + } + + pure def optionToSet(__opt: Option[a]): Set[a] = + match __opt { + | Some(o) => Set(o) + | None => Set() + } + + //-------------------------------------------------------------------------- + // Sets + //-------------------------------------------------------------------------- + + pure def join(__set: Set[a], __elem: a): Set[a] = + __set.union(Set(__elem)) + + pure def isEmpty(__set: Set[a]): bool = + __set == Set() + + pure def nonEmpty(__set: Set[a]): bool = + __set != Set() + + pure def except(__set: Set[a], __elem: a): Set[a] = + __set.exclude(Set(__elem)) + + //-------------------------------------------------------------------------- + // Maps + //-------------------------------------------------------------------------- + + /// Update a map entry using the previous value. 
+ /// + /// @param __map the map to update + /// @param __key the key to search for + /// @param __f a function that returns the new value for __key + /// when applied to __key's old value + /// @returns a new map equal to __map except that __key maps + /// to __f applied to __key's old value + pure def update(__map: a -> b, __key: a, __f: b => b): (a -> b) = + __map.put(__key, __f(__map.get(__key))) + + /// Update multiple entries in a map. + /// + /// @param __map the map to update + /// @param __keys the set of keys to be updated in __map + /// @param __values a function from __map values to new values + /// @returns a new map equal to __map except that for each key k in __keys + /// will map to __values(k) + pure def updateMultiple(__map: a -> b, __keys: Set[a], __values: b => b): (a -> b) = + __map.keys().union(__keys).mapBy(k => + if (k.in(__keys)) __values(__map.get(k)) else __map.get(k) + ) + + pure def mapRemoveMultiple(__map: a -> b, __keys: Set[a]): (a -> b) = + __map.keys().filter(k => not(k.in(__keys))).mapBy(k => __map.get(k)) + + pure def mapRemove(__map: a -> b, __key: a): (a -> b) = + mapRemoveMultiple(__map, Set(__key)) + + pure def mapGet(__map: a -> b, x: a): Option[b] = + if (__map.keys().contains(x)) Some(__map.get(x)) else None + + pure def mapGetDefault(__map: a -> b, __x: a, __default: b): b = + if (__map.keys().contains(__x)) __map.get(__x) else __default + + //-------------------------------------------------------------------------- + // Lists + //-------------------------------------------------------------------------- + + pure def listIsEmpty(__list: List[a]): bool = + __list == List() + + pure def listNonEmpty(__list: List[a]): bool = + __list != List() + + pure def listToSet(__list: List[a]): Set[a] = + __list.foldl(Set(), (__s, __x) => __s.join(__x)) + + pure def setToList(__set: Set[a]): List[a] = + __set.fold(List(), (__l, __x) => __l.append(__x)) + + pure def includes(__list: List[a], x: a): bool = + 
__list.listToSet().contains(x) + + pure def headOption(__list: List[a]): Option[a] = + if (__list.length() > 0) Some(__list.head()) else None + +} From c2aafcaae8edd5f78ab2b860f17b81f92ae5562f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hern=C3=A1n=20Vanzetto?= <15466498+hvanz@users.noreply.github.com> Date: Fri, 29 Nov 2024 09:03:49 +0100 Subject: [PATCH 10/14] spec(mempool/dog): Add spec of DOG gossip protocol (#4477) Solves #4318 Based on #4476 [README.md](https://github.com/cometbft/cometbft/tree/hvanz/dog-spec-4318/spec/mempool/gossip) --------- Co-authored-by: Andy Nogueira Co-authored-by: Daniel Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- spec/mempool/gossip/Makefile | 4 +- spec/mempool/gossip/README.md | 38 +- spec/mempool/gossip/dog.md | 625 ++++++++++++++++++++++++++++ spec/mempool/gossip/flood.md | 20 +- spec/mempool/gossip/p2p.md | 39 +- spec/mempool/gossip/quint/dog.qnt | 219 ++++++++++ spec/mempool/gossip/quint/flood.qnt | 18 +- spec/mempool/gossip/quint/p2p.qnt | 22 +- 8 files changed, 923 insertions(+), 62 deletions(-) create mode 100644 spec/mempool/gossip/dog.md create mode 100644 spec/mempool/gossip/quint/dog.qnt diff --git a/spec/mempool/gossip/Makefile b/spec/mempool/gossip/Makefile index de0d17ac58..03afc24c3e 100644 --- a/spec/mempool/gossip/Makefile +++ b/spec/mempool/gossip/Makefile @@ -1,4 +1,4 @@ -mds = p2p.md mempool.md flood.md +mds = p2p.md mempool.md flood.md dog.md quints = $(mds:.md=.qnt) %.qnt: %.md @@ -6,4 +6,4 @@ quints = $(mds:.md=.qnt) all: $(quints) .PHONY: all - \ No newline at end of file + diff --git a/spec/mempool/gossip/README.md b/spec/mempool/gossip/README.md index 0afe74fdf3..918f9dffc6 100644 --- a/spec/mempool/gossip/README.md +++ b/spec/mempool/gossip/README.md @@ -8,14 +8,33 @@ transactions in the network. - [Flood](flood.md). Currently implemented by CometBFT, Flood is a straightforward gossip protocol with a focus on rapid transaction propagation. 
- Pros: - + Latency: nodes forward transactions to their peers as soon as they receive them, resulting in - the minimum possible latency of decentralised P2P networks. + + Low Latency: nodes forward transactions to their peers as soon as they receive them, resulting + in the minimum possible latency of decentralised P2P networks. + Byzantine Fault Tolerance (BFT): flooding the network with messages ensures malicious actors - cannot easily prevent transaction dissemination (i.e., censoring), making it resilient to network disruptions - and attacks. + cannot easily prevent transaction dissemination (i.e., censoring), making it resilient to + network disruptions and attacks. - Cons: - - Bandwidth: the broadcast nature of Flood results in significant redundancy in message - propagation, leading to exponential increases in bandwidth usage. + - Exponential Bandwidth Usage: the broadcast nature of Flood results in significant redundancy + in message propagation, leading to exponential increases in bandwidth usage. + +- [Dynamic Optimal Graph (DOG)](dog.md). Building on Flood, DOG introduces improvements that + significantly reduces redundant transactions, enhancing bandwidth efficiency while maintaining + similar latency and resilience properties. + - Pros: + + Efficient Bandwidth Usage: DOG implements a routing mechanism that significantly reduces + bandwidth usage when compared to Flood. + + Low Latency: DOG does not introduce delays or extra communication steps that add latency, as + in "pull" gossip protocols. Instead, it selectively filters transactions before forwarding + them to peers. + + Byzantine Fault Tolerance (BFT): DOG keeps a minimum level of transaction redundancy for + preserving the resilience needed to mitigate the impact of Byzantine attacks. 
+ - Cons: + - Convergence Time for Redundancy: Reaching the optimal routes for the desired levels of + transaction redundancy may take each node a time proportional to the number of peers it has, + though typically in the order of a few minutes. + - Traffic Fairness: Nodes with high-speed connections and a large number of peers may end up + handling higher traffic load compared to other nodes. However, as the network topology evolves + dynamically, this imbalance changes over time. ## Specifications with Quint snippets @@ -30,8 +49,9 @@ To (re-)generate the Quint files: 1. install the [lmt tool][lmt] (see the prerequisites [here][lit]), and 2. run `make`. -The Flood gossip protocol is self-described in its own [flood](flood.md) spec. It is built on top of -two other specs, which are not strictly needed to understand the protocol: +The Flood gossip protocol is self-described in its own [flood](flood.md) spec. Similarly, DOG is +also self-described in the [dog](dog.md) spec, except that imports some definitions from Flood. +Both are built on top of two other specs, which are not strictly needed to understand the protocol: - [mempool](mempool.md) with definitions of common data structures from the mempool, and - [p2p](p2p.md) with networking definitions, assumptions, and boilerplate. @@ -39,6 +59,8 @@ Specs dependencies: ```mermaid flowchart TB flood --> mempool --> p2p; + dog --> flood; + dog --> mempool; ``` [quint]: https://quint-lang.org/ diff --git a/spec/mempool/gossip/dog.md b/spec/mempool/gossip/dog.md new file mode 100644 index 0000000000..3f9009b967 --- /dev/null +++ b/spec/mempool/gossip/dog.md @@ -0,0 +1,625 @@ +# Dynamic Optimal Graph (DOG) gossip protocol + +The DOG protocol introduces two novel features to optimize network bandwidth utilization while +preserving low latency performance and ensuring robustness against Byzantine attacks. + +* **Dynamic Routing.** DOG implements a routing mechanism that filters data disseminated from a node + to its peers. 
When a node `A` receives from node `B` a transaction that is already present in its + cache, this means that there is a cycle in the network topology. In this case, `A` will message + `B` indicating to not send any more transactions, and `B` will close one of the "routes" that + are used to forward transactions to `A`, thus cutting the cycle. Eventually, transactions will have only one + path to reach all nodes in the network, with the resulting routes forming a superposition of + spanning trees--the optimal P2P connection structure for disseminating data across the network. + +* **Redundancy Control mechanism.** For keeping nodes resilient to Byzantine attacks, the protocol + maintains a minimum level of transaction redundancy. Nodes periodically measure the redundancy + level of received transactions and decide if they should request peers for more or less + transactions. If a node is not receiving enough duplicate transactions, it will request its peers + to re-activate a previously disabled route. This ensures a steady yet controlled flow of data. + +The DOG protocol is built on top of the [Flood protocol](flood.md). This spec re-uses many of the +same types, messages, and data structures defined in Flood's spec. + +**Table of contents** + - [Messages](#messages) + - [Dynamic Routing](#dynamic-routing) + - [Redundancy Control](#redundancy-control) + - [Computing redundancy](#computing-redundancy) + - [How to adjust](#how-to-adjust) + - [When to adjust](#when-to-adjust) + - [Parameters](#parameters) + - [Initial state](#initial-state) + - [State transitions (actions)](#state-transitions-actions) + - [Adding transactions to the mempool](#adding-transactions-to-the-mempool) + - [Handling incoming messages](#handling-incoming-messages) + - [Transaction dissemination](#transaction-dissemination) + - [Nodes disconnecting from the network](#nodes-disconnecting-from-the-network) + +> This document was written using the literature programming paradigm. 
Code snippets are written in +> [Quint][quint] and can get "tangled" into a Quint file. + +## Messages + +In addition to the `TxMsg` data message present in Flood, DOG adds two control messages. +```bluespec "messages" += +type Message = +``` + +* Transaction message (same as in Flood). + ```bluespec "messages" += + | TxMsg(TX) + ``` + +* A node sends a `HaveTxMsg` message to signal that it already received a transaction. The receiver + will cut a route related to `tx` that is forming a cycle in the network topology. + ```bluespec "messages" += + | HaveTxMsg(TxID) + ``` + +* A node sends a `ResetRouteMsg` message to signal that it is not receiving enough transactions. The + receiver should, if possible, re-enable some route to the node. + ```bluespec "messages" += + | ResetRouteMsg + ``` + +Note that the size of `HaveTxMsg` and `ResetRouteMsg` is negligible compared to `TxMsg`, which +carries a full transaction. + +## Dynamic Routing + +The protocol has a routing mechanism to filter transaction messages that nodes sent to their peers. + +A _route_ is a tuple `(source, target)` representing the flow of transactions between two nodes. A +route is defined within a node, and `source` and `target` are peers connected to that node. +```bluespec "routing" += +type Route = (NodeID, NodeID) +``` +We also write a route as `source -> target`. + +Each node maintains a set of disabled routes (`dr`) to manage active connections. +```bluespec "routing" += +var dr: NodeID -> Set[Route] +``` +By default, all routes are enabled, that is, the set of disabled routes is empty. A node `B` will +send a transaction `tx`, received from peer `A`, to peer `C` only if the route `A -> C` is not in `B`'s set of disabled +routes. Since a transaction can be received from multiple peers, we define its sender `A` as the first node in `tx`'s list of senders (see the section on [Transaction +dissemination](#transaction-dissemination)). + +
+ Auxiliary definitions + +```bluespec "routing" += +def DisabledRoutes(node) = dr.get(node) +pure def disableRoute(routes, route) = routes.join(route) +pure def enableRoute(routes, route) = routes.exclude(Set(route)) +pure def isSourceOrTargetIn(node, route) = node == route._1 or node == route._2 +pure def routesWithSource(routes, source) = routes.filter(r => r._1 == source) +pure def routesWithTarget(routes, target) = routes.filter(r => r._2 == target) +pure def mapTargets(routes) = routes.map(r => r._2) +``` + +`resetRoutes` re-enables all routes to `peer` or from `peer` by removing any disabled route that has +`peer` as source or target. +```bluespec "routing" += +pure def resetRoutes(routes, peer) = + routes.filter(route => not(peer.isSourceOrTargetIn(route))) +``` +
+ +## Redundancy Control + +Each node implements a Redundancy Controller (RC) with a closed-loop feedback mechanism, commonly +used in control systems for dynamic self-regulation. The controller periodically monitors the level +of redundant transactions received and adjusts accordingly by sending `HaveTx` and `ResetRoute` +messages to peers. This ensures the redundancy level remains within predefined bounds, adapting to +changes in network conditions in real time. +```bluespec "rc" += +var rc: NodeID -> RedundancyController +``` + +The data structure `RedundancyController` contains the following fields: +* A counter of transactions received for the first time by the node. + ```bluespec "rcstate" += + firstTimeTxs: int, + ``` +* A counter of duplicate transactions received by the node. + ```bluespec "rcstate" += + duplicateTxs: int, + ``` +* A flag indicating whether the node is temporarily blocked from replying with a `HaveTx` message + upon receiving a duplicate transaction. + ```bluespec "rcstate" += + isHaveTxBlocked: bool, + ``` + +
+ Auxiliary definitions + +```bluespec "rc" += +def RC(node) = rc.get(node) +val initialRCState = { firstTimeTxs: 0, duplicateTxs: 0, isHaveTxBlocked: false } +pure def increaseFirstTimeTxs(_rc) = { firstTimeTxs: _rc.firstTimeTxs + 1, ..._rc } +pure def increaseDuplicateTxs(_rc) = { duplicateTxs: _rc.duplicateTxs + 1, ..._rc } +pure def resetCounters(_rc) = { firstTimeTxs: 0, duplicateTxs: 0, ..._rc } +pure def unblockHaveTx(_rc) = { isHaveTxBlocked: false, ..._rc } +pure def blockHaveTx(_rc) = { isHaveTxBlocked: true, ..._rc } +``` +
+ +### Computing redundancy + +The _redundancy level_ is calculated as the ratio of duplicate transactions to first-time +transactions. +```bluespec "rc" += +pure def redundancy(_rc) = + if (_rc.firstTimeTxs == 0) + upperBound + else + _rc.duplicateTxs / _rc.firstTimeTxs +``` +If the number of first-time transactions is 0, the redundancy level is set to a predefined maximum +value (the constant `upperBound` defined below) to prompt the controller to reduce redundancy. +Conversely, the redundancy level is set to 0 if there are no duplicate transactions, signaling the +controller to increase redundancy. + +For example, a redundancy of 0.5 means that, for every two first-time transactions received, the +node receives one duplicate transaction (not necessarily a duplicate of any of those two first-time +transactions). + +### How to adjust + +Function `controllerActions` computes the current `redundancy` level and determines which actions +the controller will take by returning an updated controller state and whether the controller should +send a `ResetRoute` message: +- If no transactions were received during the last iteration, the controller should not react in + order to preserve the current state of the routes. +- If `redundancy` is too low, the controller should request more transactions by sending a + `ResetRoute` message to a random peer. +- If `redundancy` is too high, the controller should signal peers to reduce traffic by temporarily + allowing to reply with a `HaveTx` message the next time the node receives a duplicate transaction. +- If `redundancy` is within acceptable limits, the controller takes no action. 
+```bluespec "rc" += +pure def controllerActions(_rc) = + if (_rc.firstTimeTxs + _rc.duplicateTxs == 0) + (_rc, false) + else if (_rc.redundancy() < lowerBound) + (_rc, true) + else if (_rc.redundancy() >= upperBound) + (_rc.unblockHaveTx(), false) + else + (_rc, false) +``` +Note that if the target redundancy is 0 (see [parameters](#parameters)), the lower and upper bounds +are also equal to 0. Then `controllerActions` will be able to unblock `HaveTx` but it will never +send `ResetRoute` messages. + +An important aspect of the controller actions is that, on each iteration, the controller allows the +node to send at most one `HaveTx` or one `ResetRoute` message, as explained next. + +### When to adjust + +The Redundancy Controller runs in a separate thread a control loop that periodically calls +`adjustRedundancy` in order to reach and maintain the target redundancy level. + +The `adjustRedundancy` action: +1. First it calls `controllerActions` to compute the current redundancy level and determines the +next steps such as unblocking `HaveTx` messages or sending a `ResetRoute` message to a randomly +chosen peer. +2. After making the adjustment, it resets the transaction counters so that redundancy is computed +independently of past measurements. +```bluespec "actions" += +action adjustRedundancy(node) = + nondet randomPeer = oneOf(node.Peers()) + val res = node.RC().controllerActions() + val updatedNodeRC = res._1 + val sendResetRoute = res._2 + all { + incomingMsgs' = + if (sendResetRoute) + node.send(incomingMsgs, randomPeer, ResetRouteMsg) + else incomingMsgs, + rc' = rc.put(node, updatedNodeRC.resetCounters()), + } +``` + +Adjustments should be paced by a timer to account for message propagation delays. If redundancy +adjustments are made too frequently, a node risks isolation as all peers may cut routes prematurely. 
+The timer should align with the network’s maximum round-trip time (RTT) to allow `HaveTx` and +`ResetRoute` messages to propagate and take effect before initiating further adjustments. +Consequently, the controller is designed to send at most one control message per iteration in order +to prevent over-correction. + +For example, suppose node `A` receives a duplicate transaction from `B` and replies with a `HaveTx` +message. Until `B` receives and process the `HaveTx` message, thus cutting a route to `A`, it will +pass at least a round-trip time (RTT) until `A` stops seeing traffic from `B`. In the meantime, `A` +may still continue to receive duplicates from `B` and other peers, causing `A`'s redundancy level to +be high. During that time, `A` should not send `HaveTx` messages to other peers because that may end +up cutting all routes to it. See the `adjustInterval` [parameter](#parameters) for more details. + +> An alternative to triggering `adjustRedundancy` at fixed time intervals is to base it on the +> number of received transactions. While this approach eliminates dependency on time constraints, it +> introduces vulnerabilities that could destabilize nodes. For example, an attacker could exploit +> this mechanism by sending bursts of numerous small transactions to a node, causing the node to +> trigger `adjustRedundancy` too frequently. This results in the near-continuous activation of +> `HaveTx` messages, leading nodes to repeatedly alternating between sending `HaveTx` and +> `ResetRoute` messages. On testnets, we have observed that nodes continue to operate normally, +> except that bandwidth does not decrease as expected. + +## Parameters + +The following parameters must be configured by each node at initialization. + +* `TargetRedundancy`: the desired redundancy level that the controller aims to keep as target + (within specified bounds). 
+ ```bluespec "params" += + const TargetRedundancy: int + ``` + A target equal to 0 partially disables the Redundancy Control mechanism: the controller can + block `HaveTx` messages but cannot send `ResetRoute` messages. Zero redundancy minimizes + bandwidth usage, achieving the lowest possible message overhead. In non-Byzantines networks, + this is the best possible scenario. However, in Byzantine networks it could potentially render + nodes isolated from transaction data. Therefore, the target should be set to a value greater + than 0. Experimental results suggest a value between 0.5 and 1, which is a safe number that does + not result in excessive duplicate transactions. + + > Note: `TargetRedundancy` should ideally be specified as a real number, but reals are not + > currently supported by Quint. + +* `TargetRedundancyDeltaPercent`: a percentage (a number in the open interval `(0, 100)`) of + `TargetRedundancy` that defines acceptable lower and upper bounds for redundancy levels as a + deviation from the target value. + ```bluespec "params" += + const TargetRedundancyDeltaPercent: int + ``` + From this value the protocol derives the constants: + ```bluespec "params" += + val _delta = TargetRedundancy * TargetRedundancyDeltaPercent / 100 + val lowerBound = TargetRedundancy - _delta + val upperBound = TargetRedundancy + _delta + ``` + This range provides flexibility, allowing the redundancy level to fluctuate while staying within + tolerable limits. For example, a `TargetRedundancy` of 0.5 with a `TargetRedundancyDeltaPercent` + of 20% would allow a redundancy level between 0.4 and 0.6. + + Based on experimentation, a `TargetRedundancyDeltaPercent` of around 20% strikes a good balance + between adaptability and stability. + + > Note: Similar to `TargetRedundancy`, this parameter could also be defined as a real type if it + > were allowed by Quint. 
+ +* `adjustInterval`: the time (in milliseconds) that the controller waits between successive + calls of `adjustRedundancy`. + ```bluespec "params" += + const adjustInterval: int + ``` + This interval should allow sufficient time for control messages (`HaveTx` and `ResetRoute`) to + propagate through the network and take effect. + + A minimum value for `adjustInterval` depends on the network’s round-trip time (RTT). Assuming that + global latency typically stays below 500ms (maybe an excessive number, need references here), the + interval should be set to at least 1000ms to ensure stability and avoid over-adjustment. + + Optimal values depend on empirical measurements of network latency. However, in practice, a value of + 1000ms or above is recommended to allow message processing and delivery times in diverse network + environments and load scenarios. + + This value is also related to the number of peers a node has, which determines the maximum number of + routes. For instance, a node with 50 peers will have a maximum of `50 * (50 - 1) = 2450` routes. + Hypothetically, removing one route on every adjustment, at one adjustment per second, it would take + 40.8 minutes to remove all routes from the node. + +## Initial state + +DOG's initial state is based on Flood's initial state. +```bluespec "init_action" += +action DOG_init = all { + Flood::init, + dr' = NodeIDs.mapBy(_ => Set()), + rc' = NodeIDs.mapBy(_ => initialRCState) +} +``` + +## State transitions (actions) + +The following are all the state transitions allowed in the protocol. The rest of the section +describes the missing details of each step. + +1. A node receives a transaction from a user and tries to add it to its mempool. + ```bluespec "steps" += + // User-initiated transactions + nondet node = oneOf(nodesInNetwork) + nondet tx = oneOf(AllTxs) + node.receiveTxFromUser(tx, tryAddTx), + ``` + +2. A node handles a message received from a peer. 
+ ```bluespec "steps" += + // Peer message handling + nondet node = oneOf(nodesInNetwork) + node.receiveFromPeer(handleMessage), + ``` + +3. A node disseminates a transaction currently in its mempool to a subset of peers. + ```bluespec "steps" += + // Transaction dissemination + nondet node = oneOf(nodesInNetwork) + all { + node.disseminateNextTx(mkTargetNodes, TxMsg), + senders' = senders, + dr' = dr, + rc' = rc, + }, + ``` + + > In the actual implementation, each peer has its own independent dissemination goroutine, + > resulting in transactions being sent to different peers at different times. However, for + > simplicity, in this spec we model all of these actions in one atomic step. + +4. A node joins the network (same as in Flood). + ```bluespec "steps" += + // Node joins network + all { + pickNodeAndJoin, + mempool' = mempool, + senders' = senders, + dr' = dr, + rc' = rc, + }, + ``` + +5. A node disconnects from the network. + ```bluespec "steps" += + // Node disconnects from network + all { + require(size(nodesInNetwork) > 1), + nondet node = oneOf(nodesInNetwork) + peers' = peers.disconnect(node), + incomingMsgs' = incomingMsgs, + mempool' = mempool, + senders' = senders, + dr' = dr, + rc' = rc, + }, + ``` + +6. A node detects that a peer is disconnected from the network. + ```bluespec "steps" += + // Node detects a peer is disconnected + nondet node = oneOf(nodesInNetwork) + all { + require(node.disconnectedPeers().nonEmpty()), + nondet peer = oneOf(node.disconnectedPeers()) + node.updateDisconnectedPeer(peer), + mempool' = mempool, + senders' = senders, + }, + ``` + +7. The Redundancy Controller periodically tries to adjust the redundancy level. 
+ ```bluespec "steps" += + // Redundancy Controller process loop + all { + nondet node = oneOf(nodesInNetwork) + node.adjustRedundancy(), + peers' = peers, + mempool' = mempool, + senders' = senders, + dr' = dr, + }, + ``` + + > This action should ideally have a pre-condition that enables the state transition only on + > specified time intervals, as defined by the `adjustInterval` parameter. However, Quint does + > not natively support specifying time constraints without relying on workarounds such as using + > counters as clocks. As a result, this action is modelled as always being enabled. In a + > real-world implementation, redundancy adjustments would be naturally triggered by a timer set + > to the duration specified by `adjustInterval`. + +### Adding transactions to the mempool + +`tryAddTx` defines how a node adds a transaction to its mempool. The following code is the same as +in Flood; the difference is in the two functions that process the transaction. +```bluespec "actions" += +action tryAddTx(node, _incomingMsgs, optionalSender, tx) = + if (not(hash(tx).in(node.Cache()))) + node.tryAddFirstTimeTx(_incomingMsgs, optionalSender, tx) + else + node.processDuplicateTx(_incomingMsgs, optionalSender, tx) +``` + +* **Adding first-time transactions** + + `tryAddFirstTimeTx` attempts to add a received first-time transaction `tx` to the mempool by + performing the same updates as in Flood (it adds `tx` to `cache`; if `tx` is valid, it appends `tx` + to the mempool and updates `tx`'s senders). Additionally, it increases the `node`'s + `rc.firstTimeTxs` counter. 
+ ```bluespec "actions" += + action tryAddFirstTimeTx(node, _incomingMsgs, optionalSender, tx) = + all { + node.Flood::tryAddFirstTimeTx(_incomingMsgs, optionalSender, tx), + rc' = rc.update(node, increaseFirstTimeTxs), + dr' = dr, + } + ``` + +* **Handling duplicate transactions** + + `processDuplicateTx` processes a received duplicate transaction `tx` by updating the list of senders + if `tx` is in the mempool, and the list of incoming messages, the same as in Flood. Additionally, + 1. it increases `duplicateTxs` and + 2. replies a `HaveTx` message if the RC mechanism is not blocking it (and there's a sender). + + ```bluespec "actions" += + action processDuplicateTx(node, _incomingMsgs, optionalSender, tx) = + val _rc = node.RC().increaseDuplicateTxs() + val updatedVars = node.replyHaveTx(_incomingMsgs, _rc, optionalSender, tx) + val _incomingMsgs1 = updatedVars._1 + val _rc1 = updatedVars._2 + all { + node.Flood::processDuplicateTx(_incomingMsgs1, optionalSender, tx), + rc' = rc.put(node, _rc1), + dr' = dr, + } + ``` + where `replyHaveTx` will send a `HaveTx` message if `tx` comes from a peer and `HaveTx` messages are + not blocked: + ```bluespec "actions" += + pure def replyHaveTx(node, _incomingMsgs, _rc, optionalSender, tx) = + if (optionalSender.isSome() and not(_rc.isHaveTxBlocked)) + val targets = optionalSender.optionToSet() + (node.multiSend(_incomingMsgs, targets, HaveTxMsg(hash(tx))), _rc.blockHaveTx()) + else (_incomingMsgs, _rc) + ``` + Immediately after sending the `HaveTx` message the controller will block sending a new one. + +### Handling incoming messages + +In this subsection we define how to handle each type of message received from a peer (the `sender`). 
+```bluespec "actions" += +action handleMessage(node, _incomingMsgs, sender, msg) = + match msg { + | TxMsg(tx) => node.tryAddTx(_incomingMsgs, Some(sender), tx) + | HaveTxMsg(txID) => node.handleHaveTxMessage(_incomingMsgs, sender, txID) + | ResetRouteMsg => node.handleResetRouteMessage(_incomingMsgs, sender) + } +``` + +* **Handling `HaveTx` messages** + + Upon receiving `HaveTxMsg(txID)` from `sender`, `node` disables the route `firstSender -> sender`, + where `firstSender` is the first node in `txID`'s list of senders, if `txID` actually + comes from a peer. This action will decrease the traffic to `sender`. + ```bluespec "actions" += + action handleHaveTxMessage(node, _incomingMsgs, sender, txID) = all { + val txSenders = node.sendersOf(txID) + dr' = dr.update(node, drs => + if (length(txSenders) > 0) drs.disableRoute((txSenders[0], sender)) else drs), + incomingMsgs' = _incomingMsgs, + peers' = peers, + mempool' = mempool, + senders' = senders, + rc' = rc, + } + ``` + The list of `tx`’s senders contains the node IDs from which `node` received the transaction, + ordered by the arrival time of the corresponding messages. To avoid disabling the routes from + all those senders at once, the protocol picks the first sender in the list, which is the first + peer from which `node` received `tx` for the first time. Subsequent entries in the list are + nodes whose transaction messages arrived later as duplicates. As such, most routes from those + peers to `node` will eventually be disabled, with most traffic coming primarily from the first + peer. + +* **Handling ResetRoute messages** + + Upon receiving `ResetRouteMsg`, `node` re-enables a random disabled route that has `sender` as + target. 
+ ```bluespec "actions" += + action handleResetRouteMessage(node, _incomingMsgs, sender) = all { + nondet randomRoute = oneOf(node.DisabledRoutes().routesWithTarget(sender)) + dr' = dr.update(node, drs => drs.enableRoute(randomRoute)), + incomingMsgs' = _incomingMsgs, + peers' = peers, + mempool' = mempool, + senders' = senders, + rc' = rc, + } + ``` + This will allow some traffic to flow again to `sender`. Other nodes will dynamically adapt to + the new traffic, closing routes when needed. + + The protocol re-enables only one route per `ResetRoute` message to allow traffic to `sender` to + increase gradually. If that peer still needs more transactions, it will send another + `ResetRoute` message at a later time. + +### Transaction dissemination + +As in Flood, DOG will filter out the transaction's senders. Additionally, `node` will not send `tx` +to peer `B` if the route `A -> B` is disabled, where `A` is the first node in the list of `tx`'s +senders. +```bluespec "actions" += +def mkTargetNodes(node, tx) = + val txSenders = node.sendersOf(hash(tx)) + val disabledTargets = + if (length(txSenders) > 0) + node.DisabledRoutes().routesWithSource(txSenders[0]).mapTargets() + else Set() + node.Peers() + .exclude(txSenders.listToSet()) + .exclude(disabledTargets) +``` +The protocol selects the first sender in the list based on the same reasoning applied when handling +received `HaveTx` messages. The first sender is likely responsible for the majority of traffic +related to the transaction, as it was the first to forward it to the node. Subsequent senders in the +list only sent the transaction later as duplicates, and their routes are more likely already +disabled. + +### Nodes disconnecting from the network + +When a `node` detects that a `peer` has disconnected from the network, +1. it updates its set of active peers, +2. it updates its routing table by resetting all routes that have `peer` as either a source or target, and +3. it triggers a redundancy adjustment. 
+```bluespec "actions" += +action updateDisconnectedPeer(node, peer) = all { + peers' = peers.update(node, ps => ps.exclude(Set(peer))), + dr' = dr.update(node, drs => drs.resetRoutes(peer)), + node.adjustRedundancy(), +} +``` +Calling `adjustRedundancy` is not strictly needed here because the protocol will make an adjustment +on the next iteration of the Redundancy Controller. This is just an improvement to trigger it +sooner. + + + +[quint]: https://quint-lang.org/ diff --git a/spec/mempool/gossip/flood.md b/spec/mempool/gossip/flood.md index dc8903149b..ee16540724 100644 --- a/spec/mempool/gossip/flood.md +++ b/spec/mempool/gossip/flood.md @@ -52,10 +52,10 @@ are only needed for disseminating (valid) transactions that are in the mempool. def Senders(node) = senders.get(node) ``` -The set of senders of transaction `tx`: +The set of senders of transaction `txID`: ```bluespec "auxstate" += -def sendersOf(node, tx) = - node.Senders().mapGetDefault(hash(tx), List()).listToSet() +def sendersOf(node, txID) = + node.Senders().mapGetDefault(txID, List()) ``` Function `addSender` adds a sender to `tx`'s list of senders (`_txSenders`), if `optionalSender` has @@ -121,11 +121,17 @@ These are the state transitions of the system. Note that generic actions are imp 5. A node disconnects from the network. ```bluespec "steps" += + nondet node = oneOf(nodesInNetwork) all { - pickNodeAndDisconnect, + require(size(nodesInNetwork) > 1), + // Disconnect node and remove node from other peers' connections. + peers' = peers + .disconnect(node) + .updateMultiple(nodesInNetwork, ps => ps.exclude(Set(node))), + incomingMsgs' = incomingMsgs, mempool' = mempool, senders' = senders, - } + }, ``` ### Adding transactions to the mempool @@ -200,7 +206,7 @@ In Flood, a node sends a transaction to all its peers except those who previousl to the generic transaction dissemination action. 
```bluespec "actions" += def mkTargetNodes(node, tx) = - node.Peers().exclude(node.sendersOf(tx)) + node.Peers().exclude(node.sendersOf(hash(tx)).listToSet()) ``` ## Properties @@ -229,7 +235,7 @@ val dontSendBackToSender = NodeIDs.forall(nodeA => NodeIDs.forall(nodeB => AllTxs.forall(tx => - nodeB.sendersOf(tx).contains(nodeA) + nodeB.sendersOf(hash(tx)).includes(nodeA) implies not(nodeA.IncomingMsgs().includes((nodeB, TxMsg(tx)))) ))) diff --git a/spec/mempool/gossip/p2p.md b/spec/mempool/gossip/p2p.md index e0d020cf1a..93f06465b5 100644 --- a/spec/mempool/gossip/p2p.md +++ b/spec/mempool/gossip/p2p.md @@ -60,6 +60,8 @@ same. ```bluespec "state" += pure def multiSend(node, _incomingMsgs, targetNodes, msg) = _incomingMsgs.updateMultiple(targetNodes, ms => ms.append((node, msg))) +pure def send(node, _incomingMsgs, targetNode, msg) = + node.multiSend(_incomingMsgs, Set(targetNode), msg) ``` A node is in the network if it has peers: @@ -67,6 +69,19 @@ A node is in the network if it has peers: val nodesInNetwork = NodeIDs.filter(node => node.Peers().nonEmpty()) val nodesNotInNetwork = NodeIDs.exclude(nodesInNetwork) ``` + +A node disconnects from the network when it does not have peers. +```bluespec "auxstate" += +pure def disconnect(_peers, node) = + // TODO: check that the network does not become disconnected; we don't want to model that. + _peers.put(node, Set()) +``` + +The set of `node`'s peers that are not themselves connected to `node`. +```bluespec "auxstate" += +def disconnectedPeers(node) = + node.Peers().filter(p => not(node.in(p.Peers()))) +``` ## Initial state @@ -122,30 +137,6 @@ action pickNodeAndJoin = all { } ``` -A node gets disconnected from the network. All its peers are immediately aware that the node is no -longer one of their peers, so their state is updated accordingly. -```bluespec "actions" += -// TODO: the network must not become disconnected; we don't want to model that. 
-action disconnectNetwork(nodeToDisconnect, _incomingMsgs) = all { - peers' = peers - // Clean node's state and remove all its peers. - .put(nodeToDisconnect, Set()) - // Remove node from other peers' state. - .updateMultiple(nodesInNetwork, ps => ps.exclude(Set(nodeToDisconnect))), - incomingMsgs' = _incomingMsgs, -} -``` - -Non-deterministically pick a node to disconnect from the network. -```bluespec "actions" += -action pickNodeAndDisconnect = all { - // Pick a node that is not the only node in the network. - require(size(nodesInNetwork) > 1), - nondet nodeToDisconnect = oneOf(nodesInNetwork) - disconnectNetwork(nodeToDisconnect, incomingMsgs), -} -``` - ## Properties _**Invariant**_ Peer relationships are bidirectional or symmetrical: if node A has B as peer, then B diff --git a/spec/mempool/gossip/quint/dog.qnt b/spec/mempool/gossip/quint/dog.qnt new file mode 100644 index 0000000000..506c1680e4 --- /dev/null +++ b/spec/mempool/gossip/quint/dog.qnt @@ -0,0 +1,219 @@ +// -*- mode: Bluespec; -*- + +// File generated from markdown using https://github.com/driusan/lmt. DO NOT EDIT. 
+ +module dog { + import spells.* from "./spells" + import mempool.* from "./mempool" + import flood as Flood from "./flood" + import flood.senders from "./flood" + import flood.Senders from "./flood" + import flood.sendersOf from "./flood" + + //-------------------------------------------------------------------------- + // Messages + //-------------------------------------------------------------------------- + type Message = + | TxMsg(TX) + | HaveTxMsg(TxID) + | ResetRouteMsg + + //-------------------------------------------------------------------------- + // Parameters + //-------------------------------------------------------------------------- + const TargetRedundancy: int + const TargetRedundancyDeltaPercent: int + val _delta = TargetRedundancy * TargetRedundancyDeltaPercent / 100 + val lowerBound = TargetRedundancy - _delta + val upperBound = TargetRedundancy + _delta + const adjustInterval: int + + //-------------------------------------------------------------------------- + // Routing + //-------------------------------------------------------------------------- + type Route = (NodeID, NodeID) + var dr: NodeID -> Set[Route] + def DisabledRoutes(node) = dr.get(node) + pure def disableRoute(routes, route) = routes.join(route) + pure def enableRoute(routes, route) = routes.exclude(Set(route)) + pure def isSourceOrTargetIn(node, route) = node == route._1 or node == route._2 + pure def routesWithSource(routes, source) = routes.filter(r => r._1 == source) + pure def routesWithTarget(routes, target) = routes.filter(r => r._2 == target) + pure def mapTargets(routes) = routes.map(r => r._2) + pure def resetRoutes(routes, peer) = + routes.filter(route => not(peer.isSourceOrTargetIn(route))) + + //-------------------------------------------------------------------------- + // Redundancy Controller + //-------------------------------------------------------------------------- + type RedundancyController = { + firstTimeTxs: int, + duplicateTxs: int, + 
isHaveTxBlocked: bool, + } + var rc: NodeID -> RedundancyController + def RC(node) = rc.get(node) + val initialRCState = { firstTimeTxs: 0, duplicateTxs: 0, isHaveTxBlocked: false } + pure def increaseFirstTimeTxs(_rc) = { firstTimeTxs: _rc.firstTimeTxs + 1, ..._rc } + pure def increaseDuplicateTxs(_rc) = { duplicateTxs: _rc.duplicateTxs + 1, ..._rc } + pure def resetCounters(_rc) = { firstTimeTxs: 0, duplicateTxs: 0, ..._rc } + pure def unblockHaveTx(_rc) = { isHaveTxBlocked: false, ..._rc } + pure def blockHaveTx(_rc) = { isHaveTxBlocked: true, ..._rc } + pure def redundancy(_rc) = + if (_rc.firstTimeTxs == 0) + upperBound + else + _rc.duplicateTxs / _rc.firstTimeTxs + pure def controllerActions(_rc) = + if (_rc.firstTimeTxs + _rc.duplicateTxs == 0) + (_rc, false) + else if (_rc.redundancy() < lowerBound) + (_rc, true) + else if (_rc.redundancy() >= upperBound) + (_rc.unblockHaveTx(), false) + else + (_rc, false) + + //-------------------------------------------------------------------------- + // Actions + //-------------------------------------------------------------------------- + action DOG_init = all { + Flood::init, + dr' = NodeIDs.mapBy(_ => Set()), + rc' = NodeIDs.mapBy(_ => initialRCState) + } + + action adjustRedundancy(node) = + nondet randomPeer = oneOf(node.Peers()) + val res = node.RC().controllerActions() + val updatedNodeRC = res._1 + val sendResetRoute = res._2 + all { + incomingMsgs' = + if (sendResetRoute) + node.send(incomingMsgs, randomPeer, ResetRouteMsg) + else incomingMsgs, + rc' = rc.put(node, updatedNodeRC.resetCounters()), + } + action tryAddTx(node, _incomingMsgs, optionalSender, tx) = + if (not(hash(tx).in(node.Cache()))) + node.tryAddFirstTimeTx(_incomingMsgs, optionalSender, tx) + else + node.processDuplicateTx(_incomingMsgs, optionalSender, tx) + action tryAddFirstTimeTx(node, _incomingMsgs, optionalSender, tx) = + all { + node.Flood::tryAddFirstTimeTx(_incomingMsgs, optionalSender, tx), + rc' = rc.update(node, 
increaseFirstTimeTxs), + dr' = dr, + } + action processDuplicateTx(node, _incomingMsgs, optionalSender, tx) = + val _rc = node.RC().increaseDuplicateTxs() + val updatedVars = node.replyHaveTx(_incomingMsgs, _rc, optionalSender, tx) + val _incomingMsgs1 = updatedVars._1 + val _rc1 = updatedVars._2 + all { + node.Flood::processDuplicateTx(_incomingMsgs1, optionalSender, tx), + rc' = rc.put(node, _rc1), + dr' = dr, + } + pure def replyHaveTx(node, _incomingMsgs, _rc, optionalSender, tx) = + if (optionalSender.isSome() and not(_rc.isHaveTxBlocked)) + val targets = optionalSender.optionToSet() + (node.multiSend(_incomingMsgs, targets, HaveTxMsg(hash(tx))), _rc.blockHaveTx()) + else (_incomingMsgs, _rc) + action handleMessage(node, _incomingMsgs, sender, msg) = + match msg { + | TxMsg(tx) => node.tryAddTx(_incomingMsgs, Some(sender), tx) + | HaveTxMsg(txID) => node.handleHaveTxMessage(_incomingMsgs, sender, txID) + | ResetRouteMsg => node.handleResetRouteMessage(_incomingMsgs, sender) + } + action handleHaveTxMessage(node, _incomingMsgs, sender, txID) = all { + val txSenders = node.sendersOf(txID) + dr' = dr.update(node, drs => + if (length(txSenders) > 0) drs.disableRoute((txSenders[0], sender)) else drs), + incomingMsgs' = _incomingMsgs, + peers' = peers, + mempool' = mempool, + senders' = senders, + rc' = rc, + } + action handleResetRouteMessage(node, _incomingMsgs, sender) = all { + nondet randomRoute = oneOf(node.DisabledRoutes().routesWithTarget(sender)) + dr' = dr.update(node, drs => drs.enableRoute(randomRoute)), + incomingMsgs' = _incomingMsgs, + peers' = peers, + mempool' = mempool, + senders' = senders, + rc' = rc, + } + def mkTargetNodes(node, tx) = + val txSenders = node.sendersOf(hash(tx)) + val disabledTargets = + if (length(txSenders) > 0) + node.DisabledRoutes().routesWithSource(txSenders[0]).mapTargets() + else Set() + node.Peers() + .exclude(txSenders.listToSet()) + .exclude(disabledTargets) + action updateDisconnectedPeer(node, peer) = all { + peers' 
= peers.update(node, ps => ps.exclude(Set(peer))), + dr' = dr.update(node, drs => drs.resetRoutes(peer)), + node.adjustRedundancy(), + } + + action step = any { + // User-initiated transactions + nondet node = oneOf(nodesInNetwork) + nondet tx = oneOf(AllTxs) + node.receiveTxFromUser(tx, tryAddTx), + // Peer message handling + nondet node = oneOf(nodesInNetwork) + node.receiveFromPeer(handleMessage), + // Transaction dissemination + nondet node = oneOf(nodesInNetwork) + all { + node.disseminateNextTx(mkTargetNodes, TxMsg), + senders' = senders, + dr' = dr, + rc' = rc, + }, + // Node joins network + all { + pickNodeAndJoin, + mempool' = mempool, + senders' = senders, + dr' = dr, + rc' = rc, + }, + // Node disconnects from network + all { + require(size(nodesInNetwork) > 1), + nondet node = oneOf(nodesInNetwork) + peers' = peers.disconnect(node), + incomingMsgs' = incomingMsgs, + mempool' = mempool, + senders' = senders, + dr' = dr, + rc' = rc, + }, + // Node detects a peer is disconnected + nondet node = oneOf(nodesInNetwork) + all { + require(node.disconnectedPeers().nonEmpty()), + nondet peer = oneOf(node.disconnectedPeers()) + node.updateDisconnectedPeer(peer), + mempool' = mempool, + senders' = senders, + }, + // Redundancy Controller process loop + all { + nondet node = oneOf(nodesInNetwork) + node.adjustRedundancy(), + peers' = peers, + mempool' = mempool, + senders' = senders, + dr' = dr, + }, + } + +} diff --git a/spec/mempool/gossip/quint/flood.qnt b/spec/mempool/gossip/quint/flood.qnt index 911326969e..e483d68adf 100644 --- a/spec/mempool/gossip/quint/flood.qnt +++ b/spec/mempool/gossip/quint/flood.qnt @@ -20,8 +20,8 @@ module flood { // Auxiliary definitions def Senders(node) = senders.get(node) - def sendersOf(node, tx) = - node.Senders().mapGetDefault(hash(tx), List()).listToSet() + def sendersOf(node, txID) = + node.Senders().mapGetDefault(txID, List()) pure def addSender(_txSenders, tx, optionalSender) = match optionalSender { | Some(sender) => 
_txSenders.update(hash(tx), ss => @@ -63,7 +63,7 @@ module flood { | TxMsg(tx) => node.tryAddTx(_incomingMsgs, Some(sender), tx) } def mkTargetNodes(node, tx) = - node.Peers().exclude(node.sendersOf(tx)) + node.Peers().exclude(node.sendersOf(hash(tx)).listToSet()) action step = any { nondet node = oneOf(nodesInNetwork) @@ -81,11 +81,17 @@ module flood { mempool' = mempool, senders' = senders, }, + nondet node = oneOf(nodesInNetwork) all { - pickNodeAndDisconnect, + require(size(nodesInNetwork) > 1), + // Disconnect node and remove node from other peers' connections. + peers' = peers + .disconnect(node) + .updateMultiple(nodesInNetwork, ps => ps.exclude(Set(node))), + incomingMsgs' = incomingMsgs, mempool' = mempool, senders' = senders, - } + }, } //-------------------------------------------------------------------------- @@ -101,7 +107,7 @@ module flood { NodeIDs.forall(nodeA => NodeIDs.forall(nodeB => AllTxs.forall(tx => - nodeB.sendersOf(tx).contains(nodeA) + nodeB.sendersOf(hash(tx)).includes(nodeA) implies not(nodeA.IncomingMsgs().includes((nodeB, TxMsg(tx)))) ))) diff --git a/spec/mempool/gossip/quint/p2p.qnt b/spec/mempool/gossip/quint/p2p.qnt index 5247c027f4..791c7f3e26 100644 --- a/spec/mempool/gossip/quint/p2p.qnt +++ b/spec/mempool/gossip/quint/p2p.qnt @@ -23,12 +23,19 @@ module p2p { var peers: NodeID -> Set[NodeID] pure def multiSend(node, _incomingMsgs, targetNodes, msg) = _incomingMsgs.updateMultiple(targetNodes, ms => ms.append((node, msg))) + pure def send(node, _incomingMsgs, targetNode, msg) = + node.multiSend(_incomingMsgs, Set(targetNode), msg) // Auxiliary definitions def IncomingMsgs(node) = incomingMsgs.get(node) def Peers(node) = peers.get(node) val nodesInNetwork = NodeIDs.filter(node => node.Peers().nonEmpty()) val nodesNotInNetwork = NodeIDs.exclude(nodesInNetwork) + pure def disconnect(_peers, node) = + // TODO: check that the network does not become disconnected; we don't want to model that. 
+ _peers.put(node, Set()) + def disconnectedPeers(node) = + node.Peers().filter(p => not(node.in(p.Peers()))) //-------------------------------------------------------------------------- // Actions @@ -65,21 +72,6 @@ module p2p { nondet peerSet = oneOf(nodesInNetwork.powerset().exclude(Set())) node.joinNetwork(peerSet), } - // TODO: the network must not become disconnected; we don't want to model that. - action disconnectNetwork(nodeToDisconnect, _incomingMsgs) = all { - peers' = peers - // Clean node's state and remove all its peers. - .put(nodeToDisconnect, Set()) - // Remove node from other peers' state. - .updateMultiple(nodesInNetwork, ps => ps.exclude(Set(nodeToDisconnect))), - incomingMsgs' = _incomingMsgs, - } - action pickNodeAndDisconnect = all { - // Pick a node that is not the only node in the network. - require(size(nodesInNetwork) > 1), - nondet nodeToDisconnect = oneOf(nodesInNetwork) - disconnectNetwork(nodeToDisconnect, incomingMsgs), - } //-------------------------------------------------------------------------- // Properties From a1d4825a8172f24ea1341111c1bec57e956d8d33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Hern=C3=A1n=20Vanzetto?= <15466498+hvanz@users.noreply.github.com> Date: Mon, 9 Dec 2024 12:04:49 +0100 Subject: [PATCH 11/14] feat(mempool): Add new Dynamic Optimal Graph (DOG) gossip protocol (#4555) Closes #4558 The bulk of the work is in one file: `mempool/reactor.go`. 
Each commit is a self-contained addition to the code: - [add new proto messages HaveTx and ResetRoute](https://github.com/cometbft/cometbft/pull/4555/commits/e2364279afb3973c0cf1d0908c30c288c10664a1) - [add config](https://github.com/cometbft/cometbft/pull/4555/commits/bca343a201b6bfc514b502d82a75a86396677442) - [add MempoolControlChannel](https://github.com/cometbft/cometbft/pull/4555/commits/cb65e1556aadb102d8cad6de6c1929744529dad1) - [add GetSenders method to Mempool interface, and Senders to Entry interface](https://github.com/cometbft/cometbft/pull/4555/commits/d73f263a7fde1191b54e6d81445aa70e19d179fc) - [add router to mempool reactor](https://github.com/cometbft/cometbft/pull/4555/commits/fb38f14a10752a63213a2211b4cb4399829a9a0f) - [add redundancy controller to mempool reactor](https://github.com/cometbft/cometbft/pull/4555/commits/f91be4355a797fc81c11f7d939171af53fb0d401) - [add metrics DisabledRoutes and Redundancy](https://github.com/cometbft/cometbft/pull/4555/commits/28b14f199ad7ca7b0f5c34567983e01f2ecb53f3) - [add changelog file](https://github.com/cometbft/cometbft/pull/4555/commits/4677722b30167e24f54a5b3a6e208aeaf1d83eb3) --- - [X] Tests written/updated - [X] Changelog entry added in `.changelog` (we use [unclog](https://github.com/informalsystems/unclog) to manage our changelog) - [X] Updated relevant documentation (`docs/` or `spec/`) and code comments --------- Co-authored-by: Jasmina Malicevic Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .../3297-mempool-interface-senders.md | 2 + .../3297-proto-mempool-message.md | 2 + .../3297-config-mempool-dog-protocol.md | 3 + .../features/3297-mempool-dog-protocol.md | 2 + .../features/3297-metrics-dog-protocol.md | 2 + .../3297-proto-mempool-dog-protocol.md | 2 + api/cometbft/mempool/v1/message.go | 1 - api/cometbft/mempool/v2/message.go | 42 + api/cometbft/mempool/v2/types.pb.go | 1024 +++++++++++++++++ config/config.go | 41 + config/config.toml.tpl | 15 + 
config/config_test.go | 9 + docs/references/config/config.toml.md | 46 + internal/consensus/replay_stubs.go | 17 +- mempool/clist_mempool.go | 17 +- mempool/mempool.go | 9 +- mempool/mempoolTx.go | 9 + mempool/metrics.gen.go | 21 + mempool/metrics.go | 9 + mempool/mocks/mempool.go | 30 + mempool/nop_mempool.go | 3 + mempool/reactor.go | 414 ++++++- mempool/reactor_test.go | 175 ++- mempool/types.go | 4 +- node/node.go | 2 +- node/node_test.go | 3 +- proto/README.md | 30 +- proto/cometbft/mempool/v2/types.proto | 30 + .../provisioning/dashboards-data/main.json | 4 +- .../dashboards-data/mempool_compact.json | 12 +- types/tx.go | 4 + 31 files changed, 1914 insertions(+), 70 deletions(-) create mode 100644 .changelog/v1.0.0/breaking-changes/3297-mempool-interface-senders.md create mode 100644 .changelog/v1.0.0/breaking-changes/3297-proto-mempool-message.md create mode 100644 .changelog/v1.0.0/features/3297-config-mempool-dog-protocol.md create mode 100644 .changelog/v1.0.0/features/3297-mempool-dog-protocol.md create mode 100644 .changelog/v1.0.0/features/3297-metrics-dog-protocol.md create mode 100644 .changelog/v1.0.0/features/3297-proto-mempool-dog-protocol.md create mode 100644 api/cometbft/mempool/v2/message.go create mode 100644 api/cometbft/mempool/v2/types.pb.go create mode 100644 proto/cometbft/mempool/v2/types.proto diff --git a/.changelog/v1.0.0/breaking-changes/3297-mempool-interface-senders.md b/.changelog/v1.0.0/breaking-changes/3297-mempool-interface-senders.md new file mode 100644 index 0000000000..f2c1d4b6a1 --- /dev/null +++ b/.changelog/v1.0.0/breaking-changes/3297-mempool-interface-senders.md @@ -0,0 +1,2 @@ +- `[mempool]` Add new `GetSenders` method to `Mempool` interface, and `Senders` method to `Entry` + interface. ([#3297](https://github.com/cometbft/cometbft/issue/3297)). 
diff --git a/.changelog/v1.0.0/breaking-changes/3297-proto-mempool-message.md b/.changelog/v1.0.0/breaking-changes/3297-proto-mempool-message.md
new file mode 100644
index 0000000000..6719a07b77
--- /dev/null
+++ b/.changelog/v1.0.0/breaking-changes/3297-proto-mempool-message.md
@@ -0,0 +1,2 @@
+- `[proto]` Mempool proto version upgraded to `v2`.
+([#3297](https://github.com/cometbft/cometbft/issues/3297)).
diff --git a/.changelog/v1.0.0/features/3297-config-mempool-dog-protocol.md b/.changelog/v1.0.0/features/3297-config-mempool-dog-protocol.md
new file mode 100644
index 0000000000..4690587992
--- /dev/null
+++ b/.changelog/v1.0.0/features/3297-config-mempool-dog-protocol.md
@@ -0,0 +1,3 @@
+- `[config]` For the DOG gossip protocol, add `dog_protocol_enabled`, `dog_target_redundancy`,
+  `dog_adjust_interval` to the `mempool` configuration section
+  ([\#3297](https://github.com/cometbft/cometbft/issues/3297)).
diff --git a/.changelog/v1.0.0/features/3297-mempool-dog-protocol.md b/.changelog/v1.0.0/features/3297-mempool-dog-protocol.md
new file mode 100644
index 0000000000..bfb449429a
--- /dev/null
+++ b/.changelog/v1.0.0/features/3297-mempool-dog-protocol.md
@@ -0,0 +1,2 @@
+- `[mempool]` Add new Dynamic Optimal Graph (DOG) gossip protocol
+  ([#3297](https://github.com/cometbft/cometbft/issues/3297)).
diff --git a/.changelog/v1.0.0/features/3297-metrics-dog-protocol.md b/.changelog/v1.0.0/features/3297-metrics-dog-protocol.md
new file mode 100644
index 0000000000..1b14680d71
--- /dev/null
+++ b/.changelog/v1.0.0/features/3297-metrics-dog-protocol.md
@@ -0,0 +1,2 @@
+- `[metrics]` Add new mempool metrics `DisabledRoutes` and `Redundancy` for the DOG protocol
+  ([#3297](https://github.com/cometbft/cometbft/issues/3297)).
diff --git a/.changelog/v1.0.0/features/3297-proto-mempool-dog-protocol.md b/.changelog/v1.0.0/features/3297-proto-mempool-dog-protocol.md new file mode 100644 index 0000000000..32f94fdb76 --- /dev/null +++ b/.changelog/v1.0.0/features/3297-proto-mempool-dog-protocol.md @@ -0,0 +1,2 @@ +- `[proto]` Add new proto messages `HaveTx` and `ResetRoute` for the mempool DOG protocol + ([#3297](https://github.com/cometbft/cometbft/issue/3297)). diff --git a/api/cometbft/mempool/v1/message.go b/api/cometbft/mempool/v1/message.go index a2093ee868..9ce81c7a71 100644 --- a/api/cometbft/mempool/v1/message.go +++ b/api/cometbft/mempool/v1/message.go @@ -19,7 +19,6 @@ func (m *Message) Unwrap() (proto.Message, error) { switch msg := m.Sum.(type) { case *Message_Txs: return m.GetTxs(), nil - default: return nil, fmt.Errorf("unknown message: %T", msg) } diff --git a/api/cometbft/mempool/v2/message.go b/api/cometbft/mempool/v2/message.go new file mode 100644 index 0000000000..90ac0835a2 --- /dev/null +++ b/api/cometbft/mempool/v2/message.go @@ -0,0 +1,42 @@ +package v2 + +import ( + "fmt" + + "github.com/cosmos/gogoproto/proto" +) + +// Wrap implements the p2p Wrapper interface and wraps a mempool message. +func (m *Txs) Wrap() proto.Message { + mm := &Message{} + mm.Sum = &Message_Txs{Txs: m} + return mm +} + +func (m *HaveTx) Wrap() proto.Message { + mm := &Message{} + mm.Sum = &Message_HaveTx{HaveTx: m} + return mm +} + +func (m *ResetRoute) Wrap() proto.Message { + mm := &Message{} + mm.Sum = &Message_ResetRoute{ResetRoute: m} + return mm +} + +// Unwrap implements the p2p Wrapper interface and unwraps a wrapped mempool +// message. 
+func (m *Message) Unwrap() (proto.Message, error) { + switch msg := m.Sum.(type) { + case *Message_Txs: + return m.GetTxs(), nil + case *Message_HaveTx: + return m.GetHaveTx(), nil + case *Message_ResetRoute: + return m.GetResetRoute(), nil + + default: + return nil, fmt.Errorf("unknown message: %T", msg) + } +} diff --git a/api/cometbft/mempool/v2/types.pb.go b/api/cometbft/mempool/v2/types.pb.go new file mode 100644 index 0000000000..d946a2e7bb --- /dev/null +++ b/api/cometbft/mempool/v2/types.pb.go @@ -0,0 +1,1024 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: cometbft/mempool/v2/types.proto + +package v2 + +import ( + fmt "fmt" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Txs contains a list of transaction from the mempool. 
+type Txs struct { + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` +} + +func (m *Txs) Reset() { *m = Txs{} } +func (m *Txs) String() string { return proto.CompactTextString(m) } +func (*Txs) ProtoMessage() {} +func (*Txs) Descriptor() ([]byte, []int) { + return fileDescriptor_f354aa43d1c2a8af, []int{0} +} +func (m *Txs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Txs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Txs.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Txs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Txs.Merge(m, src) +} +func (m *Txs) XXX_Size() int { + return m.Size() +} +func (m *Txs) XXX_DiscardUnknown() { + xxx_messageInfo_Txs.DiscardUnknown(m) +} + +var xxx_messageInfo_Txs proto.InternalMessageInfo + +func (m *Txs) GetTxs() [][]byte { + if m != nil { + return m.Txs + } + return nil +} + +// HaveTx is sent by the DOG protocol to signal a peer that the sender already +// has a transaction. 
+type HaveTx struct { + TxKey []byte `protobuf:"bytes,1,opt,name=tx_key,json=txKey,proto3" json:"tx_key,omitempty"` +} + +func (m *HaveTx) Reset() { *m = HaveTx{} } +func (m *HaveTx) String() string { return proto.CompactTextString(m) } +func (*HaveTx) ProtoMessage() {} +func (*HaveTx) Descriptor() ([]byte, []int) { + return fileDescriptor_f354aa43d1c2a8af, []int{1} +} +func (m *HaveTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HaveTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HaveTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HaveTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_HaveTx.Merge(m, src) +} +func (m *HaveTx) XXX_Size() int { + return m.Size() +} +func (m *HaveTx) XXX_DiscardUnknown() { + xxx_messageInfo_HaveTx.DiscardUnknown(m) +} + +var xxx_messageInfo_HaveTx proto.InternalMessageInfo + +func (m *HaveTx) GetTxKey() []byte { + if m != nil { + return m.TxKey + } + return nil +} + +// ResetRoute is sent by the DOG protocol to signal a peer to reset a (random) +// route to the sender. 
+type ResetRoute struct { +} + +func (m *ResetRoute) Reset() { *m = ResetRoute{} } +func (m *ResetRoute) String() string { return proto.CompactTextString(m) } +func (*ResetRoute) ProtoMessage() {} +func (*ResetRoute) Descriptor() ([]byte, []int) { + return fileDescriptor_f354aa43d1c2a8af, []int{2} +} +func (m *ResetRoute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResetRoute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResetRoute.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResetRoute) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResetRoute.Merge(m, src) +} +func (m *ResetRoute) XXX_Size() int { + return m.Size() +} +func (m *ResetRoute) XXX_DiscardUnknown() { + xxx_messageInfo_ResetRoute.DiscardUnknown(m) +} + +var xxx_messageInfo_ResetRoute proto.InternalMessageInfo + +// Message is an abstract mempool message. +type Message struct { + // Sum of all possible messages. 
+ // + // Types that are valid to be assigned to Sum: + // + // *Message_Txs + // *Message_HaveTx + // *Message_ResetRoute + Sum isMessage_Sum `protobuf_oneof:"sum"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { + return fileDescriptor_f354aa43d1c2a8af, []int{3} +} +func (m *Message) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Message.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Message) XXX_Merge(src proto.Message) { + xxx_messageInfo_Message.Merge(m, src) +} +func (m *Message) XXX_Size() int { + return m.Size() +} +func (m *Message) XXX_DiscardUnknown() { + xxx_messageInfo_Message.DiscardUnknown(m) +} + +var xxx_messageInfo_Message proto.InternalMessageInfo + +type isMessage_Sum interface { + isMessage_Sum() + MarshalTo([]byte) (int, error) + Size() int +} + +type Message_Txs struct { + Txs *Txs `protobuf:"bytes,1,opt,name=txs,proto3,oneof" json:"txs,omitempty"` +} +type Message_HaveTx struct { + HaveTx *HaveTx `protobuf:"bytes,2,opt,name=have_tx,json=haveTx,proto3,oneof" json:"have_tx,omitempty"` +} +type Message_ResetRoute struct { + ResetRoute *ResetRoute `protobuf:"bytes,3,opt,name=reset_route,json=resetRoute,proto3,oneof" json:"reset_route,omitempty"` +} + +func (*Message_Txs) isMessage_Sum() {} +func (*Message_HaveTx) isMessage_Sum() {} +func (*Message_ResetRoute) isMessage_Sum() {} + +func (m *Message) GetSum() isMessage_Sum { + if m != nil { + return m.Sum + } + return nil +} + +func (m *Message) GetTxs() *Txs { + if x, ok := m.GetSum().(*Message_Txs); ok { + return x.Txs + } + return nil +} + +func (m *Message) GetHaveTx() *HaveTx { + 
if x, ok := m.GetSum().(*Message_HaveTx); ok { + return x.HaveTx + } + return nil +} + +func (m *Message) GetResetRoute() *ResetRoute { + if x, ok := m.GetSum().(*Message_ResetRoute); ok { + return x.ResetRoute + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Message) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Message_Txs)(nil), + (*Message_HaveTx)(nil), + (*Message_ResetRoute)(nil), + } +} + +func init() { + proto.RegisterType((*Txs)(nil), "cometbft.mempool.v2.Txs") + proto.RegisterType((*HaveTx)(nil), "cometbft.mempool.v2.HaveTx") + proto.RegisterType((*ResetRoute)(nil), "cometbft.mempool.v2.ResetRoute") + proto.RegisterType((*Message)(nil), "cometbft.mempool.v2.Message") +} + +func init() { proto.RegisterFile("cometbft/mempool/v2/types.proto", fileDescriptor_f354aa43d1c2a8af) } + +var fileDescriptor_f354aa43d1c2a8af = []byte{ + // 282 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0x84, 0x50, + 0x14, 0x86, 0xef, 0x4d, 0xc6, 0x81, 0x33, 0xb3, 0x08, 0x23, 0x12, 0x82, 0x3b, 0x83, 0x2b, 0x17, + 0xa1, 0x60, 0xd1, 0x03, 0xb8, 0x12, 0xa2, 0x16, 0x17, 0x57, 0x6d, 0x44, 0xe3, 0x34, 0x0e, 0x25, + 0x57, 0xbc, 0x57, 0xb9, 0xbe, 0x45, 0xcf, 0xd3, 0x13, 0xb4, 0x9c, 0x65, 0xcb, 0xd0, 0x17, 0x09, + 0x9d, 0xc6, 0x36, 0xee, 0xce, 0x81, 0xf3, 0x9d, 0xff, 0xe3, 0x87, 0xcd, 0x8b, 0x28, 0x50, 0x65, + 0xaf, 0xca, 0x2f, 0xb0, 0x28, 0x85, 0x78, 0xf7, 0x9b, 0xc0, 0x57, 0x6d, 0x89, 0xd2, 0x2b, 0x2b, + 0xa1, 0x84, 0x75, 0x71, 0x3a, 0xf0, 0xfe, 0x0e, 0xbc, 0x26, 0x70, 0xae, 0xc0, 0x88, 0xb5, 0xb4, + 0xce, 0xc1, 0x50, 0x5a, 0xda, 0x74, 0x6b, 0xb8, 0x6b, 0x3e, 0x8c, 0xce, 0x06, 0xcc, 0x28, 0x6d, + 0x30, 0xd6, 0xd6, 0x25, 0x98, 0x4a, 0x27, 0x6f, 0xd8, 0xda, 0x74, 0x4b, 0xdd, 0x35, 0x5f, 0x28, + 0xfd, 0x80, 0xad, 0xb3, 0x06, 0xe0, 0x28, 0x51, 0x71, 0x51, 0x2b, 0x74, 0x3e, 0x29, 0x2c, 0x1f, + 0x51, 0xca, 0x74, 0x87, 0xd6, 0xcd, 0xe9, 0x19, 0x75, 
0x57, 0x81, 0xed, 0xcd, 0xc4, 0x7a, 0xb1, + 0x96, 0x11, 0x19, 0x83, 0xac, 0x7b, 0x58, 0xe6, 0x69, 0x83, 0x89, 0xd2, 0xf6, 0xd9, 0x48, 0x5c, + 0xcf, 0x12, 0x47, 0x99, 0x88, 0x70, 0x33, 0x3f, 0x6a, 0x85, 0xb0, 0xaa, 0x86, 0xfc, 0xa4, 0x1a, + 0x04, 0x6c, 0x63, 0x64, 0x37, 0xb3, 0xec, 0xbf, 0x67, 0x44, 0x38, 0x54, 0xd3, 0x16, 0x2e, 0xc0, + 0x90, 0x75, 0x11, 0x3e, 0x7d, 0x75, 0x8c, 0x1e, 0x3a, 0x46, 0x7f, 0x3a, 0x46, 0x3f, 0x7a, 0x46, + 0x0e, 0x3d, 0x23, 0xdf, 0x3d, 0x23, 0xcf, 0x77, 0xbb, 0xbd, 0xca, 0xeb, 0x6c, 0xf8, 0xea, 0x4f, + 0xfd, 0x4e, 0x43, 0x5a, 0xee, 0xfd, 0x99, 0xd6, 0x33, 0x73, 0x2c, 0xfc, 0xf6, 0x37, 0x00, 0x00, + 0xff, 0xff, 0xaa, 0x61, 0x3b, 0xc6, 0x93, 0x01, 0x00, 0x00, +} + +func (m *Txs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Txs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Txs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HaveTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HaveTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HaveTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TxKey) > 0 { + i -= len(m.TxKey) + copy(dAtA[i:], m.TxKey) + i = encodeVarintTypes(dAtA, i, uint64(len(m.TxKey))) + i-- + dAtA[i] = 0xa 
+ } + return len(dAtA) - i, nil +} + +func (m *ResetRoute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResetRoute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResetRoute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Sum != nil { + { + size := m.Sum.Size() + i -= size + if _, err := m.Sum.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Message_Txs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_Txs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Txs != nil { + { + size, err := m.Txs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} +func (m *Message_HaveTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_HaveTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.HaveTx != nil { + { + size, err := m.HaveTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *Message_ResetRoute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_ResetRoute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ResetRoute != nil { + { + size, err := m.ResetRoute.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Txs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *HaveTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TxKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResetRoute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *Message) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + n += m.Sum.Size() + } + return n +} + +func (m *Message_Txs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Txs != nil { + l = m.Txs.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_HaveTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HaveTx != nil { + l = m.HaveTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_ResetRoute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResetRoute != nil { + l = m.ResetRoute.Size() + n += 1 + l + 
sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Txs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Txs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Txs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HaveTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HaveTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HaveTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TxKey = append(m.TxKey[:0], dAtA[iNdEx:postIndex]...) 
+ if m.TxKey == nil { + m.TxKey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResetRoute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResetRoute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResetRoute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Txs{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_Txs{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HaveTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &HaveTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_HaveTx{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResetRoute", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + v := &ResetRoute{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_ResetRoute{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTypes + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTypes + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTypes + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = 
fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") +) diff --git a/config/config.go b/config/config.go index 9047d0ede5..42d26021e8 100644 --- a/config/config.go +++ b/config/config.go @@ -928,6 +928,22 @@ type MempoolConfig struct { // performance results using the default P2P configuration. ExperimentalMaxGossipConnectionsToPersistentPeers int `mapstructure:"experimental_max_gossip_connections_to_persistent_peers"` ExperimentalMaxGossipConnectionsToNonPersistentPeers int `mapstructure:"experimental_max_gossip_connections_to_non_persistent_peers"` + + // When using the Flood mempool type, enable the DOG gossip protocol to + // reduce network bandwidth on transaction dissemination (for details, see + // specs/mempool/gossip/). + DOGProtocolEnabled bool `mapstructure:"dog_protocol_enabled"` + + // Used by the DOG protocol to set the desired transaction redundancy level + // for the node. For example, a redundancy of 0.5 means that, for every two + // first-time transactions received, the node will receive one duplicate + // transaction. + DOGTargetRedundancy float64 `mapstructure:"dog_target_redundancy"` + + // Used by the DOG protocol to set how often it will attempt to adjust the + // redundancy level. The higher the value, the longer it will take the node + // to reduce bandwidth and converge to a stable redundancy level. + DOGAdjustInterval time.Duration `mapstructure:"dog_adjust_interval"` } // DefaultMempoolConfig returns a default configuration for the CometBFT mempool. 
@@ -946,6 +962,9 @@ func DefaultMempoolConfig() *MempoolConfig { CacheSize: 10000, ExperimentalMaxGossipConnectionsToNonPersistentPeers: 0, ExperimentalMaxGossipConnectionsToPersistentPeers: 0, + DOGProtocolEnabled: true, + DOGTargetRedundancy: 1, + DOGAdjustInterval: 1000 * time.Millisecond, } } @@ -1007,6 +1026,28 @@ func (cfg *MempoolConfig) ValidateBasic() error { } } + // DOG gossip protocol + if cfg.Type != MempoolTypeFlood && cfg.DOGProtocolEnabled { + return cmterrors.ErrWrongField{ + Field: "dog_protocol_enabled", + Err: errors.New("DOG protocol only works with the Flood mempool type"), + } + } + if cfg.DOGProtocolEnabled && + (cfg.ExperimentalMaxGossipConnectionsToPersistentPeers > 0 || + cfg.ExperimentalMaxGossipConnectionsToNonPersistentPeers > 0) { + return cmterrors.ErrWrongField{ + Field: "dog_protocol_enabled", + Err: errors.New("DOG protocol is not compatible with experimental_max_gossip_connections_to_*_peers feature"), + } + } + if cfg.DOGTargetRedundancy <= 0 { + return cmterrors.ErrNegativeOrZeroField{Field: "target_redundancy"} + } + if cfg.DOGAdjustInterval.Milliseconds() < 1000 { + return errors.New("DOG protocol requires the adjustment interval to be higher than 1000ms") + } + return nil } diff --git a/config/config.toml.tpl b/config/config.toml.tpl index 785f7133ce..ef935324c3 100644 --- a/config/config.toml.tpl +++ b/config/config.toml.tpl @@ -392,6 +392,21 @@ keep-invalid-txs-in-cache = {{ .Mempool.KeepInvalidTxsInCache }} experimental_max_gossip_connections_to_persistent_peers = {{ .Mempool.ExperimentalMaxGossipConnectionsToPersistentPeers }} experimental_max_gossip_connections_to_non_persistent_peers = {{ .Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers }} +# When using the Flood mempool type, enable the DOG gossip protocol to +# reduce network bandwidth on transaction dissemination (for details, see +# specs/mempool/gossip/). 
+dog_protocol_enabled = {{ .Mempool.DOGProtocolEnabled }} + +# Used by the DOG protocol to set the desired transaction redundancy level +# for the node. For example, redundancy of 0.5 means that, for every two first-time +# transactions received, the node will receive one duplicate transaction. +dog_target_redundancy = {{ .Mempool.DOGTargetRedundancy }} + +# Used by the DOG protocol to set how often it will attempt to adjust the +# redundancy level. The higher the value, the longer it will take the node +# to reduce bandwidth and converge to a stable redundancy level. +dog_adjust_interval = "{{ .Mempool.DOGAdjustInterval }}" + ####################################################### ### State Sync Configuration Options ### ####################################################### diff --git a/config/config_test.go b/config/config_test.go index c450cfe6f3..85f4fadc28 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -119,6 +119,7 @@ func TestMempoolConfigValidateBasic(t *testing.T) { reflect.ValueOf(cfg).Elem().FieldByName("Type").SetString("invalid") require.Error(t, cfg.ValidateBasic()) reflect.ValueOf(cfg).Elem().FieldByName("Type").SetString(config.MempoolTypeFlood) + reflect.ValueOf(cfg).Elem().FieldByName("DOGProtocolEnabled").SetBool(false) setFieldTo := func(fieldName string, value int64) { reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(value) @@ -163,6 +164,14 @@ func TestMempoolConfigValidateBasic(t *testing.T) { require.NoError(t, cfg.ValidateBasic()) setFieldTo(name, 1) // reset } + + // with DOG protocol only works with Flood and no MaxGossip feature. 
+ reflect.ValueOf(cfg).Elem().FieldByName("DOGProtocolEnabled").SetBool(true) + require.Error(t, cfg.ValidateBasic()) + reflect.ValueOf(cfg).Elem().FieldByName("Type").SetString(config.MempoolTypeFlood) + reflect.ValueOf(cfg).Elem().FieldByName("ExperimentalMaxGossipConnectionsToPersistentPeers").SetInt(0) + reflect.ValueOf(cfg).Elem().FieldByName("ExperimentalMaxGossipConnectionsToNonPersistentPeers").SetInt(0) + require.NoError(t, cfg.ValidateBasic()) } func TestStateSyncConfigValidateBasic(t *testing.T) { diff --git a/docs/references/config/config.toml.md b/docs/references/config/config.toml.md index d353d8a3e7..384f85bea2 100644 --- a/docs/references/config/config.toml.md +++ b/docs/references/config/config.toml.md @@ -1441,6 +1441,52 @@ to limit broadcasts to persistent peer nodes. For non-persistent peers, if enabled, a value of 10 is recommended based on experimental performance results using the default P2P configuration. +### mempool.dog_protocol_enabled +```toml +dog_protocol_enabled = true +``` + +| Value type | boolean | +|:--------------------|:--------| +| **Possible values** | `false` | +| | `true` | + +When set to `true`, it enables the DOG [gossip protocol](../../../specs/mempool/gossip) to reduce redundant +messages during transaction dissemination. It only works with `mempool.type = "flood"`, and it's not +compatible `mempool.experimental_max_gossip_connections_to_*_peers`. + +### mempool.dog_target_redundancy +```toml +dog_target_redundancy = 1 +``` + +| Value type | real | +|:--------------------|:-------| +| **Possible values** | > 0 | + +Used by the DOG protocol to set the desired transaction redundancy level for the node. For example, +a redundancy of 0.5 means that, for every two first-time transactions received, the node will +receive one duplicate transaction. Zero redundancy is disabled because it could render the node +isolated from transaction data. 
+ +Check out the issue [#4597](https://github.com/cometbft/cometbft/issues/4597) for discussions about +possible values. + +### mempool.dog_adjust_interval +```toml +dog_adjust_interval = 1000 +``` + +| Value type | integer | +|:--------------------|:-----------| +| **Possible values** | ≥ 1000 | + +Used by the DOG protocol to set how often it will attempt to adjust the redundancy level. The higher +the value, the longer it will take the node to reduce bandwidth and converge to a stable redundancy +level. In networks with high latency between nodes (> 500ms), it could be necessary to increase the +default value, as explained in the +[spec](https://github.com/cometbft/cometbft/blob/13d852b43068d2e19de0f307d2bc399b30c0ae68/spec/mempool/gossip/dog.md#when-to-adjust). + ## State synchronization State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine snapshot from peers instead of fetching and replaying historical blocks. It requires some peers in the network to take and serve state diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 87f5f894c9..0e66f72232 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -39,14 +39,15 @@ func (emptyMempool) Update( ) error { return nil } -func (emptyMempool) Flush() {} -func (emptyMempool) FlushAppConn() error { return nil } -func (emptyMempool) Contains(types.TxKey) bool { return false } -func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (emptyMempool) EnableTxsAvailable() {} -func (emptyMempool) TxsBytes() int64 { return 0 } -func (emptyMempool) TxsFront() *clist.CElement { return nil } -func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } +func (emptyMempool) Flush() {} +func (emptyMempool) FlushAppConn() error { return nil } +func (emptyMempool) Contains(types.TxKey) bool { return false } +func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan 
struct{}) } +func (emptyMempool) EnableTxsAvailable() {} +func (emptyMempool) TxsBytes() int64 { return 0 } +func (emptyMempool) TxsFront() *clist.CElement { return nil } +func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } +func (emptyMempool) GetSenders(types.TxKey) ([]p2p.ID, error) { return nil, nil } // ----------------------------------------------------------------------------- // mockProxyApp uses ABCIResponses to give the right results. diff --git a/mempool/clist_mempool.go b/mempool/clist_mempool.go index 7e3b4014fd..a34621fed9 100644 --- a/mempool/clist_mempool.go +++ b/mempool/clist_mempool.go @@ -20,7 +20,10 @@ import ( "github.com/cometbft/cometbft/types" ) -const defaultLane = "default" +const ( + noSender = p2p.ID("") + defaultLane = "default" +) // CListMempool is an ordered in-memory pool for transactions before they are // proposed in a consensus round. Transaction validity is checked using the @@ -152,6 +155,18 @@ func NewCListMempool( return mp } +func (mem *CListMempool) GetSenders(txKey types.TxKey) ([]p2p.ID, error) { + mem.txsMtx.RLock() + defer mem.txsMtx.RUnlock() + + elem, ok := mem.txsMap[txKey] + if !ok { + return nil, ErrTxNotFound + } + memTx := elem.Value.(*mempoolTx) + return memTx.Senders(), nil +} + func (mem *CListMempool) addToCache(tx types.Tx) bool { return mem.cache.Push(tx) } diff --git a/mempool/mempool.go b/mempool/mempool.go index 8ae3462c82..a2099423f9 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -11,7 +11,8 @@ import ( ) const ( - MempoolChannel = byte(0x30) + MempoolChannel = byte(0x30) + MempoolControlChannel = byte(0x31) // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind. PeerCatchupSleepIntervalMS = 100 @@ -104,6 +105,9 @@ type Mempool interface { // SizeBytes returns the total size of all txs in the mempool. SizeBytes() int64 + + // GetSenders returns the list of node IDs from which we receive the given transaction. 
+ GetSenders(txKey types.TxKey) ([]p2p.ID, error) } // PreCheckFunc is an optional filter executed before CheckTx and rejects @@ -166,6 +170,9 @@ type Entry interface { // IsSender returns whether we received the transaction from the given peer ID. IsSender(peerID p2p.ID) bool + + // Senders returns the list of registered peers that sent us the transaction. + Senders() []p2p.ID } // An iterator is used to iterate through the mempool entries. diff --git a/mempool/mempoolTx.go b/mempool/mempoolTx.go index a4b04e6a8e..7517af1cbe 100644 --- a/mempool/mempoolTx.go +++ b/mempool/mempoolTx.go @@ -50,3 +50,12 @@ func (memTx *mempoolTx) addSender(peerID p2p.ID) bool { } return false } + +func (memTx *mempoolTx) Senders() []p2p.ID { + senders := make([]p2p.ID, 0) + memTx.senders.Range(func(key, _ any) bool { + senders = append(senders, key.(p2p.ID)) + return true + }) + return senders +} diff --git a/mempool/metrics.gen.go b/mempool/metrics.gen.go index 3b361d7097..8c800b74c9 100644 --- a/mempool/metrics.gen.go +++ b/mempool/metrics.gen.go @@ -90,6 +90,24 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "active_outbound_connections", Help: "Number of connections being actively used for gossiping transactions (experimental feature).", }, labels).With(labelsAndValues...), + RecheckDurationSeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "recheck_duration_seconds", + Help: "Cumulative time spent rechecking transactions", + }, labels).With(labelsAndValues...), + DisabledRoutes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "disabled_routes", + Help: "Number of disabled routes.", + }, labels).With(labelsAndValues...), + Redundancy: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "redundancy", + Help: "Redundancy level.", + }, 
labels).With(labelsAndValues...), } } @@ -107,5 +125,8 @@ func NopMetrics() *Metrics { RecheckTimes: discard.NewCounter(), AlreadyReceivedTxs: discard.NewCounter(), ActiveOutboundConnections: discard.NewGauge(), + RecheckDurationSeconds: discard.NewGauge(), + DisabledRoutes: discard.NewGauge(), + Redundancy: discard.NewGauge(), } } diff --git a/mempool/metrics.go b/mempool/metrics.go index 91b19be0fb..590a042627 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -67,4 +67,13 @@ type Metrics struct { // Number of connections being actively used for gossiping transactions // (experimental feature). ActiveOutboundConnections metrics.Gauge + + // Cumulative time spent rechecking transactions + RecheckDurationSeconds metrics.Gauge + + // Number of disabled routes. + DisabledRoutes metrics.Gauge + + // Redundancy level. + Redundancy metrics.Gauge } diff --git a/mempool/mocks/mempool.go b/mempool/mocks/mempool.go index beb776011d..f5f41d21e6 100644 --- a/mempool/mocks/mempool.go +++ b/mempool/mocks/mempool.go @@ -96,6 +96,36 @@ func (_m *Mempool) FlushAppConn() error { return r0 } +// GetSenders provides a mock function with given fields: txKey +func (_m *Mempool) GetSenders(txKey types.TxKey) ([]p2p.ID, error) { + ret := _m.Called(txKey) + + if len(ret) == 0 { + panic("no return value specified for GetSenders") + } + + var r0 []p2p.ID + var r1 error + if rf, ok := ret.Get(0).(func(types.TxKey) ([]p2p.ID, error)); ok { + return rf(txKey) + } + if rf, ok := ret.Get(0).(func(types.TxKey) []p2p.ID); ok { + r0 = rf(txKey) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]p2p.ID) + } + } + + if rf, ok := ret.Get(1).(func(types.TxKey) error); ok { + r1 = rf(txKey) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // GetTxByHash provides a mock function with given fields: hash func (_m *Mempool) GetTxByHash(hash []byte) types.Tx { ret := _m.Called(hash) diff --git a/mempool/nop_mempool.go b/mempool/nop_mempool.go index b7909da04b..9ce0f0139b 100644 --- 
a/mempool/nop_mempool.go +++ b/mempool/nop_mempool.go @@ -80,6 +80,9 @@ func (*NopMempool) Size() int { return 0 } // SizeBytes always returns 0. func (*NopMempool) SizeBytes() int64 { return 0 } +// GetSenders always returns nil. +func (*NopMempool) GetSenders(_ types.TxKey) ([]p2p.ID, error) { return nil, nil } + // NopMempoolReactor is a mempool reactor that does nothing. type NopMempoolReactor struct { service.BaseService diff --git a/mempool/reactor.go b/mempool/reactor.go index 578694c682..f8cc914e86 100644 --- a/mempool/reactor.go +++ b/mempool/reactor.go @@ -9,13 +9,21 @@ import ( "golang.org/x/sync/semaphore" - protomem "github.com/cometbft/cometbft/api/cometbft/mempool/v1" + abcicli "github.com/cometbft/cometbft/abci/client" + protomem "github.com/cometbft/cometbft/api/cometbft/mempool/v2" cfg "github.com/cometbft/cometbft/config" + cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/log" + cmtsync "github.com/cometbft/cometbft/libs/sync" "github.com/cometbft/cometbft/p2p" "github.com/cometbft/cometbft/types" ) +// A number in the open interval (0, 100) representing a percentage of +// config.DOGTargetRedundancy. In the DOG protocol, it defines acceptable lower +// and upper bounds for redundancy levels as a deviation from the target value. +const targetRedundancyDeltaPercent = 10 + // Reactor handles mempool tx broadcasting amongst peers. // It maintains a map from peer ID to counter, to prevent gossiping txs to the // peers you received it from. @@ -27,6 +35,10 @@ type Reactor struct { waitSync atomic.Bool waitSyncCh chan struct{} // for signaling when to start receiving and sending txs + // DOG protocol: Control enabled/disabled routes for disseminating txs. + router *gossipRouter + redundancyControl *redundancyControl + // Semaphores to keep track of how many connections to peers are active for broadcasting // transactions. 
Each semaphore has a capacity that puts an upper bound on the number of // connections for different groups of peers. @@ -66,6 +78,11 @@ func (memR *Reactor) OnStart() error { if !memR.config.Broadcast { memR.Logger.Info("Tx broadcasting is disabled") } + if memR.config.DOGProtocolEnabled { + memR.router = newGossipRouter() + memR.redundancyControl = newRedundancyControl(memR.config) + go memR.redundancyControl.controlLoop(memR) + } return nil } @@ -79,6 +96,11 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { }, } + key := types.Tx(largestTx).Key() + haveTxMsg := protomem.Message{ + Sum: &protomem.Message_HaveTx{HaveTx: &protomem.HaveTx{TxKey: key[:]}}, + } + return []*p2p.ChannelDescriptor{ { ID: MempoolChannel, @@ -86,6 +108,12 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { RecvMessageCapacity: batchMsg.Size(), MessageType: &protomem.Message{}, }, + { + ID: MempoolControlChannel, + Priority: 10, + RecvMessageCapacity: haveTxMsg.Size(), + MessageType: &protomem.Message{}, + }, } } @@ -132,45 +160,137 @@ func (memR *Reactor) AddPeer(peer p2p.Peer) { } } +func (memR *Reactor) RemovePeer(peer p2p.Peer, _ any) { + if memR.router != nil { + // Remove all routes with peer as source or target and immediately + // adjust redundancy. + memR.router.resetRoutes(peer.ID()) + memR.redundancyControl.triggerAdjustment(memR) + memR.mempool.metrics.DisabledRoutes.Set(float64(memR.router.numRoutes())) + } +} + // Receive implements Reactor. // It adds any received transactions to the mempool. 
func (memR *Reactor) Receive(e p2p.Envelope) { - memR.Logger.Debug("Receive", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) - switch msg := e.Message.(type) { - case *protomem.Txs: - if memR.WaitSync() { - memR.Logger.Debug("Ignored message received while syncing", "msg", msg) - return + if memR.WaitSync() { + memR.Logger.Debug("Ignore message received while syncing", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) + return + } + + senderID := e.Src.ID() + + switch e.ChannelID { + case MempoolControlChannel: + switch msg := e.Message.(type) { + case *protomem.HaveTx: + txKey := types.TxKey(msg.GetTxKey()) + if len(txKey) == 0 { + memR.Logger.Error("Received empty HaveTx message from peer", "src", e.Src.ID()) + return + } + memR.Logger.Debug("Received HaveTx", "from", senderID, "txKey", txKey) + + if memR.router != nil { + // Get tx's list of senders. + senders, err := memR.mempool.GetSenders(txKey) + if err != nil || len(senders) == 0 || senders[0] == noSender { + // It is possible that tx got removed from the mempool. + memR.Logger.Debug("Received HaveTx but failed to get sender", "tx", txKey.Hash(), "err", err) + return + } + // Disable route with tx's first sender as source and peer as target. 
+ memR.router.disableRoute(senders[0], senderID) + + memR.Logger.Debug("Disable route", "source", senders[0], "target", senderID) + memR.mempool.metrics.DisabledRoutes.Set(float64(memR.router.numRoutes())) + } + + case *protomem.ResetRoute: + memR.Logger.Debug("Received Reset", "from", senderID) + if memR.router != nil { + memR.router.resetRandomRouteWithTarget(senderID) + memR.mempool.metrics.DisabledRoutes.Set(float64(memR.router.numRoutes())) + } + + default: + memR.Logger.Error("Unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) + memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message)) } - protoTxs := msg.GetTxs() - if len(protoTxs) == 0 { - memR.Logger.Error("Received empty Txs message from peer", "src", e.Src) + case MempoolChannel: + switch msg := e.Message.(type) { + case *protomem.Txs: + protoTxs := msg.GetTxs() + if len(protoTxs) == 0 { + memR.Logger.Error("Received empty Txs message from peer", "src", e.Src.ID()) + return + } + + memR.Logger.Debug("Received Txs", "from", senderID, "msg", e.Message) + for _, txBytes := range protoTxs { + _, _ = memR.TryAddTx(types.Tx(txBytes), e.Src) + } + + default: + memR.Logger.Error("Unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) + memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message)) return } + default: + memR.Logger.Error("Unknown channel", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) + memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message on channel: %T", e.Message)) + } - for _, txBytes := range protoTxs { - tx := types.Tx(txBytes) - _, err := memR.mempool.CheckTx(tx, e.Src.ID()) - if err != nil { - switch { - case errors.Is(err, ErrTxInCache): - memR.Logger.Debug("Tx already exists in cache", "tx", tx.Hash()) - case errors.As(err, &ErrMempoolIsFull{}): - // using debug level to avoid flooding when traffic is high - 
memR.Logger.Debug(err.Error()) - default: - memR.Logger.Info("Could not check tx", "tx", tx.Hash(), "err", err) + // broadcasting happens from go routines per peer +} + +// TryAddTx attempts to add an incoming transaction to the mempool. +// When the sender is nil, it means the transaction comes from an RPC endpoint. +func (memR *Reactor) TryAddTx(tx types.Tx, sender p2p.Peer) (*abcicli.ReqRes, error) { + senderID := noSender + if sender != nil { + senderID = sender.ID() + } + + reqRes, err := memR.mempool.CheckTx(tx, senderID) + if err != nil { + txKey := tx.Key() + switch { + case errors.Is(err, ErrTxInCache): + memR.Logger.Debug("Tx already exists in cache", "tx", txKey.Hash(), "sender", senderID) + if memR.redundancyControl != nil { + memR.redundancyControl.incDuplicateTxs() + if memR.redundancyControl.isHaveTxBlocked() { + return nil, err + } + ok := sender.Send(p2p.Envelope{ChannelID: MempoolControlChannel, Message: &protomem.HaveTx{TxKey: txKey[:]}}) + if !ok { + memR.Logger.Error("Failed to send HaveTx message", "peer", senderID, "txKey", txKey) + } else { + memR.Logger.Debug("Sent HaveTx message", "tx", txKey.Hash(), "peer", senderID) + // Block HaveTx and restart timer, during which time, sending HaveTx is not allowed. 
+ memR.redundancyControl.blockHaveTx() } } + return nil, err + + case errors.As(err, &ErrMempoolIsFull{}): + // using debug level to avoid flooding when traffic is high + memR.Logger.Debug(err.Error()) + return nil, err + + default: + memR.Logger.Info("Could not check tx", "tx", txKey.Hash(), "sender", senderID, "err", err) + return nil, err } - default: - memR.Logger.Error("Unknown message type", "src", e.Src, "chId", e.ChannelID, "msg", e.Message) - memR.Switch.StopPeerForError(e.Src, fmt.Errorf("mempool cannot handle message of type: %T", e.Message)) - return } - // broadcasting happens from go routines per peer + if memR.redundancyControl != nil { + memR.redundancyControl.incFirstTimeTxs() + } + + return reqRes, nil } func (memR *Reactor) EnableInOutTxs() { @@ -261,18 +381,35 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { // We are paying the cost of computing the transaction hash in // any case, even when logger level > debug. So it only once. // See: https://github.com/cometbft/cometbft/issues/4167 - txHash := entry.Tx().Hash() + txKey := entry.Tx().Key() + txHash := txKey.Hash() - // Do not send this transaction if we receive it from peer. if entry.IsSender(peer.ID()) { + // Do not send this transaction if we receive it from peer. memR.Logger.Debug("Skipping transaction, peer is sender", - "tx", log.NewLazySprintf("%X", txHash), "peer", peer.ID()) + "tx", txHash, "peer", peer.ID()) continue } + if memR.router != nil { + // Do not send if the route from the first sender to peer is disabled. + senders := entry.Senders() + if len(senders) > 0 && memR.router.isRouteDisabled(senders[0], peer.ID()) { + memR.Logger.Debug("Disabled route: do not send transaction to peer", + "tx", txHash, "peer", peer.ID(), "senders", senders) + continue + } + } + for { + // The entry may have been removed from the mempool since it was + // chosen at the beginning of the loop. Skip it if that's the case. 
+ if !memR.mempool.Contains(txKey) { + break + } + memR.Logger.Debug("Sending transaction to peer", - "tx", log.NewLazySprintf("%X", txHash), "peer", peer.ID()) + "tx", txHash, "peer", peer.ID()) success := peer.Send(p2p.Envelope{ ChannelID: MempoolChannel, @@ -283,7 +420,7 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { } memR.Logger.Debug("Failed sending transaction to peer", - "tx", log.NewLazySprintf("%X", txHash), "peer", peer.ID()) + "tx", txHash, "peer", peer.ID()) select { case <-time.After(PeerCatchupSleepIntervalMS * time.Millisecond): @@ -295,3 +432,216 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { } } } + +type gossipRouter struct { + mtx cmtsync.RWMutex + // A set of `source -> target` routes that are disabled for disseminating + // transactions, where source and target are node IDs. + disabledRoutes map[p2p.ID]map[p2p.ID]struct{} +} + +func newGossipRouter() *gossipRouter { + return &gossipRouter{ + disabledRoutes: make(map[p2p.ID]map[p2p.ID]struct{}), + } +} + +// disableRoute marks the route `source -> target` as disabled. +func (r *gossipRouter) disableRoute(source, target p2p.ID) { + if source == noSender || target == noSender { + // TODO: this shouldn't happen + return + } + + r.mtx.Lock() + defer r.mtx.Unlock() + + targets, ok := r.disabledRoutes[source] + if !ok { + targets = make(map[p2p.ID]struct{}) + } + targets[target] = struct{}{} + r.disabledRoutes[source] = targets +} + +// isRouteDisabled returns true iff the route source->target is disabled. +func (r *gossipRouter) isRouteDisabled(source, target p2p.ID) bool { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if targets, ok := r.disabledRoutes[source]; ok { + if _, ok := targets[target]; ok { + return true + } + } + return false +} + +// resetRoutes removes all disabled routes with peerID as source or target. +func (r *gossipRouter) resetRoutes(peerID p2p.ID) { + r.mtx.Lock() + defer r.mtx.Unlock() + + // Remove peer as source.
+ delete(r.disabledRoutes, peerID) + + // Remove peer as target. + for _, targets := range r.disabledRoutes { + delete(targets, peerID) + } +} + +// resetRandomRouteWithTarget removes a random disabled route that has the given +// target. +func (r *gossipRouter) resetRandomRouteWithTarget(target p2p.ID) { + r.mtx.Lock() + defer r.mtx.Unlock() + + sourcesWithTarget := make([]p2p.ID, 0) + for s, targets := range r.disabledRoutes { + if _, ok := targets[target]; ok { + sourcesWithTarget = append(sourcesWithTarget, s) + } + } + + if len(sourcesWithTarget) > 0 { + randomSource := sourcesWithTarget[cmtrand.Intn(len(sourcesWithTarget))] + delete(r.disabledRoutes[randomSource], target) + } +} + +// numRoutes returns the number of disabled routes in this node. Used for +// metrics. +func (r *gossipRouter) numRoutes() int { + r.mtx.RLock() + defer r.mtx.RUnlock() + + count := 0 + for _, targets := range r.disabledRoutes { + count += len(targets) + } + return count +} + +type redundancyControl struct { + // Pre-computed upper and lower bounds of accepted redundancy. + lowerBound float64 + upperBound float64 + + // Timer to adjust redundancy periodically. + adjustTicker *time.Ticker + adjustInterval time.Duration + + // Counters for calculating the redundancy level. + mtx cmtsync.RWMutex + firstTimeTxs int64 // number of transactions received for the first time + duplicateTxs int64 // number of duplicate transactions + + // If true, do not send HaveTx messages. 
+ haveTxBlocked atomic.Bool +} + +func newRedundancyControl(config *cfg.MempoolConfig) *redundancyControl { + adjustInterval := config.DOGAdjustInterval + targetRedundancyDeltaAbs := config.DOGTargetRedundancy * targetRedundancyDeltaPercent / 100 + return &redundancyControl{ + lowerBound: config.DOGTargetRedundancy - targetRedundancyDeltaAbs, + upperBound: config.DOGTargetRedundancy + targetRedundancyDeltaAbs, + adjustTicker: time.NewTicker(adjustInterval), + adjustInterval: adjustInterval, + } +} + +func (rc *redundancyControl) adjustRedundancy(memR *Reactor) { + // Compute current redundancy level and reset transaction counters. + redundancy := rc.currentRedundancy() + if redundancy < 0 { + // There were no transactions during the last iteration. Do not adjust. + return + } + + // If redundancy level is low, ask a random peer for more transactions. + if redundancy < rc.lowerBound { + memR.Logger.Debug("TX redundancy BELOW lower limit: increase it (send Reset)", "redundancy", redundancy) + // Send Reset message to random peer. + randomPeer := memR.Switch.Peers().Random() + if randomPeer != nil { + ok := randomPeer.Send(p2p.Envelope{ChannelID: MempoolControlChannel, Message: &protomem.ResetRoute{}}) + if !ok { + memR.Logger.Error("Failed to send Reset message", "peer", randomPeer.ID()) + } + } + } + + // If redundancy level is high, ask peers for less txs. + if redundancy >= rc.upperBound { + memR.Logger.Debug("TX redundancy ABOVE upper limit: decrease it (unblock HaveTx)", "redundancy", redundancy) + // Unblock HaveTx messages. + rc.haveTxBlocked.Store(false) + } + + // Update metrics. + memR.mempool.metrics.Redundancy.Set(redundancy) +} + +func (rc *redundancyControl) controlLoop(memR *Reactor) { + for { + select { + case <-rc.adjustTicker.C: + rc.adjustRedundancy(memR) + case <-memR.Quit(): + return + } + } +} + +// currentRedundancy returns the current redundancy level and resets the +// counters. If there are no transactions, return -1. 
If firstTimeTxs is 0, +// return upperBound. If duplicateTxs is 0, return 0. +func (rc *redundancyControl) currentRedundancy() float64 { + rc.mtx.Lock() + defer rc.mtx.Unlock() + + if rc.firstTimeTxs+rc.duplicateTxs == 0 { + return -1 + } + + redundancy := rc.upperBound + if rc.firstTimeTxs != 0 { + redundancy = float64(rc.duplicateTxs) / float64(rc.firstTimeTxs) + } + + // Reset counters. + rc.firstTimeTxs, rc.duplicateTxs = 0, 0 + + return redundancy +} + +func (rc *redundancyControl) incDuplicateTxs() { + rc.mtx.Lock() + rc.duplicateTxs++ + rc.mtx.Unlock() +} + +func (rc *redundancyControl) incFirstTimeTxs() { + rc.mtx.Lock() + rc.firstTimeTxs++ + rc.mtx.Unlock() +} + +func (rc *redundancyControl) isHaveTxBlocked() bool { + return rc.haveTxBlocked.Load() +} + +// blockHaveTx blocks sending HaveTx messages and restarts the timer that +// adjusts redundancy. +func (rc *redundancyControl) blockHaveTx() { + rc.haveTxBlocked.Store(true) + // Wait until next adjustment to check if HaveTx messages should be unblocked. 
+ rc.adjustTicker.Reset(rc.adjustInterval) +} + +func (rc *redundancyControl) triggerAdjustment(memR *Reactor) { + rc.adjustRedundancy(memR) + rc.adjustTicker.Reset(rc.adjustInterval) +} diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index 58f33d0442..7b16c7aad9 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -14,7 +14,7 @@ import ( "github.com/cometbft/cometbft/abci/example/kvstore" abci "github.com/cometbft/cometbft/abci/types" - memproto "github.com/cometbft/cometbft/api/cometbft/mempool/v1" + memproto "github.com/cometbft/cometbft/api/cometbft/mempool/v2" cfg "github.com/cometbft/cometbft/config" cmtrand "github.com/cometbft/cometbft/internal/rand" "github.com/cometbft/cometbft/libs/log" @@ -632,3 +632,176 @@ func TestMempoolVectors(t *testing.T) { require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) } } + +// Verify that counting of duplicates and first time transactions work +// The test sends transactions from node2 to node1 twice. +// The second time they will get rejected. 
+func TestDOGTransactionCount(t *testing.T) { + config := cfg.TestConfig() + config.Mempool.DOGProtocolEnabled = true + + // Put the interval to a higher value to make sure the values don't get reset + config.Mempool.DOGAdjustInterval = 15 * time.Second + reactors, _ := makeAndConnectReactors(config, 2, nil) + + // create random transactions + txs := newUniqueTxs(numTxs) + secondNodeID := reactors[1].Switch.NodeInfo().ID() + secondNode := reactors[0].Switch.Peers().Get(secondNodeID) + + for _, tx := range txs { + _, err := reactors[0].TryAddTx(tx, secondNode) + require.NoError(t, err) + } + + require.Equal(t, int64(len(txs)), reactors[0].redundancyControl.firstTimeTxs) + for _, tx := range txs { + _, err := reactors[0].TryAddTx(tx, secondNode) + // The transaction is in cache, hence the Error + require.Error(t, err) + } + require.Equal(t, int64(len(txs)), reactors[0].redundancyControl.duplicateTxs) + + reactors[0].redundancyControl.triggerAdjustment(reactors[0]) + // This is done to give enough time for the route changes to take effect + // If the test starts failing, revisit this value + time.Sleep(100 * time.Millisecond) + + reactors[0].redundancyControl.mtx.RLock() + dupTx := reactors[0].redundancyControl.duplicateTxs + firstTimeTx := reactors[0].redundancyControl.firstTimeTxs + reactors[0].redundancyControl.mtx.RUnlock() + + // Now the counters should be reset + require.Equal(t, int64(0), dupTx) + require.Equal(t, int64(0), firstTimeTx) +} + +// Testing the disabled route between two nodes +// AS the description of DOG in the issue: +// The core idea of the protocol is the following. +// Consider a node A that receives from node B a transaction +// that it already has. Let's assume B itself had received +// the transaction from C. The fact that A received from B +// a transaction it already has means that there must exist +// a cycle in the network topology. +// Therefore, a tells B to stop sending transactions B would be getting from C +// (i.e. 
A tells B to disable route C → A → B). +// We then reduce the redundancy level forcing A to tell B to re-enable the routes. +func TestDOGDisabledRoute(t *testing.T) { + config := cfg.TestConfig() + config.Mempool.DOGProtocolEnabled = true + + // Put the interval to a higher value to make sure the values don't get reset + config.Mempool.DOGAdjustInterval = 35 * time.Second + reactors, _ := makeAndConnectReactors(config, 3, nil) + + secondNodeID := reactors[1].Switch.NodeInfo().ID() + secondNode := reactors[0].Switch.Peers().Get(secondNodeID) + secondNodeFromThird := reactors[2].Switch.Peers().Get(secondNodeID) + + thirdNodeID := reactors[2].Switch.NodeInfo().ID() + thirdNodeFromFirst := reactors[0].Switch.Peers().Get(thirdNodeID) + + firstNodeID := reactors[0].Switch.NodeInfo().ID() + firstNodeFromThird := reactors[2].Switch.Peers().Get(firstNodeID) + + // create random transactions + txs := newUniqueTxs(numTxs) + // Add transactions to node 3 from node 2 + // node3.senders[tx] = node2 + for _, tx := range txs { + _, err := reactors[2].TryAddTx(tx, secondNodeFromThird) + require.NoError(t, err) + } + + // Add the same transactions to node 1 from node 2 + for _, tx := range txs { + _, err := reactors[0].TryAddTx(tx, secondNode) + require.NoError(t, err) + } + + // Trying to add the same transactions node 1 has received + // from node 2, but this time from node 3 + // Node 1 should now ask node 3 to disable the route between + // a node that has sent this tx to node 3(node 2) and node1 + for _, tx := range txs { + _, err := reactors[0].TryAddTx(tx, thirdNodeFromFirst) + // The transaction is in cache, hence the Error + require.ErrorIs(t, err, ErrTxInCache) + } + + reactors[0].redundancyControl.triggerAdjustment(reactors[0]) + // Wait for the redundancy adjustment to kick in + // If the test starts failing, revisit this value + time.Sleep(100 * time.Second) + + reactors[2].router.mtx.RLock() + // Make sure that Node 3 has at least one disabled route + 
require.Greater(t, len(reactors[2].router.disabledRoutes), 0) + + require.True(t, reactors[2].router.isRouteDisabled(secondNodeFromThird.ID(), firstNodeFromThird.ID())) + reactors[2].router.mtx.RUnlock() + + // This will force Node 3 to delete all disabled routes + reactors[2].Switch.StopPeerGracefully(secondNode) + + // The route should not be there + require.False(t, reactors[2].router.isRouteDisabled(secondNodeFromThird.ID(), firstNodeFromThird.ID())) +} + +// When a peer disconnects we want to remove all disabled route info +// for that peer only. +func TestDOGRemoveDisabledRoutesOnDisconnect(t *testing.T) { + config := cfg.TestConfig() + config.Mempool.DOGProtocolEnabled = true + + reactors, _ := makeAndConnectReactors(config, 4, nil) + + fourthNodeID := reactors[3].Switch.NodeInfo().ID() + + secondNodeID := reactors[1].Switch.NodeInfo().ID() + secondNode := reactors[0].Switch.Peers().Get(secondNodeID) + + thirdNodeID := reactors[2].Switch.NodeInfo().ID() + + reactors[0].router.disableRoute(secondNodeID, fourthNodeID) + reactors[0].router.disableRoute(thirdNodeID, fourthNodeID) + reactors[0].router.disableRoute(thirdNodeID, secondNodeID) + + require.True(t, reactors[0].router.isRouteDisabled(secondNodeID, fourthNodeID)) + require.True(t, reactors[0].router.isRouteDisabled(thirdNodeID, fourthNodeID)) + require.True(t, reactors[0].router.isRouteDisabled(thirdNodeID, secondNodeID)) + + reactors[0].Switch.StopPeerGracefully(secondNode) + + require.False(t, reactors[0].router.isRouteDisabled(secondNodeID, fourthNodeID)) + require.False(t, reactors[0].router.isRouteDisabled(thirdNodeID, secondNodeID)) + require.True(t, reactors[0].router.isRouteDisabled(thirdNodeID, fourthNodeID)) +} + +// Test redundancy values depending on Number of transactions. 
+func TestDOGTestRedundancyCalculation(t *testing.T) { + config := cfg.TestConfig() + config.Mempool.DOGProtocolEnabled = true + config.Mempool.DOGTargetRedundancy = 0.5 + reactors, _ := makeAndConnectReactors(config, 1, nil) + + redundancy := reactors[0].redundancyControl.currentRedundancy() + require.Equal(t, redundancy, float64(-1)) + + reactors[0].redundancyControl.firstTimeTxs = 10 + reactors[0].redundancyControl.duplicateTxs = 0 + redundancy = reactors[0].redundancyControl.currentRedundancy() + require.Equal(t, redundancy, float64(0)) + + reactors[0].redundancyControl.duplicateTxs = 1000 + reactors[0].redundancyControl.firstTimeTxs = 10 + redundancy = reactors[0].redundancyControl.currentRedundancy() + require.Greater(t, redundancy, config.Mempool.DOGTargetRedundancy) + + reactors[0].redundancyControl.duplicateTxs = 1000 + reactors[0].redundancyControl.firstTimeTxs = 0 + redundancy = reactors[0].redundancyControl.currentRedundancy() + require.Equal(t, redundancy, reactors[0].redundancyControl.upperBound) +} diff --git a/mempool/types.go b/mempool/types.go index ee3f694b71..2c93e5f1ed 100644 --- a/mempool/types.go +++ b/mempool/types.go @@ -1,11 +1,13 @@ package mempool import ( - memprotos "github.com/cometbft/cometbft/api/cometbft/mempool/v1" + memprotos "github.com/cometbft/cometbft/api/cometbft/mempool/v2" "github.com/cometbft/cometbft/types" ) var ( _ types.Wrapper = &memprotos.Txs{} + _ types.Wrapper = &memprotos.HaveTx{} + _ types.Wrapper = &memprotos.ResetRoute{} _ types.Unwrapper = &memprotos.Message{} ) diff --git a/node/node.go b/node/node.go index 51eeee2200..162a42019a 100644 --- a/node/node.go +++ b/node/node.go @@ -1041,7 +1041,7 @@ func makeNodeInfo( Channels: []byte{ bc.BlocksyncChannel, cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, - mempl.MempoolChannel, + mempl.MempoolChannel, mempl.MempoolControlChannel, evidence.EvidenceChannel, statesync.SnapshotChannel, statesync.ChunkChannel, }, diff --git a/node/node_test.go 
b/node/node_test.go index dd041353e5..6141a243eb 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -475,7 +475,7 @@ func TestNodeNewNodeCustomReactors(t *testing.T) { cr := p2pmock.NewReactor() cr.Channels = []*conn.ChannelDescriptor{ { - ID: byte(0x31), + ID: byte(0xff), Priority: 5, SendQueueCapacity: 100, RecvMessageCapacity: 100, @@ -513,6 +513,7 @@ func TestNodeNewNodeCustomReactors(t *testing.T) { channels := n.NodeInfo().(p2p.DefaultNodeInfo).Channels assert.Contains(t, channels, mempl.MempoolChannel) + assert.Contains(t, channels, mempl.MempoolControlChannel) assert.Contains(t, channels, cr.Channels[0].ID) } diff --git a/proto/README.md b/proto/README.md index 92940bfe9f..b81066e4ea 100644 --- a/proto/README.md +++ b/proto/README.md @@ -34,21 +34,21 @@ is published in packages suffixed with `.v1`. Earlier revisions of the definitions, where they differed, are provided alongside in `.v1beta`_N_ packages. The correspondence between package suffixes and releases is as follows: -| Domain | 0.34 | 0.37 | 0.38 | 1.0 | -|-----------------|-----------|-----------|-----------|------| -| `abci` | `v1beta1` | `v1beta2` | `v1beta3` | `v1` | -| `blocksync` | | `v1beta1` | `v1` | `v1` | -| `consensus` | `v1beta1` | `v1beta1` | `v1beta1` | `v1` | -| `crypto` | `v1` | `v1` | `v1` | `v1` | -| `libs/bits` | `v1` | `v1` | `v1` | `v1` | -| `mempool` | `v1` | `v1` | `v1` | `v1` | -| `p2p` | `v1` | `v1` | `v1` | `v1` | -| `privval` | `v1beta1` | `v1beta1` | `v1beta2` | `v1` | -| `rpc/grpc`[^1] | `v1beta1` | `v1beta2` | `v1beta3` | | -| `state` | `v1beta1` | `v1beta2` | `v1beta3` | `v1` | -| `statesync` | `v1` | `v1` | `v1` | `v1` | -| `types` | `v1beta1` | `v1beta2` | `v1` | `v1` | -| `version` | `v1` | `v1` | `v1` | `v1` | +| Domain | 0.34 | 0.37 | 0.38 | 1.0 | main | +| -------------- | --------- | --------- | --------- | ---- | ---- | +| `abci` | `v1beta1` | `v1beta2` | `v1beta3` | `v1` | `v1` | +| `blocksync` | | `v1beta1` | `v1` | `v1` | `v1` | +| `consensus` | 
`v1beta1` | `v1beta1` | `v1beta1` | `v1` | `v1` | +| `crypto` | `v1` | `v1` | `v1` | `v1` | `v1` | +| `libs/bits` | `v1` | `v1` | `v1` | `v1` | `v1` | +| `mempool` | `v1` | `v1` | `v1` | `v1` | `v2` | +| `p2p` | `v1` | `v1` | `v1` | `v1` | `v1` | +| `privval` | `v1beta1` | `v1beta1` | `v1beta2` | `v1` | `v1` | +| `rpc/grpc`[^1] | `v1beta1` | `v1beta2` | `v1beta3` | | | +| `state` | `v1beta1` | `v1beta2` | `v1beta3` | `v1` | `v1` | +| `statesync` | `v1` | `v1` | `v1` | `v1` | `v1` | +| `types` | `v1beta1` | `v1beta2` | `v1` | `v1` | `v1` | +| `version` | `v1` | `v1` | `v1` | `v1` | `v1` | [^1]: Retired in 1.0 diff --git a/proto/cometbft/mempool/v2/types.proto b/proto/cometbft/mempool/v2/types.proto new file mode 100644 index 0000000000..1f568cfd63 --- /dev/null +++ b/proto/cometbft/mempool/v2/types.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package cometbft.mempool.v2; + +option go_package = "github.com/cometbft/cometbft/api/cometbft/mempool/v2"; + +// Txs contains a list of transaction from the mempool. +message Txs { + repeated bytes txs = 1; +} + +// HaveTx is sent by the DOG protocol to signal a peer that the sender already +// has a transaction. +message HaveTx { + bytes tx_key = 1; +} + +// ResetRoute is sent by the DOG protocol to signal a peer to reset a (random) +// route to the sender. +message ResetRoute { +} + +// Message is an abstract mempool message. +message Message { + // Sum of all possible messages. 
+ oneof sum { + Txs txs = 1; + HaveTx have_tx = 2; + ResetRoute reset_route = 3; + } +} diff --git a/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/main.json b/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/main.json index e43d8a67b1..63f8a0da7e 100644 --- a/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/main.json +++ b/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/main.json @@ -3201,7 +3201,7 @@ "uid": "$datasource" }, "editorMode": "code", - "expr": "sum(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\"mempool_Txs|v1_Txs|mempool_Tx|v1_Tx|v1_Message|v1_WantTx|v1_SeenTx|v1_HaveTx|v1_Reset\"}[$__rate_interval])/1024) by (job)", + "expr": "sum(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\".._Txs|.._Tx|.._WantTx|.._SeenTx|.._HaveTx|.._ResetRoute\"}[$__rate_interval])/1024) by (job)", "hide": false, "legendFormat": "{{job}}", "range": true, @@ -3298,7 +3298,7 @@ "uid": "$datasource" }, "editorMode": "code", - "expr": "sum(rate(cometbft_p2p_message_send_bytes_total{message_type=~\"mempool_Txs|v1_Txs|mempool_Tx|v1_Tx|v1_Message|v1_WantTx|v1_SeenTx|v1_HaveTx|v1_Reset\"}[$__rate_interval])/1024) by (job)", + "expr": "sum(rate(cometbft_p2p_message_send_bytes_total{message_type=~\".._Txs|.._Tx|.._WantTx|.._SeenTx|.._HaveTx|.._ResetRoute\"}[$__rate_interval])/1024) by (job)", "legendFormat": "{{job}}", "range": true, "refId": "A" diff --git a/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/mempool_compact.json b/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/mempool_compact.json index 818662f09c..26b5701bf8 100644 --- a/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/mempool_compact.json +++ b/test/e2e/monitoring/config-grafana/provisioning/dashboards-data/mempool_compact.json @@ -651,7 +651,7 @@ }, "disableTextWrap": false, "editorMode": "code", - "expr": 
"sum(rate(cometbft_p2p_message_send_bytes_total{message_type=~\"v1_Txs|v1_SeenTx|v1_WantTx\"}[$__rate_interval]))/1024", + "expr": "sum(rate(cometbft_p2p_message_send_bytes_total{message_type=~\".._Txs|.._Tx|.._WantTx|.._SeenTx|.._HaveTx|.._ResetRoute\"}[$__rate_interval]))/1024", "fullMetaSearch": false, "hide": false, "includeNullMetadata": true, @@ -813,7 +813,7 @@ "uid": "$datasource" }, "editorMode": "code", - "expr": "(sum(sum(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\"v1_Txs|v1_SeenTx|v1_WantTx\"}[$__rate_interval])) by (message_type)))/1024", + "expr": "(sum(sum(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\".._Txs|.._Tx|.._WantTx|.._SeenTx|.._HaveTx|.._ResetRoute\"}[$__rate_interval])) by (message_type)))/1024", "hide": false, "legendFormat": "mempool_*", "range": true, @@ -1186,7 +1186,7 @@ "uid": "$datasource" }, "editorMode": "code", - "expr": "(sum(rate(cometbft_p2p_message_send_bytes_total{message_type=~\"v1_Txs|v1_Message|v1_SeenTx|v1_WantTx\", job=\"validator001\"}[$__rate_interval])))/1024", + "expr": "(sum(rate(cometbft_p2p_message_send_bytes_total{message_type=~\".._Txs|.._Tx|.._WantTx|.._SeenTx|.._HaveTx|.._ResetRoute\", job=\"validator001\"}[$__rate_interval])))/1024", "legendFormat": "validator receiving all load", "range": true, "refId": "A" @@ -1197,7 +1197,7 @@ "uid": "$datasource" }, "editorMode": "code", - "expr": "(avg(rate(cometbft_p2p_message_send_bytes_total{message_type=~\"v1_Txs|v1_Message|v1_SeenTx|v1_WantTx\", job!=\"validator001\"}[$__rate_interval])))/1024", + "expr": "(avg(rate(cometbft_p2p_message_send_bytes_total{message_type=~\".._Txs|.._Tx|.._WantTx|.._SeenTx|.._HaveTx|.._ResetRoute\", job!=\"validator001\"}[$__rate_interval])))/1024", "hide": false, "legendFormat": "avg. 
of other validators", "range": true, @@ -1336,7 +1336,7 @@ "uid": "$datasource" }, "editorMode": "code", - "expr": "(sum(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\"v1_Txs|v1_Message|v1_WantTx|v1_SeenTx\", job=\"validator001\"}[$__rate_interval])))/1024", + "expr": "(sum(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\".._Txs|.._Tx|.._WantTx|.._SeenTx|.._HaveTx|.._ResetRoute\", job=\"validator001\"}[$__rate_interval])))/1024", "hide": false, "legendFormat": "validator receiving all load", "range": true, @@ -1348,7 +1348,7 @@ "uid": "$datasource" }, "editorMode": "code", - "expr": "(avg(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\"v1_Txs|v1_Message|v1_WantTx|v1_SeenTx\", job!=\"validator001\"}[$__rate_interval])))/1024", + "expr": "(avg(rate(cometbft_p2p_message_receive_bytes_total{message_type=~\".._Txs|.._Tx|.._WantTx|.._SeenTx|.._HaveTx|.._ResetRoute\", job!=\"validator001\"}[$__rate_interval])))/1024", "hide": false, "legendFormat": "avg. of other validators", "range": true, diff --git a/types/tx.go b/types/tx.go index 3cbcf3319d..11b00f7061 100644 --- a/types/tx.go +++ b/types/tx.go @@ -39,6 +39,10 @@ func (tx Tx) String() string { return fmt.Sprintf("Tx{%X}", []byte(tx)) } +func (txKey TxKey) Hash() []byte { + return txKey[:] +} + // Txs is a slice of Tx. type Txs []Tx From 8c68b3684c48ee51b239c424a0b95c3407006efd Mon Sep 17 00:00:00 2001 From: Jasmina Malicevic Date: Wed, 11 Dec 2024 18:19:45 +0100 Subject: [PATCH 12/14] fix(mempool): Decreased the sleep in `TestDOGDisabledRoutes` (#4646) A mistake in the tests got merged with the PR merging the DOG protocol into main. `TestDOGDisabledRoutes` was put asleep for 100s instead of 100ms. 
--- #### PR checklist - [ ] Tests written/updated - [ ] Changelog entry added in `.changelog` (we use [unclog](https://github.com/informalsystems/unclog) to manage our changelog) - [ ] Updated relevant documentation (`docs/` or `spec/`) and code comments --- mempool/reactor_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index 7b16c7aad9..a3cf7f840e 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -734,7 +734,7 @@ func TestDOGDisabledRoute(t *testing.T) { reactors[0].redundancyControl.triggerAdjustment(reactors[0]) // Wait for the redundancy adjustment to kick in // If the test starts failing, revisit this value - time.Sleep(100 * time.Second) + time.Sleep(100 * time.Millisecond) reactors[2].router.mtx.RLock() // Make sure that Node 3 has at least one disabled route From b52e01ce164ec12653d012bbba5a39a72d3ebbe9 Mon Sep 17 00:00:00 2001 From: Jasmina Malicevic Date: Fri, 13 Dec 2024 05:28:36 +0100 Subject: [PATCH 13/14] fix(mempool/tests): Fixes flakiness in a test related to mempool lanes. (#4655) Closes #4654 --- mempool/clist_mempool_test.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/mempool/clist_mempool_test.go b/mempool/clist_mempool_test.go index 51bc961f35..6bcabf4bac 100644 --- a/mempool/clist_mempool_test.go +++ b/mempool/clist_mempool_test.go @@ -705,15 +705,23 @@ func TestMempoolTxsBytes(t *testing.T) { mp.Flush() assert.EqualValues(t, 0, mp.SizeBytes()) + require.NotEqual(t, len(mp.sortedLanes), 0) // 5. ErrLaneIsFull is returned when/if the limit on the lane bytes capacity is reached. + + // According to the logic of assigning transactions in kvstore, + // a lane is assigned only if the key is an integer. + // We make sure that the transaction here goes to the default lane + // so that the lane overflows. 
So tx3 and tx4 go to the same lane laneMaxBytes := int(cfg.Mempool.MaxTxsBytes) / len(mp.sortedLanes) - tx3 := kvstore.NewRandomTx(laneMaxBytes) + tx3 := kvstore.NewTx("B", cmtrand.Str(laneMaxBytes-2)) + rr, err := mp.CheckTx(tx3, "") require.NoError(t, err) require.NoError(t, rr.Error()) - tx4 := kvstore.NewRandomTx(10) + tx4 := kvstore.NewTx("A", "10") rr, err = mp.CheckTx(tx4, "") + require.NoError(t, err) require.ErrorAs(t, rr.Error(), &ErrLaneIsFull{}) From 8ba4960ab7dd9b541eeecf01e6d0d536f0909137 Mon Sep 17 00:00:00 2001 From: Daniel Date: Thu, 26 Sep 2024 13:02:37 +0200 Subject: [PATCH 14/14] test(mempool): enhanced `TestReactorNoBroadcastToSender` (#4127) The test should be able now to catch the error introduced by https://github.com/cometbft/cometbft/pull/3657. ---- To see how it would break, please checkout the branch `cason/test-nobroadcastsender`, then: ``` $ cd mempool $ go test -v -run TestReactorNoBroadcastToSender ``` And wait it to fail, after 2 minutes. You are going to see the busy-loop around, logs entries starting with `Skipping tx`, forever for the same transaction. Important: the test unit only works in the traditional mempool design, without lanes. We should consider adding a variant of it that should also pass when lanes are considered. 
--- - [x] Tests written/updated - [ ] Changelog entry added in `.changelog` (we use [unclog](https://github.com/informalsystems/unclog) to manage our changelog) - [ ] Updated relevant documentation (`docs/` or `spec/`) and code comments --------- Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- mempool/reactor_test.go | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index a3cf7f840e..987c8cf362 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -128,7 +128,7 @@ func TestReactorConcurrency(t *testing.T) { func TestReactorNoBroadcastToSender(t *testing.T) { config := cfg.TestConfig() const n = 2 - reactors, _ := makeAndConnectReactors(config, n, nil) + reactors, _ := makeAndConnectReactorsNoLanes(config, n, nil) defer func() { for _, r := range reactors { if err := r.Stop(); err != nil { @@ -151,6 +151,7 @@ func TestReactorNoBroadcastToSender(t *testing.T) { // The second peer sends some transactions to the first peer secondNodeID := reactors[1].Switch.NodeInfo().ID() + secondNode := reactors[0].Switch.Peers().Get(secondNodeID) for i, tx := range txs { shouldBroadcast := cmtrand.Bool() || // random choice // Force shouldBroadcast == true to ensure that @@ -162,11 +163,11 @@ func TestReactorNoBroadcastToSender(t *testing.T) { if !shouldBroadcast { // From the second peer => should not be broadcast - _, err := reactors[0].mempool.CheckTx(tx, secondNodeID) + _, err := reactors[0].TryAddTx(tx, secondNode) require.NoError(t, err) } else { // Emulate a tx received via RPC => should broadcast - _, err := reactors[0].mempool.CheckTx(tx, "") + _, err := reactors[0].TryAddTx(tx, nil) require.NoError(t, err) txsToBroadcast = append(txsToBroadcast, tx) } @@ -493,13 +494,18 @@ func mempoolLogger(level string) *log.Logger { } // makeReactors creates n mempool reactors. 
-func makeReactors(config *cfg.Config, n int, logger *log.Logger) []*Reactor { +func makeReactors(config *cfg.Config, n int, logger *log.Logger, lanesEnabled bool) []*Reactor { if logger == nil { logger = mempoolLogger("info") } reactors := make([]*Reactor, n) for i := 0; i < n; i++ { - app := kvstore.NewInMemoryApplication() + var app *kvstore.Application + if lanesEnabled { + app = kvstore.NewInMemoryApplication() + } else { + app = kvstore.NewInMemoryApplicationWithoutLanes() + } cc := proxy.NewLocalClientCreator(app) mempool, cleanup := newMempoolWithApp(cc) defer cleanup() @@ -522,15 +528,21 @@ func connectReactors(config *cfg.Config, reactors []*Reactor, connect func([]*p2 return p2p.StartAndConnectSwitches(switches, connect) } +func makeAndConnectReactorsNoLanes(config *cfg.Config, n int, logger *log.Logger) ([]*Reactor, []*p2p.Switch) { + reactors := makeReactors(config, n, logger, false) + switches := connectReactors(config, reactors, p2p.Connect2Switches) + return reactors, switches +} + func makeAndConnectReactors(config *cfg.Config, n int, logger *log.Logger) ([]*Reactor, []*p2p.Switch) { - reactors := makeReactors(config, n, logger) + reactors := makeReactors(config, n, logger, true) switches := connectReactors(config, reactors, p2p.Connect2Switches) return reactors, switches } // connect N mempool reactors through N switches as a star centered in c. func makeAndConnectReactorsStar(config *cfg.Config, c, n int, logger *log.Logger) ([]*Reactor, []*p2p.Switch) { - reactors := makeReactors(config, n, logger) + reactors := makeReactors(config, n, logger, true) switches := connectReactors(config, reactors, p2p.ConnectStarSwitches(c)) return reactors, switches }