
Commit 393ba74

refactor(block): log error on data verification failed (#2624)
## Overview

Looks like we were never logging the error when data verification failed. This changes isValidSignedData to return an error with the cause instead of true/false.
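For readers skimming the diff below, here is a minimal, self-contained sketch of the pattern being applied — simplified types and names, not the actual ev-node code: a boolean validator hides why verification failed, while an error-returning one surfaces the cause so the caller can log it.

```go
package main

import (
	"errors"
	"fmt"
)

type signedData struct {
	payload []byte
	sig     []byte
}

// Before: the caller only learns "valid or not" and cannot log a cause.
func isValid(sd *signedData) bool {
	if sd == nil || sd.payload == nil {
		return false
	}
	return len(sd.sig) > 0 // stand-in for real signature verification
}

// After: the caller learns why validation failed and can log it.
func assertValid(sd *signedData) error {
	if sd == nil || sd.payload == nil {
		return errors.New("empty signed data")
	}
	if len(sd.sig) == 0 { // stand-in for real signature verification
		return errors.New("invalid signature")
	}
	return nil
}

func main() {
	_ = isValid(nil) // old style: no information about the failure

	if err := assertValid(&signedData{payload: []byte("tx")}); err != nil {
		fmt.Println("invalid data signature:", err) // cause is now visible in logs
	}
}
```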
1 parent c5d00c5 commit 393ba74


5 files changed, +157 -149 lines changed


block/manager.go

Lines changed: 0 additions & 19 deletions
@@ -1127,22 +1127,3 @@ func (m *Manager) SaveCache() error {
 
 	return nil
 }
-
-// isValidSignedData returns true if the data signature is valid for the expected sequencer.
-func (m *Manager) isValidSignedData(signedData *types.SignedData) bool {
-	if signedData == nil || signedData.Txs == nil {
-		return false
-	}
-
-	if err := m.assertUsingExpectedSingleSequencer(signedData.Signer.Address); err != nil {
-		return false
-	}
-
-	dataBytes, err := signedData.Data.MarshalBinary()
-	if err != nil {
-		return false
-	}
-
-	valid, err := signedData.Signer.PubKey.Verify(dataBytes, signedData.Signature)
-	return err == nil && valid
-}

block/manager_test.go

Lines changed: 0 additions & 100 deletions
@@ -410,106 +410,6 @@ func TestGetDataSignature_NilSigner(t *testing.T) {
 	require.ErrorContains(err, "signer is nil; cannot sign data")
 }
 
-// TestIsValidSignedData covers valid, nil, wrong proposer, and invalid signature cases for isValidSignedData.
-func TestIsValidSignedData(t *testing.T) {
-	require := require.New(t)
-	privKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 256)
-	require.NoError(err)
-	testSigner, err := noopsigner.NewNoopSigner(privKey)
-	require.NoError(err)
-	proposerAddr, err := testSigner.GetAddress()
-	require.NoError(err)
-	gen := genesispkg.NewGenesis(
-		"testchain",
-		1,
-		time.Now(),
-		proposerAddr,
-	)
-	m := &Manager{
-		signer:  testSigner,
-		genesis: gen,
-	}
-
-	t.Run("valid signed data", func(t *testing.T) {
-		batch := &types.Data{
-			Txs: types.Txs{types.Tx("tx1"), types.Tx("tx2")},
-		}
-		sig, err := m.getDataSignature(batch)
-		require.NoError(err)
-		pubKey, err := m.signer.GetPublic()
-		require.NoError(err)
-		signedData := &types.SignedData{
-			Data:      *batch,
-			Signature: sig,
-			Signer: types.Signer{
-				PubKey:  pubKey,
-				Address: proposerAddr,
-			},
-		}
-		assert.True(t, m.isValidSignedData(signedData))
-	})
-
-	t.Run("nil signed data", func(t *testing.T) {
-		assert.False(t, m.isValidSignedData(nil))
-	})
-
-	t.Run("nil Txs", func(t *testing.T) {
-		signedData := &types.SignedData{
-			Data: types.Data{},
-			Signer: types.Signer{
-				Address: proposerAddr,
-			},
-		}
-		signedData.Txs = nil
-		assert.False(t, m.isValidSignedData(signedData))
-	})
-
-	t.Run("wrong proposer address", func(t *testing.T) {
-		batch := &types.Data{
-			Txs: types.Txs{types.Tx("tx1")},
-		}
-		sig, err := m.getDataSignature(batch)
-		require.NoError(err)
-		pubKey, err := m.signer.GetPublic()
-		require.NoError(err)
-		wrongAddr := make([]byte, len(proposerAddr))
-		copy(wrongAddr, proposerAddr)
-		wrongAddr[0] ^= 0xFF // flip a bit
-		signedData := &types.SignedData{
-			Data:      *batch,
-			Signature: sig,
-			Signer: types.Signer{
-				PubKey:  pubKey,
-				Address: wrongAddr,
-			},
-		}
-		assert.False(t, m.isValidSignedData(signedData))
-	})
-
-	t.Run("invalid signature", func(t *testing.T) {
-		batch := &types.Data{
-			Txs: types.Txs{types.Tx("tx1")},
-		}
-		sig, err := m.getDataSignature(batch)
-		require.NoError(err)
-		pubKey, err := m.signer.GetPublic()
-		require.NoError(err)
-		// Corrupt the signature
-		badSig := make([]byte, len(sig))
-		copy(badSig, sig)
-		badSig[0] ^= 0xFF
-		signedData := &types.SignedData{
-			Data:      *batch,
-			Signature: badSig,
-			Signer: types.Signer{
-				PubKey:  pubKey,
-				Address: proposerAddr,
-			},
-		}
-		assert.False(t, m.isValidSignedData(signedData))
-	})
-}
-
 // TestManager_execValidate tests the execValidate method for various header/data/state conditions.
 func TestManager_execValidate(t *testing.T) {
 	require := require.New(t)

block/retriever_da.go

Lines changed: 29 additions & 2 deletions
@@ -255,8 +255,8 @@ func (m *Manager) tryDecodeData(bz []byte, daHeight uint64) *types.Data {
 	}
 
 	// Early validation to reject junk data
-	if !m.isValidSignedData(&signedData) {
-		m.logger.Debug().Uint64("daHeight", daHeight).Msg("invalid data signature")
+	if err := m.assertValidSignedData(&signedData); err != nil {
+		m.logger.Debug().Uint64("daHeight", daHeight).Err(err).Msg("invalid data signature")
 		return nil
 	}
 
@@ -268,6 +268,33 @@ func (m *Manager) tryDecodeData(bz []byte, daHeight uint64) *types.Data {
 	return &signedData.Data
 }
 
+// assertValidSignedData validates the data signature and returns an error if it's invalid.
+func (m *Manager) assertValidSignedData(signedData *types.SignedData) error {
+	if signedData == nil || signedData.Txs == nil {
+		return errors.New("empty signed data")
+	}
+
+	if err := m.assertUsingExpectedSingleSequencer(signedData.Signer.Address); err != nil {
+		return err
+	}
+
+	dataBytes, err := signedData.Data.MarshalBinary()
+	if err != nil {
+		return fmt.Errorf("failed to marshal data: %w", err)
+	}
+
+	valid, err := signedData.Signer.PubKey.Verify(dataBytes, signedData.Signature)
+	if err != nil {
+		return fmt.Errorf("failed to verify signature: %w", err)
+	}
+
+	if !valid {
+		return fmt.Errorf("invalid signature")
+	}
+
+	return nil
+}
+
 // isAtHeight checks if a height is available.
 func (m *Manager) isAtHeight(ctx context.Context, height uint64) error {
 	currentHeight, err := m.GetStoreHeight(ctx)
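Because assertValidSignedData wraps underlying failures with `%w`, callers are not limited to logging the message; they could also branch on the cause. A hypothetical caller-side sketch (illustrative names only, not part of this commit):

```go
package main

import (
	"errors"
	"fmt"
)

// errUnexpectedSequencer is an illustrative sentinel, not an ev-node error value.
var errUnexpectedSequencer = errors.New("unexpected sequencer")

// validate stands in for an error-returning check such as assertValidSignedData.
func validate(fromExpectedSequencer bool) error {
	if !fromExpectedSequencer {
		return fmt.Errorf("signed data rejected: %w", errUnexpectedSequencer)
	}
	return nil
}

func main() {
	err := validate(false)
	if errors.Is(err, errUnexpectedSequencer) {
		fmt.Println("rejected:", err) // the wrapped cause survives for logging and branching
	}
}
```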

block/retriever_da_test.go

Lines changed: 103 additions & 3 deletions
@@ -23,7 +23,7 @@ import (
 	"github.com/evstack/ev-node/pkg/cache"
 	"github.com/evstack/ev-node/pkg/config"
 	"github.com/evstack/ev-node/pkg/genesis"
-	"github.com/evstack/ev-node/pkg/signer/noop"
+	noopsigner "github.com/evstack/ev-node/pkg/signer/noop"
 	storepkg "github.com/evstack/ev-node/pkg/store"
 	rollmocks "github.com/evstack/ev-node/test/mocks"
 	"github.com/evstack/ev-node/types"
@@ -52,7 +52,7 @@ func setupManagerForRetrieverTest(t *testing.T, initialDAHeight uint64) (*daRetr
 	src := rand.Reader
 	pk, _, err := crypto.GenerateEd25519Key(src)
 	require.NoError(t, err)
-	noopSigner, err := noop.NewNoopSigner(pk)
+	noopSigner, err := noopsigner.NewNoopSigner(pk)
 	require.NoError(t, err)
 
 	addr, err := noopSigner.GetAddress()
@@ -361,7 +361,7 @@ func TestProcessNextDAHeader_UnexpectedSequencer(t *testing.T) {
 	src := rand.Reader
 	pk, _, err := crypto.GenerateEd25519Key(src)
 	require.NoError(t, err)
-	signerNoop, err := noop.NewNoopSigner(pk)
+	signerNoop, err := noopsigner.NewNoopSigner(pk)
 	require.NoError(t, err)
 	hc := types.HeaderConfig{
 		Height: blockHeight,
@@ -732,3 +732,103 @@ func TestRetrieveLoop_DAHeightIncrementsOnlyOnSuccess(t *testing.T) {
 
 	mockDAClient.AssertExpectations(t)
 }
+
+// TestAssertValidSignedData covers valid, nil, wrong proposer, and invalid signature cases for assertValidSignedData.
+func TestAssertValidSignedData(t *testing.T) {
+	require := require.New(t)
+	privKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 256)
+	require.NoError(err)
+	testSigner, err := noopsigner.NewNoopSigner(privKey)
+	require.NoError(err)
+	proposerAddr, err := testSigner.GetAddress()
+	require.NoError(err)
+	gen := genesis.NewGenesis(
+		"testchain",
+		1,
+		time.Now(),
+		proposerAddr,
+	)
+	m := &Manager{
+		signer:  testSigner,
+		genesis: gen,
+	}
+
+	t.Run("valid signed data", func(t *testing.T) {
+		batch := &types.Data{
+			Txs: types.Txs{types.Tx("tx1"), types.Tx("tx2")},
+		}
+		sig, err := m.getDataSignature(batch)
+		require.NoError(err)
+		pubKey, err := m.signer.GetPublic()
+		require.NoError(err)
+		signedData := &types.SignedData{
+			Data:      *batch,
+			Signature: sig,
+			Signer: types.Signer{
+				PubKey:  pubKey,
+				Address: proposerAddr,
+			},
+		}
+		assert.NoError(t, m.assertValidSignedData(signedData))
+	})
+
+	t.Run("nil signed data", func(t *testing.T) {
+		assert.Error(t, m.assertValidSignedData(nil))
+	})
+
+	t.Run("nil Txs", func(t *testing.T) {
+		signedData := &types.SignedData{
+			Data: types.Data{},
+			Signer: types.Signer{
+				Address: proposerAddr,
+			},
+		}
+		signedData.Txs = nil
+		assert.Error(t, m.assertValidSignedData(signedData))
+	})
+
+	t.Run("wrong proposer address", func(t *testing.T) {
+		batch := &types.Data{
+			Txs: types.Txs{types.Tx("tx1")},
+		}
+		sig, err := m.getDataSignature(batch)
+		require.NoError(err)
+		pubKey, err := m.signer.GetPublic()
+		require.NoError(err)
+		wrongAddr := make([]byte, len(proposerAddr))
+		copy(wrongAddr, proposerAddr)
+		wrongAddr[0] ^= 0xFF // flip a bit
+		signedData := &types.SignedData{
+			Data:      *batch,
+			Signature: sig,
+			Signer: types.Signer{
+				PubKey:  pubKey,
+				Address: wrongAddr,
+			},
+		}
+		assert.Error(t, m.assertValidSignedData(signedData))
+	})
+
+	t.Run("invalid signature", func(t *testing.T) {
+		batch := &types.Data{
+			Txs: types.Txs{types.Tx("tx1")},
+		}
+		sig, err := m.getDataSignature(batch)
+		require.NoError(err)
+		pubKey, err := m.signer.GetPublic()
+		require.NoError(err)
+		// Corrupt the signature
+		badSig := make([]byte, len(sig))
+		copy(badSig, sig)
+		badSig[0] ^= 0xFF
+		signedData := &types.SignedData{
+			Data:      *batch,
+			Signature: badSig,
+			Signer: types.Signer{
+				PubKey:  pubKey,
+				Address: proposerAddr,
+			},
+		}
+		assert.Error(t, m.assertValidSignedData(signedData))
+	})
+}

docs/learn/specs/block-manager.md

Lines changed: 25 additions & 25 deletions
@@ -456,7 +456,7 @@ The `DataStoreRetrieveLoop`:
 - Operates at `BlockTime` intervals via `dataStoreCh` signals
 - Tracks `dataStoreHeight` for the last retrieved data
 - Retrieves all data blocks between last height and current store height
-- Validates data signatures using `isValidSignedData`
+- Validates data signatures using `assertValidSignedData`
 - Marks data as "seen" in the data cache
 - Sends data to sync goroutine via `dataInCh`
 
@@ -568,30 +568,30 @@ The communication with DA layer:
 
 ## Assumptions and Considerations
 
-* The block manager loads the initial state from the local store and uses genesis if not found in the local store, when the node (re)starts.
-* The default mode for sequencer nodes is normal (not lazy).
-* The sequencer can produce empty blocks.
-* In lazy aggregation mode, the block manager maintains consistency with the DA layer by producing empty blocks at regular intervals, ensuring a 1:1 mapping between DA layer blocks and execution layer blocks.
-* The lazy aggregation mechanism uses a dual timer approach:
-  * A `blockTimer` that triggers block production when transactions are available
-  * A `lazyTimer` that ensures blocks are produced even during periods of inactivity
-* Empty batches are handled differently in lazy mode - instead of discarding them, they are returned with the `ErrNoBatch` error, allowing the caller to create empty blocks with proper timestamps.
-* Transaction notifications from the `Reaper` to the `Manager` are handled via a non-blocking notification channel (`txNotifyCh`) to prevent backpressure.
-* The block manager enforces `MaxPendingHeadersAndData` limit to prevent unbounded growth of pending queues during DA submission issues.
-* Headers and data are submitted separately to the DA layer using different namespaces, supporting the header/data separation architecture.
-* The block manager uses persistent caches for headers and data to track seen items and DA inclusion status.
-* Namespace migration is handled transparently, with automatic detection and state persistence to optimize future operations.
-* The system supports backward compatibility with legacy single-namespace deployments while transitioning to separate namespaces.
-* Gas price management includes automatic adjustment with `GasMultiplier` on DA submission retries.
-* The block manager uses persistent storage (disk) when the `root_dir` and `db_path` configuration parameters are specified in `config.yaml` file under the app directory. If these configuration parameters are not specified, the in-memory storage is used, which will not be persistent if the node stops.
-* The block manager does not re-apply blocks when they transition from soft confirmed to DA included status. The block is only marked DA included in the caches.
-* Header and data stores use separate prefixes for isolation in the underlying database.
-* The genesis `ChainID` is used to create separate `PubSubTopID`s for headers and data in go-header.
-* Block sync over the P2P network works only when a full node is connected to the P2P network by specifying the initial seeds to connect to via `P2PConfig.Seeds` configuration parameter when starting the full node.
-* Node's context is passed down to all components to support graceful shutdown and cancellation.
-* The block manager supports custom signature payload providers for headers, enabling flexible signing schemes.
-* The block manager supports the separation of header and data structures in Evolve. This allows for expanding the sequencing scheme beyond single sequencing and enables the use of a decentralized sequencer mode. For detailed information on this architecture, see the [Header and Data Separation ADR](../../adr/adr-014-header-and-data-separation.md).
-* The block manager processes blocks with a minimal header format, which is designed to eliminate dependency on CometBFT's header format and can be used to produce an execution layer tailored header if needed. For details on this header structure, see the [Evolve Minimal Header](../../adr/adr-015-rollkit-minimal-header.md) specification.
+- The block manager loads the initial state from the local store and uses genesis if not found in the local store, when the node (re)starts.
+- The default mode for sequencer nodes is normal (not lazy).
+- The sequencer can produce empty blocks.
+- In lazy aggregation mode, the block manager maintains consistency with the DA layer by producing empty blocks at regular intervals, ensuring a 1:1 mapping between DA layer blocks and execution layer blocks.
+- The lazy aggregation mechanism uses a dual timer approach:
+  - A `blockTimer` that triggers block production when transactions are available
+  - A `lazyTimer` that ensures blocks are produced even during periods of inactivity
+- Empty batches are handled differently in lazy mode - instead of discarding them, they are returned with the `ErrNoBatch` error, allowing the caller to create empty blocks with proper timestamps.
+- Transaction notifications from the `Reaper` to the `Manager` are handled via a non-blocking notification channel (`txNotifyCh`) to prevent backpressure.
+- The block manager enforces `MaxPendingHeadersAndData` limit to prevent unbounded growth of pending queues during DA submission issues.
+- Headers and data are submitted separately to the DA layer using different namespaces, supporting the header/data separation architecture.
+- The block manager uses persistent caches for headers and data to track seen items and DA inclusion status.
+- Namespace migration is handled transparently, with automatic detection and state persistence to optimize future operations.
+- The system supports backward compatibility with legacy single-namespace deployments while transitioning to separate namespaces.
+- Gas price management includes automatic adjustment with `GasMultiplier` on DA submission retries.
+- The block manager uses persistent storage (disk) when the `root_dir` and `db_path` configuration parameters are specified in `config.yaml` file under the app directory. If these configuration parameters are not specified, the in-memory storage is used, which will not be persistent if the node stops.
+- The block manager does not re-apply blocks when they transition from soft confirmed to DA included status. The block is only marked DA included in the caches.
+- Header and data stores use separate prefixes for isolation in the underlying database.
+- The genesis `ChainID` is used to create separate `PubSubTopID`s for headers and data in go-header.
+- Block sync over the P2P network works only when a full node is connected to the P2P network by specifying the initial seeds to connect to via `P2PConfig.Seeds` configuration parameter when starting the full node.
+- Node's context is passed down to all components to support graceful shutdown and cancellation.
+- The block manager supports custom signature payload providers for headers, enabling flexible signing schemes.
+- The block manager supports the separation of header and data structures in Evolve. This allows for expanding the sequencing scheme beyond single sequencing and enables the use of a decentralized sequencer mode. For detailed information on this architecture, see the [Header and Data Separation ADR](../../adr/adr-014-header-and-data-separation.md).
+- The block manager processes blocks with a minimal header format, which is designed to eliminate dependency on CometBFT's header format and can be used to produce an execution layer tailored header if needed. For details on this header structure, see the [Evolve Minimal Header](../../adr/adr-015-rollkit-minimal-header.md) specification.
 
 ## Metrics
 
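The assumptions above describe the dual-timer lazy aggregation approach (`blockTimer` plus `lazyTimer`). A minimal, self-contained sketch of that pattern, with hypothetical names, durations, and channel shapes rather than the actual Manager implementation:

```go
package main

import (
	"fmt"
	"time"
)

func lazyAggregationLoop(txNotifyCh <-chan struct{}, stop <-chan struct{}) {
	blockTime := 100 * time.Millisecond // produce shortly after transactions arrive
	lazyTime := 1 * time.Second         // upper bound between (possibly empty) blocks

	blockTimer := time.NewTimer(blockTime)
	lazyTimer := time.NewTimer(lazyTime)
	defer blockTimer.Stop()
	defer lazyTimer.Stop()

	txsPending := false
	for {
		select {
		case <-txNotifyCh:
			txsPending = true // non-blocking notification, e.g. from a reaper component
		case <-blockTimer.C:
			if txsPending {
				fmt.Println("produce block with pending txs")
				txsPending = false
				// restart the inactivity timer (stop and drain before Reset)
				if !lazyTimer.Stop() {
					select {
					case <-lazyTimer.C:
					default:
					}
				}
				lazyTimer.Reset(lazyTime)
			}
			blockTimer.Reset(blockTime)
		case <-lazyTimer.C:
			fmt.Println("produce empty block to keep pace with the DA layer")
			lazyTimer.Reset(lazyTime)
		case <-stop:
			return
		}
	}
}

func main() {
	txNotifyCh := make(chan struct{}, 1)
	stop := make(chan struct{})
	go lazyAggregationLoop(txNotifyCh, stop)

	txNotifyCh <- struct{}{} // simulate a transaction notification
	time.Sleep(2 * time.Second)
	close(stop)
}
```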

0 commit comments
