diff --git a/.avalanche-golangci.yml b/.avalanche-golangci.yml
index 2d0b812652..df1d903c25 100644
--- a/.avalanche-golangci.yml
+++ b/.avalanche-golangci.yml
@@ -55,7 +55,7 @@ linters:
     - bodyclose
     - copyloopvar
     # - depguard
-    # - errcheck
+    - errcheck
     - errorlint
     - forbidigo
     - goconst
diff --git a/core/blockchain_ext_test.go b/core/blockchain_ext_test.go
index 566f85274a..f8cb92be02 100644
--- a/core/blockchain_ext_test.go
+++ b/core/blockchain_ext_test.go
@@ -1651,7 +1651,7 @@ func ReexecCorruptedStateTest(t *testing.T, create ReexecTestFunc) {
 	require.NoError(t, blockchain.Accept(chain[0]))
 
 	// Simulate a crash by updating the acceptor tip
-	blockchain.writeBlockAcceptedIndices(chain[1])
+	require.NoError(t, blockchain.writeBlockAcceptedIndices(chain[1]))
 	blockchain.Stop()
 
 	// Restart blockchain with existing state
diff --git a/core/fifo_cache.go b/core/fifo_cache.go
index 9f1eb8cf1e..7802f44230 100644
--- a/core/fifo_cache.go
+++ b/core/fifo_cache.go
@@ -44,7 +44,7 @@ func (f *BufferFIFOCache[K, V]) Put(key K, val V) {
 	f.l.Lock()
 	defer f.l.Unlock()
 
-	f.buffer.Insert(key) // Insert will remove the oldest [K] if we are at the [limit]
+	_ = f.buffer.Insert(key) // Insert will remove the oldest [K] if we are at the [limit]
 	f.m[key] = val
 }
 
diff --git a/core/state_manager_test.go b/core/state_manager_test.go
index 3e50c7226b..d9127ed9b0 100644
--- a/core/state_manager_test.go
+++ b/core/state_manager_test.go
@@ -56,8 +56,8 @@ func TestCappedMemoryTrieWriter(t *testing.T) {
 		require.NoError(w.InsertTrie(block))
 		require.Zero(m.LastDereference, "should not have dereferenced block on insert")
 		require.Zero(m.LastCommit, "should not have committed block on insert")
+		require.NoError(w.AcceptTrie(block))
 
-		w.AcceptTrie(block)
 		if i <= tipBufferSize {
 			require.Zero(m.LastDereference, "should not have dereferenced block on accept")
 		} else {
@@ -71,7 +71,7 @@ func TestCappedMemoryTrieWriter(t *testing.T) {
 			m.LastCommit = common.Hash{}
 		}
 
-		w.RejectTrie(block)
+		require.NoError(w.RejectTrie(block))
 		require.Equal(block.Root(), m.LastDereference, "should have dereferenced block on reject")
 		require.Zero(m.LastCommit, "should not have committed block on reject")
 		m.LastDereference = common.Hash{}
@@ -96,12 +96,12 @@ func TestNoPruningTrieWriter(t *testing.T) {
 		require.Zero(m.LastDereference, "should not have dereferenced block on insert")
 		require.Zero(m.LastCommit, "should not have committed block on insert")
 
-		w.AcceptTrie(block)
+		require.NoError(w.AcceptTrie(block))
 		require.Zero(m.LastDereference, "should not have dereferenced block on accept")
 		require.Equal(block.Root(), m.LastCommit, "should have committed block on accept")
 		m.LastCommit = common.Hash{}
 
-		w.RejectTrie(block)
+		require.NoError(w.RejectTrie(block))
 		require.Equal(block.Root(), m.LastDereference, "should have dereferenced block on reject")
 		require.Zero(m.LastCommit, "should not have committed block on reject")
 		m.LastDereference = common.Hash{}
diff --git a/plugin/evm/atomic/state/atomic_trie.go b/plugin/evm/atomic/state/atomic_trie.go
index 0fbdd59ff5..9976575a7f 100644
--- a/plugin/evm/atomic/state/atomic_trie.go
+++ b/plugin/evm/atomic/state/atomic_trie.go
@@ -244,7 +244,9 @@ func (a *AtomicTrie) InsertTrie(nodes *trienode.NodeSet, root common.Hash) error
 			return err
 		}
 	}
-	a.trieDB.Reference(root, common.Hash{})
+	if err := a.trieDB.Reference(root, common.Hash{}); err != nil {
+		return err
+	}
 
 	// The use of [Cap] in [insertTrie] prevents exceeding the configured memory
 	// limit (and OOM) in case there is a large backlog of processing (unaccepted) blocks.
@@ -285,12 +287,16 @@ func (a *AtomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) {
 	// - not committted, in which case the current root we are inserting contains
 	// references to all the relevant data from the previous root, so the previous
 	// root can be dereferenced.
-	a.trieDB.Dereference(a.lastAcceptedRoot)
+	if err := a.trieDB.Dereference(a.lastAcceptedRoot); err != nil {
+		return false, err
+	}
 	a.lastAcceptedRoot = root
 	return hasCommitted, nil
 }
 
 func (a *AtomicTrie) RejectTrie(root common.Hash) error {
-	a.trieDB.Dereference(root)
+	if err := a.trieDB.Dereference(root); err != nil {
+		return err
+	}
 	return nil
 }
diff --git a/plugin/evm/atomic/state/atomic_trie_test.go b/plugin/evm/atomic/state/atomic_trie_test.go
index 626eac9db0..55fd58487e 100644
--- a/plugin/evm/atomic/state/atomic_trie_test.go
+++ b/plugin/evm/atomic/state/atomic_trie_test.go
@@ -604,7 +604,7 @@ func TestAtomicTrie_AcceptTrie(t *testing.T) {
 				_, storageSize, _ := atomicTrie.trieDB.Size()
 				require.NotZero(t, storageSize, "there should be a dirty node taking up storage space")
 			}
-			atomicTrie.updateLastCommitted(testCase.lastCommittedRoot, testCase.lastCommittedHeight)
+			require.NoError(t, atomicTrie.updateLastCommitted(testCase.lastCommittedRoot, testCase.lastCommittedHeight))
 
 			hasCommitted, err := atomicTrie.AcceptTrie(testCase.height, testCase.root)
 			require.NoError(t, err)
diff --git a/plugin/evm/atomic/vm/block_extension.go b/plugin/evm/atomic/vm/block_extension.go
index 6171ef5b86..1899e88b5f 100644
--- a/plugin/evm/atomic/vm/block_extension.go
+++ b/plugin/evm/atomic/vm/block_extension.go
@@ -230,7 +230,9 @@ func (be *blockExtension) Reject() error {
 
 func (be *blockExtension) CleanupVerified() {
 	vm := be.blockExtender.vm
 	if atomicState, err := vm.AtomicBackend.GetVerifiedAtomicState(be.block.GetEthBlock().Hash()); err == nil {
-		atomicState.Reject()
+		if err := atomicState.Reject(); err != nil {
+			log.Error("failed to reject atomic state", "err", err)
+		}
 	}
 }
diff --git a/plugin/evm/atomic/vm/gossiper_atomic_gossiping_test.go b/plugin/evm/atomic/vm/gossiper_atomic_gossiping_test.go
index e009c36bdf..a160c471fe 100644
--- a/plugin/evm/atomic/vm/gossiper_atomic_gossiping_test.go
+++ b/plugin/evm/atomic/vm/gossiper_atomic_gossiping_test.go
@@ -143,7 +143,7 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) {
 	tx, conflictingTx := importTxs[0], importTxs[1]
 	txID := tx.ID()
 
-	vm.AtomicMempool.AddRemoteTx(tx)
+	require.NoError(vm.AtomicMempool.AddRemoteTx(tx))
 	vm.AtomicMempool.NextTx()
 	vm.AtomicMempool.DiscardCurrentTx(txID)
 
@@ -177,7 +177,7 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) {
 	// Conflicting tx must be submitted over the API to be included in push gossip.
 	// (i.e., txs received via p2p are not included in push gossip)
 	// This test adds it directly to the mempool + gossiper to simulate that.
-	vm.AtomicMempool.AddRemoteTx(conflictingTx)
+	require.NoError(vm.AtomicMempool.AddRemoteTx(conflictingTx))
 	vm.AtomicTxPushGossiper.Add(conflictingTx)
 
 	time.Sleep(500 * time.Millisecond)
diff --git a/plugin/evm/atomic/vm/vm_test.go b/plugin/evm/atomic/vm/vm_test.go
index 7bb24104e7..d05a6aca94 100644
--- a/plugin/evm/atomic/vm/vm_test.go
+++ b/plugin/evm/atomic/vm/vm_test.go
@@ -198,14 +198,13 @@ func testIssueAtomicTxs(t *testing.T, scheme string) {
 	utxos := map[ids.ShortID]uint64{
 		vmtest.TestShortIDAddrs[0]: importAmount,
 	}
-	addUTXOs(tvm.AtomicMemory, vm.Ctx, utxos)
+	require.NoError(addUTXOs(tvm.AtomicMemory, vm.Ctx, utxos))
 	defer func() {
 		require.NoError(vm.Shutdown(context.Background()))
 	}()
 
 	importTx, err := vm.newImportTx(vm.Ctx.XChainID, vmtest.TestEthAddrs[0], vmtest.InitialBaseFee, vmtest.TestKeys[0:1])
 	require.NoError(err)
-
 	require.NoError(vm.AtomicMempool.AddLocalTx(importTx))
 
 	msg, err := vm.WaitForEvent(context.Background())
@@ -214,11 +213,8 @@ func testIssueAtomicTxs(t *testing.T, scheme string) {
 	blk, err := vm.BuildBlock(context.Background())
 	require.NoError(err)
-
 	require.NoError(blk.Verify(context.Background()))
-
 	require.NoError(vm.SetPreference(context.Background(), blk.ID()))
-
 	require.NoError(blk.Accept(context.Background()))
 
 	lastAcceptedID, err := vm.LastAccepted(context.Background())
 	require.NoError(err)
@@ -232,7 +228,6 @@ func testIssueAtomicTxs(t *testing.T, scheme string) {
 	wrappedState := extstate.New(state)
 	exportTx, err := atomic.NewExportTx(vm.Ctx, vm.CurrentRules(), wrappedState, vm.Ctx.AVAXAssetID, importAmount-(2*ap0.AtomicTxFee), vm.Ctx.XChainID, vmtest.TestShortIDAddrs[0], vmtest.InitialBaseFee, vmtest.TestKeys[0:1])
 	require.NoError(err)
-
 	require.NoError(vm.AtomicMempool.AddLocalTx(exportTx))
 
 	msg, err = vm.WaitForEvent(context.Background())
@@ -241,9 +236,7 @@ func testIssueAtomicTxs(t *testing.T, scheme string) {
 	blk2, err := vm.BuildBlock(context.Background())
 	require.NoError(err)
-
 	require.NoError(blk2.Verify(context.Background()))
-
 	require.NoError(blk2.Accept(context.Background()))
 
 	lastAcceptedID, err = vm.LastAccepted(context.Background())
 	require.NoError(err)
@@ -843,9 +836,7 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) {
 	blk, err := vm.BuildBlock(context.Background())
 	require.NoError(err)
 	require.NoError(blk.Verify(context.Background()))
-
 	require.NoError(vm.SetPreference(context.Background(), blk.ID()))
-
 	require.NoError(blk.Accept(context.Background()))
 
 	newHead := <-newTxPoolHeadChan
@@ -853,8 +844,8 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) {
 
 	// Add the two conflicting transactions directly to the mempool, so that two consecutive transactions
 	// will fail verification when build block is called.
-	vm.AtomicMempool.AddRemoteTx(importTxs[1])
-	vm.AtomicMempool.AddRemoteTx(importTxs[2])
+	require.NoError(vm.AtomicMempool.ForceAddTx(importTxs[1]))
+	require.NoError(vm.AtomicMempool.ForceAddTx(importTxs[2]))
 
 	_, err = vm.BuildBlock(context.Background())
 	require.ErrorIs(err, ErrEmptyBlock)
@@ -891,7 +882,7 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) {
 		err = vm.AtomicMempool.AddLocalTx(conflictTx)
 		require.ErrorIs(err, txpool.ErrConflict)
 		// force add the tx
-		vm.AtomicMempool.ForceAddTx(conflictTx)
+		require.NoError(vm.AtomicMempool.ForceAddTx(conflictTx))
 		conflictSets[index].Add(conflictTx.ID())
 	}
 	msg, err := vm.WaitForEvent(context.Background())
@@ -1654,7 +1645,7 @@ func TestWaitForEvent(t *testing.T) {
 				},
 			}}}}))
 			testCase.testCase(t, vm, address, key)
-			vm.Shutdown(context.Background())
+			require.NoError(t, vm.Shutdown(context.Background()))
 		})
 	}
 }
diff --git a/plugin/evm/extension/config.go b/plugin/evm/extension/config.go
index 93e633ac55..3fe676866f 100644
--- a/plugin/evm/extension/config.go
+++ b/plugin/evm/extension/config.go
@@ -101,8 +101,7 @@ type BlockExtension interface {
 	// it can be implemented to extend inner block verification
 	SemanticVerify() error
 	// CleanupVerified is called when a block has passed SemanticVerify and SynctacticVerify,
-	// and should be cleaned up due to error or verification runs under non-write mode. This
-	// does not return an error because the block has already been verified.
+	// and should be cleaned up due to error or verification runs under non-write mode.
 	CleanupVerified()
 	// Accept is called when a block is accepted by the block manager. Accept takes a
 	// database.Batch that contains the changes that were made to the database as a result
diff --git a/plugin/evm/message/codec.go b/plugin/evm/message/codec.go
index c3f1a6795f..aee9a70775 100644
--- a/plugin/evm/message/codec.go
+++ b/plugin/evm/message/codec.go
@@ -38,7 +38,7 @@ func init() {
 	// See https://github.com/ava-labs/coreth/pull/999
 	c.SkipRegistrations(3)
 
-	Codec.RegisterCodec(Version, c)
+	errs.Add(Codec.RegisterCodec(Version, c))
 
 	if errs.Errored() {
 		panic(errs.Err)
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 548072c786..862551d3ae 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -467,7 +467,9 @@ func (vm *VM) Initialize(
 
 	// Add p2p warp message warpHandler
 	warpHandler := acp118.NewCachedHandler(meteredCache, vm.warpBackend, vm.ctx.WarpSigner)
-	vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler)
+	if err = vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler); err != nil {
+		return err
+	}
 
 	vm.stateSyncDone = make(chan struct{})
 
@@ -896,7 +898,9 @@ func (vm *VM) Shutdown(context.Context) error {
 	for _, handler := range vm.rpcHandlers {
 		handler.Stop()
 	}
-	vm.eth.Stop()
+	if err := vm.eth.Stop(); err != nil {
+		log.Error("error stopping eth", "err", err)
+	}
 	vm.shutdownWg.Wait()
 	return nil
 }
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go
index 11c205c887..a928fbc0f9 100644
--- a/plugin/evm/vm_test.go
+++ b/plugin/evm/vm_test.go
@@ -1682,7 +1682,7 @@ func TestWaitForEvent(t *testing.T) {
 				Fork: &fork,
 			})
 			testCase.testCase(t, vm)
-			vm.Shutdown(context.Background())
+			require.NoError(t, vm.Shutdown(context.Background()))
 		})
 	}
 }
@@ -1885,7 +1885,9 @@ func TestDelegatePrecompile_BehaviorAcrossUpgrades(t *testing.T) {
 			vmtest.SetupTestVM(t, vm, vmtest.TestVMConfig{
 				Fork: &tt.fork,
 			})
-			defer vm.Shutdown(ctx)
+			defer func() {
+				require.NoError(t, vm.Shutdown(ctx))
+			}()
 
 			if tt.preDeployTime != 0 {
 				vm.clock.Set(time.Unix(tt.preDeployTime, 0))
diff --git a/plugin/evm/vmtest/genesis.go b/plugin/evm/vmtest/genesis.go
index 6853793414..c8ef0321c1 100644
--- a/plugin/evm/vmtest/genesis.go
+++ b/plugin/evm/vmtest/genesis.go
@@ -65,7 +65,9 @@ func NewTestGenesis(cfg *params.ChainConfig) *core.Genesis {
 	g.Config = &cpy
 
 	allocStr := `{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}}`
-	json.Unmarshal([]byte(allocStr), &g.Alloc)
+	if err := json.Unmarshal([]byte(allocStr), &g.Alloc); err != nil {
+		panic(err)
+	}
 	// After Durango, an additional account is funded in tests to use
 	// with warp messages.
 	if params.GetExtra(cfg).IsDurango(0) {
diff --git a/plugin/evm/vmtest/test_syncervm.go b/plugin/evm/vmtest/test_syncervm.go
index be7703d039..d070f9c234 100644
--- a/plugin/evm/vmtest/test_syncervm.go
+++ b/plugin/evm/vmtest/test_syncervm.go
@@ -156,7 +156,9 @@ func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup
 	appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error {
 		nodeID, hasItem := nodeSet.Pop()
 		require.True(hasItem, "expected nodeSet to contain at least 1 nodeID")
-		go testSyncVMSetup.serverVM.VM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request)
+		go func() {
+			require.NoError(testSyncVMSetup.serverVM.VM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request))
+		}()
 		return nil
 	}
 	ResetMetrics(testSyncVMSetup.syncerVM.SnowCtx)
@@ -221,7 +223,9 @@ func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup
 	// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM]
 	testSyncVMSetup.serverVM.AppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error {
 		if test.responseIntercept == nil {
-			go syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response)
+			go func() {
+				require.NoError(syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response), "AppResponse failed")
+			}()
 		} else {
 			go test.responseIntercept(syncReEnabledVM, nodeID, requestID, response)
 		}
@@ -363,7 +367,9 @@ func initSyncServerAndClientVMs(t *testing.T, test SyncTestParams, numBlocks int
 	// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM]
 	serverTest.AppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error {
 		if test.responseIntercept == nil {
-			go syncerVM.AppResponse(ctx, nodeID, requestID, response)
+			go func() {
+				require.NoError(syncerVM.AppResponse(ctx, nodeID, requestID, response))
+			}()
 		} else {
 			go test.responseIntercept(syncerVM, nodeID, requestID, response)
 		}
diff --git a/plugin/main.go b/plugin/main.go
index 9132d6d1d4..6804079836 100644
--- a/plugin/main.go
+++ b/plugin/main.go
@@ -32,5 +32,9 @@ func main() {
 		fmt.Printf("failed to set fd limit correctly due to: %s\n", err)
 		os.Exit(1)
 	}
-	rpcchainvm.Serve(context.Background(), factory.NewPluginVM())
+
+	if err := rpcchainvm.Serve(context.Background(), factory.NewPluginVM()); err != nil {
+		fmt.Printf("failed to serve rpc chain vm: %s\n", err)
+		os.Exit(1)
+	}
 }
diff --git a/sync/statesync/code_queue_test.go b/sync/statesync/code_queue_test.go
index b46ad1d07f..bb6c14f40f 100644
--- a/sync/statesync/code_queue_test.go
+++ b/sync/statesync/code_queue_test.go
@@ -235,7 +235,9 @@ func TestQuitAndAddCodeRace(t *testing.T) {
 		in := []common.Hash{{}}
 		ready.Done()
 		<-start
-		q.AddCode(in)
+		// Due to the race condition, AddCode may either succeed or fail
+		// depending on whether the quit channel is closed first
+		_ = q.AddCode(in)
 	}()
 
 	ready.Wait()
diff --git a/sync/statesync/trie_sync_tasks.go b/sync/statesync/trie_sync_tasks.go
index 1133e5654f..d06931046e 100644
--- a/sync/statesync/trie_sync_tasks.go
+++ b/sync/statesync/trie_sync_tasks.go
@@ -75,7 +75,9 @@ func (m *mainTrieTask) OnLeafs(db ethdb.KeyValueWriter, keys, vals [][]byte) err
 
 		// check if this account has storage root that we need to fetch
 		if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
-			m.sync.trieQueue.RegisterStorageTrie(acc.Root, accountHash)
+			if err := m.sync.trieQueue.RegisterStorageTrie(acc.Root, accountHash); err != nil {
+				return err
+			}
 		}
 
 		// check if this account has code and add it to codeHashes to fetch
diff --git a/triedb/firewood/database.go b/triedb/firewood/database.go
index 3ffa60e5ea..ef246c622e 100644
--- a/triedb/firewood/database.go
+++ b/triedb/firewood/database.go
@@ -553,7 +553,11 @@ func (db *Database) getProposalHash(parentRoot common.Hash, keys, values [][]byt
 	ffiHashTimer.Inc(time.Since(start).Milliseconds())
 
 	// We succesffuly created a proposal, so we must drop it after use.
-	defer p.Drop()
+	defer func() {
+		if err := p.Drop(); err != nil {
+			log.Error("firewood: error dropping proposal after hash computation", "parentRoot", parentRoot.Hex(), "error", err)
+		}
+	}()
 
 	rootBytes, err := p.Root()
 	if err != nil {
diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go
index f8cd8686b6..29a50bbc00 100644
--- a/warp/verifier_backend_test.go
+++ b/warp/verifier_backend_test.go
@@ -52,8 +52,7 @@ func TestAddressedCallSignatures(t *testing.T) {
 			require.NoError(t, err)
 			signature, err := snowCtx.WarpSigner.Sign(msg)
 			require.NoError(t, err)
-
-			backend.AddMessage(msg)
+			require.NoError(t, backend.AddMessage(msg))
 			return msg.Bytes(), signature
 		},
 		verifyStats: func(t *testing.T, stats *verifierStats) {
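
For reviewers, the changes above reduce to three error-handling conventions that enabling errcheck forces: propagate the error in fallible paths (as in AtomicTrie.InsertTrie and VM.Initialize), log and continue in cleanup or shutdown paths that cannot return an error (as in VM.Shutdown and blockExtension.CleanupVerified), and discard explicitly with `_ =` where ignoring is intentional (as in BufferFIFOCache.Put). The sketch below is illustrative only; every identifier in it (store, Put, Close) is hypothetical and not code from this repository:

// errcheck_conventions.go — a minimal sketch of the three patterns, assuming
// a hypothetical store type; only the shapes mirror the diff above.
package main

import (
	"errors"
	"log"
)

type store struct{ closed bool }

// Put fails once the store is closed.
func (s *store) Put(key string) error {
	if s.closed {
		return errors.New("store closed")
	}
	return nil
}

// Close fails on a second call, so the discard pattern below has an error to ignore.
func (s *store) Close() error {
	if s.closed {
		return errors.New("already closed")
	}
	s.closed = true
	return nil
}

func main() {
	s := &store{}

	// Pattern 1: propagate. In fallible paths, check the error and return it
	// up the stack (here, fail fast since main has no caller).
	if err := s.Put("key"); err != nil {
		log.Fatalf("put failed: %v", err)
	}

	// Pattern 2: log and continue. In shutdown/cleanup paths whose signature
	// has no error return, surface the failure in the log instead.
	if err := s.Close(); err != nil {
		log.Printf("error closing store: %v", err)
	}

	// Pattern 3: explicit discard. When ignoring the error is deliberate,
	// `_ =` documents that decision and satisfies errcheck.
	_ = s.Close()
}

In the test files the same idea appears as require.NoError(t, err), which fails the test immediately rather than returning the error; the ForceAddTx/AddRemoteTx and Shutdown changes above are all instances of that variant.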