4 changes: 4 additions & 0 deletions plugin/evm/atomic/sync/syncer.go
@@ -146,6 +146,10 @@ func (s *Syncer) Sync(ctx context.Context) error {
	return s.syncer.Sync(ctx)
}

func (*Syncer) UpdateTarget(_ message.Syncable) error {
	return nil
}

// addZeroes returns the big-endian representation of `height`, prefixed with [common.HashLength] zeroes.
func addZeroes(height uint64) []byte {
	// Key format is [height(8 bytes)][blockchainID(32 bytes)]. Start should be the
44 changes: 44 additions & 0 deletions plugin/evm/vmsync/block_queue.go
@@ -0,0 +1,44 @@
// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package vmsync

import "sync"

// blockQueue buffers blocks that arrive while the coordinator is in the Running
// state. It is cleared (drained) on UpdateSyncTarget to avoid drops and is
// snapshotted at finalization via DequeueBatch. Enqueue is always allowed;
// DequeueBatch captures only the blocks buffered at the time of the call and
// clears them, so new enqueues after the snapshot are not part of that batch.
type blockQueue struct {
	mu sync.Mutex
	// buffered blocks accumulated before finalization
	items []EthBlockWrapper
}

// newBlockQueue creates a new empty queue.
func newBlockQueue() *blockQueue {
	return &blockQueue{}
}

// Enqueue appends a block to the buffer. Returns true if the block was queued,
// false if the block is nil.
func (q *blockQueue) Enqueue(b EthBlockWrapper) bool {
	if b == nil {
		return false
Contributor: Is this reasonable and/or possible?
	}
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items = append(q.items, b)
	return true
}

// DequeueBatch returns the current buffered blocks and clears the buffer. New
// arrivals after the snapshot are not included and remain buffered for later.
func (q *blockQueue) DequeueBatch() []EthBlockWrapper {
	q.mu.Lock()
	defer q.mu.Unlock()
	out := q.items
	q.items = nil
	return out
}
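
A minimal usage sketch of blockQueue, assuming a coordinator drives it roughly as the doc comment describes; exampleCoordinatorFlow and the incoming slice are hypothetical names and not part of this change, while EthBlockWrapper is the type already defined in the vmsync package.

// Hypothetical coordinator fragment: buffer incoming blocks while Running,
// then drain one snapshot at finalization.
func exampleCoordinatorFlow(q *blockQueue, incoming []EthBlockWrapper) []EthBlockWrapper {
	// While Running, buffer every block; nil blocks are rejected by Enqueue.
	for _, b := range incoming {
		_ = q.Enqueue(b)
	}
	// At finalization, snapshot everything buffered so far and clear the
	// queue; blocks enqueued after this call land in a later batch.
	return q.DequeueBatch()
}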