diff --git a/headertest/store.go b/headertest/store.go
index 8ce96213..b08b8c03 100644
--- a/headertest/store.go
+++ b/headertest/store.go
@@ -79,8 +79,9 @@ func (m *Store[H]) GetByHeight(_ context.Context, height uint64) (H, error) {
 	return zero, header.ErrNotFound
 }
 
-func (m *Store[H]) DeleteTo(ctx context.Context, to uint64) error {
-	for h := m.TailHeight; h < to; h++ {
+func (m *Store[H]) DeleteRange(ctx context.Context, from, to uint64) error {
+	// Delete headers in the range [from:to)
+	for h := from; h < to; h++ {
 		_, ok := m.Headers[h]
 		if !ok {
 			continue
@@ -95,7 +96,16 @@ func (m *Store[H]) DeleteTo(ctx context.Context, to uint64) error {
 		delete(m.Headers, h) // must be after deleteFn
 	}
 
-	m.TailHeight = to
+	// Update TailHeight if we deleted from the beginning
+	if from <= m.TailHeight {
+		m.TailHeight = to
+	}
+
+	// Update HeadHeight if we deleted from the end
+	if to >= m.HeadHeight {
+		m.HeadHeight = from - 1
+	}
+
 	return nil
 }
 
diff --git a/interface.go b/interface.go
index 1c719fa6..de7146de 100644
--- a/interface.go
+++ b/interface.go
@@ -85,8 +85,8 @@ type Store[H Header[H]] interface {
 	// GetRange returns the range [from:to).
 	GetRange(context.Context, uint64, uint64) ([]H, error)
 
-	// DeleteTo deletes the range [Tail():to).
-	DeleteTo(ctx context.Context, to uint64) error
+	// DeleteRange deletes the range [from:to).
+	DeleteRange(ctx context.Context, from, to uint64) error
 
 	// OnDelete registers given handler to be called whenever a header with the height is being removed.
 	// OnDelete guarantees that the header is accessible for the handler with GetByHeight and is removed
diff --git a/p2p/server_test.go b/p2p/server_test.go
index 0af96b11..315359fc 100644
--- a/p2p/server_test.go
+++ b/p2p/server_test.go
@@ -192,7 +192,7 @@ func (timeoutStore[H]) GetRange(ctx context.Context, _, _ uint64) ([]H, error) {
 	return nil, ctx.Err()
 }
 
-func (timeoutStore[H]) DeleteTo(ctx context.Context, _ uint64) error {
+func (timeoutStore[H]) DeleteRange(ctx context.Context, _, _ uint64) error {
 	<-ctx.Done()
 	return ctx.Err()
 }
diff --git a/store/store_delete.go b/store/store_delete.go
index 378ec03f..408c8a71 100644
--- a/store/store_delete.go
+++ b/store/store_delete.go
@@ -11,8 +11,6 @@ import (
 	"time"
 
 	"github.com/ipfs/go-datastore"
-
-	"github.com/celestiaorg/go-header"
 )
 
 // OnDelete implements [header.Store] interface.
@@ -36,59 +34,6 @@ func (s *Store[H]) OnDelete(fn func(context.Context, uint64) error) {
 	})
 }
 
-// DeleteTo implements [header.Store] interface.
-func (s *Store[H]) DeleteTo(ctx context.Context, to uint64) error {
-	// ensure all the pending headers are synchronized
-	err := s.Sync(ctx)
-	if err != nil {
-		return err
-	}
-
-	head, err := s.Head(ctx)
-	if err != nil {
-		return fmt.Errorf("header/store: reading head: %w", err)
-	}
-	if head.Height()+1 < to {
-		_, err := s.getByHeight(ctx, to)
-		if errors.Is(err, header.ErrNotFound) {
-			return fmt.Errorf(
-				"header/store: delete to %d beyond current head(%d)",
-				to,
-				head.Height(),
-			)
-		}
-		if err != nil {
-			return fmt.Errorf("delete to potential new head: %w", err)
-		}
-
-		// if `to` is bigger than the current head and is stored - allow delete, making `to` a new head
-	}
-
-	tail, err := s.Tail(ctx)
-	if err != nil {
-		return fmt.Errorf("header/store: reading tail: %w", err)
-	}
-	if tail.Height() >= to {
-		return fmt.Errorf("header/store: delete to %d below current tail(%d)", to, tail.Height())
-	}
-
-	err = s.deleteRange(ctx, tail.Height(), to)
-	if errors.Is(err, header.ErrNotFound) && head.Height()+1 == to {
-		// this is the case where we have deleted all the headers
-		// wipe the store
-		if err := s.wipe(ctx); err != nil {
-			return fmt.Errorf("header/store: wipe: %w", err)
-		}
-
-		return nil
-	}
-	if err != nil {
-		return fmt.Errorf("header/store: delete to height %d: %w", to, err)
-	}
-
-	return nil
-}
-
 // deleteRangeParallelThreshold defines the threshold for parallel deletion.
 // If range is smaller than this threshold, deletion will be performed sequentially.
 var (
@@ -96,69 +41,6 @@ var (
 	errDeleteTimeout = errors.New("delete timeout")
 )
 
-// deleteRange deletes [from:to) header range from the store.
-// It gracefully handles context and errors attempting to save interrupted progress.
-func (s *Store[H]) deleteRange(ctx context.Context, from, to uint64) (err error) {
-	startTime := time.Now()
-
-	var (
-		height  uint64
-		missing int
-	)
-	defer func() {
-		if err != nil {
-			if errors.Is(err, errDeleteTimeout) {
-				log.Warnw("partial delete",
-					"from_height", from,
-					"expected_to_height", to,
-					"actual_to_height", height,
-					"hdrs_not_found", missing,
-					"took(s)", time.Since(startTime),
-				)
-			} else {
-				log.Errorw("partial delete with error",
-					"from_height", from,
-					"expected_to_height", to,
-					"actual_to_height", height,
-					"hdrs_not_found", missing,
-					"took(s)", time.Since(startTime),
-					"err", err,
-				)
-			}
-		} else if to-from > 1 {
-			log.Debugw("deleted headers",
-				"from_height", from,
-				"to_height", to,
-				"hdrs_not_found", missing,
-				"took(s)", time.Since(startTime).Seconds(),
-			)
-		}
-
-		if derr := s.setTail(ctx, s.ds, height); derr != nil {
-			err = errors.Join(err, fmt.Errorf("setting tail to %d: %w", height, derr))
-		}
-	}()
-
-	deleteCtx := ctx
-	if deadline, ok := ctx.Deadline(); ok {
-		// allocate 95% of caller's set deadline for deletion
-		// and give leftover to save progress
-		// this prevents store's state corruption from partial deletion
-		sub := deadline.Sub(startTime) / 100 * 95
-		var cancel context.CancelFunc
-		deleteCtx, cancel = context.WithDeadlineCause(ctx, startTime.Add(sub), errDeleteTimeout)
-		defer cancel()
-	}
-
-	if to-from < deleteRangeParallelThreshold {
-		height, missing, err = s.deleteSequential(deleteCtx, from, to)
-	} else {
-		height, missing, err = s.deleteParallel(deleteCtx, from, to)
-	}
-
-	return err
-}
-
 // deleteSingle deletes a single header from the store,
 // its caches and indexies, notifying any registered onDelete handlers.
 func (s *Store[H]) deleteSingle(
@@ -348,3 +230,186 @@ func (s *Store[H]) deleteParallel(ctx context.Context, from, to uint64) (uint64,
 	)
 	return highest, missing, nil
 }
+
+// DeleteRange deletes headers in the range [from:to) from the store.
+// It intelligently updates head and/or tail pointers based on what range is being deleted.
+func (s *Store[H]) DeleteRange(ctx context.Context, from, to uint64) error {
+	// ensure all the pending headers are synchronized
+	err := s.Sync(ctx)
+	if err != nil {
+		return err
+	}
+
+	head, err := s.Head(ctx)
+	if err != nil {
+		return fmt.Errorf("header/store: reading head: %w", err)
+	}
+
+	tail, err := s.Tail(ctx)
+	if err != nil {
+		return fmt.Errorf("header/store: reading tail: %w", err)
+	}
+
+	// validate range parameters
+	if from >= to {
+		return fmt.Errorf(
+			"header/store: invalid range [%d:%d) - from must be less than to",
+			from,
+			to,
+		)
+	}
+
+	if from < tail.Height() {
+		return fmt.Errorf(
+			"header/store: delete range from %d below current tail(%d)",
+			from,
+			tail.Height(),
+		)
+	}
+
+	// Note: Allow deletion beyond head to match original DeleteTo behavior
+	// Missing headers in the range will be handled gracefully by the deletion logic
+
+	// if range is empty within the current store bounds, it's a no-op
+	if from > head.Height() || to <= tail.Height() {
+		return nil
+	}
+
+	// Validate that deletion won't create gaps in the store
+	// Only allow deletions that:
+	// 1. Start from tail (advancing tail forward)
+	// 2. End at head+1 (moving head backward)
+	// 3. Delete the entire store
+	if from > tail.Height() && to <= head.Height() {
+		return fmt.Errorf(
+			"header/store: deletion range [%d:%d) would create gaps in the store. "+
+				"Only deletion from tail (%d) or to head+1 (%d) is supported",
+			from, to, tail.Height(), head.Height()+1,
+		)
+	}
+
+	// Check if we're deleting all existing headers (making store empty)
+	// Only wipe if 'to' is exactly at head+1 (normal case) to avoid accidental wipes
+	if from <= tail.Height() && to == head.Height()+1 {
+		// Check if any headers exist at or beyond 'to'
+		hasHeadersAtOrBeyond := false
+		for checkHeight := to; checkHeight <= to+10; checkHeight++ {
+			if _, err := s.getByHeight(ctx, checkHeight); err == nil {
+				hasHeadersAtOrBeyond = true
+				break
+			}
+		}
+
+		if !hasHeadersAtOrBeyond {
+			// wipe the entire store
+			if err := s.wipe(ctx); err != nil {
+				return fmt.Errorf("header/store: wipe: %w", err)
+			}
+			return nil
+		}
+	}
+
+	// Determine which pointers need updating
+	updateTail := from <= tail.Height()
+	updateHead := to > head.Height()
+
+	// Delete the headers without automatic tail updates
+	err = s.deleteRangeRaw(ctx, from, to)
+	if err != nil {
+		return fmt.Errorf("header/store: delete range [%d:%d): %w", from, to, err)
+	}
+
+	// Update tail if we deleted from the beginning
+	if updateTail {
+		err = s.setTail(ctx, s.ds, to)
+		if err != nil {
+			return fmt.Errorf("header/store: setting tail to %d: %w", to, err)
+		}
+	}
+
+	// Update head if we deleted from the end
+	if updateHead && from > tail.Height() {
+		newHeadHeight := from - 1
+		if newHeadHeight >= tail.Height() {
+			err = s.setHead(ctx, s.ds, newHeadHeight)
+			if err != nil {
+				return fmt.Errorf("header/store: setting head to %d: %w", newHeadHeight, err)
+			}
+		}
+	}
+
+	return nil
+}
+
+// deleteRangeRaw deletes [from:to) header range without updating head or tail pointers.
+func (s *Store[H]) deleteRangeRaw(ctx context.Context, from, to uint64) (err error) {
+	startTime := time.Now()
+
+	var (
+		height  uint64
+		missing int
+	)
+	defer func() {
+		if err != nil {
+			if errors.Is(err, errDeleteTimeout) {
+				log.Warnw("partial delete range",
+					"from_height", from,
+					"expected_to_height", to,
+					"actual_to_height", height,
+					"hdrs_not_found", missing,
+					"took(s)", time.Since(startTime),
+				)
+			} else {
+				log.Errorw("partial delete range with error",
+					"from_height", from,
+					"expected_to_height", to,
+					"actual_to_height", height,
+					"hdrs_not_found", missing,
+					"took(s)", time.Since(startTime),
+					"err", err,
+				)
+			}
+		} else if to-from > 1 {
+			log.Debugw("deleted range",
+				"from_height", from,
+				"to_height", to,
+				"hdrs_not_found", missing,
+				"took(s)", time.Since(startTime).Seconds(),
+			)
+		}
+	}()
+
+	deleteCtx := ctx
+	if deadline, ok := ctx.Deadline(); ok {
+		// allocate 95% of caller's set deadline for deletion
+		// and give leftover to save progress
+		sub := deadline.Sub(startTime) / 100 * 95
+		var cancel context.CancelFunc
+		deleteCtx, cancel = context.WithDeadlineCause(ctx, startTime.Add(sub), errDeleteTimeout)
+		defer cancel()
+	}
+
+	if to-from < deleteRangeParallelThreshold {
+		height, missing, err = s.deleteSequential(deleteCtx, from, to)
+	} else {
+		height, missing, err = s.deleteParallel(deleteCtx, from, to)
+	}
+
+	return err
+}
+
+// setHead sets the head of the store to the specified height.
+func (s *Store[H]) setHead(ctx context.Context, write datastore.Write, to uint64) error {
+	newHead, err := s.getByHeight(ctx, to)
+	if err != nil {
+		return fmt.Errorf("getting head: %w", err)
+	}
+
+	// update the contiguous head
+	s.contiguousHead.Store(&newHead)
+	if err := writeHeaderHashTo(ctx, write, newHead, headKey); err != nil {
+		return fmt.Errorf("writing headKey in batch: %w", err)
+	}
+
+	return nil
+}
diff --git a/store/store_test.go b/store/store_test.go
index f9ffca0c..6725a63f 100644
--- a/store/store_test.go
+++ b/store/store_test.go
@@ -525,7 +525,7 @@ func TestStore_GetRange(t *testing.T) {
 	}
 }
 
-func TestStore_DeleteTo(t *testing.T) {
+func TestStore_DeleteRange_Tail(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
 	t.Cleanup(cancel)
 
@@ -590,7 +590,7 @@ func TestStore_DeleteTo(t *testing.T) {
 			ctx, cancel := context.WithTimeout(ctx, time.Second)
 			defer cancel()
 
-			err := store.DeleteTo(ctx, tt.to)
+			err := store.DeleteRange(ctx, from, tt.to)
 			if tt.wantError {
 				assert.Error(t, err)
 				return
@@ -612,7 +612,7 @@ func TestStore_DeleteTo(t *testing.T) {
 	}
 }
 
-func TestStore_DeleteTo_EmptyStore(t *testing.T) {
+func TestStore_DeleteRange_EmptyStore(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
 	t.Cleanup(cancel)
 
@@ -629,11 +629,14 @@ func TestStore_DeleteTo_EmptyStore(t *testing.T) {
 	require.NoError(t, err)
 	time.Sleep(10 * time.Millisecond)
 
-	err = store.DeleteTo(ctx, 101)
+	tail, err := store.Tail(ctx)
+	require.NoError(t, err)
+
+	err = store.DeleteRange(ctx, tail.Height(), 101)
 	require.NoError(t, err)
 
 	// assert store is empty
-	tail, err := store.Tail(ctx)
+	tail, err = store.Tail(ctx)
 	assert.Nil(t, tail)
 	assert.ErrorIs(t, err, header.ErrEmptyStore)
 	head, err := store.Head(ctx)
@@ -654,7 +657,7 @@ func TestStore_DeleteTo_EmptyStore(t *testing.T) {
 	assert.ErrorIs(t, err, header.ErrEmptyStore)
 }
 
-func TestStore_DeleteTo_MoveHeadAndTail(t *testing.T) {
+func TestStore_DeleteRange_MoveHeadAndTail(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
 	t.Cleanup(cancel)
 
@@ -677,11 +680,14 @@ func TestStore_DeleteTo_MoveHeadAndTail(t *testing.T) {
 	require.NoError(t, err)
 	time.Sleep(10 * time.Millisecond)
 
-	err = store.DeleteTo(ctx, 111)
+	tail, err := store.Tail(ctx)
+	require.NoError(t, err)
+
+	err = store.DeleteRange(ctx, tail.Height(), 111)
 	require.NoError(t, err)
 
 	// assert store is not empty
-	tail, err := store.Tail(ctx)
+	tail, err = store.Tail(ctx)
 	require.NoError(t, err)
 	assert.Equal(t, int(gap[len(gap)-1].Height()+1), int(tail.Height()))
 	head, err := store.Head(ctx)
@@ -702,32 +708,6 @@ func TestStore_DeleteTo_MoveHeadAndTail(t *testing.T) {
 	assert.Equal(t, suite.Head().Height(), head.Height())
 }
 
-func TestStore_DeleteTo_Synchronized(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
-	t.Cleanup(cancel)
-
-	suite := headertest.NewTestSuite(t)
-
-	ds := sync.MutexWrap(datastore.NewMapDatastore())
-	store := NewTestStore(t, ctx, ds, suite.Head(), WithWriteBatchSize(10))
-
-	err := store.Append(ctx, suite.GenDummyHeaders(50)...)
-	require.NoError(t, err)
-
-	err = store.Append(ctx, suite.GenDummyHeaders(50)...)
-	require.NoError(t, err)
-
-	err = store.Append(ctx, suite.GenDummyHeaders(50)...)
-	require.NoError(t, err)
-
-	err = store.DeleteTo(ctx, 100)
-	require.NoError(t, err)
-
-	tail, err := store.Tail(ctx)
-	require.NoError(t, err)
-	require.EqualValues(t, 100, tail.Height())
-}
-
 func TestStore_OnDelete(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
 	t.Cleanup(cancel)
@@ -758,7 +738,10 @@ func TestStore_OnDelete(t *testing.T) {
 		return nil
 	})
 
-	err = store.DeleteTo(ctx, 101)
+	tail, err := store.Tail(ctx)
+	require.NoError(t, err)
+
+	err = store.DeleteRange(ctx, tail.Height(), 101)
 	require.NoError(t, err)
 
 	assert.Equal(t, 50, deleted)
@@ -889,7 +872,10 @@ func TestStore_HasAt(t *testing.T) {
 	require.NoError(t, err)
 	time.Sleep(100 * time.Millisecond)
 
-	err = store.DeleteTo(ctx, 50)
+	tail, err := store.Tail(ctx)
+	require.NoError(t, err)
+
+	err = store.DeleteRange(ctx, tail.Height(), 50)
 	require.NoError(t, err)
 
 	has := store.HasAt(ctx, 100)
@@ -907,3 +893,378 @@ func TestStore_HasAt(t *testing.T) {
 	has = store.HasAt(ctx, 0)
 	assert.False(t, has)
 }
+
+func TestStore_DeleteRange(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+	t.Cleanup(cancel)
+
+	t.Run("delete range from head down", func(t *testing.T) {
+		suite := headertest.NewTestSuite(t)
+		ds := sync.MutexWrap(datastore.NewMapDatastore())
+		store := NewTestStore(t, ctx, ds, suite.Head(), WithWriteBatchSize(10))
+
+		const count = 20
+		in := suite.GenDummyHeaders(count)
+		err := store.Append(ctx, in...)
+		require.NoError(t, err)
+
+		time.Sleep(100 * time.Millisecond)
+
+		// Genesis is at height 1, GenDummyHeaders(20) creates headers 2-21
+		// So head should be at height 21, tail at height 1
+		head, err := store.Head(ctx)
+		require.NoError(t, err)
+		assert.Equal(t, uint64(21), head.Height())
+
+		// Delete from height 16 to 22 (should delete 16, 17, 18, 19, 20, 21)
+		err = store.DeleteRange(ctx, 16, 22)
+		require.NoError(t, err)
+
+		// Verify new head is at height 15
+		newHead, err := store.Head(ctx)
+		require.NoError(t, err)
+		assert.Equal(t, uint64(15), newHead.Height())
+
+		// Verify deleted headers are gone
+		for h := uint64(16); h <= 21; h++ {
+			has := store.HasAt(ctx, h)
+			assert.False(t, has, "height %d should be deleted", h)
+		}
+
+		// Verify remaining headers still exist
+		for h := uint64(1); h <= 15; h++ {
+			has := store.HasAt(ctx, h)
+			assert.True(t, has, "height %d should still exist", h)
+		}
+	})
+
+	t.Run("delete range in middle should fail", func(t *testing.T) {
+		suite := headertest.NewTestSuite(t)
+		ds := sync.MutexWrap(datastore.NewMapDatastore())
+		store := NewTestStore(t, ctx, ds, suite.Head(), WithWriteBatchSize(10))
+
+		const count = 20
+		in := suite.GenDummyHeaders(count)
+		err := store.Append(ctx, in...)
+		require.NoError(t, err)
+
+		time.Sleep(100 * time.Millisecond)
+
+		// Try to delete a range in the middle (heights 8-12) which would create gaps
+		err = store.DeleteRange(ctx, 8, 12)
+		require.Error(t, err)
+		assert.Contains(t, err.Error(), "would create gaps in the store")
+
+		// Verify all headers still exist since the operation failed
+		for h := uint64(1); h <= 21; h++ {
+			has := store.HasAt(ctx, h)
+			assert.True(t, has, "height %d should still exist after failed deletion", h)
+		}
+	})
+
+	t.Run("delete range from tail up", func(t *testing.T) {
+		suite := headertest.NewTestSuite(t)
+		ds := sync.MutexWrap(datastore.NewMapDatastore())
+		store := NewTestStore(t, ctx, ds, suite.Head(), WithWriteBatchSize(10))
+
+		const count = 20
+		in := suite.GenDummyHeaders(count)
+		err := store.Append(ctx, in...)
+		require.NoError(t, err)
+
+		time.Sleep(100 * time.Millisecond)
+
+		originalHead, err := store.Head(ctx)
+		require.NoError(t, err)
+
+		// Delete from tail height to height 10
+		err = store.DeleteRange(ctx, 1, 10)
+		require.NoError(t, err)
+
+		// Verify head is unchanged
+		head, err := store.Head(ctx)
+		require.NoError(t, err)
+		assert.Equal(t, originalHead.Height(), head.Height())
+
+		// Verify tail moved to height 10
+		tail, err := store.Tail(ctx)
+		require.NoError(t, err)
+		assert.Equal(t, uint64(10), tail.Height())
+
+		// Verify deleted headers are gone
+		for h := uint64(1); h < 10; h++ {
+			has := store.HasAt(ctx, h)
+			assert.False(t, has, "height %d should be deleted", h)
+		}
+
+		// Verify remaining headers still exist
+		for h := uint64(10); h <= 21; h++ {
+			has := store.HasAt(ctx, h)
+			assert.True(t, has, "height %d should still exist", h)
+		}
+	})
+
+	t.Run("delete range completely out of bounds", func(t *testing.T) {
+		suite := headertest.NewTestSuite(t)
+		ds := sync.MutexWrap(datastore.NewMapDatastore())
+		store := NewTestStore(t, ctx, ds, suite.Head(), WithWriteBatchSize(10))
+
+		const count = 20
+		in := suite.GenDummyHeaders(count)
+		err := store.Append(ctx, in...)
+		require.NoError(t, err)
+
+		time.Sleep(100 * time.Millisecond)
+
+		originalHead, err := store.Head(ctx)
+		require.NoError(t, err)
+		originalTail, err := store.Tail(ctx)
+		require.NoError(t, err)
+
+		// Delete range completely above head - should be no-op
+		err = store.DeleteRange(ctx, 200, 300)
+		require.NoError(t, err)
+
+		// Verify head and tail are unchanged
+		head, err := store.Head(ctx)
+		require.NoError(t, err)
+		assert.Equal(t, originalHead.Height(), head.Height())
+
+		tail, err := store.Tail(ctx)
+		require.NoError(t, err)
+		assert.Equal(t, originalTail.Height(), tail.Height())
+
+		// Verify all original headers still exist
+		for h := uint64(1); h <= 21; h++ {
+			has := store.HasAt(ctx, h)
+			assert.True(t, has, "height %d should still exist", h)
+		}
+	})
+
+	t.Run("invalid range errors", func(t *testing.T) {
+		suite := headertest.NewTestSuite(t)
+		ds := sync.MutexWrap(datastore.NewMapDatastore())
+		store := NewTestStore(t, ctx, ds, suite.Head(), WithWriteBatchSize(10))
+
+		const count = 20
+		in := suite.GenDummyHeaders(count)
+		err := store.Append(ctx, in...)
+		require.NoError(t, err)
+
+		time.Sleep(100 * time.Millisecond)
+
+		// from == to should error
+		err = store.DeleteRange(ctx, 50, 50)
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "from must be less than to")
+
+		// from > to should error
+		err = store.DeleteRange(ctx, 60, 50)
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "from must be less than to")
+
+		// from below tail should error
+		err = store.DeleteRange(ctx, 0, 5)
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "below current tail")
+
+		// middle deletion should error
+		err = store.DeleteRange(ctx, 10, 15)
+		assert.Error(t, err)
+		assert.Contains(t, err.Error(), "would create gaps")
+	})
+}
+
+func TestStore_DeleteRange_SingleHeader(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	t.Cleanup(cancel)
+
+	suite := headertest.NewTestSuite(t)
+
+	ds := sync.MutexWrap(datastore.NewMapDatastore())
+	store := NewTestStore(t, ctx, ds, suite.Head(), WithWriteBatchSize(10))
+
+	// Add a single header on top of genesis (genesis is at height 1, the new header at height 2)
+	headers := suite.GenDummyHeaders(1)
+	err := store.Append(ctx, headers...)
+	require.NoError(t, err)
+
+	// Should not be able to delete below tail
+	err = store.DeleteRange(ctx, 0, 1)
+	require.Error(t, err) // should error - would delete below tail
+}
+
+func TestStore_DeleteRange_Synchronized(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	t.Cleanup(cancel)
+
+	suite := headertest.NewTestSuite(t)
+
+	ds := sync.MutexWrap(datastore.NewMapDatastore())
+	store := NewTestStore(t, ctx, ds, suite.Head(), WithWriteBatchSize(10))
+
+	err := store.Append(ctx, suite.GenDummyHeaders(50)...)
+	require.NoError(t, err)
+
+	time.Sleep(100 * time.Millisecond)
+
+	// Ensure sync completes
+	err = store.Sync(ctx)
+	require.NoError(t, err)
+
+	// Delete from height 26 to head+1
+	head, err := store.Head(ctx)
+	require.NoError(t, err)
+
+	err = store.DeleteRange(ctx, 26, head.Height()+1)
+	require.NoError(t, err)
+
+	// Verify head is now at height 25
+	newHead, err := store.Head(ctx)
+	require.NoError(t, err)
+	require.EqualValues(t, 25, newHead.Height())
+
+	// Verify headers above 25 are gone
+	for h := uint64(26); h <= 50; h++ {
+		has := store.HasAt(ctx, h)
+		assert.False(t, has, "height %d should be deleted", h)
+	}
+
+	// Verify headers at and below 25 still exist
+	for h := uint64(1); h <= 25; h++ {
+		has := store.HasAt(ctx, h)
+		assert.True(t, has, "height %d should still exist", h)
+	}
+}
+
+func TestStore_DeleteRange_OnDeleteHandlers(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	t.Cleanup(cancel)
+
+	suite := headertest.NewTestSuite(t)
+
+	ds := sync.MutexWrap(datastore.NewMapDatastore())
+	store := NewTestStore(t, ctx, ds, suite.Head(), WithWriteBatchSize(10))
+
+	err := store.Append(ctx, suite.GenDummyHeaders(50)...)
+	require.NoError(t, err)
+
+	time.Sleep(100 * time.Millisecond)
+
+	// Get the actual head height to calculate expected deletions
+	head, err := store.Head(ctx)
+	require.NoError(t, err)
+
+	var deletedHeights []uint64
+	store.OnDelete(func(ctx context.Context, height uint64) error {
+		deletedHeights = append(deletedHeights, height)
+		return nil
+	})
+
+	// Delete from height 41 to head+1
+	err = store.DeleteRange(ctx, 41, head.Height()+1)
+	require.NoError(t, err)
+
+	// Verify onDelete was called for each deleted height (from 41 to head height)
+	var expectedDeleted []uint64
+	for h := uint64(41); h <= head.Height(); h++ {
+		expectedDeleted = append(expectedDeleted, h)
+	}
+	assert.ElementsMatch(t, expectedDeleted, deletedHeights)
+}
+
+func TestStore_DeleteRange_LargeRange(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+	t.Cleanup(cancel)
+
+	suite := headertest.NewTestSuite(t)
+
+	ds := sync.MutexWrap(datastore.NewMapDatastore())
+	store := NewTestStore(t, ctx, ds, suite.Head(), WithWriteBatchSize(100))
+
+	// Create a large number of headers to trigger parallel deletion
+	const count = 15000
+	headers := suite.GenDummyHeaders(count)
+	err := store.Append(ctx, headers...)
+	require.NoError(t, err)
+
+	time.Sleep(500 * time.Millisecond) // allow time for large batch to write
+
+	// Get head height for deletion range
+	head, err := store.Head(ctx)
+	require.NoError(t, err)
+
+	// Delete a large range to test parallel deletion path (from 5001 to head+1)
+	const keepHeight = 5000
+	err = store.DeleteRange(ctx, keepHeight+1, head.Height()+1)
+	require.NoError(t, err)
+
+	// Verify new head
+	newHead, err := store.Head(ctx)
+	require.NoError(t, err)
+	require.EqualValues(t, keepHeight, newHead.Height())
+
+	// Spot check that high numbered headers are gone
+	for h := uint64(keepHeight + 1000); h <= count; h += 1000 {
+		has := store.HasAt(ctx, h)
+		assert.False(t, has, "height %d should be deleted", h)
+	}
+
+	// Spot check that low numbered headers still exist
+	for h := uint64(1000); h <= keepHeight; h += 1000 {
+		has := store.HasAt(ctx, h)
+		assert.True(t, has, "height %d should still exist", h)
+	}
+}
+
+func TestStore_DeleteRange_ValidationErrors(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	t.Cleanup(cancel)
+
+	suite := headertest.NewTestSuite(t)
+
+	ds := sync.MutexWrap(datastore.NewMapDatastore())
+	store := NewTestStore(t, ctx, ds, suite.Head(), WithWriteBatchSize(10))
+
+	err := store.Append(ctx, suite.GenDummyHeaders(20)...)
+	require.NoError(t, err)
+
+	time.Sleep(100 * time.Millisecond)
+
+	tail, err := store.Tail(ctx)
+	require.NoError(t, err)
+
+	tests := []struct {
+		name   string
+		from   uint64
+		to     uint64
+		errMsg string
+	}{
+		{
+			name:   "delete from below tail boundary",
+			from:   tail.Height() - 1,
+			to:     tail.Height() + 5,
+			errMsg: "below current tail",
+		},
+		{
+			name:   "invalid range - from equals to",
+			from:   10,
+			to:     10,
+			errMsg: "from must be less than to",
+		},
+		{
+			name:   "invalid range - from greater than to",
+			from:   15,
+			to:     10,
+			errMsg: "from must be less than to",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := store.DeleteRange(ctx, tt.from, tt.to)
+			require.Error(t, err)
+			assert.Contains(t, err.Error(), tt.errMsg)
+		})
+	}
+}
diff --git a/sync/syncer_tail.go b/sync/syncer_tail.go
index 877ff407..c7790c99 100644
--- a/sync/syncer_tail.go
+++ b/sync/syncer_tail.go
@@ -130,7 +130,7 @@ func (s *Syncer[H]) moveTail(ctx context.Context, from, to H) error {
 	switch {
 	case from.Height() < to.Height():
 		log.Infof("move tail up from %d to %d, pruning the diff...", from.Height(), to.Height())
-		err := s.store.DeleteTo(ctx, to.Height())
+		err := s.store.DeleteRange(ctx, from.Height(), to.Height())
 		if err != nil {
 			return fmt.Errorf(
 				"deleting headers up to newly configured tail(%d): %w",
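
Usage sketch (not part of the diff above): with the interface change, callers that previously pruned via DeleteTo(ctx, to) now pass an explicit lower bound, as the syncer_tail.go hunk does. The helpers below are a minimal, hypothetical illustration of the two supported shapes of deletion, half-open [from:to), assuming the updated header.Store[H] interface lands as shown; the function names and package layout are illustrative only.

package main

import (
	"context"

	header "github.com/celestiaorg/go-header"
)

// pruneTail drops [tailHeight:newTail), advancing the store's tail to newTail.
// The range is half-open on the right, so the header at newTail itself is kept.
func pruneTail[H header.Header[H]](ctx context.Context, s header.Store[H], tailHeight, newTail uint64) error {
	return s.DeleteRange(ctx, tailHeight, newTail)
}

// rollbackHead drops [newHead+1:oldHead+1), trimming the head back to newHead.
// Interior ranges (from above tail and to at or below head) are rejected by the
// store implementation in this diff because they would leave a gap between tail and head.
func rollbackHead[H header.Header[H]](ctx context.Context, s header.Store[H], newHead, oldHead uint64) error {
	return s.DeleteRange(ctx, newHead+1, oldHead+1)
}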