@@ -4,6 +4,8 @@
 	"context"
 	"errors"
 	"fmt"
+	"slices"
+	"sync"
 	"sync/atomic"
 	"time"
 
@@ -58,6 +60,9 @@ type Store[H header.Header[H]] struct {
 	// syncCh is a channel used to synchronize writes
 	syncCh chan chan struct{}
 
+	onDeleteMu sync.Mutex
+	onDelete   []func(context.Context, []H) error
+
 	Params Parameters
 }
 
@@ -252,6 +257,16 @@ func (s *Store[H]) GetByHeight(ctx context.Context, height uint64) (H, error) {
 }
 
 func (s *Store[H]) getByHeight(ctx context.Context, height uint64) (H, error) {
+	head, _ := s.Head(ctx)
+	if !head.IsZero() && head.Height() == height {
+		return head, nil
+	}
+
+	tail, _ := s.Tail(ctx)
+	if !tail.IsZero() && tail.Height() == height {
+		return tail, nil
+	}
+
 	if h := s.pending.GetByHeight(height); !h.IsZero() {
 		return h, nil
 	}
@@ -342,6 +357,21 @@ func (s *Store[H]) HasAt(ctx context.Context, height uint64) bool {
 	return head.Height() >= height && height >= tail.Height()
 }
 
+func (s *Store[H]) OnDelete(fn func(context.Context, []H) error) {
+	s.onDeleteMu.Lock()
+	defer s.onDeleteMu.Unlock()
+
+	s.onDelete = append(s.onDelete, func(ctx context.Context, h []H) (rerr error) {
+		defer func() {
+			err := recover()
+			if err != nil {
+				rerr = fmt.Errorf("header/store: user provided onDelete panicked with: %s", err)
+			}
+		}()
+		return fn(ctx, h)
+	})
+}
+
 // DeleteTo implements [header.Store] interface.
 func (s *Store[H]) DeleteTo(ctx context.Context, to uint64) error {
 	// ensure all the pending headers are synchronized
@@ -419,6 +449,22 @@ func (s *Store[H]) deleteRange(ctx context.Context, from, to uint64) error {
 		return fmt.Errorf("new batch: %w", err)
 	}
 
+	s.onDeleteMu.Lock()
+	onDelete := slices.Clone(s.onDelete)
+	s.onDeleteMu.Unlock()
+	for _, deleteFn := range onDelete {
+		if err := deleteFn(ctx, headers); err != nil {
+			// abort deletion if the onDelete handler fails
+			// to ensure atomicity between stored headers and user-specific data
+			// TODO(@Wondertan): Batch is not actually atomic and could write some data at this point,
+			// but it's fine for now: https://github.com/celestiaorg/go-header/issues/307
+			// TODO2(@Wondertan): Once we move to txn, find a way to pass txn through context,
+			// so that users can use it in their onDelete handlers
+			// to ensure atomicity between deleted headers and user-specific data
+			return fmt.Errorf("on delete handler: %w", err)
+		}
+	}
+
 	for _, h := range headers {
 		if err := batch.Delete(ctx, hashKey(h.Hash())); err != nil {
 			return fmt.Errorf("delete hash key (%X): %w", h.Hash(), err)
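
Usage sketch: the snippet below illustrates how a caller might wire up the hook added in this diff. Only Store.OnDelete, Store.DeleteTo, and the header.Header interface come from go-header; the setupPruning helper, its parameters, and the pruneIndex callback are hypothetical names introduced purely for illustration.

package example

import (
	"context"
	"fmt"

	"github.com/celestiaorg/go-header"
	"github.com/celestiaorg/go-header/store"
)

// setupPruning is a hypothetical helper: it registers a cleanup hook via the new
// Store.OnDelete and then prunes headers up to pruneTo via Store.DeleteTo.
// pruneIndex stands in for whatever user-specific, per-height data must be
// removed together with the deleted headers.
func setupPruning[H header.Header[H]](
	ctx context.Context,
	s *store.Store[H],
	pruneTo uint64,
	pruneIndex func(context.Context, uint64) error,
) error {
	s.OnDelete(func(ctx context.Context, deleted []H) error {
		for _, h := range deleted {
			// Returning an error aborts the deletion batch in deleteRange,
			// keeping stored headers and the derived index consistent.
			if err := pruneIndex(ctx, h.Height()); err != nil {
				return fmt.Errorf("pruning index at height %d: %w", h.Height(), err)
			}
		}
		return nil
	})

	// Every registered OnDelete handler runs before the batch delete is committed.
	return s.DeleteTo(ctx, pruneTo)
}

Note that, per the diff, deleteRange copies the registered handlers under onDeleteMu before invoking them, and OnDelete wraps each handler so a panic is converted into an error; a failing handler therefore aborts the deletion rather than leaving headers and user data out of sync.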