6 changes: 3 additions & 3 deletions x/blockdb/README.md
@@ -148,16 +148,16 @@ defer db.Close()
// Write a block
height := uint64(100)
blockData := []byte("block data")
err := db.WriteBlock(height, blockData)
err := db.Put(height, blockData)
if err != nil {
fmt.Println("Error writing block:", err)
return
}

// Read a block
blockData, err := db.ReadBlock(height)
blockData, err := db.Get(height)
if err != nil {
if errors.Is(err, blockdb.ErrBlockNotFound) {
if errors.Is(err, database.ErrNotFound) {
fmt.Println("Block doesn't exist at this height")
return
}
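Put together, the README example after this change reads roughly as the self-contained sketch below. The constructor call is an assumption borrowed from the tests in this PR (`New(DefaultConfig().WithDir(dir), logging.NoLog{})`), as is the import path `github.com/ava-labs/avalanchego/x/blockdb`; only `Put`, `Get`, and the `database.ErrNotFound` check come directly from this diff.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/utils/logging"
	"github.com/ava-labs/avalanchego/x/blockdb"
)

func main() {
	// Assumed constructor, mirroring the tests in this PR.
	db, err := blockdb.New(blockdb.DefaultConfig().WithDir("/tmp/blockdb"), logging.NoLog{})
	if err != nil {
		fmt.Println("Error opening database:", err)
		return
	}
	defer db.Close()

	// Write a block at a height.
	height := uint64(100)
	if err := db.Put(height, []byte("block data")); err != nil {
		fmt.Println("Error writing block:", err)
		return
	}

	// Read it back; a missing height is reported as database.ErrNotFound.
	blockData, err := db.Get(height)
	if err != nil {
		if errors.Is(err, database.ErrNotFound) {
			fmt.Println("Block doesn't exist at this height")
			return
		}
		fmt.Println("Error reading block:", err)
		return
	}
	fmt.Printf("Read %d bytes at height %d\n", len(blockData), height)
}
```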
53 changes: 25 additions & 28 deletions x/blockdb/database.go
@@ -20,6 +20,7 @@ import (
"go.uber.org/zap"

"github.com/ava-labs/avalanchego/cache/lru"
"github.com/ava-labs/avalanchego/database"
"github.com/ava-labs/avalanchego/utils/compression"
"github.com/ava-labs/avalanchego/utils/logging"

@@ -49,6 +50,8 @@ type BlockHeight = uint64
type BlockData = []byte

var (
_ database.HeightIndex = (*Database)(nil)

_ encoding.BinaryMarshaler = (*blockEntryHeader)(nil)
_ encoding.BinaryUnmarshaler = (*blockEntryHeader)(nil)
_ encoding.BinaryMarshaler = (*indexEntry)(nil)
@@ -300,7 +303,7 @@ func (s *Database) Close() error {
defer s.closeMu.Unlock()

if s.closed {
return nil
return database.ErrClosed
}
s.closed = true

@@ -315,16 +318,16 @@ func (s *Database) Close() error {
return err
}

// WriteBlock inserts a block into the store at the given height.
func (s *Database) WriteBlock(height BlockHeight, block BlockData) error {
// Put inserts a block into the store at the given height.
func (s *Database) Put(height BlockHeight, block BlockData) error {
s.closeMu.RLock()
defer s.closeMu.RUnlock()

if s.closed {
s.log.Error("Failed to write block: database is closed",
zap.Uint64("height", height),
)
return ErrDatabaseClosed
return database.ErrClosed
}

blockSize := len(block)
@@ -336,12 +339,6 @@ func (s *Database) WriteBlock(height BlockHeight, block BlockData) error {
return fmt.Errorf("%w: block size cannot exceed %d bytes", ErrBlockTooLarge, math.MaxUint32)
}

blockDataLen := uint32(blockSize)
if blockDataLen == 0 {
s.log.Error("Failed to write block: empty block", zap.Uint64("height", height))
return ErrBlockEmpty
}

indexFileOffset, err := s.indexEntryOffset(height)
if err != nil {
s.log.Error("Failed to write block: failed to calculate index entry offset",
@@ -359,7 +356,7 @@ func (s *Database) WriteBlock(height BlockHeight, block BlockData) error {
)
return fmt.Errorf("failed to compress block data: %w", err)
}
blockDataLen = uint32(len(blockToWrite))
blockDataLen := uint32(len(blockToWrite))

sizeWithDataHeader, err := safemath.Add(sizeOfBlockEntryHeader, blockDataLen)
if err != nil {
@@ -423,14 +420,14 @@ func (s *Database) WriteBlock(height BlockHeight, block BlockData) error {
}

// readBlockIndex reads the index entry for the given height.
// It returns ErrBlockNotFound if the block does not exist.
// It returns database.ErrNotFound if the block does not exist.
func (s *Database) readBlockIndex(height BlockHeight) (indexEntry, error) {
var entry indexEntry
if s.closed {
s.log.Error("Failed to read block index: database is closed",
zap.Uint64("height", height),
)
return entry, ErrDatabaseClosed
return entry, database.ErrClosed
}

// Skip the index entry read if we know the block is past the max height.
Expand All @@ -440,20 +437,20 @@ func (s *Database) readBlockIndex(height BlockHeight) (indexEntry, error) {
zap.Uint64("height", height),
zap.String("reason", "no blocks written yet"),
)
return entry, fmt.Errorf("%w: no blocks written yet", ErrBlockNotFound)
return entry, fmt.Errorf("%w: no blocks written yet", database.ErrNotFound)
}
if height > heights.maxBlockHeight {
s.log.Debug("Block not found",
zap.Uint64("height", height),
zap.Uint64("maxHeight", heights.maxBlockHeight),
zap.String("reason", "height beyond max"),
)
return entry, fmt.Errorf("%w: height %d is beyond max height %d", ErrBlockNotFound, height, heights.maxBlockHeight)
return entry, fmt.Errorf("%w: height %d is beyond max height %d", database.ErrNotFound, height, heights.maxBlockHeight)
}

entry, err := s.readIndexEntry(height)
if err != nil {
if errors.Is(err, ErrBlockNotFound) {
if errors.Is(err, database.ErrNotFound) {
s.log.Debug("Block not found",
zap.Uint64("height", height),
zap.String("reason", "no index entry found"),
@@ -471,9 +468,9 @@ func (s *Database) readBlockIndex(height BlockHeight) (indexEntry, error) {
return entry, nil
}

// ReadBlock retrieves a block by its height.
// Returns ErrBlockNotFound if the block is not found.
func (s *Database) ReadBlock(height BlockHeight) (BlockData, error) {
// Get retrieves a block by its height.
// Returns database.ErrNotFound if the block is not found.
func (s *Database) Get(height BlockHeight) (BlockData, error) {
s.closeMu.RLock()
defer s.closeMu.RUnlock()

@@ -530,14 +527,14 @@ func (s *Database) ReadBlock(height BlockHeight) (BlockData, error) {
return decompressed, nil
}

// HasBlock checks if a block exists at the given height.
func (s *Database) HasBlock(height BlockHeight) (bool, error) {
// Has checks if a block exists at the given height.
func (s *Database) Has(height BlockHeight) (bool, error) {
s.closeMu.RLock()
defer s.closeMu.RUnlock()

_, err := s.readBlockIndex(height)
if err != nil {
if errors.Is(err, ErrBlockNotFound) || errors.Is(err, ErrInvalidBlockHeight) {
if errors.Is(err, database.ErrNotFound) || errors.Is(err, ErrInvalidBlockHeight) {
return false, nil
}
s.log.Error("Failed to check if block exists: failed to read index entry",
@@ -568,7 +565,7 @@ func (s *Database) indexEntryOffset(height BlockHeight) (uint64, error) {
}

// readIndexEntry reads the index entry for the given height from the index file.
// Returns ErrBlockNotFound if the block does not exist.
// Returns database.ErrNotFound if the block does not exist.
func (s *Database) readIndexEntry(height BlockHeight) (indexEntry, error) {
var entry indexEntry

@@ -580,10 +577,10 @@ func (s *Database) readIndexEntry(height BlockHeight) (indexEntry, error) {
buf := make([]byte, sizeOfIndexEntry)
_, err = s.indexFile.ReadAt(buf, int64(offset))
if err != nil {
// Return ErrBlockNotFound if trying to read past the end of the index file
// Return database.ErrNotFound if trying to read past the end of the index file
// for a block that has not been indexed yet.
if errors.Is(err, io.EOF) {
return entry, fmt.Errorf("%w: EOF reading index entry at offset %d for height %d", ErrBlockNotFound, offset, height)
return entry, fmt.Errorf("%w: EOF reading index entry at offset %d for height %d", database.ErrNotFound, offset, height)
}
return entry, fmt.Errorf("failed to read index entry at offset %d for height %d: %w", offset, height, err)
}
@@ -592,7 +589,7 @@ func (s *Database) readIndexEntry(height BlockHeight) (indexEntry, error) {
}

if entry.IsEmpty() {
return entry, fmt.Errorf("%w: empty index entry for height %d", ErrBlockNotFound, height)
return entry, fmt.Errorf("%w: empty index entry for height %d", database.ErrNotFound, height)
}

return entry, nil
@@ -1122,7 +1119,7 @@ func (s *Database) updateBlockHeights(writtenBlockHeight BlockHeight) error {
_, err = s.readIndexEntry(nextHeightToVerify)
if err != nil {
// If no block exists at this height, we've reached the end of our contiguous sequence
if errors.Is(err, ErrBlockNotFound) {
if errors.Is(err, database.ErrNotFound) {
break
}

@@ -1181,7 +1178,7 @@ func (s *Database) updateRecoveredBlockHeights(recoveredHeights []BlockHeight) e
_, err := s.readIndexEntry(nextHeightToVerify)
if err != nil {
// If no block exists at this height, we've reached the end of our contiguous sequence
if errors.Is(err, ErrBlockNotFound) {
if errors.Is(err, database.ErrNotFound) {
break
}

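The new blank-identifier assertion (`_ database.HeightIndex = (*Database)(nil)`) is what drives these renames. As orientation only, the contract implied by the methods touched here looks roughly like the sketch below; the authoritative definition lives in `github.com/ava-labs/avalanchego/database` and may include more methods or embedded interfaces.

```go
// Package example sketches the height-indexed contract that *blockdb.Database
// now asserts it satisfies. The shape is inferred from the methods renamed in
// this diff (Put, Get, Has) plus Close; it is not copied from the real
// definition and may be incomplete.
package example

type heightIndexSketch interface {
	// Put stores a block at the given height.
	Put(height uint64, block []byte) error
	// Get returns the block at the given height, or database.ErrNotFound.
	Get(height uint64) ([]byte, error)
	// Has reports whether a block exists at the given height.
	Has(height uint64) (bool, error)
	// Close releases resources; later calls return database.ErrClosed.
	Close() error
}
```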
37 changes: 26 additions & 11 deletions x/blockdb/database_test.go
@@ -18,10 +18,25 @@ import (
"github.com/stretchr/testify/require"

"github.com/ava-labs/avalanchego/cache/lru"
"github.com/ava-labs/avalanchego/database"
"github.com/ava-labs/avalanchego/database/heightindexdb/dbtest"
"github.com/ava-labs/avalanchego/utils/compression"
"github.com/ava-labs/avalanchego/utils/logging"
)

func TestInterface(t *testing.T) {
for _, test := range dbtest.Tests {
t.Run(test.Name, func(t *testing.T) {
test.Test(t, func() database.HeightIndex {
tempDir := t.TempDir()
db, err := New(DefaultConfig().WithDir(tempDir), logging.NoLog{})
require.NoError(t, err)
return db
})
})
}
}

func TestNew_Params(t *testing.T) {
tempDir := t.TempDir()
tests := []struct {
@@ -193,8 +208,8 @@ func TestNew_IndexFileConfigPrecedence(t *testing.T) {

// Write a block at height 100 and close db
testBlock := []byte("test block data")
require.NoError(t, db.WriteBlock(100, testBlock))
readBlock, err := db.ReadBlock(100)
require.NoError(t, db.Put(100, testBlock))
readBlock, err := db.Get(100)
require.NoError(t, err)
require.Equal(t, testBlock, readBlock)
require.NoError(t, db.Close())
@@ -208,20 +223,20 @@ func TestNew_IndexFileConfigPrecedence(t *testing.T) {

// The database should still accept blocks between 100 and 200
testBlock2 := []byte("test block data 2")
require.NoError(t, db2.WriteBlock(150, testBlock2))
readBlock2, err := db2.ReadBlock(150)
require.NoError(t, db2.Put(150, testBlock2))
readBlock2, err := db2.Get(150)
require.NoError(t, err)
require.Equal(t, testBlock2, readBlock2)

// Verify that writing below initial minimum height fails
err = db2.WriteBlock(50, []byte("invalid block"))
err = db2.Put(50, []byte("invalid block"))
require.ErrorIs(t, err, ErrInvalidBlockHeight)

// Write a large block that would exceed the new config's 512KB limit
// but should succeed because we use the original 1MB limit from index file
largeBlock := make([]byte, 768*1024) // 768KB block
require.NoError(t, db2.WriteBlock(200, largeBlock))
readLargeBlock, err := db2.ReadBlock(200)
require.NoError(t, db2.Put(200, largeBlock))
readLargeBlock, err := db2.Get(200)
require.NoError(t, err)
require.Equal(t, largeBlock, readLargeBlock)
}
@@ -288,7 +303,7 @@ func TestFileCache_Eviction(t *testing.T) {
defer wg.Done()
for i := range numBlocks {
height := uint64((i + goroutineID) % numBlocks)
err := store.WriteBlock(height, blocks[height])
err := store.Put(height, blocks[height])
if err != nil {
writeErrors.Add(1)
errorMu.Lock()
@@ -317,7 +332,7 @@ func TestFileCache_Eviction(t *testing.T) {

// Verify again that all blocks are readable
for i := range numBlocks {
block, err := store.ReadBlock(uint64(i))
block, err := store.Get(uint64(i))
require.NoError(t, err, "failed to read block at height %d", i)
require.Equal(t, blocks[i], block, "block data mismatch at height %d", i)
}
@@ -341,12 +356,12 @@ func TestMaxDataFiles_CacheLimit(t *testing.T) {
// Write blocks to force multiple data files
for i := range numBlocks {
block := fixedSizeBlock(t, 512, uint64(i))
require.NoError(t, store.WriteBlock(uint64(i), block))
require.NoError(t, store.Put(uint64(i), block))
}

// Verify all blocks are still readable despite evictions
for i := range numBlocks {
block, err := store.ReadBlock(uint64(i))
block, err := store.Get(uint64(i))
require.NoError(t, err, "failed to read block at height %d after eviction", i)
require.Len(t, block, 512, "block size mismatch at height %d", i)
}
11 changes: 5 additions & 6 deletions x/blockdb/datasplit_test.go
@@ -28,7 +28,7 @@ func TestDataSplitting(t *testing.T) {
blocks := make([][]byte, numBlocks)
for i := range numBlocks {
blocks[i] = fixedSizeBlock(t, 1024, uint64(i))
require.NoError(t, store.WriteBlock(uint64(i), blocks[i]))
require.NoError(t, store.Put(uint64(i), blocks[i]))
}

// Verify that multiple data files were created.
@@ -47,7 +47,7 @@

// Verify all blocks are readable
for i := range numBlocks {
readBlock, err := store.ReadBlock(uint64(i))
readBlock, err := store.Get(uint64(i))
require.NoError(t, err)
require.Equal(t, blocks[i], readBlock)
}
@@ -60,7 +60,7 @@
store.compressor = compression.NewNoCompressor()
defer store.Close()
for i := range numBlocks {
readBlock, err := store.ReadBlock(uint64(i))
readBlock, err := store.Get(uint64(i))
require.NoError(t, err)
require.Equal(t, blocks[i], readBlock)
}
@@ -76,16 +76,15 @@
blocks := make([][]byte, numBlocks)
for i := range numBlocks {
blocks[i] = fixedSizeBlock(t, 1024, uint64(i))
require.NoError(t, store.WriteBlock(uint64(i), blocks[i]))
require.NoError(t, store.Put(uint64(i), blocks[i]))
}
store.Close()
require.NoError(t, store.Close())

// Delete the first data file (blockdb_0.dat)
firstDataFilePath := filepath.Join(store.config.DataDir, fmt.Sprintf(dataFileNameFormat, 0))
require.NoError(t, os.Remove(firstDataFilePath))

// reopen and verify the blocks
require.NoError(t, store.Close())
config = config.WithIndexDir(store.config.IndexDir).WithDataDir(store.config.DataDir)
_, err := New(config, store.log)
require.ErrorIs(t, err, ErrCorrupted)
3 changes: 0 additions & 3 deletions x/blockdb/errors.go
@@ -7,9 +7,6 @@ import "errors"

var (
ErrInvalidBlockHeight = errors.New("blockdb: invalid block height")
ErrBlockEmpty = errors.New("blockdb: block is empty")
ErrDatabaseClosed = errors.New("blockdb: database is closed")
ErrCorrupted = errors.New("blockdb: unrecoverable corruption detected")
ErrBlockTooLarge = errors.New("blockdb: block size too large")
ErrBlockNotFound = errors.New("blockdb: block not found")
)
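With `ErrBlockNotFound`, `ErrDatabaseClosed`, and `ErrBlockEmpty` removed (the empty-block check in `Put` is dropped with it, so zero-length blocks are no longer rejected), callers switch their error checks to the shared `database` sentinels. A hedged caller-side sketch, where `readAtHeight` is an illustrative helper and not anything added by this PR:

```go
package example

import (
	"errors"
	"fmt"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/x/blockdb"
)

// readAtHeight is a hypothetical helper (not part of this PR) showing the
// caller-side change: checks against the removed blockdb.ErrBlockNotFound and
// blockdb.ErrDatabaseClosed become checks against the shared database errors.
func readAtHeight(db *blockdb.Database, height uint64) ([]byte, bool, error) {
	block, err := db.Get(height)
	switch {
	case errors.Is(err, database.ErrNotFound):
		// Previously: errors.Is(err, blockdb.ErrBlockNotFound)
		return nil, false, nil
	case errors.Is(err, database.ErrClosed):
		// Previously: errors.Is(err, blockdb.ErrDatabaseClosed)
		return nil, false, fmt.Errorf("read at height %d: %w", height, err)
	case err != nil:
		return nil, false, err
	default:
		return block, true, nil
	}
}
```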